diff --git a/OCR YOLO11/license_plate_detector.pt b/OCR YOLO11/license_plate_detector.pt
new file mode 100644
index 0000000..a0ebd6b
Binary files /dev/null and b/OCR YOLO11/license_plate_detector.pt differ
diff --git a/OCR YOLO11/yolomain.py b/OCR YOLO11/yolomain.py
new file mode 100644
index 0000000..e501f7d
--- /dev/null
+++ b/OCR YOLO11/yolomain.py
@@ -0,0 +1,32 @@
+from ultralytics import YOLO
+from ultralytics.utils.plotting import Annotator, colors
+from PIL import Image
+import os
+
+os.chdir("C:/Users/celma/OneDrive - Hanze/School/periode 1.4/IOT/YOLO11/License Plate Recognition.v11i.yolov11")
+
+# Directory where the cropped license plates are saved
+crop_dir_name = "C:/Users/celma/OneDrive - Hanze/School/periode 1.4/IOT/YOLO11/License Plate Recognition.v11i.yolov11/test/crops"
+if not os.path.exists(crop_dir_name):
+    os.makedirs(crop_dir_name)
+
+model = YOLO("license_plate_detector.pt")  # Load the pre-trained YOLO11 license plate model
+names = model.names
+
+results = model.predict("test/images/000c.jpg", show=True, save=True)
+img = Image.open("test/images/000c.jpg")  # Load the original image using PIL
+
+boxes = results[0].boxes.xyxy.cpu().tolist()  # Bounding boxes as [x1, y1, x2, y2]
+clss = results[0].boxes.cls.cpu().tolist()  # Class index per detection
+# annotator = Annotator(results[0], line_width=2, example=names)
+
+# print(results)
+print(boxes)
+
+# Crop and save every detected plate; the crop has to happen inside the loop
+for idx, (box, cls) in enumerate(zip(boxes, clss)):
+    # annotator.box_label(box, color=colors(int(cls), True), label=names[int(cls)])
+    cropped_img = img.crop((int(box[0]), int(box[1]), int(box[2]), int(box[3])))
+    save_path = os.path.join(crop_dir_name, f"cropped_image_{idx}.jpg")
+    cropped_img.save(save_path)
+    cropped_img.show()
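
The folder name suggests the cropped plates are meant to be read by an OCR step, which this diff does not add yet. Below is a minimal sketch of how the crops written to `test/crops` could be consumed; the choice of `pytesseract` (with a local Tesseract install) as the OCR engine is an assumption, not part of this change.

```python
# Hypothetical follow-up step, not part of this diff: run OCR over the saved crops.
# Assumes pytesseract and a local Tesseract installation are available.
import glob
import os

import pytesseract
from PIL import Image

crop_dir_name = "test/crops"  # same directory the detection script writes to

for crop_path in sorted(glob.glob(os.path.join(crop_dir_name, "*.jpg"))):
    plate = Image.open(crop_path)
    # --psm 7 treats the crop as a single line of text, which fits a license plate
    text = pytesseract.image_to_string(plate, config="--psm 7")
    print(f"{os.path.basename(crop_path)}: {text.strip()}")
```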