From 798c596acca3c6ddd227f6d49addb8b4ba1254b3 Mon Sep 17 00:00:00 2001
From: 王庆刚
Date: Fri, 28 Mar 2025 13:19:54 +0800
Subject: [PATCH] add yolo v10 and modify pipeline

---
 __pycache__/pipeline_01.cpython-39.pyc | Bin 0 -> 8330 bytes __pycache__/track_reid.cpython-39.pyc | Bin 16206 -> 19424 bytes contrast/__pycache__/__init__.cpython-39.pyc | Bin 191 -> 191 bytes .../__pycache__/config.cpython-39.pyc | Bin 1615 -> 1615 bytes .../__pycache__/inference.cpython-39.pyc | Bin 12098 -> 12060 bytes contrast/feat_extract/inference.py | 2 +- .../model/__pycache__/CBAM.cpython-39.pyc | Bin 3020 -> 3030 bytes .../model/__pycache__/Tool.cpython-39.pyc | Bin 2083 -> 2093 bytes .../model/__pycache__/__init__.cpython-39.pyc | Bin 828 -> 838 bytes .../__pycache__/fmobilenet.cpython-39.pyc | Bin 5051 -> 5061 bytes .../model/__pycache__/lcnet.cpython-39.pyc | Bin 7221 -> 7231 bytes .../model/__pycache__/loss.cpython-39.pyc | Bin 823 -> 833 bytes .../model/__pycache__/metric.cpython-39.pyc | Bin 2978 -> 2988 bytes .../__pycache__/mobilenet_v2.cpython-39.pyc | Bin 5858 -> 5871 bytes .../__pycache__/mobilenet_v3.cpython-39.pyc | Bin 6592 -> 6605 bytes .../__pycache__/mobilevit.cpython-39.pyc | Bin 8228 -> 8228 bytes .../model/__pycache__/resbam.cpython-39.pyc | Bin 4248 -> 4261 bytes .../__pycache__/resnet_face.cpython-39.pyc | Bin 4138 -> 4148 bytes .../__pycache__/resnet_pre.cpython-39.pyc | Bin 13899 -> 13912 bytes .../model/__pycache__/utils.cpython-39.pyc | Bin 288 -> 298 bytes execute_pipeline.py | 31 + imgs_to_video.py | 127 +++ .../__pycache__/experimental.cpython-39.pyc | Bin 4791 -> 4867 bytes models/experimental.py | 6 +- pipeline.py | 67 +- pipeline_01.py | 395 +++++++++ track_reid.py | 189 +++- .../__pycache__/dotracks.cpython-39.pyc | Bin 17627 -> 17627 bytes .../__pycache__/dotracks_back.cpython-39.pyc | Bin 6965 -> 6965 bytes .../__pycache__/dotracks_front.cpython-39.pyc | Bin 5681 -> 5681 bytes .../__pycache__/track_back.cpython-39.pyc | Bin 6390 -> 6419 bytes .../__pycache__/track_front.cpython-39.pyc | Bin 4455 -> 4455 bytes .../__pycache__/bot_sort.cpython-39.pyc | Bin 7205 -> 7165 bytes .../__pycache__/byte_tracker.cpython-39.pyc | Bin 15012 -> 15012 bytes tracking/trackers/bot_sort.py | 10 +- tracking/tracking_pipeline.py | 180 ++++ .../__pycache__/annotator.cpython-39.pyc | Bin 2074 -> 2074 bytes .../__pycache__/drawtracks.cpython-39.pyc | Bin 9071 -> 9071 bytes .../utils/__pycache__/plotting.cpython-39.pyc | Bin 13489 -> 13489 bytes .../__pycache__/read_data.cpython-39.pyc | Bin 16121 -> 16121 bytes ultralytics/__init__.py | 23 +- .../__pycache__/__init__.cpython-312.pyc | Bin 627 -> 772 bytes .../__pycache__/__init__.cpython-39.pyc | Bin 600 -> 736 bytes ultralytics/cfg/__init__.py | 455 ++++++---- .../cfg/__pycache__/__init__.cpython-312.pyc | Bin 24447 -> 26679 bytes .../cfg/__pycache__/__init__.cpython-39.pyc | Bin 16974 -> 18561 bytes ultralytics/cfg/datasets/Argoverse.yaml | 17 +- .../datasets/{DOTAv2.yaml => DOTAv1.5.yaml} | 21 +- ultralytics/cfg/datasets/DOTAv1.yaml | 35 + ultralytics/cfg/datasets/GlobalWheat2020.yaml | 7 +- ultralytics/cfg/datasets/ImageNet.yaml | 11 +- ultralytics/cfg/datasets/Objects365.yaml | 9 +- ultralytics/cfg/datasets/SKU-110K.yaml | 11 +- ultralytics/cfg/datasets/VOC.yaml | 5 +- ultralytics/cfg/datasets/VisDrone.yaml | 11 +- .../cfg/datasets/african-wildlife.yaml | 24 + ultralytics/cfg/datasets/brain-tumor.yaml | 22 + ultralytics/cfg/datasets/carparts-seg.yaml | 43 + ultralytics/cfg/datasets/coco-pose.yaml | 14 +-
ultralytics/cfg/datasets/coco.yaml | 13 +- ultralytics/cfg/datasets/coco128-seg.yaml | 11 +- ultralytics/cfg/datasets/coco128.yaml | 11 +- ultralytics/cfg/datasets/coco8-pose.yaml | 12 +- ultralytics/cfg/datasets/coco8-seg.yaml | 11 +- ultralytics/cfg/datasets/coco8.yaml | 11 +- ultralytics/cfg/datasets/crack-seg.yaml | 21 + ultralytics/cfg/datasets/dota8.yaml | 34 + ultralytics/cfg/datasets/open-images-v7.yaml | 11 +- ultralytics/cfg/datasets/package-seg.yaml | 21 + ultralytics/cfg/datasets/tiger-pose.yaml | 24 + ultralytics/cfg/datasets/xView.yaml | 9 +- ultralytics/cfg/default.yaml | 205 ++--- ultralytics/cfg/models/README.md | 3 +- ultralytics/cfg/models/rt-detr/rtdetr-l.yaml | 54 +- .../cfg/models/rt-detr/rtdetr-resnet101.yaml | 42 + .../cfg/models/rt-detr/rtdetr-resnet50.yaml | 42 + ultralytics/cfg/models/rt-detr/rtdetr-x.yaml | 54 +- ultralytics/cfg/models/v10/yolov10b.yaml | 40 + ultralytics/cfg/models/v10/yolov10l.yaml | 40 + ultralytics/cfg/models/v10/yolov10m.yaml | 43 + ultralytics/cfg/models/v10/yolov10n.yaml | 40 + ultralytics/cfg/models/v10/yolov10s.yaml | 39 + ultralytics/cfg/models/v10/yolov10x.yaml | 40 + ultralytics/cfg/models/v3/yolov3-spp.yaml | 66 +- ultralytics/cfg/models/v3/yolov3-tiny.yaml | 50 +- ultralytics/cfg/models/v3/yolov3.yaml | 66 +- ultralytics/cfg/models/v5/yolov5-p6.yaml | 72 +- ultralytics/cfg/models/v5/yolov5.yaml | 54 +- ultralytics/cfg/models/v6/yolov6.yaml | 34 +- .../cfg/models/v8/yolov8-cls-resnet101.yaml | 25 + .../cfg/models/v8/yolov8-cls-resnet50.yaml | 25 + ultralytics/cfg/models/v8/yolov8-cls.yaml | 14 +- .../cfg/models/v8/yolov8-ghost-p2.yaml | 54 ++ .../cfg/models/v8/yolov8-ghost-p6.yaml | 56 ++ ultralytics/cfg/models/v8/yolov8-ghost.yaml | 47 + ultralytics/cfg/models/v8/yolov8-obb.yaml | 46 + ultralytics/cfg/models/v8/yolov8-p2.yaml | 46 +- ultralytics/cfg/models/v8/yolov8-p6.yaml | 48 +- ultralytics/cfg/models/v8/yolov8-pose-p6.yaml | 50 +- ultralytics/cfg/models/v8/yolov8-pose.yaml | 38 +- ultralytics/cfg/models/v8/yolov8-rtdetr.yaml | 46 +- ultralytics/cfg/models/v8/yolov8-seg-p6.yaml | 48 +- ultralytics/cfg/models/v8/yolov8-seg.yaml | 36 +- ultralytics/cfg/models/v8/yolov8-world.yaml | 48 ++ ultralytics/cfg/models/v8/yolov8-worldv2.yaml | 46 + ultralytics/cfg/models/v8/yolov8.yaml | 46 +- ultralytics/cfg/models/v9/yolov9c.yaml | 36 + ultralytics/cfg/models/v9/yolov9e.yaml | 60 ++ ultralytics/cfg/trackers/botsort.yaml | 14 +- ultralytics/cfg/trackers/bytetrack.yaml | 12 +- ultralytics/data/__init__.py | 11 +- .../data/__pycache__/__init__.cpython-312.pyc | Bin 468 -> 469 bytes .../data/__pycache__/__init__.cpython-39.pyc | Bin 442 -> 437 bytes .../data/__pycache__/augment.cpython-312.pyc | Bin 53434 -> 68488 bytes .../data/__pycache__/augment.cpython-39.pyc | Bin 31819 -> 44657 bytes .../data/__pycache__/base.cpython-312.pyc | Bin 18140 -> 18861 bytes .../data/__pycache__/base.cpython-39.pyc | Bin 11303 -> 11746 bytes .../data/__pycache__/build.cpython-312.pyc | Bin 9024 -> 8729 bytes .../data/__pycache__/build.cpython-39.pyc | Bin 6307 -> 6226 bytes .../__pycache__/converter.cpython-312.pyc | Bin 15069 -> 19267 bytes .../data/__pycache__/converter.cpython-39.pyc | Bin 10638 -> 13515 bytes .../data/__pycache__/dataset.cpython-312.pyc | Bin 20197 -> 22631 bytes .../data/__pycache__/dataset.cpython-39.pyc | Bin 12971 -> 14996 bytes .../data/__pycache__/loaders.cpython-312.pyc | Bin 26330 -> 32556 bytes .../data/__pycache__/loaders.cpython-39.pyc | Bin 15833 -> 20952 bytes .../data/__pycache__/utils.cpython-312.pyc | Bin 41262 -> 41893 
bytes .../data/__pycache__/utils.cpython-39.pyc | Bin 25913 -> 26696 bytes ultralytics/data/annotator.py | 8 +- ultralytics/data/augment.py | 816 +++++++++++++----- ultralytics/data/base.py | 137 +-- ultralytics/data/build.py | 97 ++- ultralytics/data/converter.py | 438 +++++++--- ultralytics/data/dataset.py | 299 ++++--- ultralytics/data/explorer/__init__.py | 5 + .../__pycache__/__init__.cpython-312.pyc | Bin 0 -> 240 bytes .../__pycache__/__init__.cpython-39.pyc | Bin 0 -> 215 bytes .../__pycache__/explorer.cpython-312.pyc | Bin 0 -> 24259 bytes .../__pycache__/explorer.cpython-39.pyc | Bin 0 -> 16904 bytes .../__pycache__/utils.cpython-312.pyc | Bin 0 -> 10472 bytes .../explorer/__pycache__/utils.cpython-39.pyc | Bin 0 -> 7285 bytes ultralytics/data/explorer/explorer.py | 472 ++++++++++ ultralytics/data/explorer/gui/__init__.py | 1 + ultralytics/data/explorer/gui/dash.py | 268 ++++++ ultralytics/data/explorer/utils.py | 166 ++++ ultralytics/data/loaders.py | 415 ++++++--- ultralytics/data/scripts/get_coco.sh | 2 +- ultralytics/data/split_dota.py | 288 +++++++ ultralytics/data/utils.py | 398 +++++---- ultralytics/engine/__init__.py | 1 + .../__pycache__/__init__.cpython-312.pyc | Bin 152 -> 153 bytes .../__pycache__/__init__.cpython-39.pyc | Bin 144 -> 139 bytes .../__pycache__/exporter.cpython-39.pyc | Bin 0 -> 39034 bytes .../engine/__pycache__/model.cpython-312.pyc | Bin 25138 -> 45491 bytes .../engine/__pycache__/model.cpython-39.pyc | Bin 17216 -> 35770 bytes .../__pycache__/predictor.cpython-312.pyc | Bin 24088 -> 24360 bytes .../__pycache__/predictor.cpython-39.pyc | Bin 13844 -> 14179 bytes .../__pycache__/results.cpython-312.pyc | Bin 35121 -> 43421 bytes .../engine/__pycache__/results.cpython-39.pyc | Bin 24466 -> 30534 bytes .../__pycache__/trainer.cpython-312.pyc | Bin 45171 -> 48117 bytes .../engine/__pycache__/trainer.cpython-39.pyc | Bin 24813 -> 26170 bytes .../__pycache__/validator.cpython-312.pyc | Bin 20226 -> 21120 bytes .../__pycache__/validator.cpython-39.pyc | Bin 13079 -> 13396 bytes ultralytics/engine/exporter.py | 783 ++++++++++------- ultralytics/engine/model.py | 791 ++++++++++++----- ultralytics/engine/predictor.py | 357 ++++---- ultralytics/engine/results.py | 452 ++++++---- ultralytics/engine/trainer.py | 515 ++++++----- ultralytics/engine/tuner.py | 209 +++-- ultralytics/engine/validator.py | 86 +- ultralytics/hub/__init__.py | 94 +- .../hub/__pycache__/__init__.cpython-312.pyc | Bin 5396 -> 6856 bytes .../hub/__pycache__/__init__.cpython-39.pyc | Bin 4056 -> 4983 bytes .../hub/__pycache__/auth.cpython-312.pyc | Bin 5377 -> 6061 bytes .../hub/__pycache__/auth.cpython-39.pyc | Bin 3627 -> 4297 bytes .../hub/__pycache__/utils.cpython-312.pyc | Bin 11256 -> 11407 bytes .../hub/__pycache__/utils.cpython-39.pyc | Bin 8346 -> 8464 bytes ultralytics/hub/auth.py | 75 +- ultralytics/hub/session.py | 423 ++++++--- ultralytics/hub/utils.py | 121 +-- ultralytics/models/__init__.py | 5 +- .../__pycache__/__init__.cpython-312.pyc | Bin 295 -> 366 bytes .../__pycache__/__init__.cpython-39.pyc | Bin 277 -> 341 bytes ultralytics/models/fastsam/__init__.py | 2 +- .../__pycache__/__init__.cpython-312.pyc | Bin 381 -> 382 bytes .../__pycache__/__init__.cpython-39.pyc | Bin 362 -> 357 bytes .../fastsam/__pycache__/model.cpython-312.pyc | Bin 1592 -> 1688 bytes .../fastsam/__pycache__/model.cpython-39.pyc | Bin 1356 -> 1444 bytes .../__pycache__/predict.cpython-312.pyc | Bin 4182 -> 5754 bytes .../__pycache__/predict.cpython-39.pyc | Bin 2316 -> 3937 bytes 
.../__pycache__/prompt.cpython-312.pyc | Bin 20037 -> 22669 bytes .../fastsam/__pycache__/prompt.cpython-39.pyc | Bin 10335 -> 13138 bytes .../fastsam/__pycache__/utils.cpython-312.pyc | Bin 2724 -> 2725 bytes .../fastsam/__pycache__/utils.cpython-39.pyc | Bin 1910 -> 1905 bytes .../fastsam/__pycache__/val.cpython-312.pyc | Bin 1138 -> 2434 bytes .../fastsam/__pycache__/val.cpython-39.pyc | Bin 890 -> 2175 bytes ultralytics/models/fastsam/model.py | 16 +- ultralytics/models/fastsam/predict.py | 51 +- ultralytics/models/fastsam/prompt.py | 154 ++-- ultralytics/models/fastsam/utils.py | 10 +- ultralytics/models/fastsam/val.py | 30 +- ultralytics/models/nas/__init__.py | 2 +- .../nas/__pycache__/__init__.cpython-312.pyc | Bin 315 -> 316 bytes .../nas/__pycache__/__init__.cpython-39.pyc | Bin 297 -> 292 bytes .../nas/__pycache__/model.cpython-312.pyc | Bin 3354 -> 4318 bytes .../nas/__pycache__/model.cpython-39.pyc | Bin 2391 -> 3344 bytes .../nas/__pycache__/predict.cpython-312.pyc | Bin 2202 -> 3083 bytes .../nas/__pycache__/predict.cpython-39.pyc | Bin 1317 -> 2191 bytes .../nas/__pycache__/val.cpython-312.pyc | Bin 1412 -> 2365 bytes .../models/nas/__pycache__/val.cpython-39.pyc | Bin 950 -> 1895 bytes ultralytics/models/nas/model.py | 42 +- ultralytics/models/nas/predict.py | 37 +- ultralytics/models/nas/val.py | 44 +- ultralytics/models/rtdetr/__init__.py | 2 +- .../__pycache__/__init__.cpython-312.pyc | Bin 327 -> 328 bytes .../__pycache__/__init__.cpython-39.pyc | Bin 309 -> 304 bytes .../rtdetr/__pycache__/model.cpython-312.pyc | Bin 1482 -> 2513 bytes .../rtdetr/__pycache__/model.cpython-39.pyc | Bin 1255 -> 2339 bytes .../__pycache__/predict.cpython-312.pyc | Bin 3520 -> 4760 bytes .../rtdetr/__pycache__/predict.cpython-39.pyc | Bin 2555 -> 3735 bytes .../rtdetr/__pycache__/train.cpython-312.pyc | Bin 3896 -> 4899 bytes .../rtdetr/__pycache__/train.cpython-39.pyc | Bin 2996 -> 3985 bytes .../rtdetr/__pycache__/val.cpython-312.pyc | Bin 7848 -> 7135 bytes .../rtdetr/__pycache__/val.cpython-39.pyc | Bin 4774 -> 5168 bytes ultralytics/models/rtdetr/model.py | 48 +- ultralytics/models/rtdetr/predict.py | 38 +- ultralytics/models/rtdetr/train.py | 69 +- ultralytics/models/rtdetr/val.py | 118 ++- ultralytics/models/sam/__init__.py | 4 +- .../sam/__pycache__/__init__.cpython-312.pyc | Bin 268 -> 267 bytes .../sam/__pycache__/__init__.cpython-39.pyc | Bin 249 -> 244 bytes .../sam/__pycache__/amg.cpython-312.pyc | Bin 12079 -> 12232 bytes .../models/sam/__pycache__/amg.cpython-39.pyc | Bin 7508 -> 7654 bytes .../sam/__pycache__/build.cpython-312.pyc | Bin 4526 -> 4561 bytes .../sam/__pycache__/build.cpython-39.pyc | Bin 3679 -> 3687 bytes .../sam/__pycache__/model.cpython-312.pyc | Bin 2806 -> 5727 bytes .../sam/__pycache__/model.cpython-39.pyc | Bin 2309 -> 5214 bytes .../sam/__pycache__/predict.cpython-312.pyc | Bin 21096 -> 27492 bytes .../sam/__pycache__/predict.cpython-39.pyc | Bin 13763 -> 20134 bytes ultralytics/models/sam/amg.py | 51 +- ultralytics/models/sam/build.py | 88 +- ultralytics/models/sam/model.py | 91 +- .../__pycache__/__init__.cpython-312.pyc | Bin 164 -> 165 bytes .../__pycache__/__init__.cpython-39.pyc | Bin 156 -> 151 bytes .../__pycache__/decoders.cpython-312.pyc | Bin 8694 -> 10109 bytes .../__pycache__/decoders.cpython-39.pyc | Bin 5718 -> 7135 bytes .../__pycache__/encoders.cpython-312.pyc | Bin 30517 -> 33059 bytes .../__pycache__/encoders.cpython-39.pyc | Bin 19494 -> 22027 bytes .../modules/__pycache__/sam.cpython-312.pyc | Bin 2363 -> 3369 bytes 
.../modules/__pycache__/sam.cpython-39.pyc | Bin 1874 -> 2869 bytes .../__pycache__/tiny_encoder.cpython-312.pyc | Bin 29490 -> 38129 bytes .../__pycache__/tiny_encoder.cpython-39.pyc | Bin 17053 -> 25663 bytes .../__pycache__/transformer.cpython-312.pyc | Bin 10344 -> 13066 bytes .../__pycache__/transformer.cpython-39.pyc | Bin 6803 -> 9471 bytes ultralytics/models/sam/modules/decoders.py | 45 +- ultralytics/models/sam/modules/encoders.py | 157 ++-- ultralytics/models/sam/modules/sam.py | 38 +- .../models/sam/modules/tiny_encoder.py | 402 ++++++--- ultralytics/models/sam/modules/transformer.py | 60 +- ultralytics/models/sam/predict.py | 356 ++++---- ultralytics/models/utils/loss.py | 260 +++--- ultralytics/models/utils/ops.py | 108 +-- ultralytics/models/yolo/__init__.py | 6 +- .../yolo/__pycache__/__init__.cpython-312.pyc | Bin 357 -> 402 bytes .../yolo/__pycache__/__init__.cpython-39.pyc | Bin 336 -> 375 bytes .../yolo/__pycache__/model.cpython-312.pyc | Bin 2087 -> 5373 bytes .../yolo/__pycache__/model.cpython-39.pyc | Bin 1300 -> 3457 bytes ultralytics/models/yolo/classify/__init__.py | 2 +- .../__pycache__/__init__.cpython-312.pyc | Bin 465 -> 466 bytes .../__pycache__/__init__.cpython-39.pyc | Bin 446 -> 441 bytes .../__pycache__/predict.cpython-312.pyc | Bin 3138 -> 4128 bytes .../__pycache__/predict.cpython-39.pyc | Bin 2389 -> 3021 bytes .../__pycache__/train.cpython-312.pyc | Bin 10136 -> 10300 bytes .../classify/__pycache__/train.cpython-39.pyc | Bin 6840 -> 6964 bytes .../classify/__pycache__/val.cpython-312.pyc | Bin 7863 -> 7972 bytes .../classify/__pycache__/val.cpython-39.pyc | Bin 5344 -> 5443 bytes ultralytics/models/yolo/classify/predict.py | 16 +- ultralytics/models/yolo/classify/train.py | 76 +- ultralytics/models/yolo/classify/val.py | 51 +- ultralytics/models/yolo/detect/__init__.py | 2 +- .../__pycache__/__init__.cpython-312.pyc | Bin 348 -> 349 bytes .../__pycache__/__init__.cpython-39.pyc | Bin 330 -> 325 bytes .../__pycache__/predict.cpython-312.pyc | Bin 2353 -> 2354 bytes .../detect/__pycache__/predict.cpython-39.pyc | Bin 1628 -> 1625 bytes .../detect/__pycache__/train.cpython-312.pyc | Bin 7917 -> 9081 bytes .../detect/__pycache__/train.cpython-39.pyc | Bin 6064 -> 6703 bytes .../detect/__pycache__/val.cpython-312.pyc | Bin 20139 -> 20856 bytes .../detect/__pycache__/val.cpython-39.pyc | Bin 11891 -> 12444 bytes ultralytics/models/yolo/detect/predict.py | 14 +- ultralytics/models/yolo/detect/train.py | 87 +- ultralytics/models/yolo/detect/val.py | 223 ++--- ultralytics/models/yolo/model.py | 127 ++- ultralytics/models/yolo/obb/__init__.py | 7 + .../obb/__pycache__/__init__.cpython-312.pyc | Bin 0 -> 328 bytes .../obb/__pycache__/__init__.cpython-39.pyc | Bin 0 -> 304 bytes .../obb/__pycache__/predict.cpython-312.pyc | Bin 0 -> 3250 bytes .../obb/__pycache__/predict.cpython-39.pyc | Bin 0 -> 2178 bytes .../obb/__pycache__/train.cpython-312.pyc | Bin 0 -> 2350 bytes .../yolo/obb/__pycache__/train.cpython-39.pyc | Bin 0 -> 1879 bytes .../yolo/obb/__pycache__/val.cpython-312.pyc | Bin 0 -> 13076 bytes .../yolo/obb/__pycache__/val.cpython-39.pyc | Bin 0 -> 8281 bytes ultralytics/models/yolo/obb/predict.py | 53 ++ ultralytics/models/yolo/obb/train.py | 42 + ultralytics/models/yolo/obb/val.py | 185 ++++ ultralytics/models/yolo/pose/__init__.py | 2 +- .../pose/__pycache__/__init__.cpython-312.pyc | Bin 331 -> 332 bytes .../pose/__pycache__/__init__.cpython-39.pyc | Bin 313 -> 308 bytes .../pose/__pycache__/predict.cpython-312.pyc | Bin 3791 -> 3883 bytes 
.../pose/__pycache__/predict.cpython-39.pyc | Bin 2358 -> 2453 bytes .../pose/__pycache__/train.cpython-312.pyc | Bin 4322 -> 4372 bytes .../pose/__pycache__/train.cpython-39.pyc | Bin 3261 -> 3286 bytes .../yolo/pose/__pycache__/val.cpython-312.pyc | Bin 14574 -> 14800 bytes .../yolo/pose/__pycache__/val.cpython-39.pyc | Bin 9060 -> 9491 bytes ultralytics/models/yolo/pose/predict.py | 30 +- ultralytics/models/yolo/pose/train.py | 50 +- ultralytics/models/yolo/pose/val.py | 203 +++-- ultralytics/models/yolo/segment/__init__.py | 2 +- .../__pycache__/__init__.cpython-312.pyc | Bin 358 -> 359 bytes .../__pycache__/__init__.cpython-39.pyc | Bin 340 -> 335 bytes .../__pycache__/predict.cpython-312.pyc | Bin 3694 -> 3886 bytes .../__pycache__/predict.cpython-39.pyc | Bin 2296 -> 2479 bytes .../segment/__pycache__/train.cpython-312.pyc | Bin 3434 -> 3489 bytes .../segment/__pycache__/train.cpython-39.pyc | Bin 2735 -> 2761 bytes .../segment/__pycache__/val.cpython-312.pyc | Bin 16272 -> 16724 bytes .../segment/__pycache__/val.cpython-39.pyc | Bin 9643 -> 10204 bytes ultralytics/models/yolo/segment/predict.py | 22 +- ultralytics/models/yolo/segment/train.py | 28 +- ultralytics/models/yolo/segment/val.py | 210 +++-- ultralytics/models/yolov10/__init__.py | 5 + .../__pycache__/__init__.cpython-312.pyc | Bin 0 -> 348 bytes .../__pycache__/__init__.cpython-39.pyc | Bin 0 -> 326 bytes .../yolov10/__pycache__/card.cpython-312.pyc | Bin 0 -> 1735 bytes .../yolov10/__pycache__/card.cpython-39.pyc | Bin 0 -> 1676 bytes .../yolov10/__pycache__/model.cpython-312.pyc | Bin 0 -> 2046 bytes .../yolov10/__pycache__/model.cpython-39.pyc | Bin 0 -> 1610 bytes .../__pycache__/predict.cpython-312.pyc | Bin 0 -> 2878 bytes .../__pycache__/predict.cpython-39.pyc | Bin 0 -> 1683 bytes .../yolov10/__pycache__/train.cpython-312.pyc | Bin 0 -> 1548 bytes .../yolov10/__pycache__/train.cpython-39.pyc | Bin 0 -> 1228 bytes .../yolov10/__pycache__/val.cpython-312.pyc | Bin 0 -> 1837 bytes .../yolov10/__pycache__/val.cpython-39.pyc | Bin 0 -> 1191 bytes ultralytics/models/yolov10/card.py | 64 ++ ultralytics/models/yolov10/model.py | 36 + ultralytics/models/yolov10/predict.py | 38 + ultralytics/models/yolov10/train.py | 20 + ultralytics/models/yolov10/val.py | 24 + ultralytics/nn/__init__.py | 32 +- .../nn/__pycache__/__init__.cpython-312.pyc | Bin 561 -> 562 bytes .../nn/__pycache__/__init__.cpython-39.pyc | Bin 528 -> 523 bytes .../__pycache__/autobackend.cpython-312.pyc | Bin 33551 -> 35820 bytes .../nn/__pycache__/autobackend.cpython-39.pyc | Bin 18662 -> 20312 bytes .../nn/__pycache__/tasks.cpython-312.pyc | Bin 50012 -> 61116 bytes .../nn/__pycache__/tasks.cpython-39.pyc | Bin 32611 -> 40317 bytes ultralytics/nn/autobackend.py | 527 ++++++----- ultralytics/nn/modules/__init__.py | 162 +++- .../__pycache__/__init__.cpython-312.pyc | Bin 1716 -> 2292 bytes .../__pycache__/__init__.cpython-39.pyc | Bin 1632 -> 2182 bytes .../modules/__pycache__/block.cpython-312.pyc | Bin 24907 -> 60949 bytes .../modules/__pycache__/block.cpython-39.pyc | Bin 15800 -> 37545 bytes .../modules/__pycache__/conv.cpython-312.pyc | Bin 21224 -> 22432 bytes .../modules/__pycache__/conv.cpython-39.pyc | Bin 13392 -> 14598 bytes .../modules/__pycache__/head.cpython-312.pyc | Bin 24760 -> 38289 bytes .../modules/__pycache__/head.cpython-39.pyc | Bin 13303 -> 21596 bytes .../__pycache__/transformer.cpython-312.pyc | Bin 26932 -> 28571 bytes .../__pycache__/transformer.cpython-39.pyc | Bin 15331 -> 16991 bytes 
.../modules/__pycache__/utils.cpython-312.pyc | Bin 4253 -> 4412 bytes .../modules/__pycache__/utils.cpython-39.pyc | Bin 2753 -> 2913 bytes ultralytics/nn/modules/block.py | 567 +++++++++++- ultralytics/nn/modules/conv.py | 107 ++- ultralytics/nn/modules/head.py | 369 +++++--- ultralytics/nn/modules/transformer.py | 157 ++-- ultralytics/nn/modules/utils.py | 49 +- ultralytics/nn/tasks.py | 630 ++++++++++---- ultralytics/solutions/__init__.py | 1 + ultralytics/solutions/ai_gym.py | 150 ++++ ultralytics/solutions/distance_calculation.py | 181 ++++ ultralytics/solutions/heatmap.py | 281 ++++++ ultralytics/solutions/object_counter.py | 278 ++++++ ultralytics/solutions/speed_estimation.py | 198 +++++ ultralytics/trackers/README.md | 331 +++++-- ultralytics/trackers/__init__.py | 2 +- .../__pycache__/__init__.cpython-39.pyc | Bin 312 -> 0 bytes .../__pycache__/basetrack.cpython-39.pyc | Bin 2415 -> 0 bytes .../__pycache__/bot_sort.cpython-39.pyc | Bin 6022 -> 0 bytes .../__pycache__/byte_tracker.cpython-39.pyc | Bin 13250 -> 0 bytes .../trackers/__pycache__/track.cpython-39.pyc | Bin 2336 -> 0 bytes ultralytics/trackers/basetrack.py | 72 +- ultralytics/trackers/bot_sort.py | 70 +- ultralytics/trackers/byte_tracker.py | 205 +++-- ultralytics/trackers/track.py | 59 +- .../utils/__pycache__/__init__.cpython-39.pyc | Bin 152 -> 0 bytes .../utils/__pycache__/gmc.cpython-39.pyc | Bin 5409 -> 0 bytes .../__pycache__/kalman_filter.cpython-39.pyc | Bin 11415 -> 0 bytes .../utils/__pycache__/matching.cpython-39.pyc | Bin 4763 -> 0 bytes ultralytics/trackers/utils/gmc.py | 196 +++-- ultralytics/trackers/utils/kalman_filter.py | 356 ++++---- ultralytics/trackers/utils/matching.py | 40 +- ultralytics/utils/__init__.py | 579 ++++++++----- .../__pycache__/__init__.cpython-312.pyc | Bin 42706 -> 50371 bytes .../utils/__pycache__/__init__.cpython-39.pyc | Bin 31143 -> 36687 bytes .../__pycache__/autobatch.cpython-312.pyc | Bin 5329 -> 5324 bytes .../__pycache__/autobatch.cpython-39.pyc | Bin 3646 -> 3639 bytes .../utils/__pycache__/checks.cpython-312.pyc | Bin 29400 -> 36740 bytes .../utils/__pycache__/checks.cpython-39.pyc | Bin 20151 -> 25043 bytes .../utils/__pycache__/dist.cpython-312.pyc | Bin 3904 -> 3501 bytes .../utils/__pycache__/dist.cpython-39.pyc | Bin 2563 -> 2436 bytes .../__pycache__/downloads.cpython-312.pyc | Bin 22772 -> 26967 bytes .../__pycache__/downloads.cpython-39.pyc | Bin 16116 -> 20092 bytes .../utils/__pycache__/files.cpython-312.pyc | Bin 7698 -> 9245 bytes .../utils/__pycache__/files.cpython-39.pyc | Bin 5212 -> 6426 bytes .../__pycache__/instance.cpython-312.pyc | Bin 19828 -> 21531 bytes .../utils/__pycache__/instance.cpython-39.pyc | Bin 13061 -> 14952 bytes .../utils/__pycache__/loss.cpython-312.pyc | Bin 27915 -> 44134 bytes .../utils/__pycache__/loss.cpython-39.pyc | Bin 14355 -> 24934 bytes .../utils/__pycache__/metrics.cpython-312.pyc | Bin 55495 -> 71188 bytes .../utils/__pycache__/metrics.cpython-39.pyc | Bin 39022 -> 49238 bytes .../utils/__pycache__/ops.cpython-312.pyc | Bin 39766 -> 44038 bytes .../utils/__pycache__/ops.cpython-39.pyc | Bin 27630 -> 30100 bytes .../utils/__pycache__/patches.cpython-312.pyc | Bin 2878 -> 3469 bytes .../utils/__pycache__/patches.cpython-39.pyc | Bin 2209 -> 2628 bytes .../__pycache__/plotting.cpython-312.pyc | Bin 39246 -> 61485 bytes .../utils/__pycache__/plotting.cpython-39.pyc | Bin 22485 -> 36606 bytes .../utils/__pycache__/tal.cpython-312.pyc | Bin 17017 -> 20417 bytes .../utils/__pycache__/tal.cpython-39.pyc | Bin 10750 -> 13113 
bytes .../__pycache__/torch_utils.cpython-312.pyc | Bin 36054 -> 37739 bytes .../__pycache__/torch_utils.cpython-39.pyc | Bin 21088 -> 22731 bytes ultralytics/utils/autobatch.py | 26 +- ultralytics/utils/benchmarks.py | 245 +++--- ultralytics/utils/callbacks/__init__.py | 2 +- .../__pycache__/__init__.cpython-312.pyc | Bin 302 -> 303 bytes .../__pycache__/__init__.cpython-39.pyc | Bin 282 -> 277 bytes .../__pycache__/base.cpython-312.pyc | Bin 6459 -> 6307 bytes .../callbacks/__pycache__/base.cpython-39.pyc | Bin 5723 -> 5667 bytes .../callbacks/__pycache__/hub.cpython-39.pyc | Bin 2945 -> 2932 bytes ultralytics/utils/callbacks/base.py | 68 +- ultralytics/utils/callbacks/clearml.py | 94 +- ultralytics/utils/callbacks/comet.py | 183 ++-- ultralytics/utils/callbacks/dvc.py | 81 +- ultralytics/utils/callbacks/hub.py | 71 +- ultralytics/utils/callbacks/mlflow.py | 151 +++- ultralytics/utils/callbacks/neptune.py | 65 +- ultralytics/utils/callbacks/raytune.py | 13 +- ultralytics/utils/callbacks/tensorboard.py | 81 +- ultralytics/utils/callbacks/wb.py | 124 ++- ultralytics/utils/checks.py | 461 +++++++--- ultralytics/utils/dist.py | 46 +- ultralytics/utils/downloads.py | 322 ++++--- ultralytics/utils/errors.py | 14 +- ultralytics/utils/files.py | 73 +- ultralytics/utils/instance.py | 109 ++- ultralytics/utils/loss.py | 533 +++++++++--- ultralytics/utils/metrics.py | 723 +++++++++++----- ultralytics/utils/ops.py | 337 +++++--- ultralytics/utils/patches.py | 43 +- ultralytics/utils/plotting.py | 709 ++++++++++++--- ultralytics/utils/tal.py | 210 +++-- ultralytics/utils/torch_utils.py | 364 +++++--- ultralytics/utils/triton.py | 92 ++ ultralytics/utils/tuner.py | 117 +-- ultralytics/yolo/__init__.py | 5 - .../yolo/__pycache__/__init__.cpython-39.pyc | Bin 184 -> 0 bytes ultralytics/yolo/cfg/__init__.py | 10 - ultralytics/yolo/data/__init__.py | 17 - ultralytics/yolo/engine/__init__.py | 10 - .../__pycache__/__init__.cpython-39.pyc | Bin 489 -> 0 bytes ultralytics/yolo/utils/__init__.py | 15 - .../utils/__pycache__/__init__.cpython-39.pyc | Bin 755 -> 0 bytes ultralytics/yolo/v8/__init__.py | 10 - .../v8/__pycache__/__init__.cpython-39.pyc | Bin 487 -> 0 bytes utils/__pycache__/dataloaders.cpython-39.pyc | Bin 43426 -> 43426 bytes utils/__pycache__/getsource.cpython-39.pyc | Bin 1740 -> 1740 bytes utils/dataloaders.py | 18 +- 471 files changed, 19109 insertions(+), 7342 deletions(-) create mode 100644 __pycache__/pipeline_01.cpython-39.pyc create mode 100644 execute_pipeline.py create mode 100644 imgs_to_video.py create mode 100644 pipeline_01.py create mode 100644 tracking/tracking_pipeline.py rename ultralytics/cfg/datasets/{DOTAv2.yaml => DOTAv1.5.yaml} (56%) create mode 100644 ultralytics/cfg/datasets/DOTAv1.yaml create mode 100644 ultralytics/cfg/datasets/african-wildlife.yaml create mode 100644 ultralytics/cfg/datasets/brain-tumor.yaml create mode 100644 ultralytics/cfg/datasets/carparts-seg.yaml create mode 100644 ultralytics/cfg/datasets/crack-seg.yaml create mode 100644 ultralytics/cfg/datasets/dota8.yaml create mode 100644 ultralytics/cfg/datasets/package-seg.yaml create mode 100644 ultralytics/cfg/datasets/tiger-pose.yaml create mode 100644 ultralytics/cfg/models/rt-detr/rtdetr-resnet101.yaml create mode 100644 ultralytics/cfg/models/rt-detr/rtdetr-resnet50.yaml create mode 100644 ultralytics/cfg/models/v10/yolov10b.yaml create mode 100644 ultralytics/cfg/models/v10/yolov10l.yaml create mode 100644 ultralytics/cfg/models/v10/yolov10m.yaml create mode 100644 
ultralytics/cfg/models/v10/yolov10n.yaml create mode 100644 ultralytics/cfg/models/v10/yolov10s.yaml create mode 100644 ultralytics/cfg/models/v10/yolov10x.yaml create mode 100644 ultralytics/cfg/models/v8/yolov8-cls-resnet101.yaml create mode 100644 ultralytics/cfg/models/v8/yolov8-cls-resnet50.yaml create mode 100644 ultralytics/cfg/models/v8/yolov8-ghost-p2.yaml create mode 100644 ultralytics/cfg/models/v8/yolov8-ghost-p6.yaml create mode 100644 ultralytics/cfg/models/v8/yolov8-ghost.yaml create mode 100644 ultralytics/cfg/models/v8/yolov8-obb.yaml create mode 100644 ultralytics/cfg/models/v8/yolov8-world.yaml create mode 100644 ultralytics/cfg/models/v8/yolov8-worldv2.yaml create mode 100644 ultralytics/cfg/models/v9/yolov9c.yaml create mode 100644 ultralytics/cfg/models/v9/yolov9e.yaml create mode 100644 ultralytics/data/explorer/__init__.py create mode 100644 ultralytics/data/explorer/__pycache__/__init__.cpython-312.pyc create mode 100644 ultralytics/data/explorer/__pycache__/__init__.cpython-39.pyc create mode 100644 ultralytics/data/explorer/__pycache__/explorer.cpython-312.pyc create mode 100644 ultralytics/data/explorer/__pycache__/explorer.cpython-39.pyc create mode 100644 ultralytics/data/explorer/__pycache__/utils.cpython-312.pyc create mode 100644 ultralytics/data/explorer/__pycache__/utils.cpython-39.pyc create mode 100644 ultralytics/data/explorer/explorer.py create mode 100644 ultralytics/data/explorer/gui/__init__.py create mode 100644 ultralytics/data/explorer/gui/dash.py create mode 100644 ultralytics/data/explorer/utils.py create mode 100644 ultralytics/data/split_dota.py create mode 100644 ultralytics/engine/__pycache__/exporter.cpython-39.pyc create mode 100644 ultralytics/models/yolo/obb/__init__.py create mode 100644 ultralytics/models/yolo/obb/__pycache__/__init__.cpython-312.pyc create mode 100644 ultralytics/models/yolo/obb/__pycache__/__init__.cpython-39.pyc create mode 100644 ultralytics/models/yolo/obb/__pycache__/predict.cpython-312.pyc create mode 100644 ultralytics/models/yolo/obb/__pycache__/predict.cpython-39.pyc create mode 100644 ultralytics/models/yolo/obb/__pycache__/train.cpython-312.pyc create mode 100644 ultralytics/models/yolo/obb/__pycache__/train.cpython-39.pyc create mode 100644 ultralytics/models/yolo/obb/__pycache__/val.cpython-312.pyc create mode 100644 ultralytics/models/yolo/obb/__pycache__/val.cpython-39.pyc create mode 100644 ultralytics/models/yolo/obb/predict.py create mode 100644 ultralytics/models/yolo/obb/train.py create mode 100644 ultralytics/models/yolo/obb/val.py create mode 100644 ultralytics/models/yolov10/__init__.py create mode 100644 ultralytics/models/yolov10/__pycache__/__init__.cpython-312.pyc create mode 100644 ultralytics/models/yolov10/__pycache__/__init__.cpython-39.pyc create mode 100644 ultralytics/models/yolov10/__pycache__/card.cpython-312.pyc create mode 100644 ultralytics/models/yolov10/__pycache__/card.cpython-39.pyc create mode 100644 ultralytics/models/yolov10/__pycache__/model.cpython-312.pyc create mode 100644 ultralytics/models/yolov10/__pycache__/model.cpython-39.pyc create mode 100644 ultralytics/models/yolov10/__pycache__/predict.cpython-312.pyc create mode 100644 ultralytics/models/yolov10/__pycache__/predict.cpython-39.pyc create mode 100644 ultralytics/models/yolov10/__pycache__/train.cpython-312.pyc create mode 100644 ultralytics/models/yolov10/__pycache__/train.cpython-39.pyc create mode 100644 ultralytics/models/yolov10/__pycache__/val.cpython-312.pyc create mode 100644 
ultralytics/models/yolov10/__pycache__/val.cpython-39.pyc create mode 100644 ultralytics/models/yolov10/card.py create mode 100644 ultralytics/models/yolov10/model.py create mode 100644 ultralytics/models/yolov10/predict.py create mode 100644 ultralytics/models/yolov10/train.py create mode 100644 ultralytics/models/yolov10/val.py create mode 100644 ultralytics/solutions/__init__.py create mode 100644 ultralytics/solutions/ai_gym.py create mode 100644 ultralytics/solutions/distance_calculation.py create mode 100644 ultralytics/solutions/heatmap.py create mode 100644 ultralytics/solutions/object_counter.py create mode 100644 ultralytics/solutions/speed_estimation.py delete mode 100644 ultralytics/trackers/__pycache__/__init__.cpython-39.pyc delete mode 100644 ultralytics/trackers/__pycache__/basetrack.cpython-39.pyc delete mode 100644 ultralytics/trackers/__pycache__/bot_sort.cpython-39.pyc delete mode 100644 ultralytics/trackers/__pycache__/byte_tracker.cpython-39.pyc delete mode 100644 ultralytics/trackers/__pycache__/track.cpython-39.pyc delete mode 100644 ultralytics/trackers/utils/__pycache__/__init__.cpython-39.pyc delete mode 100644 ultralytics/trackers/utils/__pycache__/gmc.cpython-39.pyc delete mode 100644 ultralytics/trackers/utils/__pycache__/kalman_filter.cpython-39.pyc delete mode 100644 ultralytics/trackers/utils/__pycache__/matching.cpython-39.pyc create mode 100644 ultralytics/utils/triton.py delete mode 100644 ultralytics/yolo/__init__.py delete mode 100644 ultralytics/yolo/__pycache__/__init__.cpython-39.pyc delete mode 100644 ultralytics/yolo/cfg/__init__.py delete mode 100644 ultralytics/yolo/data/__init__.py delete mode 100644 ultralytics/yolo/engine/__init__.py delete mode 100644 ultralytics/yolo/engine/__pycache__/__init__.cpython-39.pyc delete mode 100644 ultralytics/yolo/utils/__init__.py delete mode 100644 ultralytics/yolo/utils/__pycache__/__init__.cpython-39.pyc delete mode 100644 ultralytics/yolo/v8/__init__.py delete mode 100644 ultralytics/yolo/v8/__pycache__/__init__.cpython-39.pyc

diff --git a/__pycache__/pipeline_01.cpython-39.pyc b/__pycache__/pipeline_01.cpython-39.pyc
new file mode 100644
Binary files /dev/null and b/__pycache__/pipeline_01.cpython-39.pyc differ
diff --git a/__pycache__/track_reid.cpython-39.pyc b/__pycache__/track_reid.cpython-39.pyc
Binary files a/__pycache__/track_reid.cpython-39.pyc and b/__pycache__/track_reid.cpython-39.pyc differ
diff --git a/contrast/__pycache__/__init__.cpython-39.pyc b/contrast/__pycache__/__init__.cpython-39.pyc
Binary files a/contrast/__pycache__/__init__.cpython-39.pyc and b/contrast/__pycache__/__init__.cpython-39.pyc differ
diff --git a/contrast/feat_extract/__pycache__/config.cpython-39.pyc b/contrast/feat_extract/__pycache__/config.cpython-39.pyc
Binary files a/contrast/feat_extract/__pycache__/config.cpython-39.pyc and b/contrast/feat_extract/__pycache__/config.cpython-39.pyc differ
diff --git a/contrast/feat_extract/__pycache__/inference.cpython-39.pyc b/contrast/feat_extract/__pycache__/inference.cpython-39.pyc
Binary files a/contrast/feat_extract/__pycache__/inference.cpython-39.pyc and b/contrast/feat_extract/__pycache__/inference.cpython-39.pyc differ
diff --git a/contrast/feat_extract/inference.py b/contrast/feat_extract/inference.py
index ab5c7b9..755f049 100644
--- a/contrast/feat_extract/inference.py
+++ b/contrast/feat_extract/inference.py
@@ -48,7 +48,7 @@ class FeatsInterface:
         modpath = os.path.join(curpath, conf.test_model)
         self.model.load_state_dict(torch.load(modpath, map_location=conf.device))
         self.model.eval()
-        print('load model {} '.format(conf.testbackbone))
+        # print('load model {} '.format(conf.testbackbone))
 
     def inference(self, images, detections=None):
         '''
diff --git a/contrast/feat_extract/model/__pycache__/CBAM.cpython-39.pyc b/contrast/feat_extract/model/__pycache__/CBAM.cpython-39.pyc
Binary files a/contrast/feat_extract/model/__pycache__/CBAM.cpython-39.pyc and b/contrast/feat_extract/model/__pycache__/CBAM.cpython-39.pyc differ
diff --git a/contrast/feat_extract/model/__pycache__/Tool.cpython-39.pyc b/contrast/feat_extract/model/__pycache__/Tool.cpython-39.pyc
Binary files a/contrast/feat_extract/model/__pycache__/Tool.cpython-39.pyc and b/contrast/feat_extract/model/__pycache__/Tool.cpython-39.pyc differ
diff --git a/contrast/feat_extract/model/__pycache__/__init__.cpython-39.pyc b/contrast/feat_extract/model/__pycache__/__init__.cpython-39.pyc
Binary files a/contrast/feat_extract/model/__pycache__/__init__.cpython-39.pyc and b/contrast/feat_extract/model/__pycache__/__init__.cpython-39.pyc differ
diff --git a/contrast/feat_extract/model/__pycache__/fmobilenet.cpython-39.pyc b/contrast/feat_extract/model/__pycache__/fmobilenet.cpython-39.pyc
Binary files a/contrast/feat_extract/model/__pycache__/fmobilenet.cpython-39.pyc and b/contrast/feat_extract/model/__pycache__/fmobilenet.cpython-39.pyc differ
diff --git a/contrast/feat_extract/model/__pycache__/lcnet.cpython-39.pyc b/contrast/feat_extract/model/__pycache__/lcnet.cpython-39.pyc
Binary files a/contrast/feat_extract/model/__pycache__/lcnet.cpython-39.pyc and b/contrast/feat_extract/model/__pycache__/lcnet.cpython-39.pyc differ
diff --git a/contrast/feat_extract/model/__pycache__/loss.cpython-39.pyc b/contrast/feat_extract/model/__pycache__/loss.cpython-39.pyc
Binary files a/contrast/feat_extract/model/__pycache__/loss.cpython-39.pyc and b/contrast/feat_extract/model/__pycache__/loss.cpython-39.pyc differ
diff --git a/contrast/feat_extract/model/__pycache__/metric.cpython-39.pyc b/contrast/feat_extract/model/__pycache__/metric.cpython-39.pyc
Binary files a/contrast/feat_extract/model/__pycache__/metric.cpython-39.pyc and b/contrast/feat_extract/model/__pycache__/metric.cpython-39.pyc differ
diff --git a/contrast/feat_extract/model/__pycache__/mobilenet_v2.cpython-39.pyc b/contrast/feat_extract/model/__pycache__/mobilenet_v2.cpython-39.pyc
Binary files a/contrast/feat_extract/model/__pycache__/mobilenet_v2.cpython-39.pyc and b/contrast/feat_extract/model/__pycache__/mobilenet_v2.cpython-39.pyc differ
diff --git a/contrast/feat_extract/model/__pycache__/mobilenet_v3.cpython-39.pyc b/contrast/feat_extract/model/__pycache__/mobilenet_v3.cpython-39.pyc
Binary files a/contrast/feat_extract/model/__pycache__/mobilenet_v3.cpython-39.pyc and b/contrast/feat_extract/model/__pycache__/mobilenet_v3.cpython-39.pyc differ
diff --git a/contrast/feat_extract/model/__pycache__/mobilevit.cpython-39.pyc b/contrast/feat_extract/model/__pycache__/mobilevit.cpython-39.pyc
Binary files a/contrast/feat_extract/model/__pycache__/mobilevit.cpython-39.pyc and b/contrast/feat_extract/model/__pycache__/mobilevit.cpython-39.pyc differ
diff --git a/contrast/feat_extract/model/__pycache__/resbam.cpython-39.pyc b/contrast/feat_extract/model/__pycache__/resbam.cpython-39.pyc
Binary files a/contrast/feat_extract/model/__pycache__/resbam.cpython-39.pyc and b/contrast/feat_extract/model/__pycache__/resbam.cpython-39.pyc differ
diff --git a/contrast/feat_extract/model/__pycache__/resnet_face.cpython-39.pyc b/contrast/feat_extract/model/__pycache__/resnet_face.cpython-39.pyc
Binary files a/contrast/feat_extract/model/__pycache__/resnet_face.cpython-39.pyc and b/contrast/feat_extract/model/__pycache__/resnet_face.cpython-39.pyc differ
diff --git a/contrast/feat_extract/model/__pycache__/resnet_pre.cpython-39.pyc b/contrast/feat_extract/model/__pycache__/resnet_pre.cpython-39.pyc
Binary files a/contrast/feat_extract/model/__pycache__/resnet_pre.cpython-39.pyc and b/contrast/feat_extract/model/__pycache__/resnet_pre.cpython-39.pyc differ
diff --git a/contrast/feat_extract/model/__pycache__/utils.cpython-39.pyc b/contrast/feat_extract/model/__pycache__/utils.cpython-39.pyc
Binary files a/contrast/feat_extract/model/__pycache__/utils.cpython-39.pyc and b/contrast/feat_extract/model/__pycache__/utils.cpython-39.pyc differ
diff --git a/models/__pycache__/experimental.cpython-39.pyc b/models/__pycache__/experimental.cpython-39.pyc
Binary files a/models/__pycache__/experimental.cpython-39.pyc and b/models/__pycache__/experimental.cpython-39.pyc differ
diff --git a/models/experimental.py b/models/experimental.py
index 2795871..d0fc839 100644
--- a/models/experimental.py
+++ b/models/experimental.py
@@ -76,7 +76,11 @@ def attempt_load(weights, device=None, inplace=True, fuse=True):
 
     model = Ensemble()
     for w in weights if isinstance(weights, list) else [weights]:
-        ckpt = torch.load(attempt_download(w), map_location=device, weights_only=False)  # load
+        if torch.__version__ >= '2.6':
+            ckpt = torch.load(attempt_download(w), map_location=device, weights_only=False)  # load
+        else:
+            ckpt = torch.load(attempt_download(w), map_location=device)
+
         ckpt = (ckpt.get('ema') or ckpt['model']).to(device).float()  # FP32 model
 
     # Model compatibility updates
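PyTorch 2.6 changed the default of torch.load to weights_only=True, which rejects fully pickled model checkpoints, while releases before 1.13 do not accept the keyword at all; that is why attempt_load now branches on the version. A standalone sketch of the same gate (the checkpoint path is a placeholder; torch.__version__ is a TorchVersion object, so the string comparison below is version-aware rather than lexicographic):

    import torch

    def load_checkpoint(path: str, device: str = "cpu"):
        # Newer torch: explicitly allow full-object (pickled) checkpoints.
        if torch.__version__ >= "2.6":
            return torch.load(path, map_location=device, weights_only=False)
        # Older torch: the weights_only keyword may not exist, so omit it.
        return torch.load(path, map_location=device)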
Yolo + Resnet + Tracker =================''' optdict["source"] = vpath optdict["save_dir"] = savepath_pipeline_imgs - optdict["is_save_img"] = False + optdict["is_save_img"] = True optdict["is_save_video"] = True - yrtOut = yolo_resnet_tracker(**optdict) + if YoloVersion == "V5": + yrtOut = yolo_resnet_tracker(**optdict) + elif YoloVersion == "V10": + yrtOut = yolov10_resnet_tracker(**optdict) + yrtOut_save = [] for frdict in yrtOut: fr_dict = {} @@ -285,21 +287,32 @@ def pipeline( trajpath = os.path.join(savepath_pipeline, "trajectory.png") cv2.imwrite(trajpath, img_cat) -def main(): +def execute_pipeline(evtdir = r"D:\datasets\ym\后台数据\unzip", + source_type = "video", # video, image, + save_path = r"D:\work\result_pipeline", + yolo_ver = "V10", # V10, V5 + + weight_yolo_v5 = r'./ckpts/best_cls10_0906.pt' , + weight_yolo_v10 = r'./ckpts/best_v10s_width0375_1205.pt', + k=0 + ): + ''' + 运行函数 pipeline(),遍历事件文件夹,每个文件夹是一个事件 ''' - 函数:pipeline(),遍历事件文件夹,选择类型 image 或 video, - ''' parmDict = {} - evtdir = r"../dataset/backend_20250310" - parmDict["SourceType"] = "video" # video, image - parmDict["savepath"] = r"../dataset/run" - parmDict["weights"] = r'./ckpts/best_cls10_0906.pt' + parmDict["SourceType"] = source_type + parmDict["savepath"] = save_path + parmDict["YoloVersion"] = yolo_ver + if parmDict["YoloVersion"] == "V5": + parmDict["weights"] = weight_yolo_v5 + elif parmDict["YoloVersion"] == "V10": + parmDict["weights"] = weight_yolo_v10 evtdir = Path(evtdir) - k, errEvents = 0, [] + errEvents = [] for item in evtdir.iterdir(): if item.is_dir(): - # item = evtdir/Path("20250303-103058-074_6914973604223_6914973604223") + item = evtdir/Path("20250310-175352-741") parmDict["eventpath"] = item pipeline(**parmDict) # try: @@ -307,19 +320,21 @@ def main(): # except Exception as e: # errEvents.append(str(item)) k+=1 - if k==5: + if k==1: break - errfile = os.path.join(parmDict["savepath"], f'error_events.txt') + errfile = os.path.join(parmDict["savepath"], 'error_events.txt') with open(errfile, 'w', encoding='utf-8') as f: for line in errEvents: f.write(line + '\n') - - if __name__ == "__main__": - main() + execute_pipeline() + # spath_v10 = r"D:\work\result_pipeline_v10" + # spath_v5 = r"D:\work\result_pipeline_v5" + # execute_pipeline(save_path=spath_v10, yolo_ver="V10") + # execute_pipeline(save_path=spath_v5, yolo_ver="V5") diff --git a/pipeline_01.py b/pipeline_01.py new file mode 100644 index 0000000..adeec4e --- /dev/null +++ b/pipeline_01.py @@ -0,0 +1,395 @@ +# -*- coding: utf-8 -*- +""" +Created on Sun Sep 29 08:59:21 2024 + +@author: ym +""" +import os +# import sys +import cv2 +import pickle +import numpy as np +from pathlib import Path +from scipy.spatial.distance import cdist +from track_reid import yolo_resnet_tracker, yolov10_resnet_tracker + +from tracking.dotrack.dotracks_back import doBackTracks +from tracking.dotrack.dotracks_front import doFrontTracks +from tracking.utils.drawtracks import plot_frameID_y2, draw_all_trajectories +from utils.getsource import get_image_pairs, get_video_pairs +from tracking.utils.read_data import read_similar + +class CameraEvent_: + def __init__(self): + self.cameraType = '', # "front", "back" + self.videoPath = '', + self.imagePaths = [], + self.yoloResnetTracker =[], + self.tracking = None, + +class ShoppingEvent_: + def __init__(self): + self.eventPath = '' + self.eventName = '' + self.barcode = '' + self.eventType = '', # "input", "output", "other" + self.frontCamera = None + self.backCamera = None + self.one2n = [] + + + +def 
+def save_subimgs(imgdict, boxes, spath, ctype, featdict = None):
+    '''
+    Similarity between the current box feature and the previous box feature on
+    the same track; can be compared against the similarities recorded during tracking.
+    '''
+    boxes = boxes[np.argsort(boxes[:, 7])]
+    for i in range(len(boxes)):
+        simi = None
+        tid, fid, bid = int(boxes[i, 4]), int(boxes[i, 7]), int(boxes[i, 8])
+
+        if i>0 and featdict is not None:
+            _, fid0, bid0 = int(boxes[i-1, 4]), int(boxes[i-1, 7]), int(boxes[i-1, 8])
+            if f"{fid0}_{bid0}" in featdict.keys() and f"{fid}_{bid}" in featdict.keys():
+                feat0 = featdict[f"{fid0}_{bid0}"]
+                feat1 = featdict[f"{fid}_{bid}"]
+                simi = 1 - np.maximum(0.0, cdist(feat0[None, :], feat1[None, :], "cosine"))[0][0]
+
+        img = imgdict[f"{fid}_{bid}"]
+        imgpath = spath / f"{ctype}_tid{tid}-{fid}-{bid}.png"
+        if simi is not None:
+            imgpath = spath / f"{ctype}_tid{tid}-{fid}-{bid}_sim{simi:.2f}.png"
+
+        cv2.imwrite(str(imgpath), img)  # cv2.imwrite expects a str path
+
+
+def save_subimgs_1(imgdict, boxes, spath, ctype, simidict = None):
+    '''
+    Similarity between the current box feature and the track's smooth_feat;
+    yolo_resnet_tracker records feature similarity this way.
+    '''
+    for i in range(len(boxes)):
+        tid, fid, bid = int(boxes[i, 4]), int(boxes[i, 7]), int(boxes[i, 8])
+
+        key = f"{fid}_{bid}"
+        img = imgdict[key]
+        imgpath = spath / f"{ctype}_tid{tid}-{fid}-{bid}.png"
+        if simidict is not None and key in simidict.keys():
+            imgpath = spath / f"{ctype}_tid{tid}-{fid}-{bid}_sim{simidict[key]:.2f}.png"
+
+        cv2.imwrite(str(imgpath), img)
+
+def show_result(event_tracks, yrtDict, savepath_pipe):
+    '''Save the motion-track sub-images output by tracking, with similarity recorded in the filenames.'''
+
+    savepath_pipe_subimgs = savepath_pipe / Path("subimgs")
+    if not savepath_pipe_subimgs.exists():
+        savepath_pipe_subimgs.mkdir(parents=True, exist_ok=True)
+
+    for CamerType, vts in event_tracks:
+        if len(vts.tracks)==0: continue
+        if CamerType == 'front':
+            # yolos = ShoppingDict["frontCamera"]["yoloResnetTracker"]
+            yolos = yrtDict["frontyrt"]
+            ctype = 1
+        if CamerType == 'back':
+            # yolos = ShoppingDict["backCamera"]["yoloResnetTracker"]
+            yolos = yrtDict["backyrt"]
+            ctype = 0
+
+        imgdict, featdict, simidict = {}, {}, {}
+        for y in yolos:
+            imgdict.update(y["imgs"])
+            featdict.update(y["feats"])
+            simidict.update(y["featsimi"])
+
+        for track in vts.Residual:
+            if isinstance(track, np.ndarray):
+                save_subimgs(imgdict, track, savepath_pipe_subimgs, ctype, featdict)
+            else:
+                save_subimgs(imgdict, track.slt_boxes, savepath_pipe_subimgs, ctype, featdict)
+
+    '''(3) Draw and save trajectories'''
+    illus = [None, None]
+    for CamerType, vts in event_tracks:
+        if len(vts.tracks)==0: continue
+
+        if CamerType == 'front':
+            edgeline = cv2.imread("./tracking/shopcart/cart_tempt/board_ftmp_line.png")
+
+            h, w = edgeline.shape[:2]
+            # nh, nw = h//2, w//2
+            # edgeline = cv2.resize(edgeline, (nw, nh), interpolation=cv2.INTER_AREA)
+
+            img_tracking = draw_all_trajectories(vts, edgeline, savepath_pipe, CamerType, draw5p=True)
+            illus[0] = img_tracking
+
+            plt = plot_frameID_y2(vts)
+            plt.savefig(os.path.join(savepath_pipe, "front_y2.png"))
+
+        if CamerType == 'back':
+            edgeline = cv2.imread("./tracking/shopcart/cart_tempt/edgeline.png")
+
+            h, w = edgeline.shape[:2]
+            # nh, nw = h//2, w//2
+            # edgeline = cv2.resize(edgeline, (nw, nh), interpolation=cv2.INTER_AREA)
+
+            img_tracking = draw_all_trajectories(vts, edgeline, savepath_pipe, CamerType, draw5p=True)
+            illus[1] = img_tracking
+
+    illus = [im for im in illus if im is not None]
+    if len(illus):
+        img_cat = np.concatenate(illus, axis = 1)
+        if len(illus)==2:
+            H, W = img_cat.shape[:2]
+            cv2.line(img_cat, (int(W/2), 0), (int(W/2), int(H)), (128, 128, 255), 3)
+
+        trajpath = os.path.join(savepath_pipe, "trajectory.png")
+        cv2.imwrite(trajpath, img_cat)
+
+
+def pipeline(eventpath,
+             SourceType,
+             weights,
+             DataType = "raw",  # raw: images or videos; pkl: pickle file
+             YoloVersion="V5",
+             savepath = None,
+             saveimages = True
+             ):
+
+    ## Build the shopping-event dictionary
+    evtname = Path(eventpath).stem
+    barcode = evtname.split('_')[-1] if len(evtname.split('_'))>=2 \
+                                        and len(evtname.split('_')[-1])>=8 \
+                                        and evtname.split('_')[-1].isdigit() else ''
+
+    '''Event result folders: savepath_pipe, savepath_pkl'''
+    if not savepath:
+        savepath = Path(__file__).resolve().parents[0] / "events_result"
+    savepath_pipe = Path(savepath) / Path("yolos_tracking") / evtname
+
+    savepath_pkl = Path(savepath) / "shopping_pkl"
+    if not savepath_pkl.exists():
+        savepath_pkl.mkdir(parents=True, exist_ok=True)
+    pklpath = Path(savepath_pkl) / Path(str(evtname)+".pickle")
+
+    yrtDict = {}
+
+    yrt_out = []
+    if DataType == "raw":
+        ### Do not rerun events that have already been through yolo-resnet-tracker
+        # if pklpath.exists():
+        #     print(f"Pickle file already saved: {evtname}.pickle")
+        #     return
+
+        if SourceType == "video":
+            vpaths = get_video_pairs(eventpath)
+        elif SourceType == "image":
+            vpaths = get_image_pairs(eventpath)
+
+        for vpath in vpaths:
+            '''================= 2. Event result folder ================='''
+            if isinstance(vpath, list):
+                savepath_pipe_imgs = savepath_pipe / Path("images")
+            else:
+                savepath_pipe_imgs = savepath_pipe / Path(str(Path(vpath).stem))
+
+            if not savepath_pipe_imgs.exists():
+                savepath_pipe_imgs.mkdir(parents=True, exist_ok=True)
+
+            optdict = {}
+            optdict["weights"] = weights
+            optdict["source"] = vpath
+            optdict["save_dir"] = savepath_pipe_imgs
+            optdict["is_save_img"] = saveimages
+            optdict["is_save_video"] = True
+
+            if YoloVersion == "V5":
+                yrtOut = yolo_resnet_tracker(**optdict)
+            elif YoloVersion == "V10":
+                yrtOut = yolov10_resnet_tracker(**optdict)
+
+            yrt_out.append((vpath, yrtOut))
+
+    elif DataType == "pkl":
+        pass
+
+    else:
+        return
+
+    '''====================== Build the ShoppingDict module ======================='''
+    ShoppingDict = {"eventPath": eventpath,
+                    "eventName": evtname,
+                    "barcode": barcode,
+                    "eventType": '',  # "input", "output", "other"
+                    "frontCamera": {},
+                    "backCamera": {},
+                    "one2n": []
+                    }
+    procpath = Path(eventpath).joinpath('process.data')
+    if procpath.is_file():
+        SimiDict = read_similar(procpath)
+        ShoppingDict["one2n"] = SimiDict['one2n']
+
+    event_tracks = []
+    for vpath, yrtOut in yrt_out:
+        '''================= 1. Build the camera-event dictionary ================='''
+        CameraEvent = {"cameraType": '',  # "front", "back"
+                       "videoPath": '',
+                       "imagePaths": [],
+                       "yoloResnetTracker": [],
+                       "tracking": [],
+                       }
+
+        if isinstance(vpath, list):
+            CameraEvent["imagePaths"] = vpath
+            bname = os.path.basename(vpath[0])
+        if not isinstance(vpath, list):
+            CameraEvent["videoPath"] = vpath
+            bname = os.path.basename(vpath).split('.')[0]
+        if bname.split('_')[0] == "0" or bname.find('back')>=0:
+            CameraEvent["cameraType"] = "back"
+        if bname.split('_')[0] == "1" or bname.find('front')>=0:
+            CameraEvent["cameraType"] = "front"
+
+        '''Two save modes: (1) save images, (2) no save images'''
+        ### (1) save images
+        yrtOut_save = []
+        for frdict in yrtOut:
+            fr_dict = {}
+            for k, v in frdict.items():
+                if k != "imgs":
+                    fr_dict[k]=v
+            yrtOut_save.append(fr_dict)
+        CameraEvent["yoloResnetTracker"] = yrtOut_save
+
+        ### (2) no save images
+        # CameraEvent["yoloResnetTracker"] = yrtOut
+
+        '''================= 4. tracking ================='''
+        '''(1) Build the boxes and feats consumed by the tracking module'''
+        bboxes = np.empty((0, 6), dtype=np.float64)
+        trackerboxes = np.empty((0, 9), dtype=np.float64)
+        trackefeats = {}
+        for frameDict in yrtOut:
+            tboxes = frameDict["tboxes"]
+            ffeats = frameDict["feats"]
+
+            boxes = frameDict["bboxes"]
+            bboxes = np.concatenate((bboxes, np.array(boxes)), axis=0)
+            trackerboxes = np.concatenate((trackerboxes, np.array(tboxes)), axis=0)
+            for i in range(len(tboxes)):
+                fid, bid = int(tboxes[i, 7]), int(tboxes[i, 8])
+                trackefeats.update({f"{fid}_{bid}": ffeats[f"{fid}_{bid}"]})
+
+        '''(2) tracking, back camera'''
+        if CameraEvent["cameraType"] == "back":
+            vts = doBackTracks(trackerboxes, trackefeats)
+            vts.classify()
+            event_tracks.append(("back", vts))
+
+            CameraEvent["tracking"] = vts
+            ShoppingDict["backCamera"] = CameraEvent
+
+            yrtDict["backyrt"] = yrtOut
+
+        '''(3) tracking, front camera'''
+        if CameraEvent["cameraType"] == "front":
+            vts = doFrontTracks(trackerboxes, trackefeats)
+            vts.classify()
+            event_tracks.append(("front", vts))
+
+            CameraEvent["tracking"] = vts
+            ShoppingDict["frontCamera"] = CameraEvent
+
+            yrtDict["frontyrt"] = yrtOut
+
+    '''========================== Saving ================================='''
+    # Save ShoppingDict
+    with open(str(pklpath), 'wb') as f:
+        pickle.dump(ShoppingDict, f)
+
+    # Draw and save the trajectory images
+    show_result(event_tracks, yrtDict, savepath_pipe)
+
+
+def execute_pipeline(evtdir = r"D:\datasets\ym\后台数据\unzip",
+                     DataType = "raw",  # raw, pkl
+                     save_path = r"D:\work\result_pipeline",
+                     kk=1,
+                     source_type = "video",  # video, image
+                     yolo_ver = "V10",  # V10, V5
+                     weight_yolo_v5 = r'./ckpts/best_cls10_0906.pt',
+                     weight_yolo_v10 = r'./ckpts/best_v10s_width0375_1205.pt',
+                     saveimages = True
+                     ):
+    '''
+    Run pipeline() over the event directory; each sub-folder holds one event.
+    '''
+    parmDict = {}
+    parmDict["DataType"] = DataType
+    parmDict["savepath"] = save_path
+    parmDict["SourceType"] = source_type
+
+    parmDict["YoloVersion"] = yolo_ver
+    if parmDict["YoloVersion"] == "V5":
+        parmDict["weights"] = weight_yolo_v5
+    elif parmDict["YoloVersion"] == "V10":
+        parmDict["weights"] = weight_yolo_v10
+
+    parmDict["saveimages"] = saveimages
+
+    evtdir = Path(evtdir)
+    errEvents = []
+    k = 0
+    for item in evtdir.iterdir():
+        if item.is_dir():
+            item = evtdir/Path("20250310-175352-741")  # debug: pins every iteration to one event
+            parmDict["eventpath"] = item
+
+            pipeline(**parmDict)
+            # try:
+            #     pipeline(**parmDict)
+            # except Exception as e:
+            #     errEvents.append(str(item))
+
+            k+=1
+            if kk is not None and k==kk:
+                break
+
+    errfile = os.path.join(parmDict["savepath"], 'error_events.txt')
+    with open(errfile, 'w', encoding='utf-8') as f:
+        for line in errEvents:
+            f.write(line + '\n')
+
+if __name__ == "__main__":
+    execute_pipeline()
+
+    # spath_v10 = r"D:\work\result_pipeline_v10"
+    # spath_v5 = r"D:\work\result_pipeline_v5"
+    # execute_pipeline(save_path=spath_v10, yolo_ver="V10")
+    # execute_pipeline(save_path=spath_v5, yolo_ver="V5")
diff --git a/track_reid.py b/track_reid.py
index 3357650..06d158f 100644
--- a/track_reid.py
+++ b/track_reid.py
@@ -64,7 +64,10 @@ from hands.hand_inference import hand_pose
 from contrast.feat_extract.config import config as conf
 from contrast.feat_extract.inference import FeatsInterface
 
+from ultralytics import YOLOv10
+
 ReIDEncoder = FeatsInterface(conf)
+print(f'load model {conf.testbackbone} in {Path(__file__).stem}')
 
 IMG_FORMATS = '.bmp', '.dng', '.jpeg', '.jpg', '.mpo', '.png', '.tif', '.tiff', '.webp', '.pfm'  # include image suffixes
 VID_FORMATS = '.asf', '.avi', '.gif', '.m4v', '.mkv', '.mov', '.mp4', '.mpeg', '.mpg', '.ts', '.wmv'  # include video suffixes
@@ -131,12 +134,158 @@ def init_trackers(tracker_yaml = None, bs=1):
     trackers = []
     for _ in range(bs):
         tracker = TRACKER_MAP[cfg.tracker_type](args=cfg, frame_rate=30)
+        if cfg.with_reid:
+            tracker.encoder = ReIDEncoder
+
         trackers.append(tracker)
 
     return trackers
 
+'''=============== used in pipeline.py for Yolov10 =================='''
+def yolov10_resnet_tracker(
+        weights = ROOT / 'ckpts/best_v10s_width0375_1205.pt',  # model path or triton URL
+        source = '',  # file/dir/URL/glob/screen/0(webcam)
+        save_dir = '',
+        is_save_img = True,
+        is_save_video = True,
+
+        tracker_yaml = "./tracking/trackers/cfg/botsort.yaml",
+        line_thickness=3,  # bounding box thickness (pixels)
+        hide_labels=False,  # hide labels
+    ):
+
+    ## load a custom model
+    model = YOLOv10(weights)
 
-'''=============== used in pipeline.py =================='''
+    custom = {"conf": 0.25, "batch": 1, "save": False, "mode": "predict"}
+    kwargs = {"save": True, "imgsz": 640, "conf": 0.1}
+    args = {**model.overrides, **custom, **kwargs}
+    predictor = model.task_map[model.task]["predictor"](overrides=args, _callbacks=model.callbacks)
+
+    vid_path, vid_writer = None, None
+    tracker = init_trackers(tracker_yaml)[0]
+    yoloResnetTracker = []
+    for i, result in enumerate(predictor.stream_inference(source)):
+        datamode = predictor.dataset.mode
+
+        det = result.boxes.data.cpu().numpy()
+        im0 = result.orig_img
+        names = result.names
+        path = result.path
+        im_array = result.plot()
+
+        ## run tracker.update()
+        det_tracking = Boxes(det, im0.shape)
+        tracks, outfeats = tracker.update(det_tracking, im0)
+
+        if datamode == "video":
+            frameId = predictor.dataset.frame
+        elif datamode == "image":
+            frameId = predictor.dataset.count
+        annotator = Annotator(im0.copy(), line_width=line_thickness, example=str(names))
+
+        simdict, simdict1 = {}, {}
+        for fid, bid, mfeat, cfeat, features in outfeats:
+            if mfeat is not None and cfeat is not None:
+                simi = 1 - np.maximum(0.0, cdist(mfeat[None, :], cfeat[None, :], "cosine"))[0][0]
+                simdict.update({f"{int(frameId)}_{int(bid)}": simi})
+
+            if cfeat is not None and len(features)>=2:
+                mfeat = features[-2]
+                simi = 1 - np.maximum(0.0, cdist(mfeat[None, :], cfeat[None, :], "cosine"))[0][0]
+                simdict1.update({f"{int(frameId)}_{int(bid)}": simi})
+
+        if len(tracks) > 0:
+            tracks[:, 7] = frameId
+            # trackerBoxes = np.concatenate([trackerBoxes, tracks], axis=0)
+            '''================== 1. Store dets/subimgs/features dicts =============='''
+            imgs, features = ReIDEncoder.inference(im0, tracks)
+            imgdict, featdict = {}, {}
+            for ii, bid in enumerate(tracks[:, 8]):
+                featdict.update({f"{int(frameId)}_{int(bid)}": features[ii, :]})
+                imgdict.update({f"{int(frameId)}_{int(bid)}": imgs[ii]})
+
+            frameDict = {"path": path,
+                         "fid": int(frameId),
+                         "bboxes": det,
+                         "tboxes": tracks,
+                         "imgs": imgdict,
+                         "feats": featdict,
+                         "featsimi": simdict,   # similarity of the current box feature to the track's smooth_feat
+                         "featsimi1": simdict1  # similarity of the current box feature to the previous box feature on the track
+                         }
+            yoloResnetTracker.append(frameDict)
+
+            # imgs, features = inference_image(im0, tracks)
+            # TrackerFeats = np.concatenate([TrackerFeats, features], axis=0)
+
+            '''================== 2. Draw tracked boxes and labels ==================='''
+            for *xyxy, id, conf, cls, fid, bid in reversed(tracks):
+                name = ('' if id==-1 else f'id:{int(id)} ') + names[int(cls)]
+                if f"{int(frameId)}_{int(bid)}" in simdict.keys():
+                    sim = simdict[f"{int(frameId)}_{int(bid)}"]
+                    label = f"{name} {sim:.2f}"
+                else:
+                    label = None if hide_labels else name
+
+                # label = None if hide_labels else (name if hide_conf else f'{name} {conf:.1f}')
+
+                if id >=0 and cls==0:
+                    color = colors(int(cls), True)
+                elif id >=0 and cls!=0:
+                    color = colors(int(id), True)
+                else:
+                    color = colors(19, True)  # 19 is the last color in the palette
+                annotator.box_label(xyxy, label, color=color)
+
+        '''====== Save results (image and video) ======'''
+        # save_path = str(save_dir / Path(path).name)  # with suffix
+        im0 = annotator.result()
+        if is_save_img:
+            save_path_img = str(save_dir / Path(path).stem)
+            if datamode == 'image':
+                imgpath = save_path_img + ".png"
+            if datamode == 'video':
+                imgpath = save_path_img + f"_{frameId}.png"
+            cv2.imwrite(str(imgpath), im0)  # cv2.imwrite expects a str path
+
+        # if dataset.mode == 'video' and is_save_video:
+
+        if is_save_video:
+            if datamode == 'video':
+                video_path = str(save_dir / Path(path).stem) + '.mp4'  # with suffix
+            else:
+                videoname = str(Path(path).stem).split('_')[0] + '.mp4'
+                video_path = str(save_dir / videoname)
+
+            if vid_path != video_path:  # new video
+                vid_path = video_path
+                vid_cap = predictor.dataset.cap
+
+                if isinstance(vid_writer, cv2.VideoWriter):
+                    vid_writer.release()  # release previous video writer
+                if vid_cap:  # video
+                    fps = vid_cap.get(cv2.CAP_PROP_FPS)
+                    w = int(vid_cap.get(cv2.CAP_PROP_FRAME_WIDTH))
+                    h = int(vid_cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
+                else:  # stream
+                    fps, w, h = 25, im0.shape[1], im0.shape[0]
+                ## for image rotating in dataloader.LoadImages.__next__()
+                w, h = im0.shape[1], im0.shape[0]
+
+                video_path = str(Path(video_path).with_suffix('.mp4'))  # force *.mp4 suffix on results videos
+                vid_writer = cv2.VideoWriter(video_path, cv2.VideoWriter_fourcc(*'mp4v'), fps, (w, h))
+            vid_writer.write(im0)
+
+    return yoloResnetTracker
+
+
+'''=============== used in pipeline.py for Yolov5 =================='''
 @smart_inference_mode()
 def yolo_resnet_tracker(
         weights=ROOT / 'yolov5s.pt',  # model path or triton URL
@@ -660,8 +809,6 @@ def run(
 
 def parse_opt():
     modelpath = ROOT / 'ckpts/best_cls10_0906.pt'  # 'ckpts/best_15000_0908.pt', 'ckpts/yolov5s.pt', 'ckpts/best_20000_cls30.pt, best_yolov5m_250000'
-
-
     '''datapath: a directory of videos, or a single video file'''
     datapath = r"D:/datasets/ym/videos/标记视频/"  # ROOT/'data/videos', ROOT/'data/images' images
     # datapath = r"D:\datasets\ym\highvalue\videos"
@@ -714,7 +861,7 @@ def find_video_imgs(root_dir):
 
 
-def main():
+def main_v5():
     '''
     run(): inference on a single image or a single video file; image sequences are not supported.
     '''
@@ -733,10 +880,10 @@ def main():
     # p = r"D:\exhibition\images\153112511_0_seek_105.mp4"
     # p = r"D:\exhibition\images\image"
 
-    p = r"D:\全实时\202502\tracker\1_1740891284792.mp4"
-    optdict["project"] = r"D:\全实时\202502\tracker"
-
-    # optdict["project"] = r"D:\exhibition\result"
+    p = r"D:\datasets\ym\后台数据\unzip\20250310-175352-741"
+    optdict["project"] = r"D:\work\result"
+
+    optdict["weights"] = ROOT / 'ckpts/best_cls10_0906.pt'
 
     if os.path.isdir(p):
         files = find_video_imgs(p)
         k = 0
         for file in files:
             optdict["source"] = file
             run(**optdict)
 
             k += 1
-            if k == 1:
+            if k == 2:
                 break
     elif os.path.isfile(p):
         optdict["source"] = p
         run(**optdict)
 
+def main_v10():
+    datapath = r'D:\datasets\ym\后台数据\unzip\20250310-175352-741\0.mp4'
+    savepath = r'D:\work\result'
+    savepath = savepath / Path(str(Path(datapath).stem))  # str / Path resolves via Path.__rtruediv__
+    if not savepath.exists():
+        savepath.mkdir(parents=True, exist_ok=True)
+
+    weightpath = ROOT / 'ckpts/best_v10s_width0375_1205.pt'
+
+    optdict = {}
+    optdict["weights"] = weightpath
+    optdict["source"] = datapath
+    optdict["save_dir"] = savepath
+    optdict["is_save_img"] = True
+    optdict["is_save_video"] = True
+
+    yrtOut = yolov10_resnet_tracker(**optdict)
+
+
 if __name__ == '__main__':
-    main()
+    # main_v5()
+    main_v10()
diff --git a/tracking/dotrack/__pycache__/dotracks.cpython-39.pyc b/tracking/dotrack/__pycache__/dotracks.cpython-39.pyc
index ce553675900457d6277aa93ad2d305a7ce6d22fe..31724ced010678abbd8ab1aed39431c9b2f58c07 100644
Binary files a/tracking/dotrack/__pycache__/dotracks.cpython-39.pyc and b/tracking/dotrack/__pycache__/dotracks.cpython-39.pyc differ
diff --git a/tracking/dotrack/__pycache__/track_front.cpython-39.pyc b/tracking/dotrack/__pycache__/track_front.cpython-39.pyc
index 8baa7a1090cfb1a467026ed7b62fd204f03c0516..523c566028a65ee1b8e0cb7a73946396dbe17a38 100644
Binary files a/tracking/dotrack/__pycache__/track_front.cpython-39.pyc and b/tracking/dotrack/__pycache__/track_front.cpython-39.pyc differ
diff --git a/tracking/trackers/__pycache__/bot_sort.cpython-39.pyc b/tracking/trackers/__pycache__/bot_sort.cpython-39.pyc
index b02abbde4e06511db66045f83d534ef82372b030..7dd0e3c23ab475569f4110bcb57298a6bc2ecbc6 100644
Binary files a/tracking/trackers/__pycache__/bot_sort.cpython-39.pyc and b/tracking/trackers/__pycache__/bot_sort.cpython-39.pyc differ
diff --git a/tracking/trackers/__pycache__/byte_tracker.cpython-39.pyc b/tracking/trackers/__pycache__/byte_tracker.cpython-39.pyc
index 18b2f8f7290258d366b3dbc9f5b1d7db752f2c2a..33d3dddfde50f84e6a8ba1f20b1e8131351c0e7a 100644
Binary files a/tracking/trackers/__pycache__/byte_tracker.cpython-39.pyc and b/tracking/trackers/__pycache__/byte_tracker.cpython-39.pyc differ
diff --git a/tracking/trackers/bot_sort.py b/tracking/trackers/bot_sort.py
index ed0c96d..fb9ddba 100644
--- a/tracking/trackers/bot_sort.py
+++ b/tracking/trackers/bot_sort.py
@@ -116,11 +116,13 @@ class BOTSORT(BYTETracker):
         self.proximity_thresh = args.proximity_thresh
         self.appearance_thresh = args.appearance_thresh
 
-        if args.with_reid:
-            # Haven't supported BoT-SORT(reid) yet
-            # self.encoder = ReIDInterface(config)
-
-            self.encoder = FeatsInterface(conf)
+        # if args.with_reid:
+        #     # Haven't supported BoT-SORT(reid) yet
+        #     # self.encoder = ReIDInterface(config)
+
+        #     self.encoder = FeatsInterface(conf)
+
+        #     print('load model {} in BOTSORT'.format(conf.testbackbone))
 
         # self.gmc = GMC(method=args.gmc_method)  # commented by WQG
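
Note on the bot_sort.py change above: BOTSORT no longer builds its own ReID encoder; init_trackers in track_reid.py injects one shared instance instead. A sketch of the new wiring (assumes this repo's modules are importable and with_reid is enabled in botsort.yaml):

    from track_reid import init_trackers, ReIDEncoder

    trackers = init_trackers("./tracking/trackers/cfg/botsort.yaml", bs=2)
    # Every tracker shares the single encoder built at import time in track_reid.py.
    assert all(t.encoder is ReIDEncoder for t in trackers)
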
diff --git a/tracking/tracking_pipeline.py b/tracking/tracking_pipeline.py
new file mode 100644
index 0000000..4e2b9d7
--- /dev/null
+++ b/tracking/tracking_pipeline.py
@@ -0,0 +1,180 @@
+# -*- coding: utf-8 -*-
+"""
+Created on Thu Mar 27 16:09:07 2025
+
+@author: ym
+"""
+import os
+import sys
+import cv2
+import pickle
+import numpy as np
+from pathlib import Path
+from scipy.spatial.distance import cdist
+
+from .dotrack.dotracks_back import doBackTracks
+from .dotrack.dotracks_front import doFrontTracks
+from .utils.drawtracks import plot_frameID_y2, draw_all_trajectories
+from .utils.read_data import read_similar
+
+
+class CameraEvent_:
+    def __init__(self):
+        self.cameraType = ''  # "front", "back"
+        self.videoPath = ''
+        self.imagePaths = []
+        self.yoloResnetTracker = []
+        self.tracking = None
+
+class ShoppingEvent_:
+    def __init__(self):
+        self.eventPath = ''
+        self.eventName = ''
+        self.barcode = ''
+        self.eventType = ''  # "input", "output", "other"
+        self.frontCamera = None
+        self.backCamera = None
+        self.one2n = []
+
+
+def main():
+    '''
+    Load a pickled ShoppingDict, rebuild it as a ShoppingEvent_ object,
+    and re-save it with the tracking results attached.
+    '''
+    evt_pkfile = 'path.pickle'
+    with open(evt_pkfile, 'rb') as f:
+        ShoppingDict = pickle.load(f)
+
+    savepath = ""
+
+    back_camera = ShoppingDict["backCamera"]["cameraType"]
+    back_yrt = ShoppingDict["backCamera"]["yoloResnetTracker"]
+    front_camera = ShoppingDict["frontCamera"]["cameraType"]
+    front_yrt = ShoppingDict["frontCamera"]["yoloResnetTracker"]
+    yrts = [(back_camera, back_yrt), (front_camera, front_yrt)]
+
+    shopping_event = ShoppingEvent_()
+    shopping_event.eventPath = ShoppingDict["eventPath"]
+    shopping_event.eventName = ShoppingDict["eventName"]
+    shopping_event.barcode = ShoppingDict["barcode"]
+
+    yrtDict = {}
+    event_tracks = []
+    for camera_type, yrtOut in yrts:
+        '''
+        inputs:
+            yrtOut
+            camera_type
+        outputs:
+            camera_event
+        '''
+        camera_event = CameraEvent_()
+
+        '''================= 4. tracking ================='''
+        '''(1) Build the boxes and feats consumed by the tracking module'''
+        bboxes = np.empty((0, 6), dtype=np.float64)
+        trackerboxes = np.empty((0, 9), dtype=np.float64)
+        trackefeats = {}
+        for frameDict in yrtOut:
+            tboxes = frameDict["tboxes"]
+            ffeats = frameDict["feats"]
+
+            boxes = frameDict["bboxes"]
+            bboxes = np.concatenate((bboxes, np.array(boxes)), axis=0)
+            trackerboxes = np.concatenate((trackerboxes, np.array(tboxes)), axis=0)
+            for i in range(len(tboxes)):
+                fid, bid = int(tboxes[i, 7]), int(tboxes[i, 8])
+                trackefeats.update({f"{fid}_{bid}": ffeats[f"{fid}_{bid}"]})
+
+        '''(2) tracking, back camera'''
+        if camera_type == "back":
+            vts = doBackTracks(trackerboxes, trackefeats)
+            vts.classify()
+            event_tracks.append(("back", vts))
+
+            camera_event.cameraType = camera_type
+            camera_event.yoloResnetTracker = yrtOut
+            camera_event.tracking = vts
+            camera_event.videoPath = ShoppingDict["backCamera"]["videoPath"]
+            camera_event.imagePaths = ShoppingDict["backCamera"]["imagePaths"]
+            shopping_event.backCamera = camera_event
+
+            yrtDict["backyrt"] = yrtOut
+
+        '''(3) tracking, front camera'''
+        if camera_type == "front":
+            vts = doFrontTracks(trackerboxes, trackefeats)
+            vts.classify()
+            event_tracks.append(("front", vts))
+
+            camera_event.cameraType = camera_type
+            camera_event.yoloResnetTracker = yrtOut
+            camera_event.tracking = vts
+            camera_event.videoPath = ShoppingDict["frontCamera"]["videoPath"]
+            camera_event.imagePaths = ShoppingDict["frontCamera"]["imagePaths"]
+            shopping_event.frontCamera = camera_event
+
+            yrtDict["frontyrt"] = yrtOut
+
+    name = Path(evt_pkfile).stem
+    pf_path = os.path.join(savepath, name+"_new.pickle")
+    with open(str(pf_path), 'wb') as f:
+        pickle.dump(shopping_event, f)
+
+    illus = [None, None]
+    for CamerType, vts in event_tracks:
+        if len(vts.tracks)==0: continue
+
+        if CamerType == 'front':
+            edgeline = cv2.imread("./tracking/shopcart/cart_tempt/board_ftmp_line.png")
+
+            h, w = edgeline.shape[:2]
+            # nh, nw = h//2, w//2
+            # edgeline = cv2.resize(edgeline, (nw, nh), interpolation=cv2.INTER_AREA)
+
+            img_tracking = draw_all_trajectories(vts, edgeline, savepath, CamerType, draw5p=True)
+            illus[0] = img_tracking
+
+            plt = plot_frameID_y2(vts)
+            plt.savefig(os.path.join(savepath, "front_y2.png"))
+
+        if CamerType == 'back':
+            edgeline = cv2.imread("./tracking/shopcart/cart_tempt/edgeline.png")
+
+            h, w = edgeline.shape[:2]
+            # nh, nw = h//2, w//2
+            # edgeline = cv2.resize(edgeline, (nw, nh), interpolation=cv2.INTER_AREA)
+
+            img_tracking = draw_all_trajectories(vts, edgeline, savepath, CamerType, draw5p=True)
+            illus[1] = img_tracking
+
+
+if __name__ == "__main__":
+    main()
\ No newline at end of file
diff --git a/tracking/utils/__pycache__/annotator.cpython-39.pyc b/tracking/utils/__pycache__/annotator.cpython-39.pyc
index b5eb1c9719ff603aae928218f46e120f82a8703f..182245b994fe5a0a778c57d457438d49a51bb499 100644
Binary files a/tracking/utils/__pycache__/annotator.cpython-39.pyc and b/tracking/utils/__pycache__/annotator.cpython-39.pyc differ
diff --git a/tracking/utils/__pycache__/drawtracks.cpython-39.pyc b/tracking/utils/__pycache__/drawtracks.cpython-39.pyc
index bccf31be23888fa3c4d47df21246c99b74edfc25..90325be1f2a417f74204b8f4ceb8a2a6f9f86c23 100644
Binary files a/tracking/utils/__pycache__/drawtracks.cpython-39.pyc and b/tracking/utils/__pycache__/drawtracks.cpython-39.pyc differ
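
Both pipeline_01.py and track_reid.py compute the same clipped cosine similarity inline; a small equivalent helper (a sketch, not part of the patch):

    import numpy as np
    from scipy.spatial.distance import cdist

    def feat_similarity(f0: np.ndarray, f1: np.ndarray) -> float:
        """1 - cosine distance; the max(0, .) guards against tiny negative distances from rounding."""
        return float(1 - np.maximum(0.0, cdist(f0[None, :], f1[None, :], "cosine"))[0][0])

    print(feat_similarity(np.ones(256), np.ones(256)))  # -> 1.0 for identical features
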
diff --git a/ultralytics/__init__.py b/ultralytics/__init__.py
index a0ae59f..8ff1b4f 100644
--- a/ultralytics/__init__.py
+++ b/ultralytics/__init__.py
@@ -1,12 +1,27 @@
 # Ultralytics YOLO 🚀, AGPL-3.0 license
 
-__version__ = '8.0.173'
+__version__ = "8.1.34"
 
-from ultralytics.models import RTDETR, SAM, YOLO
+from ultralytics.data.explorer.explorer import Explorer
+from ultralytics.models import RTDETR, SAM, YOLO, YOLOWorld, YOLOv10
 from ultralytics.models.fastsam import FastSAM
 from ultralytics.models.nas import NAS
-from ultralytics.utils import SETTINGS as settings
+from ultralytics.utils import ASSETS, SETTINGS as settings
 from ultralytics.utils.checks import check_yolo as checks
 from ultralytics.utils.downloads import download
 
-__all__ = '__version__', 'YOLO', 'NAS', 'SAM', 'FastSAM', 'RTDETR', 'checks', 'download', 'settings'
+__all__ = (
+    "__version__",
+    "ASSETS",
+    "YOLO",
+    "YOLOWorld",
+    "NAS",
+    "SAM",
+    "FastSAM",
+    "RTDETR",
+    "checks",
+    "download",
+    "settings",
+    "Explorer",
+    "YOLOv10"
+)
diff --git a/ultralytics/__pycache__/__init__.cpython-312.pyc b/ultralytics/__pycache__/__init__.cpython-312.pyc
index cc440bfbfa44694c91de2b927e7513b3fce6a136..aebdf610dc1e6c9d118b10439197a4f39443d364 100644
Binary files a/ultralytics/__pycache__/__init__.cpython-312.pyc and b/ultralytics/__pycache__/__init__.cpython-312.pyc differ
diff --git a/ultralytics/__pycache__/__init__.cpython-39.pyc b/ultralytics/__pycache__/__init__.cpython-39.pyc
index 2dffc61c6b8b47b3ef1edd218d623dd169c41346..a34898bc5a8222ffec7edc47234e39ef1b1e518d 100644
Binary files a/ultralytics/__pycache__/__init__.cpython-39.pyc and b/ultralytics/__pycache__/__init__.cpython-39.pyc differ
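
With YOLOv10 exported at the package top level, the import used by track_reid.py works directly; a minimal usage sketch (the weights path is this repo's checkpoint and the source file name are assumptions):

    from ultralytics import YOLOv10

    model = YOLOv10("ckpts/best_v10s_width0375_1205.pt")
    results = model.predict(source="0.mp4", imgsz=640, conf=0.1)  # same imgsz/conf as yolov10_resnet_tracker
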
diff --git a/ultralytics/cfg/__init__.py b/ultralytics/cfg/__init__.py
index 7bc48f2..c34cc17 100644
--- a/ultralytics/cfg/__init__.py
+++ b/ultralytics/cfg/__init__.py
@@ -1,34 +1,62 @@
 # Ultralytics YOLO 🚀, AGPL-3.0 license
 
 import contextlib
-import re
+import os
 import shutil
+import subprocess
 import sys
 from pathlib import Path
 from types import SimpleNamespace
 from typing import Dict, List, Union
+import re
 
-from ultralytics.utils import (ASSETS, DEFAULT_CFG, DEFAULT_CFG_DICT, DEFAULT_CFG_PATH, LOGGER, RANK, SETTINGS,
-                               SETTINGS_YAML, IterableSimpleNamespace, __version__, checks, colorstr, deprecation_warn,
-                               yaml_load, yaml_print)
+from ultralytics.utils import (
+    ASSETS,
+    DEFAULT_CFG,
+    DEFAULT_CFG_DICT,
+    DEFAULT_CFG_PATH,
+    LOGGER,
+    RANK,
+    ROOT,
+    RUNS_DIR,
+    SETTINGS,
+    SETTINGS_YAML,
+    TESTS_RUNNING,
+    IterableSimpleNamespace,
+    __version__,
+    checks,
+    colorstr,
+    deprecation_warn,
+    yaml_load,
+    yaml_print,
+)
 
 # Define valid tasks and modes
-MODES = 'train', 'val', 'predict', 'export', 'track', 'benchmark'
-TASKS = 'detect', 'segment', 'classify', 'pose'
-TASK2DATA = {'detect': 'coco8.yaml', 'segment': 'coco8-seg.yaml', 'classify': 'imagenet10', 'pose': 'coco8-pose.yaml'}
+MODES = {"train", "val", "predict", "export", "track", "benchmark"}
+TASKS = {"detect", "segment", "classify", "pose", "obb"}
+TASK2DATA = {
+    "detect": "coco8.yaml",
+    "segment": "coco8-seg.yaml",
+    "classify": "imagenet10",
+    "pose": "coco8-pose.yaml",
+    "obb": "dota8.yaml",
+}
 TASK2MODEL = {
-    'detect': 'yolov8n.pt',
-    'segment': 'yolov8n-seg.pt',
-    'classify': 'yolov8n-cls.pt',
-    'pose': 'yolov8n-pose.pt'}
+    "detect": "yolov8n.pt",
+    "segment": "yolov8n-seg.pt",
+    "classify": "yolov8n-cls.pt",
+    "pose": "yolov8n-pose.pt",
+    "obb": "yolov8n-obb.pt",
+}
 TASK2METRIC = {
-    'detect': 'metrics/mAP50-95(B)',
-    'segment': 'metrics/mAP50-95(M)',
-    'classify': 'metrics/accuracy_top1',
-    'pose': 'metrics/mAP50-95(P)'}
+    "detect": "metrics/mAP50-95(B)",
+    "segment": "metrics/mAP50-95(M)",
+    "classify": "metrics/accuracy_top1",
+    "pose": "metrics/mAP50-95(P)",
+    "obb": "metrics/mAP50-95(B)",
+}
 
-CLI_HELP_MSG = \
-    f"""
+CLI_HELP_MSG = f"""
     Arguments received: {str(['yolo'] + sys.argv[1:])}. Ultralytics 'yolo' commands use the following syntax:
 
         yolo TASK MODE ARGS
@@ -42,7 +70,7 @@ CLI_HELP_MSG = \
             yolo train data=coco128.yaml model=yolov8n.pt epochs=10 lr0=0.01
 
         2. Predict a YouTube video using a pretrained segmentation model at image size 320:
-            yolo predict model=yolov8n-seg.pt source='https://youtu.be/Zgi9g1ksQHc' imgsz=320
+            yolo predict model=yolov8n-seg.pt source='https://youtu.be/LNwODJXcvt4' imgsz=320
 
         3. Val a pretrained detection model at batch-size 1 and image size 640:
             yolo val model=yolov8n.pt data=coco128.yaml batch=1 imgsz=640
@@ -50,6 +78,9 @@ CLI_HELP_MSG = \
        4. Export a YOLOv8n classification model to ONNX format at image size 224 by 128 (no TASK required)
            yolo export model=yolov8n-cls.pt format=onnx imgsz=224,128

+        5. Explore your datasets using semantic search and SQL with a simple GUI powered by Ultralytics Explorer API
+            yolo explorer
+
        6.
Run special commands: yolo help yolo checks @@ -64,16 +95,84 @@ CLI_HELP_MSG = \ """ # Define keys for arg type checks -CFG_FLOAT_KEYS = 'warmup_epochs', 'box', 'cls', 'dfl', 'degrees', 'shear' -CFG_FRACTION_KEYS = ('dropout', 'iou', 'lr0', 'lrf', 'momentum', 'weight_decay', 'warmup_momentum', 'warmup_bias_lr', - 'label_smoothing', 'hsv_h', 'hsv_s', 'hsv_v', 'translate', 'scale', 'perspective', 'flipud', - 'fliplr', 'mosaic', 'mixup', 'copy_paste', 'conf', 'iou', 'fraction') # fraction floats 0.0 - 1.0 -CFG_INT_KEYS = ('epochs', 'patience', 'batch', 'workers', 'seed', 'close_mosaic', 'mask_ratio', 'max_det', 'vid_stride', - 'line_width', 'workspace', 'nbs', 'save_period') -CFG_BOOL_KEYS = ('save', 'exist_ok', 'verbose', 'deterministic', 'single_cls', 'rect', 'cos_lr', 'overlap_mask', 'val', - 'save_json', 'save_hybrid', 'half', 'dnn', 'plots', 'show', 'save_txt', 'save_conf', 'save_crop', - 'show_labels', 'show_conf', 'visualize', 'augment', 'agnostic_nms', 'retina_masks', 'boxes', 'keras', - 'optimize', 'int8', 'dynamic', 'simplify', 'nms', 'profile') +CFG_FLOAT_KEYS = {"warmup_epochs", "box", "cls", "dfl", "degrees", "shear", "time"} +CFG_FRACTION_KEYS = { + "dropout", + "iou", + "lr0", + "lrf", + "momentum", + "weight_decay", + "warmup_momentum", + "warmup_bias_lr", + "label_smoothing", + "hsv_h", + "hsv_s", + "hsv_v", + "translate", + "scale", + "perspective", + "flipud", + "fliplr", + "bgr", + "mosaic", + "mixup", + "copy_paste", + "conf", + "iou", + "fraction", +} # fraction floats 0.0 - 1.0 +CFG_INT_KEYS = { + "epochs", + "patience", + "batch", + "workers", + "seed", + "close_mosaic", + "mask_ratio", + "max_det", + "vid_stride", + "line_width", + "workspace", + "nbs", + "save_period", +} +CFG_BOOL_KEYS = { + "save", + "exist_ok", + "verbose", + "deterministic", + "single_cls", + "rect", + "cos_lr", + "overlap_mask", + "val", + "save_json", + "save_hybrid", + "half", + "dnn", + "plots", + "show", + "save_txt", + "save_conf", + "save_crop", + "save_frames", + "show_labels", + "show_conf", + "visualize", + "augment", + "agnostic_nms", + "retina_masks", + "show_boxes", + "keras", + "optimize", + "int8", + "dynamic", + "simplify", + "nms", + "profile", + "multi_scale", +} def cfg2dict(cfg): @@ -109,53 +208,72 @@ def get_cfg(cfg: Union[str, Path, Dict, SimpleNamespace] = DEFAULT_CFG_DICT, ove # Merge overrides if overrides: overrides = cfg2dict(overrides) - if 'save_dir' not in cfg: - overrides.pop('save_dir', None) # special override keys to ignore + if "save_dir" not in cfg: + overrides.pop("save_dir", None) # special override keys to ignore check_dict_alignment(cfg, overrides) cfg = {**cfg, **overrides} # merge cfg and overrides dicts (prefer overrides) # Special handling for numeric project/name - for k in 'project', 'name': + for k in "project", "name": if k in cfg and isinstance(cfg[k], (int, float)): cfg[k] = str(cfg[k]) - if cfg.get('name') == 'model': # assign model to 'name' arg - cfg['name'] = cfg.get('model', '').split('.')[0] + if cfg.get("name") == "model": # assign model to 'name' arg + cfg["name"] = cfg.get("model", "").split(".")[0] LOGGER.warning(f"WARNING ⚠️ 'name=model' automatically updated to 'name={cfg['name']}'.") # Type and Value checks - for k, v in cfg.items(): - if v is not None: # None values may be from optional args - if k in CFG_FLOAT_KEYS and not isinstance(v, (int, float)): - raise TypeError(f"'{k}={v}' is of invalid type {type(v).__name__}. " - f"Valid '{k}' types are int (i.e. '{k}=0') or float (i.e. 
'{k}=0.5')") - elif k in CFG_FRACTION_KEYS: - if not isinstance(v, (int, float)): - raise TypeError(f"'{k}={v}' is of invalid type {type(v).__name__}. " - f"Valid '{k}' types are int (i.e. '{k}=0') or float (i.e. '{k}=0.5')") - if not (0.0 <= v <= 1.0): - raise ValueError(f"'{k}={v}' is an invalid value. " - f"Valid '{k}' values are between 0.0 and 1.0.") - elif k in CFG_INT_KEYS and not isinstance(v, int): - raise TypeError(f"'{k}={v}' is of invalid type {type(v).__name__}. " - f"'{k}' must be an int (i.e. '{k}=8')") - elif k in CFG_BOOL_KEYS and not isinstance(v, bool): - raise TypeError(f"'{k}={v}' is of invalid type {type(v).__name__}. " - f"'{k}' must be a bool (i.e. '{k}=True' or '{k}=False')") + check_cfg(cfg) # Return instance return IterableSimpleNamespace(**cfg) +def check_cfg(cfg, hard=True): + """Check Ultralytics configuration argument types and values.""" + for k, v in cfg.items(): + if v is not None: # None values may be from optional args + if k in CFG_FLOAT_KEYS and not isinstance(v, (int, float)): + if hard: + raise TypeError( + f"'{k}={v}' is of invalid type {type(v).__name__}. " + f"Valid '{k}' types are int (i.e. '{k}=0') or float (i.e. '{k}=0.5')" + ) + cfg[k] = float(v) + elif k in CFG_FRACTION_KEYS: + if not isinstance(v, (int, float)): + if hard: + raise TypeError( + f"'{k}={v}' is of invalid type {type(v).__name__}. " + f"Valid '{k}' types are int (i.e. '{k}=0') or float (i.e. '{k}=0.5')" + ) + cfg[k] = v = float(v) + if not (0.0 <= v <= 1.0): + raise ValueError(f"'{k}={v}' is an invalid value. " f"Valid '{k}' values are between 0.0 and 1.0.") + elif k in CFG_INT_KEYS and not isinstance(v, int): + if hard: + raise TypeError( + f"'{k}={v}' is of invalid type {type(v).__name__}. " f"'{k}' must be an int (i.e. '{k}=8')" + ) + cfg[k] = int(v) + elif k in CFG_BOOL_KEYS and not isinstance(v, bool): + if hard: + raise TypeError( + f"'{k}={v}' is of invalid type {type(v).__name__}. " + f"'{k}' must be a bool (i.e. 
'{k}=True' or '{k}=False')" + ) + cfg[k] = bool(v) + + def get_save_dir(args, name=None): """Return save_dir as created from train/val/predict arguments.""" - if getattr(args, 'save_dir', None): + if getattr(args, "save_dir", None): save_dir = args.save_dir else: from ultralytics.utils.files import increment_path - project = args.project or Path(SETTINGS['runs_dir']) / args.task - name = name or args.name or f'{args.mode}' + project = args.project or (ROOT.parent / "tests/tmp/runs" if TESTS_RUNNING else RUNS_DIR) / args.task + name = name or args.name or f"{args.mode}" save_dir = increment_path(Path(project) / name, exist_ok=args.exist_ok if RANK in (-1, 0) else True) return Path(save_dir) @@ -165,23 +283,26 @@ def _handle_deprecation(custom): """Hardcoded function to handle deprecated config keys.""" for key in custom.copy().keys(): - if key == 'hide_labels': - deprecation_warn(key, 'show_labels') - custom['show_labels'] = custom.pop('hide_labels') == 'False' - if key == 'hide_conf': - deprecation_warn(key, 'show_conf') - custom['show_conf'] = custom.pop('hide_conf') == 'False' - if key == 'line_thickness': - deprecation_warn(key, 'line_width') - custom['line_width'] = custom.pop('line_thickness') + if key == "boxes": + deprecation_warn(key, "show_boxes") + custom["show_boxes"] = custom.pop("boxes") + if key == "hide_labels": + deprecation_warn(key, "show_labels") + custom["show_labels"] = custom.pop("hide_labels") == "False" + if key == "hide_conf": + deprecation_warn(key, "show_conf") + custom["show_conf"] = custom.pop("hide_conf") == "False" + if key == "line_thickness": + deprecation_warn(key, "line_width") + custom["line_width"] = custom.pop("line_thickness") return custom def check_dict_alignment(base: Dict, custom: Dict, e=None): """ - This function checks for any mismatched keys between a custom configuration list and a base configuration list. - If any mismatched keys are found, the function prints out similar keys from the base list and exits the program. + This function checks for any mismatched keys between a custom configuration list and a base configuration list. If + any mismatched keys are found, the function prints out similar keys from the base list and exits the program. Args: custom (dict): a dictionary of custom configuration options @@ -194,36 +315,35 @@ def check_dict_alignment(base: Dict, custom: Dict, e=None): if mismatched: from difflib import get_close_matches - string = '' + string = "" for x in mismatched: matches = get_close_matches(x, base_keys) # key list - matches = [f'{k}={base[k]}' if base.get(k) is not None else k for k in matches] - match_str = f'Similar arguments are i.e. {matches}.' if matches else '' + matches = [f"{k}={base[k]}" if base.get(k) is not None else k for k in matches] + match_str = f"Similar arguments are i.e. {matches}." if matches else "" string += f"'{colorstr('red', 'bold', x)}' is not a valid YOLO argument. {match_str}\n" raise SyntaxError(string + CLI_HELP_MSG) from e def merge_equals_args(args: List[str]) -> List[str]: """ - Merges arguments around isolated '=' args in a list of strings. - The function considers cases where the first argument ends with '=' or the second starts with '=', - as well as when the middle one is an equals sign. + Merges arguments around isolated '=' args in a list of strings. The function considers cases where the first + argument ends with '=' or the second starts with '=', as well as when the middle one is an equals sign. Args: args (List[str]): A list of strings where each element is an argument. 
Returns: - List[str]: A list of strings where the arguments around isolated '=' are merged. + (List[str]): A list of strings where the arguments around isolated '=' are merged. """ new_args = [] for i, arg in enumerate(args): - if arg == '=' and 0 < i < len(args) - 1: # merge ['arg', '=', 'val'] - new_args[-1] += f'={args[i + 1]}' + if arg == "=" and 0 < i < len(args) - 1: # merge ['arg', '=', 'val'] + new_args[-1] += f"={args[i + 1]}" del args[i + 1] - elif arg.endswith('=') and i < len(args) - 1 and '=' not in args[i + 1]: # merge ['arg=', 'val'] - new_args.append(f'{arg}{args[i + 1]}') + elif arg.endswith("=") and i < len(args) - 1 and "=" not in args[i + 1]: # merge ['arg=', 'val'] + new_args.append(f"{arg}{args[i + 1]}") del args[i + 1] - elif arg.startswith('=') and i > 0: # merge ['arg', '=val'] + elif arg.startswith("=") and i > 0: # merge ['arg', '=val'] new_args[-1] += arg else: new_args.append(arg) @@ -247,11 +367,11 @@ def handle_yolo_hub(args: List[str]) -> None: """ from ultralytics import hub - if args[0] == 'login': - key = args[1] if len(args) > 1 else '' + if args[0] == "login": + key = args[1] if len(args) > 1 else "" # Log in to Ultralytics HUB using the provided API key hub.login(key) - elif args[0] == 'logout': + elif args[0] == "logout": # Log out from Ultralytics HUB hub.logout() @@ -271,39 +391,47 @@ def handle_yolo_settings(args: List[str]) -> None: python my_script.py yolo settings reset ``` """ - url = 'https://docs.ultralytics.com/quickstart/#ultralytics-settings' # help URL + url = "https://docs.ultralytics.com/quickstart/#ultralytics-settings" # help URL try: if any(args): - if args[0] == 'reset': + if args[0] == "reset": SETTINGS_YAML.unlink() # delete the settings file SETTINGS.reset() # create new settings - LOGGER.info('Settings reset successfully') # inform the user that settings have been reset + LOGGER.info("Settings reset successfully") # inform the user that settings have been reset else: # save a new setting new = dict(parse_key_value_pair(a) for a in args) check_dict_alignment(SETTINGS, new) SETTINGS.update(new) - LOGGER.info(f'💡 Learn about settings at {url}') + LOGGER.info(f"💡 Learn about settings at {url}") yaml_print(SETTINGS_YAML) # print the current settings except Exception as e: LOGGER.warning(f"WARNING ⚠️ settings error: '{e}'. 
Please see {url} for help.") +def handle_explorer(): + """Open the Ultralytics Explorer GUI.""" + checks.check_requirements("streamlit") + LOGGER.info("💡 Loading Explorer dashboard...") + subprocess.run(["streamlit", "run", ROOT / "data/explorer/gui/dash.py", "--server.maxMessageSize", "2048"]) + + def parse_key_value_pair(pair): """Parse one 'key=value' pair and return key and value.""" - re.sub(r' *= *', '=', pair) # remove spaces around equals sign - k, v = pair.split('=', 1) # split on first '=' sign + k, v = pair.split("=", 1) # split on first '=' sign + k, v = k.strip(), v.strip() # remove spaces assert v, f"missing '{k}' value" return k, smart_value(v) def smart_value(v): """Convert a string to an underlying type such as int, float, bool, etc.""" - if v.lower() == 'none': + v_lower = v.lower() + if v_lower == "none": return None - elif v.lower() == 'true': + elif v_lower == "true": return True - elif v.lower() == 'false': + elif v_lower == "false": return False else: with contextlib.suppress(Exception): @@ -311,7 +439,7 @@ def smart_value(v): return v -def entrypoint(debug=''): +def entrypoint(debug=""): """ This function is the ultralytics package entrypoint, it's responsible for parsing the command line arguments passed to the package. @@ -326,135 +454,160 @@ def entrypoint(debug=''): It uses the package's default cfg and initializes it using the passed overrides. Then it calls the CLI function with the composed cfg """ - args = (debug.split(' ') if debug else sys.argv)[1:] + args = (debug.split(" ") if debug else sys.argv)[1:] if not args: # no arguments passed LOGGER.info(CLI_HELP_MSG) return special = { - 'help': lambda: LOGGER.info(CLI_HELP_MSG), - 'checks': checks.check_yolo, - 'version': lambda: LOGGER.info(__version__), - 'settings': lambda: handle_yolo_settings(args[1:]), - 'cfg': lambda: yaml_print(DEFAULT_CFG_PATH), - 'hub': lambda: handle_yolo_hub(args[1:]), - 'login': lambda: handle_yolo_hub(args), - 'copy-cfg': copy_default_cfg} + "help": lambda: LOGGER.info(CLI_HELP_MSG), + "checks": checks.collect_system_info, + "version": lambda: LOGGER.info(__version__), + "settings": lambda: handle_yolo_settings(args[1:]), + "cfg": lambda: yaml_print(DEFAULT_CFG_PATH), + "hub": lambda: handle_yolo_hub(args[1:]), + "login": lambda: handle_yolo_hub(args), + "copy-cfg": copy_default_cfg, + "explorer": lambda: handle_explorer(), + } full_args_dict = {**DEFAULT_CFG_DICT, **{k: None for k in TASKS}, **{k: None for k in MODES}, **special} - # Define common mis-uses of special commands, i.e. -h, -help, --help + # Define common misuses of special commands, i.e. -h, -help, --help special.update({k[0]: v for k, v in special.items()}) # singular - special.update({k[:-1]: v for k, v in special.items() if len(k) > 1 and k.endswith('s')}) # singular - special = {**special, **{f'-{k}': v for k, v in special.items()}, **{f'--{k}': v for k, v in special.items()}} + special.update({k[:-1]: v for k, v in special.items() if len(k) > 1 and k.endswith("s")}) # singular + special = {**special, **{f"-{k}": v for k, v in special.items()}, **{f"--{k}": v for k, v in special.items()}} overrides = {} # basic overrides, i.e. 
imgsz=320 for a in merge_equals_args(args): # merge spaces around '=' sign - if a.startswith('--'): - LOGGER.warning(f"WARNING ⚠️ '{a}' does not require leading dashes '--', updating to '{a[2:]}'.") + if a.startswith("--"): + LOGGER.warning(f"WARNING ⚠️ argument '{a}' does not require leading dashes '--', updating to '{a[2:]}'.") a = a[2:] - if a.endswith(','): - LOGGER.warning(f"WARNING ⚠️ '{a}' does not require trailing comma ',', updating to '{a[:-1]}'.") + if a.endswith(","): + LOGGER.warning(f"WARNING ⚠️ argument '{a}' does not require trailing comma ',', updating to '{a[:-1]}'.") a = a[:-1] - if '=' in a: + if "=" in a: try: k, v = parse_key_value_pair(a) - if k == 'cfg': # custom.yaml passed - LOGGER.info(f'Overriding {DEFAULT_CFG_PATH} with {v}') - overrides = {k: val for k, val in yaml_load(checks.check_yaml(v)).items() if k != 'cfg'} + if k == "cfg" and v is not None: # custom.yaml passed + LOGGER.info(f"Overriding {DEFAULT_CFG_PATH} with {v}") + overrides = {k: val for k, val in yaml_load(checks.check_yaml(v)).items() if k != "cfg"} else: overrides[k] = v except (NameError, SyntaxError, ValueError, AssertionError) as e: - check_dict_alignment(full_args_dict, {a: ''}, e) + check_dict_alignment(full_args_dict, {a: ""}, e) elif a in TASKS: - overrides['task'] = a + overrides["task"] = a elif a in MODES: - overrides['mode'] = a + overrides["mode"] = a elif a.lower() in special: special[a.lower()]() return elif a in DEFAULT_CFG_DICT and isinstance(DEFAULT_CFG_DICT[a], bool): overrides[a] = True # auto-True for default bool args, i.e. 'yolo show' sets show=True elif a in DEFAULT_CFG_DICT: - raise SyntaxError(f"'{colorstr('red', 'bold', a)}' is a valid YOLO argument but is missing an '=' sign " - f"to set its value, i.e. try '{a}={DEFAULT_CFG_DICT[a]}'\n{CLI_HELP_MSG}") + raise SyntaxError( + f"'{colorstr('red', 'bold', a)}' is a valid YOLO argument but is missing an '=' sign " + f"to set its value, i.e. try '{a}={DEFAULT_CFG_DICT[a]}'\n{CLI_HELP_MSG}" + ) else: - check_dict_alignment(full_args_dict, {a: ''}) + check_dict_alignment(full_args_dict, {a: ""}) # Check keys check_dict_alignment(full_args_dict, overrides) # Mode - mode = overrides.get('mode') + mode = overrides.get("mode") if mode is None: - mode = DEFAULT_CFG.mode or 'predict' - LOGGER.warning(f"WARNING ⚠️ 'mode' is missing. Valid modes are {MODES}. Using default 'mode={mode}'.") + mode = DEFAULT_CFG.mode or "predict" + LOGGER.warning(f"WARNING ⚠️ 'mode' argument is missing. Valid modes are {MODES}. Using default 'mode={mode}'.") elif mode not in MODES: raise ValueError(f"Invalid 'mode={mode}'. Valid modes are {MODES}.\n{CLI_HELP_MSG}") # Task - task = overrides.pop('task', None) + task = overrides.pop("task", None) if task: if task not in TASKS: raise ValueError(f"Invalid 'task={task}'. Valid tasks are {TASKS}.\n{CLI_HELP_MSG}") - if 'model' not in overrides: - overrides['model'] = TASK2MODEL[task] + if "model" not in overrides: + overrides["model"] = TASK2MODEL[task] # Model - model = overrides.pop('model', DEFAULT_CFG.model) + model = overrides.pop("model", DEFAULT_CFG.model) if model is None: - model = 'yolov8n.pt' - LOGGER.warning(f"WARNING ⚠️ 'model' is missing. Using default 'model={model}'.") - overrides['model'] = model - if 'rtdetr' in model.lower(): # guess architecture + model = "yolov8n.pt" + LOGGER.warning(f"WARNING ⚠️ 'model' argument is missing. 
Using default 'model={model}'.")
+    overrides["model"] = model
+    # stem = Path(model).stem.lower()
+    stem = model.lower()
+    if "rtdetr" in stem:  # guess architecture
         from ultralytics import RTDETR
+
         model = RTDETR(model)  # no task argument
-    elif 'fastsam' in model.lower():
+    elif "fastsam" in stem:
         from ultralytics import FastSAM
+
         model = FastSAM(model)
-    elif 'sam' in model.lower():
+    elif "sam" in stem:
         from ultralytics import SAM
+
         model = SAM(model)
-    else:
+    elif re.search("v3|v5|v6|v8|v9", stem):
         from ultralytics import YOLO
+
         model = YOLO(model, task=task)
-    if isinstance(overrides.get('pretrained'), str):
-        model.load(overrides['pretrained'])
+    else:
+        from ultralytics import YOLOv10
+
+        # Special case for the HuggingFace Hub
+        split_path = model.split("/")
+        if len(split_path) == 2 and not os.path.exists(model):
+            model = YOLOv10.from_pretrained(model)
+        else:
+            model = YOLOv10(model)
+    if isinstance(overrides.get("pretrained"), str):
+        model.load(overrides["pretrained"])
 
     # Task Update
     if task != model.task:
         if task:
-            LOGGER.warning(f"WARNING ⚠️ conflicting 'task={task}' passed with 'task={model.task}' model. "
-                           f"Ignoring 'task={task}' and updating to 'task={model.task}' to match model.")
+            LOGGER.warning(
+                f"WARNING ⚠️ conflicting 'task={task}' passed with 'task={model.task}' model. "
+                f"Ignoring 'task={task}' and updating to 'task={model.task}' to match model."
+            )
         task = model.task
 
     # Mode
-    if mode in ('predict', 'track') and 'source' not in overrides:
-        overrides['source'] = DEFAULT_CFG.source or ASSETS
-        LOGGER.warning(f"WARNING ⚠️ 'source' is missing. Using default 'source={overrides['source']}'.")
-    elif mode in ('train', 'val'):
-        if 'data' not in overrides and 'resume' not in overrides:
-            overrides['data'] = TASK2DATA.get(task or DEFAULT_CFG.task, DEFAULT_CFG.data)
-            LOGGER.warning(f"WARNING ⚠️ 'data' is missing. Using default 'data={overrides['data']}'.")
-    elif mode == 'export':
-        if 'format' not in overrides:
-            overrides['format'] = DEFAULT_CFG.format or 'torchscript'
-            LOGGER.warning(f"WARNING ⚠️ 'format' is missing. Using default 'format={overrides['format']}'.")
+    if mode in ("predict", "track") and "source" not in overrides:
+        overrides["source"] = DEFAULT_CFG.source or ASSETS
+        LOGGER.warning(f"WARNING ⚠️ 'source' argument is missing. Using default 'source={overrides['source']}'.")
+    elif mode in ("train", "val"):
+        if "data" not in overrides and "resume" not in overrides:
+            overrides["data"] = DEFAULT_CFG.data or TASK2DATA.get(task or DEFAULT_CFG.task, DEFAULT_CFG.data)
+            LOGGER.warning(f"WARNING ⚠️ 'data' argument is missing. Using default 'data={overrides['data']}'.")
+    elif mode == "export":
+        if "format" not in overrides:
+            overrides["format"] = DEFAULT_CFG.format or "torchscript"
+            LOGGER.warning(f"WARNING ⚠️ 'format' argument is missing. Using default 'format={overrides['format']}'.")
 
     # Run command in python
-    # getattr(model, mode)(**vars(get_cfg(overrides=overrides)))  # default args using default.yaml
     getattr(model, mode)(**overrides)  # default args from model
 
+    # Show help
+    LOGGER.info(f"💡 Learn more at https://docs.ultralytics.com/modes/{mode}")
+
 
 # Special modes --------------------------------------------------------------------------------------------------------
 def copy_default_cfg():
     """Copy and create a new default configuration file with '_copy' appended to its name."""
-    new_file = Path.cwd() / DEFAULT_CFG_PATH.name.replace('.yaml', '_copy.yaml')
+    new_file = Path.cwd() / DEFAULT_CFG_PATH.name.replace(".yaml", "_copy.yaml")
     shutil.copy2(DEFAULT_CFG_PATH, new_file)
-    LOGGER.info(f'{DEFAULT_CFG_PATH} copied to {new_file}\n'
-                f"Example YOLO command with this new custom cfg:\n    yolo cfg='{new_file}' imgsz=320 batch=8")
+    LOGGER.info(
+        f"{DEFAULT_CFG_PATH} copied to {new_file}\n"
+        f"Example YOLO command with this new custom cfg:\n    yolo cfg='{new_file}' imgsz=320 batch=8"
+    )
 
 
-if __name__ == '__main__':
+if __name__ == "__main__":
    # Example: entrypoint(debug='yolo predict model=yolov8n.pt')
-    entrypoint(debug='')
+    entrypoint(debug="")
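For orientation, a minimal sketch of the argument handling above: each 'key=value' token is coerced with smart_value(), and the model string is used to guess the architecture, with the new YOLOv10 branch treating two-part names that are not local paths as HuggingFace Hub ids. smart_value() below is restated from the hunk above; 'jameslahm/yolov10n' is only an illustrative hub-style id, not something this patch pins down.

    import contextlib
    import os

    def smart_value(v):
        """Coerce a CLI string to None/bool/number where possible (mirrors the helper above)."""
        v_lower = v.lower()
        if v_lower == "none":
            return None
        if v_lower == "true":
            return True
        if v_lower == "false":
            return False
        with contextlib.suppress(Exception):
            return eval(v)  # ints, floats, lists, ...
        return v

    def looks_like_hub_id(model: str) -> bool:
        """True for 'org/name' strings with no matching local path (the YOLOv10.from_pretrained case)."""
        return len(model.split("/")) == 2 and not os.path.exists(model)

    print(smart_value("0.25"), smart_value("True"), smart_value("none"))  # -> 0.25 True None
    print(looks_like_hub_id("jameslahm/yolov10n"))  # -> True (assuming no such local file)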
diff --git a/ultralytics/cfg/__pycache__/__init__.cpython-312.pyc b/ultralytics/cfg/__pycache__/__init__.cpython-312.pyc
index f668d66fb56b968d3932d05095966d9335ab4e56..bbf01000ff783dfed90eef59194545ea0b8c27e8 100644
GIT binary patch
(compiled-cache binary delta omitted)
diff --git a/ultralytics/cfg/__pycache__/__init__.cpython-39.pyc b/ultralytics/cfg/__pycache__/__init__.cpython-39.pyc
index 30ff71091d3a0543190f4847d7b2703b3b7b4f04..14e2e94ce83066c101d706a7194d78cb6a0a31df 100644
GIT binary patch
(compiled-cache binary delta omitted)
diff --git a/ultralytics/cfg/datasets/Argoverse.yaml b/ultralytics/cfg/datasets/Argoverse.yaml
index 76255e4..43755f7 100644
--- a/ultralytics/cfg/datasets/Argoverse.yaml
+++ b/ultralytics/cfg/datasets/Argoverse.yaml
@@ -1,17 +1,17 @@
 # Ultralytics YOLO 🚀, AGPL-3.0 license
-# Argoverse-HD dataset (ring-front-center camera) http://www.cs.cmu.edu/~mengtial/proj/streaming/ by Argo AI
+# Argoverse-HD dataset (ring-front-center camera) https://www.cs.cmu.edu/~mengtial/proj/streaming/ by Argo AI
+# Documentation: https://docs.ultralytics.com/datasets/detect/argoverse/
 # Example usage: yolo train data=Argoverse.yaml
 # parent
 #   ├── ultralytics
 #   └── datasets
 #       └── Argoverse  ← downloads here (31.5 GB)
 
-
 # Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
-path: ../datasets/Argoverse # dataset root dir -train: Argoverse-1.1/images/train/ # train images (relative to 'path') 39384 images -val: Argoverse-1.1/images/val/ # val images (relative to 'path') 15062 images -test: Argoverse-1.1/images/test/ # test images (optional) https://eval.ai/web/challenges/challenge-page/800/overview +path: ../datasets/Argoverse # dataset root dir +train: Argoverse-1.1/images/train/ # train images (relative to 'path') 39384 images +val: Argoverse-1.1/images/val/ # val images (relative to 'path') 15062 images +test: Argoverse-1.1/images/test/ # test images (optional) https://eval.ai/web/challenges/challenge-page/800/overview # Classes names: @@ -24,7 +24,6 @@ names: 6: traffic_light 7: stop_sign - # Download script/URL (optional) --------------------------------------------------------------------------------------- download: | import json @@ -64,7 +63,9 @@ download: | # Download 'https://argoverse-hd.s3.us-east-2.amazonaws.com/Argoverse-HD-Full.zip' (deprecated S3 link) dir = Path(yaml['path']) # dataset root dir urls = ['https://drive.google.com/file/d/1st9qW3BeIwQsnR0t8mRpvbsSWIo16ACi/view?usp=drive_link'] - download(urls, dir=dir) + print("\n\nWARNING: Argoverse dataset MUST be downloaded manually, autodownload will NOT work.") + print(f"WARNING: Manually download Argoverse dataset '{urls[0]}' to '{dir}' and re-run your command.\n\n") + # download(urls, dir=dir) # Convert annotations_dir = 'Argoverse-HD/annotations/' diff --git a/ultralytics/cfg/datasets/DOTAv2.yaml b/ultralytics/cfg/datasets/DOTAv1.5.yaml similarity index 56% rename from ultralytics/cfg/datasets/DOTAv2.yaml rename to ultralytics/cfg/datasets/DOTAv1.5.yaml index c663bdd..701535f 100644 --- a/ultralytics/cfg/datasets/DOTAv2.yaml +++ b/ultralytics/cfg/datasets/DOTAv1.5.yaml @@ -1,18 +1,19 @@ # Ultralytics YOLO 🚀, AGPL-3.0 license -# DOTA 2.0 dataset https://captain-whu.github.io/DOTA/index.html for object detection in aerial images by Wuhan University -# Example usage: yolo train model=yolov8n-obb.pt data=DOTAv2.yaml +# DOTA 1.5 dataset https://captain-whu.github.io/DOTA/index.html for object detection in aerial images by Wuhan University +# Documentation: https://docs.ultralytics.com/datasets/obb/dota-v2/ +# Example usage: yolo train model=yolov8n-obb.pt data=DOTAv1.5.yaml # parent # ├── ultralytics # └── datasets -# └── dota2 ← downloads here (2GB) +# └── dota1.5 ← downloads here (2GB) # Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..] 
-path: ../datasets/DOTAv2 # dataset root dir -train: images/train # train images (relative to 'path') 1411 images -val: images/val # val images (relative to 'path') 458 images -test: images/test # test images (optional) 937 images +path: ../datasets/DOTAv1.5 # dataset root dir +train: images/train # train images (relative to 'path') 1411 images +val: images/val # val images (relative to 'path') 458 images +test: images/test # test images (optional) 937 images -# Classes for DOTA 2.0 +# Classes for DOTA 1.5 names: 0: plane 1: ship @@ -30,8 +31,6 @@ names: 13: soccer ball field 14: swimming pool 15: container crane - 16: airport - 17: helipad # Download script/URL (optional) -download: https://github.com/ultralytics/yolov5/releases/download/v1.0/DOTAv2.zip +download: https://github.com/ultralytics/yolov5/releases/download/v1.0/DOTAv1.5.zip diff --git a/ultralytics/cfg/datasets/DOTAv1.yaml b/ultralytics/cfg/datasets/DOTAv1.yaml new file mode 100644 index 0000000..f6364d3 --- /dev/null +++ b/ultralytics/cfg/datasets/DOTAv1.yaml @@ -0,0 +1,35 @@ +# Ultralytics YOLO 🚀, AGPL-3.0 license +# DOTA 1.0 dataset https://captain-whu.github.io/DOTA/index.html for object detection in aerial images by Wuhan University +# Documentation: https://docs.ultralytics.com/datasets/obb/dota-v2/ +# Example usage: yolo train model=yolov8n-obb.pt data=DOTAv1.yaml +# parent +# ├── ultralytics +# └── datasets +# └── dota1 ← downloads here (2GB) + +# Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..] +path: ../datasets/DOTAv1 # dataset root dir +train: images/train # train images (relative to 'path') 1411 images +val: images/val # val images (relative to 'path') 458 images +test: images/test # test images (optional) 937 images + +# Classes for DOTA 1.0 +names: + 0: plane + 1: ship + 2: storage tank + 3: baseball diamond + 4: tennis court + 5: basketball court + 6: ground track field + 7: harbor + 8: bridge + 9: large vehicle + 10: small vehicle + 11: helicopter + 12: roundabout + 13: soccer ball field + 14: swimming pool + +# Download script/URL (optional) +download: https://github.com/ultralytics/yolov5/releases/download/v1.0/DOTAv1.zip diff --git a/ultralytics/cfg/datasets/GlobalWheat2020.yaml b/ultralytics/cfg/datasets/GlobalWheat2020.yaml index 165004f..ae6bfa0 100644 --- a/ultralytics/cfg/datasets/GlobalWheat2020.yaml +++ b/ultralytics/cfg/datasets/GlobalWheat2020.yaml @@ -1,14 +1,14 @@ # Ultralytics YOLO 🚀, AGPL-3.0 license -# Global Wheat 2020 dataset http://www.global-wheat.com/ by University of Saskatchewan +# Global Wheat 2020 dataset https://www.global-wheat.com/ by University of Saskatchewan +# Documentation: https://docs.ultralytics.com/datasets/detect/globalwheat2020/ # Example usage: yolo train data=GlobalWheat2020.yaml # parent # ├── ultralytics # └── datasets # └── GlobalWheat2020 ← downloads here (7.0 GB) - # Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..] 
-path: ../datasets/GlobalWheat2020 # dataset root dir +path: ../datasets/GlobalWheat2020 # dataset root dir train: # train images (relative to 'path') 3422 images - images/arvalis_1 - images/arvalis_2 @@ -29,7 +29,6 @@ test: # test images (optional) 1276 images names: 0: wheat_head - # Download script/URL (optional) --------------------------------------------------------------------------------------- download: | from ultralytics.utils.downloads import download diff --git a/ultralytics/cfg/datasets/ImageNet.yaml b/ultralytics/cfg/datasets/ImageNet.yaml index c1aa155..0dc344a 100644 --- a/ultralytics/cfg/datasets/ImageNet.yaml +++ b/ultralytics/cfg/datasets/ImageNet.yaml @@ -1,18 +1,18 @@ # Ultralytics YOLO 🚀, AGPL-3.0 license # ImageNet-1k dataset https://www.image-net.org/index.php by Stanford University # Simplified class names from https://github.com/anishathalye/imagenet-simple-labels +# Documentation: https://docs.ultralytics.com/datasets/classify/imagenet/ # Example usage: yolo train task=classify data=imagenet # parent # ├── ultralytics # └── datasets # └── imagenet ← downloads here (144 GB) - # Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..] -path: ../datasets/imagenet # dataset root dir -train: train # train images (relative to 'path') 1281167 images -val: val # val images (relative to 'path') 50000 images -test: # test images (optional) +path: ../datasets/imagenet # dataset root dir +train: train # train images (relative to 'path') 1281167 images +val: val # val images (relative to 'path') 50000 images +test: # test images (optional) # Classes names: @@ -2020,6 +2020,5 @@ map: n13133613: ear n15075141: toilet_tissue - # Download script/URL (optional) download: yolo/data/scripts/get_imagenet.sh diff --git a/ultralytics/cfg/datasets/Objects365.yaml b/ultralytics/cfg/datasets/Objects365.yaml index 415eff9..9b11720 100644 --- a/ultralytics/cfg/datasets/Objects365.yaml +++ b/ultralytics/cfg/datasets/Objects365.yaml @@ -1,17 +1,17 @@ # Ultralytics YOLO 🚀, AGPL-3.0 license # Objects365 dataset https://www.objects365.org/ by Megvii +# Documentation: https://docs.ultralytics.com/datasets/detect/objects365/ # Example usage: yolo train data=Objects365.yaml # parent # ├── ultralytics # └── datasets # └── Objects365 ← downloads here (712 GB = 367G data + 345G zips) - # Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..] 
-path: ../datasets/Objects365 # dataset root dir
-train: images/train # train images (relative to 'path') 1742289 images
+path: ../datasets/Objects365  # dataset root dir
+train: images/train  # train images (relative to 'path') 1742289 images
 val: images/val # val images (relative to 'path') 80000 images
-test: # test images (optional)
+test: # test images (optional)
 
 # Classes
 names:
@@ -381,7 +381,6 @@ names:
   363: Curling
   364: Table Tennis
 
-
 # Download script/URL (optional) ---------------------------------------------------------------------------------------
 download: |
   from tqdm import tqdm
diff --git a/ultralytics/cfg/datasets/SKU-110K.yaml b/ultralytics/cfg/datasets/SKU-110K.yaml
index e6deac2..fff1baa 100644
--- a/ultralytics/cfg/datasets/SKU-110K.yaml
+++ b/ultralytics/cfg/datasets/SKU-110K.yaml
@@ -1,23 +1,22 @@
 # Ultralytics YOLO 🚀, AGPL-3.0 license
 # SKU-110K retail items dataset https://github.com/eg4000/SKU110K_CVPR19 by Trax Retail
+# Documentation: https://docs.ultralytics.com/datasets/detect/sku-110k/
 # Example usage: yolo train data=SKU-110K.yaml
 # parent
 #   ├── ultralytics
 #   └── datasets
 #       └── SKU-110K  ← downloads here (13.6 GB)
 
-
 # Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
-path: ../datasets/SKU-110K # dataset root dir
-train: train.txt # train images (relative to 'path') 8219 images
-val: val.txt # val images (relative to 'path') 588 images
-test: test.txt # test images (optional) 2936 images
+path: ../datasets/SKU-110K  # dataset root dir
+train: train.txt  # train images (relative to 'path') 8219 images
+val: val.txt  # val images (relative to 'path') 588 images
+test: test.txt  # test images (optional) 2936 images
 
 # Classes
 names:
   0: object
 
-
 # Download script/URL (optional) ---------------------------------------------------------------------------------------
 download: |
   import shutil
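All of the dataset YAMLs touched in this patch share one schema: a 'path' root, 'train'/'val'/'test' splits relative to it, a 'names' class map, and an optional 'download' script or URL. A minimal sketch, assuming PyYAML is installed and using the coco8.yaml shipped in the repo as a convenient example, of how the split paths resolve:

    from pathlib import Path

    import yaml  # PyYAML, assumed available

    cfg = yaml.safe_load(Path("ultralytics/cfg/datasets/coco8.yaml").read_text())
    root = Path(cfg["path"])  # e.g. ../datasets/coco8
    for split in ("train", "val", "test"):
        rel = cfg.get(split)
        print(split, root / rel if rel else "(optional, not set)")  # 'test' is often left empty
    print(len(cfg["names"]), "classes; download:", str(cfg.get("download"))[:60])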
diff --git a/ultralytics/cfg/datasets/VOC.yaml b/ultralytics/cfg/datasets/VOC.yaml
index 6bdcc4f..cd6d5ad 100644
--- a/ultralytics/cfg/datasets/VOC.yaml
+++ b/ultralytics/cfg/datasets/VOC.yaml
@@ -1,12 +1,12 @@
 # Ultralytics YOLO 🚀, AGPL-3.0 license
 # PASCAL VOC dataset http://host.robots.ox.ac.uk/pascal/VOC by University of Oxford
+# Documentation: https://docs.ultralytics.com/datasets/detect/voc/
 # Example usage: yolo train data=VOC.yaml
 # parent
 #   ├── ultralytics
 #   └── datasets
 #       └── VOC  ← downloads here (2.8 GB)
 
-
 # Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
 path: ../datasets/VOC
 train: # train images (relative to 'path') 16551 images
@@ -42,7 +42,6 @@ names:
   18: train
   19: tvmonitor
 
-
 # Download script/URL (optional) ---------------------------------------------------------------------------------------
 download: |
   import xml.etree.ElementTree as ET
@@ -81,7 +80,7 @@ download: |
     urls = [f'{url}VOCtrainval_06-Nov-2007.zip',  # 446MB, 5012 images
             f'{url}VOCtest_06-Nov-2007.zip',  # 438MB, 4953 images
             f'{url}VOCtrainval_11-May-2012.zip']  # 1.95GB, 17126 images
-    download(urls, dir=dir / 'images', curl=True, threads=3)
+    download(urls, dir=dir / 'images', curl=True, threads=3, exist_ok=True)  # download and unzip over existing paths (required)
 
     # Convert
     path = dir / 'images/VOCdevkit'
diff --git a/ultralytics/cfg/datasets/VisDrone.yaml b/ultralytics/cfg/datasets/VisDrone.yaml
index a1a4a46..773f0b0 100644
--- a/ultralytics/cfg/datasets/VisDrone.yaml
+++ b/ultralytics/cfg/datasets/VisDrone.yaml
@@ -1,17 +1,17 @@
 # Ultralytics YOLO 🚀, AGPL-3.0 license
 # VisDrone2019-DET dataset https://github.com/VisDrone/VisDrone-Dataset by Tianjin University
+# Documentation: https://docs.ultralytics.com/datasets/detect/visdrone/
 # Example usage: yolo train data=VisDrone.yaml
 # parent
 #   ├── ultralytics
 #   └── datasets
 #       └── VisDrone  ← downloads here (2.3 GB)
 
-
 # Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
-path: ../datasets/VisDrone # dataset root dir
-train: VisDrone2019-DET-train/images # train images (relative to 'path') 6471 images
-val: VisDrone2019-DET-val/images # val images (relative to 'path') 548 images
-test: VisDrone2019-DET-test-dev/images # test images (optional) 1610 images
+path: ../datasets/VisDrone  # dataset root dir
+train: VisDrone2019-DET-train/images  # train images (relative to 'path') 6471 images
+val: VisDrone2019-DET-val/images  # val images (relative to 'path') 548 images
+test: VisDrone2019-DET-test-dev/images  # test images (optional) 1610 images
 
 # Classes
 names:
@@ -26,7 +26,6 @@ names:
   8: bus
   9: motor
 
-
 # Download script/URL (optional) ---------------------------------------------------------------------------------------
 download: |
   import os
diff --git a/ultralytics/cfg/datasets/african-wildlife.yaml b/ultralytics/cfg/datasets/african-wildlife.yaml
new file mode 100644
index 0000000..af8af36
--- /dev/null
+++ b/ultralytics/cfg/datasets/african-wildlife.yaml
@@ -0,0 +1,24 @@
+# Ultralytics YOLO 🚀, AGPL-3.0 license
+# African-wildlife dataset by Ultralytics
+# Documentation: https://docs.ultralytics.com/datasets/detect/african-wildlife/
+# Example usage: yolo train data=african-wildlife.yaml
+# parent
+#   ├── ultralytics
+#   └── datasets
+#       └── african-wildlife  ← downloads here (100 MB)
+
+# Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
+path: ../datasets/african-wildlife # dataset root dir +train: train/images # train images (relative to 'path') 1052 images +val: valid/images # val images (relative to 'path') 225 images +test: test/images # test images (relative to 'path') 227 images + +# Classes +names: + 0: buffalo + 1: elephant + 2: rhino + 3: zebra + +# Download script/URL (optional) +download: https://ultralytics.com/assets/african-wildlife.zip diff --git a/ultralytics/cfg/datasets/brain-tumor.yaml b/ultralytics/cfg/datasets/brain-tumor.yaml new file mode 100644 index 0000000..be61098 --- /dev/null +++ b/ultralytics/cfg/datasets/brain-tumor.yaml @@ -0,0 +1,22 @@ +# Ultralytics YOLO 🚀, AGPL-3.0 license +# Brain-tumor dataset by Ultralytics +# Documentation: https://docs.ultralytics.com/datasets/detect/brain-tumor/ +# Example usage: yolo train data=brain-tumor.yaml +# parent +# ├── ultralytics +# └── datasets +# └── brain-tumor ← downloads here (4.05 MB) + +# Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..] +path: ../datasets/brain-tumor # dataset root dir +train: train/images # train images (relative to 'path') 893 images +val: valid/images # val images (relative to 'path') 223 images +test: # test images (relative to 'path') + +# Classes +names: + 0: negative + 1: positive + +# Download script/URL (optional) +download: https://ultralytics.com/assets/brain-tumor.zip diff --git a/ultralytics/cfg/datasets/carparts-seg.yaml b/ultralytics/cfg/datasets/carparts-seg.yaml new file mode 100644 index 0000000..a1c25ba --- /dev/null +++ b/ultralytics/cfg/datasets/carparts-seg.yaml @@ -0,0 +1,43 @@ +# Ultralytics YOLO 🚀, AGPL-3.0 license +# Carparts-seg dataset by Ultralytics +# Documentation: https://docs.ultralytics.com/datasets/segment/carparts-seg/ +# Example usage: yolo train data=carparts-seg.yaml +# parent +# ├── ultralytics +# └── datasets +# └── carparts-seg ← downloads here (132 MB) + +# Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..] 
+path: ../datasets/carparts-seg  # dataset root dir
+train: train/images  # train images (relative to 'path') 3516 images
+val: valid/images  # val images (relative to 'path') 276 images
+test: test/images  # test images (relative to 'path') 401 images
+
+# Classes
+names:
+  0: back_bumper
+  1: back_door
+  2: back_glass
+  3: back_left_door
+  4: back_left_light
+  5: back_light
+  6: back_right_door
+  7: back_right_light
+  8: front_bumper
+  9: front_door
+  10: front_glass
+  11: front_left_door
+  12: front_left_light
+  13: front_light
+  14: front_right_door
+  15: front_right_light
+  16: hood
+  17: left_mirror
+  18: object
+  19: right_mirror
+  20: tailgate
+  21: trunk
+  22: wheel
+
+# Download script/URL (optional)
+download: https://ultralytics.com/assets/carparts-seg.zip
diff --git a/ultralytics/cfg/datasets/coco-pose.yaml b/ultralytics/cfg/datasets/coco-pose.yaml
index 670d55b..b50b7a5 100644
--- a/ultralytics/cfg/datasets/coco-pose.yaml
+++ b/ultralytics/cfg/datasets/coco-pose.yaml
@@ -1,20 +1,20 @@
 # Ultralytics YOLO 🚀, AGPL-3.0 license
-# COCO 2017 dataset http://cocodataset.org by Microsoft
+# COCO 2017 dataset https://cocodataset.org by Microsoft
+# Documentation: https://docs.ultralytics.com/datasets/pose/coco/
 # Example usage: yolo train data=coco-pose.yaml
 # parent
 #   ├── ultralytics
 #   └── datasets
 #       └── coco-pose  ← downloads here (20.1 GB)
 
-
 # Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
-path: ../datasets/coco-pose # dataset root dir
-train: train2017.txt # train images (relative to 'path') 118287 images
-val: val2017.txt # val images (relative to 'path') 5000 images
-test: test-dev2017.txt # 20288 of 40670 images, submit to https://competitions.codalab.org/competitions/20794
+path: ../datasets/coco-pose  # dataset root dir
+train: train2017.txt  # train images (relative to 'path') 118287 images
+val: val2017.txt  # val images (relative to 'path') 5000 images
+test: test-dev2017.txt  # 20288 of 40670 images, submit to https://competitions.codalab.org/competitions/20794
 
 # Keypoints
-kpt_shape: [17, 3] # number of keypoints, number of dims (2 for x,y or 3 for x,y,visible)
+kpt_shape: [17, 3]  # number of keypoints, number of dims (2 for x,y or 3 for x,y,visible)
 flip_idx: [0, 2, 1, 4, 3, 6, 5, 8, 7, 10, 9, 12, 11, 14, 13, 16, 15]
 
 # Classes
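In coco-pose.yaml above, flip_idx pairs each keypoint index with its left/right counterpart so horizontal-flip augmentation stays anatomically consistent (tiger-pose.yaml later in this patch uses an identity mapping, since its keypoints have no left/right pairs). An illustrative sketch, not code from this patch:

    import numpy as np

    flip_idx = [0, 2, 1, 4, 3, 6, 5, 8, 7, 10, 9, 12, 11, 14, 13, 16, 15]

    def fliplr_keypoints(kpts: np.ndarray, img_w: int) -> np.ndarray:
        """Mirror (17, 3) x,y,visible keypoints and swap left/right pairs via flip_idx."""
        flipped = kpts[flip_idx].copy()        # 'left eye' (1) becomes 'right eye' (2), etc.
        flipped[:, 0] = img_w - flipped[:, 0]  # mirror the x coordinate
        return flipped

    kpts = np.zeros((17, 3), dtype=np.float32)
    kpts[1] = [100, 50, 2]                       # left-eye keypoint
    print(fliplr_keypoints(kpts, img_w=640)[2])  # now stored at the right-eye index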
diff --git a/ultralytics/cfg/datasets/coco.yaml b/ultralytics/cfg/datasets/coco.yaml
index 8a70a5b..d0297f7 100644
--- a/ultralytics/cfg/datasets/coco.yaml
+++ b/ultralytics/cfg/datasets/coco.yaml
@@ -1,17 +1,17 @@
 # Ultralytics YOLO 🚀, AGPL-3.0 license
-# COCO 2017 dataset http://cocodataset.org by Microsoft
+# COCO 2017 dataset https://cocodataset.org by Microsoft
+# Documentation: https://docs.ultralytics.com/datasets/detect/coco/
 # Example usage: yolo train data=coco.yaml
 # parent
 #   ├── ultralytics
 #   └── datasets
 #       └── coco  ← downloads here (20.1 GB)
 
-
 # Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
-path: ../datasets/coco # dataset root dir
-train: train2017.txt # train images (relative to 'path') 118287 images
-val: val2017.txt # val images (relative to 'path') 5000 images
-test: test-dev2017.txt # 20288 of 40670 images, submit to https://competitions.codalab.org/competitions/20794
+path: ../datasets/coco  # dataset root dir
+train: train2017.txt  # train images (relative to 'path') 118287 images
+val: val2017.txt  # val images (relative to 'path') 5000 images
+test: test-dev2017.txt  # 20288 of 40670 images, submit to https://competitions.codalab.org/competitions/20794
 
 # Classes
 names:
@@ -96,7 +96,6 @@ names:
   78: hair drier
   79: toothbrush
 
-
 # Download script/URL (optional)
 download: |
   from ultralytics.utils.downloads import download
diff --git a/ultralytics/cfg/datasets/coco128-seg.yaml b/ultralytics/cfg/datasets/coco128-seg.yaml
index 8c2e3da..e898a40 100644
--- a/ultralytics/cfg/datasets/coco128-seg.yaml
+++ b/ultralytics/cfg/datasets/coco128-seg.yaml
@@ -1,17 +1,17 @@
 # Ultralytics YOLO 🚀, AGPL-3.0 license
 # COCO128-seg dataset https://www.kaggle.com/ultralytics/coco128 (first 128 images from COCO train2017) by Ultralytics
+# Documentation: https://docs.ultralytics.com/datasets/segment/coco/
 # Example usage: yolo train data=coco128.yaml
 # parent
 #   ├── ultralytics
 #   └── datasets
 #       └── coco128-seg  ← downloads here (7 MB)
 
-
 # Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
-path: ../datasets/coco128-seg # dataset root dir
-train: images/train2017 # train images (relative to 'path') 128 images
-val: images/train2017 # val images (relative to 'path') 128 images
-test: # test images (optional)
+path: ../datasets/coco128-seg  # dataset root dir
+train: images/train2017  # train images (relative to 'path') 128 images
+val: images/train2017  # val images (relative to 'path') 128 images
+test: # test images (optional)
 
 # Classes
 names:
@@ -96,6 +96,5 @@ names:
   78: hair drier
   79: toothbrush
 
-
 # Download script/URL (optional)
 download: https://ultralytics.com/assets/coco128-seg.zip
diff --git a/ultralytics/cfg/datasets/coco128.yaml b/ultralytics/cfg/datasets/coco128.yaml
index 9749ab6..8d47ee0 100644
--- a/ultralytics/cfg/datasets/coco128.yaml
+++ b/ultralytics/cfg/datasets/coco128.yaml
@@ -1,17 +1,17 @@
 # Ultralytics YOLO 🚀, AGPL-3.0 license
 # COCO128 dataset https://www.kaggle.com/ultralytics/coco128 (first 128 images from COCO train2017) by Ultralytics
+# Documentation: https://docs.ultralytics.com/datasets/detect/coco/
 # Example usage: yolo train data=coco128.yaml
 # parent
 #   ├── ultralytics
 #   └── datasets
 #       └── coco128  ← downloads here (7 MB)
 
-
 # Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
-path: ../datasets/coco128 # dataset root dir -train: images/train2017 # train images (relative to 'path') 128 images -val: images/train2017 # val images (relative to 'path') 128 images -test: # test images (optional) +path: ../datasets/coco128 # dataset root dir +train: images/train2017 # train images (relative to 'path') 128 images +val: images/train2017 # val images (relative to 'path') 128 images +test: # test images (optional) # Classes names: @@ -96,6 +96,5 @@ names: 78: hair drier 79: toothbrush - # Download script/URL (optional) download: https://ultralytics.com/assets/coco128.zip diff --git a/ultralytics/cfg/datasets/coco8-pose.yaml b/ultralytics/cfg/datasets/coco8-pose.yaml index e6fab8b..4dee5be 100644 --- a/ultralytics/cfg/datasets/coco8-pose.yaml +++ b/ultralytics/cfg/datasets/coco8-pose.yaml @@ -1,20 +1,20 @@ # Ultralytics YOLO 🚀, AGPL-3.0 license # COCO8-pose dataset (first 8 images from COCO train2017) by Ultralytics +# Documentation: https://docs.ultralytics.com/datasets/pose/coco8-pose/ # Example usage: yolo train data=coco8-pose.yaml # parent # ├── ultralytics # └── datasets # └── coco8-pose ← downloads here (1 MB) - # Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..] -path: ../datasets/coco8-pose # dataset root dir -train: images/train # train images (relative to 'path') 4 images -val: images/val # val images (relative to 'path') 4 images -test: # test images (optional) +path: ../datasets/coco8-pose # dataset root dir +train: images/train # train images (relative to 'path') 4 images +val: images/val # val images (relative to 'path') 4 images +test: # test images (optional) # Keypoints -kpt_shape: [17, 3] # number of keypoints, number of dims (2 for x,y or 3 for x,y,visible) +kpt_shape: [17, 3] # number of keypoints, number of dims (2 for x,y or 3 for x,y,visible) flip_idx: [0, 2, 1, 4, 3, 6, 5, 8, 7, 10, 9, 12, 11, 14, 13, 16, 15] # Classes diff --git a/ultralytics/cfg/datasets/coco8-seg.yaml b/ultralytics/cfg/datasets/coco8-seg.yaml index e6faca1..d8b6ed2 100644 --- a/ultralytics/cfg/datasets/coco8-seg.yaml +++ b/ultralytics/cfg/datasets/coco8-seg.yaml @@ -1,17 +1,17 @@ # Ultralytics YOLO 🚀, AGPL-3.0 license # COCO8-seg dataset (first 8 images from COCO train2017) by Ultralytics +# Documentation: https://docs.ultralytics.com/datasets/segment/coco8-seg/ # Example usage: yolo train data=coco8-seg.yaml # parent # ├── ultralytics # └── datasets # └── coco8-seg ← downloads here (1 MB) - # Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..] 
-path: ../datasets/coco8-seg # dataset root dir -train: images/train # train images (relative to 'path') 4 images -val: images/val # val images (relative to 'path') 4 images -test: # test images (optional) +path: ../datasets/coco8-seg # dataset root dir +train: images/train # train images (relative to 'path') 4 images +val: images/val # val images (relative to 'path') 4 images +test: # test images (optional) # Classes names: @@ -96,6 +96,5 @@ names: 78: hair drier 79: toothbrush - # Download script/URL (optional) download: https://ultralytics.com/assets/coco8-seg.zip diff --git a/ultralytics/cfg/datasets/coco8.yaml b/ultralytics/cfg/datasets/coco8.yaml index eeb5d9d..2925f81 100644 --- a/ultralytics/cfg/datasets/coco8.yaml +++ b/ultralytics/cfg/datasets/coco8.yaml @@ -1,17 +1,17 @@ # Ultralytics YOLO 🚀, AGPL-3.0 license # COCO8 dataset (first 8 images from COCO train2017) by Ultralytics +# Documentation: https://docs.ultralytics.com/datasets/detect/coco8/ # Example usage: yolo train data=coco8.yaml # parent # ├── ultralytics # └── datasets # └── coco8 ← downloads here (1 MB) - # Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..] -path: ../datasets/coco8 # dataset root dir -train: images/train # train images (relative to 'path') 4 images -val: images/val # val images (relative to 'path') 4 images -test: # test images (optional) +path: ../datasets/coco8 # dataset root dir +train: images/train # train images (relative to 'path') 4 images +val: images/val # val images (relative to 'path') 4 images +test: # test images (optional) # Classes names: @@ -96,6 +96,5 @@ names: 78: hair drier 79: toothbrush - # Download script/URL (optional) download: https://ultralytics.com/assets/coco8.zip diff --git a/ultralytics/cfg/datasets/crack-seg.yaml b/ultralytics/cfg/datasets/crack-seg.yaml new file mode 100644 index 0000000..2054f62 --- /dev/null +++ b/ultralytics/cfg/datasets/crack-seg.yaml @@ -0,0 +1,21 @@ +# Ultralytics YOLO 🚀, AGPL-3.0 license +# Crack-seg dataset by Ultralytics +# Documentation: https://docs.ultralytics.com/datasets/segment/crack-seg/ +# Example usage: yolo train data=crack-seg.yaml +# parent +# ├── ultralytics +# └── datasets +# └── crack-seg ← downloads here (91.2 MB) + +# Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..] +path: ../datasets/crack-seg # dataset root dir +train: train/images # train images (relative to 'path') 3717 images +val: valid/images # val images (relative to 'path') 112 images +test: test/images # test images (relative to 'path') 200 images + +# Classes +names: + 0: crack + +# Download script/URL (optional) +download: https://ultralytics.com/assets/crack-seg.zip diff --git a/ultralytics/cfg/datasets/dota8.yaml b/ultralytics/cfg/datasets/dota8.yaml new file mode 100644 index 0000000..f58b501 --- /dev/null +++ b/ultralytics/cfg/datasets/dota8.yaml @@ -0,0 +1,34 @@ +# Ultralytics YOLO 🚀, AGPL-3.0 license +# DOTA8 dataset 8 images from split DOTAv1 dataset by Ultralytics +# Documentation: https://docs.ultralytics.com/datasets/obb/dota8/ +# Example usage: yolo train model=yolov8n-obb.pt data=dota8.yaml +# parent +# ├── ultralytics +# └── datasets +# └── dota8 ← downloads here (1MB) + +# Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..] 
+path: ../datasets/dota8 # dataset root dir +train: images/train # train images (relative to 'path') 4 images +val: images/val # val images (relative to 'path') 4 images + +# Classes for DOTA 1.0 +names: + 0: plane + 1: ship + 2: storage tank + 3: baseball diamond + 4: tennis court + 5: basketball court + 6: ground track field + 7: harbor + 8: bridge + 9: large vehicle + 10: small vehicle + 11: helicopter + 12: roundabout + 13: soccer ball field + 14: swimming pool + +# Download script/URL (optional) +download: https://github.com/ultralytics/yolov5/releases/download/v1.0/dota8.zip diff --git a/ultralytics/cfg/datasets/open-images-v7.yaml b/ultralytics/cfg/datasets/open-images-v7.yaml index bb1e3ff..d9cad9f 100644 --- a/ultralytics/cfg/datasets/open-images-v7.yaml +++ b/ultralytics/cfg/datasets/open-images-v7.yaml @@ -1,17 +1,17 @@ # Ultralytics YOLO 🚀, AGPL-3.0 license # Open Images v7 dataset https://storage.googleapis.com/openimages/web/index.html by Google +# Documentation: https://docs.ultralytics.com/datasets/detect/open-images-v7/ # Example usage: yolo train data=open-images-v7.yaml # parent # ├── ultralytics # └── datasets # └── open-images-v7 ← downloads here (561 GB) - # Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..] -path: ../datasets/open-images-v7 # dataset root dir -train: images/train # train images (relative to 'path') 1743042 images -val: images/val # val images (relative to 'path') 41620 images -test: # test images (optional) +path: ../datasets/open-images-v7 # dataset root dir +train: images/train # train images (relative to 'path') 1743042 images +val: images/val # val images (relative to 'path') 41620 images +test: # test images (optional) # Classes names: @@ -617,7 +617,6 @@ names: 599: Zebra 600: Zucchini - # Download script/URL (optional) --------------------------------------------------------------------------------------- download: | from ultralytics.utils import LOGGER, SETTINGS, Path, is_ubuntu, get_ubuntu_version diff --git a/ultralytics/cfg/datasets/package-seg.yaml b/ultralytics/cfg/datasets/package-seg.yaml new file mode 100644 index 0000000..44fe550 --- /dev/null +++ b/ultralytics/cfg/datasets/package-seg.yaml @@ -0,0 +1,21 @@ +# Ultralytics YOLO 🚀, AGPL-3.0 license +# Package-seg dataset by Ultralytics +# Documentation: https://docs.ultralytics.com/datasets/segment/package-seg/ +# Example usage: yolo train data=package-seg.yaml +# parent +# ├── ultralytics +# └── datasets +# └── package-seg ← downloads here (102 MB) + +# Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..] 
+path: ../datasets/package-seg  # dataset root dir
+train: images/train  # train images (relative to 'path') 1920 images
+val: images/val  # val images (relative to 'path') 89 images
+test: test/images  # test images (relative to 'path') 188 images
+
+# Classes
+names:
+  0: package
+
+# Download script/URL (optional)
+download: https://ultralytics.com/assets/package-seg.zip
diff --git a/ultralytics/cfg/datasets/tiger-pose.yaml b/ultralytics/cfg/datasets/tiger-pose.yaml
new file mode 100644
index 0000000..d37df04
--- /dev/null
+++ b/ultralytics/cfg/datasets/tiger-pose.yaml
@@ -0,0 +1,24 @@
+# Ultralytics YOLO 🚀, AGPL-3.0 license
+# Tiger Pose dataset by Ultralytics
+# Documentation: https://docs.ultralytics.com/datasets/pose/tiger-pose/
+# Example usage: yolo train data=tiger-pose.yaml
+# parent
+#   ├── ultralytics
+#   └── datasets
+#       └── tiger-pose  ← downloads here (75.3 MB)
+
+# Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
+path: ../datasets/tiger-pose  # dataset root dir
+train: train  # train images (relative to 'path') 210 images
+val: val  # val images (relative to 'path') 53 images
+
+# Keypoints
+kpt_shape: [12, 2]  # number of keypoints, number of dims (2 for x,y or 3 for x,y,visible)
+flip_idx: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]
+
+# Classes
+names:
+  0: tiger
+
+# Download script/URL (optional)
+download: https://ultralytics.com/assets/tiger-pose.zip
diff --git a/ultralytics/cfg/datasets/xView.yaml b/ultralytics/cfg/datasets/xView.yaml
index bdc2d91..d2e957a 100644
--- a/ultralytics/cfg/datasets/xView.yaml
+++ b/ultralytics/cfg/datasets/xView.yaml
@@ -1,17 +1,17 @@
 # Ultralytics YOLO 🚀, AGPL-3.0 license
 # DIUx xView 2018 Challenge https://challenge.xviewdataset.org by U.S. National Geospatial-Intelligence Agency (NGA)
 # --------  DOWNLOAD DATA MANUALLY and jar xf val_images.zip to 'datasets/xView' before running train command!  --------
+# Documentation: https://docs.ultralytics.com/datasets/detect/xview/
 # Example usage: yolo train data=xView.yaml
 # parent
 #   ├── ultralytics
 #   └── datasets
 #       └── xView  ← downloads here (20.7 GB)
 
-
 # Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
-path: ../datasets/xView # dataset root dir
-train: images/autosplit_train.txt # train images (relative to 'path') 90% of 847 train images
-val: images/autosplit_val.txt # train images (relative to 'path') 10% of 847 train images
+path: ../datasets/xView  # dataset root dir
+train: images/autosplit_train.txt  # train images (relative to 'path') 90% of 847 train images
+val: images/autosplit_val.txt  # val images (relative to 'path') 10% of 847 train images
 
 # Classes
 names:
@@ -76,7 +76,6 @@ names:
   58: Pylon
   59: Tower
 
-
 # Download script/URL (optional) ---------------------------------------------------------------------------------------
 download: |
   import json
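The default.yaml rewrite that follows mostly realigns comments, but it also adds new keys (time, val_period, multi_scale) and raises patience from 50 to 100. Any of these defaults can be overridden per run; a short sketch using the standard Ultralytics Python API, with illustrative values:

    from ultralytics import YOLO

    model = YOLO("yolov8n.pt")
    # Keys not passed here keep the values from ultralytics/cfg/default.yaml.
    model.train(data="coco8.yaml", epochs=3, imgsz=640, patience=100)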
train, val, predict, export, track, benchmark # Train settings ------------------------------------------------------------------------------------------------------- -model: # (str, optional) path to model file, i.e. yolov8n.pt, yolov8n.yaml -data: # (str, optional) path to data file, i.e. coco128.yaml -epochs: 100 # (int) number of epochs to train for -patience: 50 # (int) epochs to wait for no observable improvement for early stopping of training -batch: 16 # (int) number of images per batch (-1 for AutoBatch) -imgsz: 640 # (int | list) input images size as int for train and val modes, or list[w,h] for predict and export modes -save: True # (bool) save train checkpoints and predict results +model: # (str, optional) path to model file, i.e. yolov8n.pt, yolov8n.yaml +data: # (str, optional) path to data file, i.e. coco128.yaml +epochs: 100 # (int) number of epochs to train for +time: # (float, optional) number of hours to train for, overrides epochs if supplied +patience: 100 # (int) epochs to wait for no observable improvement for early stopping of training +batch: 16 # (int) number of images per batch (-1 for AutoBatch) +imgsz: 640 # (int | list) input images size as int for train and val modes, or list[w,h] for predict and export modes +save: True # (bool) save train checkpoints and predict results save_period: -1 # (int) Save checkpoint every x epochs (disabled if < 1) -cache: False # (bool) True/ram, disk or False. Use cache for data loading -device: # (int | str | list, optional) device to run on, i.e. cuda device=0 or device=0,1,2,3 or device=cpu -workers: 8 # (int) number of worker threads for data loading (per RANK if DDP) -project: # (str, optional) project name -name: # (str, optional) experiment name, results saved to 'project/name' directory -exist_ok: False # (bool) whether to overwrite existing experiment -pretrained: True # (bool | str) whether to use a pretrained model (bool) or a model to load weights from (str) -optimizer: auto # (str) optimizer to use, choices=[SGD, Adam, Adamax, AdamW, NAdam, RAdam, RMSProp, auto] -verbose: True # (bool) whether to print verbose output -seed: 0 # (int) random seed for reproducibility -deterministic: True # (bool) whether to enable deterministic mode -single_cls: False # (bool) train multi-class data as single-class -rect: False # (bool) rectangular training if mode='train' or rectangular validation if mode='val' -cos_lr: False # (bool) use cosine learning rate scheduler -close_mosaic: 10 # (int) disable mosaic augmentation for final epochs (0 to disable) -resume: False # (bool) resume training from last checkpoint -amp: True # (bool) Automatic Mixed Precision (AMP) training, choices=[True, False], True runs AMP check -fraction: 1.0 # (float) dataset fraction to train on (default is 1.0, all images in train set) -profile: False # (bool) profile ONNX and TensorRT speeds during training for loggers -freeze: None # (int | list, optional) freeze first n layers, or freeze list of layer indices during training +val_period: 1 # (int) Validation every x epochs +cache: False # (bool) True/ram, disk or False. Use cache for data loading +device: # (int | str | list, optional) device to run on, i.e. 
cuda device=0 or device=0,1,2,3 or device=cpu +workers: 8 # (int) number of worker threads for data loading (per RANK if DDP) +project: # (str, optional) project name +name: # (str, optional) experiment name, results saved to 'project/name' directory +exist_ok: False # (bool) whether to overwrite existing experiment +pretrained: True # (bool | str) whether to use a pretrained model (bool) or a model to load weights from (str) +optimizer: auto # (str) optimizer to use, choices=[SGD, Adam, Adamax, AdamW, NAdam, RAdam, RMSProp, auto] +verbose: True # (bool) whether to print verbose output +seed: 0 # (int) random seed for reproducibility +deterministic: True # (bool) whether to enable deterministic mode +single_cls: False # (bool) train multi-class data as single-class +rect: False # (bool) rectangular training if mode='train' or rectangular validation if mode='val' +cos_lr: False # (bool) use cosine learning rate scheduler +close_mosaic: 10 # (int) disable mosaic augmentation for final epochs (0 to disable) +resume: False # (bool) resume training from last checkpoint +amp: True # (bool) Automatic Mixed Precision (AMP) training, choices=[True, False], True runs AMP check +fraction: 1.0 # (float) dataset fraction to train on (default is 1.0, all images in train set) +profile: False # (bool) profile ONNX and TensorRT speeds during training for loggers +freeze: None # (int | list, optional) freeze first n layers, or freeze list of layer indices during training +multi_scale: False # (bool) Whether to use multiscale during training # Segmentation -overlap_mask: True # (bool) masks should overlap during training (segment train only) -mask_ratio: 4 # (int) mask downsample ratio (segment train only) +overlap_mask: True # (bool) masks should overlap during training (segment train only) +mask_ratio: 4 # (int) mask downsample ratio (segment train only) # Classification -dropout: 0.0 # (float) use dropout regularization (classify train only) +dropout: 0.0 # (float) use dropout regularization (classify train only) # Val/Test settings ---------------------------------------------------------------------------------------------------- -val: True # (bool) validate/test during training -split: val # (str) dataset split to use for validation, i.e. 'val', 'test' or 'train' -save_json: False # (bool) save results to JSON file -save_hybrid: False # (bool) save hybrid version of labels (labels + additional predictions) -conf: # (float, optional) object confidence threshold for detection (default 0.25 predict, 0.001 val) -iou: 0.7 # (float) intersection over union (IoU) threshold for NMS -max_det: 300 # (int) maximum number of detections per image -half: False # (bool) use half precision (FP16) -dnn: False # (bool) use OpenCV DNN for ONNX inference -plots: True # (bool) save plots during train/val +val: True # (bool) validate/test during training +split: val # (str) dataset split to use for validation, i.e. 
'val', 'test' or 'train' +save_json: False # (bool) save results to JSON file +save_hybrid: False # (bool) save hybrid version of labels (labels + additional predictions) +conf: # (float, optional) object confidence threshold for detection (default 0.25 predict, 0.001 val) +iou: 0.7 # (float) intersection over union (IoU) threshold for NMS +max_det: 300 # (int) maximum number of detections per image +half: False # (bool) use half precision (FP16) +dnn: False # (bool) use OpenCV DNN for ONNX inference +plots: True # (bool) save plots and images during train/val -# Prediction settings -------------------------------------------------------------------------------------------------- -source: # (str, optional) source directory for images or videos -show: False # (bool) show results if possible -save_txt: False # (bool) save results as .txt file -save_conf: False # (bool) save results with confidence scores -save_crop: False # (bool) save cropped images with results -show_labels: True # (bool) show object labels in plots -show_conf: True # (bool) show object confidence scores in plots -vid_stride: 1 # (int) video frame-rate stride -stream_buffer: False # (bool) buffer all streaming frames (True) or return the most recent frame (False) -line_width: # (int, optional) line width of the bounding boxes, auto if missing -visualize: False # (bool) visualize model features -augment: False # (bool) apply image augmentation to prediction sources -agnostic_nms: False # (bool) class-agnostic NMS -classes: # (int | list[int], optional) filter results by class, i.e. classes=0, or classes=[0,2,3] -retina_masks: False # (bool) use high-resolution segmentation masks -boxes: True # (bool) Show boxes in segmentation predictions +# Predict settings ----------------------------------------------------------------------------------------------------- +source: # (str, optional) source directory for images or videos +vid_stride: 1 # (int) video frame-rate stride +stream_buffer: False # (bool) buffer all streaming frames (True) or return the most recent frame (False) +visualize: False # (bool) visualize model features +augment: False # (bool) apply image augmentation to prediction sources +agnostic_nms: False # (bool) class-agnostic NMS +classes: # (int | list[int], optional) filter results by class, i.e. classes=0, or classes=[0,2,3] +retina_masks: False # (bool) use high-resolution segmentation masks +embed: # (list[int], optional) return feature vectors/embeddings from given layers + +# Visualize settings --------------------------------------------------------------------------------------------------- +show: False # (bool) show predicted images and videos if environment allows +save_frames: False # (bool) save predicted individual video frames +save_txt: False # (bool) save results as .txt file +save_conf: False # (bool) save results with confidence scores +save_crop: False # (bool) save cropped images with results +show_labels: True # (bool) show prediction labels, i.e. 'person' +show_conf: True # (bool) show prediction confidence, i.e. '0.99' +show_boxes: True # (bool) show prediction boxes +line_width: # (int, optional) line width of the bounding boxes. Scaled to image size if None. 
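Each of the predict and visualize keys above maps one-to-one onto a keyword override at call time; unspecified keys keep the defaults listed here. A minimal sketch, assuming the `ultralytics` package plus a local `yolov8n.pt` checkpoint and a sample image (both hypothetical paths):

```python
from ultralytics import YOLO

model = YOLO("yolov8n.pt")  # hypothetical local checkpoint

# Each kwarg overrides the matching default.yaml key for this call only.
results = model.predict(
    source="bus.jpg",  # hypothetical sample image
    conf=0.25,         # object confidence threshold
    save_txt=True,     # save results as .txt labels
    show_labels=True,  # draw class names on plotted boxes
    line_width=2,      # box line width; auto-scaled from image size if None
)
```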
# Export settings ------------------------------------------------------------------------------------------------------ -format: torchscript # (str) format to export to, choices at https://docs.ultralytics.com/modes/export/#export-formats -keras: False # (bool) use Kera=s -optimize: False # (bool) TorchScript: optimize for mobile -int8: False # (bool) CoreML/TF INT8 quantization -dynamic: False # (bool) ONNX/TF/TensorRT: dynamic axes -simplify: False # (bool) ONNX: simplify model -opset: # (int, optional) ONNX: opset version -workspace: 4 # (int) TensorRT: workspace size (GB) -nms: False # (bool) CoreML: add NMS +format: torchscript # (str) format to export to, choices at https://docs.ultralytics.com/modes/export/#export-formats +keras: False # (bool) use Keras +optimize: False # (bool) TorchScript: optimize for mobile +int8: False # (bool) CoreML/TF INT8 quantization +dynamic: False # (bool) ONNX/TF/TensorRT: dynamic axes +simplify: False # (bool) ONNX: simplify model using `onnxslim` +opset: # (int, optional) ONNX: opset version +workspace: 4 # (int) TensorRT: workspace size (GB) +nms: False # (bool) CoreML: add NMS # Hyperparameters ------------------------------------------------------------------------------------------------------ -lr0: 0.01 # (float) initial learning rate (i.e. SGD=1E-2, Adam=1E-3) -lrf: 0.01 # (float) final learning rate (lr0 * lrf) -momentum: 0.937 # (float) SGD momentum/Adam beta1 -weight_decay: 0.0005 # (float) optimizer weight decay 5e-4 -warmup_epochs: 3.0 # (float) warmup epochs (fractions ok) -warmup_momentum: 0.8 # (float) warmup initial momentum -warmup_bias_lr: 0.1 # (float) warmup initial bias lr -box: 7.5 # (float) box loss gain -cls: 0.5 # (float) cls loss gain (scale with pixels) -dfl: 1.5 # (float) dfl loss gain -pose: 12.0 # (float) pose loss gain -kobj: 1.0 # (float) keypoint obj loss gain -label_smoothing: 0.0 # (float) label smoothing (fraction) -nbs: 64 # (int) nominal batch size -hsv_h: 0.015 # (float) image HSV-Hue augmentation (fraction) -hsv_s: 0.7 # (float) image HSV-Saturation augmentation (fraction) -hsv_v: 0.4 # (float) image HSV-Value augmentation (fraction) -degrees: 0.0 # (float) image rotation (+/- deg) -translate: 0.1 # (float) image translation (+/- fraction) -scale: 0.5 # (float) image scale (+/- gain) -shear: 0.0 # (float) image shear (+/- deg) -perspective: 0.0 # (float) image perspective (+/- fraction), range 0-0.001 -flipud: 0.0 # (float) image flip up-down (probability) -fliplr: 0.5 # (float) image flip left-right (probability) -mosaic: 1.0 # (float) image mosaic (probability) -mixup: 0.0 # (float) image mixup (probability) -copy_paste: 0.0 # (float) segment copy-paste (probability) +lr0: 0.01 # (float) initial learning rate (i.e. 
SGD=1E-2, Adam=1E-3) +lrf: 0.01 # (float) final learning rate (lr0 * lrf) +momentum: 0.937 # (float) SGD momentum/Adam beta1 +weight_decay: 0.0005 # (float) optimizer weight decay 5e-4 +warmup_epochs: 3.0 # (float) warmup epochs (fractions ok) +warmup_momentum: 0.8 # (float) warmup initial momentum +warmup_bias_lr: 0.1 # (float) warmup initial bias lr +box: 7.5 # (float) box loss gain +cls: 0.5 # (float) cls loss gain (scale with pixels) +dfl: 1.5 # (float) dfl loss gain +pose: 12.0 # (float) pose loss gain +kobj: 1.0 # (float) keypoint obj loss gain +label_smoothing: 0.0 # (float) label smoothing (fraction) +nbs: 64 # (int) nominal batch size +hsv_h: 0.015 # (float) image HSV-Hue augmentation (fraction) +hsv_s: 0.7 # (float) image HSV-Saturation augmentation (fraction) +hsv_v: 0.4 # (float) image HSV-Value augmentation (fraction) +degrees: 0.0 # (float) image rotation (+/- deg) +translate: 0.1 # (float) image translation (+/- fraction) +scale: 0.5 # (float) image scale (+/- gain) +shear: 0.0 # (float) image shear (+/- deg) +perspective: 0.0 # (float) image perspective (+/- fraction), range 0-0.001 +flipud: 0.0 # (float) image flip up-down (probability) +fliplr: 0.5 # (float) image flip left-right (probability) +bgr: 0.0 # (float) image channel BGR (probability) +mosaic: 1.0 # (float) image mosaic (probability) +mixup: 0.0 # (float) image mixup (probability) +copy_paste: 0.0 # (float) segment copy-paste (probability) +auto_augment: randaugment # (str) auto augmentation policy for classification (randaugment, autoaugment, augmix) +erasing: 0.4 # (float) probability of random erasing during classification training (0-1) +crop_fraction: 1.0 # (float) image crop fraction for classification evaluation/inference (0-1) # Custom config.yaml --------------------------------------------------------------------------------------------------- -cfg: # (str, optional) for overriding defaults.yaml +cfg: # (str, optional) for overriding defaults.yaml # Tracker settings ------------------------------------------------------------------------------------------------------ -tracker: botsort.yaml # (str) tracker type, choices=[botsort.yaml, bytetrack.yaml] +tracker: botsort.yaml # (str) tracker type, choices=[botsort.yaml, bytetrack.yaml] diff --git a/ultralytics/cfg/models/README.md b/ultralytics/cfg/models/README.md index 4749441..c022fb5 100644 --- a/ultralytics/cfg/models/README.md +++ b/ultralytics/cfg/models/README.md @@ -14,8 +14,7 @@ Model `*.yaml` files may be used directly in the Command Line Interface (CLI) wi yolo task=detect mode=train model=yolov8n.yaml data=coco128.yaml epochs=100 ``` -They may also be used directly in a Python environment, and accepts the same -[arguments](https://docs.ultralytics.com/usage/cfg/) as in the CLI example above: +They may also be used directly in a Python environment, and accept the same [arguments](https://docs.ultralytics.com/usage/cfg/) as in the CLI example above: ```python from ultralytics import YOLO diff --git a/ultralytics/cfg/models/rt-detr/rtdetr-l.yaml b/ultralytics/cfg/models/rt-detr/rtdetr-l.yaml index bd20da1..c6eb0b3 100644 --- a/ultralytics/cfg/models/rt-detr/rtdetr-l.yaml +++ b/ultralytics/cfg/models/rt-detr/rtdetr-l.yaml @@ -2,49 +2,49 @@ # RT-DETR-l object detection model with P3-P5 outputs. For details see https://docs.ultralytics.com/models/rtdetr # Parameters -nc: 80 # number of classes +nc: 80 # number of classes scales: # model compound scaling constants, i.e. 
'model=yolov8n-cls.yaml' will call yolov8-cls.yaml with scale 'n' # [depth, width, max_channels] l: [1.00, 1.00, 1024] backbone: # [from, repeats, module, args] - - [-1, 1, HGStem, [32, 48]] # 0-P2/4 - - [-1, 6, HGBlock, [48, 128, 3]] # stage 1 + - [-1, 1, HGStem, [32, 48]] # 0-P2/4 + - [-1, 6, HGBlock, [48, 128, 3]] # stage 1 - - [-1, 1, DWConv, [128, 3, 2, 1, False]] # 2-P3/8 - - [-1, 6, HGBlock, [96, 512, 3]] # stage 2 + - [-1, 1, DWConv, [128, 3, 2, 1, False]] # 2-P3/8 + - [-1, 6, HGBlock, [96, 512, 3]] # stage 2 - - [-1, 1, DWConv, [512, 3, 2, 1, False]] # 4-P3/16 - - [-1, 6, HGBlock, [192, 1024, 5, True, False]] # cm, c2, k, light, shortcut + - [-1, 1, DWConv, [512, 3, 2, 1, False]] # 4-P3/16 + - [-1, 6, HGBlock, [192, 1024, 5, True, False]] # cm, c2, k, light, shortcut - [-1, 6, HGBlock, [192, 1024, 5, True, True]] - - [-1, 6, HGBlock, [192, 1024, 5, True, True]] # stage 3 + - [-1, 6, HGBlock, [192, 1024, 5, True, True]] # stage 3 - - [-1, 1, DWConv, [1024, 3, 2, 1, False]] # 8-P4/32 - - [-1, 6, HGBlock, [384, 2048, 5, True, False]] # stage 4 + - [-1, 1, DWConv, [1024, 3, 2, 1, False]] # 8-P4/32 + - [-1, 6, HGBlock, [384, 2048, 5, True, False]] # stage 4 head: - - [-1, 1, Conv, [256, 1, 1, None, 1, 1, False]] # 10 input_proj.2 + - [-1, 1, Conv, [256, 1, 1, None, 1, 1, False]] # 10 input_proj.2 - [-1, 1, AIFI, [1024, 8]] - - [-1, 1, Conv, [256, 1, 1]] # 12, Y5, lateral_convs.0 + - [-1, 1, Conv, [256, 1, 1]] # 12, Y5, lateral_convs.0 - - [-1, 1, nn.Upsample, [None, 2, 'nearest']] - - [7, 1, Conv, [256, 1, 1, None, 1, 1, False]] # 14 input_proj.1 + - [-1, 1, nn.Upsample, [None, 2, "nearest"]] + - [7, 1, Conv, [256, 1, 1, None, 1, 1, False]] # 14 input_proj.1 - [[-2, -1], 1, Concat, [1]] - - [-1, 3, RepC3, [256]] # 16, fpn_blocks.0 - - [-1, 1, Conv, [256, 1, 1]] # 17, Y4, lateral_convs.1 + - [-1, 3, RepC3, [256]] # 16, fpn_blocks.0 + - [-1, 1, Conv, [256, 1, 1]] # 17, Y4, lateral_convs.1 - - [-1, 1, nn.Upsample, [None, 2, 'nearest']] - - [3, 1, Conv, [256, 1, 1, None, 1, 1, False]] # 19 input_proj.0 - - [[-2, -1], 1, Concat, [1]] # cat backbone P4 - - [-1, 3, RepC3, [256]] # X3 (21), fpn_blocks.1 + - [-1, 1, nn.Upsample, [None, 2, "nearest"]] + - [3, 1, Conv, [256, 1, 1, None, 1, 1, False]] # 19 input_proj.0 + - [[-2, -1], 1, Concat, [1]] # cat backbone P4 + - [-1, 3, RepC3, [256]] # X3 (21), fpn_blocks.1 - - [-1, 1, Conv, [256, 3, 2]] # 22, downsample_convs.0 - - [[-1, 17], 1, Concat, [1]] # cat Y4 - - [-1, 3, RepC3, [256]] # F4 (24), pan_blocks.0 + - [-1, 1, Conv, [256, 3, 2]] # 22, downsample_convs.0 + - [[-1, 17], 1, Concat, [1]] # cat Y4 + - [-1, 3, RepC3, [256]] # F4 (24), pan_blocks.0 - - [-1, 1, Conv, [256, 3, 2]] # 25, downsample_convs.1 - - [[-1, 12], 1, Concat, [1]] # cat Y5 - - [-1, 3, RepC3, [256]] # F5 (27), pan_blocks.1 + - [-1, 1, Conv, [256, 3, 2]] # 25, downsample_convs.1 + - [[-1, 12], 1, Concat, [1]] # cat Y5 + - [-1, 3, RepC3, [256]] # F5 (27), pan_blocks.1 - - [[21, 24, 27], 1, RTDETRDecoder, [nc]] # Detect(P3, P4, P5) + - [[21, 24, 27], 1, RTDETRDecoder, [nc]] # Detect(P3, P4, P5) diff --git a/ultralytics/cfg/models/rt-detr/rtdetr-resnet101.yaml b/ultralytics/cfg/models/rt-detr/rtdetr-resnet101.yaml new file mode 100644 index 0000000..a68bb5d --- /dev/null +++ b/ultralytics/cfg/models/rt-detr/rtdetr-resnet101.yaml @@ -0,0 +1,42 @@ +# Ultralytics YOLO 🚀, AGPL-3.0 license +# RT-DETR-ResNet101 object detection model with P3-P5 outputs. + +# Parameters +nc: 80 # number of classes +scales: # model compound scaling constants, i.e. 
'model=yolov8n-cls.yaml' will call yolov8-cls.yaml with scale 'n' + # [depth, width, max_channels] + l: [1.00, 1.00, 1024] + +backbone: + # [from, repeats, module, args] + - [-1, 1, ResNetLayer, [3, 64, 1, True, 1]] # 0 + - [-1, 1, ResNetLayer, [64, 64, 1, False, 3]] # 1 + - [-1, 1, ResNetLayer, [256, 128, 2, False, 4]] # 2 + - [-1, 1, ResNetLayer, [512, 256, 2, False, 23]] # 3 + - [-1, 1, ResNetLayer, [1024, 512, 2, False, 3]] # 4 + +head: + - [-1, 1, Conv, [256, 1, 1, None, 1, 1, False]] # 5 + - [-1, 1, AIFI, [1024, 8]] + - [-1, 1, Conv, [256, 1, 1]] # 7 + + - [-1, 1, nn.Upsample, [None, 2, "nearest"]] + - [3, 1, Conv, [256, 1, 1, None, 1, 1, False]] # 9 + - [[-2, -1], 1, Concat, [1]] + - [-1, 3, RepC3, [256]] # 11 + - [-1, 1, Conv, [256, 1, 1]] # 12 + + - [-1, 1, nn.Upsample, [None, 2, "nearest"]] + - [2, 1, Conv, [256, 1, 1, None, 1, 1, False]] # 14 + - [[-2, -1], 1, Concat, [1]] # cat backbone P4 + - [-1, 3, RepC3, [256]] # X3 (16), fpn_blocks.1 + + - [-1, 1, Conv, [256, 3, 2]] # 17, downsample_convs.0 + - [[-1, 12], 1, Concat, [1]] # cat Y4 + - [-1, 3, RepC3, [256]] # F4 (19), pan_blocks.0 + + - [-1, 1, Conv, [256, 3, 2]] # 20, downsample_convs.1 + - [[-1, 7], 1, Concat, [1]] # cat Y5 + - [-1, 3, RepC3, [256]] # F5 (22), pan_blocks.1 + + - [[16, 19, 22], 1, RTDETRDecoder, [nc]] # Detect(P3, P4, P5) diff --git a/ultralytics/cfg/models/rt-detr/rtdetr-resnet50.yaml b/ultralytics/cfg/models/rt-detr/rtdetr-resnet50.yaml new file mode 100644 index 0000000..7145910 --- /dev/null +++ b/ultralytics/cfg/models/rt-detr/rtdetr-resnet50.yaml @@ -0,0 +1,42 @@ +# Ultralytics YOLO 🚀, AGPL-3.0 license +# RT-DETR-ResNet50 object detection model with P3-P5 outputs. + +# Parameters +nc: 80 # number of classes +scales: # model compound scaling constants, i.e. 'model=yolov8n-cls.yaml' will call yolov8-cls.yaml with scale 'n' + # [depth, width, max_channels] + l: [1.00, 1.00, 1024] + +backbone: + # [from, repeats, module, args] + - [-1, 1, ResNetLayer, [3, 64, 1, True, 1]] # 0 + - [-1, 1, ResNetLayer, [64, 64, 1, False, 3]] # 1 + - [-1, 1, ResNetLayer, [256, 128, 2, False, 4]] # 2 + - [-1, 1, ResNetLayer, [512, 256, 2, False, 6]] # 3 + - [-1, 1, ResNetLayer, [1024, 512, 2, False, 3]] # 4 + +head: + - [-1, 1, Conv, [256, 1, 1, None, 1, 1, False]] # 5 + - [-1, 1, AIFI, [1024, 8]] + - [-1, 1, Conv, [256, 1, 1]] # 7 + + - [-1, 1, nn.Upsample, [None, 2, "nearest"]] + - [3, 1, Conv, [256, 1, 1, None, 1, 1, False]] # 9 + - [[-2, -1], 1, Concat, [1]] + - [-1, 3, RepC3, [256]] # 11 + - [-1, 1, Conv, [256, 1, 1]] # 12 + + - [-1, 1, nn.Upsample, [None, 2, "nearest"]] + - [2, 1, Conv, [256, 1, 1, None, 1, 1, False]] # 14 + - [[-2, -1], 1, Concat, [1]] # cat backbone P4 + - [-1, 3, RepC3, [256]] # X3 (16), fpn_blocks.1 + + - [-1, 1, Conv, [256, 3, 2]] # 17, downsample_convs.0 + - [[-1, 12], 1, Concat, [1]] # cat Y4 + - [-1, 3, RepC3, [256]] # F4 (19), pan_blocks.0 + + - [-1, 1, Conv, [256, 3, 2]] # 20, downsample_convs.1 + - [[-1, 7], 1, Concat, [1]] # cat Y5 + - [-1, 3, RepC3, [256]] # F5 (22), pan_blocks.1 + + - [[16, 19, 22], 1, RTDETRDecoder, [nc]] # Detect(P3, P4, P5) diff --git a/ultralytics/cfg/models/rt-detr/rtdetr-x.yaml b/ultralytics/cfg/models/rt-detr/rtdetr-x.yaml index 848cb52..0e819b0 100644 --- a/ultralytics/cfg/models/rt-detr/rtdetr-x.yaml +++ b/ultralytics/cfg/models/rt-detr/rtdetr-x.yaml @@ -2,53 +2,53 @@ # RT-DETR-x object detection model with P3-P5 outputs. 
For details see https://docs.ultralytics.com/models/rtdetr # Parameters -nc: 80 # number of classes +nc: 80 # number of classes scales: # model compound scaling constants, i.e. 'model=yolov8n-cls.yaml' will call yolov8-cls.yaml with scale 'n' # [depth, width, max_channels] x: [1.00, 1.00, 2048] backbone: # [from, repeats, module, args] - - [-1, 1, HGStem, [32, 64]] # 0-P2/4 - - [-1, 6, HGBlock, [64, 128, 3]] # stage 1 + - [-1, 1, HGStem, [32, 64]] # 0-P2/4 + - [-1, 6, HGBlock, [64, 128, 3]] # stage 1 - - [-1, 1, DWConv, [128, 3, 2, 1, False]] # 2-P3/8 + - [-1, 1, DWConv, [128, 3, 2, 1, False]] # 2-P3/8 - [-1, 6, HGBlock, [128, 512, 3]] - - [-1, 6, HGBlock, [128, 512, 3, False, True]] # 4-stage 2 + - [-1, 6, HGBlock, [128, 512, 3, False, True]] # 4-stage 2 - - [-1, 1, DWConv, [512, 3, 2, 1, False]] # 5-P3/16 - - [-1, 6, HGBlock, [256, 1024, 5, True, False]] # cm, c2, k, light, shortcut + - [-1, 1, DWConv, [512, 3, 2, 1, False]] # 5-P3/16 + - [-1, 6, HGBlock, [256, 1024, 5, True, False]] # cm, c2, k, light, shortcut - [-1, 6, HGBlock, [256, 1024, 5, True, True]] - [-1, 6, HGBlock, [256, 1024, 5, True, True]] - [-1, 6, HGBlock, [256, 1024, 5, True, True]] - - [-1, 6, HGBlock, [256, 1024, 5, True, True]] # 10-stage 3 + - [-1, 6, HGBlock, [256, 1024, 5, True, True]] # 10-stage 3 - - [-1, 1, DWConv, [1024, 3, 2, 1, False]] # 11-P4/32 + - [-1, 1, DWConv, [1024, 3, 2, 1, False]] # 11-P4/32 - [-1, 6, HGBlock, [512, 2048, 5, True, False]] - - [-1, 6, HGBlock, [512, 2048, 5, True, True]] # 13-stage 4 + - [-1, 6, HGBlock, [512, 2048, 5, True, True]] # 13-stage 4 head: - - [-1, 1, Conv, [384, 1, 1, None, 1, 1, False]] # 14 input_proj.2 + - [-1, 1, Conv, [384, 1, 1, None, 1, 1, False]] # 14 input_proj.2 - [-1, 1, AIFI, [2048, 8]] - - [-1, 1, Conv, [384, 1, 1]] # 16, Y5, lateral_convs.0 + - [-1, 1, Conv, [384, 1, 1]] # 16, Y5, lateral_convs.0 - - [-1, 1, nn.Upsample, [None, 2, 'nearest']] - - [10, 1, Conv, [384, 1, 1, None, 1, 1, False]] # 18 input_proj.1 + - [-1, 1, nn.Upsample, [None, 2, "nearest"]] + - [10, 1, Conv, [384, 1, 1, None, 1, 1, False]] # 18 input_proj.1 - [[-2, -1], 1, Concat, [1]] - - [-1, 3, RepC3, [384]] # 20, fpn_blocks.0 - - [-1, 1, Conv, [384, 1, 1]] # 21, Y4, lateral_convs.1 + - [-1, 3, RepC3, [384]] # 20, fpn_blocks.0 + - [-1, 1, Conv, [384, 1, 1]] # 21, Y4, lateral_convs.1 - - [-1, 1, nn.Upsample, [None, 2, 'nearest']] - - [4, 1, Conv, [384, 1, 1, None, 1, 1, False]] # 23 input_proj.0 - - [[-2, -1], 1, Concat, [1]] # cat backbone P4 - - [-1, 3, RepC3, [384]] # X3 (25), fpn_blocks.1 + - [-1, 1, nn.Upsample, [None, 2, "nearest"]] + - [4, 1, Conv, [384, 1, 1, None, 1, 1, False]] # 23 input_proj.0 + - [[-2, -1], 1, Concat, [1]] # cat backbone P4 + - [-1, 3, RepC3, [384]] # X3 (25), fpn_blocks.1 - - [-1, 1, Conv, [384, 3, 2]] # 26, downsample_convs.0 - - [[-1, 21], 1, Concat, [1]] # cat Y4 - - [-1, 3, RepC3, [384]] # F4 (28), pan_blocks.0 + - [-1, 1, Conv, [384, 3, 2]] # 26, downsample_convs.0 + - [[-1, 21], 1, Concat, [1]] # cat Y4 + - [-1, 3, RepC3, [384]] # F4 (28), pan_blocks.0 - - [-1, 1, Conv, [384, 3, 2]] # 29, downsample_convs.1 - - [[-1, 16], 1, Concat, [1]] # cat Y5 - - [-1, 3, RepC3, [384]] # F5 (31), pan_blocks.1 + - [-1, 1, Conv, [384, 3, 2]] # 29, downsample_convs.1 + - [[-1, 16], 1, Concat, [1]] # cat Y5 + - [-1, 3, RepC3, [384]] # F5 (31), pan_blocks.1 - - [[25, 28, 31], 1, RTDETRDecoder, [nc]] # Detect(P3, P4, P5) + - [[25, 28, 31], 1, RTDETRDecoder, [nc]] # Detect(P3, P4, P5) diff --git a/ultralytics/cfg/models/v10/yolov10b.yaml 
b/ultralytics/cfg/models/v10/yolov10b.yaml new file mode 100644 index 0000000..a9dc721 --- /dev/null +++ b/ultralytics/cfg/models/v10/yolov10b.yaml @@ -0,0 +1,40 @@ +# Parameters +nc: 80 # number of classes +scales: # model compound scaling constants, i.e. 'model=yolov8n.yaml' will call yolov8.yaml with scale 'n' + # [depth, width, max_channels] + b: [0.67, 1.00, 512] + +# YOLOv8.0n backbone +backbone: + # [from, repeats, module, args] + - [-1, 1, Conv, [64, 3, 2]] # 0-P1/2 + - [-1, 1, Conv, [128, 3, 2]] # 1-P2/4 + - [-1, 3, C2f, [128, True]] + - [-1, 1, Conv, [256, 3, 2]] # 3-P3/8 + - [-1, 6, C2f, [256, True]] + - [-1, 1, SCDown, [512, 3, 2]] # 5-P4/16 + - [-1, 6, C2f, [512, True]] + - [-1, 1, SCDown, [1024, 3, 2]] # 7-P5/32 + - [-1, 3, C2fCIB, [1024, True]] + - [-1, 1, SPPF, [1024, 5]] # 9 + - [-1, 1, PSA, [1024]] # 10 + +# YOLOv8.0n head +head: + - [-1, 1, nn.Upsample, [None, 2, "nearest"]] + - [[-1, 6], 1, Concat, [1]] # cat backbone P4 + - [-1, 3, C2fCIB, [512, True]] # 13 + + - [-1, 1, nn.Upsample, [None, 2, "nearest"]] + - [[-1, 4], 1, Concat, [1]] # cat backbone P3 + - [-1, 3, C2f, [256]] # 16 (P3/8-small) + + - [-1, 1, Conv, [256, 3, 2]] + - [[-1, 13], 1, Concat, [1]] # cat head P4 + - [-1, 3, C2fCIB, [512, True]] # 19 (P4/16-medium) + + - [-1, 1, SCDown, [512, 3, 2]] + - [[-1, 10], 1, Concat, [1]] # cat head P5 + - [-1, 3, C2fCIB, [1024, True]] # 22 (P5/32-large) + + - [[16, 19, 22], 1, v10Detect, [nc]] # Detect(P3, P4, P5) diff --git a/ultralytics/cfg/models/v10/yolov10l.yaml b/ultralytics/cfg/models/v10/yolov10l.yaml new file mode 100644 index 0000000..047de26 --- /dev/null +++ b/ultralytics/cfg/models/v10/yolov10l.yaml @@ -0,0 +1,40 @@ +# Parameters +nc: 80 # number of classes +scales: # model compound scaling constants, i.e. 'model=yolov8n.yaml' will call yolov8.yaml with scale 'n' + # [depth, width, max_channels] + l: [1.00, 1.00, 512] # YOLOv8l summary: 365 layers, 43691520 parameters, 43691504 gradients, 165.7 GFLOPs + +# YOLOv8.0n backbone +backbone: + # [from, repeats, module, args] + - [-1, 1, Conv, [64, 3, 2]] # 0-P1/2 + - [-1, 1, Conv, [128, 3, 2]] # 1-P2/4 + - [-1, 3, C2f, [128, True]] + - [-1, 1, Conv, [256, 3, 2]] # 3-P3/8 + - [-1, 6, C2f, [256, True]] + - [-1, 1, SCDown, [512, 3, 2]] # 5-P4/16 + - [-1, 6, C2f, [512, True]] + - [-1, 1, SCDown, [1024, 3, 2]] # 7-P5/32 + - [-1, 3, C2fCIB, [1024, True]] + - [-1, 1, SPPF, [1024, 5]] # 9 + - [-1, 1, PSA, [1024]] # 10 + +# YOLOv8.0n head +head: + - [-1, 1, nn.Upsample, [None, 2, "nearest"]] + - [[-1, 6], 1, Concat, [1]] # cat backbone P4 + - [-1, 3, C2fCIB, [512, True]] # 13 + + - [-1, 1, nn.Upsample, [None, 2, "nearest"]] + - [[-1, 4], 1, Concat, [1]] # cat backbone P3 + - [-1, 3, C2f, [256]] # 16 (P3/8-small) + + - [-1, 1, Conv, [256, 3, 2]] + - [[-1, 13], 1, Concat, [1]] # cat head P4 + - [-1, 3, C2fCIB, [512, True]] # 19 (P4/16-medium) + + - [-1, 1, SCDown, [512, 3, 2]] + - [[-1, 10], 1, Concat, [1]] # cat head P5 + - [-1, 3, C2fCIB, [1024, True]] # 22 (P5/32-large) + + - [[16, 19, 22], 1, v10Detect, [nc]] # Detect(P3, P4, P5) diff --git a/ultralytics/cfg/models/v10/yolov10m.yaml b/ultralytics/cfg/models/v10/yolov10m.yaml new file mode 100644 index 0000000..5bdb5bf --- /dev/null +++ b/ultralytics/cfg/models/v10/yolov10m.yaml @@ -0,0 +1,43 @@ +# Ultralytics YOLO 🚀, AGPL-3.0 license +# YOLOv8 object detection model with P3-P5 outputs. For Usage examples see https://docs.ultralytics.com/tasks/detect + +# Parameters +nc: 80 # number of classes +scales: # model compound scaling constants, i.e. 
'model=yolov8n.yaml' will call yolov8.yaml with scale 'n' + # [depth, width, max_channels] + m: [0.67, 0.75, 768] # YOLOv8m summary: 295 layers, 25902640 parameters, 25902624 gradients, 79.3 GFLOPs + +# YOLOv8.0n backbone +backbone: + # [from, repeats, module, args] + - [-1, 1, Conv, [64, 3, 2]] # 0-P1/2 + - [-1, 1, Conv, [128, 3, 2]] # 1-P2/4 + - [-1, 3, C2f, [128, True]] + - [-1, 1, Conv, [256, 3, 2]] # 3-P3/8 + - [-1, 6, C2f, [256, True]] + - [-1, 1, SCDown, [512, 3, 2]] # 5-P4/16 + - [-1, 6, C2f, [512, True]] + - [-1, 1, SCDown, [1024, 3, 2]] # 7-P5/32 + - [-1, 3, C2fCIB, [1024, True]] + - [-1, 1, SPPF, [1024, 5]] # 9 + - [-1, 1, PSA, [1024]] # 10 + +# YOLOv8.0n head +head: + - [-1, 1, nn.Upsample, [None, 2, "nearest"]] + - [[-1, 6], 1, Concat, [1]] # cat backbone P4 + - [-1, 3, C2f, [512]] # 13 + + - [-1, 1, nn.Upsample, [None, 2, "nearest"]] + - [[-1, 4], 1, Concat, [1]] # cat backbone P3 + - [-1, 3, C2f, [256]] # 16 (P3/8-small) + + - [-1, 1, Conv, [256, 3, 2]] + - [[-1, 13], 1, Concat, [1]] # cat head P4 + - [-1, 3, C2fCIB, [512, True]] # 19 (P4/16-medium) + + - [-1, 1, SCDown, [512, 3, 2]] + - [[-1, 10], 1, Concat, [1]] # cat head P5 + - [-1, 3, C2fCIB, [1024, True]] # 22 (P5/32-large) + + - [[16, 19, 22], 1, v10Detect, [nc]] # Detect(P3, P4, P5) diff --git a/ultralytics/cfg/models/v10/yolov10n.yaml b/ultralytics/cfg/models/v10/yolov10n.yaml new file mode 100644 index 0000000..1ee7437 --- /dev/null +++ b/ultralytics/cfg/models/v10/yolov10n.yaml @@ -0,0 +1,40 @@ +# Parameters +nc: 80 # number of classes +scales: # model compound scaling constants, i.e. 'model=yolov8n.yaml' will call yolov8.yaml with scale 'n' + # [depth, width, max_channels] + n: [0.33, 0.25, 1024] + +# YOLOv8.0n backbone +backbone: + # [from, repeats, module, args] + - [-1, 1, Conv, [64, 3, 2]] # 0-P1/2 + - [-1, 1, Conv, [128, 3, 2]] # 1-P2/4 + - [-1, 3, C2f, [128, True]] + - [-1, 1, Conv, [256, 3, 2]] # 3-P3/8 + - [-1, 6, C2f, [256, True]] + - [-1, 1, SCDown, [512, 3, 2]] # 5-P4/16 + - [-1, 6, C2f, [512, True]] + - [-1, 1, SCDown, [1024, 3, 2]] # 7-P5/32 + - [-1, 3, C2f, [1024, True]] + - [-1, 1, SPPF, [1024, 5]] # 9 + - [-1, 1, PSA, [1024]] # 10 + +# YOLOv8.0n head +head: + - [-1, 1, nn.Upsample, [None, 2, "nearest"]] + - [[-1, 6], 1, Concat, [1]] # cat backbone P4 + - [-1, 3, C2f, [512]] # 13 + + - [-1, 1, nn.Upsample, [None, 2, "nearest"]] + - [[-1, 4], 1, Concat, [1]] # cat backbone P3 + - [-1, 3, C2f, [256]] # 16 (P3/8-small) + + - [-1, 1, Conv, [256, 3, 2]] + - [[-1, 13], 1, Concat, [1]] # cat head P4 + - [-1, 3, C2f, [512]] # 19 (P4/16-medium) + + - [-1, 1, SCDown, [512, 3, 2]] + - [[-1, 10], 1, Concat, [1]] # cat head P5 + - [-1, 3, C2fCIB, [1024, True, True]] # 22 (P5/32-large) + + - [[16, 19, 22], 1, v10Detect, [nc]] # Detect(P3, P4, P5) diff --git a/ultralytics/cfg/models/v10/yolov10s.yaml b/ultralytics/cfg/models/v10/yolov10s.yaml new file mode 100644 index 0000000..c61e08c --- /dev/null +++ b/ultralytics/cfg/models/v10/yolov10s.yaml @@ -0,0 +1,39 @@ +# Parameters +nc: 80 # number of classes +scales: # model compound scaling constants, i.e. 
'model=yolov8n.yaml' will call yolov8.yaml with scale 'n' + # [depth, width, max_channels] + s: [0.33, 0.50, 1024] + +backbone: + # [from, repeats, module, args] + - [-1, 1, Conv, [64, 3, 2]] # 0-P1/2 + - [-1, 1, Conv, [128, 3, 2]] # 1-P2/4 + - [-1, 3, C2f, [128, True]] + - [-1, 1, Conv, [256, 3, 2]] # 3-P3/8 + - [-1, 6, C2f, [256, True]] + - [-1, 1, SCDown, [512, 3, 2]] # 5-P4/16 + - [-1, 6, C2f, [512, True]] + - [-1, 1, SCDown, [1024, 3, 2]] # 7-P5/32 + - [-1, 3, C2fCIB, [1024, True, True]] + - [-1, 1, SPPF, [1024, 5]] # 9 + - [-1, 1, PSA, [1024]] # 10 + +# YOLOv8.0n head +head: + - [-1, 1, nn.Upsample, [None, 2, "nearest"]] + - [[-1, 6], 1, Concat, [1]] # cat backbone P4 + - [-1, 3, C2f, [512]] # 13 + + - [-1, 1, nn.Upsample, [None, 2, "nearest"]] + - [[-1, 4], 1, Concat, [1]] # cat backbone P3 + - [-1, 3, C2f, [256]] # 16 (P3/8-small) + + - [-1, 1, Conv, [256, 3, 2]] + - [[-1, 13], 1, Concat, [1]] # cat head P4 + - [-1, 3, C2f, [512]] # 19 (P4/16-medium) + + - [-1, 1, SCDown, [512, 3, 2]] + - [[-1, 10], 1, Concat, [1]] # cat head P5 + - [-1, 3, C2fCIB, [1024, True, True]] # 22 (P5/32-large) + + - [[16, 19, 22], 1, v10Detect, [nc]] # Detect(P3, P4, P5) diff --git a/ultralytics/cfg/models/v10/yolov10x.yaml b/ultralytics/cfg/models/v10/yolov10x.yaml new file mode 100644 index 0000000..ab5fc8f --- /dev/null +++ b/ultralytics/cfg/models/v10/yolov10x.yaml @@ -0,0 +1,40 @@ +# Parameters +nc: 80 # number of classes +scales: # model compound scaling constants, i.e. 'model=yolov8n.yaml' will call yolov8.yaml with scale 'n' + # [depth, width, max_channels] + x: [1.00, 1.25, 512] + +# YOLOv8.0n backbone +backbone: + # [from, repeats, module, args] + - [-1, 1, Conv, [64, 3, 2]] # 0-P1/2 + - [-1, 1, Conv, [128, 3, 2]] # 1-P2/4 + - [-1, 3, C2f, [128, True]] + - [-1, 1, Conv, [256, 3, 2]] # 3-P3/8 + - [-1, 6, C2f, [256, True]] + - [-1, 1, SCDown, [512, 3, 2]] # 5-P4/16 + - [-1, 6, C2fCIB, [512, True]] + - [-1, 1, SCDown, [1024, 3, 2]] # 7-P5/32 + - [-1, 3, C2fCIB, [1024, True]] + - [-1, 1, SPPF, [1024, 5]] # 9 + - [-1, 1, PSA, [1024]] # 10 + +# YOLOv8.0n head +head: + - [-1, 1, nn.Upsample, [None, 2, "nearest"]] + - [[-1, 6], 1, Concat, [1]] # cat backbone P4 + - [-1, 3, C2fCIB, [512, True]] # 13 + + - [-1, 1, nn.Upsample, [None, 2, "nearest"]] + - [[-1, 4], 1, Concat, [1]] # cat backbone P3 + - [-1, 3, C2f, [256]] # 16 (P3/8-small) + + - [-1, 1, Conv, [256, 3, 2]] + - [[-1, 13], 1, Concat, [1]] # cat head P4 + - [-1, 3, C2fCIB, [512, True]] # 19 (P4/16-medium) + + - [-1, 1, SCDown, [512, 3, 2]] + - [[-1, 10], 1, Concat, [1]] # cat head P5 + - [-1, 3, C2fCIB, [1024, True]] # 22 (P5/32-large) + + - [[16, 19, 22], 1, v10Detect, [nc]] # Detect(P3, P4, P5) diff --git a/ultralytics/cfg/models/v3/yolov3-spp.yaml b/ultralytics/cfg/models/v3/yolov3-spp.yaml index 406e019..6724f4e 100644 --- a/ultralytics/cfg/models/v3/yolov3-spp.yaml +++ b/ultralytics/cfg/models/v3/yolov3-spp.yaml @@ -2,47 +2,45 @@ # YOLOv3-SPP object detection model with P3-P5 outputs. 
For details see https://docs.ultralytics.com/models/yolov3 # Parameters -nc: 80 # number of classes -depth_multiple: 1.0 # model depth multiple -width_multiple: 1.0 # layer channel multiple +nc: 80 # number of classes +depth_multiple: 1.0 # model depth multiple +width_multiple: 1.0 # layer channel multiple # darknet53 backbone backbone: # [from, number, module, args] - [[-1, 1, Conv, [32, 3, 1]], # 0 - [-1, 1, Conv, [64, 3, 2]], # 1-P1/2 - [-1, 1, Bottleneck, [64]], - [-1, 1, Conv, [128, 3, 2]], # 3-P2/4 - [-1, 2, Bottleneck, [128]], - [-1, 1, Conv, [256, 3, 2]], # 5-P3/8 - [-1, 8, Bottleneck, [256]], - [-1, 1, Conv, [512, 3, 2]], # 7-P4/16 - [-1, 8, Bottleneck, [512]], - [-1, 1, Conv, [1024, 3, 2]], # 9-P5/32 - [-1, 4, Bottleneck, [1024]], # 10 - ] + - [-1, 1, Conv, [32, 3, 1]] # 0 + - [-1, 1, Conv, [64, 3, 2]] # 1-P1/2 + - [-1, 1, Bottleneck, [64]] + - [-1, 1, Conv, [128, 3, 2]] # 3-P2/4 + - [-1, 2, Bottleneck, [128]] + - [-1, 1, Conv, [256, 3, 2]] # 5-P3/8 + - [-1, 8, Bottleneck, [256]] + - [-1, 1, Conv, [512, 3, 2]] # 7-P4/16 + - [-1, 8, Bottleneck, [512]] + - [-1, 1, Conv, [1024, 3, 2]] # 9-P5/32 + - [-1, 4, Bottleneck, [1024]] # 10 # YOLOv3-SPP head head: - [[-1, 1, Bottleneck, [1024, False]], - [-1, 1, SPP, [512, [5, 9, 13]]], - [-1, 1, Conv, [1024, 3, 1]], - [-1, 1, Conv, [512, 1, 1]], - [-1, 1, Conv, [1024, 3, 1]], # 15 (P5/32-large) + - [-1, 1, Bottleneck, [1024, False]] + - [-1, 1, SPP, [512, [5, 9, 13]]] + - [-1, 1, Conv, [1024, 3, 1]] + - [-1, 1, Conv, [512, 1, 1]] + - [-1, 1, Conv, [1024, 3, 1]] # 15 (P5/32-large) - [-2, 1, Conv, [256, 1, 1]], - [-1, 1, nn.Upsample, [None, 2, 'nearest']], - [[-1, 8], 1, Concat, [1]], # cat backbone P4 - [-1, 1, Bottleneck, [512, False]], - [-1, 1, Bottleneck, [512, False]], - [-1, 1, Conv, [256, 1, 1]], - [-1, 1, Conv, [512, 3, 1]], # 22 (P4/16-medium) + - [-2, 1, Conv, [256, 1, 1]] + - [-1, 1, nn.Upsample, [None, 2, "nearest"]] + - [[-1, 8], 1, Concat, [1]] # cat backbone P4 + - [-1, 1, Bottleneck, [512, False]] + - [-1, 1, Bottleneck, [512, False]] + - [-1, 1, Conv, [256, 1, 1]] + - [-1, 1, Conv, [512, 3, 1]] # 22 (P4/16-medium) - [-2, 1, Conv, [128, 1, 1]], - [-1, 1, nn.Upsample, [None, 2, 'nearest']], - [[-1, 6], 1, Concat, [1]], # cat backbone P3 - [-1, 1, Bottleneck, [256, False]], - [-1, 2, Bottleneck, [256, False]], # 27 (P3/8-small) + - [-2, 1, Conv, [128, 1, 1]] + - [-1, 1, nn.Upsample, [None, 2, "nearest"]] + - [[-1, 6], 1, Concat, [1]] # cat backbone P3 + - [-1, 1, Bottleneck, [256, False]] + - [-1, 2, Bottleneck, [256, False]] # 27 (P3/8-small) - [[27, 22, 15], 1, Detect, [nc]], # Detect(P3, P4, P5) - ] + - [[27, 22, 15], 1, Detect, [nc]] # Detect(P3, P4, P5) diff --git a/ultralytics/cfg/models/v3/yolov3-tiny.yaml b/ultralytics/cfg/models/v3/yolov3-tiny.yaml index 69d8e42..f3fe257 100644 --- a/ultralytics/cfg/models/v3/yolov3-tiny.yaml +++ b/ultralytics/cfg/models/v3/yolov3-tiny.yaml @@ -2,38 +2,36 @@ # YOLOv3-tiny object detection model with P4-P5 outputs. 
For details see https://docs.ultralytics.com/models/yolov3 # Parameters -nc: 80 # number of classes -depth_multiple: 1.0 # model depth multiple -width_multiple: 1.0 # layer channel multiple +nc: 80 # number of classes +depth_multiple: 1.0 # model depth multiple +width_multiple: 1.0 # layer channel multiple # YOLOv3-tiny backbone backbone: # [from, number, module, args] - [[-1, 1, Conv, [16, 3, 1]], # 0 - [-1, 1, nn.MaxPool2d, [2, 2, 0]], # 1-P1/2 - [-1, 1, Conv, [32, 3, 1]], - [-1, 1, nn.MaxPool2d, [2, 2, 0]], # 3-P2/4 - [-1, 1, Conv, [64, 3, 1]], - [-1, 1, nn.MaxPool2d, [2, 2, 0]], # 5-P3/8 - [-1, 1, Conv, [128, 3, 1]], - [-1, 1, nn.MaxPool2d, [2, 2, 0]], # 7-P4/16 - [-1, 1, Conv, [256, 3, 1]], - [-1, 1, nn.MaxPool2d, [2, 2, 0]], # 9-P5/32 - [-1, 1, Conv, [512, 3, 1]], - [-1, 1, nn.ZeroPad2d, [[0, 1, 0, 1]]], # 11 - [-1, 1, nn.MaxPool2d, [2, 1, 0]], # 12 - ] + - [-1, 1, Conv, [16, 3, 1]] # 0 + - [-1, 1, nn.MaxPool2d, [2, 2, 0]] # 1-P1/2 + - [-1, 1, Conv, [32, 3, 1]] + - [-1, 1, nn.MaxPool2d, [2, 2, 0]] # 3-P2/4 + - [-1, 1, Conv, [64, 3, 1]] + - [-1, 1, nn.MaxPool2d, [2, 2, 0]] # 5-P3/8 + - [-1, 1, Conv, [128, 3, 1]] + - [-1, 1, nn.MaxPool2d, [2, 2, 0]] # 7-P4/16 + - [-1, 1, Conv, [256, 3, 1]] + - [-1, 1, nn.MaxPool2d, [2, 2, 0]] # 9-P5/32 + - [-1, 1, Conv, [512, 3, 1]] + - [-1, 1, nn.ZeroPad2d, [[0, 1, 0, 1]]] # 11 + - [-1, 1, nn.MaxPool2d, [2, 1, 0]] # 12 # YOLOv3-tiny head head: - [[-1, 1, Conv, [1024, 3, 1]], - [-1, 1, Conv, [256, 1, 1]], - [-1, 1, Conv, [512, 3, 1]], # 15 (P5/32-large) + - [-1, 1, Conv, [1024, 3, 1]] + - [-1, 1, Conv, [256, 1, 1]] + - [-1, 1, Conv, [512, 3, 1]] # 15 (P5/32-large) - [-2, 1, Conv, [128, 1, 1]], - [-1, 1, nn.Upsample, [None, 2, 'nearest']], - [[-1, 8], 1, Concat, [1]], # cat backbone P4 - [-1, 1, Conv, [256, 3, 1]], # 19 (P4/16-medium) + - [-2, 1, Conv, [128, 1, 1]] + - [-1, 1, nn.Upsample, [None, 2, "nearest"]] + - [[-1, 8], 1, Concat, [1]] # cat backbone P4 + - [-1, 1, Conv, [256, 3, 1]] # 19 (P4/16-medium) - [[19, 15], 1, Detect, [nc]], # Detect(P4, P5) - ] + - [[19, 15], 1, Detect, [nc]] # Detect(P4, P5) diff --git a/ultralytics/cfg/models/v3/yolov3.yaml b/ultralytics/cfg/models/v3/yolov3.yaml index 7cc0afa..716866a 100644 --- a/ultralytics/cfg/models/v3/yolov3.yaml +++ b/ultralytics/cfg/models/v3/yolov3.yaml @@ -2,47 +2,45 @@ # YOLOv3 object detection model with P3-P5 outputs. 
For details see https://docs.ultralytics.com/models/yolov3 # Parameters -nc: 80 # number of classes -depth_multiple: 1.0 # model depth multiple -width_multiple: 1.0 # layer channel multiple +nc: 80 # number of classes +depth_multiple: 1.0 # model depth multiple +width_multiple: 1.0 # layer channel multiple # darknet53 backbone backbone: # [from, number, module, args] - [[-1, 1, Conv, [32, 3, 1]], # 0 - [-1, 1, Conv, [64, 3, 2]], # 1-P1/2 - [-1, 1, Bottleneck, [64]], - [-1, 1, Conv, [128, 3, 2]], # 3-P2/4 - [-1, 2, Bottleneck, [128]], - [-1, 1, Conv, [256, 3, 2]], # 5-P3/8 - [-1, 8, Bottleneck, [256]], - [-1, 1, Conv, [512, 3, 2]], # 7-P4/16 - [-1, 8, Bottleneck, [512]], - [-1, 1, Conv, [1024, 3, 2]], # 9-P5/32 - [-1, 4, Bottleneck, [1024]], # 10 - ] + - [-1, 1, Conv, [32, 3, 1]] # 0 + - [-1, 1, Conv, [64, 3, 2]] # 1-P1/2 + - [-1, 1, Bottleneck, [64]] + - [-1, 1, Conv, [128, 3, 2]] # 3-P2/4 + - [-1, 2, Bottleneck, [128]] + - [-1, 1, Conv, [256, 3, 2]] # 5-P3/8 + - [-1, 8, Bottleneck, [256]] + - [-1, 1, Conv, [512, 3, 2]] # 7-P4/16 + - [-1, 8, Bottleneck, [512]] + - [-1, 1, Conv, [1024, 3, 2]] # 9-P5/32 + - [-1, 4, Bottleneck, [1024]] # 10 # YOLOv3 head head: - [[-1, 1, Bottleneck, [1024, False]], - [-1, 1, Conv, [512, 1, 1]], - [-1, 1, Conv, [1024, 3, 1]], - [-1, 1, Conv, [512, 1, 1]], - [-1, 1, Conv, [1024, 3, 1]], # 15 (P5/32-large) + - [-1, 1, Bottleneck, [1024, False]] + - [-1, 1, Conv, [512, 1, 1]] + - [-1, 1, Conv, [1024, 3, 1]] + - [-1, 1, Conv, [512, 1, 1]] + - [-1, 1, Conv, [1024, 3, 1]] # 15 (P5/32-large) - [-2, 1, Conv, [256, 1, 1]], - [-1, 1, nn.Upsample, [None, 2, 'nearest']], - [[-1, 8], 1, Concat, [1]], # cat backbone P4 - [-1, 1, Bottleneck, [512, False]], - [-1, 1, Bottleneck, [512, False]], - [-1, 1, Conv, [256, 1, 1]], - [-1, 1, Conv, [512, 3, 1]], # 22 (P4/16-medium) + - [-2, 1, Conv, [256, 1, 1]] + - [-1, 1, nn.Upsample, [None, 2, "nearest"]] + - [[-1, 8], 1, Concat, [1]] # cat backbone P4 + - [-1, 1, Bottleneck, [512, False]] + - [-1, 1, Bottleneck, [512, False]] + - [-1, 1, Conv, [256, 1, 1]] + - [-1, 1, Conv, [512, 3, 1]] # 22 (P4/16-medium) - [-2, 1, Conv, [128, 1, 1]], - [-1, 1, nn.Upsample, [None, 2, 'nearest']], - [[-1, 6], 1, Concat, [1]], # cat backbone P3 - [-1, 1, Bottleneck, [256, False]], - [-1, 2, Bottleneck, [256, False]], # 27 (P3/8-small) + - [-2, 1, Conv, [128, 1, 1]] + - [-1, 1, nn.Upsample, [None, 2, "nearest"]] + - [[-1, 6], 1, Concat, [1]] # cat backbone P3 + - [-1, 1, Bottleneck, [256, False]] + - [-1, 2, Bottleneck, [256, False]] # 27 (P3/8-small) - [[27, 22, 15], 1, Detect, [nc]], # Detect(P3, P4, P5) - ] + - [[27, 22, 15], 1, Detect, [nc]] # Detect(P3, P4, P5) diff --git a/ultralytics/cfg/models/v5/yolov5-p6.yaml b/ultralytics/cfg/models/v5/yolov5-p6.yaml index d468377..2fd3ac7 100644 --- a/ultralytics/cfg/models/v5/yolov5-p6.yaml +++ b/ultralytics/cfg/models/v5/yolov5-p6.yaml @@ -2,7 +2,7 @@ # YOLOv5 object detection model with P3-P6 outputs. For details see https://docs.ultralytics.com/models/yolov5 # Parameters -nc: 80 # number of classes +nc: 80 # number of classes scales: # model compound scaling constants, i.e. 'model=yolov5n-p6.yaml' will call yolov5-p6.yaml with scale 'n' # [depth, width, max_channels] n: [0.33, 0.25, 1024] @@ -14,48 +14,46 @@ scales: # model compound scaling constants, i.e. 
'model=yolov5n-p6.yaml' will ca # YOLOv5 v6.0 backbone backbone: # [from, number, module, args] - [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2 - [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 - [-1, 3, C3, [128]], - [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 - [-1, 6, C3, [256]], - [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 - [-1, 9, C3, [512]], - [-1, 1, Conv, [768, 3, 2]], # 7-P5/32 - [-1, 3, C3, [768]], - [-1, 1, Conv, [1024, 3, 2]], # 9-P6/64 - [-1, 3, C3, [1024]], - [-1, 1, SPPF, [1024, 5]], # 11 - ] + - [-1, 1, Conv, [64, 6, 2, 2]] # 0-P1/2 + - [-1, 1, Conv, [128, 3, 2]] # 1-P2/4 + - [-1, 3, C3, [128]] + - [-1, 1, Conv, [256, 3, 2]] # 3-P3/8 + - [-1, 6, C3, [256]] + - [-1, 1, Conv, [512, 3, 2]] # 5-P4/16 + - [-1, 9, C3, [512]] + - [-1, 1, Conv, [768, 3, 2]] # 7-P5/32 + - [-1, 3, C3, [768]] + - [-1, 1, Conv, [1024, 3, 2]] # 9-P6/64 + - [-1, 3, C3, [1024]] + - [-1, 1, SPPF, [1024, 5]] # 11 # YOLOv5 v6.0 head head: - [[-1, 1, Conv, [768, 1, 1]], - [-1, 1, nn.Upsample, [None, 2, 'nearest']], - [[-1, 8], 1, Concat, [1]], # cat backbone P5 - [-1, 3, C3, [768, False]], # 15 + - [-1, 1, Conv, [768, 1, 1]] + - [-1, 1, nn.Upsample, [None, 2, "nearest"]] + - [[-1, 8], 1, Concat, [1]] # cat backbone P5 + - [-1, 3, C3, [768, False]] # 15 - [-1, 1, Conv, [512, 1, 1]], - [-1, 1, nn.Upsample, [None, 2, 'nearest']], - [[-1, 6], 1, Concat, [1]], # cat backbone P4 - [-1, 3, C3, [512, False]], # 19 + - [-1, 1, Conv, [512, 1, 1]] + - [-1, 1, nn.Upsample, [None, 2, "nearest"]] + - [[-1, 6], 1, Concat, [1]] # cat backbone P4 + - [-1, 3, C3, [512, False]] # 19 - [-1, 1, Conv, [256, 1, 1]], - [-1, 1, nn.Upsample, [None, 2, 'nearest']], - [[-1, 4], 1, Concat, [1]], # cat backbone P3 - [-1, 3, C3, [256, False]], # 23 (P3/8-small) + - [-1, 1, Conv, [256, 1, 1]] + - [-1, 1, nn.Upsample, [None, 2, "nearest"]] + - [[-1, 4], 1, Concat, [1]] # cat backbone P3 + - [-1, 3, C3, [256, False]] # 23 (P3/8-small) - [-1, 1, Conv, [256, 3, 2]], - [[-1, 20], 1, Concat, [1]], # cat head P4 - [-1, 3, C3, [512, False]], # 26 (P4/16-medium) + - [-1, 1, Conv, [256, 3, 2]] + - [[-1, 20], 1, Concat, [1]] # cat head P4 + - [-1, 3, C3, [512, False]] # 26 (P4/16-medium) - [-1, 1, Conv, [512, 3, 2]], - [[-1, 16], 1, Concat, [1]], # cat head P5 - [-1, 3, C3, [768, False]], # 29 (P5/32-large) + - [-1, 1, Conv, [512, 3, 2]] + - [[-1, 16], 1, Concat, [1]] # cat head P5 + - [-1, 3, C3, [768, False]] # 29 (P5/32-large) - [-1, 1, Conv, [768, 3, 2]], - [[-1, 12], 1, Concat, [1]], # cat head P6 - [-1, 3, C3, [1024, False]], # 32 (P6/64-xlarge) + - [-1, 1, Conv, [768, 3, 2]] + - [[-1, 12], 1, Concat, [1]] # cat head P6 + - [-1, 3, C3, [1024, False]] # 32 (P6/64-xlarge) - [[23, 26, 29, 32], 1, Detect, [nc]], # Detect(P3, P4, P5, P6) - ] + - [[23, 26, 29, 32], 1, Detect, [nc]] # Detect(P3, P4, P5, P6) diff --git a/ultralytics/cfg/models/v5/yolov5.yaml b/ultralytics/cfg/models/v5/yolov5.yaml index 4a3fced..8fdc79e 100644 --- a/ultralytics/cfg/models/v5/yolov5.yaml +++ b/ultralytics/cfg/models/v5/yolov5.yaml @@ -2,7 +2,7 @@ # YOLOv5 object detection model with P3-P5 outputs. For details see https://docs.ultralytics.com/models/yolov5 # Parameters -nc: 80 # number of classes +nc: 80 # number of classes scales: # model compound scaling constants, i.e. 'model=yolov5n.yaml' will call yolov5.yaml with scale 'n' # [depth, width, max_channels] n: [0.33, 0.25, 1024] @@ -14,37 +14,35 @@ scales: # model compound scaling constants, i.e. 
'model=yolov5n.yaml' will call # YOLOv5 v6.0 backbone backbone: # [from, number, module, args] - [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2 - [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 - [-1, 3, C3, [128]], - [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 - [-1, 6, C3, [256]], - [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 - [-1, 9, C3, [512]], - [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32 - [-1, 3, C3, [1024]], - [-1, 1, SPPF, [1024, 5]], # 9 - ] + - [-1, 1, Conv, [64, 6, 2, 2]] # 0-P1/2 + - [-1, 1, Conv, [128, 3, 2]] # 1-P2/4 + - [-1, 3, C3, [128]] + - [-1, 1, Conv, [256, 3, 2]] # 3-P3/8 + - [-1, 6, C3, [256]] + - [-1, 1, Conv, [512, 3, 2]] # 5-P4/16 + - [-1, 9, C3, [512]] + - [-1, 1, Conv, [1024, 3, 2]] # 7-P5/32 + - [-1, 3, C3, [1024]] + - [-1, 1, SPPF, [1024, 5]] # 9 # YOLOv5 v6.0 head head: - [[-1, 1, Conv, [512, 1, 1]], - [-1, 1, nn.Upsample, [None, 2, 'nearest']], - [[-1, 6], 1, Concat, [1]], # cat backbone P4 - [-1, 3, C3, [512, False]], # 13 + - [-1, 1, Conv, [512, 1, 1]] + - [-1, 1, nn.Upsample, [None, 2, "nearest"]] + - [[-1, 6], 1, Concat, [1]] # cat backbone P4 + - [-1, 3, C3, [512, False]] # 13 - [-1, 1, Conv, [256, 1, 1]], - [-1, 1, nn.Upsample, [None, 2, 'nearest']], - [[-1, 4], 1, Concat, [1]], # cat backbone P3 - [-1, 3, C3, [256, False]], # 17 (P3/8-small) + - [-1, 1, Conv, [256, 1, 1]] + - [-1, 1, nn.Upsample, [None, 2, "nearest"]] + - [[-1, 4], 1, Concat, [1]] # cat backbone P3 + - [-1, 3, C3, [256, False]] # 17 (P3/8-small) - [-1, 1, Conv, [256, 3, 2]], - [[-1, 14], 1, Concat, [1]], # cat head P4 - [-1, 3, C3, [512, False]], # 20 (P4/16-medium) + - [-1, 1, Conv, [256, 3, 2]] + - [[-1, 14], 1, Concat, [1]] # cat head P4 + - [-1, 3, C3, [512, False]] # 20 (P4/16-medium) - [-1, 1, Conv, [512, 3, 2]], - [[-1, 10], 1, Concat, [1]], # cat head P5 - [-1, 3, C3, [1024, False]], # 23 (P5/32-large) + - [-1, 1, Conv, [512, 3, 2]] + - [[-1, 10], 1, Concat, [1]] # cat head P5 + - [-1, 3, C3, [1024, False]] # 23 (P5/32-large) - [[17, 20, 23], 1, Detect, [nc]], # Detect(P3, P4, P5) - ] + - [[17, 20, 23], 1, Detect, [nc]] # Detect(P3, P4, P5) diff --git a/ultralytics/cfg/models/v6/yolov6.yaml b/ultralytics/cfg/models/v6/yolov6.yaml index cb5e32a..f39dfb4 100644 --- a/ultralytics/cfg/models/v6/yolov6.yaml +++ b/ultralytics/cfg/models/v6/yolov6.yaml @@ -2,8 +2,8 @@ # YOLOv6 object detection model with P3-P5 outputs. For Usage examples see https://docs.ultralytics.com/models/yolov6 # Parameters -nc: 80 # number of classes -activation: nn.ReLU() # (optional) model default activation function +nc: 80 # number of classes +activation: nn.ReLU() # (optional) model default activation function scales: # model compound scaling constants, i.e. 'model=yolov6n.yaml' will call yolov8.yaml with scale 'n' # [depth, width, max_channels] n: [0.33, 0.25, 1024] @@ -15,39 +15,39 @@ scales: # model compound scaling constants, i.e. 
'model=yolov6n.yaml' will call # YOLOv6-3.0s backbone backbone: # [from, repeats, module, args] - - [-1, 1, Conv, [64, 3, 2]] # 0-P1/2 - - [-1, 1, Conv, [128, 3, 2]] # 1-P2/4 + - [-1, 1, Conv, [64, 3, 2]] # 0-P1/2 + - [-1, 1, Conv, [128, 3, 2]] # 1-P2/4 - [-1, 6, Conv, [128, 3, 1]] - - [-1, 1, Conv, [256, 3, 2]] # 3-P3/8 + - [-1, 1, Conv, [256, 3, 2]] # 3-P3/8 - [-1, 12, Conv, [256, 3, 1]] - - [-1, 1, Conv, [512, 3, 2]] # 5-P4/16 + - [-1, 1, Conv, [512, 3, 2]] # 5-P4/16 - [-1, 18, Conv, [512, 3, 1]] - - [-1, 1, Conv, [1024, 3, 2]] # 7-P5/32 + - [-1, 1, Conv, [1024, 3, 2]] # 7-P5/32 - [-1, 6, Conv, [1024, 3, 1]] - - [-1, 1, SPPF, [1024, 5]] # 9 + - [-1, 1, SPPF, [1024, 5]] # 9 # YOLOv6-3.0s head head: - [-1, 1, Conv, [256, 1, 1]] - [-1, 1, nn.ConvTranspose2d, [256, 2, 2, 0]] - - [[-1, 6], 1, Concat, [1]] # cat backbone P4 + - [[-1, 6], 1, Concat, [1]] # cat backbone P4 - [-1, 1, Conv, [256, 3, 1]] - - [-1, 9, Conv, [256, 3, 1]] # 14 + - [-1, 9, Conv, [256, 3, 1]] # 14 - [-1, 1, Conv, [128, 1, 1]] - [-1, 1, nn.ConvTranspose2d, [128, 2, 2, 0]] - - [[-1, 4], 1, Concat, [1]] # cat backbone P3 + - [[-1, 4], 1, Concat, [1]] # cat backbone P3 - [-1, 1, Conv, [128, 3, 1]] - - [-1, 9, Conv, [128, 3, 1]] # 19 + - [-1, 9, Conv, [128, 3, 1]] # 19 - [-1, 1, Conv, [128, 3, 2]] - - [[-1, 15], 1, Concat, [1]] # cat head P4 + - [[-1, 15], 1, Concat, [1]] # cat head P4 - [-1, 1, Conv, [256, 3, 1]] - - [-1, 9, Conv, [256, 3, 1]] # 23 + - [-1, 9, Conv, [256, 3, 1]] # 23 - [-1, 1, Conv, [256, 3, 2]] - - [[-1, 10], 1, Concat, [1]] # cat head P5 + - [[-1, 10], 1, Concat, [1]] # cat head P5 - [-1, 1, Conv, [512, 3, 1]] - - [-1, 9, Conv, [512, 3, 1]] # 27 + - [-1, 9, Conv, [512, 3, 1]] # 27 - - [[19, 23, 27], 1, Detect, [nc]] # Detect(P3, P4, P5) + - [[19, 23, 27], 1, Detect, [nc]] # Detect(P3, P4, P5) diff --git a/ultralytics/cfg/models/v8/yolov8-cls-resnet101.yaml b/ultralytics/cfg/models/v8/yolov8-cls-resnet101.yaml new file mode 100644 index 0000000..6867f88 --- /dev/null +++ b/ultralytics/cfg/models/v8/yolov8-cls-resnet101.yaml @@ -0,0 +1,25 @@ +# Ultralytics YOLO 🚀, AGPL-3.0 license +# YOLOv8-cls image classification model. For Usage examples see https://docs.ultralytics.com/tasks/classify + +# Parameters +nc: 1000 # number of classes +scales: # model compound scaling constants, i.e. 'model=yolov8n-cls.yaml' will call yolov8-cls.yaml with scale 'n' + # [depth, width, max_channels] + n: [0.33, 0.25, 1024] + s: [0.33, 0.50, 1024] + m: [0.67, 0.75, 1024] + l: [1.00, 1.00, 1024] + x: [1.00, 1.25, 1024] + +# YOLOv8.0n backbone +backbone: + # [from, repeats, module, args] + - [-1, 1, ResNetLayer, [3, 64, 1, True, 1]] # 0-P1/2 + - [-1, 1, ResNetLayer, [64, 64, 1, False, 3]] # 1-P2/4 + - [-1, 1, ResNetLayer, [256, 128, 2, False, 4]] # 2-P3/8 + - [-1, 1, ResNetLayer, [512, 256, 2, False, 23]] # 3-P4/16 + - [-1, 1, ResNetLayer, [1024, 512, 2, False, 3]] # 4-P5/32 + +# YOLOv8.0n head +head: + - [-1, 1, Classify, [nc]] # Classify diff --git a/ultralytics/cfg/models/v8/yolov8-cls-resnet50.yaml b/ultralytics/cfg/models/v8/yolov8-cls-resnet50.yaml new file mode 100644 index 0000000..8ffd111 --- /dev/null +++ b/ultralytics/cfg/models/v8/yolov8-cls-resnet50.yaml @@ -0,0 +1,25 @@ +# Ultralytics YOLO 🚀, AGPL-3.0 license +# YOLOv8-cls image classification model. For Usage examples see https://docs.ultralytics.com/tasks/classify + +# Parameters +nc: 1000 # number of classes +scales: # model compound scaling constants, i.e. 
'model=yolov8n-cls.yaml' will call yolov8-cls.yaml with scale 'n' + # [depth, width, max_channels] + n: [0.33, 0.25, 1024] + s: [0.33, 0.50, 1024] + m: [0.67, 0.75, 1024] + l: [1.00, 1.00, 1024] + x: [1.00, 1.25, 1024] + +# YOLOv8.0n backbone +backbone: + # [from, repeats, module, args] + - [-1, 1, ResNetLayer, [3, 64, 1, True, 1]] # 0-P1/2 + - [-1, 1, ResNetLayer, [64, 64, 1, False, 3]] # 1-P2/4 + - [-1, 1, ResNetLayer, [256, 128, 2, False, 4]] # 2-P3/8 + - [-1, 1, ResNetLayer, [512, 256, 2, False, 6]] # 3-P4/16 + - [-1, 1, ResNetLayer, [1024, 512, 2, False, 3]] # 4-P5/32 + +# YOLOv8.0n head +head: + - [-1, 1, Classify, [nc]] # Classify diff --git a/ultralytics/cfg/models/v8/yolov8-cls.yaml b/ultralytics/cfg/models/v8/yolov8-cls.yaml index 5332f1d..180fc65 100644 --- a/ultralytics/cfg/models/v8/yolov8-cls.yaml +++ b/ultralytics/cfg/models/v8/yolov8-cls.yaml @@ -2,7 +2,7 @@ # YOLOv8-cls image classification model. For Usage examples see https://docs.ultralytics.com/tasks/classify # Parameters -nc: 1000 # number of classes +nc: 1000 # number of classes scales: # model compound scaling constants, i.e. 'model=yolov8n-cls.yaml' will call yolov8-cls.yaml with scale 'n' # [depth, width, max_channels] n: [0.33, 0.25, 1024] @@ -14,16 +14,16 @@ scales: # model compound scaling constants, i.e. 'model=yolov8n-cls.yaml' will c # YOLOv8.0n backbone backbone: # [from, repeats, module, args] - - [-1, 1, Conv, [64, 3, 2]] # 0-P1/2 - - [-1, 1, Conv, [128, 3, 2]] # 1-P2/4 + - [-1, 1, Conv, [64, 3, 2]] # 0-P1/2 + - [-1, 1, Conv, [128, 3, 2]] # 1-P2/4 - [-1, 3, C2f, [128, True]] - - [-1, 1, Conv, [256, 3, 2]] # 3-P3/8 + - [-1, 1, Conv, [256, 3, 2]] # 3-P3/8 - [-1, 6, C2f, [256, True]] - - [-1, 1, Conv, [512, 3, 2]] # 5-P4/16 + - [-1, 1, Conv, [512, 3, 2]] # 5-P4/16 - [-1, 6, C2f, [512, True]] - - [-1, 1, Conv, [1024, 3, 2]] # 7-P5/32 + - [-1, 1, Conv, [1024, 3, 2]] # 7-P5/32 - [-1, 3, C2f, [1024, True]] # YOLOv8.0n head head: - - [-1, 1, Classify, [nc]] # Classify + - [-1, 1, Classify, [nc]] # Classify diff --git a/ultralytics/cfg/models/v8/yolov8-ghost-p2.yaml b/ultralytics/cfg/models/v8/yolov8-ghost-p2.yaml new file mode 100644 index 0000000..aee2093 --- /dev/null +++ b/ultralytics/cfg/models/v8/yolov8-ghost-p2.yaml @@ -0,0 +1,54 @@ +# Ultralytics YOLO 🚀, AGPL-3.0 license +# YOLOv8 object detection model with P2-P5 outputs. For Usage examples see https://docs.ultralytics.com/tasks/detect + +# Parameters +nc: 80 # number of classes +scales: # model compound scaling constants, i.e. 
'model=yolov8n.yaml' will call yolov8.yaml with scale 'n' + # [depth, width, max_channels] + n: [0.33, 0.25, 1024] # YOLOv8n-ghost-p2 summary: 491 layers, 2033944 parameters, 2033928 gradients, 13.8 GFLOPs + s: [0.33, 0.50, 1024] # YOLOv8s-ghost-p2 summary: 491 layers, 5562080 parameters, 5562064 gradients, 25.1 GFLOPs + m: [0.67, 0.75, 768] # YOLOv8m-ghost-p2 summary: 731 layers, 9031728 parameters, 9031712 gradients, 42.8 GFLOPs + l: [1.00, 1.00, 512] # YOLOv8l-ghost-p2 summary: 971 layers, 12214448 parameters, 12214432 gradients, 69.1 GFLOPs + x: [1.00, 1.25, 512] # YOLOv8x-ghost-p2 summary: 971 layers, 18664776 parameters, 18664760 gradients, 103.3 GFLOPs + +# YOLOv8.0-ghost backbone +backbone: + # [from, repeats, module, args] + - [-1, 1, Conv, [64, 3, 2]] # 0-P1/2 + - [-1, 1, GhostConv, [128, 3, 2]] # 1-P2/4 + - [-1, 3, C3Ghost, [128, True]] + - [-1, 1, GhostConv, [256, 3, 2]] # 3-P3/8 + - [-1, 6, C3Ghost, [256, True]] + - [-1, 1, GhostConv, [512, 3, 2]] # 5-P4/16 + - [-1, 6, C3Ghost, [512, True]] + - [-1, 1, GhostConv, [1024, 3, 2]] # 7-P5/32 + - [-1, 3, C3Ghost, [1024, True]] + - [-1, 1, SPPF, [1024, 5]] # 9 + +# YOLOv8.0-ghost-p2 head +head: + - [-1, 1, nn.Upsample, [None, 2, "nearest"]] + - [[-1, 6], 1, Concat, [1]] # cat backbone P4 + - [-1, 3, C3Ghost, [512]] # 12 + + - [-1, 1, nn.Upsample, [None, 2, "nearest"]] + - [[-1, 4], 1, Concat, [1]] # cat backbone P3 + - [-1, 3, C3Ghost, [256]] # 15 (P3/8-small) + + - [-1, 1, nn.Upsample, [None, 2, "nearest"]] + - [[-1, 2], 1, Concat, [1]] # cat backbone P2 + - [-1, 3, C3Ghost, [128]] # 18 (P2/4-xsmall) + + - [-1, 1, GhostConv, [128, 3, 2]] + - [[-1, 15], 1, Concat, [1]] # cat head P3 + - [-1, 3, C3Ghost, [256]] # 21 (P3/8-small) + + - [-1, 1, GhostConv, [256, 3, 2]] + - [[-1, 12], 1, Concat, [1]] # cat head P4 + - [-1, 3, C3Ghost, [512]] # 24 (P4/16-medium) + + - [-1, 1, GhostConv, [512, 3, 2]] + - [[-1, 9], 1, Concat, [1]] # cat head P5 + - [-1, 3, C3Ghost, [1024]] # 27 (P5/32-large) + + - [[18, 21, 24, 27], 1, Detect, [nc]] # Detect(P2, P3, P4, P5) diff --git a/ultralytics/cfg/models/v8/yolov8-ghost-p6.yaml b/ultralytics/cfg/models/v8/yolov8-ghost-p6.yaml new file mode 100644 index 0000000..b35f4cd --- /dev/null +++ b/ultralytics/cfg/models/v8/yolov8-ghost-p6.yaml @@ -0,0 +1,56 @@ +# Ultralytics YOLO 🚀, AGPL-3.0 license +# YOLOv8 object detection model with P3-P6 outputs. For Usage examples see https://docs.ultralytics.com/tasks/detect + +# Parameters +nc: 80 # number of classes +scales: # model compound scaling constants, i.e. 
+  # [depth, width, max_channels]
+  n: [0.33, 0.25, 1024] # YOLOv8n-ghost-p6 summary: 529 layers, 2901100 parameters, 2901084 gradients, 5.8 GFLOPs
+  s: [0.33, 0.50, 1024] # YOLOv8s-ghost-p6 summary: 529 layers, 9520008 parameters, 9519992 gradients, 16.4 GFLOPs
+  m: [0.67, 0.75, 768] # YOLOv8m-ghost-p6 summary: 789 layers, 18002904 parameters, 18002888 gradients, 34.4 GFLOPs
+  l: [1.00, 1.00, 512] # YOLOv8l-ghost-p6 summary: 1049 layers, 21227584 parameters, 21227568 gradients, 55.3 GFLOPs
+  x: [1.00, 1.25, 512] # YOLOv8x-ghost-p6 summary: 1049 layers, 33057852 parameters, 33057836 gradients, 85.7 GFLOPs
+
+# YOLOv8.0-ghost backbone
+backbone:
+  # [from, repeats, module, args]
+  - [-1, 1, Conv, [64, 3, 2]] # 0-P1/2
+  - [-1, 1, GhostConv, [128, 3, 2]] # 1-P2/4
+  - [-1, 3, C3Ghost, [128, True]]
+  - [-1, 1, GhostConv, [256, 3, 2]] # 3-P3/8
+  - [-1, 6, C3Ghost, [256, True]]
+  - [-1, 1, GhostConv, [512, 3, 2]] # 5-P4/16
+  - [-1, 6, C3Ghost, [512, True]]
+  - [-1, 1, GhostConv, [768, 3, 2]] # 7-P5/32
+  - [-1, 3, C3Ghost, [768, True]]
+  - [-1, 1, GhostConv, [1024, 3, 2]] # 9-P6/64
+  - [-1, 3, C3Ghost, [1024, True]]
+  - [-1, 1, SPPF, [1024, 5]] # 11
+
+# YOLOv8.0-ghost-p6 head
+head:
+  - [-1, 1, nn.Upsample, [None, 2, "nearest"]]
+  - [[-1, 8], 1, Concat, [1]] # cat backbone P5
+  - [-1, 3, C3Ghost, [768]] # 14
+
+  - [-1, 1, nn.Upsample, [None, 2, "nearest"]]
+  - [[-1, 6], 1, Concat, [1]] # cat backbone P4
+  - [-1, 3, C3Ghost, [512]] # 17
+
+  - [-1, 1, nn.Upsample, [None, 2, "nearest"]]
+  - [[-1, 4], 1, Concat, [1]] # cat backbone P3
+  - [-1, 3, C3Ghost, [256]] # 20 (P3/8-small)
+
+  - [-1, 1, GhostConv, [256, 3, 2]]
+  - [[-1, 17], 1, Concat, [1]] # cat head P4
+  - [-1, 3, C3Ghost, [512]] # 23 (P4/16-medium)
+
+  - [-1, 1, GhostConv, [512, 3, 2]]
+  - [[-1, 14], 1, Concat, [1]] # cat head P5
+  - [-1, 3, C3Ghost, [768]] # 26 (P5/32-large)
+
+  - [-1, 1, GhostConv, [768, 3, 2]]
+  - [[-1, 11], 1, Concat, [1]] # cat head P6
+  - [-1, 3, C3Ghost, [1024]] # 29 (P6/64-xlarge)
+
+  - [[20, 23, 26, 29], 1, Detect, [nc]] # Detect(P3, P4, P5, P6)
diff --git a/ultralytics/cfg/models/v8/yolov8-ghost.yaml b/ultralytics/cfg/models/v8/yolov8-ghost.yaml
new file mode 100644
index 0000000..adc1802
--- /dev/null
+++ b/ultralytics/cfg/models/v8/yolov8-ghost.yaml
@@ -0,0 +1,47 @@
+# Ultralytics YOLO 🚀, AGPL-3.0 license
+# YOLOv8 object detection model with P3-P5 outputs. For Usage examples see https://docs.ultralytics.com/tasks/detect
+# Employs Ghost convolutions and modules proposed in Huawei's GhostNet in https://arxiv.org/abs/1911.11907v2
+
+# Parameters
+nc: 80 # number of classes
+scales: # model compound scaling constants, i.e. 'model=yolov8n.yaml' will call yolov8.yaml with scale 'n'
+  # [depth, width, max_channels]
+  n: [0.33, 0.25, 1024] # YOLOv8n-ghost summary: 403 layers, 1865316 parameters, 1865300 gradients, 5.8 GFLOPs
+  s: [0.33, 0.50, 1024] # YOLOv8s-ghost summary: 403 layers, 5960072 parameters, 5960056 gradients, 16.4 GFLOPs
+  m: [0.67, 0.75, 768] # YOLOv8m-ghost summary: 603 layers, 10336312 parameters, 10336296 gradients, 32.7 GFLOPs
+  l: [1.00, 1.00, 512] # YOLOv8l-ghost summary: 803 layers, 14277872 parameters, 14277856 gradients, 53.7 GFLOPs
+  x: [1.00, 1.25, 512] # YOLOv8x-ghost summary: 803 layers, 22229308 parameters, 22229292 gradients, 83.3 GFLOPs
+
+# YOLOv8.0n-ghost backbone
+backbone:
+  # [from, repeats, module, args]
+  - [-1, 1, Conv, [64, 3, 2]] # 0-P1/2
+  - [-1, 1, GhostConv, [128, 3, 2]] # 1-P2/4
+  - [-1, 3, C3Ghost, [128, True]]
+  - [-1, 1, GhostConv, [256, 3, 2]] # 3-P3/8
+  - [-1, 6, C3Ghost, [256, True]]
+  - [-1, 1, GhostConv, [512, 3, 2]] # 5-P4/16
+  - [-1, 6, C3Ghost, [512, True]]
+  - [-1, 1, GhostConv, [1024, 3, 2]] # 7-P5/32
+  - [-1, 3, C3Ghost, [1024, True]]
+  - [-1, 1, SPPF, [1024, 5]] # 9
+
+# YOLOv8.0n head
+head:
+  - [-1, 1, nn.Upsample, [None, 2, "nearest"]]
+  - [[-1, 6], 1, Concat, [1]] # cat backbone P4
+  - [-1, 3, C3Ghost, [512]] # 12
+
+  - [-1, 1, nn.Upsample, [None, 2, "nearest"]]
+  - [[-1, 4], 1, Concat, [1]] # cat backbone P3
+  - [-1, 3, C3Ghost, [256]] # 15 (P3/8-small)
+
+  - [-1, 1, GhostConv, [256, 3, 2]]
+  - [[-1, 12], 1, Concat, [1]] # cat head P4
+  - [-1, 3, C3Ghost, [512]] # 18 (P4/16-medium)
+
+  - [-1, 1, GhostConv, [512, 3, 2]]
+  - [[-1, 9], 1, Concat, [1]] # cat head P5
+  - [-1, 3, C3Ghost, [1024]] # 21 (P5/32-large)
+
+  - [[15, 18, 21], 1, Detect, [nc]] # Detect(P3, P4, P5)
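The three new ghost configs above swap `Conv`/`C2f` for `GhostConv`/`C3Ghost` to cut parameters and FLOPs. A quick sanity check, assuming an `ultralytics` install where these YAMLs resolve by name (the scale letter in the filename picks the `scales` row):

```python
from ultralytics import YOLO

ghost = YOLO("yolov8n-ghost.yaml")  # builds from the config above at scale 'n'
base = YOLO("yolov8n.yaml")
ghost.info()  # per the YAML comments: ~1.87M params, 5.8 GFLOPs
base.info()   # baseline YOLOv8n: ~3.16M params, 8.9 GFLOPs
```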
diff --git a/ultralytics/cfg/models/v8/yolov8-obb.yaml b/ultralytics/cfg/models/v8/yolov8-obb.yaml
new file mode 100644
index 0000000..7a7f60c
--- /dev/null
+++ b/ultralytics/cfg/models/v8/yolov8-obb.yaml
@@ -0,0 +1,46 @@
+# Ultralytics YOLO 🚀, AGPL-3.0 license
+# YOLOv8 Oriented Bounding Boxes (OBB) model with P3-P5 outputs. For Usage examples see https://docs.ultralytics.com/tasks/detect
+
+# Parameters
+nc: 80 # number of classes
+scales: # model compound scaling constants, i.e. 'model=yolov8n.yaml' will call yolov8.yaml with scale 'n'
+  # [depth, width, max_channels]
+  n: [0.33, 0.25, 1024] # YOLOv8n summary: 225 layers, 3157200 parameters, 3157184 gradients, 8.9 GFLOPs
+  s: [0.33, 0.50, 1024] # YOLOv8s summary: 225 layers, 11166560 parameters, 11166544 gradients, 28.8 GFLOPs
+  m: [0.67, 0.75, 768] # YOLOv8m summary: 295 layers, 25902640 parameters, 25902624 gradients, 79.3 GFLOPs
+  l: [1.00, 1.00, 512] # YOLOv8l summary: 365 layers, 43691520 parameters, 43691504 gradients, 165.7 GFLOPs
+  x: [1.00, 1.25, 512] # YOLOv8x summary: 365 layers, 68229648 parameters, 68229632 gradients, 258.5 GFLOPs
+
+# YOLOv8.0n backbone
+backbone:
+  # [from, repeats, module, args]
+  - [-1, 1, Conv, [64, 3, 2]] # 0-P1/2
+  - [-1, 1, Conv, [128, 3, 2]] # 1-P2/4
+  - [-1, 3, C2f, [128, True]]
+  - [-1, 1, Conv, [256, 3, 2]] # 3-P3/8
+  - [-1, 6, C2f, [256, True]]
+  - [-1, 1, Conv, [512, 3, 2]] # 5-P4/16
+  - [-1, 6, C2f, [512, True]]
+  - [-1, 1, Conv, [1024, 3, 2]] # 7-P5/32
+  - [-1, 3, C2f, [1024, True]]
+  - [-1, 1, SPPF, [1024, 5]] # 9
+
+# YOLOv8.0n head
+head:
+  - [-1, 1, nn.Upsample, [None, 2, "nearest"]]
+  - [[-1, 6], 1, Concat, [1]] # cat backbone P4
+  - [-1, 3, C2f, [512]] # 12
+
+  - [-1, 1, nn.Upsample, [None, 2, "nearest"]]
+  - [[-1, 4], 1, Concat, [1]] # cat backbone P3
+  - [-1, 3, C2f, [256]] # 15 (P3/8-small)
+
+  - [-1, 1, Conv, [256, 3, 2]]
+  - [[-1, 12], 1, Concat, [1]] # cat head P4
+  - [-1, 3, C2f, [512]] # 18 (P4/16-medium)
+
+  - [-1, 1, Conv, [512, 3, 2]]
+  - [[-1, 9], 1, Concat, [1]] # cat head P5
+  - [-1, 3, C2f, [1024]] # 21 (P5/32-large)
+
+  - [[15, 18, 21], 1, OBB, [nc, 1]] # OBB(P3, P4, P5)
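In the OBB head row `[nc, 1]`, the second argument appears to be the number of extra per-box regression outputs, here the single rotation angle (an assumption based on the `OBB` module's signature). A hedged usage sketch; the input image is a placeholder and the model is untrained, so this is a structural check only:

```python
from ultralytics import YOLO

model = YOLO("yolov8n-obb.yaml")  # build the new OBB config at scale 'n'
results = model.predict("image.jpg")  # hypothetical input image
print(results[0].obb)  # rotated boxes (xywhr) instead of axis-aligned .boxes
```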
diff --git a/ultralytics/cfg/models/v8/yolov8-p2.yaml b/ultralytics/cfg/models/v8/yolov8-p2.yaml
index 3e286aa..5392774 100644
--- a/ultralytics/cfg/models/v8/yolov8-p2.yaml
+++ b/ultralytics/cfg/models/v8/yolov8-p2.yaml
@@ -2,7 +2,7 @@
 # YOLOv8 object detection model with P2-P5 outputs. For Usage examples see https://docs.ultralytics.com/tasks/detect

 # Parameters
-nc: 80  # number of classes
+nc: 80 # number of classes
 scales: # model compound scaling constants, i.e. 'model=yolov8n.yaml' will call yolov8.yaml with scale 'n'
   # [depth, width, max_channels]
   n: [0.33, 0.25, 1024]
@@ -14,41 +14,41 @@ scales: # model compound scaling constants, i.e. 'model=yolov8n.yaml' will call
 # YOLOv8.0 backbone
 backbone:
   # [from, repeats, module, args]
-  - [-1, 1, Conv, [64, 3, 2]]  # 0-P1/2
-  - [-1, 1, Conv, [128, 3, 2]]  # 1-P2/4
+  - [-1, 1, Conv, [64, 3, 2]] # 0-P1/2
+  - [-1, 1, Conv, [128, 3, 2]] # 1-P2/4
   - [-1, 3, C2f, [128, True]]
-  - [-1, 1, Conv, [256, 3, 2]]  # 3-P3/8
+  - [-1, 1, Conv, [256, 3, 2]] # 3-P3/8
   - [-1, 6, C2f, [256, True]]
-  - [-1, 1, Conv, [512, 3, 2]]  # 5-P4/16
+  - [-1, 1, Conv, [512, 3, 2]] # 5-P4/16
   - [-1, 6, C2f, [512, True]]
-  - [-1, 1, Conv, [1024, 3, 2]]  # 7-P5/32
+  - [-1, 1, Conv, [1024, 3, 2]] # 7-P5/32
   - [-1, 3, C2f, [1024, True]]
-  - [-1, 1, SPPF, [1024, 5]]  # 9
+  - [-1, 1, SPPF, [1024, 5]] # 9

 # YOLOv8.0-p2 head
 head:
-  - [-1, 1, nn.Upsample, [None, 2, 'nearest']]
-  - [[-1, 6], 1, Concat, [1]]  # cat backbone P4
-  - [-1, 3, C2f, [512]]  # 12
+  - [-1, 1, nn.Upsample, [None, 2, "nearest"]]
+  - [[-1, 6], 1, Concat, [1]] # cat backbone P4
+  - [-1, 3, C2f, [512]] # 12

-  - [-1, 1, nn.Upsample, [None, 2, 'nearest']]
-  - [[-1, 4], 1, Concat, [1]]  # cat backbone P3
-  - [-1, 3, C2f, [256]]  # 15 (P3/8-small)
+  - [-1, 1, nn.Upsample, [None, 2, "nearest"]]
+  - [[-1, 4], 1, Concat, [1]] # cat backbone P3
+  - [-1, 3, C2f, [256]] # 15 (P3/8-small)

-  - [-1, 1, nn.Upsample, [None, 2, 'nearest']]
-  - [[-1, 2], 1, Concat, [1]]  # cat backbone P2
-  - [-1, 3, C2f, [128]]  # 18 (P2/4-xsmall)
+  - [-1, 1, nn.Upsample, [None, 2, "nearest"]]
+  - [[-1, 2], 1, Concat, [1]] # cat backbone P2
+  - [-1, 3, C2f, [128]] # 18 (P2/4-xsmall)

   - [-1, 1, Conv, [128, 3, 2]]
-  - [[-1, 15], 1, Concat, [1]]  # cat head P3
-  - [-1, 3, C2f, [256]]  # 21 (P3/8-small)
+  - [[-1, 15], 1, Concat, [1]] # cat head P3
+  - [-1, 3, C2f, [256]] # 21 (P3/8-small)

   - [-1, 1, Conv, [256, 3, 2]]
-  - [[-1, 12], 1, Concat, [1]]  # cat head P4
-  - [-1, 3, C2f, [512]]  # 24 (P4/16-medium)
+  - [[-1, 12], 1, Concat, [1]] # cat head P4
+  - [-1, 3, C2f, [512]] # 24 (P4/16-medium)

   - [-1, 1, Conv, [512, 3, 2]]
-  - [[-1, 9], 1, Concat, [1]]  # cat head P5
-  - [-1, 3, C2f, [1024]]  # 27 (P5/32-large)
+  - [[-1, 9], 1, Concat, [1]] # cat head P5
+  - [-1, 3, C2f, [1024]] # 27 (P5/32-large)

-  - [[18, 21, 24, 27], 1, Detect, [nc]]  # Detect(P2, P3, P4, P5)
+  - [[18, 21, 24, 27], 1, Detect, [nc]] # Detect(P2, P3, P4, P5)
diff --git a/ultralytics/cfg/models/v8/yolov8-p6.yaml b/ultralytics/cfg/models/v8/yolov8-p6.yaml
index 3635ed9..2d6d5f9 100644
--- a/ultralytics/cfg/models/v8/yolov8-p6.yaml
+++ b/ultralytics/cfg/models/v8/yolov8-p6.yaml
@@ -2,7 +2,7 @@
 # YOLOv8 object detection model with P3-P6 outputs. For Usage examples see https://docs.ultralytics.com/tasks/detect

 # Parameters
-nc: 80  # number of classes
+nc: 80 # number of classes
 scales: # model compound scaling constants, i.e. 'model=yolov8n-p6.yaml' will call yolov8-p6.yaml with scale 'n'
   # [depth, width, max_channels]
   n: [0.33, 0.25, 1024]
@@ -14,43 +14,43 @@ scales: # model compound scaling constants, i.e. 'model=yolov8n-p6.yaml' will ca
 # YOLOv8.0x6 backbone
 backbone:
   # [from, repeats, module, args]
-  - [-1, 1, Conv, [64, 3, 2]]  # 0-P1/2
-  - [-1, 1, Conv, [128, 3, 2]]  # 1-P2/4
+  - [-1, 1, Conv, [64, 3, 2]] # 0-P1/2
+  - [-1, 1, Conv, [128, 3, 2]] # 1-P2/4
   - [-1, 3, C2f, [128, True]]
-  - [-1, 1, Conv, [256, 3, 2]]  # 3-P3/8
+  - [-1, 1, Conv, [256, 3, 2]] # 3-P3/8
   - [-1, 6, C2f, [256, True]]
-  - [-1, 1, Conv, [512, 3, 2]]  # 5-P4/16
+  - [-1, 1, Conv, [512, 3, 2]] # 5-P4/16
   - [-1, 6, C2f, [512, True]]
-  - [-1, 1, Conv, [768, 3, 2]]  # 7-P5/32
+  - [-1, 1, Conv, [768, 3, 2]] # 7-P5/32
   - [-1, 3, C2f, [768, True]]
-  - [-1, 1, Conv, [1024, 3, 2]]  # 9-P6/64
+  - [-1, 1, Conv, [1024, 3, 2]] # 9-P6/64
   - [-1, 3, C2f, [1024, True]]
-  - [-1, 1, SPPF, [1024, 5]]  # 11
+  - [-1, 1, SPPF, [1024, 5]] # 11

 # YOLOv8.0x6 head
 head:
-  - [-1, 1, nn.Upsample, [None, 2, 'nearest']]
-  - [[-1, 8], 1, Concat, [1]]  # cat backbone P5
-  - [-1, 3, C2, [768, False]]  # 14
+  - [-1, 1, nn.Upsample, [None, 2, "nearest"]]
+  - [[-1, 8], 1, Concat, [1]] # cat backbone P5
+  - [-1, 3, C2, [768, False]] # 14

-  - [-1, 1, nn.Upsample, [None, 2, 'nearest']]
-  - [[-1, 6], 1, Concat, [1]]  # cat backbone P4
-  - [-1, 3, C2, [512, False]]  # 17
+  - [-1, 1, nn.Upsample, [None, 2, "nearest"]]
+  - [[-1, 6], 1, Concat, [1]] # cat backbone P4
+  - [-1, 3, C2, [512, False]] # 17

-  - [-1, 1, nn.Upsample, [None, 2, 'nearest']]
-  - [[-1, 4], 1, Concat, [1]]  # cat backbone P3
-  - [-1, 3, C2, [256, False]]  # 20 (P3/8-small)
+  - [-1, 1, nn.Upsample, [None, 2, "nearest"]]
+  - [[-1, 4], 1, Concat, [1]] # cat backbone P3
+  - [-1, 3, C2, [256, False]] # 20 (P3/8-small)

   - [-1, 1, Conv, [256, 3, 2]]
-  - [[-1, 17], 1, Concat, [1]]  # cat head P4
-  - [-1, 3, C2, [512, False]]  # 23 (P4/16-medium)
+  - [[-1, 17], 1, Concat, [1]] # cat head P4
+  - [-1, 3, C2, [512, False]] # 23 (P4/16-medium)

   - [-1, 1, Conv, [512, 3, 2]]
-  - [[-1, 14], 1, Concat, [1]]  # cat head P5
-  - [-1, 3, C2, [768, False]]  # 26 (P5/32-large)
+  - [[-1, 14], 1, Concat, [1]] # cat head P5
+  - [-1, 3, C2, [768, False]] # 26 (P5/32-large)

   - [-1, 1, Conv, [768, 3, 2]]
-  - [[-1, 11], 1, Concat, [1]]  # cat head P6
-  - [-1, 3, C2, [1024, False]]  # 29 (P6/64-xlarge)
+  - [[-1, 11], 1, Concat, [1]] # cat head P6
+  - [-1, 3, C2, [1024, False]] # 29 (P6/64-xlarge)

-  - [[20, 23, 26, 29], 1, Detect, [nc]]  # Detect(P3, P4, P5, P6)
+  - [[20, 23, 26, 29], 1, Detect, [nc]] # Detect(P3, P4, P5, P6)
diff --git a/ultralytics/cfg/models/v8/yolov8-pose-p6.yaml b/ultralytics/cfg/models/v8/yolov8-pose-p6.yaml
index abf0cfc..60007ac 100644
--- a/ultralytics/cfg/models/v8/yolov8-pose-p6.yaml
+++ b/ultralytics/cfg/models/v8/yolov8-pose-p6.yaml
@@ -2,8 +2,8 @@
 # YOLOv8-pose-p6 keypoints/pose estimation model. For Usage examples see https://docs.ultralytics.com/tasks/pose

 # Parameters
-nc: 1  # number of classes
-kpt_shape: [17, 3]  # number of keypoints, number of dims (2 for x,y or 3 for x,y,visible)
+nc: 1 # number of classes
+kpt_shape: [17, 3] # number of keypoints, number of dims (2 for x,y or 3 for x,y,visible)
 scales: # model compound scaling constants, i.e. 'model=yolov8n-p6.yaml' will call yolov8-p6.yaml with scale 'n'
   # [depth, width, max_channels]
   n: [0.33, 0.25, 1024]
@@ -15,43 +15,43 @@ scales: # model compound scaling constants, i.e. 'model=yolov8n-p6.yaml' will ca
 # YOLOv8.0x6 backbone
 backbone:
   # [from, repeats, module, args]
-  - [-1, 1, Conv, [64, 3, 2]]  # 0-P1/2
-  - [-1, 1, Conv, [128, 3, 2]]  # 1-P2/4
+  - [-1, 1, Conv, [64, 3, 2]] # 0-P1/2
+  - [-1, 1, Conv, [128, 3, 2]] # 1-P2/4
   - [-1, 3, C2f, [128, True]]
-  - [-1, 1, Conv, [256, 3, 2]]  # 3-P3/8
+  - [-1, 1, Conv, [256, 3, 2]] # 3-P3/8
   - [-1, 6, C2f, [256, True]]
-  - [-1, 1, Conv, [512, 3, 2]]  # 5-P4/16
+  - [-1, 1, Conv, [512, 3, 2]] # 5-P4/16
   - [-1, 6, C2f, [512, True]]
-  - [-1, 1, Conv, [768, 3, 2]]  # 7-P5/32
+  - [-1, 1, Conv, [768, 3, 2]] # 7-P5/32
   - [-1, 3, C2f, [768, True]]
-  - [-1, 1, Conv, [1024, 3, 2]]  # 9-P6/64
+  - [-1, 1, Conv, [1024, 3, 2]] # 9-P6/64
   - [-1, 3, C2f, [1024, True]]
-  - [-1, 1, SPPF, [1024, 5]]  # 11
+  - [-1, 1, SPPF, [1024, 5]] # 11

 # YOLOv8.0x6 head
 head:
-  - [-1, 1, nn.Upsample, [None, 2, 'nearest']]
-  - [[-1, 8], 1, Concat, [1]]  # cat backbone P5
-  - [-1, 3, C2, [768, False]]  # 14
+  - [-1, 1, nn.Upsample, [None, 2, "nearest"]]
+  - [[-1, 8], 1, Concat, [1]] # cat backbone P5
+  - [-1, 3, C2, [768, False]] # 14

-  - [-1, 1, nn.Upsample, [None, 2, 'nearest']]
-  - [[-1, 6], 1, Concat, [1]]  # cat backbone P4
-  - [-1, 3, C2, [512, False]]  # 17
+  - [-1, 1, nn.Upsample, [None, 2, "nearest"]]
+  - [[-1, 6], 1, Concat, [1]] # cat backbone P4
+  - [-1, 3, C2, [512, False]] # 17

-  - [-1, 1, nn.Upsample, [None, 2, 'nearest']]
-  - [[-1, 4], 1, Concat, [1]]  # cat backbone P3
-  - [-1, 3, C2, [256, False]]  # 20 (P3/8-small)
+  - [-1, 1, nn.Upsample, [None, 2, "nearest"]]
+  - [[-1, 4], 1, Concat, [1]] # cat backbone P3
+  - [-1, 3, C2, [256, False]] # 20 (P3/8-small)

   - [-1, 1, Conv, [256, 3, 2]]
-  - [[-1, 17], 1, Concat, [1]]  # cat head P4
-  - [-1, 3, C2, [512, False]]  # 23 (P4/16-medium)
+  - [[-1, 17], 1, Concat, [1]] # cat head P4
+  - [-1, 3, C2, [512, False]] # 23 (P4/16-medium)

   - [-1, 1, Conv, [512, 3, 2]]
-  - [[-1, 14], 1, Concat, [1]]  # cat head P5
-  - [-1, 3, C2, [768, False]]  # 26 (P5/32-large)
+  - [[-1, 14], 1, Concat, [1]] # cat head P5
+  - [-1, 3, C2, [768, False]] # 26 (P5/32-large)

   - [-1, 1, Conv, [768, 3, 2]]
-  - [[-1, 11], 1, Concat, [1]]  # cat head P6
-  - [-1, 3, C2, [1024, False]]  # 29 (P6/64-xlarge)
+  - [[-1, 11], 1, Concat, [1]] # cat head P6
+  - [-1, 3, C2, [1024, False]] # 29 (P6/64-xlarge)

-  - [[20, 23, 26, 29], 1, Pose, [nc, kpt_shape]]  # Pose(P3, P4, P5, P6)
+  - [[20, 23, 26, 29], 1, Pose, [nc, kpt_shape]] # Pose(P3, P4, P5, P6)
diff --git a/ultralytics/cfg/models/v8/yolov8-pose.yaml b/ultralytics/cfg/models/v8/yolov8-pose.yaml
index 9f48e1e..60388ef 100644
--- a/ultralytics/cfg/models/v8/yolov8-pose.yaml
+++ b/ultralytics/cfg/models/v8/yolov8-pose.yaml
@@ -2,8 +2,8 @@
 # YOLOv8-pose keypoints/pose estimation model. For Usage examples see https://docs.ultralytics.com/tasks/pose

 # Parameters
-nc: 1  # number of classes
-kpt_shape: [17, 3]  # number of keypoints, number of dims (2 for x,y or 3 for x,y,visible)
+nc: 1 # number of classes
+kpt_shape: [17, 3] # number of keypoints, number of dims (2 for x,y or 3 for x,y,visible)
 scales: # model compound scaling constants, i.e. 'model=yolov8n-pose.yaml' will call yolov8-pose.yaml with scale 'n'
   # [depth, width, max_channels]
   n: [0.33, 0.25, 1024]
@@ -15,33 +15,33 @@ scales: # model compound scaling constants, i.e. 'model=yolov8n-pose.yaml' will
 # YOLOv8.0n backbone
 backbone:
   # [from, repeats, module, args]
-  - [-1, 1, Conv, [64, 3, 2]]  # 0-P1/2
-  - [-1, 1, Conv, [128, 3, 2]]  # 1-P2/4
+  - [-1, 1, Conv, [64, 3, 2]] # 0-P1/2
+  - [-1, 1, Conv, [128, 3, 2]] # 1-P2/4
   - [-1, 3, C2f, [128, True]]
-  - [-1, 1, Conv, [256, 3, 2]]  # 3-P3/8
+  - [-1, 1, Conv, [256, 3, 2]] # 3-P3/8
   - [-1, 6, C2f, [256, True]]
-  - [-1, 1, Conv, [512, 3, 2]]  # 5-P4/16
+  - [-1, 1, Conv, [512, 3, 2]] # 5-P4/16
   - [-1, 6, C2f, [512, True]]
-  - [-1, 1, Conv, [1024, 3, 2]]  # 7-P5/32
+  - [-1, 1, Conv, [1024, 3, 2]] # 7-P5/32
   - [-1, 3, C2f, [1024, True]]
-  - [-1, 1, SPPF, [1024, 5]]  # 9
+  - [-1, 1, SPPF, [1024, 5]] # 9

 # YOLOv8.0n head
 head:
-  - [-1, 1, nn.Upsample, [None, 2, 'nearest']]
-  - [[-1, 6], 1, Concat, [1]]  # cat backbone P4
-  - [-1, 3, C2f, [512]]  # 12
+  - [-1, 1, nn.Upsample, [None, 2, "nearest"]]
+  - [[-1, 6], 1, Concat, [1]] # cat backbone P4
+  - [-1, 3, C2f, [512]] # 12

-  - [-1, 1, nn.Upsample, [None, 2, 'nearest']]
-  - [[-1, 4], 1, Concat, [1]]  # cat backbone P3
-  - [-1, 3, C2f, [256]]  # 15 (P3/8-small)
+  - [-1, 1, nn.Upsample, [None, 2, "nearest"]]
+  - [[-1, 4], 1, Concat, [1]] # cat backbone P3
+  - [-1, 3, C2f, [256]] # 15 (P3/8-small)

   - [-1, 1, Conv, [256, 3, 2]]
-  - [[-1, 12], 1, Concat, [1]]  # cat head P4
-  - [-1, 3, C2f, [512]]  # 18 (P4/16-medium)
+  - [[-1, 12], 1, Concat, [1]] # cat head P4
+  - [-1, 3, C2f, [512]] # 18 (P4/16-medium)

   - [-1, 1, Conv, [512, 3, 2]]
-  - [[-1, 9], 1, Concat, [1]]  # cat head P5
-  - [-1, 3, C2f, [1024]]  # 21 (P5/32-large)
+  - [[-1, 9], 1, Concat, [1]] # cat head P5
+  - [-1, 3, C2f, [1024]] # 21 (P5/32-large)

-  - [[15, 18, 21], 1, Pose, [nc, kpt_shape]]  # Pose(P3, P4, P5)
+  - [[15, 18, 21], 1, Pose, [nc, kpt_shape]] # Pose(P3, P4, P5)
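Both pose configs share `kpt_shape: [17, 3]`: 17 COCO keypoints, each predicted as (x, y, visibility). A small sketch of what that means downstream (untrained model built from the YAML, placeholder image, illustrative only):

```python
from ultralytics import YOLO

model = YOLO("yolov8n-pose.yaml")  # built from the config above
results = model.predict("image.jpg")  # hypothetical input image
print(results[0].keypoints.data.shape)  # (num_detections, 17, 3), matching kpt_shape
```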
diff --git a/ultralytics/cfg/models/v8/yolov8-rtdetr.yaml b/ultralytics/cfg/models/v8/yolov8-rtdetr.yaml
index a058106..27b790b 100644
--- a/ultralytics/cfg/models/v8/yolov8-rtdetr.yaml
+++ b/ultralytics/cfg/models/v8/yolov8-rtdetr.yaml
@@ -2,45 +2,45 @@
 # YOLOv8 object detection model with P3-P5 outputs. For Usage examples see https://docs.ultralytics.com/tasks/detect

 # Parameters
-nc: 80  # number of classes
+nc: 80 # number of classes
 scales: # model compound scaling constants, i.e. 'model=yolov8n.yaml' will call yolov8.yaml with scale 'n'
   # [depth, width, max_channels]
-  n: [0.33, 0.25, 1024]  # YOLOv8n summary: 225 layers, 3157200 parameters, 3157184 gradients, 8.9 GFLOPs
-  s: [0.33, 0.50, 1024]  # YOLOv8s summary: 225 layers, 11166560 parameters, 11166544 gradients, 28.8 GFLOPs
-  m: [0.67, 0.75, 768]  # YOLOv8m summary: 295 layers, 25902640 parameters, 25902624 gradients, 79.3 GFLOPs
-  l: [1.00, 1.00, 512]  # YOLOv8l summary: 365 layers, 43691520 parameters, 43691504 gradients, 165.7 GFLOPs
-  x: [1.00, 1.25, 512]  # YOLOv8x summary: 365 layers, 68229648 parameters, 68229632 gradients, 258.5 GFLOPs
+  n: [0.33, 0.25, 1024] # YOLOv8n summary: 225 layers, 3157200 parameters, 3157184 gradients, 8.9 GFLOPs
+  s: [0.33, 0.50, 1024] # YOLOv8s summary: 225 layers, 11166560 parameters, 11166544 gradients, 28.8 GFLOPs
+  m: [0.67, 0.75, 768] # YOLOv8m summary: 295 layers, 25902640 parameters, 25902624 gradients, 79.3 GFLOPs
+  l: [1.00, 1.00, 512] # YOLOv8l summary: 365 layers, 43691520 parameters, 43691504 gradients, 165.7 GFLOPs
+  x: [1.00, 1.25, 512] # YOLOv8x summary: 365 layers, 68229648 parameters, 68229632 gradients, 258.5 GFLOPs

 # YOLOv8.0n backbone
 backbone:
   # [from, repeats, module, args]
-  - [-1, 1, Conv, [64, 3, 2]]  # 0-P1/2
-  - [-1, 1, Conv, [128, 3, 2]]  # 1-P2/4
+  - [-1, 1, Conv, [64, 3, 2]] # 0-P1/2
+  - [-1, 1, Conv, [128, 3, 2]] # 1-P2/4
   - [-1, 3, C2f, [128, True]]
-  - [-1, 1, Conv, [256, 3, 2]]  # 3-P3/8
+  - [-1, 1, Conv, [256, 3, 2]] # 3-P3/8
   - [-1, 6, C2f, [256, True]]
-  - [-1, 1, Conv, [512, 3, 2]]  # 5-P4/16
+  - [-1, 1, Conv, [512, 3, 2]] # 5-P4/16
   - [-1, 6, C2f, [512, True]]
-  - [-1, 1, Conv, [1024, 3, 2]]  # 7-P5/32
+  - [-1, 1, Conv, [1024, 3, 2]] # 7-P5/32
   - [-1, 3, C2f, [1024, True]]
-  - [-1, 1, SPPF, [1024, 5]]  # 9
+  - [-1, 1, SPPF, [1024, 5]] # 9

 # YOLOv8.0n head
 head:
-  - [-1, 1, nn.Upsample, [None, 2, 'nearest']]
-  - [[-1, 6], 1, Concat, [1]]  # cat backbone P4
-  - [-1, 3, C2f, [512]]  # 12
+  - [-1, 1, nn.Upsample, [None, 2, "nearest"]]
+  - [[-1, 6], 1, Concat, [1]] # cat backbone P4
+  - [-1, 3, C2f, [512]] # 12

-  - [-1, 1, nn.Upsample, [None, 2, 'nearest']]
-  - [[-1, 4], 1, Concat, [1]]  # cat backbone P3
-  - [-1, 3, C2f, [256]]  # 15 (P3/8-small)
+  - [-1, 1, nn.Upsample, [None, 2, "nearest"]]
+  - [[-1, 4], 1, Concat, [1]] # cat backbone P3
+  - [-1, 3, C2f, [256]] # 15 (P3/8-small)

   - [-1, 1, Conv, [256, 3, 2]]
-  - [[-1, 12], 1, Concat, [1]]  # cat head P4
-  - [-1, 3, C2f, [512]]  # 18 (P4/16-medium)
+  - [[-1, 12], 1, Concat, [1]] # cat head P4
+  - [-1, 3, C2f, [512]] # 18 (P4/16-medium)

   - [-1, 1, Conv, [512, 3, 2]]
-  - [[-1, 9], 1, Concat, [1]]  # cat head P5
-  - [-1, 3, C2f, [1024]]  # 21 (P5/32-large)
+  - [[-1, 9], 1, Concat, [1]] # cat head P5
+  - [-1, 3, C2f, [1024]] # 21 (P5/32-large)

-  - [[15, 18, 21], 1, RTDETRDecoder, [nc]]  # Detect(P3, P4, P5)
+  - [[15, 18, 21], 1, RTDETRDecoder, [nc]] # Detect(P3, P4, P5)
diff --git a/ultralytics/cfg/models/v8/yolov8-seg-p6.yaml b/ultralytics/cfg/models/v8/yolov8-seg-p6.yaml
index 5ac0936..78c0444 100644
--- a/ultralytics/cfg/models/v8/yolov8-seg-p6.yaml
+++ b/ultralytics/cfg/models/v8/yolov8-seg-p6.yaml
@@ -2,7 +2,7 @@
 # YOLOv8-seg-p6 instance segmentation model. For Usage examples see https://docs.ultralytics.com/tasks/segment

 # Parameters
-nc: 80  # number of classes
+nc: 80 # number of classes
 scales: # model compound scaling constants, i.e. 'model=yolov8n-seg-p6.yaml' will call yolov8-seg-p6.yaml with scale 'n'
   # [depth, width, max_channels]
   n: [0.33, 0.25, 1024]
@@ -14,43 +14,43 @@ scales: # model compound scaling constants, i.e. 'model=yolov8n-seg-p6.yaml' wil
 # YOLOv8.0x6 backbone
 backbone:
   # [from, repeats, module, args]
-  - [-1, 1, Conv, [64, 3, 2]]  # 0-P1/2
-  - [-1, 1, Conv, [128, 3, 2]]  # 1-P2/4
+  - [-1, 1, Conv, [64, 3, 2]] # 0-P1/2
+  - [-1, 1, Conv, [128, 3, 2]] # 1-P2/4
   - [-1, 3, C2f, [128, True]]
-  - [-1, 1, Conv, [256, 3, 2]]  # 3-P3/8
+  - [-1, 1, Conv, [256, 3, 2]] # 3-P3/8
   - [-1, 6, C2f, [256, True]]
-  - [-1, 1, Conv, [512, 3, 2]]  # 5-P4/16
+  - [-1, 1, Conv, [512, 3, 2]] # 5-P4/16
   - [-1, 6, C2f, [512, True]]
-  - [-1, 1, Conv, [768, 3, 2]]  # 7-P5/32
+  - [-1, 1, Conv, [768, 3, 2]] # 7-P5/32
   - [-1, 3, C2f, [768, True]]
-  - [-1, 1, Conv, [1024, 3, 2]]  # 9-P6/64
+  - [-1, 1, Conv, [1024, 3, 2]] # 9-P6/64
   - [-1, 3, C2f, [1024, True]]
-  - [-1, 1, SPPF, [1024, 5]]  # 11
+  - [-1, 1, SPPF, [1024, 5]] # 11

 # YOLOv8.0x6 head
 head:
-  - [-1, 1, nn.Upsample, [None, 2, 'nearest']]
-  - [[-1, 8], 1, Concat, [1]]  # cat backbone P5
-  - [-1, 3, C2, [768, False]]  # 14
+  - [-1, 1, nn.Upsample, [None, 2, "nearest"]]
+  - [[-1, 8], 1, Concat, [1]] # cat backbone P5
+  - [-1, 3, C2, [768, False]] # 14

-  - [-1, 1, nn.Upsample, [None, 2, 'nearest']]
-  - [[-1, 6], 1, Concat, [1]]  # cat backbone P4
-  - [-1, 3, C2, [512, False]]  # 17
+  - [-1, 1, nn.Upsample, [None, 2, "nearest"]]
+  - [[-1, 6], 1, Concat, [1]] # cat backbone P4
+  - [-1, 3, C2, [512, False]] # 17

-  - [-1, 1, nn.Upsample, [None, 2, 'nearest']]
-  - [[-1, 4], 1, Concat, [1]]  # cat backbone P3
-  - [-1, 3, C2, [256, False]]  # 20 (P3/8-small)
+  - [-1, 1, nn.Upsample, [None, 2, "nearest"]]
+  - [[-1, 4], 1, Concat, [1]] # cat backbone P3
+  - [-1, 3, C2, [256, False]] # 20 (P3/8-small)

   - [-1, 1, Conv, [256, 3, 2]]
-  - [[-1, 17], 1, Concat, [1]]  # cat head P4
-  - [-1, 3, C2, [512, False]]  # 23 (P4/16-medium)
+  - [[-1, 17], 1, Concat, [1]] # cat head P4
+  - [-1, 3, C2, [512, False]] # 23 (P4/16-medium)

   - [-1, 1, Conv, [512, 3, 2]]
-  - [[-1, 14], 1, Concat, [1]]  # cat head P5
-  - [-1, 3, C2, [768, False]]  # 26 (P5/32-large)
+  - [[-1, 14], 1, Concat, [1]] # cat head P5
+  - [-1, 3, C2, [768, False]] # 26 (P5/32-large)

   - [-1, 1, Conv, [768, 3, 2]]
-  - [[-1, 11], 1, Concat, [1]]  # cat head P6
-  - [-1, 3, C2, [1024, False]]  # 29 (P6/64-xlarge)
+  - [[-1, 11], 1, Concat, [1]] # cat head P6
+  - [-1, 3, C2, [1024, False]] # 29 (P6/64-xlarge)

-  - [[20, 23, 26, 29], 1, Segment, [nc, 32, 256]]  # Pose(P3, P4, P5, P6)
+  - [[20, 23, 26, 29], 1, Segment, [nc, 32, 256]] # Segment(P3, P4, P5, P6)
diff --git a/ultralytics/cfg/models/v8/yolov8-seg.yaml b/ultralytics/cfg/models/v8/yolov8-seg.yaml
index fbb08fc..700b795 100644
--- a/ultralytics/cfg/models/v8/yolov8-seg.yaml
+++ b/ultralytics/cfg/models/v8/yolov8-seg.yaml
@@ -2,7 +2,7 @@
 # YOLOv8-seg instance segmentation model. For Usage examples see https://docs.ultralytics.com/tasks/segment

 # Parameters
-nc: 80  # number of classes
+nc: 80 # number of classes
 scales: # model compound scaling constants, i.e. 'model=yolov8n-seg.yaml' will call yolov8-seg.yaml with scale 'n'
   # [depth, width, max_channels]
   n: [0.33, 0.25, 1024]
@@ -14,33 +14,33 @@ scales: # model compound scaling constants, i.e. 'model=yolov8n-seg.yaml' will c
 # YOLOv8.0n backbone
 backbone:
   # [from, repeats, module, args]
-  - [-1, 1, Conv, [64, 3, 2]]  # 0-P1/2
-  - [-1, 1, Conv, [128, 3, 2]]  # 1-P2/4
+  - [-1, 1, Conv, [64, 3, 2]] # 0-P1/2
+  - [-1, 1, Conv, [128, 3, 2]] # 1-P2/4
   - [-1, 3, C2f, [128, True]]
-  - [-1, 1, Conv, [256, 3, 2]]  # 3-P3/8
+  - [-1, 1, Conv, [256, 3, 2]] # 3-P3/8
   - [-1, 6, C2f, [256, True]]
-  - [-1, 1, Conv, [512, 3, 2]]  # 5-P4/16
+  - [-1, 1, Conv, [512, 3, 2]] # 5-P4/16
   - [-1, 6, C2f, [512, True]]
-  - [-1, 1, Conv, [1024, 3, 2]]  # 7-P5/32
+  - [-1, 1, Conv, [1024, 3, 2]] # 7-P5/32
   - [-1, 3, C2f, [1024, True]]
-  - [-1, 1, SPPF, [1024, 5]]  # 9
+  - [-1, 1, SPPF, [1024, 5]] # 9

 # YOLOv8.0n head
 head:
-  - [-1, 1, nn.Upsample, [None, 2, 'nearest']]
-  - [[-1, 6], 1, Concat, [1]]  # cat backbone P4
-  - [-1, 3, C2f, [512]]  # 12
+  - [-1, 1, nn.Upsample, [None, 2, "nearest"]]
+  - [[-1, 6], 1, Concat, [1]] # cat backbone P4
+  - [-1, 3, C2f, [512]] # 12

-  - [-1, 1, nn.Upsample, [None, 2, 'nearest']]
-  - [[-1, 4], 1, Concat, [1]]  # cat backbone P3
-  - [-1, 3, C2f, [256]]  # 15 (P3/8-small)
+  - [-1, 1, nn.Upsample, [None, 2, "nearest"]]
+  - [[-1, 4], 1, Concat, [1]] # cat backbone P3
+  - [-1, 3, C2f, [256]] # 15 (P3/8-small)

   - [-1, 1, Conv, [256, 3, 2]]
-  - [[-1, 12], 1, Concat, [1]]  # cat head P4
-  - [-1, 3, C2f, [512]]  # 18 (P4/16-medium)
+  - [[-1, 12], 1, Concat, [1]] # cat head P4
+  - [-1, 3, C2f, [512]] # 18 (P4/16-medium)

   - [-1, 1, Conv, [512, 3, 2]]
-  - [[-1, 9], 1, Concat, [1]]  # cat head P5
-  - [-1, 3, C2f, [1024]]  # 21 (P5/32-large)
+  - [[-1, 9], 1, Concat, [1]] # cat head P5
+  - [-1, 3, C2f, [1024]] # 21 (P5/32-large)

-  - [[15, 18, 21], 1, Segment, [nc, 32, 256]]  # Segment(P3, P4, P5)
+  - [[15, 18, 21], 1, Segment, [nc, 32, 256]] # Segment(P3, P4, P5)
diff --git a/ultralytics/cfg/models/v8/yolov8-world.yaml b/ultralytics/cfg/models/v8/yolov8-world.yaml
new file mode 100644
index 0000000..c21a7f0
--- /dev/null
+++ b/ultralytics/cfg/models/v8/yolov8-world.yaml
@@ -0,0 +1,48 @@
+# Ultralytics YOLO 🚀, AGPL-3.0 license
+# YOLOv8-World object detection model with P3-P5 outputs. For details see https://docs.ultralytics.com/tasks/detect
+
+# Parameters
+nc: 80 # number of classes
+scales: # model compound scaling constants, i.e. 'model=yolov8n.yaml' will call yolov8.yaml with scale 'n'
+  # [depth, width, max_channels]
+  n: [0.33, 0.25, 1024] # YOLOv8n summary: 225 layers, 3157200 parameters, 3157184 gradients, 8.9 GFLOPs
+  s: [0.33, 0.50, 1024] # YOLOv8s summary: 225 layers, 11166560 parameters, 11166544 gradients, 28.8 GFLOPs
+  m: [0.67, 0.75, 768] # YOLOv8m summary: 295 layers, 25902640 parameters, 25902624 gradients, 79.3 GFLOPs
+  l: [1.00, 1.00, 512] # YOLOv8l summary: 365 layers, 43691520 parameters, 43691504 gradients, 165.7 GFLOPs
+  x: [1.00, 1.25, 512] # YOLOv8x summary: 365 layers, 68229648 parameters, 68229632 gradients, 258.5 GFLOPs
+
+# YOLOv8.0n backbone
+backbone:
+  # [from, repeats, module, args]
+  - [-1, 1, Conv, [64, 3, 2]] # 0-P1/2
+  - [-1, 1, Conv, [128, 3, 2]] # 1-P2/4
+  - [-1, 3, C2f, [128, True]]
+  - [-1, 1, Conv, [256, 3, 2]] # 3-P3/8
+  - [-1, 6, C2f, [256, True]]
+  - [-1, 1, Conv, [512, 3, 2]] # 5-P4/16
+  - [-1, 6, C2f, [512, True]]
+  - [-1, 1, Conv, [1024, 3, 2]] # 7-P5/32
+  - [-1, 3, C2f, [1024, True]]
+  - [-1, 1, SPPF, [1024, 5]] # 9
+
+# YOLOv8.0n head
+head:
+  - [-1, 1, nn.Upsample, [None, 2, "nearest"]]
+  - [[-1, 6], 1, Concat, [1]] # cat backbone P4
+  - [-1, 3, C2fAttn, [512, 256, 8]] # 12
+
+  - [-1, 1, nn.Upsample, [None, 2, "nearest"]]
+  - [[-1, 4], 1, Concat, [1]] # cat backbone P3
+  - [-1, 3, C2fAttn, [256, 128, 4]] # 15 (P3/8-small)
+
+  - [[15, 12, 9], 1, ImagePoolingAttn, [256]] # 16 (P3/8-small)
+
+  - [15, 1, Conv, [256, 3, 2]]
+  - [[-1, 12], 1, Concat, [1]] # cat head P4
+  - [-1, 3, C2fAttn, [512, 256, 8]] # 19 (P4/16-medium)
+
+  - [-1, 1, Conv, [512, 3, 2]]
+  - [[-1, 9], 1, Concat, [1]] # cat head P5
+  - [-1, 3, C2fAttn, [1024, 512, 16]] # 22 (P5/32-large)
+
+  - [[15, 19, 22], 1, WorldDetect, [nc, 512, False]] # Detect(P3, P4, P5)
diff --git a/ultralytics/cfg/models/v8/yolov8-worldv2.yaml b/ultralytics/cfg/models/v8/yolov8-worldv2.yaml
new file mode 100644
index 0000000..322b97d
--- /dev/null
+++ b/ultralytics/cfg/models/v8/yolov8-worldv2.yaml
@@ -0,0 +1,46 @@
+# Ultralytics YOLO 🚀, AGPL-3.0 license
+# YOLOv8-World-v2 object detection model with P3-P5 outputs. For details see https://docs.ultralytics.com/tasks/detect
+
+# Parameters
+nc: 80 # number of classes
+scales: # model compound scaling constants, i.e. 'model=yolov8n.yaml' will call yolov8.yaml with scale 'n'
+  # [depth, width, max_channels]
+  n: [0.33, 0.25, 1024] # YOLOv8n summary: 225 layers, 3157200 parameters, 3157184 gradients, 8.9 GFLOPs
+  s: [0.33, 0.50, 1024] # YOLOv8s summary: 225 layers, 11166560 parameters, 11166544 gradients, 28.8 GFLOPs
+  m: [0.67, 0.75, 768] # YOLOv8m summary: 295 layers, 25902640 parameters, 25902624 gradients, 79.3 GFLOPs
+  l: [1.00, 1.00, 512] # YOLOv8l summary: 365 layers, 43691520 parameters, 43691504 gradients, 165.7 GFLOPs
+  x: [1.00, 1.25, 512] # YOLOv8x summary: 365 layers, 68229648 parameters, 68229632 gradients, 258.5 GFLOPs
+
+# YOLOv8.0n backbone
+backbone:
+  # [from, repeats, module, args]
+  - [-1, 1, Conv, [64, 3, 2]] # 0-P1/2
+  - [-1, 1, Conv, [128, 3, 2]] # 1-P2/4
+  - [-1, 3, C2f, [128, True]]
+  - [-1, 1, Conv, [256, 3, 2]] # 3-P3/8
+  - [-1, 6, C2f, [256, True]]
+  - [-1, 1, Conv, [512, 3, 2]] # 5-P4/16
+  - [-1, 6, C2f, [512, True]]
+  - [-1, 1, Conv, [1024, 3, 2]] # 7-P5/32
+  - [-1, 3, C2f, [1024, True]]
+  - [-1, 1, SPPF, [1024, 5]] # 9
+
+# YOLOv8.0n head
+head:
+  - [-1, 1, nn.Upsample, [None, 2, "nearest"]]
+  - [[-1, 6], 1, Concat, [1]] # cat backbone P4
+  - [-1, 3, C2fAttn, [512, 256, 8]] # 12
+
+  - [-1, 1, nn.Upsample, [None, 2, "nearest"]]
+  - [[-1, 4], 1, Concat, [1]] # cat backbone P3
+  - [-1, 3, C2fAttn, [256, 128, 4]] # 15 (P3/8-small)
+
+  - [15, 1, Conv, [256, 3, 2]]
+  - [[-1, 12], 1, Concat, [1]] # cat head P4
+  - [-1, 3, C2fAttn, [512, 256, 8]] # 18 (P4/16-medium)
+
+  - [-1, 1, Conv, [512, 3, 2]]
+  - [[-1, 9], 1, Concat, [1]] # cat head P5
+  - [-1, 3, C2fAttn, [1024, 512, 16]] # 21 (P5/32-large)
+
+  - [[15, 18, 21], 1, WorldDetect, [nc, 512, True]] # Detect(P3, P4, P5)
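yolov8-world.yaml and yolov8-worldv2.yaml replace the plain `C2f` head blocks with `C2fAttn` and swap `Detect` for `WorldDetect`; the v1 config additionally pools image features into the text space via `ImagePoolingAttn`. A hedged sketch of the open-vocabulary workflow these configs enable, assuming pretrained YOLO-World weights are available as `yolov8s-world.pt`:

```python
from ultralytics import YOLO

model = YOLO("yolov8s-world.pt")  # assumed pretrained YOLO-World checkpoint
model.set_classes(["person", "shopping cart"])  # text prompts define the class set
results = model.predict("image.jpg")  # hypothetical input image
```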
diff --git a/ultralytics/cfg/models/v8/yolov8.yaml b/ultralytics/cfg/models/v8/yolov8.yaml
index 2255450..b328e98 100644
--- a/ultralytics/cfg/models/v8/yolov8.yaml
+++ b/ultralytics/cfg/models/v8/yolov8.yaml
@@ -2,45 +2,45 @@
 # YOLOv8 object detection model with P3-P5 outputs. For Usage examples see https://docs.ultralytics.com/tasks/detect

 # Parameters
-nc: 80  # number of classes
+nc: 80 # number of classes
 scales: # model compound scaling constants, i.e. 'model=yolov8n.yaml' will call yolov8.yaml with scale 'n'
   # [depth, width, max_channels]
-  n: [0.33, 0.25, 1024]  # YOLOv8n summary: 225 layers, 3157200 parameters, 3157184 gradients, 8.9 GFLOPs
-  s: [0.33, 0.50, 1024]  # YOLOv8s summary: 225 layers, 11166560 parameters, 11166544 gradients, 28.8 GFLOPs
-  m: [0.67, 0.75, 768]  # YOLOv8m summary: 295 layers, 25902640 parameters, 25902624 gradients, 79.3 GFLOPs
-  l: [1.00, 1.00, 512]  # YOLOv8l summary: 365 layers, 43691520 parameters, 43691504 gradients, 165.7 GFLOPs
-  x: [1.00, 1.25, 512]  # YOLOv8x summary: 365 layers, 68229648 parameters, 68229632 gradients, 258.5 GFLOPs
+  n: [0.33, 0.25, 1024] # YOLOv8n summary: 225 layers, 3157200 parameters, 3157184 gradients, 8.9 GFLOPs
+  s: [0.33, 0.50, 1024] # YOLOv8s summary: 225 layers, 11166560 parameters, 11166544 gradients, 28.8 GFLOPs
+  m: [0.67, 0.75, 768] # YOLOv8m summary: 295 layers, 25902640 parameters, 25902624 gradients, 79.3 GFLOPs
+  l: [1.00, 1.00, 512] # YOLOv8l summary: 365 layers, 43691520 parameters, 43691504 gradients, 165.7 GFLOPs
+  x: [1.00, 1.25, 512] # YOLOv8x summary: 365 layers, 68229648 parameters, 68229632 gradients, 258.5 GFLOPs

 # YOLOv8.0n backbone
 backbone:
   # [from, repeats, module, args]
-  - [-1, 1, Conv, [64, 3, 2]]  # 0-P1/2
-  - [-1, 1, Conv, [128, 3, 2]]  # 1-P2/4
+  - [-1, 1, Conv, [64, 3, 2]] # 0-P1/2
+  - [-1, 1, Conv, [128, 3, 2]] # 1-P2/4
   - [-1, 3, C2f, [128, True]]
-  - [-1, 1, Conv, [256, 3, 2]]  # 3-P3/8
+  - [-1, 1, Conv, [256, 3, 2]] # 3-P3/8
   - [-1, 6, C2f, [256, True]]
-  - [-1, 1, Conv, [512, 3, 2]]  # 5-P4/16
+  - [-1, 1, Conv, [512, 3, 2]] # 5-P4/16
   - [-1, 6, C2f, [512, True]]
-  - [-1, 1, Conv, [1024, 3, 2]]  # 7-P5/32
+  - [-1, 1, Conv, [1024, 3, 2]] # 7-P5/32
   - [-1, 3, C2f, [1024, True]]
-  - [-1, 1, SPPF, [1024, 5]]  # 9
+  - [-1, 1, SPPF, [1024, 5]] # 9

 # YOLOv8.0n head
 head:
-  - [-1, 1, nn.Upsample, [None, 2, 'nearest']]
-  - [[-1, 6], 1, Concat, [1]]  # cat backbone P4
-  - [-1, 3, C2f, [512]]  # 12
+  - [-1, 1, nn.Upsample, [None, 2, "nearest"]]
+  - [[-1, 6], 1, Concat, [1]] # cat backbone P4
+  - [-1, 3, C2f, [512]] # 12

-  - [-1, 1, nn.Upsample, [None, 2, 'nearest']]
-  - [[-1, 4], 1, Concat, [1]]  # cat backbone P3
-  - [-1, 3, C2f, [256]]  # 15 (P3/8-small)
+  - [-1, 1, nn.Upsample, [None, 2, "nearest"]]
+  - [[-1, 4], 1, Concat, [1]] # cat backbone P3
+  - [-1, 3, C2f, [256]] # 15 (P3/8-small)

   - [-1, 1, Conv, [256, 3, 2]]
-  - [[-1, 12], 1, Concat, [1]]  # cat head P4
-  - [-1, 3, C2f, [512]]  # 18 (P4/16-medium)
+  - [[-1, 12], 1, Concat, [1]] # cat head P4
+  - [-1, 3, C2f, [512]] # 18 (P4/16-medium)

   - [-1, 1, Conv, [512, 3, 2]]
-  - [[-1, 9], 1, Concat, [1]]  # cat head P5
-  - [-1, 3, C2f, [1024]]  # 21 (P5/32-large)
+  - [[-1, 9], 1, Concat, [1]] # cat head P5
+  - [-1, 3, C2f, [1024]] # 21 (P5/32-large)

-  - [[15, 18, 21], 1, Detect, [nc]]  # Detect(P3, P4, P5)
+  - [[15, 18, 21], 1, Detect, [nc]] # Detect(P3, P4, P5)
diff --git a/ultralytics/cfg/models/v9/yolov9c.yaml b/ultralytics/cfg/models/v9/yolov9c.yaml
new file mode 100644
index 0000000..66c02d6
--- /dev/null
+++ b/ultralytics/cfg/models/v9/yolov9c.yaml
@@ -0,0 +1,36 @@
+# YOLOv9
+
+# parameters
+nc: 80 # number of classes
+
+# gelan backbone
+backbone:
+  - [-1, 1, Conv, [64, 3, 2]] # 0-P1/2
+  - [-1, 1, Conv, [128, 3, 2]] # 1-P2/4
+  - [-1, 1, RepNCSPELAN4, [256, 128, 64, 1]] # 2
+  - [-1, 1, ADown, [256]] # 3-P3/8
+  - [-1, 1, RepNCSPELAN4, [512, 256, 128, 1]] # 4
+  - [-1, 1, ADown, [512]] # 5-P4/16
+  - [-1, 1, RepNCSPELAN4, [512, 512, 256, 1]] # 6
+  - [-1, 1, ADown, [512]] # 7-P5/32
+  - [-1, 1, RepNCSPELAN4, [512, 512, 256, 1]] # 8
+  - [-1, 1, SPPELAN, [512, 256]] # 9
+
+head:
+  - [-1, 1, nn.Upsample, [None, 2, 'nearest']]
+  - [[-1, 6], 1, Concat, [1]] # cat backbone P4
+  - [-1, 1, RepNCSPELAN4, [512, 512, 256, 1]] # 12
+
+  - [-1, 1, nn.Upsample, [None, 2, 'nearest']]
+  - [[-1, 4], 1, Concat, [1]] # cat backbone P3
+  - [-1, 1, RepNCSPELAN4, [256, 256, 128, 1]] # 15 (P3/8-small)
+
+  - [-1, 1, ADown, [256]]
+  - [[-1, 12], 1, Concat, [1]] # cat head P4
+  - [-1, 1, RepNCSPELAN4, [512, 512, 256, 1]] # 18 (P4/16-medium)
+
+  - [-1, 1, ADown, [512]]
+  - [[-1, 9], 1, Concat, [1]] # cat head P5
+  - [-1, 1, RepNCSPELAN4, [512, 512, 256, 1]] # 21 (P5/32-large)
+
+  - [[15, 18, 21], 1, Detect, [nc]] # Detect(P3, P4, P5)
diff --git a/ultralytics/cfg/models/v9/yolov9e.yaml b/ultralytics/cfg/models/v9/yolov9e.yaml
new file mode 100644
index 0000000..8e15a42
--- /dev/null
+++ b/ultralytics/cfg/models/v9/yolov9e.yaml
@@ -0,0 +1,60 @@
+# YOLOv9
+
+# parameters
+nc: 80 # number of classes
+
+# gelan backbone
+backbone:
+  - [-1, 1, Silence, []]
+  - [-1, 1, Conv, [64, 3, 2]] # 1-P1/2
+  - [-1, 1, Conv, [128, 3, 2]] # 2-P2/4
+  - [-1, 1, RepNCSPELAN4, [256, 128, 64, 2]] # 3
+  - [-1, 1, ADown, [256]] # 4-P3/8
+  - [-1, 1, RepNCSPELAN4, [512, 256, 128, 2]] # 5
+  - [-1, 1, ADown, [512]] # 6-P4/16
+  - [-1, 1, RepNCSPELAN4, [1024, 512, 256, 2]] # 7
+  - [-1, 1, ADown, [1024]] # 8-P5/32
+  - [-1, 1, RepNCSPELAN4, [1024, 512, 256, 2]] # 9
+
+  - [1, 1, CBLinear, [[64]]] # 10
+  - [3, 1, CBLinear, [[64, 128]]] # 11
+  - [5, 1, CBLinear, [[64, 128, 256]]] # 12
+  - [7, 1, CBLinear, [[64, 128, 256, 512]]] # 13
+  - [9, 1, CBLinear, [[64, 128, 256, 512, 1024]]] # 14
+
+  - [0, 1, Conv, [64, 3, 2]] # 15-P1/2
+  - [[10, 11, 12, 13, 14, -1], 1, CBFuse, [[0, 0, 0, 0, 0]]] # 16
+  - [-1, 1, Conv, [128, 3, 2]] # 17-P2/4
+  - [[11, 12, 13, 14, -1], 1, CBFuse, [[1, 1, 1, 1]]] # 18
+  - [-1, 1, RepNCSPELAN4, [256, 128, 64, 2]] # 19
+  - [-1, 1, ADown, [256]] # 20-P3/8
+  - [[12, 13, 14, -1], 1, CBFuse, [[2, 2, 2]]] # 21
+  - [-1, 1, RepNCSPELAN4, [512, 256, 128, 2]] # 22
+  - [-1, 1, ADown, [512]] # 23-P4/16
+  - [[13, 14, -1], 1, CBFuse, [[3, 3]]] # 24
+  - [-1, 1, RepNCSPELAN4, [1024, 512, 256, 2]] # 25
+  - [-1, 1, ADown, [1024]] # 26-P5/32
+  - [[14, -1], 1, CBFuse, [[4]]] # 27
+  - [-1, 1, RepNCSPELAN4, [1024, 512, 256, 2]] # 28
+  - [-1, 1, SPPELAN, [512, 256]] # 29
+
+# gelan head
+head:
+  - [-1, 1, nn.Upsample, [None, 2, 'nearest']]
+  - [[-1, 25], 1, Concat, [1]] # cat backbone P4
+  - [-1, 1, RepNCSPELAN4, [512, 512, 256, 2]] # 32
+
+  - [-1, 1, nn.Upsample, [None, 2, 'nearest']]
+  - [[-1, 22], 1, Concat, [1]] # cat backbone P3
+  - [-1, 1, RepNCSPELAN4, [256, 256, 128, 2]] # 35 (P3/8-small)
+
+  - [-1, 1, ADown, [256]]
+  - [[-1, 32], 1, Concat, [1]] # cat head P4
+  - [-1, 1, RepNCSPELAN4, [512, 512, 256, 2]] # 38 (P4/16-medium)
+
+  - [-1, 1, ADown, [512]]
+  - [[-1, 29], 1, Concat, [1]] # cat head P5
+  - [-1, 1, RepNCSPELAN4, [512, 1024, 512, 2]] # 41 (P5/32-large)
+
+  # detect
+  - [[35, 38, 41], 1, Detect, [nc]] # Detect(P3, P4, P5)
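The two YOLOv9 configs introduce the GELAN modules (`RepNCSPELAN4`, `ADown`, `SPPELAN`); yolov9e additionally wires an auxiliary branch through `CBLinear`/`CBFuse` and a pass-through `Silence` input layer. A minimal build check, assuming these module names are registered in `ultralytics.nn` as this PR intends:

```python
from ultralytics import YOLO

for cfg in ("yolov9c.yaml", "yolov9e.yaml"):
    model = YOLO(cfg)  # parse the GELAN config and build the network
    model.info()       # print layer/parameter summary
```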
diff --git a/ultralytics/cfg/trackers/botsort.yaml b/ultralytics/cfg/trackers/botsort.yaml
index cbbf348..0c66dc6 100644
--- a/ultralytics/cfg/trackers/botsort.yaml
+++ b/ultralytics/cfg/trackers/botsort.yaml
@@ -1,17 +1,17 @@
 # Ultralytics YOLO 🚀, AGPL-3.0 license
 # Default YOLO tracker settings for BoT-SORT tracker https://github.com/NirAharon/BoT-SORT

-tracker_type: botsort  # tracker type, ['botsort', 'bytetrack']
-track_high_thresh: 0.5  # threshold for the first association
-track_low_thresh: 0.1  # threshold for the second association
-new_track_thresh: 0.6  # threshold for init new track if the detection does not match any tracks
-track_buffer: 30  # buffer to calculate the time when to remove tracks
-match_thresh: 0.8  # threshold for matching tracks
+tracker_type: botsort # tracker type, ['botsort', 'bytetrack']
+track_high_thresh: 0.5 # threshold for the first association
+track_low_thresh: 0.1 # threshold for the second association
+new_track_thresh: 0.6 # threshold for init new track if the detection does not match any tracks
+track_buffer: 30 # buffer to calculate the time when to remove tracks
+match_thresh: 0.8 # threshold for matching tracks
 # min_box_area: 10  # threshold for min box areas(for tracker evaluation, not used for now)
 # mot20: False  # for tracker evaluation(not used for now)

 # BoT-SORT settings
-gmc_method: sparseOptFlow  # method of global motion compensation
+gmc_method: sparseOptFlow # method of global motion compensation
 # ReID model related thresh (not supported yet)
 proximity_thresh: 0.5
 appearance_thresh: 0.25
diff --git a/ultralytics/cfg/trackers/bytetrack.yaml b/ultralytics/cfg/trackers/bytetrack.yaml
index 5060f92..29d352c 100644
--- a/ultralytics/cfg/trackers/bytetrack.yaml
+++ b/ultralytics/cfg/trackers/bytetrack.yaml
@@ -1,11 +1,11 @@
 # Ultralytics YOLO 🚀, AGPL-3.0 license
 # Default YOLO tracker settings for ByteTrack tracker https://github.com/ifzhang/ByteTrack

-tracker_type: bytetrack  # tracker type, ['botsort', 'bytetrack']
-track_high_thresh: 0.5  # threshold for the first association
-track_low_thresh: 0.1  # threshold for the second association
-new_track_thresh: 0.6  # threshold for init new track if the detection does not match any tracks
-track_buffer: 30  # buffer to calculate the time when to remove tracks
-match_thresh: 0.8  # threshold for matching tracks
+tracker_type: bytetrack # tracker type, ['botsort', 'bytetrack']
+track_high_thresh: 0.5 # threshold for the first association
+track_low_thresh: 0.1 # threshold for the second association
+new_track_thresh: 0.6 # threshold for init new track if the detection does not match any tracks
+track_buffer: 30 # buffer to calculate the time when to remove tracks
+match_thresh: 0.8 # threshold for matching tracks
 # min_box_area: 10  # threshold for min box areas(for tracker evaluation, not used for now)
 # mot20: False  # for tracker evaluation(not used for now)
diff --git a/ultralytics/data/__init__.py b/ultralytics/data/__init__.py
index 6fa7e84..9f91ce9 100644
--- a/ultralytics/data/__init__.py
+++ b/ultralytics/data/__init__.py
@@ -4,5 +4,12 @@ from .base import BaseDataset
 from .build import build_dataloader, build_yolo_dataset, load_inference_source
 from .dataset import ClassificationDataset, SemanticDataset, YOLODataset

-__all__ = ('BaseDataset', 'ClassificationDataset', 'SemanticDataset', 'YOLODataset', 'build_yolo_dataset',
-           'build_dataloader', 'load_inference_source')
+__all__ = (
+    "BaseDataset",
+    "ClassificationDataset",
+    "SemanticDataset",
+    "YOLODataset",
+    "build_yolo_dataset",
+    "build_dataloader",
+    "load_inference_source",
+)
diff --git a/ultralytics/data/__pycache__/__init__.cpython-312.pyc b/ultralytics/data/__pycache__/__init__.cpython-312.pyc
index a315137862b8c97c18dffe6e18dd1edd9e1bec6c..79aaa2a176d699156245f75bae535d33e04b47f9 100644
GIT binary patch
[base85 binary deltas for the committed __pycache__/*.pyc files omitted: compiled bytecode, not reviewable as text]
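Both tracker YAMLs are consumed through the tracking entry point; only the comment spacing changed here, not the threshold values. Typical usage (the video path is a placeholder):

```python
from ultralytics import YOLO

model = YOLO("yolov8n.pt")
model.track(source="video.mp4", tracker="bytetrack.yaml")  # or tracker="botsort.yaml"
```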
zCOhO$QZ?s8Xw1v}){rf9<`aDBX%=KQH{8VXW}^XmMObx{n?R!vWBs+c0n}a7t2&!f zHJf4^cL*y%$6M@8hE`-Fs@wD<*J#VC%pF!3p$w&Eb=9u12xHYXu9Xt#$rG-*I`5j- z=3S%eCN?V8H3?T;bL+TkZsXco!0&=Pyj*Wq8eVGzp$4>(`0z(@wdWAT%mk=n9!ND~ zA% zuV*mHgfs#K;yVckazg=i^VNFZvY7^#JvBH+2)>E1;eM@FoQ~ zzv?8`Ads-KlMp{J?4}9c0_r(DOIt$#^_&CL(9Hs%64y<{1ymVFYJ52X5FFYWCSU?Q zM!G}OF@SFd05vSJY&Ro$G?;$gXx_utT`@mz*iUp5;qsg;>)EH`l< z1zHEV--jo|g1q*kl)Y9O2x)H*LINuvMM!-}g`Cp~&dwPGKq>_y700*&WZkD$|oWlj#jGD3`d@TLI1 zz06`*xlNgjy~^Mcg4q#;mjuiB$jN?<&xu!UVouv(&_Dp>2r2a@Z$*-P6|vc=02SHS zn91C(0)14KY-pRAiG1u1gHIw51az_#-#T6?#kU!3EilGl>-c4+A=p}AY~ix~t$goO z4BlWM{qt>%2~$}`?4RQj`LjYn(-9;zW6S^%&4t2wW5zrTq?9r9L`R4PjJ!feCkLXV zGpy$4L+EG+Jb{v7g44iIgVQiyZD6{ktT@omoJK#Oe@ICR`bl@wRto4R(@lfZNDKPO z?&S0vU(%dLMmP3mbjml5EeK+X}#seU6hHh&HI6cTvK&zF$gd`IQw zy>4Oir;&d|%C&X=oYX(;zZ35=?&bd+^5-T0U(xx|o88`bm==BSBY1CA-ust$669As zB6<0x%_8!RA)jeWPxnG2w%T@MKa__h|H`MikO;;l$G5*T_SGM@27;NgPB>II*;FVwgEVW(Ryu zY%fruenWFzcvo{`cvGh#rdzO=5_Um+lOawFLW?Nkn>BB}gFpLE6W%T`kmbG4m4&d& zB|hyzF|v+N;=wKI6K)Y}-XdZD2DvnQ4e#1l7!YIj@MbpgB`hZgBaZeyo^i+NWnV zSSq9fbFGO2?}iwb@E#-%WL`8e@|;UrkW&hssA5jPoqtLAS8qKkGh|UFhMVM*8+h@)fiDG}a`8Ns?T!fZ#dKIbVf>7}1Q!~sdCKm>Smtd4IbQ;G|!K=hG0 z8AFeO0)Q3ZTFC@k1PBy^oVeVf?*wL#VC{k4=6X}dKo8KTiDlD?Ll}Vd$S!ngYQ;%$ z9?Az6elsO>g(Hw=Zt3xqj7SC=IIy#_q6KTq7+^bv(a3gAXnB!Z3n=-qqH(Zaq`(e> zBW=S9TgEz!5iBjjBPhG^YNKucImXiU_40DPQFF5`yRMdB7`{XWMh;L9y@@RL4=@l; zRYu?&KRR;aE$OvRDBpd`e4R;8mjsS5Zj~XL zsFX1^U~!vKYzH|G+pkY?Gcq>_Y;Izu)v|gg1-X`EoD@e8iJ8MjLjI8v$QxsE8YiW2 zA^q)CW(>cWFrJtwyatQg1RiBWOH4OwEY0Ta0@L2l;1Gj*8Bm63pJH&9!DR+j284ch z7y|&iz;&?-RooSu=#FR`fohF5k)f}6k*^B(a~`qJ;A)E$#7K^*2lmFUR(W1J0);}XrgFWtua$1-ljVfap~;&|tyVY#)cFf6(D<<=4Hxsq0g z#nMzpM4DZ+A4J`Y;MbqWuNu8a8Iz6=a5snbJSP%(LQJGiUfSSGgCna3WRtHtgJ1ns zkh4M*4DiOv)iNn(m9IJsRbZh)6$Ny;O0KKS79vtzmM5ZJ`(?(a7)Z;4=AA;?g;4Xx zRr5ju*9gM%`dwQ1DZEQjHJDVia@T}N4Q&M40#XCYgZ(!U4Uis21ThOJZyIZ!eUNbC ztpt?$$njVVXbiSpWO$Je;R>yLh|)yC4@onhX5=YCKHcTB4A2)j?Wk8A31q^p@`ceS3USUbxLWirSob? 
zP(4UYW&Z^RL?jS6%lhXD#NEW@8cfPkl{SS-Y9S;DYL^+c5eCtXYuIjDA=!oAaX5?T zALcmF=rAUWju{a0gGOgkPnh+v%Kg1)Vo#wUg-iYtW5K`&sNsk3;2dieLS`h)N!su* zwTdh9N51S7uJ&^XB6`?qvDz_!1eDcIQd{kSeYf&ry)$Z!;Wq<@cmls!YrkwJF-kd$ z1n7V}eo^M}3)nOP1AGU<=E~0lx<&o!tK8e(Y)2n^*+gnxS%&zjZ$|*tms&1O$#)gX z`dWLVMMcC4$l9hC0hyTdtj-ZbUa9sXOR!+M}ohXcod}{|0uv1+Nxwr)_VM`ef z&8Zv>d5jWapMaVaB5AMIDl|otvh*GVSbRZzPR)ea*-52JVJh}@BH9Nq52zW0(82}) zSSCu2SO`@B2cV0;DVOq0r@u z))jA`g9*EAZlHA?dZdANLA5L6qxHR^GQ9%u4S^1qJr*kQMb&|YWfY9oU?^(#)k*^z z)#6GK-6}nd?Lg`=s+}($FQ7+S*JvW4M8MD@I@fB@f%;1Ma|bYXe3DSdyxKSe_`NST=`4bbHp#sIZB6*NvO!u2To zugdh*^QLbLih{Jj@Mi`>VyI<>N5sc?co<{jTjt&dI5nA!T6>1vJz?_# zy9v#dT|w<0&@DhUrN0DyQr+aNv|vvl7y)YryS~+1yxwusp7A1mRd^3%_R2;gl-d>p z-YDYeE!syKGk&bEaQ{j0vS+?Eqi6%Z_d*3TPw{%8vGh7YV<7aQY%dC7IUtO>-Gl2Y z&89|CArMC2f=dM+p&9{w-P$_NfrNPiV7cA^tii4t>h|dOLT8MPQ&y{a)IsaOfmA{6 z*wm_B$1C+p0oLlyvEIo`1|UUTBjlsgT*STkoU(pStj8$ ziiq^vR$_@0D?>OXwhAfQDnJSxE2p%{*V!=Vcd?hkL<}yYEiP@7cj3~~FfG5B!?TqoLpoxz70FlUH33(HQX zu}!+1ME=?-b*+MD50_56nRk!%vgfV*nre*-@8ejD=qJ)!7`?p0>c%^r6Bu~ zUy8(c(oS}b63-lDp^#@%fnX)xF#Af(I72swb*Up#>IhcGLu>iTm^E@2R@2>#mB(A} zo@=zP?5wnnk~PZZEc86T(UCD)BggiDg1{D*o1g`iPw7^)$qa3Jp z;-5F#{}6Y`@89K=HSMl(C4O^!7y4=Y_t@+B{VMuq0^pF7mVl|ck=QYs2a&pmsj8>I zFteTdspL*C$H8vxseBwWOZW`}W*BL|wpqhQRWX(t%u`iiCJpUBFsw}uN^D3JZBb7& z;gE9em5_lMQ$Q>VGu$&^SGIv>&dUak9tyyGy)^PEC4;c)Fw*9xT5pFW^J-)NV3#Bl zdn)pY2Q4LOhZbO7=DrYal%H2{Y@wHeFn@?=B9G#+AVHz_B|x20pHL|+Vl6Ba(dk;LH>LVmkjUb#zro@@5-Lf`|EoxNE3OncT+c{s)`V3g zRpRcD>f4W08k@D*Nm;_U@!EFHO$#G;VnOM0xT;Y~CqYw!zlgdZ>bbfgw#yeaNx2{- zcHyj>Kv%80acl|^{N|b)bwPR9_Q;CkmFBiPdaYt_gcv0^$Ar@|)nyKvCcZwk5_vOjz~zkAmSzXbpll?*&pyK1Q3w_O>%ka7;6+rU=eJ^VN^%# zngH&F6cAe;l5apZXr;bnP~H!zp2`QT2#hLE5yFB)Y%d`5TLbQ(#C0uYe|K*VK)d%& zn(|qecvq}B;iu}7bUhhhSlYUHZOE!NKq?ue<#bvK-vGOhn;B~e*fgbold!f|_wu`p zFH+xj6Baa-{jSj#ZMZ^Ndj!{Pd)yh>7h&={xF5)SVNu;Eh=Fgag zb81N>1``17&2`OC&J|9EnD{=(@8KE!HV?!w*vq9$4I>ZTOl8fg%9|^EJko+!Luf(( zmNLNrVJw1WkYpb>sArOPiCF{WAhFLO4I&14E(ucJXH}{cB5!>sU%kPAX|p*cVSNp8 zH_>X=+IF4MG*(SCGqHb`IR*508T$+as&*mupxqp-BxT_2f5HbBnPP5j+f7D{?OyO4 z4s-QCiI$2SGByO=Dg+MG#yrHRAB+P`jm|v-K#&Vz-PZ%bk1S7{;(JU$vAGK=C&1nS z5D&p>5g_IPV>ba{gdkleY!bi-i#kl&6t}H#8wpqqut{%Fre$j2gXBtpk0xM|X8=1d z1^bvhutSMS0cVyOht}V?nG#_scQ;|X3RQrVVnJAMP;m=kJ$P2BGfqP+QU7@44y9gIcWQ>VKaW7{o!j(>f&WDyp3o;L zHJ5_eOPITgQTWq{MMS||`jEQp!PP#DKum8BD!Vk;{lYeFKTJr-(eIC4kLxW)njhfW z$^Hrga2Z9${~3co{3~mW`;qp`m<-BtkYE+u2*x1_0v>C# zPNEq5=NS;g*#DFPu?rSX4xAp?Uqal&DjBb;5MGG`d#cv|0$&vwrhErK!S%Pvib_fUlGp_tG!AR!;6{E-llhfs)2fCd9ZmG)Ae3ev!v$a+EagI5xvCz6{y2Myx* zWC#2O&02)Nxx%39CjxZe;Ft6gi{{V4NLSv0qk}bU0i}8!`9LQDObBVE2_qS%4NDqA zl_on~AUgsL2H@?Quf%sim!T@>#?r)E78Zdfjl9=#u>VY~4Z*H+xSPFh*uR1tBUseq zIn29g9h$S^*t&{E`prC+=6o;bjKaW&Ptw$d+(^PKYiNyvgp{;6u{I8gRoa4n-)QUl$7VaUWOp;>&K<@2a z4}eXr?!$t^iVc-tcx8RM4yZlr1cgJH5Qd|P@tRtE7t@4m`Sm7beqM>%8J!@bH3?^wSqEF~NH`!8>+NO3omd?{^0oK>pXbY=`d!T77 zV!LhLgnSXHW4MN~P9Bbrnb`XBPAWc(`;0MR43;}m5dpIUj@2y-!Po!pcl^@#{lvd~ z^RX&%fcH0yh%bu#JZ>;A!L}d-*@5Yt+Grs-U>Z%8Z580ZkprSi$swXK+(C~lIbaS+ z@fcyNo7#t^E`9-vi~;Uw{20HIKpMhR6uspjhQrnigerGZ(ucZ3oCdpdo3bqR$ zry^k+*VY1ldwF?Ow(uS+)2rxEC;TDny0%)z=x7yRWHTy1u1`Jbk0h0T8}%)^LG$;0 zqMH{2Bt|ZyV&AFsiYTa|LIs+GkW|l+NO{NSL;FZoi7;EbLyL)`?pPuepwTwgHauBn z3Ff^kE%18R< z(`n44R-*=QZX*6gAA@+Hcc8mddyEAb@DMKa2EZF8bV2QB_B%7A%u=L+1j4?=fGh}B zKCj`&O=G7{eSLF%c37d~VAoOggc(Wfj*oY;Re`g*Nvf z)&Gpl3q-1-vKl*N9HOblfW-yAD}?Q_)EQ#~NP_IY%0O-<_)*+0LY6~ZYX33=$@O0l zgK)IKg!eNz#Nb{AVlftwp@8HN<=bpxsM+!XLJ)o!52ykV*EV4Irep8qMlug%_Gj+G zHv9#f&#Sm1e+k^3!X=v-BIX}iFky)|#R4?q!iB4VoSRkLbuGDlcI?d{PkTE%k)LM`^1OvG!oaYPV&rg6;Y8 zRDE#>F8ld6`Gi6E@PYynAOfgxmDvI;E><^d5x}1>J`hDO(iiW&ytY4JcP>`?^}QD> 
z*z5Ks(nU^Ez8oS4cAT4(3Ou{4U&pA}zro-)8T5cTA?yW$Ik}fumO9wZgg>JLy(1VE zaluE9lUmE}Vn-#GDA)tQ4NF**q*2BYd=1MCED$nSazK|$&7+l>;zlOy1K7#J zZg@nVh)xo3WOw8GS?js9j>oZcfKi-JxA`Igav&(Yj2R;TYI7 zXl%J40|prG{FpzlJyoNIHO|{GFCmr=+GxuILV`QUl2xNY2lg)uOonDTNjaSV!MUoZ z+#^HMsDSek>S)$q&eN-)Ij>R&v(#TVj$UpEe*bK?LJC4Ot- zK`GsUU;qzQ1Q}EWnMx_xZ?Cqm_Cf*4$*Y6{l8;!5tAWFir={SqX9{HRpgF_!K?E+; zx&WKlgvj~9TdCU1j70s&*u*9R$ zpW03U#=W_XndlkT!v-g6ljtv>Ve;lwi|~?Cg%Kmo7pW~H$0(ia=7P4}h6KgYS%Qtu zAp+<$;1*;Gt_RGn`Nh}|U{4ynBthh&U5ea?O|EcFpj?W;jYvPq5-$^#e=wM2DYy%I zG)%Zc9U`_>R-LW~#0oG#5`A)qfzDkB_~GIph7diIXAQ=Y2|T)Zc7XFzZOEYuSdc!t z=C`Ua10NC*)|s^ikHIoaoq|)M6&?)+WSfhRgOivIZCa2tlW>Ov9)YQ|5b<-{ZD@c28p@#(GsmmBm%0L?t~CDg#$^V^PQstr$>G4c(kaK0V6`dl@EAvxz2CmjwHVcoYy^np_0UH zR@1Zo3pF5$f29E)ctJ<%e|L!a7`*h=;!&asC=rP-ASD&Poh{d`)%-}%{dD5ykpj>G z2zZ!#U|&c#1)r<<71>CI4Rbg=;JY#RwH9IoQ5*7gFfa(fDFh--k!Jn}J{M3FlOKUh zSvIHG&P@wj@9^8O6{J$S>5f<0?6msIW^1#pY`H?7|ANK@Te*87oP_>63E_tjp&bCX zQB6Qamyb^)o`V7cAq;m8Aq@(MDC9>u3x3w3Eu2_@M*`M?92+;@2>%Y&AvHE`Gp&zx zAo4hcOU#BM%i9rQ8o0Y5FZ&zlVNQ7^#fCeu52J4Oxtasv-czlu&e@CNdL%TudNZOR zWJ|ogW9kRK=&`&M-KAd0(*JYuEmQ3o`ykqLZs}G5KM`pvksQ8BDC(+t6mNu~o zv5$q70@`YJ9tf>1{bqZ>Ad^&YB&bG>U{h|El4j+FBvILPT5eW^?JyQ@Xu135hMS_x zT}>wNkc z6m)w5GHI#>c40Z|(@z)zS+D};y0r`Gm4M7xXF;KPO(s1bT0$R0sHAW^_8emcs?00l zZ&Q>wBFQUbN-73PkTJO=FkOMzj|)v~k{yu^E;yz|Fo6>N{Y~bD(;^XZ*#C|NEi-A# zt}rebToAf|*lDIc#o#jx2xoA2#zlpKKEdm8ES44E2`~~)ZCRG$)C)1x96rCe^z_B@ zbLSVIK7G;6^Q4;#l`A!6gX@ky_58)Nczovh#TS=OFD@xv*CtAt9Su}iE}ZGX%vN=R zv{D)<%ThRC1H)euY_iz|c63d+6*4W_j<$e6u%p3&ciKdww(Ow!^c2~PBS=5KOdLT3 z8H2(qfn_MD2dG8{yW6oX7mkHyKs{2}4Npbam{AagR|Yg?0z2Z1cwl$$@V2$9ek&^(fFM-bFO~t$g$;20aLuWA`iU z%p4*yI0-m{j>KN(=@7b&?0?5bZk%2fJ9lnr$sR%kI}x!*!Y<(Lf%0jNmfEiz)GRT< z_gR$iBdnehGmNB}%jV{7ruLEOZ&(6-26OlYY|gB8zA8LoXh$J=EIvLa&!f^PD6??5 z4|cX(rf5kCjy7l=Ibb(w<#PM}g?a2Foqy=~Ll0?HpeN*(N_z#}k;Urmnh@Q>h{pD9 z?(vyM#SexB(dbqGk8xfybve?CWzbB)ANZ_uz#S+)+wZ#YD*pE34RYQ6a_4Vo{eGJ+ zx&4yf{{}W<;9qE+9a;0kp_?pPftCeogbNNPtIwDGPtH^6)l6r??-`#PiCDo9zxzVq zElg*HE4&@jAXg@=Rk0KeEk9}V0_S;j;Q-tL7D8|eJdl9_QLY@Yums6{#YrLue7bsZ zxK4-l%32J9;S2=B8MP=~i#cfuj3N4kjvRS%U9t+|o)4BKxwR3LG6FV*%gKCq2+P~p z%~6PjVVz?^5DbxSvOCPOow2oXXM%BO56E?*n7_nTz7$O;};+m z@hs!Wdk}pz3QuFBAn|Zo2!c%#A6^L4u9g-tJ8PZ(1+*+wBM0%>>hT8&5wtO@VxioqLqd_M~Es>KSpA;aurK+?_(gV zfMi7Zt5ztw;(OSZWgoPO5cY-)8Cb@(mVG5y!?VhTs0E4EYG`?fI?M*$p#>+t?t;b& zRCferDYlCSM4DL;&jmgkv2sHGe(}k`tjbl@iB>XCa*}Fr$X25d@p8ObJ z{Y3`CUnXfU&{jSWh$`(gaG%W;t5`0%>1M0REZ7j#Xegh(DP?MHzrswD%)ZMQSLBK{ z9k%SswKI@8v^MRws6@bEHk}g-ZtC3m=bn2Sn~x}^pea|YF*_FGL(TfSa{JO@VV`6m zoxv>&b_s!KJg~4WWiZXlETt63>5TR{JadgTo(0$Dp=^)wg&vYX#>R+)B%DMfHclpT z0xT%)r6v)FM;IPv0Q)NCmI=;kjU%wQn$Xr)5Nt$#ZKW8DN9760heJ`0tahyt4|947{HqXPiEcx>BeQ8@SqPuX;(gg zVg*K`>Ajv1G%!D}oxRT$aIgeH2Not9F!9Dhfb$)D;<;hH=#`h}WRhf#>S zfn14_WoBQ&9zl^W%z;0{2^3)83eQpb2M9cERZ!Peb@a=#+-r~A>xHN3_q*QFk`yiQ zq$L0vz@0*}mcU*YDqiRhLX|qFRqW4H*4HcY1RexnsRbE2UQ)KAb6mOR=q9)|WtUDV4I}n)ta&aGb0)Tb#(>Psh5VBI&1!y?3#R5isuh8$H?;QtZXadFt@bqE0* ze!@{YHznC%PrfJ))9EBfyZTgad?qW6YnbR|Sq(gX4n9Gk)N!FkF;(-hBTKORZC%-2 z1emK09QQ?`)MraW`!+mB!?14^+(as%18Uv=9j>iuy>3t4%SN$L`yjZuM$r! zwVnZ#y2&%ooqqP|(zMdgJmrI#2e+jm^+X`7Gx-1=p3=}geVJ)seF59R0PwzzTEq`1 zSo0jtUivcbXaFy)v6G=^Zf-Uctr4_EP6o#9*j2xuu^eP*YF8@jgIO%F1TcVNpgYe! 
z!^3pN2ZnZ?^GPV69*Qo5!EQ+w2enwh)fs(e10ehloF%XHzsA%Q`hgfSn=aY;{ zKv1)T2dlW9HH4*rVW03Iz z6zjku+&FlI-U&te|4~k;gNRTJ#eaE-ov6Ct;YFnNj18DXirC{kY2_I1w$y$e%{ak2 zG_}v<4%$qsvdZuu1>dJ|wHFXXsQnm8g;4w3dS?Rl9|tcb=(chJ)d`tX_i%J1y0(xP z5n*b3C&F!28YH$?o*nj^(f!!Msu*wO>3?5ZlGWNEM>6Jz9Cj9I zL(l6gp7l7sVhQsrY;S_GR0C6h36_G9==Z?0oaPRR;g-%RzuS}xRmmB8e~goNID<=e zXxvtr2O9(^^Qh9(Zf;=B7b{bX5i4USA%NKIoCr891w(oYD^a9iWPL0YNv-GMCTstD z)NY?bJKdqsbaO}bjW-*3ZV1oDF1IdU#<6n#P`kt7A#<~S$Gh>%D>k;9i(JjG#C{qz z+4MVSKf~Zz2K13=e~`h481yW*NWHs6gcFFwMwCu42s@+dLw&9%LZ68oi9n@~J1AYW zfZ%f>jx(VFg#Be`(>OJRpwF=RV9XcDf!e||juy#bTblB3OHrJN-{_U$r@CqTTcBOS zc5aI7kRu@?xfEzK>2C(5W}5SeQ(re^e^#tZD7sS)4xz*25lpzFhgP}6_Ho*CLgxl? z7PNZcAyTm8)V0f2v%n4{ZU3^O^SeefBelTt6Yp|USQ^4Y92qBE&;?b(G^P1R4#!3L~|mN42(MzVuCXi zEDCe*Z;%RZn-A^pWWXs14cpUB*;y|?OS!#&2g7} zS+>;J^h}`Uw#;$`W8RCfbWt0+1Qys71rQ=n;jC4y$p#_~#XSY$4F5yn*tlUo$I5yj zhiuAs5po_zgr{m#NRb1hf^A*UuTyt6Y$lAEcX;+<3aRib4f9TF%`)cUcTx#5H6i%Y ziZS9W?~idGzAoVgoT#yec>zBt;MyFM{+~Vf3UIXm@dMzBjuFh8(5l}|%RMcfGT>XE zL*5krnN*lTPfs{CoGf5QVls=;oyRq1Bi&aV3dxEr9>qt&A~!Taa)%WPIR%dupnty> z4j^MI)i&Hk#?WtS+sGoeTF3~?(2GcG2%K90jq=qLRTUhNNN=Pj{XF0xV#kOG@J?o zFR?%iPDQF*%6UJsB zD9O(Iv5FI!3^dc*D`4G}Q%qsL>JF?8i&%7zSw4wZq<>Lw!EuuSP&nRPVvi)1m{9OM zZ8a3y&m#tXrw|B{&B=`ZB%jMH7f|55c5@gw2m?sZdwY})Z2vxcl;l;d?I?7k_-)GU z?Ca@O4_tW?vZBrB6m^D1lLeqT1^nDzo5=yRS!bvnGRC4RI*SJlHiqj0Na)5zc%noo zD*WBtzRbOutj(O~B5sW-DCMj;gR4D;$2>5ki(v*;Fgc`-+5m0dfCUNCxCemr12%|Q z&0#-4H*P;{#ev<2@D5yl(ALpP-bk%r{VA)+;hQjM;_+iLZSwX%#=tWTyZVuq!iHrW zNG6t!UqE?duwzWvU$9VidCVEVIbr1}p`~-u)&VV>mY~S20cG&9!rSOgtENxPR{PO9Gi{}@gDSYkw zZ~pq1|4d=ARX9pHGN&mt1A>3N-3wTf(A{)#yRr^Hrr1~{``;p(0eQD!KMwBkXlIX~ zTg(Po~AP^ta7-Yzhc(eHDeE$0rbXN2+#fqr9wr|J4uvI-;CeRZP4Uk zwdEQhh_1NB{tI@=Hg@U2qnD(Sr{!tfDc}rHVycIE*G=ILCG+kf5)a;IcVl=n=fIaD zX|eo*d!)1O6_dG;2TElgUO1bn{4%~+7O`i@*I|ba_j_{j57E5#+9?H6BGgmx6svy-{0*bb}j2QeMA#WTP8W16-zh89x%mUQ?GL; zs_%qX?$`{PI4>72KJ%2ngV2AY?O4GJym^=W5p%A>%u?|@JtunhT^Cvwz5hltLF0!n z2eObOZlN5vKzUKNKyj-3f8+-#4?TfPs32tFlk{AK4f{CFlyg%{!LjxJErZhTzlW@D z5>5!gEvzFBlZ+aQR8T5pwT09igw;ygeMA-u_2uml&<>Dv2aKIPj^f@j;inKok9|>& zg<}{_OReIIS*ag|^1G#wi6famH>EgC z!Jj}zUtw8mEb9sbVgdQEQck(5;H)ggo#lr5(p5g^=em0aeBf^rD;>hq*k0ObO#?G! 
z;HO~G^}iAEYuizg(#j$zUj@M*MB#KCspFL*E>=sZ%o zBj7p`z$AI#nG|@4OR&~VfM0>K@rJ@Fd2k7Q(vc@*Q~f76F=f=s?2O5(^(5Mnc5tLz z`+JG`x{t3aWG>{kPPT9ZXjKFCN^X3QS z#o85lPa&^5L+Y?IiWTg%L?VZ%x4Pg-$6l^v^0- z6m{R+100%In{@WxoDv+$U*ga`;CB*h`*3RKq_r3LG`=&A+|x93f#aHSYUF@brz$G| z0Rg+)V^tCJM7Rp#S)UQ2VZ)y!Zk350!j)JsdS__^x%%t12pXCJzwLzY z(?UegQ+mXu0!WC`>fWZf6UF;a7U6fTc>jrFP~x9F)?X#-7vSKqz!2XaY@-r>&i~S@ zcYMp+Y!!WnCirDN$ez5KX;DKH(3^U7yGWl36qX-FzghaFqjQC$jMleQ%xQTKxXot~ z1}y=J5tb|XEJaYU{qz`i;zui-@yJfl>wMYsL*Y|nz8vh}e@|sx9n=6}7vJ@D%>Qbw zENjv7TAiCUe6g}Tk~M_5LXQ`*@;QaGVuM1{j=EEX-w&DnoiouARq7oJJJDAd z4j#iC(E!xj#?YfX7E6smw2hs+&o{wm)-9QWKDaBWezedzT`at~+S+VD0|pxxWrB7n z+$D1bd>mo?ki`5Z9`ay(HsR(1%NXoR*Jd%0w)pNE?BC;n)WH!ffm7px5(j-X#V%oX zzwl79?ZC}0;rJW1bZe0N5bjAI?V@-nOPHt0ivkbN`dg~c(Rxd7AODEnS}kc{(`A}RNtfZ7e`i1(IO;w6JbQ%NUWqbvB+@Uj+L48ob-fr=&4F)2r zdyN@JRC}IQP^>t&iC)glpQBiE5?y0ghWreNr=$G|mRx3V2|;P1AMva1m-W%LD4?vxL%9CP1}>o7P5b#UO(oM@$$ zeHwlJO+fG~=P}3W`zv8gn)D2&etEtb?&CNh?41NoAefHhGKPOYk+R>ww^h;Ekw4TT ze{HJUBO2(ee^6K!HfPjE!{$s52;=ELId@#MGTZ)^gUT0CgCH+0#qpnLJ9IKMiVDH* z1K3tOx|*ZKetm_WB0vcN6R_41H(ixr)u|St8}*GE&gX%(jT|Q<#Fa3U?)2c4?3>a97K#GhV4AAJ+yd*o%3jcYxbl}oC@c{$5`nst5|2cpmi z4pXU8r%Zmk&~7!Z0{O9T;F1g<-lo4WbwG;RmlsfUb?-L-LUf%X%Uam*QLPSFV@)y% zPU|SiUZ5RSJ3wUnM?`$dIY0phFVQ#fRJNPOE+n9kP8&`fqKtz|FiFAwD>OobX$usZ zjKPHo!n9IlR(8B8z^>|5Yxd2U=>7y8a073KLa0A^aS%6*KB<|C;{MfF!NyP@tg8tQCMe9P0jCRcoPIPbV?h?RAv;82;uMSkt9~z| zim_G4+9-u?Xk>kFbiISiqW@?E%o6B}{CNzdax-aDHDrQ8cA`-0?jN{3|kN3KnJ;!_9p~8(IOVxFS%Wl!n9_NC9#xcdiP9YPX%^8 zba8mFh0=LUvy_(U;=K4EMQy(n;Tn7mTB&86V~18|#5`+A4m5!SeQqX2e8|e;SQBpJ zgc;U89>dG!BD@pQbN6Gh?htWfg;G=>VJIT1=7GX~Af&)?7L0WC7V}Cl)wCJ4NV}sMhKYwIz~*r(Q4F z6g&MeCan@X9UC-Rl}2=s^yx=f;3Wp+dzI@T%AsJh)@H(I7zFS%Nq7?Qd5sTj26YC1 zhk=}0yidI?>at^qyYc!uwoca9ui0-glV2Dd*U^z(+rEc|77&Q7JDdV5Nz)W_i(rik z;x`GFoAM(gwKZlX_VVE3-wrck|jEvnxah*3cFA<3G{vRUN^P>4!gC-`m-k@reVwJFd8T1^IM@_cHEVmu7V;3;|(@G--zF{|iK` zntzPn0}mctrhO^&}p0&RNforS>2q*;oKH~ZNoG!u- z0P*DDRIF#pXiQ@$j~Jh2xr1;B0sJRfl*S>6d{Th^j}eD(<=FXz#v$Hv;N37ty;y@7 z@;xss&h=pc4-3edDFFt+T#uDa4e8GW+_4zIK~kkYW7&iD&#~hLW&6ubVGDmT0`22owS!@u-{QmiIov3P5Kj+=034fQ zV1@^iA2IjgA?M26hdkN>O!1Ptu_i22XaLV`6;=WcDewzOPF6P2JRg#mAOO+&#VF>B zaxoN@_FzA(6(|Ihy+3+4Q*U1pJ$N>}vXTrjK>Z7eLF`rN*fVY%j5bQfNikP5&(0NI z#_p$CS~AndR8Bz)RX3R5646&tDW|+;iq|1g7AuPRS&KdcjItq#9)FNBJy;@PTNU0# zk9{2Hvm6hdw}2V5h=HWCN!w3}QI(SNeYJH8s6$v8v6Xs}lSsraw2-nnr$psfalC0p zjwS@(K9=|;PBXEZ%^d)in1bk{6P|1ymN;kM7DEtx|9J>Qn7|NBn>EhOJ}8`^+Krf? zfrHJdsx}`cl^LIW1>0Vl3JDjBfhyEK!dH~Yi#rA@jcxk+E^a#YhT761J8j(QzK6x? 
zYOQ*uEqiWC2l^j-M&7a`vb4WH1MGNhR3*DC-M#(KLCkETSUZcry}Lhyd{J4ol)Wq> zH<7KmNtH@Hp*@PAG<^2ybEjW=ZmE3X>C=m)A@Al%14H z3F9c3UrG7`TH)g@H}e=2?VF9-6T(?-p%JtcQ!erO%Z(fSH^$8yA|Oj7@<;Lq^2f%$ zHFj!jZ~j|j&HOjzAIU$Re+pqGchrP=2-UCV|1lE@gu@FeWl)&I7PPvr0V EzwF*RuK)l5 delta 13292 zcmb7r3v^s(b?*7kydN4#wk%7sEsr0GG_ozrlI=LQV<~>fkJz%4C?=UCDC0Rt(#WHk z$$v)HgAs$VO?cEH;ZH9R)5fL=K|q04+}m7MdRfp5th>^(DD8c4=xU+RrfIv{v};ik zw?MzW&%8V_?YcT^_W9qB{rdOb|It7Fnf}^aVoRc-A*kVR+;`L1_c2ZTF=cB1LdXo^ zXFc=mmWxH98Ga*>Zk=8oUp?IxZxh;4%?KKy%bF3oq{rKl3mXl{H6Yi4T*PQZt`WI4 z$Tb*-fiqfb}uTo7~(-Kc0jGw z?~e#OWSJAwW+vZ%E<30GK;N!5xH_VHfjd)+3qyck+H_Ja(ZM**j{PoXJkhGDBN+E8v_~IgW8f9Spu-#mYE)xO( zv;Xkk#}1oj?xdNJnN()tvH5H|dtsNl=)E)4Mg-IWG@*XidynW+Z+h2$%oFM#;l4|{ zT!A{1-zK!)tG*q+H?aFqHgmz0c}uNo z*uH`0l+6TgU6$?5XVWPwe?XZH318O(4o82tp+o&c!(d;fZA`cI-afhUHUk9rQ~xB; zaNw|dJTln3290d@R5q1y@Nm0XL0oC(T(x>4Fni(Obs5CP+lZeWMD4gb)$-BK zgVgsnM3nnbF{N&{Y)$kb6{{zp?He02vPn?7N=X`(`_-V1m&GgEj@qO6IYbn0y~*QW z4SC&8L|hwv7*$mjMWgJ!V$klL-(kUuGxOd>7Sg;rl8GjH%X+!XWnx&ozZcaf7W7-G_=1D93wfSlX4`3$Rp{ zj5!>wqI3ij9_+yW2{U8P<>Y}+P~#r;wawj4v_2^auml9u*Vec7(8zB{01V+V^7A?K z4Ht5;2FWxVq2`Yg7*bD1*Pd}rC+2L|bSh&<#%6L*45lNbmLzRthj2LMOC_;ZhXGjl z8XP2)s%(8eY3p-Id6#-Kx-wy(*!aEo+QACV+~Ent;L13s6Gi`>ytTKpUpYS=mHhIaYmt{c4Cn&9wfo zrl}A0ca8Qrv(%B5_fIQlWh{S_Qq_H(p)8~01*H5m94*AJx4?jwFF17S90(*y&sONP znyDf{9fvAz$Mb~c%swY9XZAK>IkTS`_rqsgWwgSj3m6`dsNHUU0OwzJ`iv>99Nd!> ztk*s@cK3OI*+f&Vd;1?}eKUZWVQ{dld=_cBMBsA-K2P8a1S&0S_Ew?tI@(qQ%{0Fk zg29jORqIZ#UVe4&R=v33I|OIZl)a8(Wp&CFyboPU8r4y^L}3)vR#F?*xhUr}SH4UQ zUA6~@aN4xJNgkgUJ@Up^sKcwvNnPOTq|1qoTUo=eFk^}t~7DC3ivm2hW^L$)|$ ziwA6xaMZYX&=#Y%I8}#GyD2e_gRI76?j`JQ#Tc5t{HKFA^dhlJ6_X>N9igp?G^dWs zss~vGrSg4f3>n~hMktT>UZ<3saj!)hvq*r-VnIjop4YS@lx8*qlQ@NP7+R(^nShXe zotiSQkdVn70n$;BOF{*VJOm5S>l;+TMvgu=FB4QH=IfCVc06CF%ULYdA~DlKerRB2 z9U2UIkaOW@y{f6-KlFPMZ$XUf;QTY7>mZ_Q$W<2ct!@%5>C&9XM~!=irRyYxm;LoX_QX zGq&%7`u2(DXjT33c1lr?O5YDIzjb0--}On_YCz)D%Z|Z}8*$7SQssQ|%U`3^RRX*x zUUon|f2vEw)i+MvDPB@&~l==>VvjqN# z!1oA{VwTqkkn*!5;8$3ta3jcKwV1Rx)mg5I8ob`KeD*jC<{6p2S$W<%S-c@*&W=?~ zDaU%d6;ZMSqrO1I-miW%vfWEYM$<1;%jqt0pW1)=>&Hf_O45<>06KtgiGa2bMs{j? 
zW;zyjmiT{ZiT@8E?wy?+%TLOb2S)Nkly?igh1cxp@?Rup!?IU0jU z%LgQ47@o?auK`!u2(2|vOW@+VAJ_030z{AbGcV9R62TbNvS za55`X3)xIQktWi8^D!et)~RO%0`Zon-hTJ+Lq{qBZ2x#Vl^aVL zb7fjm19_9czY?gfjyaKN`BPe05DC!o*T;4UaZ?>Kjuah|eFVki8qH3!x++O2$)ko7sp2=r)_&^AMAa7CExyt*6;x6xYO@s;H?F?q_K7&orbL z2{L=WIUJGyhTRIEf2fm18>2#DH^psu#J_2MRORARvN>%5p zkfLq*L+10MD6VTp;D&fcNLdg@pj|5j3c+!GT&LWAt>A{MBaGk`7i#?HU1;}-eB)G8 z!Bz0%-E4%ex)xmpZ7PDAFXugZe<5gu+q6sKlE~7P&)2|irw2Ar;@%w^M#PGI%> zWmG}dJrKzmlO1G)1|FwQ%t=F(Vm$7r6o-3ql=6!hxW*&1Ga19dF*BHJ8XhL2mr>5x zURG~U?%T!(;*;naYpk-BjVK-NLmXb`33;wht3+x~OC5oTy*8-dPVHJr7K+{?R)}6a zD}??_uS@+j_3`3|X<;NWHRrpDY)NVe3kU+%o#mgS_Rl(1RkhVZ95PQ;3k2$xLfbf_ zm&I0Z5_gjCu0rQK&$-ZXczASFx{-iCI|62`JGJWgo(8L#fTv=P)G@*;nuX3yV*X$t zxLxY%yDzsL9756m*MW2>o0~rapVh1!NUx>4_f`%h!I})}jZwe~qi)Znsvf1);76)z zOXdqAv@m{rI+r%-zD~WFZC9~$w750uIK9z?70t};ie_@X&?kxcZbW3ZMk&e@>vahJ z90n@U=2ogr4DjEd3_6Yn@Sw^236N>Y+zi#rO0WWA>q-pEZbg;GycOoX*QhTusb|5I zS<7+VX*ZHjJs+jM1z`I!*$g$YUAWkCV!mE0K^v8iP@@Jch;3KLtXrl&VM8Wnag~$z zW@n@&Z>X284c!%fvh~!oW6f2yCvW*QjrT6~rnP3^UK&PB(RHOYlg|H2EAHJy1lkr< z?1B989js#ig~q8;tja`W<~%|@N=Khdt^otJsPE(tx?54F+Gp0QduF=b!Duw9%$fVv zMvoyXot==0>FALsXAEK$kl9{#l}F{^Vl&Qs;|!VH!=fkrgnq@ zTm9=?q1cI3Ja`aB_?ZOuX-RVTYR)UXRaqnumc&2se+QGBP2WuDtckNRDcp1!WDU4@|mI*p{DF1KUN7%DCjdc0@` z{pJpK$QUyxf>y_RQCP9IBYb;AK`ZBa*Il~a^%x-|jL@RjXn=jzXnTh_cq|Efqr)68 zRTu9*-dxsmP{w~sYZ_GI;wII1antc~bjm5Fr5+}5gupO?qXdo-sP2LJtOW!8H(GuK z3C*v*bnz~6Z29$zd&H{4dH?lb!FNNzU}%8BKsPfHHGb)dIPrUpTX&VRtivz?j??Lc zb>8A-JCyzo)zjp$m2!yEd`7Z%;{`uMWlZP8YU|}#k)mbJih1pW$l?mbiKycmbHW*w zH&-1xP(6+VHLQ+aS=X1>>E7zPhwktV&kc$fdkvk;svBZaLWW@+!Or1xG9m2g6UEUfgNILmDwsS8} z-vh||Xip~r%4Z)L4E38lh5Db+CfQ8aBOr=anG%>E;JxP2lBIW%w znn%7y12Ol6D8&S!{$-9Mv^uOXM=3@+_aQXfK@0lFG{}{xU(E^B|2?gSNI_~vz4lZW z0!I$hJ6u0H`cAup7?$GzlJ+BMn3|!PuFTknQ1KNSpCtYr-f@$^)9;tmW;jV2we+|A z{tu3cQ>Tz)70oK1ZnDUpg9iC5RV)!;d+YO*;(*=&rN}ix04SFqbK-sI11+a|Km2JY z2D6j$tW5rjQf~u*TsJArRqO@1Y6@sTSOlc!abbbMz0P%?q~p*ICSeRAE=1PQi`3*- zu1?3dTBTe0$7z^`8Ot221e+X2W8;Aj5Z|ox(U2YFNH3orh{L0wXF4@Elar+J;CfQQ zq;+9z(s^0V>jFF*RR011v?FoCeG;r0IL`Aq+lQNCGoPGvzG`4!a+F59RUGYx3wx>` zL|8Y8ihD4Urt6T!APyhrwuxZNP4~~ep$OzOB3fBmTSOb$QAayUcvS%km?RaDBqWsoC{1F);%;av^6dH(@PDkHysMyJ67po;echAy7T^QreV4Sn zd&*xB1;kVg_f^+~i!QToZ84!!8GXA2KU_6^{9ykm{1$ve!Il6n?&UjD!9E zjwSH%%)FpFpV?4^>G%ptZTGa9$RHq*ufL+RS0>X5SU2PIHK^>shDzce6is+GA~pux zykpZO6TIm#5(OQPC}`tuEL$v0oJ!=19GvM!Fl1{K!H^jaQf9VB*#V11r&4)vB|iDe zXQPKmot$tAxY~+#pOdMH$vi^ju%*dim*C=C2g1lPW(#bTl}eI!D=w5+1svtv8rhBD z!G2a*&ZL`Zi{Kb=i=4*RMwj%7_q2TR=#Lh@GU$-{Gt`&3;PqEJul(KpuRJ~Y`Z)jn z_2BEzf0F*bKbRzKSN?OKFkop%?) zQyx0*4c}EbQw2oOQGysevxgtdAr>QrpnNRvyY4T9uEACR4X}pLp8Q_HW$5SK@LOa8? 
z*!bSYOwf7vX?-XGr!_A;_{iy#r!VY@mN+?u8*JFG(RW@Y!ldvQdJ{8XXbMT5skjKygfyec87WbMxii^(6d&=B-EA6teqa(mBF|%V zhX#Fb(d>DeLzgK^PZ41MpeT1xdVs(an6FMPN&{sl!QQfm7SmIa?^}JA-u>w;zGbuy z^j5xBiLGL{LGHrH@z%QLx%qrnLM2N9p^_e&cQ@LVFGwFSY2n5pna#0-A@8O^BF8GR z#bE^F;^DLS_z(0+Q%;h;vO_h3&ypB#d=Tb)+O+bg5_1od89z=tog}c0re%*t?xp-b zfLN35D}{fZix#2*P8o{+A=E=J`6x98duj4<0u_YXej?Qvy374kaSs6^x}!!9P^#|G zS)DvxR+zV9C`KBk7jtV7*!MV9!ibf)akjYi6|fNL#%Tpkn-0AL9~X6^HiFtFfm^ii zzTB%GzSgfVJg%O**5^+3n!W0m*I+tvd_}rd|8G26TsY#O7)(l0B>r?Z32Y;ukAmHt zo7?DYDnA(oA4OpRf?**CRy)wyT#+#*>6aHjpaEh@(8&HuKV^0|4cyE*&7U zyHb3+&l4V|z58kJy2`$=KTOAdK}_KUGw>bN3rC7L>n{6A&y4G&7c%Y-YEwFXE<^-< zi+(6VU%}Ic)@wAwP0>S?_zT|J60{5~2J_x2U%9X08HX8-dHke47en~~w4H!{^Nj0A zhYTptLJ0B9fSSMF?he4k-AOZ*b^>`%AYXrJN%BM|AAIcf zpB^Z_L`|j$Ob{S9R0auMVaH*~Q9;$KfSNDGWzSPZnm~%cG6ASNaG2AZ11QjnFoIIk zlLTh@GNC#czD2Tz@G+7lS5v=sfVej`J(is*eXH9{W3pXKuF4ouh1gdE=e3Fs(Sv8L zYW}T_kCZqI11$Y#A-mtCF{?swhO#`xB6>eWQrv#VCE4o3GBj{`r&WwaG3ez zi#>>2tM|V6;lclq_sVVwa~w~^^YU!OuhTAd^rb^CVydeztqF+^4Pj`o3-Ze^?JDY5 z={#_iA_+{B(hYNt7F|U+^Es1N3tCwGz<7}rnPA=u!|oj8v%Hi3$( zC}}?1!{2U{mq1Kfv%d;TF4ES5G$x_r<He&_2jL1i&9jI}0c0y3dwlFCB( z7YE8@2vqDcN%#3ON8m{UmG;B2tt{B`?n77##2X%vBwMd^froDfP{13eZN^*qWV?{! z7+>WJYfDC{%h%Q6FMO(KduH+}d}K$PlOY0O0_<~brxZC}?EW}T7AZi-#i779dt#15 zbcnJ~5Fmq^4?NtvAVt#ToOFbS8tjq?YNL!>m(%n?lI+|8SirOG@n<9rJ-I0OFa4;GS`n@3bJD;Bhw? diff --git a/ultralytics/data/__pycache__/base.cpython-312.pyc b/ultralytics/data/__pycache__/base.cpython-312.pyc index 977602fef61e9f7b04647085c0ba10cdfd183bcf..fcacfb4fa28aecf5524e3970eb279e0b0a7483e3 100644 GIT binary patch delta 3272 zcmb7GdrVu`8NcV6>leo0w=u8FD?Ab~CNvP<5JG@zB+Wx>x}@OvUW~!m?zz|DA-k}& z(#W)RkTVssEW-I~pkzxnOBK=7MOm_RO18NgB}GZ$FVb`0xwGo$>MUiUWQ?IGE~!s)5fBYUc>u=* zs2*dSqo{y{lcEF4BDpLm;bS3xxIac54afvRw+cn0st#*NLvx^u-cGxBMvrgAZ-orY)o<5h;5ACAM>tXF4j`jKo zDU_FhXo}0@aQivF$KkdMLieX~ypQV>2}MCV{7Ld3fu+*g?;YZ;!>>859MAhayl5Tu zy1gfD73hPMDzy#U3+Hk2k0z*xd115qDgZab@2B3TK{dLdZB_>rBC&~m1C{6kt5eDX zii>nifbMGx01C@=j{)!_V0a5y(3IgFZBz$ z%3C#M%><`|p_e1pz00Qd$&Qs&(`4%7^$esm?{BhF*Be^Q?Rm@%!;bbW@o{#MN6w6!X-*sx z>?hzNFo5bZ+rUJ4B9jBjq};gJ)W&}p^k6%SQnU9+ssaqM>GkMxnnGn#P@)3s$%igj zj44c_CxQ}>0S#KS(Yx82oUFJ7ZbfMz=^S;QPFS4+0m(o%`ioAd0i4W29m91G%7XF$ z4d>AHUB-GbKe03*3&=N*g&Ir7W{QA!&Jxv3UUW70;E=E zqH{S4!`Iv5gMVskNNg5N4l2d;O}A)5YiJEf0?I&gA1#`<&{3n+LZC>2vTdGx?yAjVn*wE7Z`4lw5^P7;vf2Hk37 zoU0+RJP? zDk}AR+%C@`x{`lVo&xKL9HR1qj9f^@Q*v$YclNX-vYU>*J(qS zhPtg$yjU-}ZD}#RSS`6z&0xk6pY9YDzPc?>QaFWeVG;p6R&W7v*?rJM_!0u7eW345 zsQ@4nOY6X5_(~~9cZ?BnyyAlWBp;RAg&v<*aE-v%iDsNY0frLEW`&8M5raUc0?V?n znMh{Qv9dz&TXd?do8F+p@0Wc{Th3s`r`beH0)?dVaD>3y=uX82TP(DvoAhE$RO;yW zx*U9LM;KfBZL(m3z>iVLRu6uN{%o^o=7=DKAqqTE&;w5+dF4^?2XvxxMA35gwX$n`MQ56+o35KG3#sPe5^KAXAIaIZMuBScsySo!8}oaXGOf$ziipV;v30HJ zj57_>4WY^tFLW>NjBIaOeEq}i6-&CJ$gYyQ0a_PFgQc(>5w^7MHVM|y|onYiM)L%ofX8w;dQuv)l*UTEy`MA3(P7J|ra51Wr#2cE6K6XJW52f$irZU#{F*uy-W({;Ay(5I4QOy}nVg z-|IoYZ~l#JQ-V&ne9NFGL6E70A7hzRYFd$lCiRW-zNmOv!|@=pzaN8mPr zKM`0)AGOttlfN6`4GfRoq#n@ejC`%0O4Y8Ybt`H56_p7O)Rh$Niq1d?RqBe)OynB< zXIVC8_j(_&Fy^%y4O1~+bER#eaf8C-#cD)Ozne=rwi8%~5R9eQcUTk8_>yNnX4rLkDa{x* zuWvLq4dKs_w*9!}Lt;U$bZp8+<@G~e&hO?M;Xlyn_6Fm}RPjUUi6oFWRrDD}eq*-( E1>kvD&;S4c delta 2561 zcmaJ@YfM|$9lz&ZU;Fxj&C73NE@0z0am>Rc0TT$X0AVXENvEhR8FTDwFc_PhdkK%( zP1@}P8fgnUACj!<$huCW$~qBQYLiLZvX;=PlQMg$D$hb&w?&(_FC~z4?U!ihm^$jZ ztyl8T|HnDM`#b;hzvtdJ5ZHVHXn$K&q@wWkZ11b#mc|>}3YC+dXDKSggqiDrWGE^I zLb3!kV%YU4D#3(U^fi@BDNiyXY7`_W^q`>}rByQdm$`F5fd!5V!`NiNEiu51EQF+X#Nb$4RvGjNuh4$+-O}$kc6Nl!9Z{#5*tN& zZIiYUlZmfj(VtwT9@>C1ydi(JMbo=zg3!Xlw#nz-8hqvV?OR`NzN~^ktn0-gtP2VR!URUqx~&H! 
zQp$=DP%ChR!M`1zMPKpq=!(6Y^(EMG1v=-o(9nP$+xIa}sR+GyumT;cEUqlUgY#F2 zPRL%SUZD${^E^l}6iSQmVIe&`^|GVgK1CKJxh1G zygQqERJ#J$9n3+cRBqhP-@_J^5x~udMMWk9Q*eU#ZUW@Ca1srBL=Z=JJ+0tpDb@ZE z-Ef}7cb6ds&I>s%AdXDM#mFq2C7C$_WZiD2u(p*tgB;<=JdYlDOvvNmz%S8e-5?DY zQ=a-i(Uuo*@*j3GkvtBNbQF{bEF-7yhfR4+Psnp}7>I@9lR0^CEFK98yG72={x+F2 zPvA28m+!EL6wIy8%g7apfv_OWjZ6mMXe0)|Pe#3rAqQePFap1WMw*@h>*!X~tTJ$Q zI8)-vXk5sDVBo@)rj^#KtylK1YSv(8Ps`1kw5xrK0!R2wzUJCfYuy>gp?iFDy0RtR zaw2QBue4olTW!o(_pb)imd3TJweGi$-8goezA<>)m-hADe)bQ(=8Uy2-TqA4GQ6c= z%%{OaiYYn+wm7O-zo3DOSXA({R|qy!%)vu4Em_oHzB+C_97FwnXX6_<1S14$2|R}( zr@)I7<51eINxp9NB$+Qv60P_L@LvAYe*%1vYHNLy1|Ok^ZB7}vBLZ!Pl&}3I0Orv< zM~>?W9n5#gUSi!M@N?wusI+WgGsi{)XN9OpP6wXnVFyXyL1#N0a?)Vp+vrlqK!4U= zb8RSNZ(2K^u^+nWysggozxtcw89$BjhJm2eT&b-6$go$7Lx{*DYIy;RV47cF(wl;T$b`>XLQ6ge@cXlz(p9IqD$Kmt1_B3KZ}IEDdJR{oZ~0($7h?J(23@`=&d;*Ue-kc~l|!D1cr9?-IB};P(XXqU(Ll z7syAA_&kPBzDqrz%`CU&r%cwY(Uvt=Wc7U3WXT$=S(}|0dQ;X`MdFr1UcWNtG|8~2UHE;7W_Gz%ADQ5NC4kK&YZlu``u+1r1@3w(v z`E8DNv-%xQ#a4f8(z4b0CkB$HaNFQXqA@{WfWS}CrT(L(=Q8K_=zJ)+2ee(b*il%T9t diff --git a/ultralytics/data/__pycache__/base.cpython-39.pyc b/ultralytics/data/__pycache__/base.cpython-39.pyc index 0fe3f01655558e36e08081249d2a73039068d055..f4038622d38aa41794e127a998b3ad6ad8389dbb 100644 GIT binary patch delta 3694 zcmZu!Z)_CD72nz2yW2aT&&D=}!+){{+xSdz45UB|4JEOm6yzX7n&t{!mfN}W+IPF> znLV)GJua!6Z=p%GQYwW;GE${BsIAnvA5hw+>4!*_s`9Cy_9Jc8rm0k^Qol4x=zFur z*&yz;zkT!O&6_vx&Ad1JlW*StR>o{^Pf76G^_}~rL(lv&b4w#~a7WvFOzDvcQa3@u7_{UI zQm$?F_!-CcY_n{yQvG?8(`nNK^z`etH?NoMYt+#Tu2Zy2HD-FY>*%Iq>24KCKJgw2 z+ij;%u36M5lugcOKheN+`yYxVF#=RxVP@r20m+BI;QPr7J9a_qf8*0rm!_#l3*JR$ z78Y%%6nvf>A(uDyqcTGof41K<#T7(m*VBJnN%3`vn-Fg=sO#b7XCITBykSW17L3>3W{Z@V8` zLpOK#NNc2?@OsyG_%T0mTMqu*{`TljkPgW*OdYS9j5^+ydHbPn2_7~Oyp_3oa0W$X zmxl_cRdxzHgpDmj!{qT2b?9=HJy(l{t#;UCXYkjK3wzEW<3)t?0O7V%cA3t)-YK`{ zSTl^djEw~O&i)4aA&fC(vo9eXXAnfU62`e%sg`L@W6z-$;qfdAkZMRgHYP}#?{#Vw zgO+Ipmck9sH8?a1%~gF3M+%qj1nNzKW=M*MfD04ehYzrL8=0W3QUc8B{0f5#0!GY0iFw%Ww zH?EG21B8m{tb|DlmQ&cbRO}2~XKc`fV+Y(z}&3Nj1%#YnBb>(Gg>5eLS@s%GDsU9cN>hoS=U4vbc zyak8Rj{z!v{Fd}CNE69R;T~FOvs5c~6Wi(ac3+M3ydqho9={P^x)Z%8rAf(2J(OtF z6*&2%Cb5saj1~7)P${pG?-E~G=&*>DcwNp)m*je)uKDs?gdGJFI=yXv!q?X1KB=Dc zla{uqK#${fdEM(hzUC);q}#ZmJJ;kUFR_&;OZ8N}%}XzA_Z7d9^4p3sGcP zj9u$}J`ry0$CfyZ1B)@C4toc!6u2^NwOvw#nsif5^v?SDe!{dx-b7y8}PR>PH_b3RnXAutJu((;R(q=MYUq+5nb*s7d zD2;GndY$SQKtVHBn1`N>eeJ{b}glItOvXC83bE|JeRGhYhVYY6WJ z(9h8bN!|^)87d;lz`vh#lHD@4Gmu~6&`OaBNILx+?v4*h;|~DCw?pI> z;h@dgOy~&Xg?ZO5&}e63O=b;*8wfuP76%SCgi90~YTcW_%(V&g^6lI#Qg6kmn5<+w z>>ZT91rQP^BnEpO$(XoTR*HBJ`Y!ayh z!5@eEVrX?f_;hG+95+y2F_{gyz;^6BWQ1iqvQXG6R&@}B?gxi3eLwH6+K^)czP|r3H zeuWct)x;eknt>a(Q*I{X&gqVDA7S-W`N+*OV zO;@nrpB*XxOMYLf2uWL zW?5X_rdkYCbmhvG>WVk-I>DXMj}uK!P~1B_CbsF|6AU^IydJQG%Roq(;QhU)bsgxI z-X~ZVw2$p9wFKbCNgp+1#^E(ab+_hKYo2a%-PA3+fL{zITZuj_ym_dIZn^AU$9lO) zSNNkUS}WOD7^{(~=2AB2WWUF?Y@Vh~59gp`xV50}AUU3I=qQZWQx-$Gh=5l-CN|wM zq%dDHOfC#JJNpg72M8Y_VAvSCilO?jz4=8^pyl$zuL@zoNfZ%BrUR($@Zk5wwb*DzOPA#75(XyJVwZo&PIvFa-^|NoF)>jc;L@)uMX1moDhC#~* nU)^`O*GFayuDig+`oVrIy589LN~f@F6Z(Sx?t5Axoyz|Ka+hBZ delta 3264 zcmZuzU2Ggz6`nh@KeOxg#$Ion*h%bl;yB*?lixN;lMwPlnlx$?qJk5381LL&PrN&` zJu{ow?pP~oK><-HT+x>buq6;`kWhnPN=3sIp?0gd*8qD^DpP^?(VFHziRqp=g|ZA@~@ao6L-xW$MvG-7*%6|dAgH` zG^;-{oaD3)Z%5kVh9xa{+eytEj#gulb}Ns(t41Q188Fh0DaJF~4`p6N4r|EKu4(1$ zl~pff^$ATFeLtvfBoor zV%7aX8Ub&@1)5Oyb&Qmm6E*JfnI@bJ)G~y*-Qua<0rB^Imw0d8Y1S`BdaiXnPP0!E zJSEya8`|U_nF@Rexg#^@W-7cHy1ob9#6n(?N$^{q1AA~>PQxjpD}N8tBx_0*LRk%Y zY|};JP>vhN(Ih?rDbaMDCD=x0;=iPu=(ZmGi^qDqbvC$srS}BuKmR~#>@HQ^ddNY1 
zzi^aY7QZjphdX;N8=$zgj3?(zL7CbY*m6O;$^pDiPJ<;TiT7INNg!Sxh{vJ6h2?TCwPO2sJN@m;KxyN z(OA;pbeLF7M#&{MZ+N9JwU~|)G{z!gEe*aJnvNbNUeSJ}FR>h0Hg86zlbAN(k6|wCUd%`7$Si70lvUXUJ*Kshc@rz zFm-EPWJH-LTh(#g<0M}+(5OQsIDy%UHT@w8nbu!of$8Z?TUmiLqOV?s-@Pv}_y)?u z)m449;fAKSHDu5tc7oo8m3QCDySTVMVoMCg_v36))x(DAq!NeUiyNI3dZ^1u-!znz zt`#ZqyDy^mL)2D{Yb#m@G!l$z2Z(@b2Q->2n_{v5j5x62k@f&Qg27VR3deCIQ+&n^ z!T{>@k*NE&BaO=K12W|{fZGl{L-b@YWjA@;UW|r41BVrN!w#opqQc#}OaTwvIWBV- zuYBp;RqOJ_E9X9cHSR%=A}<^xIHa{nqup%sm1+X&C(>y8&2lcTD?q|T26!d38otA& zj&`Ur)2rTuKDB>Bzq4O5knE0RQ=kgHQxYmaWtKK!xedFvKB2 z@~nV`*n^|JSdN3s_cq>V>;=&rEVfn0p(W5o@GJo}=-69{Ahbi74nm7|8~d`eKbi6- zXG83ys-Ywt9#pUy*ZqK(v+?Fmqaev9rCytyFsc<#CYruqmuYlfs~!gUxpmS8ARoXN z2_D=UoIu(y#r93|8pRH76U2cF?Au(Lc3?IAz@32^nwce_zJ#qp^(-ZqoVix+cae5~ z`Re968~i2?KNun&I*foy972Nch(B$)#GVkNLzhlc_(3^IxvQfM)4=u|zadkVDc`N| zIM3)*g69b42!0^$4jqV_uHj?i6m zvi~s?%k#r;GVHX+M^5b|W6KP_hOdVmH$dmppJ{SP{B)#okn|!u`TJE}wPqDZY|F~` z$}H7CY29Dqb*D#);vXZU?Ul9EoZ&9PF9;r7@>jJ4p(6LKgMb9fH2Rbdxi}~)a4L?^X?@5tPkbs;@%=#Lw{1$2K?WPexO-u5eYQj|wGjjQEJ*K_#Hny|<&%c}9v4Kaq+KbPyH zDaPhRu>Epd`9S&n1+r)#!D|Hf5M-g%bnwKo>h>hB2bSwq{m+v7*EFA06Kj;g>p1lh zK0yIN)AN`Cn^`~W|J2k|tjOku&)5M!Yln#9FuqrrQr6=fcoVg+we2@#k(k$V^7DIJ!NOdSa=M@r?8R(9S2%g>?>br zzmMPlrz)J{E|mN`-UH@$)IDKO^HR_P9&gldh0Ru2a)Xjxs^j@ly5ojZC5{)*60dWz z8OgW;qvYQ?){#QCJImX4*4WM~@LM$KVGf3!I8o@>oELBHnrM#@GnFtT39b|DCQw;I zeG#aM0hK#Y9>N<06nVH$Kz>%-EoWy2@*3h)-qnIvn-mLwDY1G?OK^ z=Cd@8;64@M(kn(1DnxsYq>fZSi9O1#~H)nz3BE`RV lAL*wqDwB9ALn<|OSW;6L_I#!1e?g5SD_+~P&(M2}{{mM4@4)~7 diff --git a/ultralytics/data/__pycache__/build.cpython-312.pyc b/ultralytics/data/__pycache__/build.cpython-312.pyc index 88587ddcfc5a4ca92bba3837e5192026502c148e..030a11dd434a6a423c77c738ee9117a10e63a8e1 100644 GIT binary patch delta 1783 zcmYjRe@qis9KUzHyV5KDbyui`7Ot$It=eK_;+zAg4s{xh^T*IFva@%T@}tFfCr)>W ziB2U(5#Arvm=WT(gbcE|ku2FC%S@L2v24i{TZ- zK=>Oj0zd>s&6mpboHvR3j;g7;l+aZu>nRxF(U0a&Z5#2AX>JS`sJUXK6$wcs@wM6VeKk1PZI$mTj2Zl)`;NFI!UW_JVF@ z#rru|xB_LEmgBmx1vvnfC@7cKiK21G5_O1D%rA2x%QXwx(*A3Oy#D`f1Z`6STPOl}z!xFp;d>b8f`hxH`7UVwZWrk$oX2|`&dHEv|$VvsHfJ_jLVy`y9N^diHx`^<8-IZgN{tGBV%(8 zH1{`8+5BmDUO#28OxI<^(&2`|hAA;TdSu2S+}w2Sqm1ZD*AF%J?-(=9I6UdPbj>K8 zwoW^O`4}0eIBXrXW^ArZb)b68bEopAIP4qrEmk^78G+=fe+AS_t-j_2##aOn$M(QhYB^=LO2G>$&M_YRPa0l z!^BEO$4f%Ij>&dBweQgNk~fORcHOttOdip@dU5bS> zqJKs#OV^L?yBnMq>t{Sw8KHbpm<9_US{`a+Ntb?TR72{4rRCj`)neZH5_|462t;zHTtk zq35q5*NC)XngE6C!WLnJg~czmAXR8Atn(Qh)~&SO%R0NB?~`0qV9*nEA!@Lf*IoGE za>L*ilVK^S0A=tv8;o3o&ij9@%~h$iCXv+FNvUoU@-4%iJl)!=!VsNHCA^L|&sw$AX&6&m^t0V+ z$O6fI!2!fb>o;8_B*}Ts5Z)lxr1@023w}(*mkC@UaFxIS0U0S$EkA_GTX<@>(TH?B zdbjLe(?elzzxfY~I3ol`Yo>(|VO%TBQ=xja_3m5u+8+uBQ6%6G?#Av-e@Pfa^kH~{ ztjvB$UhW(;g+efaQh|Sfj80V5I!XURQgllv7(ydeetrb=(g delta 2128 zcmZ8iTWlN06`k1!pCUyrDLy38TDb~is+K56uIxrh9LKg|2aX?g9N{u+yG!dI1sOt1Xh4P_PK%t)&LVpq<`G{o$u@?$r1Wt?o7;9Nb^i}i@ zMMg#k*fV#|J@eR^xp)4se`&z~na}4zP<}mdHP_PovHxZ4@fjU7-`>wuQK(8{;3* zgU;6jXMRR>V#umNw2V>Gy6L~1ANsbzqGZn$&S!O3p^F zNj>xvU(Z8s@bU27e*+^ljSRqzqBN^W!6v3y;ppX-$Z0TmlLHh?K@mq_m0&wehE5Zm z5$YW`>vLG=Ffv6@xIp39^wg~lh2jQ0q?8Nt8t3Ih?Pd* z0*3Dgt?)ilf*)`X^pIdyf8D=XE1}xz!~gG}*bpgAH9-Uffz6I}3nq@5JYejHCHX-q z)%}|_#c$ygNQrOMsg?`UQ{(EWBA+~Z_^7-;V`Sj?Etfi$A1Y^zf<-SjwoZs}ZPcR0 z7tWV0L4#{#iP=oHK#o`*9WK)-8Oo081C~QKG*uxMU(P7``iwQ>_8!Yo)<{l0cY%9U zwi?EDGNhCI7%3XM<*ZqjkI^IKe7U4DUMQDzVmT+WqZwT%&{kl5mMaIB*icKw+R*kM z?Qxhb<pwDQvfNa#b|D1S85cm|==2j; zo>-m;b zVMm!Rh9dFl!7GC^@#UsYJI<(P6m6a^Tq(>Xmm}Nk7DlzAV0hX)<+T&gy0-=SCA*FB z`((5gg7j+J+o9>!saCt4ktiVc-;>dm_=!>~tL5ujdJI8@{LA2c2OTM8?9WIyaa zXJUvL&gAlZJw9G>z~$xHMTdA2=zoI`VUc!7hp>Ydr5#wHGg5+gf@eu;^10?%qJVM& 
zrQGxz={Toxv?tW>cm_BvOJ53Y#R~m>Xf{k3@k;>9dA6jD5KRa7v2YZtv=B~t&VlC_ z@ag}g?}s0k=AU0Y{@0U#Ik2>M>FgK2qu1TX=6eKYx+sqUM}uI1$3`G*&un@`{D z+z!o^RPRcvXFj^P{Zrw3>e-d9-8Z-GSPQxudTrzqd+Cc!OBgN0A}2Ul{%9;o-;2G5 ze@+we@5IY6XPL&Di7e12!!0H`U^+G4M6Y;?Pnv=jFlzsakcmueawh)~ zo~z|H&~%%k=6@hRf#xG_j)TFWfPahW0RLuAs`=?pTU)yLId(8roe|uyU=|PLQ+M41 z+0p|$EM|7oS6e?9Aus3`=u9G#*3}C{F6CsSKxCCYKyt36$!al2G*ZlxZh1e+WyVM0 zDbeM=ojvqYV%Q1!fn>&p=-tHMpW&;4YI$B(m7!YR@YVdo<2e{5g{z7bw-M1-V11TX zFO6$iGGv@DlayOyTc}a`&6cj--!iir`b?-zF?fT)B!f2@{EopT29yDz2ikf(?|{j? z`c*p7cB*goz~Y%N2FVR6H|f0P4y{P-Gs$`JhSbNJ!3Udnq}Iv{vp-opbwfHxU2>x1 zFpM(r9~m&kJ*N$`S~#L!gp<(N(>ahzX=C(|d>0eCyS*oH8aR#Jy^X(khn{Osc>V-! z_Pppi{bPIkDU6@O-y??itN3r!|25kF6-s}LDa0vyY|K3$%~+&~s=RqGVBW z^Ds^(bGg~NM`yOZiXUDJ=$$I{>n8`~JQ==YIXH?Q_qLGXGly?@++ltr^h0O2&T2jj z3zi_k5Au)}vrk^h1Elq0@yh!}pZg`g#Jp0i8dP9F?IrziL;ao%gQmS81^7){>E`iI z;+O?L^tn^zKJ$2;6)c$0kJFbiuuvA^>3f~fqUGR~zLV||RuQyGRQ;rEED%zJZ_KaB zH0&hS$i&HuM4ph_@Y)))T9EdR;BUf6a{h88ZJLo1sg%@YwvM|*AX9^y#~DmVVlQ{S zQQe}>-MQJhIBvXB0lrQ4BrE6w$Ddc-4E&W`)l}|P>Ck{R9jwWb_UT)XUoVBLj- zO#h`#RKq-iB-_Ly-8fBRW6WY&|CfKW4frMV9=U$vW>rEoD6|YJIxoRo%M6>b$RYgHI@Y_7E=#I#d^(aSbzE^ndwt};f&N$akcQ!ZTWXe<)Ty91dRiea^LZryT1ghR3{FwWBG%m9e%S$r(oBZ+1}pjZ`NJLGO)wENdj!+%m&P9mRTLs7vO~ev(Ub3Ovt42SK)^!* zdjcK_Scd2A?&4>thUXBZ^cm8rs8|M-=(w`Dgl@^Ul9JgGGF*jCpqOHBiH*F(7KBu9 zvmDA8rd*e;L4U``Se>sr?vZ84b&h7|#pewH@tu$0%R~okcIG>*EFO~*Q)%tBTTR1i Jv)at=e*q}EGAsZ9 delta 1315 zcmYjQUrZcD7~gMpZ+CC+ZaEIQ1B$XhOS!aCFclkYtF31Xh(#%F0ZYlk>|KvLb}zGg zG~iyrLTh7kF*w3rh=a5C)X?hnH4$xlONxM)Rp!;a|uC#EF_R%M34|)bE zoR_lwd=yR^#*}B%i8Z_6?7a#70B8AUq=)}V2KPWd0GZ+3*yWt#RHhu8xka~ddBZDv z>reAzN)L4MIpu5U;T@5C(9eI3%q3T69N!77V&E`4@YoQaSL?Astj{N+N`?QS-b|#0 zEvnW*A$vR848Sw|C;ba}me-?;aAfcIXdl2SZpKE86sEl(^4Ix9?A4Q0E0IuI)u`T- zdMJkqg%V*ddZR9(QEJqa*jsEfb-G$CQ)_;DYTC-#fsJ+X?_!U4H8kHTtT}GbP%0j( zxH*pd&t;SRuh>F&yy`nkk23s5LRckdqv93az~{5^-kDRF&0Z96M!;EwhKy0xDl`Ru zRAMiS$TUJ$Ysf440()M>BFlELa+RGEy0Yd`=ixTv{M-1zk#Y3;`w=8UfPjKi#i=(x z6-}`h{RjN{`hFV|-Eulow-jRc3s8hWjMQWp) zlQT|GWv*qr)`ZJ{PUKX;y4gkEYmTa?g?foEntd?Gx6O|a&kM(Sgobv_VZaVgGg?4Zb65)?hxE@#KA*{hD_7dIWt%Uc2J zMe#VTyvP1qyy6U5IVW#d%lOoMYxL+a|KPD@%@3GkuPwdX{?(++#5HXg>qWYR?jm&> ziAUk(Y91@dLNg;vnu6_$?3nLW*^09iY*d_VlnKt*E&gZwlS7+AZC-ql$ZiUFT|g+H zCg8e&EdjUqbV~sHmRTlZew9{%yyNXyU6W&;6fw&#QXQ=89Pij#i@wQ~eb&}5kC z#!?X`BZ_VXiLf%PjHtR*5=@5G-D(L>)2)S471l-c-TH{3+aUF;!^ViI+XS%Yd7|4) zYazGj3He7`Xx;N&mM3@9Ws(BeRxJG{`a$Cd&5=1^5B+56U`i{0{~S(&hg+t*AW&OSy}y`*dy7 zKrF&EojTFqL^I4#l;N93W8v7zmS*w1a&5&Cz_Gx`VKzqGu^lB%ng!x0sn)QEul&1` zQ-}kaMvVh{*&^{(O`DBE(2H3OX0_sPG@CW82(A_zw9nXfBDf2)-I#TXuV}qm_GyF< zidVItvmtM+lpRK>Tl`;bd9okCVfbV|D)&iz?34I__MjRvIhg=Z!gqH>k0-{Ha)&@( z%)<#%AZapgBnT)KSoSG8h+{5-NT>z%AdW6fYoJ0aY2_u=MMbWiBqls6f?u5WVjexA z;kEKeabJs5F=!Mo*OrP`TAbTxwLHCCb`X=QV=!4l%LA*qw8UhdLG2)JzMRB%eZ?AD zEAxS@Cv<}DA(jFE0glD7N|$X7k6Di-Fc6aizhO)Y0&hsggv}Kr2cBse?FU z3@SoIP;m^dL&7K+X+5^nhF4XBQB+KrR7Ap*F!I0p=r?^?KOVVaR`zrC8;*v#X5H?PEtcf6w!f_<5 z37cRNtjH3QOV|ZFZAA!5TfVeQJ^DmZ!qHD8obb8ea|<@0q&PMxF=e40h{ci<1|Ef1 zN`tk8*xTypvCu`>$yevwiNQJ($8d2h}lz^Ix)|C>p3r{R=29Z?- zl|e-r_PA-6_y=Q|(nA-ECR5oy7tTbxkHb!KI_NK6ssnSwf{HZ?8rZ=SsofKYlR+q| zy}vC{EEM;Xf@jc&BIt|*3bG|QPm&xt@rK|Wtia<>dgOy52|+PQvWE&KvydHMZA-M2hA58s=cyNV&k5snjP#z-VIo<(ej_`2IkH$!p z6dY4CE~TV|c3&!!&!C{UR+94{m>pH9iXKRGxy-t>Q)Swc=rD34S}d)`0zr^y^jHil zA4!9D{eS1Rmq_PTA{|r-Zl5$`mDOYfMLS7Oe251lPFbDeFDyF47&iU4;Lf3$Jr6Fo z%|cfcp3C5)*)IMpwp)t?2kq%s%A13N`)C?6Mo!Sg6*l{x2q?!Qd7lNHpqECY9(^Fu zmGWr1O6CNVR5`y^M&ujCaa+wnC9Cfz1h*i`+BeDXY7!Mu3R)DbyFeCQx&B3(Zb^vL zLcTq!5}bt;gmUpoo2!f}pim)?je~L@Y{RQ_RH%q+;6{4!WaMrT-)B|uXrtaN)SQ#b ziB&4l>k}H-f(`N_1~65gi8<;-8#v*w7AQ@!ezbkz$SF 
zv}@C`PnFC1sVr%Tw{G3LbuUE+_y84*MNcv;&(JV!FvxIR-*7lQ>Nj-diQ)`2kfwYT zi?3u>9XU>iSQf2}tcHu>P*w#MvD)21YQ%o`7pqu&Vza7Hv@hnX%o@UhUM9@-z^JTF zV*ZcrMzPLQM-Ga6Je$bp#IJg``@3<7DbGz$FcyrlZ$r~Oe4GgM$B2*D6Sm6KK*qX! zZ08kEu|<)*QB*tcEIqg9?4GHbo6fqdr)FCDmiA5Uj3raQDP6z&R#|0=o7$MJT9qkl zojGx%?6LXs+Sm7<-#fkKW_c69w<@TqqN!)n-qo3kHCM?S73*$MtutG%TGFd`WT>5~ z_FLYXZ&zNZoFT80H@)lM?)u@O?;pB$;=1=*UuMIeWZJuDj(R#xJw1EyaE9uhqXKCv zkfDO-+uv_!nz7ty*mRTHoo?7Pb>e6CYwh2(U$;-$XB#%9+SAl-q+EQVc-s3$*_?MZ za4^$#)t>RTz1{o6f$tAoC$G0$JDJ(onf7+hQ3uo1!P!II8S2O!)tjbzGZdX_pRY(# zsclp3=Xa&Z`C{sI>v`)md9!%MuROKWMQ?fD^vslHd>d}~cFh-i=Zfpo#r5;$Ro`0m z?Z+-WHobMmm~MDHQ?p^NW?Q;u+qK^7-b~FC@36ld`N_!a;qKX^$7Xw;ojnxD?Cnk0 z^v+cuPgftG4M#K8vAOD#>FSf2>XCH$i1t5~ijvZWb>drPt2ehy`KH@1G^DFq&Z`zw z3aa$I#&vH8uhm~~`O!*P$LzkY8ymY)15+nnIX-*n86clN6!=I9cq!F_hH#aLwdHk5 zD^c2TPf0k|rR|U1*+&u;2grpA!o7q1c(Z~~8(-M{-0oS&_G=?^I}WCI9L(%EJiEPn z)^_Aw)zN!jAPGyw>7M_&qaz#}KDqmOlJGu3-gD+?eabBZIr{t0xw|~-AD3CXYBfLI zWb5*oez~&!b6ri^8y!gBiHX5jtH^4EyfpEBy4h15yD9yr?0+>7# zjfS|!U~HJ>S+w3I%;O9%;pUpbVj|*>el|87r5iz!1&=rOg_tnSqH&zn3eTGDYdcPbQ#fL?7Jip9dLdlfcXLH+QcZUH~Y2B7dDO>H2= z4uPIyi=`S)ITWDTk}I{fwIpb~UCRz@wbEOo6W?6cuG9}&(FS;_&aX7k2C=2iyTSErDfhXv?^aN>W_{py)`jz+BM>XOLbvu%p{M%uQ2e;-lHAe*~4j6~`L6XSj9M!RR ze|zCW0@_-7TlQ|*B0nmDC@6SPx?371H+>|aBP z+pZ_11LgynCwDYN z?Gf@`iTF>xp(NP)1RKIR^ozG+KYJV+;>IW07(QG0M8>uGOUm-trScC&`#XI`>3L$m zf;7$`4Q&YI1>Z2R5AY2e?&}MUurFdo89ccvs|iI|CO~JE9M5Le+(1D5#fmyu&!1Kt zf%WWLS)JUB>$#R&&&S(I!d;s3XI#Fq&IL`8OEa&tPTCig0KKoXOp*&~fI!FTJt=ZQ zi?EK+m!;Yl^avYr?M8%6xpp(c7D8W<3M^O=wh?;!q<6uNumj*EREn_TB=p58`+^H$ zH^Pnu55mQS-Zj~~P=au24lYBuoY0r2x)v%BuFTrOFF}PBF?$6wlspziQ6j@ming^b^1N8L_DLts!=~iyEj+W;;)`p4uKd>`w|Mli z8}6E!z~#EDEtgl!R&03Jx$&biqRjs>C3dU}C6^Nx_d+vaGLP;2nRCb_(%TZHtoW=ZIl+HfgA{82r}VTG z5s;@%m|?vN0dtkG#Ect!U(ZD}pCLhv#gaRW!NrJ0t_Sl$$scaj(*|T~VnPLBFxJQ~ zmob)NP*G1oD+q@;WiIWdi>0s=#LylYCn%vyE|tnM12Kh1N($`wPjSZ~3S%vpBEM8{ zaLdJCn+(Z8zs%E83E>`6ut_Y~;eg6<6(yYfq8ltY1SzH? 
z?<>SgoP+nfsN#36T6DH`lEdi|tm1;z<*6(X2A4cKt`<9OZYf?RxbO*=JK&Xm8aCJp zF_&-fZHr9W@AG%v;KwgIi!u(loYeC$vV*voZ(5>>@4pH|E#)EZE%AcrFlO3U~=KCmFEO`>1DgO8~q+$sUuK32{_sEXw$)4yG>v zuKfC3b#XQz))R@*OfoDj9oGC*;UQsH7`Y$jaxnw2`x!REfWgi)5AS{(1qL&SK_Hyp zH<-Ti&EY4dYn>Olyns@@qZD2!Nf^BGlKl0<%Y8qh zPmxvgaNEM{Ff#l*%&uUD_Deom1Q1xsk)FO#nAz@AO8r?a#EE)i90TjeYZSNKFZwxq zT=F2;zd=%Cm_3i#UqBYjzu*Ao%p_Flck3YA4Y9Iwgg_hvTTl8Q4-tpfuaW0S(85L) zF-m?{3DGkJi{3{k{+lUDz#=B$cmMtBa*LyBV2+{@}8$1NHeVmHQX@tfyJ97EJCmg=aU$WZCcy)e1 zF;2!LB@HQWJZ=V!_uf~PwZZLW1L09V6y*GPQ&_P0h^(uy8C>`<=Z_6>ELea<*8Wgm zE_`MzXlKDbBv>#EA*x1)f=OO-WD0Cw^o`ipG5ZZ>zl96}bph@;msO68MkNm@s~HNg zpu%&W(5r}|LfK_MK~M=wBWF=hB86{YHh~?6Q!MyPVepDMp9g)T9y$rW(*^9)%bF=H zty5Z%#C-w7+PuQZl415TQbpe~tA=A?xU3=+$r_PJK86XRi6@%XhI@OEx2z7XsU(Iu znYxMu{uUz!><=(|6Ee;tUiOqGbLVe==+N0Z*tUO$kJ|!gJx2V=PpBJjtD0`B>Tj#6 zZmUYb|Fo8->b}K)ul9SxSNC7}LT1(0o69?HTDFbtxTQ6WuRXKz^v0AoZ7ff9r;YU) zEd;u5XdCBsMdO+?#?!`G=f0b|uH?MV^1jMCwp|KR+4kR59eB@HG`UN(jO}{QY@b|y z&VSZ_NjbgatG)~YI62-jZ?jLMEctY)=+;+%ntFbf4}{ktyGKQ^ve(T=AZ} zYD)3ux|su+m2KA!%J=0GW3oTEAIXr6Ir9FNU8Hl!UJ#~54wYczd@TJwZ{7E`i&m5Zh{H$%$yQ2pmOQwqi)o}G`fH*Md(->2w|Lm)iT)9WAN zeJ=K^;=OgvNy)D*0y)iJAebTRDbNA_oZdi{%t?`WDFn~<<2862vkx)*BWCDO=Nw;@ z{lb6DLq}HkIJ^-zSas$~;vWH>9{{t|6mWluX delta 4463 zcma)9du&_P89&GG>$e?06T5NZyqlNXrmp~PXlP1HQ<9XXN$JD!y-n=;5zf8o!@F*R z#)i;3NOh*+P%#2SqFajy%O15Ie>T*qyw#I|eS zbAIRhzVn^m`OfoR`Z@e`%y`9M&;s(D-1^#Kb<1g^Yg{0u7Xb@c7zKlc91cQKmPKVT z`Jfz9nLMh9DF>AjhEdg^O0riEYDlh#GBNF-mgtnvfI%IrBB`F$v+8Hypn=tJ#=|m} zc}6j4VzoqLW_2XBoPhO)FDa4J@4D46r`X-oxf3@iYGlbTXKUbdGXEZ;yG0GYS=%y_ zh;c2)o*HgpIe`la0t$wXgyX|FD_>VOKunC}<@F?)05^>aeQO>ll&z?LwpPK*7&Vb2 z4N35ns##S+OE&zGs@+^cVI@tfX;On->dmS(6t>|5>I3FSDSV73-89*SKU7!XJL=VD zN)1U#KSg`-MvZ3=N1%tiJS9VTtdQ#u!90*F4?#rDLJE=;AIeou-diF=gD=3Mf`#Xn za7rO6B69qBb*T~A1!d77qQbDYB&sNyK_DtvSyBgRE~oinStryZYfWx1j7=<|B^V(@?|)b|t0~f>9ZG3L&HwI54FrO-=t@)<~1aHx&u%d{H zZKe#Omem)$?I&5vC>lf~YoHSPM##*=@>&bNs5Kk>#oD4FQcYt#&KgDa8St{~ys1d9 z6HLnYuL9snGH5AN$}H+d6D35Qr7WU_HB&@#%TjKYG)$@_WgP}78+q;Ib%!J{H?C6$3a_SokvQ^;?WSj`(Bxl8x!S7C#p#N zN+r1~Nydeuxca-YluL9CL(v&&qEWOai3z18+KxhG5p9tQY3R@C^$eVZC?FamEjam* zr(qI4T`mNQy&^u#sQ2Pn5?*yANLJBK9Hl#2>-Tcj z%R2u5Ht^q!CY@WfOM#>PrfWuL((7crY!aUON($XWW{-A)7QYo81rvkwe){c3cE$2w zeh)SG_ffN#h*s7$EH4fQolKGMgabRwmYt(y1G~5MH6kO~ZMfI{B4W#no>*V8O|n6F zg!ZC^5Ws@jzfOjjVIVq0Y0A9@-&Cc%aUYRD?22M zI8KgI-@vZNb{%cQF{?NmDA^&?>rN!1iG$JL5Etd~5A3g-=?RMHp@F0WmBB5JP3=i} z^{_7owe>zkkGGtPJ09i*KBwYG#)NP*r$R9Saa>LrJHmz$8jx)G1QIwFUvYFoB{n-x zYU@|zv`a0$>1<*&eDElDkl5poowYEGp=%SIz#XowoqK70eW=j4gP}wyf!-k^x_J5L z34sZ4y8)POlRLAf%86}@YSXjhPmfr@s%~q2uDWBf*M%SxdXS=gi?U|~s>(0{j>h#G>S##FeG8340u32>XUNKFX&dRR2 zRuX*O?Me4dugG{?v+lr5$Gp4kx~FAEKGU6P-IDcmrr~vO`6~ye4xH_n>A2=y`;PTp z*G1P{U@kaUlil#>c*gtaf~Pm*>Al+bMAoxs!LvW(*`M_soPwX#tvuU3U)O%k)0L@f zpAKB=nS(F&%=M&uuGY1uVaC%%J@1{`I~_c;Z^7G27|d8_da~Yi?^M29cd>4+WA3TB zwb_l^Gv4hBp1zEy@9OS7Sx^6h=Rn4DAnQ4lhKufTPg*$*r_^VN?m+Pn+4s*J5_y6^nMZ*BY4wwb;QWtp}|vW;688oM)% z-IvkTzTMfzC$2X(OX}bS%Y}}afo$WZ8*GkxnazcN@2NRt0Huw0pl|FH}=RQfedzHlr!%<;HqogFseSN&z$Cg0IuRO=%T zZroChCtM1mSg5QbDfD~S3nOIljm1MkI1%@8$GH$$D~WickILiRF<)UV_>P66QQr{f zqsxKyh2tA>ePtD1zs0@ekhC;@uce%FG#ZR^(h|)n_>u6av`~|JUPvIquSp2Tk4X36 zqsD?ze6!U>XXK}>dFkhBh?#8{kd z62|FGyy+QPAR0;_MDLC{)es7^!yKYF;-n!OM1;jrZX_Iva>yjf_!wnT zl$mKI?FbhQhZ3U#hY-DN=Co2b<$aYo~1q}sSkCS3?|+G=~oOn8^#VF4j)H9!Rp4%gz>h!zWp!+j?#2<)?Po+omW}y&P9!B(vp`G^qIyu3G+&V@+!bM(=e~5SOb`{bVr_{ zSX+?mDApI`28xY<@uq`$6UAo0SSBm-7K*I|kCQ?PE!Y6#N?Y=FiX9YN^G=FgfU!>w z<=qsQ7H}EG9>93gy?HOiD+>1I6#D>EnqHN!ptuq+*2z_QKgCrNzEMr^#LnAwW@Q(= zRRyg0{iY9LKR(_31$+SuEywWYRv$LB21x#)Q#yXfX-HEfsaNLY{8$WO><_d%q+^jG 
z6aQd1&T_}m9b)knz(U{v?4QjB22?##MahM9c++FNyWmKNFm?Jfnw+5tomGfVGsz7) zO;C>E;$txm1qCjrK#6D+y@XTiAGgtV(Nr<{h5&vo{(SvM%G)YnbKW9dD~H zSl-rN^|t@X_5j(7%bIUj;;D^cV?8iB@~eQ}FtP3Hn_bx4?y9O+ByG*jE%ab-3C80I zAxJkyzGdsy-St-VJZ{yh02&u_bBY(i}AW2>hA-tsmQ2z-qdE;j*+1ZeYsk~lz{#L2HBX%Zx9fn-u3jtUh1Q5cQe zABNh(J?9RW;#zKglmni*k8{tx=ic-9?mU0?CtAA_k4FXgjeh1}M>2f_%j?|;&DDMyI1NB%r z2IUY9UpX$6<5Z@RD?&LzM`?m4uaI(5P0`d1LI>&46{$Q(hv^9P579Kuz@C75IJg z?PtZQPk=J4M{XaSn|W+bwbiP?no)DV7MKml?9rQl2sA~v5jqe`;gCW1%b~-m z2`o+`nL@J1eIqm*n#STG_x<5dKXd|%Cz1Rql82GZBRPfSG?I@YIfLXZk_99mcV7)Z zknXMO&idFncR(&2ZUU1f0uhp|3iS3!sdTVvR?UMI!!YfNt(k^(@OBcL7OST5gvxA{ z9z3y74Z5f0T>GtnAlQOKHc5*(gpvZ3saOz}#W~^pP%G#JR|pNjBW=OCuI6lUQ)q?k zfGs(~Cg}vXg-(bDw_*I8a5ZrLS}P1aP~(!qE8&ieXSxj%Te2hf%d#U*c(YAj7;Q!D zXlKBQY?D^>oIqtskc7F)3*lfm>q|A4B7^; z?R=&ccVbSQ#_~d5=p>x@Hi2dT*iJgJPHJ1AaVLCTcuAy*El}{PxbOU}$HZ2mm2?Ii zFtqDZUTCEpFtxk@bdb{__eNy0{U!*G)8V{um9Y7%WOc?KTp5Cvk-Pw2})&duoVFrg8>Q)^|NY>haX&d9cq6>wqB@Dm`6%{arbD;eAsN4zSs ze|F-XG%aisDmMw%?F>jMC^rNe5J=5i8*zR_uzU+DgT6w_?ddpFH*ODJbtm3T4@3^@^=1 zR#USpOS*~;ip?sT0r5{kZubWk) z%)rq^^T^8GB6ZF3MHWEj-S6BxTmIgB)ajw-X18uSO~gStEy!!H+6lz z6g|_`1l6LTk5W`v2OVADkmW_rTDfO)?2PCCykii(_6? zuPmv$wFsj;neW)QhYq;^Hnf-2+|=-g$fkRGIQzcKe2A_Y>O1v`b(aWXyK4R#*sKN+ zVO(}!9{%`fhDhYl+j1~NQY1uDqDbz2I}|t}vR7{AN2W-id9QTygXyP5_oumi@4a>H z#;t3Y6|G*WsTQ{irSEs^+LKW8dE8_^8{28P27_EBmrmTehDF6!DC$n#I}+-$pUv%q zB4=F7jX$WasnsUtock2}0$5MesA5gIr0Ke1vRcK^HdNl_%ThGMHfN%^$UibkGG7?% zF5!ua&ezX~JDZ;mvkdGA%etS-e~TP&XGec=PNqSOxfoHN6`m2EA+^%9!?oO7X*=TwoqoG!{WeqG5 ztY`wTh{@JV>`3;GGvdInb|qbQ|8cCC;-Y#|o+WO4eBm+f zoWZJTGWu?FJ-0L0ZKAMeKx9#7&%?`$FttL_2P`jW*-UG|tD$Q)`wQr0FC+O&_j}{H z(2ubAL-)7if0FtN4kR@%$j7noxXQ$%dw+oaxGFEc<2~HaJ_9pb3bYHNdwrrn0`4ml z$Feo3vt=OV$fC)#nr2k=1$MzrO^&|bhUN{a)G%uE#R2Z5G~{)ip0vVYRwuu-_tIz>_MBO>=t43sB8^#{qf~ z;Ppp#aDf==gnvRzTnPkhoSJT%4L*yGHF;mys zb?gdMmhx6H!oG>j-@+DIqic(pS=eRwvHcT$Se#q`7lG&N#C>7^7!lpS-ap>PhT9PD z>x)aJhF!%JvGdD9Ap-y7@V90FwNt>_x=<%AfV)r*=wdz4!aEazJCalnwt~DI)I;@f zE5yqoxI4+MFv1;2umL*T12l|CgvwilMz-2wOSWSGwd0fkGK_NAK`u&c!wGg0bbv#I z*ja(sI64LtDMCmToiL8_$AUP3!&?DB)K^;(CxU<&fO512I2~beD+YWq;35}A_V=q_ za$*#or$kEHbf_jS`v`a713tpt>4ZyLuP^HN*~v}{M5Ym#0m2rYxFd1Fh@P`sQY!(t z928kWv<>k8hyarzkUeG(<2}QO!5bp_$_UND9fLS+vXZ8Gct-KUf*OGiueKsJp_K$y zJ{4SCGKhIsvHl4cR%I26qj{9R;0-TO)QJKrBH2O#+yDq%(>zmV>;s zlk61e1nfnc!?>|+q3xtIJMi&_lZ2I0d)yhMlYkueArS9OKro+z+>^xIgU12+CDyz2 z3<1&;tz3U@0n|8gA;v8UDhB z@?eVD&`*Zg)Q$9|9d7-8rH=139vspP%0~ z(4EeWxBGEErO z3P`5DxUA{w3CPsg?}gzD(UJ+UMQWp;Y$SG@I=~~8f(Vq|M8b3J(^z^6$Q>X675prP z>CeJ{28bh^Pw>x7@J-?q@I7Z0@&$x3kz>Jmp5u2?zGSQIHTxZD--{f4SBtYqnJm z>+PjQT%0F^t8r8E?cy>DM*zjXgXAkndce+y=!18i-0uOuSto#1$i_(b35=2qi4sYS z5}Axbs|!cx8xr_dW^+C3-Ip&{L3qQ1&4V^eusp^ zO$38{5q-MzUEp^V`3U&&bW>N4ayQur%&1&U1U8K2qRyW4wueRuZV`usRQ5^@!JIB6aYp#-o3L~fMQJjx}md%0QL=iKet zvunq-_He;YLq(jp8vcNWRCg6Z;!)Ao6!8aDsf0u+&=LuRSVff(>K|35R6;^jQKj&G z>v!l`{>^;fe9xJ0zS%#Pu01wah(<#Mej|aqEU=r9e<88?rvPyj{;j707-6(b@+%|F zQ=p}?RFU&CMV?$%D!#nWo71vC@Au>bc@;*b9IS-$Az1S<{{qQ}S%9evXGlK6T3Ccd z7id0e#8~Wylr=GJLCH6hjC{4|KTt;K4wdT7tET^+{RL;b_Wa>26NVa<{NNE0uJVO3X- zohm9~QBBWDa4a?)#yqVlj#8%$!rN@AE;~wtQsO8~${@-aG9nF-p?J;b_{J56f4)VD zlF0{b-#n@LZNE(%GEeJ)2C1ve-vEmPWJw;X*8;Gj%u~-&U_4k4;ej>4ZcTOO)Xm21 zuoIZ4*V}4AJ5rB2!3M2`&Jq?>h(hL}GEA+XSTFA()cgAeAPceZ0&zl2*2w_4uZ8WH z-2`I zwI(O*#F%zN(Me5nG}f#Wz_^DA@%v!c+$Uf-%91)+qWsAvIyq=3#*@Hl(Fx32SnHxx zYjs*#kfkn5K5!266boLKbiz^@a*)X62=dY_HM+7M?^xsMtZkLoyv|F3Uiu1y*hh}Jls->LND0N!vtqqbQ7?_iorb`mfItkd7R`0lx z{@97uQ>?u~S#Zju?KD_v*I{?Ty#x}EJIT5>I!90iU7O$DB%)tbK;#8mUsI~3VK>^G zH00T_DA|d6(uq6Gqf&!rNIk((J5&r#ckBc_kaTLVq4rG0h~ 
z%2joO5Jo%e0i(V1()DL*y4?Z0k(l@De`u`M2CilPol)YnIBhz4k$#=jGUhIzDe!*^ zvPtB7o2wv4?2e>PcCtQL$;{IvsdqJqlW8Sv+p24M(%f<>;{9;XtUH^p1)NG~GS7n0 z=e8F#8T!skk zzsYjRS~2OM0aLuGwTpPX<8!m}w$ZFhL%A%6EQsqjWEK7s{b5{KSRb z+g2Afy$?hV>m7&eF(W%NWftvH)yx_*MiDlnYJN70eABp;T}d{3sZ=g!FBnxC7(3oyuyAIU@PE&)zJbY8Zt2ucTY!Y#(z2gA1d1a%~ zf3mrv8G%}koZ#QW6QZ2I1S5|kM8rq!2M&29OI_bcxmvJul2>f}N4UOTg5DF!=YaQD z!zd*m{=!M#PsoPd(_B zwy#*Na`tKd{=Vk#F3s|JAaW{S6mNHSKj7q+OPc>W$38u!wR+dFU9Gu$9rGLb?^0$uGOe_{BHr@$^sB7{?e84NijORdo|td za}4;r2;c^XtGqN?G7IHX+yKUG_$03u4axE+RNnBEPO_-x6a$)Z>@+)X# z32g*Qw!!fN;zu4_z^Rdu(hUDD5^`~Ruj?;WxKUuPY}wrPS!0Es2_qNbzd{4IP|7zk zRiR;~*UKdi&CGk#AR@~_6hw)DgH8?2Pf;gZRWnGZ1x%%6l0Q2zdMq;E)h$TNlI1pQCIz`uuZ z1K~FaTdjE|6@VTJ9q`2+=jPT|R}0#u!=I+1dKzeM{X5gU$0ztkCc%{|^Xlz=6nzfB z4I1VYlvn{y52JGxUM-jT^Ej0Y7Y40dkpBXOUqy~uVl%_osQF5p1hU^%0Qb7|qASBB zub6=Mhxh|fzDvYgx%T~XJ8XZa)Jc0JXpmBpZkN;0HdUIW?UbWnPT?0|ICkt*F3zvP zj9(W2-PbpVSIG52v=dLQgxU;W?7UEZ$}SbHUHBGQUh#QP05728YY6{9cn`tL8eMFP nZ{r-*Bb6$fDjN^uGh?Lzu(L;?c3_kuVOfo-EurDqIkoA3Ju4~d diff --git a/ultralytics/data/__pycache__/dataset.cpython-312.pyc b/ultralytics/data/__pycache__/dataset.cpython-312.pyc index 74ed7db86f4d35ea9e9abcabee5281f27b611107..c3334d900f8d261673a6778533bf7b7ca6a993aa 100644 GIT binary patch delta 8387 zcma)B32+MRw5mjPxEjhHxlpS~)P`#lb>X^1eYig15Bn1h;f6$G zxG@n32Ur`gwk**UZerykO-w8gFHfupuShh9n-eYJmPBi~HPIGsgZkodyJI7_gOhzH zIJrb=y5z+5aa_2=fz(o}1@nVft5&I;ZPd2mOH~>Uff;-Og|AFePI|>@Z9cyj$!`ipp3@Q)8+rN+d3t zx|q_{q$$SIguo0UHeMxeQwF89W*S)EE-R)IGu!E3x}MBfZu?izVYy_O{;Pz9=PSaYPx11FM6(2w4T$ z8q1Z^4aIWm(I_o&cRKrnZrUdVimZaPp+ta6n&@VCb&1<@8HzSYnqa|1q;Yqk)eS`v zpW}a6-#w%!l_r+J+5o|=$h5^qC3C`>1*y~D?bYDe}A0&d82rK*KA|g zl)ey0!qdnBENl`^JLNO^eq_hV^UN#cF;;O#-F<*%1ee~&)-^Ot- zmNTxHqzy~nhb1koJV1y}hzkhBgH)HAkW!?9HJwmXRuLOIGN@{bLGX47-l64|QYj@V z181o*lk_4`IQLOh8#p;Faha z>1X6rR3c8vbS z;GDjaMGsd}_iWKMU&{s8e2xFKI<3A@-Tp>><9t=!>All?;dQ;LWwxs2jVk|{b?5Fm zd(T`|J1bvr=$dWlnyU)IuV>euG0%;k9luaJ+q7n`p=YjYEo(#fWM8*RJ^qRXfvfe3 zx45F(lBpewb<|gJ%3E0PUvw5#*W9S1FIKGc2f0W1$N7a$&R72V-6wZHwddU(JXg|s zV>eH4RXoc-cJ)-{i>~-y*wuD+02dv<`CoVl3Ki)Am~e$cA(8CF-@6cY1N2{X<{f2$ zYD)}ZNBkhv2b~0mBxwXjT?3^wEi0{|U#r`|Z>IlV_qonKtlfgJ6`()JlY{UEZQ%&5 zCL>TZa229RLiEq;H}RjPZ`Pmle;i08nqP^8NdWrQ`jz~XSHI)GmtT$pl05*yT2hbY zLkN!|pvocpso2=Z*U@O>#tZ?1RU#2nxa=Hs_NTFI)Tw!2@BNq;-KUz+8_7+TA+PjEz^P z0x$D|%U5#COZ%JdgTs2dDem<_9diGz5Y;y}T&)+UcsjJX#w`nz{IH+yZ>gehd8+6X zkVr9*meGgA8rNm_r9wKlJXUxgbUhR-B-`oc6iLPty5xapNLk`V7$Uf|fE`ymm&C#;9TKZgTb8(!zlx=p*$%t>Y z?x!=AH5uZN3zEF-IaXjISF&f)c`0k;JnX>6|RM?9ELj z7hW#Pm5(`-9eJY^XM50RI*Pn<;behaJm@st^hmIA$7SE`PA1+jdnO$)xNB0FbYCvH zlwIm1FPEY^o#YR4!7`}nGa*$pZkt zL4~0H8d!v>SI~E#1p@ePE(sI{S#SRkWOF>F0P`*NvkKLuVM@uEVt@pH2AbIdhT76{ zBvY0EwXqR`_i70f3egR#U{E`#OXk`hQij8NY?%|-$=qHdTVdWriV2;V3wnHqcY-ZN`{T-3Pe=IZ>X$EL@wiEFNO%-r|THF4j};YY63 zK6-NJ+qLyrbH040W&M>SGvcOyta#1;ivL>eo~fO5vb~A#zWQwYgN}?F)D!3y!`{ml zSQhC+*ox4C@C-oEYq{{cESCnlnV<%@+@t7!7*?T@OeYlZ;uK3TbrKsQSFqX%{AUTE zpW=#bm^_vcmCV?XRhU$cMp%*jEgrAMN69zYTP07?WM>^{vnM*+`G2M_bxyeYXG{BO z*GkHNg1)kHZ6*i}Z6kz+hfE_dopTXOe+^(2!+ip?Clqr?m&tdLGG;s1;Dduc z(9{t{1cxxLFagV&Cn06LZu2C zLK_!{z&Y0xP!lAsh!~lGYj3;nV2;3O+RX{C%0icz(^ao zw5V=ohVD3UoUKob0uv{ZaWEFq)F_b%9HD(U**kz4>?5 zKR7O;$my6O?s`Z>13sikvM8t772&!wcp@RO5A>paTiW<45u!vD15g%xijqk5XxcEv z{Cg2oLJ|?VHUWaFBxB>*1L`wPqDIrEV%+VePj>m+ zc7g8>A_`oh4&L6Ey%{)5B`HNUMW%0b1=>HbX>PZ9ZP$eSnLao>ShhYWnrYY#9H0Vc zh6^&Yyn#U>i;G|fXnl}9ar?RZLvu8?YN-iUgp#Rownm$ea|$*8jRB6>q3*rCm+S(A z_7&!$PT*f$fTl|_Fp|xF5VA<)(D`;>`%;7S6QTAE`^FEUblzrC#)wfxgb+$oZSlY% 
z*;s~c%3=b2zP-vZ?4)w{tEDbdVxj}eo=F$ou(~PZ7)I6io=0}4xiQ`-`u)Q%fQ&9?N=mppFzrPW>c3Nc3<{IHLLLOdpz=)Zq4Cd_^~?I?)Dq}vh40*-lesTJI%GZHn}l-9mDH#wA=5?#W@%4&i*k6 z59e@9ILZZm(sNE4@A-5X!*dj8yxy#$0})5(UMJ(?(IEw-&%SoZ@F=1IWMnGvm26$q z1?nn>Y88?jOrL}8OvfO0Q0O~7VrFZ80wiBVvU7D2MFR7fc zG$6?W+-(OPOmi`flrI)3iAZ1w6_V9tEX2x=mx&vw>0!MwEe*)4(8+n3aKwT=9MLg8btq@bDlpgT0KHq$vZ~T~kvUfl`JQ(=rm-x?n)2KtY$H zO3f)-ObiXO2xCd>2Uops#sMj9>X@cO1&Tus8KaS*T*1f{MxlTjav(;E^;9IcfdRT} zZAa$t5cmwX*+t<7U7B~cOziVG;f_SkcJPk#+*rJV6j27%F_sMk6P-@wG#m7e<$<165-Bhaz<_9b zmb#()Omy9hqkz!9b`?8OVZTPf*S-dwEnke}9cZwT zbO4nE=}*@!D??wKpfa*k15?zszBBW8_#49^avb4xgbIWg04%r7C-yrnlXYlmPeJ^i&ZSO04XIJl^>pU>iaByndQ+s~0 zX5*g@oo_o|ccJ3Uv1>J5-#7qS!lL>5z&Za}|9Nw!e&vtrw_Gq^nz%UejgQQBZ-Lqp zh{(@%p6$Hgp7n?5$2T0v%$HZ4GEYxTPt2AtKfiXStbIzj0g-s^VlC%up0BPy9i9%) z*EC?3Y-D;Q_o{0=SA4b@@|4#Dq1iwP$-~pbcfMF?zu@EweXn~0i+--6^+L-_ofkV_ z-gbT4!?W8SzE*yCp}}ug|JO}{s~g}AHyp#O6+PQmyJ8M{WM>t>`l`P1EblO@>0O%| z9i~9Py({1oOz>>&2eK-VoUhJ??w7LG$+GDxZ1OeZQ`~t}*5njSNmm!TRmPaxo zkStcmAZP(=PjClT;ULiy%-&m`*r>S`vcAOfZ3W-D_CRFIjsrahc5EU41}%dHcKYH~ z`o+83ARzh9-D@E;46N`f{qMV<>V4Z+@%hNf$g>B}J%0A_3o90&6c;FCv)YU5ZKiQ&P%hU9W$N|$f8`oXaD{nKhbl5rIDC*F?AC%;t)th zLH&U8v_D3GqEF@0EH zTxN?C6ERzuX<~~%^J#2w0^v^(K8f%tggOLvkDtL3W@qfAA^InI`9fqQmeMANvE)a1 z1i^{GP9cmXOrns-5e5K)7523~%*Yawrx2nDGQuFjXA#&Up2QN{mx?9KVv}J6lzWmy zNFf{nFe>QtW!1&DcK9U_CYot^S9NKKIqBmO+Z9-I_4%#OJ2H;#WEPqPz2tk?|22f~ zBm5IW5aC?_%L$%4`2+sM5y=k_eh82so&E@k2ybIW3jm8wO9jvGRWsc-a0>GJ)YZrr&JvI*MGpYVK|Zr^<*!|u5V8j$@6 zD43Qzoz&Fi2oq59A4mp&%hvps8$O4D$;-Lhbd|a8{_@mxtswcBj6V?1FsQy zzOv(Z-*LL(!4rHp{pN%9nH4~O0+NXe7v^Oy3}45>&M9t?mmMcOVb>Hm+hG9|kkcwX zm`#ywv*TGQ3ZpVBnX3>F+8+j)pCx9Sy+8Vda0s$LDU83OS_bSnn0Xd3qn*#%aRYrK zIs3uugEDh0uFv{r;E!Mk^q$b3Ee&-UQx97p1kbL>c09A|%G{Yb%nOn+TDCt!qL(A4 zG%2PG**+>dBbXY@g*lMgPup%N`+R4oRlr$HSo z$Ik>H=>BjC%u3t+&>Kd`C4&l~Oc0LXu;`NHl0om*IE01;kgx_@wy;sKoBWZs?Y%ei zTWDwrpuo;i?ziyPSOesg;Wyz8Ax8LciKyxt#EP?ag6l Z0bhQz%F7%4Vq*#4aBAdt9K6`%{|9RP5|RJ_ delta 5866 zcma)A3sfA}dA@gcXITNudx2ecVR$aPAV322L=TXJtVbk}kYukduZG=OSZQCrvw$Eg zgJZdtPt?TfO)I%oD>ZG7rJ}TQNGqSVIg$G~apUxqHF6;nHA&kvZt6Bc0y}Qgll1>* zK|*rQNeA|u|Ni%%xv&5K?|0|@oAlp)m*%`pl8Y%V`M@qaUky3AI#OAf>Hs-KBQsynw zA&9SIGnqqJlrV~rG%)l;e@Gev^O z1Cp#tVYy!kt4-2QB`6Pt;Z=#4)Gdcqr72Tdy0E2F%4j?mTG7;wU)I(`tN&LMTrqw& z*zGoG#(!(XIgzF~4FBg6b=K+3`UL;8&NqiuuucQG=%l0Hmp(OCm zstW#+=xCyoOIDwuiHzkZUNI%fujdhxG=@l0{6qRVO35g33kUcHlV_bS{ zO;m^C3e(I!Unm+%_iSn{g&|P?bbee3Vu4yo~aaJ@B57C&-+m{+#iS**53&;N3p; zRK_)tzQ?mHZG5}MMdN(XB2`QEf%IZ-l6vICV7;rUd1;mXvVTx0pO{eh+Aa@HM7%{Aj(vHfD`Lg-HI`n&GNYkhxm{QB`Xcg}X~Pj&47 zR&e^rQ?o}-q>h}JIUwI%-FkE9Y+HA#t$Swm?r#;}={Y>z_vCEf@l@aO>1UNxUvQ>p zU}n$Yos;qD=Z0oZ4(kt0KC>VSd7jPl22vs|5JO4Fx!sFq9xr$`(`;*5G_10&xmUz9 z3)@O-$&>UMI$uXD`JdnW%HH$)f7(iNYVWP%PZnOLyQe-ZeBJ2Z1taTbI0@HL@ z2BRbl2RvqMYHE_$b}V-ybO9W=ZdjTrbclL6di!OXu>v6XJ&_bLXmvY--^&%zHW@7-pfIwPH<#(rw(<|ER`Z(b8o`~z4^~%fFMtY73`GXiF_sN7!#u_g zAy^O|CYs-XqLv*SR#;e$`!I3CvQ_antDVr>f2?jb{Tdjkw|H*N^IIh@H1J1Wa7uS8#q&Pmm?QcFr|S+tleYZna}t6JytNS=M#Y-29|FEwv> zd=}nz05aWgVY8Y3SbWqMm(_&AE`owxM)(~7x4^1U{%wGVe(=NE26~AX)U9>6K<`KY zSiUfO=+ibw_=&o$d~2^+IAh@RbtU|*y58ml*6A1Ia6{6V6qBYK8I$M-a3yKL+@aqO z6ZfjIazC%AcaA@FPKK2S*j7Nc1ps>YK-N};qH02p`V|$_&!b5Wnq=q$&_q2YKK81I zPpjDPbmj&}2U#l^ibHNzd|__$cq~^yrJ{2}vN7`>Pl;isu%3rKP5j563hJ85Y1k(; z8Z}YhaP@Aco6>{p0zBu=)QqrQs+utzQWLC)e}1*o3L8rT_5{t~mlK-E>QU7-E!x_%F_vv?PWF;%M=6iZ#jprlPMI?3HlW5&&?+|B&;CQgU>=H}M5m0*{S zFzh&c1wmg}91Qyd04*EZhixdLBnD#v_Emnh**abe6)JA0+x3ZHWoV61j1?GeAD<=n zgiO&qUrK7~Zz^xMe?&ylb1#Qf)=t;9&q(VQ$XYS~{vfRrix+xmo@l>cBZ}?!b23FM zJYy=jC*+B?1p`QnC^7URy$L@dytO6LTL~_u^O2_x?msB)Nb{!<%o38Cov>x%ZkefD 
zbg}_;(*y3pVzGo&4-*WOXvt2(U5A@nSFcBcB!z+!>}>;zDyf69p>ROzS0sI(3`n7< zbg1)FjcGn4#S()G8wsfjf4bGV;a5Ei{R)KCQ=feJN~^tfKPN%x8i|%zZ?et$Cg&mxK1G+XaPH(Goq|ds zRp*;&*67D%y?kL_(=)*T3jE`?zs?m|P6BoMl@$3m+bh-^1EwKznyMlN4i?BbRS&UG zeCXI|D`UXCq+9hsM^0M#fh;>8URS%>O#nkJWcV8ZrL7g&Xb z~=wekg7|*t)l|=Lfo+)0w`{y;sJEKv)C6v!#8#~){2Abuy0V)Z)*BB ztQjMU9MueJBA}V&p+w9FbKtfy313smuXL1W?L!egjy=KuzQeQT8j3ncIgL_2!X*GL zPmjzMA;!>G*x&N}Ek*vfQT#iEZ|b&>hojq^-c!GhCg@dqzu5#zPaq(#VtN|Jh**m1 ziF7u`q&`;8efx@wmro=A04lWy9RK49vRK65-r{oh-mu^vjo+1k1>f5H z#@-oe8=gFz_ezQ6S}Y-z8|I42=JKrNWEvWR(R9|9xL1r6;PXiW%jv+Scyc zXncfTpjuBZd#8G~y+(~maXR zHiYd6T>uAI9L3!TI}lnBS^?bU>44EADIKT$;9mRqr_msZa0X!%VGIE&OnP~Af{5Of zK4Uy~yf9)#2EsuEOy8^r0evq$3p$5y!(t!8lK}35bYS=D>ZedWfuP6pGblZa@EHW% zM_xopAD4_$H$p$czas<@1`$Gh^?m zgj)!2B2*&$2tYHyryTa5Se!xlbA-PDSjqFghDwCnSWyF@^So@){Gp>|E%CU0Mx`tkuIwSOC)f{!D@fM3giOplapZPlnZEgCL z+{f|+fQpj|*)5&e^hH8wKsVY5!UXwOqh*el_Ot=P{#4JmDUEQ~q36c+m4po!PL2pK z0BEM6XgCx-m2T{Ns9r=kt=BOnDo4^ED$+;DW1xl&L{GMQpKyg9?GCP|zNl^39o%~+ z$PWtZCpss1$>Foq#V;SWwU>i><=4A&BuE4Ftl4Wg2kgdcgd8GhxrddA90iuWw5#(Y zM+U1Ud_ArY1XUHvdb$@tl9m%a|Napt-N}D`q~y2@4aLXT7$5)TOZhXbLFmHHBl&`* z{Hak8%jO9{ddaZf-N1g#lSj9B??OQnqeBt+1f?(7Br3925Kt|D#b>Lgp+qRGrqAel znBYd0-Z}M!`2rU8^R9(AA9JkNjQv1SS)txa`q?5h(Zfy;O5||qIJ2RouW=baf2^!c z_e*pPeL3`DT_@=wU_@xu5M zEGcxE*1=LVbC=*cbnTkO0otKn2c(#{V(qpR*rUU+XRsCp4gN z>M!?Og>u0fC=Xbh%A2gs<;|ilZ4O#PXkG zd7Cv>9usY{=5}jGd555TO~o27k6Sy-JFSWGgte=@%bF}tTD!};QJ*VMox2dz`}F*a zF+IDp^oQvuGdI9eR*1K3*oJQGReL&yzVyrx)Z`KF#yAN%K^dj06MW0z+dNNiT z4jz)GGE$hlV5xN$l%-=sB`F->%u_AbWEGFqEoS>(@M&p(Ur{qv&ogSP6}3?hzA3#q zQUhmLtSsrVvaCzxxGw8)J@Gj?culU#FH3MbdIJfD}2D5q?Nt+=M48NQln7LXbsLWk3x-Sl+&!HHca26`YE0HO!KFLR^sJWn2O#?a+uInUkww~L6hi& z@R9K(@g(r{5vBela43zlc$6|Wort~q%Puz?x zCYy01)=Yd}`lJNb&n3aK)Y_E`w&5G9X{<3Uae*mMKYsD?(^!tjeC4{~FDQ=d8;-4- zisslgqu$^uy?aW{;lb<4k-;$CW^9;n9gl@^r&wS0T&IA z-g4Dk^$pE?hS(?oi=TB@`EHCmLA=WWVsci(_wJR{3v%`!vr79Dd@J)%kQm>ZcU5e5 z&t)JE_i}KsN12@MoStHM3f1hHn%!@zRc3m#nyM|ZsnTrruHSQBl5)WxW`9^08;RGM z&uQ8)`;=-n*cr|p&f_THX(~+^mg@3RBI9d@8}^D3D>cJp9;ajH8LCUEuFGs4!eMAW ze}syXnh6f`Vvx^mNexlU;b32Ghcp#j%8fmGkqW3CKSGTY_LA!Zb;F{QaZLU}QS4S# z&W{o)n^k^{+Mf)*klXv@43Pr%QEg7_uo7*Tiaiq$%VwkzX%w8?Bo!rDlHW}va#DgQ zU_#2^TaZ#x>c8awP33~keUnn|?jwCm@}^`;b-Xm zEWtUz{GE8$Zqg(akW+q<>dy!N-hWt{38o8U#f=qbkD7+(YmVg}zZpDRczBSe)>vO`Jd#M3ZsGP~|Ccx56g!IuWdlnrKHryfG0 zvQUh+JUPKTgD0e9uxIFCwhs;F@OYmMw4sIq%azlUO=+3a%8>Gq>g0ctcsJ1(JT?60hv?+zOGzH!x17}+ceUcG zp3nHFP{2P!K*N?~u_*p&D!5Pfa>7f#BbUUU?Z%8-b zIzY+w*c)>2<&m8&;XO|3Y4ALABkjx0_)SUgSxWE)Kfajo;YpflJ$p;md-UF0@{PD% zM0rZ@L20fQ2c6dYsNByq(YKjsCYvcee+#Fo_aDWya6JWeEA)Te49_(@-?5Zcr@<97 z_@9x1R(8E^-tpOy^*6HHn(2{Aww<<*s#cv9_%GYJK7%^p*bPniDMht)#bcIg!$W`& zAwX1PGp3_jF>IZYr6r$D0}G3nZFL+3{m!A8YI=?WALTln+WkK#sje!juES9&4p%s{ zoMocPq$_ZQ!sT){yS}*%!_wlXa6x^Fx-rhR$&~H+g-=I3l;LG%zTqd7> z-@;%t9n-{cm74uG!PvIvTBPuiWwwYC_Fw4jXX*9xG*S+HhfrF~UvPB(+f>w7sR+r^ zm}yojU6V{xFEXNhD_|Ipw+WDu61{^xg}F>AJDf>pw#bsG*J45OK!31ptlm1BXe-&7 z6(2HAVJNn{YBzHH={VU8sj9vwT7)laUt9?RL?Y) zn1aBj&V(!@sHRw3ZBZNOB!VUMni1Lv(`wY(>N%@i@TWXQXP!~Fp^jJutD(|BXLD6q zfcBaU8p-QSq38~QorpXY7+6FEq)LprVoH;#T$EUjjsR^&M6a!l@Kr31`bCzz9mSG) zXs=GOn1%@r&l179=aPMBxa(8RMi%MH0xXX>C71-$xgM>jv*9QJk|eoSkkU6#=_A(;FI7ECDYPi@-SZCBe{+ecfcAaq!^Q&ao~b^``7@QfDZw`GI!84=cF2W03; zJZbx#^CQc@GPB-2x65fNCiwIQV$wZjO9gM<7^nyC{OHsDC8qk z4nGm|z88FB$H`*}KJTZRiC;r9_?moEf>Tc-QA{`G#f+Y6#N3*o(jtlM5p+h-*=Fo^ zZ*WW*xvI-3&W#{MsL40r{rO+}eJIJpugi=5cnWw1@NB}f8P6b|A;bnfcHS?7&f^)@ zbGImW9ns_P|9$Ff@!*Tf%`)XxB*?U}wu(czIr3zOuPWCU800S6Fw$(MAWVSBLkJ?;%hOboU8C-%;hG#&OOOqn3dOoGpfDYk!Kz$TnQlc+PPvoef+IJ~6(h 
zb#(y_FLF2}*03U7ycGi9B?9pymXAW2tGn^Z!9muo?Zlfd<~Urhk{q8i@~vp zu{r`95>G^MbnantfsP#!iug$*z9Bah=DH6Jjp)dtA20e@M!5JEMh^Ql-gR81Eqsy| zvpaZuVtYX(xjpoLiC`u8?}_Qw*NGe;_!U5yio`DZZRq@usOFCe{)FI93BEx4$6^HN??kYyD`aJfm! z_sKt|?5dM%5smLpQn~zptZi?zG86Pn4rIx6&e2G>gI$x)Py5;1y}o=q*G&5IV&5Aw z@+8>J+c0{dQq5E|t|vby(=A%??a8yP_o3(6I}+GUQ9;Euv?Y@Q@h_sBQ&@@w`_GB~ z3jjjRoJvHuF!ij_#ZP}l5 zOoxYka|qaiU*DZQ|F>vUN=FR*GPRd#p|lvvH9M4X4U~#*Lhlo=u(wh{W^VY*s#I`; zivT_N(eA;tJc#IgZCtQuV(OS(lGYAAA@Xctt9{4Er6A(yDwG+fC>k4P)9yCqv*3Ub z9oqUGDO%dS;vJ?d6}mU5?4!A5w2R4WV>IpEyHlDxL^nkI0D(;)?sFa!*D-~?8$A)Fs4N-&~AREx_^3S7eZ9M`cBeB5-2C=}N`f!{6^+AQjm24WfvPGFc8w?y6YbmJe{;T7qSWVLLH^PgV za^fZY<4~sm1`=WalkIzx(^2N7Rxz;fYMlz@$?gl4cC!cbBn0lb5^yO~zyT;Na?2vM zBfs4CyB@_bI{mAF$myqD*N6!SZ|C@N80Q`Oi)6jM_0K=(zRfZTQ5Mie%R|45=79>E=&bp?20GtrF^cDf!9 zjQ^|+idKPo;O8eI=ZmA_-zAtP*w8c5lpDd{ADEm^y|*U4r`G%%dOt^ELe`0H5jd5U z_Va%Z&L2E>;_vVhCSlw0C(7%f`EOA{ukh8;-9Z??eBol`lLor}f|+gjhUo=gKRC3L zWW7A}K;5k4dg88$zZ!h!;I>v23v8zG6IC1;A0#p<#)&Scg$)!Ias*Y8(HkX7s1)6Y zgqe;Ig}oOjjE?R!IN1iFnZgW;Eq2)u!OIh*)nVo+E)5zcJ5Jum+XKjm+o>EBDEj8) WZ7A7=M~Nrmg;s2Su#w4T^8W`L{_ar# delta 5437 zcma)AYiu0V6`ngY`}BIfcI@>deoX8n_QodU{d5vS97AYu1PPC9Qik!)?0W2ZZ0^iD zv01v%L_h_BMi;e+hC)=@5(NTnq=G6$L2A{iJj#!vmXXr-2U11*qy14N#E+)u+;L)K z3hk`sn{&^7ojLcsW}mEFFw$CmeNup{82e107(SEkB=VWT!!)W;NMM@=NUIe6!t>Z?UuatlgS#wcGM-c6+{^kBM6y_R{=PPA9BRyDQ&icjvq9 zo_vq3J)>kBj_)Z0RhLPyoG)W9*g4s!h2F zT?$LlPJ|ACAmUZZ1`Xn>YC5K`s<~(&dxllyy>o-vKy+E_x{+;1M+~F9-_WRII;EpC zuH{Z|WJjW1P2I3kAKV@{Q4%H5OOHh#5XlEDo7lj9xeogd`cM`&*4mDwPbM3mzoz)U1lJmr|EfR24;C z1+r;tW!i?MG;5ZFI)1t;F78nghsCfjs+G%zql4Pbf=|~XD^jpr&!Ejn%cfIw19=k8 zJBS?*Uo}RT;Pe)DEY(AL*qKz<_7P;@WV(*ea;D0DAUnEg2XYzAk?!H_^0-Fn1|&sm zoNmPNTiC~`!9B~7a=_b*Ci&vsD zE7h+c$xEl}r^H)tgu$bH&-3EPSKy*(O0*wfR$je&wL*7bdndv!fU)z^0_kIT^rmJ} z{#G}zy_qd!Ir~PYD?2Y?a@aCGzu?;Cos|6~vpIuX(h>rnwUT|BIdwh0Du1o>BDIme z_bOxlW$7{CO(UPO3yp`iZ$ff2!WMwB9HBR3!$aWv&kK|~0 zun-~$Xhw7|0$L2c9pNCtAp~wJw*r-Gpgr&ilHu8+2Lw^$1ZdHthjHZH2zi7fIF2|0 z(dkMgW0v8R{0T3QgvzcocPaPcIIam7DQ-1M@Q68^xrYt6>>@I&wyaIo!2mQr?-BND zOC_yGs$?<+cfB4x9b@~mcO}+C@1b0bZf7rMhuBBi6=E*UGOazeM(7A4rd{%8DK10P z2nP^S2=l??5oiV}ciNzqR#q{z=A`g_rHPqxko2kMctw}m-r_^dj8{xcSLX*j1WS1e zfY2jG!Qk6SM(iLm{KR?uOuiz=+2z)kZ#xA8#~_eB3Qth3Tb@zNn&%tzF<{U$2;TuF};DR#MSa~~cm-du5L237JLgw%87evwn+Il{KK_xAV% z;;>YetC1=s+S8z0Jw6PBA;u>K%y0h&0u8x`fp0C!UbO>Et-U7p(={b1y<@lR>SNRI<XLvC#-x-cy+8zMxX}p4EDH`ZKJO1D+8WSp^ypLzJkGn zZS1{nGnO@qSyUj`l?+euCftgpE8}ySR98$#xqbJoLtz!8xR7J%aT6APq_=C!|C$S8 z=%zRI#mVpVcC6nImZG>tNZocB5~@;g9bbdI;gop#9)g{9hYl_S3%p zVSdolriU*%@A3*HVatRf42l7)1F!`041d1t!g|nVZ}qj*K3CZ7Mm7|ETxn3QT?;Z_ zvLLETtMEEeF+E-shI@<#LIJCWs~6JfeG=SW0N@KyM?sy%HZ9*+i_kGYT8(^9(B)^u zlf)4rrV47TD(lgTP#%Y8YznKhI8bp;C91-6$#VkmBt4-gPXRS4jR+^PZj=1FsyLbA zbbVE#@B3+pD8SK0z=k$K7mG+`;BJJwsSjWY+|5{pW95Ci4n)90utk?3@HA*|6V|%o zWa@L~@~;(2!){vQ`T_%@P~os;E0;7X)Ij&pp*Z$^I6sd)(?3wVY8-PXvB0wC^k?W_ zFpRG(oUwl9nJ=u*m8t?J4Y;6m-ovXuF2+%(Gz7yPH07{#=XLmy(oZ$JUva6@kG9$W zm0Lm?AfxyE70wT?f9@Ka6}Kt$CHCIH#5x{_WMI=12*(k4M&Q8x84HuYAfLy%b?i@T zvTV&@oIR9lf#h}yd#(X6UPi_>7|qquZvls$&2=7$OnB2O#5a6+;bS_GZ9{XQQ1SJA zT&wu53YyJjX%{YoF)D}`saC*n5X9!(45Ff2arCevisQh&Y|W}wwG5J{aZZLi*n(_I z8?f&Z_C<@9S+;2StmyP-*z+pFYuKIPPPSlbE*hrHi#S9&NGIO*Vtx9_AtO!03XP*v^l0mj> zsO1R35(eTU{WVAhY3*rvP|QrZG<_ZEHvk~(Os@-~rcDh^r@ugU4cVeohA$xaKndcS zr*5gI!a%HRxU|@gadz!@B+el1av+60YD(ZKNIwi*nAV=0>b>N+-3RFFG2^7;2-Hw zt)$aRyfH0NeYIaf3yLwZR0v`aezkGSpuYtkeHQ>D8HMj&lvruwu^K<; z1Tg6+0{7Ht#j#9hDwODLWd9if!+Q{=AQ2Rs^A>Oj*dX6t9KwazVsu76)_J!uS?CXgdy%&y^NlP`3}G^Qdd-2Ib}#`Z6k?OTWN;`T3yf t1o2_p)z?-mV<*0+haGYFq=6qdBRD3pQ(O*hMUtgVO&DyRiYMaf{{ToWAua#_ diff --git 
a/ultralytics/data/__pycache__/loaders.cpython-312.pyc b/ultralytics/data/__pycache__/loaders.cpython-312.pyc index a799acde422428af99369575883f504ccff0ecf9..9766f1f042dcfe2499e0f2b10d274580d79afa05 100644 GIT binary patch delta 14789 zcmb_@3wRXQm1b4HyVX+b(Jx6YQF?&XAOskJLA(qSZy_+q*gQq6yCk(}b<4L(faKOT z>o{IW$RKf@Nb+M4Gnvm2d)MY8Gr^ge#GcH4`pe5$jJM7J-f;5 zIk&31B_TZizTE;H)xC9Jb?V&n_|H}9C;Y2_;B(&1&bD!IC0jr2Fdn|1bCAyuc|)FO z_`{WAsaPfo;(~SqJ;lG8*afYIIgvZehq%MK$G8xGSRc|IHi$;i6wR+R$D&g0wv~8U*$e9G|_E(KkuVG`i?~4Q6tCM z1h@{sxPfk=DHaHcgiSnaak#cq;gA>;Z~=jCsYW=^+tU*xvLx(pIk2w==pLXuT3Ubx zq9LKBVV~Bv*2bpTEXyR^)+>urJu3@BLP)=AHH_=87g{<+wMu10PeAUJ1mMGC`_tqT)?aQCkvi>sFul3 zgThH6*48BkL1J)Q&b#b37(|h~Edd$1814twvP~Qdijo8$NZkv2K&)b+M0zI(LThU{ z8kSpI>xG^0I~<5$-;8t!5w_>r71VQ6#I4Xn!Vs|X6%2TFOFcOoX zPXqpfPV`qr8 zjIuEfRaEoaz+Aim6kGt~e4>v5L|7oAiOR8j#RvqVG;g$QYLdtI{qN|NQ8Z}z^ z66$!worrU-4y|cO-@(TX{h$`1Ha+gF+PrjI!;H51^VSwL0w0J z3+j%+OAZ)2xFOvIlL5*DMws5Eb@_tM59;gI`o?*f#st$~OW??WNnWNs>spR?^rqQC z|JvxzD%9%bRW$ceCykqm7aCsRLgu&ysO2TofK`52YZJFWyTR8|KzQ>#bT{$fiy9G%s)0YP)vOw-y!9ubb`k zEt`wkWcmYJ3BQ}oCgsX|=x@vmtb4Wg_|WyJR?7UAx6#)vOS5e9muZ=6t)ydCi*6tb zhg&f3aN5va2Wehyk$b={AJVF3&+%_`KaAaBwnEP_t$)a_aU#xjmE-#to6+HzF+;1? zw(A7rPoAxY7x2!=TEeoIUouwp!_=MFjyJ<#KH8O4z*k~9Pic2jYu6RQTB{YLCr?{09nbPA%b~x|DpL9xaBiBg zm(V7=HEU*f5Uetf_QpKdX9((ewmnV*)$Ztb?1zm6x8)6cb_36^q5EBN$CvZxY%IXG2K*M>Y?2Kp|_=>l-{Z#t-#F!9M9K9NaeY=+G`GSnrfA z8hi5F-doPaSG*&eFYO=mjyo5#2jx{)`EOU>axJ-1J@V+Kwz2AQ*AjSO`}sp_Z+Ysk zS6zMdMg8UM(d@ARZOyaOALn&Db-ZgUKdsk$@~1d`UjC;R&bNT>%KtB=y3(A9Cyq_# zx~BEILwwG;#*4ep?;hdLH-Bp4ic4?Q73A^F8#q_hryDt|<6_qNtidhcaD0AmE6=%> ze4fG|PrZcd^hdeYeRYN()mrzhHT$5tds*ShxTe-PKv6)iscXD*R9&@8u~ zzPZ3kH@K@@rRo4Pevt(@nwgE(RfeKYf44Hr7+n-MchKAo-U1$XX4EBDXq~z$nW{W{ zz)Jtly~nYr&z$+^x6letorx@h#@FZ*o^W{q!Tw|gk|rc;kvswZ2TKZq>&P9f99}ZCWF$bpFwjtX{-9)Z1 zwSXQhtmJ(+o+|v1Pi%mhQ{=B9by;JPq z&)xVrgtotcuAjle1_}qpv!Yc46dZ;N1GQVFny#l<$D>D;}y$pF^=7GHzwj1bc zW^XwkGJ~habv>lCo=MTuYu6v3mBQXcihr`V93DQtd4KcH=Iz3V&yW4?=0)N00Qln& zTA?GaI@$#1B~K#>BUu8(Uqqh4d-$ZvCM77fqz?&`L-T1EXt&sx(#J@MoQ3*S_5nH8 zvlG1T00?d(N(eTV@QJ<~BpltT8Pc5D==z>m59!9IQ6y+lNDK(dtQa7QVXB9Uw4}2z zPh#4DS*SU(ZUL{J{-&%r@jP^rN|Zfrv0t=(%{CY~@A!u8ZeGDT(;a8YNa%8SG<@4x zeZKMB=5tSe;P!y28jcUe$2JcoZhMxEdzMXjR($us_l{gY@@C$R<9{o?z4AwA-e0wM z(&-)EHMHxwraMJdNoUm^b+4~@Q5vgzdCiqIFAR(q*GwdGmtw=6Lpw(TLrsJH&pqBd z&Z6Pwq2}bm?QipMO38}d6V9f44$kA9a&o!Oi%)&+smWaTaQ0C4NZ_(KDvoV_q4TG? 
zRZ|u&&x5tsR;cw`UJQS?{Cm~ct8do*u>OtuHv?~MnAo@f_P!^^_dPN3(4mRk!;`sr zi-*la=5JZ27IH5CR5gwtKOY}CIMz1Sa<%cb?bo(nIi7T^Oj=eFAKkRIo<33GPh8^( zvYLDqS*L0SxJ77&#X|iBGgYZt`RCALItvKu(t?Yp&!0Y*Lr{q{`}O_y_Hab3Rb)*f zWl+1#ZzC8E5cKN_1~$Zt1YEL&qOh4jk!P`T5D7(6j08tj6#D|aLxMv}xVN6+KGBsJ zEz=uU7`^w}`Fvy9z3MEZaIZ*jbl$TS8tbN4>x>>{2<#YypY({5VHfbIjev{&@0w0oPZW5gh;rJ1V~?o-Sfdg zHG-socQY_YKx)D=7?)}Udbhw-d(?_xEZQFK=w;RcFdb12gd>2&uoWr@(rf@J1K$*H zOTR`2uW3-EP}xif;>_Geh}jYmq%JppKp`bkd526?$Eqr{hjv&2fH!AJB&k zN5O6aM1dX-y9Nvc#sSlSS+;65aXlScSPT%&e_U9#PXCIcEVz(~`6`Z(`5AC08o5{0 z=0^?S1*v01%V&+^SmbJ$+42E1hitGOlQw%u2c@*vAJ=v1(JK(368&9vg^W;2FE^A0 zB^xc?xPZR7s5W7V8_yc!mXIBtu_z!Q%3a*p4`pa+SH^&Q18bL^ZpZEbLQ>jcAjz({ z=?pI!!H2ShaQBwr_+~IW z%`_>v9q6{Dtm`{O_@q5#Bkl1o<^O^Hyt;}%O?m%5-wW`Zyolr_AQHMRs*6i5({BHA z{+INR{S_vRpUEQnH~w|2M&K60h@MDThKvJ~dnNX0#y03hrHpJtrSz~#6zjK+KD2mA zu~l`f6~#!{K&GAmfWZ>|!0&Vs{N55W9d{VJz>R)+aXEjBzO(o_DlREAPZqc!^t8}t zmplh)icMP!XghCBVQ*0(0(|+$9vsJ5Ys(Z9z z*>GaT)rzEJY0|P(wczFl*=lZvfu3WVzXkIUB;SW02`xO@X1CFDZ+W4ylfMUc*g0)6 z8EX||j4d@|OgApyTFRV^oe1Vjo1-CyTT6jIq^;}V@{%uUT~QH0(rM`oV+uN2NfY2R zNU}g02*Bq6)1Ru16?TF)(K14sp+@D%^bpG>B_PuUCaOCiL-r^f4MuvIHAhq&%(5~8 zIzI4JnL{n$qzLFk8tDFYu>6p!67Nqi2?85KNeCpgw8uQdU^77-OA$*!h_-9?qJ?EG zqKajP-s+%=PtCGjY{8O0qx)G!YdfZVv}{BI6F{I`mGqC|CMek@;3UouPESC6_39SV zD?&z)!00M#s#$tz#uPJ+VnZ(>m{G*K8lj4(#E85#5MjH-h z7(2_5Z^JAT_%IoB&R$?nYjvp!NH5O)kV=*c5+>-t%<0R9)kI*X%YbPT4A`P5##*i> zBm`t(xe)3l_+?LM$&_mU3C&;!SPss zm+`AH@~^TpBkjxN9YWa&6QH;`a;?@GN)Gz{)yw!?`s>wkJI2D>Qbs0t^z_3|@ekAK zhd21@pqi`#0*|Cnm=Ih8S&ifnZCbNBv6ek!8!DxXw!=?2Wn(;Hn}-=21!Aqomij=n zFJ*%al*%{;-T1Q=s7slr4O4GQmmlCf?_fW^C&dR?;7cxI*~F%h$d13qk}i5mLPM?X zeD+&W{}5nt{XSIxJ7?=wFab)!E+0G_DRg7w+P~r{zs{x0chbCdHN1yzT(`VxGJo0Y z{8ed+<5#Y{vvT9>OKaGz;3W-n9qiwL9Rp6VY3X-0M#sh2<4uOXb3b%3X***$B^!aXzk#>j6#S(!ur-{>4qHf%VhlUL2!NfN{&15~&%ww~ zE-mECfx0~AIOfePFJ);gg3+)A!duW{quT-tI))3PXAMpqPg#!+{ZD=&x~et=u%b2d zx6wGI1EdYw@;tp;>cOvMW?1$lV;wk;8O|?8&SzE=eJx}K-!knW&jH~ImZkXs+%uoe zkrCly7r5E*ZW&Y%KJb_1T>+mp5BG|IQSgx&yUdvd&iCHGtSQBJssRQTxKJATXwhtW zM8Qpf;1b=S{z6_x9{t(6goLG+NJ8XotU(Q@ zCIeClBYOw(2tGK11a&59MIs{kE|7UL0?Y}1fEH9v13Ut{KspPo2QB0)9n+0%8EiQx zpEKUc$s4TvdU&Mra@A;G~8+mYnqBgvyDZXfL!KiZKz(wXe+N}h}*yLys6Bv~m#sUrE525ItGI5`CtTk3{I08IqkCW4g@qlXJ8t`G#(gzE^VLq7I9K8Du1mYd%7*rS zbC=2$T#jYba@(7z9`{z`T7g^rL;YioFK@rH{e908Y~UDmTs6FAyJma6?tAsu>&L4% z{?xnauEO0r?!w`&p{_CKE%%~9{Um&{b$5!EBx^U`ly9~tYqs7h+Lm-~gOuN}eaJr2 zGLc(83HWZ-P}WETdpO0 z-;Rmgo!H!V$p!$h(@F?|7O0LBmK^aY+?Y8b;DK1SDO`tG7qosu^Zq#U_G>j74YpRMO)E4HQo zt6>3s-&KHV6x2p;y1KE(U<4n%NOQDwF=w zdHtERQ-wNJ`YQdbalvNXoWo~^&LG17?S|Wxb>o$Fw>+zE6|BY(9rxmy^Ji{5gmH&3 z;V8e8Uy)q2?q=i7)yc{&xAGg3_6F+OiiZt(zcZWp9*1H6VNT}hPj>p43jjuo?r5|p z4sd?Hzh;(_PRCnJQv(RB`sqq^Pz$+@gD_gk5ER1^!W1dft8+mDM8f-v=Y3@bpdWzu zO-PvFJEYT;f|=X0TK$(pT_vn%038%P_PG_ zzbyvFdG;bygh2_10F>wv306r095@RH;mBusDh)HRlbzZa5@al6xXyqi%=}21t}2R

$ci zL1grZ7>C=18~T427UyS1ZKXdvc90UH&FJo;D@QATG;leir9vr zzu&f+E_~GOO1s2yeTWAm04ea0p6+|JaDjm}1j3Hg>D423U@#zk&UIyB>U&ul=Z1rP zE_RR)o;?VamhPA!zkgi~POyFNZiM)vf z;x2$RJHQ=O6=G;g5&r=0!CFJ3s6mPMc-m-bOxYrhx;q@t^Qf)?MW!7 zYHz?8dq!M!hhVPp9@4@2#880dt~Q4{RKSpY(wRf}OQWGp1keHwp9QdcW8XBoKsVhN!Ov#}{O_ z48;bu_YzPz_!ksfR`?5WVx~c7t`YQ23#xwh2N}w+U?!rFf@6pJ6|;~4xV?HN>^r{} z3`gN_3YfLPwPIK0oO(tHYRJ=rFQy83b z%0rnijSaqo`>_prKvYb#_Mg=?>27>uKcrHxyXx3EtpDk9vP>bhwmi-k(m=~yew1!` z?3>NmB(ME>=(%aY+r*-jqMJNEpG&F7mZ^MlDy`|VKIc&O`ED3T0z1Na%`{Xp{; zf2s$t6OQKOJBPhPURpFFj^|aJ+j7^Dcg_kYYJS;VN28CIubp3+;>uZ;OIx}6_X__V z7OiSlj5EH3DIiZ&Z(^o^{0;`a&*(6;WA;o6$nnoO(-d}5PX9O$c@yN)d_yq1#g0&> z7t2i6r#QlMYy=>FUCN~7+?)OUKM7d#V6#B(VK*kG&*B}k2J|T2&M^m{VjYq<@N+JQ z8#D(w5LF09<57Mphco5hgD4|^S`T+qcz3UcMH)sp@RO51@bl@7e?9aIo_~yfbfm>z zn=*9Da!-q&2TXYT(Mt(t<8RRdC@Sk{{;X({NrAK(z6(ZaLF+qMe+Cy^nEIubGWA5oIUSs z6Ely>xrZAYj{hG+A>gpEWQdJZc5t)W;58Jm&q?^_}*L=bu{jAwg$2&jafP5C^H(+xh_a4#Z@OjhaC4A-77M`10O7!bu#98pk_7!{xK5hY4|1swfhl@4bjjhCouY zh14#PzZPtgO4^=NY_c0$c2CojG&SAbHf^@uA-a{`W3y(PZXet2qNFEHPfvHh?|&Jf zCf&U_-~1on|9QX1@B7A=|3s7@lFYZQRx=0B6T80H^u)HWWga2fLV-{u_=UnI{zOS6 zC*nCz=vnge>0Q7z$_d;F65&p09_1qBgjV1MT|~19dZ4CfHSJu@z-s(l&B$sxto7&v z2E}qX-X%5)bv>Pev`f)<3k{9*2byAHqW`5Sp)?n;uE2u+%c>CJt4^b2NFdEw$7wcM*qk{#TTzERW6 zHEK?JIIdSO7tQg}M{I8Tw#D6-ogUcCHIt++0eg&SKcU3Wx6PNDboIauJ9eb!fUBLXIq7D(g8s&`da3R?E@DU;ff`@X_nPE&>HehgC8CyF*VCyQ zN1^E~CtBoND&dlutwB0+)J1}G)A52Knm}T*=o?EtaLYP+BFizi#cNic{#BNXK52Kd`|p_FW-7~c z-z@fsXVKoc)_9sX69ly?ypLrLnSFZGAY`oPD#->iQx`GQVenn*qVr>=%ki#r=akS z;m*mVvx00_453iF5DSGAO@kD$DZC`KH;HKd#0n&pNXl_35);vuDI}^8Nzq8MLb?^w zB5tNXb1fx#)Z|{Wtr!@jVK7Ji+?~Lx3%a4q!OUC6yeVVeP2;lZ{1pTIO`~_(;~U^F zWK28r;L9}a^1_#S+FJr&w%fGHT}3UPf>YDG9=cxlR?VBoQnin~C8Zuc^8N1A@e`?I zkEKq9rjCVD4Na+VbE>s%D%_TeiKz}Lb+&t|qdT>;d&;;CCmyc6ST$Dn{9_X(FPFW_ zziys(FL;D8zjgi3v?|btnKbMn5)@97%XvW3!-agumsh)t{xsKP9@d~^ijX7+0_LRI-X*%_NlmLREfelxBD#}&5RmAlZlHK_pB9_+~NO zAXd`B{EdNQ(5moqv_k-yY8QmgfKfb!oTsrv3-TrUu^OT?`2{6;SUe6Sk-Z=M-C^eW zq+nNPBrFTH&@RnzbZtSnmBtbFl zJl806%F%dCL>wW;k?cX1tcU;#6z{2NZ$BE1MdIC3fMVap(DZe|1MsVi&IOPKtK wy#4$ACrj(anXq`f(-&G}W+z9iuA;|M5Sk~;~Af^Cqb)vfPyWRwu@f}7`hxWCbMldI%FfQaW{&{d7136?X1 zC&BB2moB0VE2d4&LQFW0i#H8veZyr&|84(u~p9FG1TnRLgEq)5V6m7V@UDd>`P*ug& z&>fD-`-C1v8y6#DAF^2w%kj=VvLJ?;oS#8n5j(UU(U`ayt4x!%o$*fb*edMNp$8F} z#Sdsz6=r5$AMFS?3zCF)sY?8{(j|Q|)T9r9fMgpphRn~Hhr@%m&ze5~^XDFpj7CSI z7lXGQ?kRWmMBOX(U#-94uDGb8YUDLr{#^ipsplgqH*vgE&G$)8?c z_KNANrW>xcS54zrhUV-J!{l3Vz^|Q(Q)p=86#(R4kZVZV@Jm8Cf25Gt5U{QAKlC1 z+!c};?fF0C=Ipfa^o{Tyx-JlC3uwe@XjOgYdAy%FQ-F>2KoX8WmlWo1KZA@}K>E4& zZP~+>qnk!H-LUynM!)ET)&l`PkH>7A+Z@4nN?*k zUwj&?myqD9A}TGLi%G6nfK?=j@TC$U=eeKoU9)_duIOGpu>&o!==}G*TAlNr*`*84 z1~s}ObpjmL2p@b<&NvRm6rWj~Xd%|ZNDK5+bDo^wBii%a30;H-s_)T3BxPgQ&?pK* zOlpbCQeta55SnlEgFgtjFN%o#r+ebMM52wdA0Q6hx-vu4sHLsJ4LWdi3(k{X z59&(!2tOA~pxm;jSJ$iWHS`)~Q@SO|)87Pr5Gom0mjW$c?RtP;u(b6&*onC@vEs#} z(sn6gWMcX7)JwCp!4i7JWKtoc&0H~{0X z67ga95(j}SHku;JI<>D|MP8tr*OrpAwDzczCf05$#8Kjv|9i|8s;yW;{Ism%fx_p3 z72rl^dsGIf%%n!uJ+^KIeX_zuPSWQqR=Uh{{#(VwDwKVm-l|w?$2$glHur$Twp11q zN`sXb^(qpjZ&nu5Usqm)aJ_HcUN=*t^DtDb1-Rx@hw9_AMk`=71O*Ds_kT+CerTRWRq zpN&R>9i5esr{HN(i*wSp`I~$Ig&()Uo}?*hPCyCT(AcpjX-Q^a8@;=2zb2AFw`_la zSn1>2i%1m$ic%}nxjZ>A$InGF`}7c?+2mll1!{JBYx`=FMGZTWS?EP~DG)M&9}|mk z>Y*Kv!)%tF59VPCKm;5D;uMu4QBmx{h7CxL(a6sAebrb*458@I3`4*ZYf{W`G$9^p zY7aMyYmtFPM;Ob3gO2qmWSv2D_?0PXVfHbb~}- z6r@8#(@TpGu426e>k#$q#4stOn;XI{fL*|&nSkKDMnIBzFjPa|*yo{^eLgU17YcY9 zemIBLZSi5sz@Y8rlUivEuy(FVlK>mos0nfOkE;u5F{E3rdoAjk|Nkz{Y?rX3)?COR zIOv5s6CrG=Pnp?LR&^WcIBH3DflIHSv~K}iR3<&*U8DHrw2 zJW0~)TV3>r>kahH{cfv1Nub;Cf~8j%JAvExrYB2Z2)8(}bK{1V!D0FSOnGUVB?3X? 
zbKDD>wBLG)L`-v`C$2Ml%Hf7Z%yUZ_FwX~BG)T+Ajk-4_b%?S9mc-gag4`v>{D^S< zOqOM)Dve_iV}Y5dC*nt)$iLSL^f>T@t4EB-TV zPDH!I6hQnTmQa-xofwXRo-&P+u(cIQ8xp)M@n4V}MKXb83z9uR6acE2+YrAGm4JRh z;S>@Q1?2iWBz?@nqH9(yzY2;FE#0SCDvtj(bVy$y%&J>#L)C-TW6mp{%bs^E{@dAw zsf@yDn`0<3m>4U$vf}cJDcjNk{tobM8Qd~fd8PVt^$p9iJ2`~|J1*?G?eap?mcGkd ze(YL_7}Pmx95Esey=0rwa_+J_zQQY2m#c31%BFl}H+;dt{R6v)5C6oS&*1U1W(az1 zm&|kHZrS})cK=O#@iYX=)g#s8&aZf0^t|I(e%o7`a+X4B1>%atc*$2*ytrb@vuc>f zakdfLcprb$zI3{93F7K2y_b8R>l?`!HjFv$**H(ZOg3lD8ru8J-m#*=gP+}dCp!mD z>%Ht94__`w*RXL_g22S@_PPT4XH;SP1PPr z9XXb=A0Mm%{JeOw+u5$s6(cK7jMd%9E*{uEZOa~b{IH-on`8w1YHDTP zL$D~r0ZYwVpp$`D(p)qE8HpU8YXr_-{DoxRBe5Si_(h2fo=(+!=$3sZvzo+!L<3L% zVDjd`oQqKWqMXXqdN<}$ilG}JCVP5dN=Q<$<+{@e3!a8fuIqap82-Q`UOEu4h~I*m z_$HF=KomnWSXKBLNztL5l@x18f}fMxg-~OQ(AcILG{hz<2w_kKK}bNSN+UCPtp7AJ zH35;(kuuAt8a~oYuS9Za;VGToi{khUU2&>u!(1xXiTPR6``IoC=vziw=qi@c1UYio zlaCa~MhPYAi@t3R{hQ{TZMi(HDRfW%_|)4Z`$^~k6=KK1S0T33$^d06}f{h!mt zWGO8UkAcj;7Ebus#@S{+CeO6CA&->Pj)q*|tv#Aco}n)`eA*b%GM~iLy^ULfYKG)0 zuR0dyQ~m=N`otM8!FfFys&TJGt=K@{ZCsN%AK)`7r6-Hm&?Auw_b1~Xm?FRU0T?X) z7}sHOy7>1{gd-!0o8FdhsXM&cg)4WGd3%N_HVC13@+yJ`8GaZBLI!|7L zUU;76UG^RlFewbf*Fgv#7imGu@B37-)PnYCxI)VNsZp>m}c01le19bIs)AaRd zzvlO3a&s$W05jW<(uxuv?Qj1sM3B2XE>)St@8HyICn!XU`AE3EOV}xjad9EJtC+fC z(wQznNC@H~?xWV8P9v?faEu0ko*E5yAmguERL*T^xd;EBcg;ec&0H>_$?9 zcfS$}DD=DzeuBvWS-+>T&r3a&?v*f)!lZuQmHck@H2In*A71fBuT+JWc&I&k%K zh>&dbt*)~h#)L2Ln8^OpL$^xSO_i)mEm;pjwIPZTSbi&u#@bjk2Gq*gTmaKDvikRn zzDfD4OP6bc85J!68YKh#MFSi_jXWLni;u59iAMpwMFgN2yCfmh8E)zkag69@1A8Lv ziYXdviia?@Cd$CCn40792soRBAR&N)1F=U{+nvxNB6*U~VA4yb>7Ha6(6h<6oVxP1haxnD%e#ihW!U?cI-TK zn5}l71S!T%9q~w4yRcbY4(EV>oHRibPwZ&tN$v~>1dOJ}GNXaghnX=#itchqKWxx0 zCu{F=Kt4P|P7va`3;#iccs@*MmJ#P&4#f*1lKYlO<>;_d>t;C=Dz zUXr-ENECsdYBEvnxNaNUVhgmXr)u3cP1}s)J|~mu!|gO_GShZ;rqkA$Y0a57^`!kH zRXb6CzwhjQ0U#y2Q(*SovuDq1&*S@k-{Y)1G?X*&cj%|Tx13xsjNju;@1KpE)3|~Y zreS!7XO@ix{cA3ma&IkI@|##l;MXeKmE=OwWS&GhRY@mKZGO#cp zakD(QFev4R7KY__WMM>pM;Au%n<|f0#uvsVZ@RpvGO;jG*}Je;?la|mmHi9*D+d-1 zR1Pj2tQ=Z6BbKw!(4|+qmAKI`M9>v|TH-ftn z+&zZ7QEv=)W4Lp0H}37h-5%T>#odIr7k7Jc_qeyu+ke+s_%ZLWchEa@*IYQ}Kk6U# z9sjWv>;?gD>`QLAg5T8qit=3#u}nji>#hoX#FG~) z?y^5+MFSUJf3-04#^u+~%+EywS1z1u-%Qz2>f#%(zIy(0lqm&;Vy)~hMp?vexXa6B zKT6cHSOK<}|M06$HM)*<7&R#0+B|tp(dp`YA8* z0%mj}Epc1o8Hpz)o|SkC@ig8z6AjGO8mj2euhspa`N~N}*I) zj#43#YHD^W7i9{Cs$203g(z1jRBB$M%=loTaI@i-<0t7t!K)Pu1vP>fj_k!+tsIRM z3U0Mp3tedu&qnb~jWHNU@PVNYGS1+`C(oU_cFqs|VmPnd;*C;u`C6kKD!05AmWshO zPV=>L&Gmd0F&HU&(;)n3G#2&#yUp z;Dc#ru2HYoR2Vpy=jSd>BYYj(T8*E?>r#(ie>F31Iqv zC_fpND!x;6>tRFnbO+laa3;&8fWvYTKeEe6RU8=}J-EJGT#0+sZG5S|v*gUBIawsT z7vOmBEE8fkl|QyMJqo*&meF}gH-iTos6;QhSKN?eSZZQmg>2P|eh^@G;$84MmQ>B_*#d<^sagsPg;UN2{FdA@i*{Fr zqbWysIvRBl6#tZS8Ehh+tM;Ils*dOSm0C4kOWi`naxFlav;1in(iO{QResrb@dV^p zliY4ebT9N3yLZ{glBxzM;4sMz@p^h-Eon6|1Dgm8r+OuDT;glEJ@1q3l<#mrvBCEDg0ZCT_NVRhGE8f=^Xv}~u+?+Mm(92S>osdVxt?l) z5$!j=0esc65e~Fc2nX+_*3;p^R%*jsJ>(7DGSvSG5AzQBQ=3P&P+kp(k1(ZbwNgy= z5;rWQJ{vx|fv3TA_!xh2?|8!-<~wG1bo236Y70GG-O~c@(7y+b%^zcJ!BTjvl}5`W zZ2w+DN_?u5=ZWy>>hUd(S0?OFos`sUU(Ts5b3GTH*p~ChnbS*bpqKT37EXJk9JSRk z_62#L+|0LfTPAw^y;iPjc?oZ9ACRx){sV6uIY;|?{nVC$@f%nlYz=(bQb$^Yp4~7I z%g67rr~4i#ySJ6Zo1Sb9ZkeXh8eH9nw0+X@(}+FON+QIl95h;~<%DN?6YpVNt&U5M zPj5aeBeNGN=<@;Q^$hZwC$U;5TgV&^^2b+K|#{^D|q9Y25sc5Vd%R z`NXq{YMY<$pn~lx z{r|vaG$(sNQNB%)^DmTZMR4_(5NQ|rQM;oaMKjyFqK+c%Cz$>?f*myjn&}tahlsrB zFm}1IX_5-`yzZ2x29YDm7D2?TL_i-{>M^F6cXn&>G9F2b>Jh{~G@ts=I^{I&d_M0? 
zC7L6*Z{Nrs!+-W%fqlce)U#K)v6Ry^39PM7c|u)@~PGR zsuLwj)usC$Uf@1Qwt2YwobpqLfj}g{TR|dl)v_N(HhzLArMYB-Gvy_m4c0EK%qXx>TSmjon|8c%u+My0Q^iv#?*6B5-hN|5@i;F`u(aGM9E^qt5&1z zqF*hpR9tl_uxOFYW*JqjThXp~Yxy<7^T z1bAFzEe25nm@GBEBdoFA=3)v zLW)7r1cs-u=m%jTp6>$WPfCzdpj3=(|CU=;lv&gOi|(1Ng%`+9IYGX6Ua6Xj5`a$8 zU@vx}IQj-=%=e;^v$bj!I0`acoEwlSN^o8#&YrncxODlAONE)sXI?*FxO(B-{Au*^XN@d3|%&jL0W+#K?f!fC9{;Z`E%IV0*@?bbF&TWO}={LNMx z7)JW=f^l;+9NQfCfCKPkRO=g>lfBsZdg*$5Q}4jX!eWFBsuLK551?uBsDo&bDqlTw zdG^BWtIqenzWLw3{iRrz)S%93Q~{OiL;Zpih@|jW#6w)WmJJpxO|WRq0lV$I&aM-wZv!3 z3_5BJ;g;$e(n4GTsjOj*Vj+%Wp=QisEH@j=Fo*vX7V5Co9PeFrdPDUwH7+=k4q0?> zBO)-D&;kJRtpn>|Y_i-2TjnZMO$2RiCr~rPTNM9TC1DJdcriC zN5b@G#`Jf%@dzoB$~NTk_jbR zn&V0iNpi4lm7J|L%ARn-woKJa3}boelruXupeO${st}DG_(73l*wh8D`A z?D;!IzfP(t^R4Q5sxC6im3wpj(Aq9FJ# z9tIN#3@e3MH{`Dy|Rpiq@gDwO@ z;_xy%D4=BN>p!!m+x`DOA}tWaRlLP#@mdAoEnqED8EA*G_M50<)88p8r^JT&Cw^q;a8}k$JO0^tx>uTy>$!fJTZckeFm@TKhKg zsuF`df~iC59mZ!Ea3!hJ4DK+H6%N`~3P10!L4U10^|MHihUUUrT`waSgzrTot&KA--2%(q)0=I7>s+L8)@pY66 z90Ufapbc7RgEePB9Vf7#aGwBuv`?5hYx}a)#ru{ew-qnhRw+aHLT{I-i;nVpPfpmBP>r5VP-0)_V9+ZLhWThkwmR9 zAcK&JQFj@8kHME1bX%4fWiNFY!BJcRb>JAG$8w*}Ok{>K$;@GSRzz~&AG>%OS1^g7 zZHv&B2rmJpw(Z$o62DM|dujZpybJ~_8;!_2b!-EH`!5o)qMLN+*7Hu+ZUB`T^pc|Y zj@3l-E6~A;D$A+3RZ&DyrNlg3(-aLlHFpG!K3$)4y98B~Zc3}e<{G7tlCbQE2Ec;k zxW1dZNmqXTW3}dOh44JJ$Ra?+!gQ=+W?yfwl&FoA?oxIF)QQWBUEf=m*u8_c`~+)2TPIjWzF zpQZ4utgURV1eK%L>~`tu6LFp_@_Yp*Wmw|kJbkS|4GLARD0I`nu;v!%dLiR6Pu2rG zDC9urAq*i*02*W< zU47Hi2aC9vS{f8R-bS*0Mk#FqicLSdqZ=r*y~}rz_WQWnKxntF>=YchYCBSF3pb*Y zg`lu0iV?zfDBR62TAk4PqP4_b4Rs0jTM{aEQIO!5Y8DzP_)7&mf2o2`-5UVaqCk@K z(jt)LVDv%R4VZdp?rB-eW(?+E#;$8~_?V5Ha-dUllzZFJdlOs*41%;ZX7_I3RO?{M z%3aIP-sc-N)A6Hb)|tFGfAux6fDg@SbsW!R3r`ISdXCa=Am!9OKDog_mcus~+hq-_ z_wfMh688OsUS^3><)m6<_H9du*zpf=1sMc}b;8VB!@DjJjhknu(hr8EoLdb>yNyUp zaOjuu^gFnuCE(BlnZp@dF|~-R?~j9X8dq=%K_3DghE2y72Mju55QAz*j8yE1k&YcP zK*B*-MZ)6sd}Oo2;4tH{bDqXaj6I$PeD5jSBoDOn37S#P3Fn2#k6bW=d*6D z0?}f4>Rjk(?Sl@+<${rJwY@-SrFBENeUz%jazk)1joJXT(qo5uYdqCW#5Zh{h7;}4 zBfbXRZior^AsAQc*kR7sjdg2bE^STj!iwrr8)40h$3e`#fbilvr4ntDlKz5sb&R-@ zP*|d~M%$Huo*Mz)Tl_XOsMc;Fu2e{e4L5%A$$8cAr|7RjdL(Z=DSjb6E%c(uvkj$a z1Y_E?b6iRsXYu3GkK=v)+UDCbr#)Q=w&PLnxViK@ru2Zkhl4>`6=6sC={63s;!JLL ziU5JQBFi);I=#LCRQR90(ygEm_s+1N0Q_C_EB)V=yW>!?V-mG@33Rz6XxoZPt?lXW z1=!kS0T7lexb0>EEmY{7bi9V*v|ew!I!sQrg|D5?8dd*xLE4<;0>&7eg=7Pn#|3P? 
z?Q9>T-Z62VWoY=S@no^Ieq*7oead-S^QcifOS1DDV0|9U`XQ9_p}AqH=R)9Fn9+e$ zfjQfEK%_dID)`OR7Pt*mSV<`b=Yb^)B`<|pmedXdP-^|kdMY$g3PyM8L}3h1gF~fZ zod1PZa&sWehJ!$fX^PnILAfPrs4L;nDpXcf#j9_(b8QYvDiriyY6~i;zPw|S_qUOE zJXYXeY1!%zVI)W$Ha7P_iS~zl24$O0k)FhDO^EYA`W{+;$Gmw3&IeiG>m)JwdIo5_ zdzTIOwlW(g!t90#6Gm=*pp|LC5dp|{Un|!dkl232jKTqL030+o0X~f1feCoyA@!i( z@I&H&aaf!#j=&)SwZVk{6)5!{ZDonY*|RECekk!6IP!jPNZb_uVL0IpOK(R)2ONAf zJPMT_M)n3x`_#qqj7bK$@y8> zYC~8B%@gxbM#D^4Y$)0xnu!zn6U`CFt3mC`?EW2EtqGmle-4;!SV-N*?)vis}H*URey~gC9HU0Z;!WoI};^^4vPfSd}J0bd)gVPO=#(s zgn0lRahgZ-Z9hn;v|}DgWyh(UE(F7RmbPBM7MyaX9*=S*a82zTp?;a&0sDvbF@WD; z=}tt<4R)XiHQ?Gxyk9O=eVB(iGB7tcU}-{bxLv}WnsFC{S{eS7Q3?_>Oqnnpm1~RY zv#j@dR*@_PUP-AnCj2o5VironE0{|v8r!kjM5=(KYttJ48|J4SH2@uz?xH=%hQdNY6N3_;0cr0|NFT>61)oF2uxv;bIYdR%hS#LE{gxd7Ro+77@=u;jCA<48XPFw@el<`(dV5hQDoI)|&b` zRKJ_nRy3=BoNsQ+o5Q~Gpg!*uS2zz8B88dcKmeBIKYkTUZI{f$5h zS0Q?=CT=9vjD7|?uB~2aLFi~DD0GDOJIqgEg?0=PW^^2+@@HZt2Lu{*3#pmS?3NL{ z4FU>(FcMD@WCq2mvQO;H8G5nN<16@ii2Z-@=Fd}sp9ayMg`-V5)o=te#!K*9{mYIW}m#MxP z?uC*F9&?)6J@DPAw%Pt}L-D(aeSAkB*`s}!&CxGgH_?OcJWdTa-)yhhj^a&rX-94l z-)q^IZ5ujN<b>nK@|wF zNSG2F;w=T*)7-b05iwvqr!Hg=h9LU<4}&I+B2J^LZKgn%n8Ldpyf{d`2B9a(;BMHQ z5Cbh^eEI~Rp~QozKx|ebvv2<~PU0a{;2I*@S{uUPPH!@-$pjRc*eRRI&|HN#g{VN& zk^*aH@KTt?zBb#dvnMaS1CeXsFd!Lf85AX=-F!~NVoFBD#d-zpA}Da#X;>_PSo7Sy zU0*JmhvU+-Q$u>%I8W+z1~Sp20~0U0d8Ua0P$pld@Cwsp@?S;_u5ulQw0*L;RzgnJ zAR@Gg(+P(|-h6ad2GL=9J6J?#pvPGy9rj|fKm#zapxK=6+jiR00{K+-9>7~H+r$bi z=m!4;DuR2LUn(nE^|A>zkhuLNXn9-bOUUF~&BMEPUE2U>F&dAs4hGNx?qPg_!CnUY z7?9`o81`g8J1TO6`i`huC)~b0XRAR{-I{J;H5800SdavO|w89qDswRQ*PC1-&j{yH=LEe10T{wV{2 z3xZZi{lsJguWeyYT_hp33z8rm{wgYtnG>xj^bmz4AV!PbMxk%UP_%0zCELU-f!nP@ z{j-{&aws_VIm{XP`bL+_$9bSSs)L2W1N25hodc?cArsgYYi<(=0ZM~}mw>sW6MFU+ zAtyjGV2%uPtfMB->11#@5i$*=zkJIv^ys;~NiFDa3K?NNiJy=JV~>hzs0vDfueu4O$fKkE{RbxcSie<@xD3f^J$zu zU)5(>I=W;L>$RiM_A&hg68+z1FFD2po4fUp?Gy?)#r!~M77ap|ghR%Vyl|*cacOGj zD~^K3ZL$u!fl!lwj;HUibuM85wXsarGIP0%ok=S`E8^<=V=qtR3eF+e4%OZ_1*X{o z*ha(%hGh)Mrq3hyn8D;cG6V@M@zE&9A)A+j+U{kH`tE2=PFUo1HevHSQQ2-jTAvJy zU0R#x0S*VsGw9Sb3S4R^>@uD=?;bmpuI&bf1jh7s5o~{+@Jo)b>uQPA;4~wLfw_NW ztxxvaoOXvIID`NVE8$becq=Dd0e7xzB+>>`y@;saq7=LN>;ONX2*kCSbq{R&|88FC zC6_=^a8wd2*LE~jui;4TVtI{*v(nO)4*6Zet;lT==Tns_##$ED6)5_d1)Ad1+9 zLVGjW(vHuBCQx1wZmh!X8FOgvGlH_*@h~LrRM;+uS;xT?FB3vVi5yU5tinnJRp+m; zMk?y~96ib1i;tu5$gKxeM+|2Mn@@fmoavC9go;@kSl;f3d!Jt65(#5rSt2oV}JWi6!=QBtzaHI`lE3Ec7 z@Ca6cym|E%^h0ei!2B8a$?^!d@%Mw5cRTv>@J?3g8o*QQo^vvP^2w+3Ck2z?SQbUs z1}I+OG!NvRbLgx%4ruC#9zXM$&*XK(|FL!&>YZOj542}C5lxiC=r(*gD%N%Bje#@Ch#Jm6?fdg2BmS)xMHQr0H`ZhU-_Q7D{a2px*IWCE@;FDMbzRxaq zu!DG(v^x}^*Wb|>=9c~TBcr*ay3g7{6X2xM)-7m2RD&Hu^htzm>fbQfV6cr)5t{rr zc3;zt7@+mP!UM1whg4pHz#dyx`+)6l4<)&)2Acf*LSFqTw!T{zzJv$=lU?Ba01?Z< zrOx;PZWjsw{}G-&1beA}i|3kn-epW67i;c$VR?0dT>lOaew|ImkZW`}jgvO}pdU*s zKI@yV1QO`fD(}k5c^ai2oLLlEWz7JQ5hX2;C`pi zoBR%Mv-7A?ld|7sZB#vgKK%v8zr%o(s%PbJ(L_VkqZM78$>lKqFD@VIXKDN7VXc!m ziZ^Ik=IBSMr25ZDpYLO`++Vz86@4F(mZ>IpxJKiICna>alhDn>T*V=J^_Yn zbg-%6bcL5GVGVuhQGQ`!6>>k}Jx*>WuYy(EFfME)U?CfQ+6Y!p!dAxrFW?H{%DkC{ z!-cVhRfQG&Q}V?J=xw*m`d6_+hi)Y6e-+<=$a=#_`6g0EZX{_JQ+M$V2)HbJV_^1- zk4xN=_#VV<2%|_x$%!qve3+mHH}8u5VXu3>El_|q$O%gAT8^&>an%Z;z*IT>2vjLt zb6&snY;0wSRiT2p-Pknog<4}Cr{Q&uUio9GQI*ro4i0qa2I?-*C={S;VZkfc+ur!1 zp}m8zoxqu&ZEJG~IhF?ZaRcDn^1KR-DNd71))kyrKu<*i>FsT-1r~+x3UsBhg#|0S zDBQP-%?CF<^qwbTm_I;geVZQ4;!OKb)U|B$A<~y$qQ*O6$oB z#L>I-b{ZT1YuKKp=fR@k1tBvQ*;RCLA?5131V00iz&wB<4I!`kF9@OpY)}h>(4O*z zWfs-%GaapS`7LBIJ?!(jiwlFOfa?z@&~dgMw2H<9-xFE|6(x%;WZKgvwW}X6EwSeD zD*z}|AhlHuCT5pwHGFCdOm-W^Qm^4LoeY8lh#149QyFVQyunA!Y1lE3nUkjEnlk67 zo>0GrTw>kQUMmW>(EzpiqE+z&MGhUMDX;QmOM-VPbbWSz5W|9pdHGnLeAYml 
za+AVyG!6bewj)R}0nVdH!J~b>e1UD|8zIiNblq2?vECf|A=z^GBMAD#8wYk|%ZnKx z&{IW@FQ?nY!B}izqGb`jFLYZ5BuZa=hSG((; zk%h&GTRe%(Pgm7@7j@%uUq*A$5DoGI%$T3x^q{7`*xVo$2k5vAk zE+N0yvz9C&Sk=6F)BXDO>(}qSey{c87uiP-C$ot}Ov7(7@)>{R(07u%SexBzZ?;qR zrb%<6pKsvZyyr0{PqH_^z8`e9Ho>?yq2H@iS0 zEgkV2#>=xJXP>%Qv7J#rJZGQH$(Qs#7L`BJ`{jS@Lz}xWjUe#Z@W|fNBeq>RW@kky zUz$37v0N-)xKZvm20|7HV)C4^Q6>*YtB0-?bUPx3~*E#AbN@s9Eqh}Y&f94lw}xGU`Jj5B}k{;^|Y7j8~D>br;@TMVZ1L-tVW z42hDe2%G0~Zc2utTdS$mmek2p<0l3IzXo{csZ%HFL!C22pT#HhMSCb<<$Us}u4$hz zNIjYh0bcnffh6!8#^byT8qQp;zzTZC@Jvt3n4AGZ=YV04V8k;3EjL;%#A*#5TVi6w zjeBg7)e=iuExDwXOpi^m1?DzV>q3Yd+4%ffy@5#{V z5a^rnwBTvQ(}t&=2mMtIA02KdO){cA#UA*@X?-EQ5b>}Po!WC6x4aNw)QbSdE{7K^ zx66wxvO+g+xS)wo-5&Y^eWbQw3FC$CUPCP<-HT8w51rG|y3O6Vh+Ze<_R<^Qn|OSY zU1C)?Rom=EmawKmhZo)lap>Kx)%u8?GvRLWEKHps0WKRf#O+_E+3KbW152cz=$h7R zRcmY&<+dfZ(BP(5QC3rh?L^8$i;%PO19t~cl7b7ZBn*fLYdgJ$B?g&4^cqS!5Anth zC|yzij5mR@WtG@NOBys7Ur2cIr*zTeC3tvN1J+kxNb|})_R?nSYk}`5`}g& zwyUXk0lVG{11f!Zktu^S-tr_2r_iP-hHEz{-C$2i+-};gHZKa;?!^E*&<^3cHQxE0 z$-6Gc)EsIw^t*dWW7_!*Z@XZ_yMwOUjZ0(?dz9Dr$X)T{mLCp`U%t1uU4Ao>x|Mu9 zt&2Ebel%CKv!x6uUe$elXqV3(QZXO^-9!@GSIj$Zt~^uOKPZ0|9}WxhpW|=+Br(8x z`hd=xsjQotH-?5M=R>K{6XR)LOW!2>8~ghc!PJ?vvy+py@FQZjL;=M&XUg36L;0C0 zXI?(oxGxcLRCscH6K(SO#;%5tGo7v2zM0Eb-uj^N23GYWnM~0xWiq~g)=4LQ)3J+_ z;x-5%ZX~dWz%YQ%N0j)2U(o8=shf{TdafiFiUSjYYXrWz!cLzkLla>7S+}S+CH`ic+8k@ zlb^=h&-(vHRs;wDukArRFmLKARMi^f5(X z`2Z1QH(~+d<}Ac_))C<}YC93*~%C z9F~u?Zw!~wC63Ca_JLl9sF=NGHb0armL2=7{Av4lyH^Y1)7HAS$Yjd$&5rA;1{j3# zs966t5Kl9;0wVrRnyzIumtu#$$ec4S<2pAGvau@^v=^D!@0zs`N3eIp++1YmbaB+v zXm=QP0)Z`Bi{a#GzzD>~4Vuz(5?=}N5aLdlcpWR~#B70;c1>vuYO)dSf&K;EW7sxb zJd5pac1BZv?c963JN;c z#=c%_JVE)>xj^Uw92Ho<#REOcS^Y!S_d=JgS zv9hQ2^CSdQ(ICxyqf)M{H;>o?#@|?;VcQpHV2RGEsDk3S*a?_!5k&4A*#q zMUJ!~GSqY=oS(@~*^VIZU#lqC4-|pW;G(l`%}FE124KsuVs&p0viEF(=pLXA8iEM0VoLa zaxRk*)6_&hrqm!73Hwa~#O|sks9jeMNq)Eqk3-oGMAu_OR*RLeLRP1szMLH2(|ve5 zuq)Z7O6g)MaiNq>Suj$%2)9&97sdWJ_>F2ubHcVu&UD#z<`2q`dak)?-GnWO^0zsP z`pl48X^tH7$}cAWq<2#lL5qUpgYc3Clbb64F&09U)*yW`J-8mFCE`vu!IQP4bt>=a9>(@>VBW~tIND$5IV_;Mzq@;=KmYD}H3 zQBI_knnVla>qfV>5JkRa%xdC&FUsTHS}7L1y>KUNArnW8r`}J|nyB~U?S)h?QX^z8 zG$M&MEi;_-EHw0>p;0w7Vv+=&I35ZojlAi3N=uu#fh}mBzuA$$+VsQ@@fg(jDig=? zTGthS109#CvtPcQx}MF-6RDdDYLxh`uNw1$8bg3Wzi+_fe6wgzx_)Rb&)sQ1JZ(bYdS89BtazWAiQKRisz}PKtL(|_X%4^ zKjJxgcYjaY>hP?5`AvDTfAH6sa+7pqO(+mg5kD0MR-Au@iNe$Zp9US#I#jzUzu#b) z;%PayXXEj`Xjy6{|(a|2@3? 
zP_aB`3lRanuCHuC$NSW|axki8$}@wxkyp@BXC(fJh)A_x%I2T1uoe zvKJ-aIoV>F;Tou1k*t;`{TS3`mot;a?35t436fp%7aWtNi+&6z z)Lcdp35WXCvPdWxB{_X#_1qAE*6U?v=~vPvJ~zk)j6rJ@Wq zE)uUK5-+V_)uJlJx>?~dm2Ddfre`>>)U!i9$PQJ11JMGzGUcT&PFcfMfJ)woH!cKD zSM>!jJb#~Jm*fGvBv(>$T@~t(vcO=wI9v)zyy?3U@32R$k;@GWq0+I{;!rwI6G2qm zxS&jBqRVYTv5yg#k;KCK>6k)YSyk+a5iTnfyU0R_E+{+`52W-$DyfXm51+K%Sy4(+ z&XZE+I+&_t-RV@e#8Wf2o8?(Io0=5mnG|7;?GE{&!1Oj^Kjs4~N{OxXp{KUZ>*2s<^ZAx# z#^kiBP(KdomUCGwtQC|isqE_W>tH>5Ra;EFui7WIF11-Nql3)Fq00+(lwFJTb&C}i zFVk^WU3Hu#RQaZ^OVy!<9l>!H9BbkFu@-Fy~K~t&QvabF+(*l z^OeV;%|NoxT(+n?O`d%DmYrRm{y1F~nt)+9@;IW;A_H#8pWN~=drh9%+qrEshKVeJ z^$O=L=;(!Jk)^$`cd|;WM}3vZIJh|}+L_$6ojV83Dkf_D%JwKF(fCbjJk9VLz;P)5 zkLlwL0wVx^h+Ntc$}g6)v7k_)W2Iggp??@76oJ|WHIs_%7GMQcSz1wbMgEF~4ZE-_|GgBVfe_&5gSiWio;YUTDelw&Kd`N>o}aCBO^LYE4P4XOkO{-m;IqEj#SNaCQRXfN4l9MJCFQLSfX3h@N1rd7Xg`>|@RcLvUrZfoLPe8eQ1(+Xm+^le& zxjgDbOW4>b$92RQbr~W%r(%M5k3OUH->D-)l!f>w;z8#H0@1e#e}}+2(b7+3cg(uw zT-I?jbWc4)>u3V8W}51*aTo4o<9e4N{u6DZZ@qo@pBfERPv#$ufa9~WGPa9-ESJVQ z+0W%WW7kzlu(Ty8C77^g9XnIWPF@rr<5T>C+Kh|5=tuLV$#RDN2PIr;j82uy903{@ zxW?>8(ftjQiQ2U!$z-uM+#b_y@*uE&wv7p*O0&`QfN1(NpyECPHxW25=T6+wN!frO zDyb8xNX`FFU`T#&;^DC#LaD|RbULb#A!tX{g8j&`vD-(EoE%qETqT?mlXl;?_Te&q fwrKCC&2xt33-=5(Mv~`~JCYA1)5$@rId1$vuhVYV diff --git a/ultralytics/data/__pycache__/utils.cpython-312.pyc b/ultralytics/data/__pycache__/utils.cpython-312.pyc index 184140b8e1c4bf47561d9ff41553607954cd729b..d6e9800803b3f470b26a90864400e14408922b61 100644 GIT binary patch delta 11942 zcmd^lYj_mLm1uX*`=Qb3HKUO<(g-~VNyAeh9!3a+ct0cr27xV&)FWx6(Tu2W0aAJ_ z8QU?~#0bh+aBPf-z1Luq8yqDYoNQuPJF&6(!U&Xw?TZt0*SlG-voVgGb?m*@zUS01 zlHxe`+x_m(+tQq_I#qS5>eQ+8sOtazcfw!4A=utdN-}fs#G3zS$9DOsEmP16M@#>1 zM^AoJKF^7qXbjEhDQGI_DQqh2DQYTWI8&&&r=+Q*r?jcG$J^v(ICE%bPgzqLq%BS5 zot$WWj&G`fcM`mP0vDJCKTTH)tv966wR}3?Mo%YYrHl6NQjTkyE2ccmHO&)K0nVp~ z`MkkOF$2;ILSlYE>&coWKC1R=Q2K-Xd zE9S!c9)VlUiFprmqUR+6CkZ_*6K9I~P*xpqiZcc*948ith3y)#2yn~$d2iYKSZx!X zDQx5|bidGUQBD2+o=~&o-y5L663(w&hWt`Btv$V}MhthTnyy{|J-rcC)7#zy0C94# zUDbB=20BzNBq6;o(AvAqiY*0MqHkyllc5Pz+}E>L)%HNe(a$x%;j`w=>WuURX6@V4 zF-r^tdcy&ER(~WE**j-8{ajbBs{sOKqqFqK1-^>DVJzd9(+Pm{Xr4*t7t<$A-t^_z zhp{6dH+TA_PO=@cb4JHZm-wUwkR-?luYokuQKymK$>WdeOQmJ$bIyC_G-R` z{@%VNb2%VX>)rqfw)Zy&d;A>%@;IRCMw?Qu@xs5FM}29p3L1g@+vuH)9}2qr6fN0A zpK#^Uw=$R0x$e9?Lt@cHji`A<0BC(gXct^SB)OOd>;jDD6lGe2dYgk3Td-vTMIrLywj!j9s+Y^ty&=yy& zFX~jBijGmr0ZMLp3G~h$#rYB+PeJdwiVk2N{XA}l1QrJqI;LLs?dn+euqVV3w@8kAsc5?e*<562)p#cSM6G+^$yz;DGNRuW6dk_!= zeF*l_!Mq*CyCAvA%d47DAbeIJYp|pmfP`Y{@sQ<+SV#YN-b{W!E%n?_f9mgA$m5@& zJ7z4-1li-rK>(l{{QCg}!>T42mg$Q#^0He1@691Y@FEWaPz|Bh=JsGHKpvv+%_tds z7;!>4Kpw#wU&owL3Uu@Y!m^~Ax&!^a5g;XzN3j??A%_s)>{TJusv6oten|>QKqI8; z0pPj&&_e}QI$WSDfjncQnmTPIOTR$QouU&ffb z`e+?}v3U02^vc6^H=XX|14jp_GP38IvovP!iIwk&?QD*1V7wcoWtFMr&1D6NJGn$SH z@1~?4TyZNU?Re(V%oB57UUXv5$ow;frRYK-x6|)*EH9ya>Hx}!EUXhBh+6$OBO0A7$<~QbN zL;B6+>c-jn%eI4D8@QFYV546&lMgOL)vw=(f!rT|KjpulIik_K+E+VPzx?OD}u;o={MdJ&|@# zZzR;;5eZ8kIRe<$VAxOkJt#+5D0c=tQg5Iw*d7ds9;Wm?XjACbrVM)3msW_GM8$Zq zU-Fc827(=(a=B+;P?S5p^mkI{&8nLb#Mj*^`C@gAB}o;&Cdi=K^00mmEe>uC*;_|$M$d89t*M(*^> zP^ZW<*K?0&wvRCb8PW#Un0QK|V!0>MD+eQCf5^Mo(+Gs(MSK0Bz5p@@_H)P+46{Ba znHIskBfuEu^V9~~{e2}+xtI5k|~aGWn>7}Nk$ ztmGJ`A^<=!>li}_x*c9Kc@|4vptsAj^`|j;kO~z({wH)!#q7f6fKaV>$zMu-f}bGxIRZ4H5;##D z?sYJJMUYf&3BhNgDUmCH{3%w%+9;c&?%6XmgDB|le*qxAA6VZ)oGfixaFfv`w6V3k*~ios7hqzIA8Da>ixLiS)U zY%?~gm$BB*=`ZGG<-dWYx?org>?f~4S~3G*lVgA^r>6Ni+f_hb#ZrjxU`Fp*ZAaD| zT65E6JGkdiW-Oy<%v3becc%XI{@A>gVX67KGtcH^h2xe7`dcm+CTpt{k=eztQy^2d4cCWqZtbw_$sTK!}AO1eT%lO#F?vF zNe=XQHDncLbm$35%%zdgk%ocBy^xf4m`Ahdm;E3r%@~!D_n`cr=pPpSPQTp5agW+* 
zbAux(Du{ekBV${Rqq7w!-SVipShC3Iw6RAKUV@3@L*~iQF#aflY^H-toL;S*l)%Zz zFZL)}3>OpaqJkoH+33q(OCOA4XDO4_1W`DrON1_u*+qRVci&cs19Xb+kbN&N@rR9y zPBe5c1a@mhBjQMwoR(-WXCzVz`uZjoS(s016CEes2YA`ZYEs=doOaQY!1LT8dsjBy zv(#N;7Y$3H%HeHV2*+7J*_I^>fu zkE18$)2Sp{uu&)fWePASdx8pv1}F7Mn||Y-6en_`K+a20O4OpKrL;jqH@|Sx=I#@@}pGNu1vtm@H#ax>Gyi~<_i7J4j+YHHc`!ah{ z!4!?IB6gVZ^=0X_;CQw^YETSaB^X(zO3rCwV?eQ?HBhUUH)dfi7*HAxNX!7yfK3?% z!2n{n*QDj^5{w2h$&kyYI#YDWcDbC^SEnTbg?N5CX^8R^BRyK3XS!S1HHVGwO0_j@ zvVrInAvBrZY+ckSS0wsTjEVlS2PQm^CHku6ju_j74q*FzxiIGGtqh8BI!StGj&U)p zk%z4jNcde+Kh|;w?d?3A-5ABJTJEL0y|O6T6g4ZRVV-5zezFES6AfIFAlr4Vlia+m9yOD!~mkUuuWb) z*{NA9J!GdFS5{Xh;hBIqGof!_B4sBv(PRkV%V9@5#0n)Da6Wl`qOCX!CUI${%Y^tj zP}fX9Kb)%-=R%jd+9KXg%WB<=>=2<>COXG$uoc#wXIpMxB4zxl)aZk?4-5Qys$YGN z{kPDkY6gcm2s#@)WqNk?Cc1U?c9W_L`+GpeKXPo%qnS+S$IsYM@t@U@51<|SEj_g+ zQeF#5FHc(WrJ0KHRV}juaqBRY2=#N;c^BRT%7g{8fBYBjk3(E6Y2jGGf)oCO;*rin zox^*MbVK%2NelEI%FgAz0>MENCw+cxQ|{f)>JW1Tz?V)EfIOVH)vfE~-=W`Kw@d#U ztoJ^(uP+2sR=GaPr_;;pa|bgKjVsQK6NKM>0&zP@1&nIh+8>tv`&SVXAp}hZLGz+& zgW+~OK8*Cn9f2_{F(SZ$ve~Papw#Sdl_H@&IY4e>D|1i^h9%h_ZVQmjSPuuTW|#p* zWC>#;@+-t>{GkvHY*+|Kh9@`VmZE(j4glU1)o|CCs#brW9KqNk6!D9y78ioPvmq-8 z+6k;Tf@%beS#6L~wYY*R-=p%f$`e%(W%BQUmvBEvnOfy#_TlP4TEdU;5tz_ZpeFn@fmTvU><#8<1C9cW4Q6m{Bg_dp<4RkMu%bd z`SdY&=-(=(Yo3v$ft_}_d(lhCUn@4T7DiDe5NedsW8(MiVWpvG!pYoKmGko zn%v-;FIXF_+RIA{8dCIc+dK^p?G;BxL$>)!xfWm>#3GZA6;dda6Bp_3hAI$vAH=8QP0lT3@0^HdlhuoyWh+~Vq7l<^q+mNWVg{z-cGf{)IB`VS1u4f=Q5Gv= z2sjIMg8J8pnN!6cR?HBr*rf1c24hmDVw4<=x%4O7tf}D~D9L0cPQ@f;#7jQhW_IwL z>`gELh_eB9m7&UaXD1c|Y!Ju?Gnr_E%wQnc`sIRkPgl&7R%lAi!+EKgPJ607>C@VR zM>N&e7;oF4HcX*y+gOhb+fG2^4N!af) z03mof*%i%n0O5_96YVg9iFPnml^ zv)t2rnmN5EXfRvL#n)7^fOAszjBe6Mn_7|{4S259uY2L3;2?7lrlECB=j8@l%XCE1@thJ1nG0|d+({2Qb;kqLV0{^vZu zhqTHsR(a278lTVinP#SMCOiL$9}fMFwl@D@aH{D95_tr{hXB0!aVus7n?RO zFZH#z2ltb7mH|sB1_?8Q%zhFxHn9Rw%~D@$FNw4TBuUkfzA!3$$(CbdrMUXRtgCQmPS+85@ub3qHew zHfs0Jn1>=ZH9rC>1Gux7JcMXwQ~wcD4+DUMdQrm3!GUE4I2jN6o#slYz7u}ZS^8~% zX~iMKEmP9Lj>p|MOztt0JC^f0$J$c4K?XA?j5$(D13*wuVIX5hYW0t~kOYt2IXG$5ayk^Xd7b;`cf*Y2yF-zIFr2-0VjuX~vNrmqw zrQES_I_n)9SL_@8pmn}b^QJyyv(@-!juzq4jLj+9%c(YiZ@Wr2JM>osEyA3%&0r8= z8k0}LI8H33Edke(fKJprJn8s1=|wGE?;1ptsDtZQqo{|Rso&r=tI2D&t*DI$`dekc zEcqq+V&EsE!S<^fxL#b;bw#&9;Xs-4aGUJzp0_fwk)k>g;%rfiI%R4*f!>>p$U$$T z2gDx0Puig!ZoAt#4FNXIb*qKLJI=;sndbUKp8qUV z)_i%$w9E3008~(%*aKFTYexT&J^h9PPDcNXJ%u24QPbT}A28yiT~tg51kn&RJz$!4 zi)N+IhwAi9xYpATLf*mo&?dgUgeg(cA1*3Aa?}7s8Tir9h}XF87Op zI!VPYOsoe+0IQJ;2xhSocI)2j2QT(9K>qo;l@Kn`UxJ!aA)OVze@l_~g3UFMn7&y#1*4mfdy2JJL9ApLxq! 
zdd78r!*yp37MH|}>1$yx)kJ3VNpyasIJ2TQje9-KR9m5WJ;z#Gs(HOs2kADRl1LE^ zHI&ewMhvN3yJjml&5Pp{`s>*l^tTaVJIxng^jlbYKU9YEO+9B%qf7QIE)Y!So3`8&@`?756(jv4-LW~{ zF>7c-1Nc~SKByFXIz6=~1CHr`u;vYblq z-8avWf}(sbb@b>yUNHK=Sn}lHA$mI}Lx{)b_B1i=TzbM0g(w%w9Q1s@k*2P6>)}S- z-nE+T*+2BBWUoz(ORz|kkfU-DN4pIr|u=Rcz_JurI^H;-y)5BX&|5RTve zK8s|(jo>u)W!{c=P%Ow)HMoKQ8n9j?L8nl)v_&Fd=fZv&0&uiYDj!z$P^&i}s(c&y z8$hv@k}v@E#GiPG4GMuobbVoQpp-@)DCK`QdT`Llr=Zo}MDXM|ZtLOeiXmTW6Ashd z7iO2ma9c!n(c41>gE)q>ylUCGdBf(r3f`+xjlrIdX13Rv7GP5}qvD54$6%WT*ZYi| zRt!jO_$_Ze@N{1V{w$8l}dq={)yK$^U z&`^K*IRE=d;w%E>KLq(b9g?b-BY2Zaa7(Cmxcs9F57>6JVDnR06`WYmzF6M~w^nVD zJ~%q4$I=a0V8YZ^YziD>UkpfODQ0y&-RQea>hidAw4eUff!w-5q-KC?+*TC99Y}(C zFH9-mb|lOXVN+nf4f9(b#R7C$$UFos^s@sM;F**@RHFX?5Ykq@YxFCHu2eyd{kdFM;J-%N?^Y`U@2Kep3t6; z+<3nJ0{^^rr2aX>3)a)tGyZFNvoGwvmb>mFgSIGjSbN-j)O@1yx;_67RxV}H?X03R z@^|~s_QNLxv#KwOZv-y|W80fs`dGucvQJr_vW%=cQ*?U6jlzXvg$w^ybG>lsjl$Zo z!rJSFb+Lx+HyU=0HSCJ*ZW(Lv|9xib&7zi%kivvUmytTme{929*ZgqR%kEX9@ySzZ5m^F9Yy5y$IeJbavoHJHh@pWm?&vO|q{HGH$xV-gns$qkv zd^(ZI<%Rf9>|AO|ET#A}sSMWplB0BsPJgLd3-D*@`CH7^H+(h8TkPhymy~SDu>Qi8 zza?7}%hqivG{6bT)PtIWsp|g)*yHdGktl??9zM!L?9O#*>48U6>C#8jv+(n(bJ~RR zgrfo-9DqNKVH-W)<)EjM(&45igN}BkX%xXlztiPfd~Wh9Ft}uy^1E=GuqR<5K@)4I zUBYO)((pW;9`DW&6b-%DoldO>O>5C0um}9UyAx0ZFvQ^YoiimY3ycY@WK?%ptL02! zr15WR%!f~EIXRnN`9_W)n&|_NI%sdY4r3O4cp81wN&n_xHhepH?O=IHGvFIZJ3zv; zBp5+hi@35GFvLHQkRV#fJLJhpCJ`)Rs(_hJlBh{qY5gHr-D=G5LBM>i$-VB?$1N*s z$#ypGVzfAs5Ob&`Jq*pm9=PxO!J&D@l`uhR46sAoKN)AgYqTD*9J1Uny2gyIk;3!o zF{5kTxZr=#PY(U4q_Rf4a<1;Rxw@50Oq;x#0drfVEmGxUw{Pp9pE|ElSLZcT^)cVz zO6=E)8aLS16OcP2B0(R6u#xv;Dhl8%N6^^EPi+?>7Qq?#NqB(qF!zz*&>1IYyL7HQ z3pjo1M}kqef}hYp5}Y|m{ytsMEuJv0;dNOPjY5(xJ5F!mXz+1SWASY1hn~Y`m@mEg zM7PBgj>w+Y0Om(G9XTM_PGILv02*OAWcsV43k-h+xlLdXvXAAgLZ^p32SC-wze`bd z?7I~58Wx}`kAL#jf~mDVmfz+?2%v6AHFh^)grf>ooR91HXO@zM4tZF=tlCzEMYbomKsBRe8uf^1QB2?_pz^TB;ugnrynY02A1~^2)cRX-7m)voL@mQI}k9x5q&}?$8iupS2p3Fp=a(i6|_7f2dSI+bg|aGqo!aL@&(@(0-= zL?-4@3#l1k`-wl)kN%CXPY#A8U!+$eSy*OVvueYZ=4}ld*jI_`Wv~-W&H#<{-@)1ltj`BX}GEexXO+LU4uS)^J3{;x6l<}5hOiMW*3$TKYk@Vq8J|5Cbc^klGd@gT4 zMp~Bhd3*xASHL?F-YfYew~$)~}44a;m{`1E58?|7EQ zjzLXp`2s!z%GSB*#fw=pVUktEyLF=miE= z5<}cT?~$ow)gMS=c}bVQ*IhDjxU+qwi@uQL)3thutfW8IJL9T=B&6$fORZflu}f%zZ0^)w^{*21Qb-EeX{T0bpf4HM zl0Ec)8y)cqhu(eE)6pt=hTMW1NQ$RQP3Jkyc4S#f-;3T%Qm7*)k?fRaB8LcYM{ zYX&~uf0>WgrF0hNDI-j<;c^t8zmOZ!!pJ-Mf=I`^H`0t=E1gZrC5iN7dwO(~oX8g< zeJ9JuUtB`Brf%a@rw-6ZQiuN4b5>0C5)6RcYBKoZNMSvMM^MJ8en)K0pY42o58V5cJb$)Ar`JK(fwBLY&v_ zdxjOZV97cFA}%L~L%{hjY@^Qf0@6zx(+`m{`up?(lf&z3b9+Sr+39oXpARd`Pk02f zPPRC_7U7Le_yhcuXD)H83MimTfdW=I0>GIiXfQp1;3$IdGQ`v%0vtEtTL@$XpjAc) zAh-v?asaGQLme6Eilq;ZMTV4quP1rBGjFpS(I$gTCZwRU*CZsA_~Rz{xxKE$y;w{Q$8?#G-?ba(c; zeX!B#d)$M4eyB+lzJtYBy>J`>sx!oT+d|rQuS*o&BFONDw9uZ`cCQ#Bts&wG>3aI4 z)?Sy`Bb)$oF&O}@E=cmrxxbT7OBB#lV>+0@@>o@1L*(!PIqB4Z+NqH!W` zZ0Ky=1=n;+$&KX7>Ez1m$;*OqJ;4<{*8nCjoB97IrM{ff;V}7R=2-vPZ5J$O>uw~M zOedFIPcFN~koYz-leGC2;mwSz^>6IHn)B*lu)g8##-`w&z0-U41snHG*S7=@9-6K{ zG`*!YxcTm2f@_vTu3&Q6`zdJ?8E3aVx%TYZb5+5(`e1R*E4#05vDV`o z-rKVC(bzKyrxV7?Kt|<-?8((wrwtXK+}Ulx@#7jyrV64RWn z`KO{-gjG(!yq<4r&NjSWnGETx(N)b(%^OkG36Oa+cQXTg!CX`GYE`g&Q#xecja4_N zXy3K#Fr6L^RPPq6n^$PxtuO=T2BY6o$lcJ}_vCWM8xDe*T=ky9saNy+2|M&DwRBnO zxdN*}v3(9Sqx5u1-p>7XOB{C{T;iy6ctnTP<#zb`d)wTC!{6cP^Lqz7{XP*0zOE3L zNd5-5gZCWq@NVAGHs~mK(!-^8b-Ul|7wAi+X~WJ{4&FcD6J5Q1UbjPVNglt!0gv!7 zO!)9ta~e}V0I+$5IhEbUX%NP-Nu;DNGOEeN=cJ(jvGN;l9mvkp6jTO5+S5FJoyIcq=2!ho-29x1QKK>K>~;-4)C% zpSG1>+H^VNxvjzVyQV951>@^)<`rC6aXxTKd^>N|x$-Ahe1F9i_I2H>I@m?b+sVkL z_f1jsz_QgGgYR@>FThumBAz11IU&gmKE0So>3F*||yroMerZEYQdtMkEpW 
zCoLx|-Ek4Bxi?+EPBqL0{%dVFOZJGE6BeGmsE+gsw4kYB4mN;OsghMEEJuh)PJzqE zYtfw+ig_L27@V3 z6ueM?#&K1IJMzAls2JXa4G@xfv+}ktmO}g25L~i= znvX?)f1Ewdp3B7eo>)c1@k9<@v4$lI1~$OD>CzhHWU>uU8q47(8^j`ny55G zU4#U*syZiro|L!q2}#TX!;XMf!aY}cWNoC!$f!)klaV@LV6<}slLn1m%!QFqxR0j? zGuOdLIh2vo&I@@_#T>VE5IA$t@kuqzvvbuQfVCXZ1@r*}1S9%UBK%n5=;{s5tVl)R zu|h=)12o^fusRJ;RjP^s0b2U(hSjMiK1DXcbW4@>i&`6xLgth*(X6MAjjAFoaN5I- zcf0F@P(UYQyC~ zTe2`u7r4e%J~dye(3Szek>1mk&Xq$e9^90(=PO%*3K!^HJxvX8JE%>+~VD&(#Yi>KHw~?STZv zXz2%Cyntxf-&ciD-Q8OHL!;*MuvByl@+Vjb_%iq|H^k z2{Gu&eI??Bibx*4QrkuTg%<7DulWe+{y-nvkquUPYDd71CM{&}h#sFPxqR(zp$;ij zwCt|5VYEO3*k49KdmPgAxdaFjg;>O(XAx3)d>we0=I;v`8wP!n>*yvy@C$+kDHJc~ zH<;2PK*fZN9~q$0 z{Fw2&Hv3}Uje<4P1#2!hUN6{ir8`*PbbWL4&Dit_6}9gy7}muuqP-cNFcx<~x*lD5 zTs5mCx^zo7x?M4@|!WXGse@#V2b~GOy5je=967#yC&po zX_cd@(WXa?AKEf*#wCq)j_sRhyI{G%1q%-clZ9(>A_S`TegcuC1)v$^-39VQbxlpy|D3FI5hig1`Rs4`O!5|Z8JOrM zcxjkA(nmztn3-P|n|7~Ky_uJ_yGrw`HI4?A>Yr8d4SK^rC#f3rbiBdNHfrf#9!sFV zZb&Cq`f)=p9cfIYP$&IKqn()1PN@Q{3`bfIDOSY%!onui9YZtjiA$4N*he(u+`>v< zaK^f0Q34y_=&i@%40C=^mK0CdG^GslIw=9#s64zLjRx9w@a))&hRA^yY_O@kQSo+6 zu>GhcJhxCDStYaGxJJM7Tr@{EFE})hTHKVc5YoGAjmyAC#BDPAaTOo4;M>4K*u1L> zor?uypkCB(2d%D5u@OdQc`G&>3^~ikDON8F`)d=_5ri)hQt=B#NlKA|@WHtu*dms< zFBBIj#R@_x*2#pAQ=G>HSu5HWwIK$LN8bFYCg>>zXzdEEUDk>5U!|qn_86lUTrH^A zO8?TFkisWL1`f=me$J?)nbgbrxluPK>gP?QK0*y=peVw>-VwWq5nR>9j6_dI>~RIa zodKrV7|=^akx`IJ=2K;n6quEBdlSt@JWiE$eDXQ2y8;zTZ|*g(HU-QJBcz!2lyNrF z8W^Qi*$gmkTos6t%_EC-$7%s*ugyOBJ^+Jt6@*5k1#?aMr$9`Me0|UM5dyKEc(~A@y7Wu36>>vcWD$TBU`or z%#p1CbHi>o@)PFNzrpdhL2>DGzBEyQbNN>}^JD|?=cAPI`QazbC^;3gh*wea!LRZ< z7uO0Lr79*oxUxYu@&)JA4G~X3Kko@Fq@w3>u>*{=aB67(6;Aab^nfglj0HYfa(t!2kJwn)Uq5&4PNTmk=CbYk; zPw=<9MYydJ`h9{4%WVh<5@Ap)fIAenxI&y_oqr3+kk$jX7{yxYLv7g`eup@X*x%9N zIlAcFT8FG?*PU@f6s8|XIvrX_rA_gO`xJ0O8@=6@S-K9B^Su&cp*Td22=^ko2Ehji z?gJ1~;V}SR+n$z*6E8Gpf8kJh-u{*Y_j>q={WQ>?Pv36;VEs&1;RW|}R>h6X)zg`) zuV=13-Zd8gc2wGo-ZHvlTJN}_%b(WeU)L4f;+Pt;gIx8#y|HDcv=Xvv)B8C^Q=jvd zZ1w9g@pU%c>$xg`S2gi<$*QX1eq~Z2%(d3#7}}v}^ACM_%Q@NFhFl=z*)6(FDyPR_Jp zM^g;x;L=l+TKzrYrNJ&tC}*sIOJdPdz_|^#xp?=024BEA6VkNy`-VJyiUX%yjD)mZ z{cSj5VKMb8Z*^}EPJUPq!;Ip5hBOce^uzsz8oW}t2SJAsMo}fYy&d7HyAnhT(I{W9 zh*uT>l7%!-S)ZF13Y9`7Qs{GmQ+omm|I=X{GU!@cJwA`r+B%f5mB#kjH50%oX3&zp zL(2+^e^mHP;ZOL>l~di<3aY0~dB-=8ZXNU7Vm0P1Nk{Mcj%0@MZOGz|cemFP5T4Q6b*z`4}+?8}wO(A!sPz~vJmi8acp?7;z z>CcxNl0-Egc*&%LYhV^d=tL&;)$%y{*TY5*owP}9N|YD{x1(z1A`jdWnkS^j&s#}p zVHUDame; z$nUesmw-H^>hp_k;WC$aTGg=U;;qM(2Ofii5){6uGjN`b?SslEHt+J{a~MbE(rp0G%F$E z6E>hs(hf0c__9*54loorRqzQ%+OYlwzq;SY59QP0k$m#z)Y!Lm#DWI8PRIZeVY9r9 zK6h_%?0yt!==Ul2l3rNE2k7m4vxcz^&ybLD-_9L7=XP=7E@Z6(;8cZlp5D$@Wsxg8 zfT>?}x6WUunlU$jr5ZBe{tr8vr(J|wYNbLI?-tvIZ$l-@<_6b5dbl-!YwS*`YY5`v zVc3F-yG6fmNZa5#0^gZ91P3;h3@Fz|n_;`u;DJBf4eRlS0BaMSc|sL75nH4L-r-q| zrd@!~d=}qFD}xg!`GxkbBXA1f_Z80ty>Kgzl*7tU;XPD1pr69j1q3)oa01ZVDTXwX z-wWLmaM^@n`n~W?jCT-wsR$EXDCh~~0gsdkkE*WC<7@Z#!*RlHEUm?Yrw}xv3{a|a z#3BmutFc&db`>SlY7s%+*Y%BWp{KD zbZ*{4iI&SQyJcXOt$qH$j}O3|Ncu+d-qPx48ZXpeBIi_N^^a?xG@Ug~xcKYqvevfkx?zQE8c!)>*=i@X5>tCKJA-yR@R2EO(Tifix67#bSRxo3)|PPokLEyr3obbFmO6$E&vdohvo3 zR;d8~GA?81I@9Y_)zQ0DhPO<4yY!}Cn=^JrbHQl!t^{p6%$4~EHy#dtv%diB83sPq zVZBT*2@p6P;A*tYL70~TmPVZ@c*JJVjgV96ENtwMw?PaK8$LCku&-FS;(?nceMGi2 z@9IU|DA>_e3%6Ks+751`N@l_l8OJH7A=aEZcTd7YGvxuNa<0j9XDRGOEj@J7ZdLK{ z7cmzQwT-J7+Il2S!|On368)>$jypX*a2SD@+j1QJ@MI!{-Cvwsl5sZ_3d)WO2D1$_ zxa$mQ9m-X^D0I;J?>b_m1V0ujJ3)9?q7QyIv34sW4{q6p;@)gDCo(Z)e5;oDzo1J=^`(UxF1{<>qfzr04n34fJ!wAatCWa_NC1Mdg z06#GcP{)|ttWB+(Elp4-{;g80-aufQsaMVF?jq`>*+Z;JogC(`oVxZP&lwHa>Rp3U z7aTTkINcM@`}~rl%{_Jb^u27<8LVd?0C0!}k#2o-+3@!uR~PwMzzp}~ZUOG9l>=+1 
zWzn}se+P6SO_$r#*(JfpK^`6)&{t91c?7s?Dz5ONP4#7k!_zCcU3~bH0>iet#!b6h zt9Ngzh5)g5K=4S#;j>|`oV4+D8)+0k5f&u!SC$ixV7Lud%(i3s@A7HW>h(J|7-UBf>d|P@{3Lip0kA-^> zph*(ZIfkK+JmDi}J)UHXeO`|woC7hnQ}IvkAv;k);jcl++^Z3s=ca z5Kn(FnPF6p4eXdtpwpAd!|0QSbXzy=*wxy!dx!ENTgceFt)_MJ&fRxaH#R7G32ES? zR=4j+_|Tvfxz{6TLf}Sl62S`yt|E97!3=`mBfvv;C29*VFJ;!CHiU0yBAj1+A>Eo@ zKi}_luM==bgMUWU(FdPe=_I856D9)wz-*dkHvKD8JI&PomMMMTcxXg76V)_goQZPL zPoAnj7)5etGj*hs%-Sp@;a0wt#EsS5V&FB=IE$~@CHo19nU&cZLJp8m>e&4(Nxsb> hoRw8s#CZ!ovyO926i-`AFP8*cTJJCpvXdw+{eQlfgg<*_VF9xt-I#QU1aj^o&}6E9%JK^g0gq>*Oxeb4fe zH<^Rw6a#TF+!hLnn^7SUrzH&$a0)%plS7*(Kxj&UbMk;vIPB0s$ti0S_xIf=$&TZ& z{8{O|@7?#^@4M@Ff8YJ?olkw9J@Y)PPE}QfH2lTy{m$@V>)q9@?CEzed8(Y7-#?#e zT;qXkdu~Dhf?P*`M{Z&NLd6SaJ9AzAUAaa5i*m{Sq~e9Li*rl*mmnSPUpk`k$Ol>f zGQ6ws?oKaH8tvzvYINa z$>fGtW>d$~*@0rp8ZnoEX?j33y_1Rc@=5PYY^|L1&F$(YaaI(n07X;*!A-;IeEL*T zY*D;5@_WAhY>8aspTFcf$-r>h8W>5LBP*zmfU2X8)v*{8x68Bs7pjtA2?mn%i`(R} zz-G2bRsxOj>&gkHPi2M%%*>hedin2x7ItLnzXFrYw~e%u@-IT$eRC<E`?OA-!3vH1N3XpVYa*jr)m4Tz)B* z-(c5{Hgiw2##_epi@Nz|yEWc6!+TKU^W?mmrgAA}$0~8G3(pvX&}Si(u))JOR3FqX zVcnc%c5S5vGHlEXneRYmzFlkA;#<2`X1WnfH4J5z>g{^li*gOK9Xjn+t6e|AQ1t@a z3)*3Ox%_fXth|tSUNDa8rG`?Y)nqqLFni9JcF_=zqmssEtyFuSzWCd;A;!C~U;}nz zuQtKnuGt2PskN!dMfMz5w+W5r$xFUcB)^z@N+GLmw2d1?h zBQM6A)?Cm_wMsZJEnEfRFWy*~WKoz$?2=Q_uFbowy2?DeiZ8}6V>FOj;-+rKLg&jS zjY@(lnPjCVyzA{MyNP!vmFpGZl6Y{7yzQaqy+iM&Ott34pK%ghOw>ET>DZ<%iA2vtS1-7YNX-GUnM13c<)I`Q+Z zNxhqT`!;aJ2uQMBcqyGBGD^fn07{J!$r8yC$rJGrDG(_VSxaOih~vwcLz%36UqgJ$ zSz=BQIY&9+!9sp8Wu^0Y#Qnr>CZYzM2Fx*XsZ$`CyklhYmi$@6yp{vtC*$Hc2?~&Y zfA-h_%1;YRCL6nUoFF|tpBCdJLG$Va$I_>Z1r%TkHC<{hXj~mVd(80cn_HldVt ze6F0l&=_BKFBPCh`t6ibNL_a=$PKudk4*EF{7U2I9o@kD80%miMvO&(%DyX}D;`gb zRkHx|;X9#w*djcg_-bPzwwN_QKF*@dLkRvMswOGl(j*V0KB{Y4D1pCQ3VGCNB~poO z#;A+_vU?E!&az%ebS)~%=sqBd~4Fl#so5ExR_7ttMeJc)7Nxe3u~}A;#lG{*H*U!dVCQ zMPjXyju_Je(*j2-9FWlN96XlhO(-WmXPa`9;MWWK^(70<%iQ(yW;p##bP?o#dZ>*wSv_qhRuSo zl)H00gV{_Gt5Wsw9VA56I9{3y)A6Q6It2%m!((Q|->a-lKE~6h#0N?Enw2Sk+}5(@ z-@r6GKs0!h2G;PZ&*Om)iLzFBl_;JFd=9PC0CL4ski)=llJxPj+Y$q^d97C z5>YczJCw=u0iLo_u&DtG_P&R_X*6kNP0CkxGeL7bsHO-B`m4r6F^+-9d;PCNEgDZq3zvG$!iGASM5)qprM-*YP>$G^@sCPcVuesJKm9K6+J=ayMxfDbG{{39Hsd zRmF4YMjgjSp3)W~5`5KjM#l=He#9c|Jgdx^L{rVbw!G(Ai&lwEYQ#OSm3eHF#v3q@ z50o&Bl{(u?#fZnO`tzFYZPu)Y2?S^&PQvu;n zaX-Sq84at!_EFbtFS-_g8$-Yp=R4+{!)#FjZoet>B0K(=il$w1*OF4+Z{H1{H zNAU9)Z&M>yZ~Lug)wogF)H%QWnAM_8EQla6h~dTg2g%uf(&q~(7~~z7yrqyGKr^4Q zsj<(%7FzAlBx72>%?|N}76pHu)?8P1t%ev4d}THBD)SIFhIw--Vh7OOE_BxraikM` z5v++mV8#Ju#>oo?id?4z7c0SDWp+#K0E%9E$)GB&`IUAX)`xJg+FD=-__Fg_hE3>9 zMgmesDD?gjzWHC3xxUt9H7RS?AF6LoR<%(9ln)B=F(CQ?-mzD|p(-{w|d)orPo z#5dnam_p2(XR2_whM$vD-FNxEL>kY_*zyiw{rcr4Hb*|Uysg|wY=Tw=x(IvVf%2B9 z1;+^=IGwjrr*;TY5F$!4L1KF{`5`)MD->OC_z#rhC!%^!Bk6=Q=0NJ0S;(G1Ku1&I zgfeC(Z(6DRVA`D^>aSBD0!vsX1rFk0iDRU)S()isO9y2=ZHr>yiF%M^wd0@m98M&4 z!Ya^?JzGd|$3uo7zuglLFDKy_h#Z#D6^&&YNZ7+Sh(m@m?S%$M(t~3I94=_Oz~3Ry zv_Q0I!0;eGi^3B{jEE6{-;6Sy)m`xzTa5n@Yt^HA)L29wR!K!vZX7`Iad6Wn&`K+a^450NT?YA+!(e&8RjpoU?s$T>IB9=y_Cb1 zFeE%~LwI0J7f)D`(JCIas(GjdaCeDZrOv|&i{d<@oM9D!!E4pnUfURr^6CqOHZ=eW zlx2H>HRb4o9$stV5ZX2r?92gX`cQqH?IX~#4IXRJN`AcS(MaW2M9lrlH{snd%U$e# z;XJDm*lqBp*_>YYOXUP@#_Mqk$OH#6v$DxmUz-&u$C25jGVim4aA5bTytqQo7$*lY z-JnW6pF63XCw3VjxQMGc4U@{bl)@H{@T_J#Or9K4VcTy9>>!`_kU^)6y>L8t>qpCa zDN?F}XN>?{6Xf>sp06;q{gS6tjbpUN6L5SYq{gILHS-z1Kvh^(Y2h7iGfFj9%&tK- zQ7guAiin2RkS7AC){gQnI`=2~8N#{~fHI~Y&dtG_U-SRHhv*XXZ17nBmFzZNKQ|c=qeZr9st^e9GweA_6Hx2NNxFM(#PX4(Ist<1%ayPxY@Dk^L zskj>fG3mT(7F%DG>o*?SN})sFWyA5ACx(VHr>-7|0e95aS>eugh)xClm0S698`rOO zn=5_@?u^!Nkaj(gQYFXb1UF&VJXpvTw|v0W-Lh#p`>M=u`osa(2a=9MM4fnj4XI?C 
zi?uHi_bSvYoQ_Z-b0|agTKP-Dcky!~zmVC@@p%oDq;`oKB0nag{FFMGdqzI8dD%_R z67#=_kc&vxiCoTR&}hLxY9Eg8au@lFTQ+cgt>O z$)4W&at|Vq0M;;Y9r1<-d+;DY#7mntL>oZ42^|rM-_3iImhbjxR`~R%af}|sF?xd) z=GX+=!L$0Cv?B;QO2PYr*t4lO9iRiuQL5Uj1_*m>Z2+)68sWhf_=th3rB*crLKJHd zhEu8%ox~RpKLi&vPzzkw2I{yU-HXT{_wMv5bKWLb>`0coFla(qnSPDM&2e`DSIvox z2KRM{Z$%Sn;QSv)!2>7qXEH^#&k*Dch4PaJNY+1c;uzTr)^ASPjWRa1Iq<22KE%n` zHI#9ROP)O$Ladt34!It_jja0DR7_CSpb8){@i#;oklpWYAL)ip+)K>sOe`=kkjZDP zfq|#UvKB!?g9X*;KC&SEhx8cJ^#H}xA^ekZCBTYEduJPhz^i{0!TRthCw~};Yn{B= zMDE?zhH%G0?jF-Z!Kz+jHmi8xqkD0WgwY4u`qZ_L|B_cli~*Da*hfGMO4`ZbnVLg1 znhBnYTB&ZNx)rGI>=RGRe;_aI+S0TIW20g=#`SXn z1mA}8m+zG?@A`H71*7D*0+l)?P=|FGu;$qQ(HK?);!nEBmd(45uzG3k?%Iw}aI)BQzeK}*Ta@C$&)xjtN1piPrW#P8s4ZXz6Gz@Dn3}-gUc?X;1mAYn=b?*Pm36boirT%tjO7_M`OkRLhIcqq@)I3vLk?=-k2Xx(%RQ!d#1{^h@f3{FAms&6$J{vovnH2H8sa%S6|hxzeGO#Ex9F!4T?#;ubU7Ze=gZ+I_bz z3sFnMME>Os8iGTb=}}y&`ttC;`rE5WVJ;CobWRn%OPbxVSLcWkC%@~!(5 zNg9dEXvmR$d-hGEf6|~0%&__ufE#|$K+2!)-{&)_^l`cUjyXGNNSy%Azy~olmUutq z&>V>+RIkD|cN5`wRjb8Kv+bclRg=}C%MaWUXY1rA?%3_#25umsjUd=At$Dpd*G$_F zUY7Ulh{>Ki8-nm9u!xhmfZTUyyxcApkeUL`LsXC2E+{uoz37kO8f0)}9PwNs-#u-o zrdUfl)FXA(;6{cI)8}lr5IE+BO~+>y=mN`$;uc1vvZt-gps9)xD#o5DlTsBC8d&!r z^=Fh?L9N8Fs!KjoB1bb+PA3PiSj!ch~pKzX43I+KfH;7qfdb5 zyEY)Q3)r4Y5IG^lYT-%UieP)eY&r|V=70w;0E6%Lls0>hpL!FVjz{xo6F7D{o>-xo z@3|Hqk0-3E3W7ouMW;k)fZIs8qOg4B;L@&5==nj>O9S~LI@5>QfK_`Wuz%? zmb(unT5H4}%2IQ$+=&{0*Z8D;sCjr1W$z`T_5rg~o%Ff0sWjCj(xef(7P79hf9=Y3 zs*9p9m^MxKhbiVwSO{hVRN&q9D3ezXt&X-(HLV~!llsn`$&kF|aQB-#@$E!t@-u_E zv^7%T$}bAlumhxVn8?-IR7$PTI5Hl^qx@%pd745kItzsYp3rU)pZe6{Cs{bL|1^rs z)77RNfAd&4kuO+@W9r6q>Zfl$t5;t@3)OEN4q_P7(!Ohr|D9m*ER~@09~bn`2jpL5)BcdcQ+HaC9N-l20D( zWDiWec=Q{_qO0J4qdnK7K`^!v-{=!Zsg-w=!gg^7DcnqCA92@GDnqFn zBEO)|@5{Eqja8SC#3?K=@w7NAhXzmSqE3E&uq`}?WMV|>rH9WC)ADfR#wOXrTguI( z7}&LA&;Ef!ckWR?{OAuKzO8p)=e|4lZa;WH4VB}=Z`0EGaW@uTNJXe&yq!`D5C8xG delta 9876 zcmb7K36NYzd4B!oK6bVDDXm5;?J+CuYA+p@R+lYHvTUt{E?Z;SKK5<4-3xS$LKE8pL54@rnh zMO*W~?!W*3yZi6%|7yMPZT6)XS$U$YEU4k%nC~}y&&fy1m$IiHS^aF0X9$OJtUc2a>&SG*Iu$OMj%K=IT_}fQt9dxKhL^>*JNY>YNtq=heIh-+^S+L=~@nobftdf6g5nTv3ay<4wF7 z1nc>FzKpk=W3df<17FS~C~f2$`3l~O(oM-wGQ?LtuJcvA4c#~Kn|M3#K+9&niAQ-C zN<+IfzM8K&r|~s2dTa~d%-8Z>;I}60d0(@}`}x3_ix2X3z-{B~kQN=1zhZ}2Q0~>o z;rPB=_g(%k zS2wHg-MQ)b&SWxwM=~L@sqFaiGr4qbdYxSBX=f|tQO~ZPUhJdjCeR09FG*#_`_qY$ zWICQtm?w+~kcJyT)7zO?FJJWhj19^!d6#vp2F_-Ap%^Hj7zBpKliB3yyx7_a0x=+e z=RLqW<*;wX>gy=|<4H4qB4M0p2ch8y(DW)+#cG(?BR}l>ZW-YP1Bm*>E_v3!rT;cm zY-ZT*zF&9{HNTnH|0C3CuF@7UI_~KNH*$X{KTmanr@f zYj0nZC`fX0>aZtks3P_Ph}G0$<1Lz8nw7M?Az$nx-jGd$?sz@RSfl*b`kegO_EJ|)YP3Rt+Xo9 zhfQjWG-mSx7e>RDmd+%hr1hS5HAg4R!uA; zro4l9KI}TK7itT2W`k8X!>sB_?Sf1E27}f$YK6-4RD1FvZH(bo*PQcPbvrTCdo;@h zu9X%8?6RtzzTjWPmA~jMgtP0pyAZ^_xWwxitOY|gTVBgoqt$C;Ixsc7;bA>nuG-ej z*9(=ZpCA3)qV@mNPxUBSm2#@0ZA;0lDlNClcsDi>azm-dDc!7*3YvA5A~fPGE7ap# zYn540NR8UK4%OE$|E^-gGS8V-rWG2Pc6JEs2wk<<0joi-u57DqxWD0$#$5$`d$nGa zpuPrWvT{`&U6Em-F5aCein|CLlNTym8}?H1J^~2>qXam>2dkUpo0Tip)8&Zjwkw^? z+8!gHPMM;MXs*39q93?4;RgWDw#y$>Et8))7?Am@&8u$*YBBbcG{I zDH?+2czIn-n5|u07;(D9Y`>8l&m^;^;jEd|p<3>{sW2e(wX5otIukQMMD-p=x8d6^ zKU%wV_#BZXsi{?Xs60ksoPeW=sC0rriogVcNdjI1N~TISgH+up19hvm7OD6Efjacx`BWa_cRf>66v(TBCPj{TVJI&urMoU-Yf1aZdVOTe&&T0 ziRf+8HF8mC>_^-@3kmi()R_MlSS5wa69*x2E_dw5M`-#u&OV~s8H zna0NM&YP1Fo|;baB#(@oiS$Q%f|0p@qcEc)(up-jH$_xMvX+)xP~o{#StF6jr;`yu z#z}QiXV2OD`)L;U0z{eZQ9adliw1?-tPjb(&COo5H`8*mxrfcl4>zwGSUhV=JmGvU zeP%qD?aw5P$;ZLW2m@&T3Rc1D@N3e==j1P%x9MkpbAyF0O(sT9MCMpTQbuHM;gMlf zWNumQ&_2Rur!pf+5y_2#lr}Tcxi8Wd>5puQ^hW;=K~&#B5c1WQJ)3FuQA`dW=K!j! 
z;}Wxk_!NOp5>U%sxIbSc)POv;yzBOb108icl$WS&vA62@dBQ0YqPzHd_I}wVk?n6K_tF}7&mglRCm&LkEVB}^4NK`=7)%i2DUx4s|H3C$po%R z20tSuKCPNkS)M#CK1#G#oeBA^70s1@0i@9ipy^Gl?uu8Jzg^Mne+G3(e4}h>{Q_Hl z`Ma%k?DFd?`6IoQ@dNUU%k-G-UWT++pARMmyO#KcA_U%MYaBGiC}=5*iJh(v8cW{9O1M zuLGKzrK~k)1>I!lHB&d?9nI+Cu<0&&&TF&GSYvw6g9aEM+WlOIkK}&XU2vHJ({H(& zRsZwaM3B35t)Lg!0-qu=%9^3`8ajC%*59LX?@sNW;ey+8pVy|D5SE+!CUx;i?nfQx z(hQf%INV64o!3g`+&_ycao>=3QkYB33JdNDhtB{Qy6B!^a zaV?J_&8)tPCn!vprC(tAk752b93BLfO_!?Gaz+u?L=f%~JI_kBa6Xyw71Mv7HEX51 zS&gvgwIZ(>(m1@Rd)`-om`XL4o7jjKNUjzfK@(zyJG+sHsqS(NseUGtO_!o3Ub;Cv5Lw>-2o%Z;TqJ;dO{HSyc3&tW%+v9oS!yuePrpj^CWzVw`* zFQb;@?t-`AEBFfm%Lgy+CElW>#w?%Ns3!P6`NPf?WiP=O1$7|^2RSH%(dJ^v3Sd&N zTQs%TvExgv5bb!j#R~B@(~sS4HVbkI$Nu)KoP9hTy)Wp=896Qh2vHzgQL093h?gp zT8ho+2$*}!Ri+xy3OB$hoYB#%_oAy%4rj4%nta~R@qViu*a5SR51M|WxaiIX`8tJN z588kla)T8B)yBCI+s(3$`4a;sKfc`CbwOua|6X+_#Fg%QyQ9 z^;JOF!IY888fGFpnsfx|k|+CHigiRxT33)hw!L}SoUAD-2@@cUJCz-ybVV+2hYp>| znu*i53Xv0{f+&;$`~sDH1W3$wC}qSGBStPg1s_xG7OmF~Vr*d=r&8t#@f;CfARt!_ ztVcTG?tzxpYO2=&M3>mUIdfr$6H{i6JgRgq!R72g)7CXa=9~>GsVzE$lI^BrXtRvX zOq&T?=cd|Z9LB#VLS@GKb z;yHG$#GrDSXn~%CVqUOZn(0<53AV4~nZ<>%w5%73oAja=Dn_?-!W%D}zLKANEV}z% z=l&MWKq&wcmotE}Ugyo=M2P#%Fb_1VC>;aoJg78K6Avk?6^1%+n`M^U(kIG!*~6q_ z${k*o3kqh*V{TqyEdhFM}(sjQ=Bm6M?Yrovjn+bL5c?t+o2R;5j56PYFq)1ij$oOda*OKHtk z8Cs&&62LC20uX77S%&;;cG#-00`MJl@!lH}u2BlVn>t-ozKmjsiC@RChZr)ltxq~~ zu>w|*uYKHuOhIqr611O+0B$*v9Gx^Gi3r7XmoIJjtjqlk9CMl8ygMB8tMDqDG{ukP zS2v&Z{{Sse#xKf^Lz|m_jEeXt0*jaV+o%qUSLOSMUhO8;2yN|!HsL98Lv=|SFypAp zN4AVPKCCX@CZ4|{a3d8$lx71PCuhRtT2q=AxEzq3BDkV$GTKorrHvph+r$+K3H<~mI+a;#5B1BDO%hVhN zIosl22`Fm>MefqX*JWvYckf>l_CEvf7Wg2=_w*horuv_29y6 z#V?7FO^cJ%_&fnY7#(rYb+V`&rPC81O=F9l@L)S+Vo;o~)s{%Z*2TL2lhIy1oGrdojq^*gm=P{oD7{Q)>y>Pxvba7ItcesUk_ z4@-*&Eec5d%~0|2&=4x238-gU2B}^sVh;~dAYhg|rP2~gkn-^3uoQ79UBF82#YHTW zZ|&a2Hp-oQqW7(W&<-P7^*I*Yu>Ht^8)iH=Dbyj-o&1a{R|$1^7S9bG1Uy6_>J;@T zifZhG?Tt@m&!+N1S+xW5**!HyM5dE;8B}Yhh5uT_p6Fn&P z2-1GLhQ-%p=iVbncAz4DMnK7%)P{4H$qS3RgmPpz5*Df4>!l%@fqRWC&ZTTi@%%J7NF|FlwGPLg)tCz`r z9es7^*T@^-AB4=NQZsL~LX_{vZD^VcI+nrcHP7fDXn~z=DJ_@Z+PADZ+^aG5cMK3Q zt1bqqx7jigx$GHU%UGRUw|}>Kx`K`HjinPNlA3=|;lDyM8IYgae{(Y>S!{PcXC%dk zNhWAPnwLPk^c`5n^0Mu~pRWA{QF+GTbly0JO^*7jiYW?bcgfH1ACl2`l->Ras@Oe> zerz@hZ`w(MUMqMN11n=mcJ;!`HtP5?*TI`Z`!)EM?kB? 
zibsi#c2cPDN~ts_qP0D0rD9-V4J3NCpKmWrU)Qh1f`{l`(lAq_1_VK3vfc90!?*US zg>R(pHvzyI8lN`C4ps9T_(lxghe(aA{5CyusmcYB%l zZ8xCKyuYI_-U;3+4~3PrfbIC5*Ilkxz3ulPv{Q6Qpo0!Iq6>TF8%K8({|qz1GoYS^ z$5~(qkf0s!v0xgs3B820+AQPlDJYSX6)+sYS;%D3mr{!e$lNAO#RoPa%<@|9k`FC# zE+zuAI+P|}W9Onco%+$?l3i&3N!?t$4D+43)y%4_5Hq zFJc1Flqkb32*}s(T(e>m?Tu0d5YrG<=)|1$MU+==i0yLAU6EqB*hNih*B4I|*-Syk zMI8d<05XZZQ2C7AM6C|b;+;uk4OZ0BP;~&iqWZ2~Sk;zeJ$qE9rkPsQbnc+iegcc^7se1ZXm|)e z^*G{Z?uKBKKj3!<_lug#&mH@77E&?Hfiva_x<`68=im5DZ>*M6fFXt1~ptKC#2yuYGqDVikdZaRV0O0ER%nsf*eE8Ob@f`gT&h8qwzM@ zCchGIXYaZEllTu^or{K~i`ZS)i7eth*g}}ue;aM>Q1%0S$Qshec~QQX5O7gXDM=XO zm$H}76qR~piL-joS``ouM{6A0QI4KM=`}iVwinMUJeMKoGRg6Zcp{fVc;kdKD#hf5 zB}!!{uM(qDVWhLfdYZ%z0!lx5NPbB>B$8>_pRB!aBeldq7%NX?aR6Xzc|4a*pK&6X zezZX9SxzJ3zwMkhh0;$d!+9I^UC>F3%G?f6@=85~-!4CqY&$*yE;u{wL{9+f|C-NX zASinjbq$NdpcO}`!*(%D9kvnJL)bwoC8&e|#0mGrPh{uV<}!6^Jb1RtpAkiwA3H7I z7;6qy5q&j*8d*KQB2-JoIs)}_+jw)a0Y%%t=hiz8#E%@jL%l8Ap`*9$jPKfiaNmx@ zhm_cCFaEPmGCS=A$F0;`iF6N@juJRSphVzv1fC*5Z%-<*=mZfIR;WuU=!^-Hhv-$2 z=vAjl{`>ggnqD~YaN+TUOMeY6zt``tDDyY^_XI10p None: + """Initializes the BaseTransform object.""" pass def apply_image(self, labels): - """Applies image transformation to labels.""" + """Applies image transformations to labels.""" pass def apply_instances(self, labels): - """Applies transformations to input 'labels' and returns object instances.""" + """Applies transformations to object instances in labels.""" pass def apply_semantic(self, labels): @@ -37,13 +55,14 @@ class BaseTransform: pass def __call__(self, labels): - """Applies label transformations to an image, instances and semantic masks.""" + """Applies all label transformations to an image, instances, and semantic masks.""" self.apply_image(labels) self.apply_instances(labels) self.apply_semantic(labels) class Compose: + """Class for composing multiple image transformations.""" def __init__(self, transforms): """Initializes the Compose object with a list of transforms.""" @@ -60,18 +79,23 @@ class Compose: self.transforms.append(transform) def tolist(self): - """Converts list of transforms to a standard Python list.""" + """Converts the list of transforms to a standard Python list.""" return self.transforms def __repr__(self): - """Return string representation of object.""" + """Returns a string representation of the object.""" return f"{self.__class__.__name__}({', '.join([f'{t}' for t in self.transforms])})" class BaseMixTransform: - """This implementation is from mmyolo.""" + """ + Class for base mix (MixUp/Mosaic) transformations. + + This implementation is from mmyolo. + """ def __init__(self, dataset, pre_transform=None, p=0.0) -> None: + """Initializes the BaseMixTransform object with dataset, pre_transform, and probability.""" self.dataset = dataset self.pre_transform = pre_transform self.p = p @@ -92,11 +116,11 @@ class BaseMixTransform: if self.pre_transform is not None: for i, data in enumerate(mix_labels): mix_labels[i] = self.pre_transform(data) - labels['mix_labels'] = mix_labels + labels["mix_labels"] = mix_labels # Mosaic or MixUp labels = self._mix_transform(labels) - labels.pop('mix_labels', None) + labels.pop("mix_labels", None) return labels def _mix_transform(self, labels): @@ -124,8 +148,8 @@ class Mosaic(BaseMixTransform): def __init__(self, dataset, imgsz=640, p=1.0, n=4): """Initializes the object with a dataset, image size, probability, and border.""" - assert 0 <= p <= 1.0, f'The probability should be in range [0, 1], but got {p}.' - assert n in (4, 9), 'grid must be equal to 4 or 9.' 
+ assert 0 <= p <= 1.0, f"The probability should be in range [0, 1], but got {p}." + assert n in (4, 9), "grid must be equal to 4 or 9." super().__init__(dataset=dataset, p=p) self.dataset = dataset self.imgsz = imgsz @@ -141,9 +165,45 @@ class Mosaic(BaseMixTransform): def _mix_transform(self, labels): """Apply mixup transformation to the input image and labels.""" - assert labels.get('rect_shape', None) is None, 'rect and mosaic are mutually exclusive.' - assert len(labels.get('mix_labels', [])), 'There are no other images for mosaic augment.' - return self._mosaic4(labels) if self.n == 4 else self._mosaic9(labels) + assert labels.get("rect_shape", None) is None, "rect and mosaic are mutually exclusive." + assert len(labels.get("mix_labels", [])), "There are no other images for mosaic augment." + return ( + self._mosaic3(labels) if self.n == 3 else self._mosaic4(labels) if self.n == 4 else self._mosaic9(labels) + ) # This code is modified for mosaic3 method. + + def _mosaic3(self, labels): + """Create a 1x3 image mosaic.""" + mosaic_labels = [] + s = self.imgsz + for i in range(3): + labels_patch = labels if i == 0 else labels["mix_labels"][i - 1] + # Load image + img = labels_patch["img"] + h, w = labels_patch.pop("resized_shape") + + # Place img in img3 + if i == 0: # center + img3 = np.full((s * 3, s * 3, img.shape[2]), 114, dtype=np.uint8) # base image with 3 tiles + h0, w0 = h, w + c = s, s, s + w, s + h # xmin, ymin, xmax, ymax (base) coordinates + elif i == 1: # right + c = s + w0, s, s + w0 + w, s + h + elif i == 2: # left + c = s - w, s + h0 - h, s, s + h0 + + padw, padh = c[:2] + x1, y1, x2, y2 = (max(x, 0) for x in c) # allocate coords + + img3[y1:y2, x1:x2] = img[y1 - padh :, x1 - padw :] # img3[ymin:ymax, xmin:xmax] + # hp, wp = h, w # height, width previous for next iteration + + # Labels assuming imgsz*2 mosaic size + labels_patch = self._update_labels(labels_patch, padw + self.border[0], padh + self.border[1]) + mosaic_labels.append(labels_patch) + final_labels = self._cat_labels(mosaic_labels) + + final_labels["img"] = img3[-self.border[0] : self.border[0], -self.border[1] : self.border[1]] + return final_labels def _mosaic4(self, labels): """Create a 2x2 image mosaic.""" @@ -151,10 +211,10 @@ class Mosaic(BaseMixTransform): s = self.imgsz yc, xc = (int(random.uniform(-x, 2 * s + x)) for x in self.border) # mosaic center x, y for i in range(4): - labels_patch = labels if i == 0 else labels['mix_labels'][i - 1] + labels_patch = labels if i == 0 else labels["mix_labels"][i - 1] # Load image - img = labels_patch['img'] - h, w = labels_patch.pop('resized_shape') + img = labels_patch["img"] + h, w = labels_patch.pop("resized_shape") # Place img in img4 if i == 0: # top left @@ -178,7 +238,7 @@ class Mosaic(BaseMixTransform): labels_patch = self._update_labels(labels_patch, padw, padh) mosaic_labels.append(labels_patch) final_labels = self._cat_labels(mosaic_labels) - final_labels['img'] = img4 + final_labels["img"] = img4 return final_labels def _mosaic9(self, labels): @@ -187,10 +247,10 @@ class Mosaic(BaseMixTransform): s = self.imgsz hp, wp = -1, -1 # height, width previous for i in range(9): - labels_patch = labels if i == 0 else labels['mix_labels'][i - 1] + labels_patch = labels if i == 0 else labels["mix_labels"][i - 1] # Load image - img = labels_patch['img'] - h, w = labels_patch.pop('resized_shape') + img = labels_patch["img"] + h, w = labels_patch.pop("resized_shape") # Place img in img9 if i == 0: # center @@ -218,7 +278,7 @@ class Mosaic(BaseMixTransform): x1, 
y1, x2, y2 = (max(x, 0) for x in c) # allocate coords # Image - img9[y1:y2, x1:x2] = img[y1 - padh:, x1 - padw:] # img9[ymin:ymax, xmin:xmax] + img9[y1:y2, x1:x2] = img[y1 - padh :, x1 - padw :] # img9[ymin:ymax, xmin:xmax] hp, wp = h, w # height, width previous for next iteration # Labels assuming imgsz*2 mosaic size @@ -226,16 +286,16 @@ class Mosaic(BaseMixTransform): mosaic_labels.append(labels_patch) final_labels = self._cat_labels(mosaic_labels) - final_labels['img'] = img9[-self.border[0]:self.border[0], -self.border[1]:self.border[1]] + final_labels["img"] = img9[-self.border[0] : self.border[0], -self.border[1] : self.border[1]] return final_labels @staticmethod def _update_labels(labels, padw, padh): """Update labels.""" - nh, nw = labels['img'].shape[:2] - labels['instances'].convert_bbox(format='xyxy') - labels['instances'].denormalize(nw, nh) - labels['instances'].add_padding(padw, padh) + nh, nw = labels["img"].shape[:2] + labels["instances"].convert_bbox(format="xyxy") + labels["instances"].denormalize(nw, nh) + labels["instances"].add_padding(padw, padh) return labels def _cat_labels(self, mosaic_labels): @@ -246,24 +306,28 @@ class Mosaic(BaseMixTransform): instances = [] imgsz = self.imgsz * 2 # mosaic imgsz for labels in mosaic_labels: - cls.append(labels['cls']) - instances.append(labels['instances']) + cls.append(labels["cls"]) + instances.append(labels["instances"]) + # Final labels final_labels = { - 'im_file': mosaic_labels[0]['im_file'], - 'ori_shape': mosaic_labels[0]['ori_shape'], - 'resized_shape': (imgsz, imgsz), - 'cls': np.concatenate(cls, 0), - 'instances': Instances.concatenate(instances, axis=0), - 'mosaic_border': self.border} # final_labels - final_labels['instances'].clip(imgsz, imgsz) - good = final_labels['instances'].remove_zero_area_boxes() - final_labels['cls'] = final_labels['cls'][good] + "im_file": mosaic_labels[0]["im_file"], + "ori_shape": mosaic_labels[0]["ori_shape"], + "resized_shape": (imgsz, imgsz), + "cls": np.concatenate(cls, 0), + "instances": Instances.concatenate(instances, axis=0), + "mosaic_border": self.border, + } + final_labels["instances"].clip(imgsz, imgsz) + good = final_labels["instances"].remove_zero_area_boxes() + final_labels["cls"] = final_labels["cls"][good] return final_labels class MixUp(BaseMixTransform): + """Class for applying MixUp augmentation to the dataset.""" def __init__(self, dataset, pre_transform=None, p=0.0) -> None: + """Initializes MixUp object with dataset, pre_transform, and probability of applying MixUp.""" super().__init__(dataset=dataset, pre_transform=pre_transform, p=p) def get_indexes(self): @@ -271,36 +335,67 @@ class MixUp(BaseMixTransform): return random.randint(0, len(self.dataset) - 1) def _mix_transform(self, labels): - """Applies MixUp augmentation https://arxiv.org/pdf/1710.09412.pdf.""" + """Applies MixUp augmentation as per https://arxiv.org/pdf/1710.09412.pdf.""" r = np.random.beta(32.0, 32.0) # mixup ratio, alpha=beta=32.0 - labels2 = labels['mix_labels'][0] - labels['img'] = (labels['img'] * r + labels2['img'] * (1 - r)).astype(np.uint8) - labels['instances'] = Instances.concatenate([labels['instances'], labels2['instances']], axis=0) - labels['cls'] = np.concatenate([labels['cls'], labels2['cls']], 0) + labels2 = labels["mix_labels"][0] + labels["img"] = (labels["img"] * r + labels2["img"] * (1 - r)).astype(np.uint8) + labels["instances"] = Instances.concatenate([labels["instances"], labels2["instances"]], axis=0) + labels["cls"] = np.concatenate([labels["cls"], labels2["cls"]], 0) 
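+        # Note: with alpha = beta = 32.0, the sampled ratio r is tightly concentrated
+        # around 0.5, so the blended image above is always close to an equal mix of the
+        # two inputs, while the instances and class labels of both images are kept in full.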
return labels class RandomPerspective: + """ + Implements random perspective and affine transformations on images and corresponding bounding boxes, segments, and + keypoints. These transformations include rotation, translation, scaling, and shearing. The class also offers the + option to apply these transformations conditionally with a specified probability. + + Attributes: + degrees (float): Degree range for random rotations. + translate (float): Fraction of total width and height for random translation. + scale (float): Scaling factor interval, e.g., a scale factor of 0.1 allows a resize between 90%-110%. + shear (float): Shear intensity (angle in degrees). + perspective (float): Perspective distortion factor. + border (tuple): Tuple specifying mosaic border. + pre_transform (callable): A function/transform to apply to the image before starting the random transformation. + + Methods: + affine_transform(img, border): Applies a series of affine transformations to the image. + apply_bboxes(bboxes, M): Transforms bounding boxes using the calculated affine matrix. + apply_segments(segments, M): Transforms segments and generates new bounding boxes. + apply_keypoints(keypoints, M): Transforms keypoints. + __call__(labels): Main method to apply transformations to both images and their corresponding annotations. + box_candidates(box1, box2): Filters out bounding boxes that don't meet certain criteria post-transformation. + """ + + def __init__( + self, degrees=0.0, translate=0.1, scale=0.5, shear=0.0, perspective=0.0, border=(0, 0), pre_transform=None + ): + """Initializes RandomPerspective object with transformation parameters.""" - def __init__(self, - degrees=0.0, - translate=0.1, - scale=0.5, - shear=0.0, - perspective=0.0, - border=(0, 0), - pre_transform=None): self.degrees = degrees self.translate = translate self.scale = scale self.shear = shear self.perspective = perspective - # Mosaic border - self.border = border + self.border = border # mosaic border self.pre_transform = pre_transform def affine_transform(self, img, border): - """Center.""" + """ + Applies a sequence of affine transformations centered around the image center. + + Args: + img (ndarray): Input image. + border (tuple): Border dimensions. + + Returns: + img (ndarray): Transformed image. + M (ndarray): Transformation matrix. + s (float): Scale factor. + """ + + # Center C = np.eye(3, dtype=np.float32) C[0, 2] = -img.shape[1] / 2 # x translation (pixels) @@ -387,6 +482,8 @@ class RandomPerspective: xy = xy[:, :2] / xy[:, 2:3] segments = xy.reshape(n, -1, 2) bboxes = np.stack([segment2box(xy, self.size[0], self.size[1]) for xy in segments], 0) + segments[..., 0] = segments[..., 0].clip(bboxes[:, 0:1], bboxes[:, 2:3]) + segments[..., 1] = segments[..., 1].clip(bboxes[:, 1:2], bboxes[:, 3:4]) return bboxes, segments def apply_keypoints(self, keypoints, M): @@ -419,21 +516,21 @@ class RandomPerspective: Args: labels (dict): a dict of `bboxes`, `segments`, `keypoints`. 
""" - if self.pre_transform and 'mosaic_border' not in labels: + if self.pre_transform and "mosaic_border" not in labels: labels = self.pre_transform(labels) - labels.pop('ratio_pad', None) # do not need ratio pad + labels.pop("ratio_pad", None) # do not need ratio pad - img = labels['img'] - cls = labels['cls'] - instances = labels.pop('instances') + img = labels["img"] + cls = labels["cls"] + instances = labels.pop("instances") # Make sure the coord formats are right - instances.convert_bbox(format='xyxy') + instances.convert_bbox(format="xyxy") instances.denormalize(*img.shape[:2][::-1]) - border = labels.pop('mosaic_border', self.border) + border = labels.pop("mosaic_border", self.border) self.size = img.shape[1] + border[1] * 2, img.shape[0] + border[0] * 2 # w, h # M is affine matrix - # scale for func:`box_candidates` + # Scale for func:`box_candidates` img, M, scale = self.affine_transform(img, border) bboxes = self.apply_bboxes(instances.bboxes, M) @@ -446,24 +543,38 @@ class RandomPerspective: if keypoints is not None: keypoints = self.apply_keypoints(keypoints, M) - new_instances = Instances(bboxes, segments, keypoints, bbox_format='xyxy', normalized=False) + new_instances = Instances(bboxes, segments, keypoints, bbox_format="xyxy", normalized=False) # Clip new_instances.clip(*self.size) # Filter instances instances.scale(scale_w=scale, scale_h=scale, bbox_only=True) # Make the bboxes have the same scale with new_bboxes - i = self.box_candidates(box1=instances.bboxes.T, - box2=new_instances.bboxes.T, - area_thr=0.01 if len(segments) else 0.10) - labels['instances'] = new_instances[i] - labels['cls'] = cls[i] - labels['img'] = img - labels['resized_shape'] = img.shape[:2] + i = self.box_candidates( + box1=instances.bboxes.T, box2=new_instances.bboxes.T, area_thr=0.01 if len(segments) else 0.10 + ) + labels["instances"] = new_instances[i] + labels["cls"] = cls[i] + labels["img"] = img + labels["resized_shape"] = img.shape[:2] return labels - def box_candidates(self, box1, box2, wh_thr=2, ar_thr=100, area_thr=0.1, eps=1e-16): # box1(4,n), box2(4,n) - # Compute box candidates: box1 before augment, box2 after augment, wh_thr (pixels), aspect_ratio_thr, area_ratio + def box_candidates(self, box1, box2, wh_thr=2, ar_thr=100, area_thr=0.1, eps=1e-16): + """ + Compute box candidates based on a set of thresholds. This method compares the characteristics of the boxes + before and after augmentation to decide whether a box is a candidate for further processing. + + Args: + box1 (numpy.ndarray): The 4,n bounding box before augmentation, represented as [x1, y1, x2, y2]. + box2 (numpy.ndarray): The 4,n bounding box after augmentation, represented as [x1, y1, x2, y2]. + wh_thr (float, optional): The width and height threshold in pixels. Default is 2. + ar_thr (float, optional): The aspect ratio threshold. Default is 100. + area_thr (float, optional): The area ratio threshold. Default is 0.1. + eps (float, optional): A small epsilon value to prevent division by zero. Default is 1e-16. + + Returns: + (numpy.ndarray): A boolean array indicating which boxes are candidates based on the given thresholds. + """ w1, h1 = box1[2] - box1[0], box1[3] - box1[1] w2, h2 = box2[2] - box2[0], box2[3] - box2[1] ar = np.maximum(w2 / (h2 + eps), h2 / (w2 + eps)) # aspect ratio @@ -471,15 +582,33 @@ class RandomPerspective: class RandomHSV: + """ + This class is responsible for performing random adjustments to the Hue, Saturation, and Value (HSV) channels of an + image. 
+ + The adjustments are random but within limits set by hgain, sgain, and vgain. + """ def __init__(self, hgain=0.5, sgain=0.5, vgain=0.5) -> None: + """ + Initialize RandomHSV class with gains for each HSV channel. + + Args: + hgain (float, optional): Maximum variation for hue. Default is 0.5. + sgain (float, optional): Maximum variation for saturation. Default is 0.5. + vgain (float, optional): Maximum variation for value. Default is 0.5. + """ self.hgain = hgain self.sgain = sgain self.vgain = vgain def __call__(self, labels): - """Applies image HSV augmentation""" - img = labels['img'] + """ + Applies random HSV augmentation to an image within the predefined limits. + + The modified image replaces the original image in the input 'labels' dict. + """ + img = labels["img"] if self.hgain or self.sgain or self.vgain: r = np.random.uniform(-1, 1, 3) * [self.hgain, self.sgain, self.vgain] + 1 # random gains hue, sat, val = cv2.split(cv2.cvtColor(img, cv2.COLOR_BGR2HSV)) @@ -496,10 +625,23 @@ class RandomHSV: class RandomFlip: - """Applies random horizontal or vertical flip to an image with a given probability.""" + """ + Applies a random horizontal or vertical flip to an image with a given probability. - def __init__(self, p=0.5, direction='horizontal', flip_idx=None) -> None: - assert direction in ['horizontal', 'vertical'], f'Support direction `horizontal` or `vertical`, got {direction}' + Also updates any instances (bounding boxes, keypoints, etc.) accordingly. + """ + + def __init__(self, p=0.5, direction="horizontal", flip_idx=None) -> None: + """ + Initializes the RandomFlip class with probability and direction. + + Args: + p (float, optional): The probability of applying the flip. Must be between 0 and 1. Default is 0.5. + direction (str, optional): The direction to apply the flip. Must be 'horizontal' or 'vertical'. + Default is 'horizontal'. + flip_idx (array-like, optional): Index mapping for flipping keypoints, if any. + """ + assert direction in ["horizontal", "vertical"], f"Support direction `horizontal` or `vertical`, got {direction}" assert 0 <= p <= 1.0 self.p = p @@ -507,26 +649,35 @@ class RandomFlip: self.flip_idx = flip_idx def __call__(self, labels): - """Resize image and padding for detection, instance segmentation, pose.""" - img = labels['img'] - instances = labels.pop('instances') - instances.convert_bbox(format='xywh') + """ + Applies random flip to an image and updates any instances like bounding boxes or keypoints accordingly. + + Args: + labels (dict): A dictionary containing the keys 'img' and 'instances'. 'img' is the image to be flipped. + 'instances' is an object containing bounding boxes and optionally keypoints. + + Returns: + (dict): The same dict with the flipped image and updated instances under the 'img' and 'instances' keys. 
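The `__call__` hunk is truncated above after the channel split; a self-contained sketch of how such per-channel gains can be applied through lookup tables (random toy image, illustrative gains):

```python
import cv2
import numpy as np

img = np.random.randint(0, 256, (64, 64, 3), dtype=np.uint8)  # BGR toy image
hgain, sgain, vgain = 0.5, 0.5, 0.5
r = np.random.uniform(-1, 1, 3) * [hgain, sgain, vgain] + 1  # random gains
hue, sat, val = cv2.split(cv2.cvtColor(img, cv2.COLOR_BGR2HSV))

x = np.arange(0, 256, dtype=r.dtype)
lut_hue = ((x * r[0]) % 180).astype(img.dtype)  # OpenCV 8-bit hue lives in [0, 180)
lut_sat = np.clip(x * r[1], 0, 255).astype(img.dtype)
lut_val = np.clip(x * r[2], 0, 255).astype(img.dtype)
im_hsv = cv2.merge((cv2.LUT(hue, lut_hue), cv2.LUT(sat, lut_sat), cv2.LUT(val, lut_val)))
out = cv2.cvtColor(im_hsv, cv2.COLOR_HSV2BGR)
```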
+ """ + img = labels["img"] + instances = labels.pop("instances") + instances.convert_bbox(format="xywh") h, w = img.shape[:2] h = 1 if instances.normalized else h w = 1 if instances.normalized else w # Flip up-down - if self.direction == 'vertical' and random.random() < self.p: + if self.direction == "vertical" and random.random() < self.p: img = np.flipud(img) instances.flipud(h) - if self.direction == 'horizontal' and random.random() < self.p: + if self.direction == "horizontal" and random.random() < self.p: img = np.fliplr(img) instances.fliplr(w) # For keypoints if self.flip_idx is not None and instances.keypoints is not None: instances.keypoints = np.ascontiguousarray(instances.keypoints[:, self.flip_idx, :]) - labels['img'] = np.ascontiguousarray(img) - labels['instances'] = instances + labels["img"] = np.ascontiguousarray(img) + labels["instances"] = instances return labels @@ -546,9 +697,9 @@ class LetterBox: """Return updated labels and image with added border.""" if labels is None: labels = {} - img = labels.get('img') if image is None else image + img = labels.get("img") if image is None else image shape = img.shape[:2] # current shape [height, width] - new_shape = labels.pop('rect_shape', self.new_shape) + new_shape = labels.pop("rect_shape", self.new_shape) if isinstance(new_shape, int): new_shape = (new_shape, new_shape) @@ -571,45 +722,72 @@ class LetterBox: if self.center: dw /= 2 # divide padding into 2 sides dh /= 2 - if labels.get('ratio_pad'): - labels['ratio_pad'] = (labels['ratio_pad'], (dw, dh)) # for evaluation if shape[::-1] != new_unpad: # resize img = cv2.resize(img, new_unpad, interpolation=cv2.INTER_LINEAR) top, bottom = int(round(dh - 0.1)) if self.center else 0, int(round(dh + 0.1)) left, right = int(round(dw - 0.1)) if self.center else 0, int(round(dw + 0.1)) - img = cv2.copyMakeBorder(img, top, bottom, left, right, cv2.BORDER_CONSTANT, - value=(114, 114, 114)) # add border + img = cv2.copyMakeBorder( + img, top, bottom, left, right, cv2.BORDER_CONSTANT, value=(114, 114, 114) + ) # add border + if labels.get("ratio_pad"): + labels["ratio_pad"] = (labels["ratio_pad"], (left, top)) # for evaluation if len(labels): labels = self._update_labels(labels, ratio, dw, dh) - labels['img'] = img - labels['resized_shape'] = new_shape + labels["img"] = img + labels["resized_shape"] = new_shape return labels else: return img def _update_labels(self, labels, ratio, padw, padh): """Update labels.""" - labels['instances'].convert_bbox(format='xyxy') - labels['instances'].denormalize(*labels['img'].shape[:2][::-1]) - labels['instances'].scale(*ratio) - labels['instances'].add_padding(padw, padh) + labels["instances"].convert_bbox(format="xyxy") + labels["instances"].denormalize(*labels["img"].shape[:2][::-1]) + labels["instances"].scale(*ratio) + labels["instances"].add_padding(padw, padh) return labels class CopyPaste: + """ + Implements the Copy-Paste augmentation as described in the paper https://arxiv.org/abs/2012.07177. This class is + responsible for applying the Copy-Paste augmentation on images and their corresponding instances. + """ def __init__(self, p=0.5) -> None: + """ + Initializes the CopyPaste class with a given probability. + + Args: + p (float, optional): The probability of applying the Copy-Paste augmentation. Must be between 0 and 1. + Default is 0.5. 
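One worked example of the padding arithmetic in `LetterBox.__call__`, with shapes chosen for easy numbers: a 480x640 image going to 640x640 keeps ratio 1.0 and receives 80 px of gray border on top and bottom.

```python
import cv2
import numpy as np

img = np.zeros((480, 640, 3), dtype=np.uint8)
new_shape = (640, 640)  # (height, width)
h, w = img.shape[:2]

r = min(new_shape[0] / h, new_shape[1] / w)         # 1.0: width already fits
new_unpad = (int(round(w * r)), int(round(h * r)))  # (640, 480)
dw = (new_shape[1] - new_unpad[0]) / 2              # 0.0, split across left/right
dh = (new_shape[0] - new_unpad[1]) / 2              # 80.0, split across top/bottom

img = cv2.resize(img, new_unpad, interpolation=cv2.INTER_LINEAR)
top, bottom = int(round(dh - 0.1)), int(round(dh + 0.1))
left, right = int(round(dw - 0.1)), int(round(dw + 0.1))
out = cv2.copyMakeBorder(img, top, bottom, left, right, cv2.BORDER_CONSTANT, value=(114, 114, 114))
assert out.shape[:2] == new_shape
```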
+ """ self.p = p def __call__(self, labels): - """Implement Copy-Paste augmentation https://arxiv.org/abs/2012.07177, labels as nx5 np.array(cls, xyxy).""" - im = labels['img'] - cls = labels['cls'] + """ + Applies the Copy-Paste augmentation to the given image and instances. + + Args: + labels (dict): A dictionary containing: + - 'img': The image to augment. + - 'cls': Class labels associated with the instances. + - 'instances': Object containing bounding boxes, and optionally, keypoints and segments. + + Returns: + (dict): Dict with augmented image and updated instances under the 'img', 'cls', and 'instances' keys. + + Notes: + 1. Instances are expected to have 'segments' as one of their attributes for this augmentation to work. + 2. This method modifies the input dictionary 'labels' in place. + """ + im = labels["img"] + cls = labels["cls"] h, w = im.shape[:2] - instances = labels.pop('instances') - instances.convert_bbox(format='xyxy') + instances = labels.pop("instances") + instances.convert_bbox(format="xyxy") instances.denormalize(w, h) if self.p and len(instances.segments): n = len(instances) @@ -632,27 +810,32 @@ class CopyPaste: i = cv2.flip(im_new, 1).astype(bool) im[i] = result[i] - labels['img'] = im - labels['cls'] = cls - labels['instances'] = instances + labels["img"] = im + labels["cls"] = cls + labels["instances"] = instances return labels class Albumentations: - """Albumentations transformations. Optional, uninstall package to disable. - Applies Blur, Median Blur, convert to grayscale, Contrast Limited Adaptive Histogram Equalization, - random change of brightness and contrast, RandomGamma and lowering of image quality by compression.""" + """ + Albumentations transformations. + + Optional, uninstall package to disable. Applies Blur, Median Blur, convert to grayscale, Contrast Limited Adaptive + Histogram Equalization, random change of brightness and contrast, RandomGamma and lowering of image quality by + compression. 
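The wrapper above treats albumentations as an optional dependency. A pared-down sketch of that pattern, assuming albumentations >= 1.0.3 is installed and using only a subset of the transforms listed above:

```python
try:
    import albumentations as A

    transform = A.Compose(
        [A.Blur(p=0.01), A.MedianBlur(p=0.01), A.ToGray(p=0.01), A.CLAHE(p=0.01)],
        bbox_params=A.BboxParams(format="yolo", label_fields=["class_labels"]),
    )
except ImportError:  # package not installed: augmentation is silently disabled
    transform = None
```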
+ """ def __init__(self, p=1.0): """Initialize the transform object for YOLO bbox formatted params.""" self.p = p self.transform = None - prefix = colorstr('albumentations: ') + prefix = colorstr("albumentations: ") try: import albumentations as A - check_version(A.__version__, '1.0.3', hard=True) # version requirement + check_version(A.__version__, "1.0.3", hard=True) # version requirement + # Transforms T = [ A.Blur(p=0.01), A.MedianBlur(p=0.01), @@ -660,59 +843,81 @@ class Albumentations: A.CLAHE(p=0.01), A.RandomBrightnessContrast(p=0.0), A.RandomGamma(p=0.0), - A.ImageCompression(quality_lower=75, p=0.0)] # transforms - self.transform = A.Compose(T, bbox_params=A.BboxParams(format='yolo', label_fields=['class_labels'])) + A.ImageCompression(quality_lower=75, p=0.0), + ] + self.transform = A.Compose(T, bbox_params=A.BboxParams(format="yolo", label_fields=["class_labels"])) - LOGGER.info(prefix + ', '.join(f'{x}'.replace('always_apply=False, ', '') for x in T if x.p)) + LOGGER.info(prefix + ", ".join(f"{x}".replace("always_apply=False, ", "") for x in T if x.p)) except ImportError: # package not installed, skip pass except Exception as e: - LOGGER.info(f'{prefix}{e}') + LOGGER.info(f"{prefix}{e}") def __call__(self, labels): """Generates object detections and returns a dictionary with detection results.""" - im = labels['img'] - cls = labels['cls'] + im = labels["img"] + cls = labels["cls"] if len(cls): - labels['instances'].convert_bbox('xywh') - labels['instances'].normalize(*im.shape[:2][::-1]) - bboxes = labels['instances'].bboxes + labels["instances"].convert_bbox("xywh") + labels["instances"].normalize(*im.shape[:2][::-1]) + bboxes = labels["instances"].bboxes # TODO: add supports of segments and keypoints if self.transform and random.random() < self.p: new = self.transform(image=im, bboxes=bboxes, class_labels=cls) # transformed - if len(new['class_labels']) > 0: # skip update if no bbox in new im - labels['img'] = new['image'] - labels['cls'] = np.array(new['class_labels']) - bboxes = np.array(new['bboxes'], dtype=np.float32) - labels['instances'].update(bboxes=bboxes) + if len(new["class_labels"]) > 0: # skip update if no bbox in new im + labels["img"] = new["image"] + labels["cls"] = np.array(new["class_labels"]) + bboxes = np.array(new["bboxes"], dtype=np.float32) + labels["instances"].update(bboxes=bboxes) return labels # TODO: technically this is not an augmentation, maybe we should put this to another files class Format: + """ + Formats image annotations for object detection, instance segmentation, and pose estimation tasks. The class + standardizes the image and instance annotations to be used by the `collate_fn` in PyTorch DataLoader. - def __init__(self, - bbox_format='xywh', - normalize=True, - return_mask=False, - return_keypoint=False, - mask_ratio=4, - mask_overlap=True, - batch_idx=True): + Attributes: + bbox_format (str): Format for bounding boxes. Default is 'xywh'. + normalize (bool): Whether to normalize bounding boxes. Default is True. + return_mask (bool): Return instance masks for segmentation. Default is False. + return_keypoint (bool): Return keypoints for pose estimation. Default is False. + mask_ratio (int): Downsample ratio for masks. Default is 4. + mask_overlap (bool): Whether to overlap masks. Default is True. + batch_idx (bool): Keep batch indexes. Default is True. + bgr (float): The probability to return BGR images. Default is 0.0. 
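A note on `batch_idx`, since its purpose only becomes visible at collate time: after images are stacked into a batch, every box row still needs to record which image it came from. A hypothetical mini-collate illustrating that:

```python
import torch

per_image_boxes = [torch.rand(3, 4), torch.rand(1, 4)]  # 3 boxes, then 1 box
batch_idx = [torch.full((len(b),), i, dtype=torch.float32) for i, b in enumerate(per_image_boxes)]

boxes = torch.cat(per_image_boxes)  # shape (4, 4)
idx = torch.cat(batch_idx)          # tensor([0., 0., 0., 1.]): one entry per box
```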
+ """ + + def __init__( + self, + bbox_format="xywh", + normalize=True, + return_mask=False, + return_keypoint=False, + return_obb=False, + mask_ratio=4, + mask_overlap=True, + batch_idx=True, + bgr=0.0, + ): + """Initializes the Format class with given parameters.""" self.bbox_format = bbox_format self.normalize = normalize self.return_mask = return_mask # set False when training detection only self.return_keypoint = return_keypoint + self.return_obb = return_obb self.mask_ratio = mask_ratio self.mask_overlap = mask_overlap self.batch_idx = batch_idx # keep the batch indexes + self.bgr = bgr def __call__(self, labels): """Return formatted image, classes, bounding boxes & keypoints to be used by 'collate_fn'.""" - img = labels.pop('img') + img = labels.pop("img") h, w = img.shape[:2] - cls = labels.pop('cls') - instances = labels.pop('instances') + cls = labels.pop("cls") + instances = labels.pop("instances") instances.convert_bbox(format=self.bbox_format) instances.denormalize(w, h) nl = len(instances) @@ -722,31 +927,37 @@ class Format: masks, instances, cls = self._format_segments(instances, cls, w, h) masks = torch.from_numpy(masks) else: - masks = torch.zeros(1 if self.mask_overlap else nl, img.shape[0] // self.mask_ratio, - img.shape[1] // self.mask_ratio) - labels['masks'] = masks + masks = torch.zeros( + 1 if self.mask_overlap else nl, img.shape[0] // self.mask_ratio, img.shape[1] // self.mask_ratio + ) + labels["masks"] = masks if self.normalize: instances.normalize(w, h) - labels['img'] = self._format_img(img) - labels['cls'] = torch.from_numpy(cls) if nl else torch.zeros(nl) - labels['bboxes'] = torch.from_numpy(instances.bboxes) if nl else torch.zeros((nl, 4)) + labels["img"] = self._format_img(img) + labels["cls"] = torch.from_numpy(cls) if nl else torch.zeros(nl) + labels["bboxes"] = torch.from_numpy(instances.bboxes) if nl else torch.zeros((nl, 4)) if self.return_keypoint: - labels['keypoints'] = torch.from_numpy(instances.keypoints) + labels["keypoints"] = torch.from_numpy(instances.keypoints) + if self.return_obb: + labels["bboxes"] = ( + xyxyxyxy2xywhr(torch.from_numpy(instances.segments)) if len(instances.segments) else torch.zeros((0, 5)) + ) # Then we can use collate_fn if self.batch_idx: - labels['batch_idx'] = torch.zeros(nl) + labels["batch_idx"] = torch.zeros(nl) return labels def _format_img(self, img): - """Format the image for YOLOv5 from Numpy array to PyTorch tensor.""" + """Format the image for YOLO from Numpy array to PyTorch tensor.""" if len(img.shape) < 3: img = np.expand_dims(img, -1) - img = np.ascontiguousarray(img.transpose(2, 0, 1)[::-1]) + img = img.transpose(2, 0, 1) + img = np.ascontiguousarray(img[::-1] if random.uniform(0, 1) > self.bgr else img) img = torch.from_numpy(img) return img def _format_segments(self, instances, cls, w, h): - """convert polygon points to bitmap.""" + """Convert polygon points to bitmap.""" segments = instances.segments if self.mask_overlap: masks, sorted_idx = polygons2masks_overlap((h, w), segments, downsample_ratio=self.mask_ratio) @@ -761,140 +972,281 @@ class Format: def v8_transforms(dataset, imgsz, hyp, stretch=False): """Convert images to a size suitable for YOLOv8 training.""" - pre_transform = Compose([ - Mosaic(dataset, imgsz=imgsz, p=hyp.mosaic), - CopyPaste(p=hyp.copy_paste), - RandomPerspective( - degrees=hyp.degrees, - translate=hyp.translate, - scale=hyp.scale, - shear=hyp.shear, - perspective=hyp.perspective, - pre_transform=None if stretch else LetterBox(new_shape=(imgsz, imgsz)), - )]) - flip_idx = 
dataset.data.get('flip_idx', []) # for keypoints augmentation + pre_transform = Compose( + [ + Mosaic(dataset, imgsz=imgsz, p=hyp.mosaic), + CopyPaste(p=hyp.copy_paste), + RandomPerspective( + degrees=hyp.degrees, + translate=hyp.translate, + scale=hyp.scale, + shear=hyp.shear, + perspective=hyp.perspective, + pre_transform=None if stretch else LetterBox(new_shape=(imgsz, imgsz)), + ), + ] + ) + flip_idx = dataset.data.get("flip_idx", []) # for keypoints augmentation if dataset.use_keypoints: - kpt_shape = dataset.data.get('kpt_shape', None) + kpt_shape = dataset.data.get("kpt_shape", None) if len(flip_idx) == 0 and hyp.fliplr > 0.0: hyp.fliplr = 0.0 LOGGER.warning("WARNING ⚠️ No 'flip_idx' array defined in data.yaml, setting augmentation 'fliplr=0.0'") elif flip_idx and (len(flip_idx) != kpt_shape[0]): - raise ValueError(f'data.yaml flip_idx={flip_idx} length must be equal to kpt_shape[0]={kpt_shape[0]}') + raise ValueError(f"data.yaml flip_idx={flip_idx} length must be equal to kpt_shape[0]={kpt_shape[0]}") - return Compose([ - pre_transform, - MixUp(dataset, pre_transform=pre_transform, p=hyp.mixup), - Albumentations(p=1.0), - RandomHSV(hgain=hyp.hsv_h, sgain=hyp.hsv_s, vgain=hyp.hsv_v), - RandomFlip(direction='vertical', p=hyp.flipud), - RandomFlip(direction='horizontal', p=hyp.fliplr, flip_idx=flip_idx)]) # transforms + return Compose( + [ + pre_transform, + MixUp(dataset, pre_transform=pre_transform, p=hyp.mixup), + Albumentations(p=1.0), + RandomHSV(hgain=hyp.hsv_h, sgain=hyp.hsv_s, vgain=hyp.hsv_v), + RandomFlip(direction="vertical", p=hyp.flipud), + RandomFlip(direction="horizontal", p=hyp.fliplr, flip_idx=flip_idx), + ] + ) # transforms # Classification augmentations ----------------------------------------------------------------------------------------- -def classify_transforms(size=224, mean=(0.0, 0.0, 0.0), std=(1.0, 1.0, 1.0)): # IMAGENET_MEAN, IMAGENET_STD +def classify_transforms( + size=224, + mean=DEFAULT_MEAN, + std=DEFAULT_STD, + interpolation: T.InterpolationMode = T.InterpolationMode.BILINEAR, + crop_fraction: float = DEFAULT_CROP_FTACTION, +): + """ + Classification transforms for evaluation/inference. Inspired by timm/data/transforms_factory.py. + + Args: + size (int): image size + mean (tuple): mean values of RGB channels + std (tuple): std values of RGB channels + interpolation (T.InterpolationMode): interpolation mode. default is T.InterpolationMode.BILINEAR. + crop_fraction (float): fraction of image to crop. default is 1.0. 
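The `crop_fraction` arithmetic in numbers: at the default of 1.0 the resize and crop sizes coincide, while 0.875 (the conventional ImageNet evaluation setting, used here purely as an example) takes a 224 crop from a 256 shortest-edge resize.

```python
import math

size = 224
for crop_fraction in (1.0, 0.875):
    scale_size = math.floor(size / crop_fraction)
    print(crop_fraction, scale_size)  # 1.0 -> 224, 0.875 -> 256 (then CenterCrop(224))
```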
+ + Returns: + (T.Compose): torchvision transforms + """ + + if isinstance(size, (tuple, list)): + assert len(size) == 2 + scale_size = tuple(math.floor(x / crop_fraction) for x in size) + else: + scale_size = math.floor(size / crop_fraction) + scale_size = (scale_size, scale_size) + + # aspect ratio is preserved, crops center within image, no borders are added, image is lost + if scale_size[0] == scale_size[1]: + # simple case, use torchvision built-in Resize w/ shortest edge mode (scalar size arg) + tfl = [T.Resize(scale_size[0], interpolation=interpolation)] + else: + # resize shortest edge to matching target dim for non-square target + tfl = [T.Resize(scale_size)] + tfl += [T.CenterCrop(size)] + + tfl += [ + T.ToTensor(), + T.Normalize( + mean=torch.tensor(mean), + std=torch.tensor(std), + ), + ] + + return T.Compose(tfl) + + +# Classification augmentations train --------------------------------------------------------------------------------------- +def classify_augmentations( + size=224, + mean=DEFAULT_MEAN, + std=DEFAULT_STD, + scale=None, + ratio=None, + hflip=0.5, + vflip=0.0, + auto_augment=None, + hsv_h=0.015, # image HSV-Hue augmentation (fraction) + hsv_s=0.4, # image HSV-Saturation augmentation (fraction) + hsv_v=0.4, # image HSV-Value augmentation (fraction) + force_color_jitter=False, + erasing=0.0, + interpolation: T.InterpolationMode = T.InterpolationMode.BILINEAR, +): + """ + Classification transforms with augmentation for training. Inspired by timm/data/transforms_factory.py. + + Args: + size (int): image size + scale (tuple): scale range of the image. default is (0.08, 1.0) + ratio (tuple): aspect ratio range of the image. default is (3./4., 4./3.) + mean (tuple): mean values of RGB channels + std (tuple): std values of RGB channels + hflip (float): probability of horizontal flip + vflip (float): probability of vertical flip + auto_augment (str): auto augmentation policy. can be 'randaugment', 'augmix', 'autoaugment' or None. + hsv_h (float): image HSV-Hue augmentation (fraction) + hsv_s (float): image HSV-Saturation augmentation (fraction) + hsv_v (float): image HSV-Value augmentation (fraction) + force_color_jitter (bool): force to apply color jitter even if auto augment is enabled + erasing (float): probability of random erasing + interpolation (T.InterpolationMode): interpolation mode. default is T.InterpolationMode.BILINEAR. 
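For readers who want the default behaviour without this helper, a rough torchvision-only equivalent of the training pipeline above (no auto-augment branch; the mean/std here are placeholder stats, not the library's DEFAULT_MEAN/DEFAULT_STD):

```python
import torch
import torchvision.transforms as T

mean = std = (0.5, 0.5, 0.5)  # placeholder values
train_tf = T.Compose(
    [
        T.RandomResizedCrop(224, scale=(0.08, 1.0), ratio=(3 / 4, 4 / 3)),
        T.RandomHorizontalFlip(p=0.5),
        T.ColorJitter(brightness=0.4, contrast=0.4, saturation=0.4, hue=0.015),
        T.ToTensor(),
        T.Normalize(mean=torch.tensor(mean), std=torch.tensor(std)),
        T.RandomErasing(p=0.0, inplace=True),  # a no-op at the default erasing=0.0
    ]
)
```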
+ + Returns: + (T.Compose): torchvision transforms + """ # Transforms to apply if albumentations not installed if not isinstance(size, int): - raise TypeError(f'classify_transforms() size {size} must be integer, not (list, tuple)') - if any(mean) or any(std): - return T.Compose([CenterCrop(size), ToTensor(), T.Normalize(mean, std, inplace=True)]) - else: - return T.Compose([CenterCrop(size), ToTensor()]) + raise TypeError(f"classify_transforms() size {size} must be integer, not (list, tuple)") + scale = tuple(scale or (0.08, 1.0)) # default imagenet scale range + ratio = tuple(ratio or (3.0 / 4.0, 4.0 / 3.0)) # default imagenet ratio range + primary_tfl = [T.RandomResizedCrop(size, scale=scale, ratio=ratio, interpolation=interpolation)] + if hflip > 0.0: + primary_tfl += [T.RandomHorizontalFlip(p=hflip)] + if vflip > 0.0: + primary_tfl += [T.RandomVerticalFlip(p=vflip)] + secondary_tfl = [] + disable_color_jitter = False + if auto_augment: + assert isinstance(auto_augment, str) + # color jitter is typically disabled if AA/RA on, + # this allows override without breaking old hparm cfgs + disable_color_jitter = not force_color_jitter -def hsv2colorjitter(h, s, v): - """Map HSV (hue, saturation, value) jitter into ColorJitter values (brightness, contrast, saturation, hue)""" - return v, v, s, h - - -def classify_albumentations( - augment=True, - size=224, - scale=(0.08, 1.0), - hflip=0.5, - vflip=0.0, - hsv_h=0.015, # image HSV-Hue augmentation (fraction) - hsv_s=0.7, # image HSV-Saturation augmentation (fraction) - hsv_v=0.4, # image HSV-Value augmentation (fraction) - mean=(0.0, 0.0, 0.0), # IMAGENET_MEAN - std=(1.0, 1.0, 1.0), # IMAGENET_STD - auto_aug=False, -): - """YOLOv8 classification Albumentations (optional, only used if package is installed).""" - prefix = colorstr('albumentations: ') - try: - import albumentations as A - from albumentations.pytorch import ToTensorV2 - - check_version(A.__version__, '1.0.3', hard=True) # version requirement - if augment: # Resize and crop - T = [A.RandomResizedCrop(height=size, width=size, scale=scale)] - if auto_aug: - # TODO: implement AugMix, AutoAug & RandAug in albumentations - LOGGER.info(f'{prefix}auto augmentations are currently not supported') + if auto_augment == "randaugment": + if TORCHVISION_0_11: + secondary_tfl += [T.RandAugment(interpolation=interpolation)] else: - if hflip > 0: - T += [A.HorizontalFlip(p=hflip)] - if vflip > 0: - T += [A.VerticalFlip(p=vflip)] - if any((hsv_h, hsv_s, hsv_v)): - T += [A.ColorJitter(*hsv2colorjitter(hsv_h, hsv_s, hsv_v))] # brightness, contrast, saturation, hue - else: # Use fixed crop for eval set (reproducibility) - T = [A.SmallestMaxSize(max_size=size), A.CenterCrop(height=size, width=size)] - T += [A.Normalize(mean=mean, std=std), ToTensorV2()] # Normalize and convert to Tensor - LOGGER.info(prefix + ', '.join(f'{x}'.replace('always_apply=False, ', '') for x in T if x.p)) - return A.Compose(T) + LOGGER.warning('"auto_augment=randaugment" requires torchvision >= 0.11.0. Disabling it.') - except ImportError: # package not installed, skip - pass - except Exception as e: - LOGGER.info(f'{prefix}{e}') + elif auto_augment == "augmix": + if TORCHVISION_0_13: + secondary_tfl += [T.AugMix(interpolation=interpolation)] + else: + LOGGER.warning('"auto_augment=augmix" requires torchvision >= 0.13.0. 
Disabling it.') + + elif auto_augment == "autoaugment": + if TORCHVISION_0_10: + secondary_tfl += [T.AutoAugment(interpolation=interpolation)] + else: + LOGGER.warning('"auto_augment=autoaugment" requires torchvision >= 0.10.0. Disabling it.') + + else: + raise ValueError( + f'Invalid auto_augment policy: {auto_augment}. Should be one of "randaugment", ' + f'"augmix", "autoaugment" or None' + ) + + if not disable_color_jitter: + secondary_tfl += [T.ColorJitter(brightness=hsv_v, contrast=hsv_v, saturation=hsv_s, hue=hsv_h)] + + final_tfl = [ + T.ToTensor(), + T.Normalize(mean=torch.tensor(mean), std=torch.tensor(std)), + T.RandomErasing(p=erasing, inplace=True), + ] + + return T.Compose(primary_tfl + secondary_tfl + final_tfl) +# NOTE: keep this class for backward compatibility class ClassifyLetterBox: - """YOLOv8 LetterBox class for image preprocessing, i.e. T.Compose([LetterBox(size), ToTensor()])""" + """ + YOLOv8 LetterBox class for image preprocessing, designed to be part of a transformation pipeline, e.g., + T.Compose([LetterBox(size), ToTensor()]). + + Attributes: + h (int): Target height of the image. + w (int): Target width of the image. + auto (bool): If True, automatically solves for short side using stride. + stride (int): The stride value, used when 'auto' is True. + """ def __init__(self, size=(640, 640), auto=False, stride=32): - """Resizes image and crops it to center with max dimensions 'h' and 'w'.""" + """ + Initializes the ClassifyLetterBox class with a target size, auto-flag, and stride. + + Args: + size (Union[int, Tuple[int, int]]): The target dimensions (height, width) for the letterbox. + auto (bool): If True, automatically calculates the short side based on stride. + stride (int): The stride value, used when 'auto' is True. + """ super().__init__() self.h, self.w = (size, size) if isinstance(size, int) else size self.auto = auto # pass max size integer, automatically solve for short side using stride self.stride = stride # used with auto - def __call__(self, im): # im = np.array HWC + def __call__(self, im): + """ + Resizes the image and pads it with a letterbox method. + + Args: + im (numpy.ndarray): The input image as a numpy array of shape HWC. + + Returns: + (numpy.ndarray): The letterboxed and resized image as a numpy array. + """ imh, imw = im.shape[:2] - r = min(self.h / imh, self.w / imw) # ratio of new/old - h, w = round(imh * r), round(imw * r) # resized image - hs, ws = (math.ceil(x / self.stride) * self.stride for x in (h, w)) if self.auto else self.h, self.w + r = min(self.h / imh, self.w / imw) # ratio of new/old dimensions + h, w = round(imh * r), round(imw * r) # resized image dimensions + + # Calculate padding dimensions + hs, ws = (math.ceil(x / self.stride) * self.stride for x in (h, w)) if self.auto else (self.h, self.w) top, left = round((hs - h) / 2 - 0.1), round((ws - w) / 2 - 0.1) - im_out = np.full((self.h, self.w, 3), 114, dtype=im.dtype) - im_out[top:top + h, left:left + w] = cv2.resize(im, (w, h), interpolation=cv2.INTER_LINEAR) + + # Create padded image + im_out = np.full((hs, ws, 3), 114, dtype=im.dtype) + im_out[top : top + h, left : left + w] = cv2.resize(im, (w, h), interpolation=cv2.INTER_LINEAR) return im_out +# NOTE: keep this class for backward compatibility class CenterCrop: - """YOLOv8 CenterCrop class for image preprocessing, i.e. 
T.Compose([CenterCrop(size), ToTensor()])""" + """YOLOv8 CenterCrop class for image preprocessing, designed to be part of a transformation pipeline, e.g., + T.Compose([CenterCrop(size), ToTensor()]). + """ def __init__(self, size=640): """Converts an image from numpy array to PyTorch tensor.""" super().__init__() self.h, self.w = (size, size) if isinstance(size, int) else size - def __call__(self, im): # im = np.array HWC + def __call__(self, im): + """ + Resizes and crops the center of the image using a letterbox method. + + Args: + im (numpy.ndarray): The input image as a numpy array of shape HWC. + + Returns: + (numpy.ndarray): The center-cropped and resized image as a numpy array. + """ imh, imw = im.shape[:2] m = min(imh, imw) # min dimension top, left = (imh - m) // 2, (imw - m) // 2 - return cv2.resize(im[top:top + m, left:left + m], (self.w, self.h), interpolation=cv2.INTER_LINEAR) + return cv2.resize(im[top : top + m, left : left + m], (self.w, self.h), interpolation=cv2.INTER_LINEAR) +# NOTE: keep this class for backward compatibility class ToTensor: - """YOLOv8 ToTensor class for image preprocessing, i.e. T.Compose([LetterBox(size), ToTensor()]).""" + """YOLOv8 ToTensor class for image preprocessing, i.e., T.Compose([LetterBox(size), ToTensor()]).""" def __init__(self, half=False): """Initialize YOLOv8 ToTensor object with optional half-precision support.""" super().__init__() self.half = half - def __call__(self, im): # im = np.array HWC in BGR order + def __call__(self, im): + """ + Transforms an image from a numpy array to a PyTorch tensor, applying optional half-precision and normalization. + + Args: + im (numpy.ndarray): Input image as a numpy array with shape (H, W, C) in BGR order. + + Returns: + (torch.Tensor): The transformed image as a PyTorch tensor in float32 or float16, normalized to [0, 1]. + """ im = np.ascontiguousarray(im.transpose((2, 0, 1))[::-1]) # HWC to CHW -> BGR to RGB -> contiguous im = torch.from_numpy(im) # to torch im = im.half() if self.half else im.float() # uint8 to fp16/32 diff --git a/ultralytics/data/base.py b/ultralytics/data/base.py index 429533d..6af8d3c 100644 --- a/ultralytics/data/base.py +++ b/ultralytics/data/base.py @@ -15,7 +15,6 @@ import psutil from torch.utils.data import Dataset from ultralytics.utils import DEFAULT_CFG, LOCAL_RANK, LOGGER, NUM_THREADS, TQDM - from .utils import HELP_URL, IMG_FORMATS @@ -47,20 +46,23 @@ class BaseDataset(Dataset): transforms (callable): Image transformation function. 
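The legacy classes kept above boil down to small array manipulations; the `ToTensor` conversion, for instance, amounts to this (toy input):

```python
import numpy as np
import torch

im = np.random.randint(0, 256, (4, 4, 3), dtype=np.uint8)  # HWC, BGR
chw = np.ascontiguousarray(im.transpose((2, 0, 1))[::-1])  # HWC -> CHW, BGR -> RGB
t = torch.from_numpy(chw).float() / 255.0                  # uint8 -> float in [0, 1]
```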
""" - def __init__(self, - img_path, - imgsz=640, - cache=False, - augment=True, - hyp=DEFAULT_CFG, - prefix='', - rect=False, - batch_size=16, - stride=32, - pad=0.5, - single_cls=False, - classes=None, - fraction=1.0): + def __init__( + self, + img_path, + imgsz=640, + cache=False, + augment=True, + hyp=DEFAULT_CFG, + prefix="", + rect=False, + batch_size=16, + stride=32, + pad=0.5, + single_cls=False, + classes=None, + fraction=1.0, + ): + """Initialize BaseDataset with given configuration and options.""" super().__init__() self.img_path = img_path self.imgsz = imgsz @@ -84,11 +86,11 @@ class BaseDataset(Dataset): self.buffer = [] # buffer size = batch size self.max_buffer_length = min((self.ni, self.batch_size * 8, 1000)) if self.augment else 0 - # Cache stuff - if cache == 'ram' and not self.check_cache_ram(): + # Cache images + if cache == "ram" and not self.check_cache_ram(): cache = False self.ims, self.im_hw0, self.im_hw = [None] * self.ni, [None] * self.ni, [None] * self.ni - self.npy_files = [Path(f).with_suffix('.npy') for f in self.im_files] + self.npy_files = [Path(f).with_suffix(".npy") for f in self.im_files] if cache: self.cache_images(cache) @@ -102,54 +104,62 @@ class BaseDataset(Dataset): for p in img_path if isinstance(img_path, list) else [img_path]: p = Path(p) # os-agnostic if p.is_dir(): # dir - f += glob.glob(str(p / '**' / '*.*'), recursive=True) + f += glob.glob(str(p / "**" / "*.*"), recursive=True) # F = list(p.rglob('*.*')) # pathlib elif p.is_file(): # file with open(p) as t: t = t.read().strip().splitlines() parent = str(p.parent) + os.sep - f += [x.replace('./', parent) if x.startswith('./') else x for x in t] # local to global path + f += [x.replace("./", parent) if x.startswith("./") else x for x in t] # local to global path # F += [p.parent / x.lstrip(os.sep) for x in t] # local to global path (pathlib) else: - raise FileNotFoundError(f'{self.prefix}{p} does not exist') - im_files = sorted(x.replace('/', os.sep) for x in f if x.split('.')[-1].lower() in IMG_FORMATS) + raise FileNotFoundError(f"{self.prefix}{p} does not exist") + im_files = sorted(x.replace("/", os.sep) for x in f if x.split(".")[-1].lower() in IMG_FORMATS) # self.img_files = sorted([x for x in f if x.suffix[1:].lower() in IMG_FORMATS]) # pathlib - assert im_files, f'{self.prefix}No images found in {img_path}' + assert im_files, f"{self.prefix}No images found in {img_path}" except Exception as e: - raise FileNotFoundError(f'{self.prefix}Error loading data from {img_path}\n{HELP_URL}') from e + raise FileNotFoundError(f"{self.prefix}Error loading data from {img_path}\n{HELP_URL}") from e if self.fraction < 1: - im_files = im_files[:round(len(im_files) * self.fraction)] + # im_files = im_files[: round(len(im_files) * self.fraction)] + num_elements_to_select = round(len(im_files) * self.fraction) + im_files = random.sample(im_files, num_elements_to_select) return im_files def update_labels(self, include_class: Optional[list]): - """include_class, filter labels to include only these classes (optional).""" + """Update labels to include only these classes (optional).""" include_class_array = np.array(include_class).reshape(1, -1) for i in range(len(self.labels)): if include_class is not None: - cls = self.labels[i]['cls'] - bboxes = self.labels[i]['bboxes'] - segments = self.labels[i]['segments'] - keypoints = self.labels[i]['keypoints'] + cls = self.labels[i]["cls"] + bboxes = self.labels[i]["bboxes"] + segments = self.labels[i]["segments"] + keypoints = self.labels[i]["keypoints"] j = (cls 
== include_class_array).any(1) - self.labels[i]['cls'] = cls[j] - self.labels[i]['bboxes'] = bboxes[j] + self.labels[i]["cls"] = cls[j] + self.labels[i]["bboxes"] = bboxes[j] if segments: - self.labels[i]['segments'] = [segments[si] for si, idx in enumerate(j) if idx] + self.labels[i]["segments"] = [segments[si] for si, idx in enumerate(j) if idx] if keypoints is not None: - self.labels[i]['keypoints'] = keypoints[j] + self.labels[i]["keypoints"] = keypoints[j] if self.single_cls: - self.labels[i]['cls'][:, 0] = 0 + self.labels[i]["cls"][:, 0] = 0 def load_image(self, i, rect_mode=True): """Loads 1 image from dataset index 'i', returns (im, resized hw).""" im, f, fn = self.ims[i], self.im_files[i], self.npy_files[i] if im is None: # not cached in RAM if fn.exists(): # load npy - im = np.load(fn) + try: + im = np.load(fn) + except Exception as e: + LOGGER.warning(f"{self.prefix}WARNING ⚠️ Removing corrupt *.npy image file {fn} due to: {e}") + Path(fn).unlink(missing_ok=True) + im = cv2.imread(f) # BGR else: # read image im = cv2.imread(f) # BGR - if im is None: - raise FileNotFoundError(f'Image Not Found {f}') + if im is None: + raise FileNotFoundError(f"Image Not Found {f}") + h0, w0 = im.shape[:2] # orig hw if rect_mode: # resize long side to imgsz while maintaining aspect ratio r = self.imgsz / max(h0, w0) # ratio @@ -174,17 +184,17 @@ class BaseDataset(Dataset): def cache_images(self, cache): """Cache images to memory or disk.""" b, gb = 0, 1 << 30 # bytes of cached images, bytes per gigabytes - fcn = self.cache_images_to_disk if cache == 'disk' else self.load_image + fcn = self.cache_images_to_disk if cache == "disk" else self.load_image with ThreadPool(NUM_THREADS) as pool: results = pool.imap(fcn, range(self.ni)) pbar = TQDM(enumerate(results), total=self.ni, disable=LOCAL_RANK > 0) for i, x in pbar: - if cache == 'disk': + if cache == "disk": b += self.npy_files[i].stat().st_size else: # 'ram' self.ims[i], self.im_hw0[i], self.im_hw[i] = x # im, hw_orig, hw_resized = load_image(self, i) b += self.ims[i].nbytes - pbar.desc = f'{self.prefix}Caching images ({b / gb:.1f}GB {cache})' + pbar.desc = f"{self.prefix}Caching images ({b / gb:.1f}GB {cache})" pbar.close() def cache_images_to_disk(self, i): @@ -200,15 +210,17 @@ class BaseDataset(Dataset): for _ in range(n): im = cv2.imread(random.choice(self.im_files)) # sample image ratio = self.imgsz / max(im.shape[0], im.shape[1]) # max(h, w) # ratio - b += im.nbytes * ratio ** 2 + b += im.nbytes * ratio**2 mem_required = b * self.ni / n * (1 + safety_margin) # GB required to cache dataset into RAM mem = psutil.virtual_memory() cache = mem_required < mem.available # to cache or not to cache, that is the question if not cache: - LOGGER.info(f'{self.prefix}{mem_required / gb:.1f}GB RAM required to cache images ' - f'with {int(safety_margin * 100)}% safety margin but only ' - f'{mem.available / gb:.1f}/{mem.total / gb:.1f}GB available, ' - f"{'caching images ✅' if cache else 'not caching images ⚠️'}") + LOGGER.info( + f'{self.prefix}{mem_required / gb:.1f}GB RAM required to cache images ' + f'with {int(safety_margin * 100)}% safety margin but only ' + f'{mem.available / gb:.1f}/{mem.total / gb:.1f}GB available, ' + f"{'caching images ✅' if cache else 'not caching images ⚠️'}" + ) return cache def set_rectangle(self): @@ -216,7 +228,7 @@ class BaseDataset(Dataset): bi = np.floor(np.arange(self.ni) / self.batch_size).astype(int) # batch index nb = bi[-1] + 1 # number of batches - s = np.array([x.pop('shape') for x in self.labels]) # hw + s = 
np.array([x.pop("shape") for x in self.labels]) # hw ar = s[:, 0] / s[:, 1] # aspect ratio irect = ar.argsort() self.im_files = [self.im_files[i] for i in irect] @@ -243,12 +255,14 @@ class BaseDataset(Dataset): def get_image_and_label(self, index): """Get and return label information from the dataset.""" label = deepcopy(self.labels[index]) # requires deepcopy() https://github.com/ultralytics/ultralytics/pull/1948 - label.pop('shape', None) # shape is for rect, remove it - label['img'], label['ori_shape'], label['resized_shape'] = self.load_image(index) - label['ratio_pad'] = (label['resized_shape'][0] / label['ori_shape'][0], - label['resized_shape'][1] / label['ori_shape'][1]) # for evaluation + label.pop("shape", None) # shape is for rect, remove it + label["img"], label["ori_shape"], label["resized_shape"] = self.load_image(index) + label["ratio_pad"] = ( + label["resized_shape"][0] / label["ori_shape"][0], + label["resized_shape"][1] / label["ori_shape"][1], + ) # for evaluation if self.rect: - label['rect_shape'] = self.batch_shapes[self.batch[index]] + label["rect_shape"] = self.batch_shapes[self.batch[index]] return self.update_labels_info(label) def __len__(self): @@ -256,24 +270,32 @@ class BaseDataset(Dataset): return len(self.labels) def update_labels_info(self, label): - """custom your label format here.""" + """Custom your label format here.""" return label def build_transforms(self, hyp=None): - """Users can custom augmentations here - like: + """ + Users can customize augmentations here. + + Example: + ```python if self.augment: # Training transforms return Compose([]) else: # Val transforms return Compose([]) + ``` """ raise NotImplementedError def get_labels(self): - """Users can custom their own format here. - Make sure your output is a list with each element like below: + """ + Users can customize their own format here. + + Note: + Ensure output is a dictionary with the following keys: + ```python dict( im_file=im_file, shape=shape, # format: (height, width) @@ -284,5 +306,6 @@ class BaseDataset(Dataset): normalized=True, # or False bbox_format="xyxy", # or xywh, ltwh ) + ``` """ raise NotImplementedError diff --git a/ultralytics/data/build.py b/ultralytics/data/build.py index 9d40e5a..6bfb48f 100644 --- a/ultralytics/data/build.py +++ b/ultralytics/data/build.py @@ -9,23 +9,34 @@ import torch from PIL import Image from torch.utils.data import dataloader, distributed -from ultralytics.data.loaders import (LOADERS, LoadImages, LoadPilAndNumpy, LoadScreenshots, LoadStreams, LoadTensor, - SourceTypes, autocast_list) +from ultralytics.data.loaders import ( + LOADERS, + LoadImagesAndVideos, + LoadPilAndNumpy, + LoadScreenshots, + LoadStreams, + LoadTensor, + SourceTypes, + autocast_list, +) from ultralytics.data.utils import IMG_FORMATS, VID_FORMATS from ultralytics.utils import RANK, colorstr from ultralytics.utils.checks import check_file - from .dataset import YOLODataset from .utils import PIN_MEMORY class InfiniteDataLoader(dataloader.DataLoader): - """Dataloader that reuses workers. Uses same syntax as vanilla DataLoader.""" + """ + Dataloader that reuses workers. + + Uses same syntax as vanilla DataLoader. 
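The worker-reuse trick relies on the `_RepeatSampler` referenced above: because the sampler never exhausts, the DataLoader never tears its worker processes down between epochs. Paraphrased sketch:

```python
class _RepeatSampler:
    """Sampler that repeats forever, so DataLoader workers are never recycled."""

    def __init__(self, sampler):
        self.sampler = sampler

    def __iter__(self):
        while True:
            yield from iter(self.sampler)
```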
+ """ def __init__(self, *args, **kwargs): """Dataloader that infinitely recycles workers, inherits from DataLoader.""" super().__init__(*args, **kwargs) - object.__setattr__(self, 'batch_sampler', _RepeatSampler(self.batch_sampler)) + object.__setattr__(self, "batch_sampler", _RepeatSampler(self.batch_sampler)) self.iterator = super().__iter__() def __len__(self): @@ -38,7 +49,9 @@ class InfiniteDataLoader(dataloader.DataLoader): yield next(self.iterator) def reset(self): - """Reset iterator. + """ + Reset iterator. + This is useful when we want to modify settings of dataset while training. """ self.iterator = self._get_iterator() @@ -64,49 +77,51 @@ class _RepeatSampler: def seed_worker(worker_id): # noqa """Set dataloader worker seed https://pytorch.org/docs/stable/notes/randomness.html#dataloader.""" - worker_seed = torch.initial_seed() % 2 ** 32 + worker_seed = torch.initial_seed() % 2**32 np.random.seed(worker_seed) random.seed(worker_seed) -def build_yolo_dataset(cfg, img_path, batch, data, mode='train', rect=False, stride=32): - """Build YOLO Dataset""" +def build_yolo_dataset(cfg, img_path, batch, data, mode="train", rect=False, stride=32): + """Build YOLO Dataset.""" return YOLODataset( img_path=img_path, imgsz=cfg.imgsz, batch_size=batch, - augment=mode == 'train', # augmentation + augment=mode == "train", # augmentation hyp=cfg, # TODO: probably add a get_hyps_from_cfg function rect=cfg.rect or rect, # rectangular batches cache=cfg.cache or None, single_cls=cfg.single_cls or False, stride=int(stride), - pad=0.0 if mode == 'train' else 0.5, - prefix=colorstr(f'{mode}: '), - use_segments=cfg.task == 'segment', - use_keypoints=cfg.task == 'pose', + pad=0.0 if mode == "train" else 0.5, + prefix=colorstr(f"{mode}: "), + task=cfg.task, classes=cfg.classes, data=data, - fraction=cfg.fraction if mode == 'train' else 1.0) + fraction=cfg.fraction if mode == "train" else 1.0, + ) def build_dataloader(dataset, batch, workers, shuffle=True, rank=-1): """Return an InfiniteDataLoader or DataLoader for training or validation set.""" batch = min(batch, len(dataset)) nd = torch.cuda.device_count() # number of CUDA devices - nw = min([os.cpu_count() // max(nd, 1), batch if batch > 1 else 0, workers]) # number of workers + nw = min([os.cpu_count() // max(nd, 1), workers]) # number of workers sampler = None if rank == -1 else distributed.DistributedSampler(dataset, shuffle=shuffle) generator = torch.Generator() generator.manual_seed(6148914691236517205 + RANK) - return InfiniteDataLoader(dataset=dataset, - batch_size=batch, - shuffle=shuffle and sampler is None, - num_workers=nw, - sampler=sampler, - pin_memory=PIN_MEMORY, - collate_fn=getattr(dataset, 'collate_fn', None), - worker_init_fn=seed_worker, - generator=generator) + return InfiniteDataLoader( + dataset=dataset, + batch_size=batch, + shuffle=shuffle and sampler is None, + num_workers=nw, + sampler=sampler, + pin_memory=PIN_MEMORY, + collate_fn=getattr(dataset, "collate_fn", None), + worker_init_fn=seed_worker, + generator=generator, + ) def check_source(source): @@ -114,10 +129,10 @@ def check_source(source): webcam, screenshot, from_img, in_memory, tensor = False, False, False, False, False if isinstance(source, (str, int, Path)): # int for local usb camera source = str(source) - is_file = Path(source).suffix[1:] in (IMG_FORMATS + VID_FORMATS) - is_url = source.lower().startswith(('https://', 'http://', 'rtsp://', 'rtmp://')) - webcam = source.isnumeric() or source.endswith('.streams') or (is_url and not is_file) - screenshot = 
source.lower() == 'screen' + is_file = Path(source).suffix[1:] in (IMG_FORMATS | VID_FORMATS) + is_url = source.lower().startswith(("https://", "http://", "rtsp://", "rtmp://", "tcp://")) + webcam = source.isnumeric() or source.endswith(".streams") or (is_url and not is_file) + screenshot = source.lower() == "screen" if is_url and is_file: source = check_file(source) # download elif isinstance(source, LOADERS): @@ -130,42 +145,42 @@ def check_source(source): elif isinstance(source, torch.Tensor): tensor = True else: - raise TypeError('Unsupported image type. For supported types see https://docs.ultralytics.com/modes/predict') + raise TypeError("Unsupported image type. For supported types see https://docs.ultralytics.com/modes/predict") return source, webcam, screenshot, from_img, in_memory, tensor -def load_inference_source(source=None, imgsz=640, vid_stride=1, stream_buffer=False): +def load_inference_source(source=None, batch=1, vid_stride=1, buffer=False): """ Loads an inference source for object detection and applies necessary transformations. Args: source (str, Path, Tensor, PIL.Image, np.ndarray): The input source for inference. - imgsz (int, optional): The size of the image for inference. Default is 640. + batch (int, optional): Batch size for dataloaders. Default is 1. vid_stride (int, optional): The frame interval for video sources. Default is 1. - stream_buffer (bool, optional): Determined whether stream frames will be buffered. Default is False. + buffer (bool, optional): Determined whether stream frames will be buffered. Default is False. Returns: dataset (Dataset): A dataset object for the specified input source. """ - source, webcam, screenshot, from_img, in_memory, tensor = check_source(source) - source_type = source.source_type if in_memory else SourceTypes(webcam, screenshot, from_img, tensor) + source, stream, screenshot, from_img, in_memory, tensor = check_source(source) + source_type = source.source_type if in_memory else SourceTypes(stream, screenshot, from_img, tensor) # Dataloader if tensor: dataset = LoadTensor(source) elif in_memory: dataset = source - elif webcam: - dataset = LoadStreams(source, imgsz=imgsz, vid_stride=vid_stride, stream_buffer=stream_buffer) + elif stream: + dataset = LoadStreams(source, vid_stride=vid_stride, buffer=buffer) elif screenshot: - dataset = LoadScreenshots(source, imgsz=imgsz) + dataset = LoadScreenshots(source) elif from_img: - dataset = LoadPilAndNumpy(source, imgsz=imgsz) + dataset = LoadPilAndNumpy(source) else: - dataset = LoadImages(source, imgsz=imgsz, vid_stride=vid_stride) + dataset = LoadImagesAndVideos(source, batch=batch, vid_stride=vid_stride) # Attach source types to the dataset - setattr(dataset, 'source_type', source_type) + setattr(dataset, "source_type", source_type) return dataset diff --git a/ultralytics/data/converter.py b/ultralytics/data/converter.py index 1e3b429..eff4dac 100644 --- a/ultralytics/data/converter.py +++ b/ultralytics/data/converter.py @@ -1,31 +1,120 @@ # Ultralytics YOLO 🚀, AGPL-3.0 license import json -import shutil from collections import defaultdict from pathlib import Path import cv2 import numpy as np -from ultralytics.utils import TQDM +from ultralytics.utils import LOGGER, TQDM +from ultralytics.utils.files import increment_path def coco91_to_coco80_class(): - """Converts 91-index COCO class IDs to 80-index COCO class IDs. + """ + Converts 91-index COCO class IDs to 80-index COCO class IDs. 
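The routing logic in `check_source` can be exercised standalone; a toy rig with hypothetical inputs (the format sets are abbreviated stand-ins, not the real IMG_FORMATS/VID_FORMATS):

```python
from pathlib import Path

IMG_FORMATS, VID_FORMATS = {"jpg", "png"}, {"mp4", "avi"}  # abbreviated stand-ins

for source in ("image.jpg", "0", "screen", "rtsp://host/stream"):
    is_file = Path(source).suffix[1:] in (IMG_FORMATS | VID_FORMATS)
    is_url = source.lower().startswith(("https://", "http://", "rtsp://", "rtmp://", "tcp://"))
    stream = source.isnumeric() or source.endswith(".streams") or (is_url and not is_file)
    screenshot = source.lower() == "screen"
    print(source, dict(file=is_file, stream=stream, screenshot=screenshot))
```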
Returns: (list): A list of 91 class IDs where the index represents the 80-index class ID and the value is the corresponding 91-index class ID. """ return [ - 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, None, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, None, 24, 25, None, - None, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, None, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, - 51, 52, 53, 54, 55, 56, 57, 58, 59, None, 60, None, None, 61, None, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, - None, 73, 74, 75, 76, 77, 78, 79, None] + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9, + 10, + None, + 11, + 12, + 13, + 14, + 15, + 16, + 17, + 18, + 19, + 20, + 21, + 22, + 23, + None, + 24, + 25, + None, + None, + 26, + 27, + 28, + 29, + 30, + 31, + 32, + 33, + 34, + 35, + 36, + 37, + 38, + 39, + None, + 40, + 41, + 42, + 43, + 44, + 45, + 46, + 47, + 48, + 49, + 50, + 51, + 52, + 53, + 54, + 55, + 56, + 57, + 58, + 59, + None, + 60, + None, + None, + 61, + None, + 62, + 63, + 64, + 65, + 66, + 67, + 68, + 69, + 70, + 71, + 72, + None, + 73, + 74, + 75, + 76, + 77, + 78, + 79, + None, + ] -def coco80_to_coco91_class(): # +def coco80_to_coco91_class(): """ Converts 80-index (val2014) to 91-index (paper). For details see https://tech.amikelive.com/node-718/what-object-categories-labels-are-in-coco-dataset/. @@ -41,16 +130,102 @@ def coco80_to_coco91_class(): # ``` """ return [ - 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 27, 28, 31, 32, 33, 34, - 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, - 64, 65, 67, 70, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 84, 85, 86, 87, 88, 89, 90] + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9, + 10, + 11, + 13, + 14, + 15, + 16, + 17, + 18, + 19, + 20, + 21, + 22, + 23, + 24, + 25, + 27, + 28, + 31, + 32, + 33, + 34, + 35, + 36, + 37, + 38, + 39, + 40, + 41, + 42, + 43, + 44, + 46, + 47, + 48, + 49, + 50, + 51, + 52, + 53, + 54, + 55, + 56, + 57, + 58, + 59, + 60, + 61, + 62, + 63, + 64, + 65, + 67, + 70, + 72, + 73, + 74, + 75, + 76, + 77, + 78, + 79, + 80, + 81, + 82, + 84, + 85, + 86, + 87, + 88, + 89, + 90, + ] -def convert_coco(labels_dir='../coco/annotations/', use_segments=False, use_keypoints=False, cls91to80=True): - """Converts COCO dataset annotations to a format suitable for training YOLOv5 models. +def convert_coco( + labels_dir="../coco/annotations/", + save_dir="coco_converted/", + use_segments=False, + use_keypoints=False, + cls91to80=True, +): + """ + Converts COCO dataset annotations to a YOLO annotation format suitable for training YOLO models. Args: labels_dir (str, optional): Path to directory containing COCO dataset annotation files. + save_dir (str, optional): Path to directory to save results to. use_segments (bool, optional): Whether to include segmentation masks in the output. use_keypoints (bool, optional): Whether to include keypoint annotations in the output. cls91to80 (bool, optional): Whether to map 91 COCO class IDs to the corresponding 80 COCO class IDs. 
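Usage note for the mapping above: COCO `category_id` is 1-based, so it is indexed with `id - 1`, and `None` marks the eleven categories with no 80-class counterpart.

```python
coco80 = coco91_to_coco80_class()  # the list defined above
assert coco80[1 - 1] == 0          # category_id 1 (person) -> class 0
assert coco80[12 - 1] is None      # category_id 12 is absent from the 80-class set
assert sum(x is None for x in coco80) == 11
```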
@@ -67,78 +242,79 @@ def convert_coco(labels_dir='../coco/annotations/', use_segments=False, use_keyp """ # Create dataset directory - save_dir = Path('yolo_labels') - if save_dir.exists(): - shutil.rmtree(save_dir) # delete dir - for p in save_dir / 'labels', save_dir / 'images': + save_dir = increment_path(save_dir) # increment if save directory already exists + for p in save_dir / "labels", save_dir / "images": p.mkdir(parents=True, exist_ok=True) # make dir # Convert classes coco80 = coco91_to_coco80_class() # Import json - for json_file in sorted(Path(labels_dir).resolve().glob('*.json')): - fn = Path(save_dir) / 'labels' / json_file.stem.replace('instances_', '') # folder name + for json_file in sorted(Path(labels_dir).resolve().glob("*.json")): + fn = Path(save_dir) / "labels" / json_file.stem.replace("instances_", "") # folder name fn.mkdir(parents=True, exist_ok=True) with open(json_file) as f: data = json.load(f) # Create image dict - images = {f'{x["id"]:d}': x for x in data['images']} + images = {f'{x["id"]:d}': x for x in data["images"]} # Create image-annotations dict imgToAnns = defaultdict(list) - for ann in data['annotations']: - imgToAnns[ann['image_id']].append(ann) + for ann in data["annotations"]: + imgToAnns[ann["image_id"]].append(ann) # Write labels file - for img_id, anns in TQDM(imgToAnns.items(), desc=f'Annotations {json_file}'): - img = images[f'{img_id:d}'] - h, w, f = img['height'], img['width'], img['file_name'] + for img_id, anns in TQDM(imgToAnns.items(), desc=f"Annotations {json_file}"): + img = images[f"{img_id:d}"] + h, w, f = img["height"], img["width"], img["file_name"] bboxes = [] segments = [] keypoints = [] for ann in anns: - if ann['iscrowd']: + if ann["iscrowd"]: continue # The COCO box format is [top left x, top left y, width, height] - box = np.array(ann['bbox'], dtype=np.float64) + box = np.array(ann["bbox"], dtype=np.float64) box[:2] += box[2:] / 2 # xy top-left corner to center box[[0, 2]] /= w # normalize x box[[1, 3]] /= h # normalize y if box[2] <= 0 or box[3] <= 0: # if w <= 0 and h <= 0 continue - cls = coco80[ann['category_id'] - 1] if cls91to80 else ann['category_id'] - 1 # class + cls = coco80[ann["category_id"] - 1] if cls91to80 else ann["category_id"] - 1 # class box = [cls] + box.tolist() if box not in bboxes: bboxes.append(box) - if use_segments and ann.get('segmentation') is not None: - if len(ann['segmentation']) == 0: - segments.append([]) - continue - elif len(ann['segmentation']) > 1: - s = merge_multi_segment(ann['segmentation']) - s = (np.concatenate(s, axis=0) / np.array([w, h])).reshape(-1).tolist() - else: - s = [j for i in ann['segmentation'] for j in i] # all segments concatenated - s = (np.array(s).reshape(-1, 2) / np.array([w, h])).reshape(-1).tolist() - s = [cls] + s - if s not in segments: + if use_segments and ann.get("segmentation") is not None: + if len(ann["segmentation"]) == 0: + segments.append([]) + continue + elif len(ann["segmentation"]) > 1: + s = merge_multi_segment(ann["segmentation"]) + s = (np.concatenate(s, axis=0) / np.array([w, h])).reshape(-1).tolist() + else: + s = [j for i in ann["segmentation"] for j in i] # all segments concatenated + s = (np.array(s).reshape(-1, 2) / np.array([w, h])).reshape(-1).tolist() + s = [cls] + s segments.append(s) - if use_keypoints and ann.get('keypoints') is not None: - keypoints.append(box + (np.array(ann['keypoints']).reshape(-1, 3) / - np.array([w, h, 1])).reshape(-1).tolist()) + if use_keypoints and ann.get("keypoints") is not None: + keypoints.append( + box + 
(np.array(ann["keypoints"]).reshape(-1, 3) / np.array([w, h, 1])).reshape(-1).tolist() + ) # Write - with open((fn / f).with_suffix('.txt'), 'a') as file: + with open((fn / f).with_suffix(".txt"), "a") as file: for i in range(len(bboxes)): if use_keypoints: - line = *(keypoints[i]), # cls, box, keypoints + line = (*(keypoints[i]),) # cls, box, keypoints else: - line = *(segments[i] - if use_segments and len(segments[i]) > 0 else bboxes[i]), # cls, box or segments - file.write(('%g ' * len(line)).rstrip() % line + '\n') + line = ( + *(segments[i] if use_segments and len(segments[i]) > 0 else bboxes[i]), + ) # cls, box or segments + file.write(("%g " * len(line)).rstrip() % line + "\n") + + LOGGER.info(f"COCO data converted successfully.\nResults saved to {save_dir.resolve()}") def convert_dota_to_yolo_obb(dota_root_path: str): @@ -160,48 +336,52 @@ def convert_dota_to_yolo_obb(dota_root_path: str): Notes: The directory structure assumed for the DOTA dataset: - - DOTA - - images - - train - - val - - labels - - train_original - - val_original - After the function execution, the new labels will be saved in: - DOTA - - labels - - train - - val + ├─ images + │ ├─ train + │ └─ val + └─ labels + ├─ train_original + └─ val_original + + After execution, the function will organize the labels into: + + - DOTA + └─ labels + ├─ train + └─ val """ dota_root_path = Path(dota_root_path) # Class names to indices mapping class_mapping = { - 'plane': 0, - 'ship': 1, - 'storage-tank': 2, - 'baseball-diamond': 3, - 'tennis-court': 4, - 'basketball-court': 5, - 'ground-track-field': 6, - 'harbor': 7, - 'bridge': 8, - 'large-vehicle': 9, - 'small-vehicle': 10, - 'helicopter': 11, - 'roundabout': 12, - 'soccer ball-field': 13, - 'swimming-pool': 14, - 'container-crane': 15, - 'airport': 16, - 'helipad': 17} + "plane": 0, + "ship": 1, + "storage-tank": 2, + "baseball-diamond": 3, + "tennis-court": 4, + "basketball-court": 5, + "ground-track-field": 6, + "harbor": 7, + "bridge": 8, + "large-vehicle": 9, + "small-vehicle": 10, + "helicopter": 11, + "roundabout": 12, + "soccer-ball-field": 13, + "swimming-pool": 14, + "container-crane": 15, + "airport": 16, + "helipad": 17, + } def convert_label(image_name, image_width, image_height, orig_label_dir, save_dir): - orig_label_path = orig_label_dir / f'{image_name}.txt' - save_path = save_dir / f'{image_name}.txt' + """Converts a single image's DOTA annotation to YOLO OBB format and saves it to a specified directory.""" + orig_label_path = orig_label_dir / f"{image_name}.txt" + save_path = save_dir / f"{image_name}.txt" - with orig_label_path.open('r') as f, save_path.open('w') as g: + with orig_label_path.open("r") as f, save_path.open("w") as g: lines = f.readlines() for line in lines: parts = line.strip().split() @@ -211,20 +391,21 @@ def convert_dota_to_yolo_obb(dota_root_path: str): class_idx = class_mapping[class_name] coords = [float(p) for p in parts[:8]] normalized_coords = [ - coords[i] / image_width if i % 2 == 0 else coords[i] / image_height for i in range(8)] - formatted_coords = ['{:.6g}'.format(coord) for coord in normalized_coords] + coords[i] / image_width if i % 2 == 0 else coords[i] / image_height for i in range(8) + ] + formatted_coords = ["{:.6g}".format(coord) for coord in normalized_coords] g.write(f"{class_idx} {' '.join(formatted_coords)}\n") - for phase in ['train', 'val']: - image_dir = dota_root_path / 'images' / phase - orig_label_dir = dota_root_path / 'labels' / f'{phase}_original' - save_dir = dota_root_path / 'labels' / phase + for phase 
in ["train", "val"]: + image_dir = dota_root_path / "images" / phase + orig_label_dir = dota_root_path / "labels" / f"{phase}_original" + save_dir = dota_root_path / "labels" / phase save_dir.mkdir(parents=True, exist_ok=True) image_paths = list(image_dir.iterdir()) - for image_path in TQDM(image_paths, desc=f'Processing {phase} images'): - if image_path.suffix != '.png': + for image_path in TQDM(image_paths, desc=f"Processing {phase} images"): + if image_path.suffix != ".png": continue image_name_without_ext = image_path.stem img = cv2.imread(str(image_path)) @@ -237,8 +418,8 @@ def min_index(arr1, arr2): Find a pair of indexes with the shortest distance between two arrays of 2D points. Args: - arr1 (np.array): A NumPy array of shape (N, 2) representing N 2D points. - arr2 (np.array): A NumPy array of shape (M, 2) representing M 2D points. + arr1 (np.ndarray): A NumPy array of shape (N, 2) representing N 2D points. + arr2 (np.ndarray): A NumPy array of shape (M, 2) representing M 2D points. Returns: (tuple): A tuple containing the indexes of the points with the shortest distance in arr1 and arr2 respectively. @@ -263,31 +444,30 @@ def merge_multi_segment(segments): segments = [np.array(i).reshape(-1, 2) for i in segments] idx_list = [[] for _ in range(len(segments))] - # record the indexes with min distance between each segment + # Record the indexes with min distance between each segment for i in range(1, len(segments)): idx1, idx2 = min_index(segments[i - 1], segments[i]) idx_list[i - 1].append(idx1) idx_list[i].append(idx2) - # use two round to connect all the segments + # Use two round to connect all the segments for k in range(2): - # forward connection + # Forward connection if k == 0: for i, idx in enumerate(idx_list): - # middle segments have two indexes - # reverse the index of middle segments + # Middle segments have two indexes, reverse the index of middle segments if len(idx) == 2 and idx[0] > idx[1]: idx = idx[::-1] segments[i] = segments[i][::-1, :] segments[i] = np.roll(segments[i], -idx[0], axis=0) segments[i] = np.concatenate([segments[i], segments[i][:1]]) - # deal with the first segment and the last one + # Deal with the first segment and the last one if i in [0, len(idx_list) - 1]: s.append(segments[i]) else: idx = [0, idx[1] - idx[0]] - s.append(segments[i][idx[0]:idx[1] + 1]) + s.append(segments[i][idx[0] : idx[1] + 1]) else: for i in range(len(idx_list) - 1, -1, -1): @@ -296,3 +476,67 @@ def merge_multi_segment(segments): nidx = abs(idx[1] - idx[0]) s.append(segments[i][nidx:]) return s + + +def yolo_bbox2segment(im_dir, save_dir=None, sam_model="sam_b.pt"): + """ + Converts existing object detection dataset (bounding boxes) to segmentation dataset or oriented bounding box (OBB) + in YOLO format. Generates segmentation data using SAM auto-annotator as needed. + + Args: + im_dir (str | Path): Path to image directory to convert. + save_dir (str | Path): Path to save the generated labels, labels will be saved + into `labels-segment` in the same directory level of `im_dir` if save_dir is None. Default: None. + sam_model (str): Segmentation model to use for intermediate segmentation data; optional. + + Notes: + The input directory structure assumed for dataset: + + - im_dir + ├─ 001.jpg + ├─ .. + └─ NNN.jpg + - labels + ├─ 001.txt + ├─ .. 
+
+
+def yolo_bbox2segment(im_dir, save_dir=None, sam_model="sam_b.pt"):
+    """
+    Converts existing object detection dataset (bounding boxes) to segmentation dataset or oriented bounding box (OBB)
+    in YOLO format. Generates segmentation data using the SAM auto-annotator as needed.
+
+    Args:
+        im_dir (str | Path): Path to image directory to convert.
+        save_dir (str | Path): Path to save the generated labels; if None, labels are saved
+            into `labels-segment` at the same directory level as `im_dir`. Default: None.
+        sam_model (str): Segmentation model to use for intermediate segmentation data; optional.
+
+    Notes:
+        The input directory structure assumed for dataset:
+
+            - im_dir
+                ├─ 001.jpg
+                ├─ ..
+                └─ NNN.jpg
+            - labels
+                ├─ 001.txt
+                ├─ ..
+                └─ NNN.txt
+    """
+    from ultralytics.data import YOLODataset
+    from ultralytics.utils.ops import xywh2xyxy
+    from ultralytics.utils import LOGGER
+    from ultralytics import SAM
+    from tqdm import tqdm
+
+    # NOTE: add placeholder to pass class index check
+    dataset = YOLODataset(im_dir, data=dict(names=list(range(1000))))
+    if len(dataset.labels[0]["segments"]) > 0:  # if it's segment data
+        LOGGER.info("Segmentation labels detected, no need to generate new ones!")
+        return
+
+    LOGGER.info("Detection labels detected, generating segment labels by SAM model!")
+    sam_model = SAM(sam_model)
+    for l in tqdm(dataset.labels, total=len(dataset.labels), desc="Generating segment labels"):
+        h, w = l["shape"]
+        boxes = l["bboxes"]
+        if len(boxes) == 0:  # skip empty labels
+            continue
+        boxes[:, [0, 2]] *= w
+        boxes[:, [1, 3]] *= h
+        im = cv2.imread(l["im_file"])
+        sam_results = sam_model(im, bboxes=xywh2xyxy(boxes), verbose=False, save=False)
+        l["segments"] = sam_results[0].masks.xyn
+
+    save_dir = Path(save_dir) if save_dir else Path(im_dir).parent / "labels-segment"
+    save_dir.mkdir(parents=True, exist_ok=True)
+    for l in dataset.labels:
+        texts = []
+        lb_name = Path(l["im_file"]).with_suffix(".txt").name
+        txt_file = save_dir / lb_name
+        cls = l["cls"]
+        for i, s in enumerate(l["segments"]):
+            line = (int(cls[i]), *s.reshape(-1))
+            texts.append(("%g " * len(line)).rstrip() % line)
+        if texts:
+            with open(txt_file, "a") as f:
+                f.writelines(text + "\n" for text in texts)
+    LOGGER.info(f"Generated segment labels saved in {save_dir}")
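
A hedged usage sketch for the new `yolo_bbox2segment` converter (all paths are illustrative; the `sam_b.pt` checkpoint is fetched automatically by Ultralytics if not present locally):

```python
from ultralytics.data.converter import yolo_bbox2segment

# Convert an existing detection dataset to segment labels via SAM
yolo_bbox2segment(
    im_dir="datasets/my_dataset/images/train",  # images; labels expected in the sibling 'labels' dir
    save_dir=None,  # None -> writes to datasets/my_dataset/labels-segment
    sam_model="sam_b.pt",  # SAM weights used to turn each box into a polygon
)
```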
diff --git a/ultralytics/data/dataset.py b/ultralytics/data/dataset.py
index 65fe141..42b7cc1 100644
--- a/ultralytics/data/dataset.py
+++ b/ultralytics/data/dataset.py
@@ -8,15 +8,16 @@ import cv2
 import numpy as np
 import torch
 import torchvision
+from PIL import Image

 from ultralytics.utils import LOCAL_RANK, NUM_THREADS, TQDM, colorstr, is_dir_writeable
-
-from .augment import Compose, Format, Instances, LetterBox, classify_albumentations, classify_transforms, v8_transforms
+from ultralytics.utils.ops import resample_segments
+from .augment import Compose, Format, Instances, LetterBox, classify_augmentations, classify_transforms, v8_transforms
 from .base import BaseDataset
 from .utils import HELP_URL, LOGGER, get_hash, img2label_paths, verify_image, verify_image_label

 # Ultralytics dataset *.cache version, >= 1.0.0 for YOLOv8
-DATASET_CACHE_VERSION = '1.0.3'
+DATASET_CACHE_VERSION = "1.0.3"


 class YOLODataset(BaseDataset):
@@ -25,40 +26,54 @@ class YOLODataset(BaseDataset):

     Args:
         data (dict, optional): A dataset YAML dictionary. Defaults to None.
-        use_segments (bool, optional): If True, segmentation masks are used as labels. Defaults to False.
-        use_keypoints (bool, optional): If True, keypoints are used as labels. Defaults to False.
+        task (str): An explicit arg to specify the current task. Defaults to 'detect'.

     Returns:
         (torch.utils.data.Dataset): A PyTorch dataset object that can be used for training an object detection model.
     """

-    def __init__(self, *args, data=None, use_segments=False, use_keypoints=False, **kwargs):
-        self.use_segments = use_segments
-        self.use_keypoints = use_keypoints
+    def __init__(self, *args, data=None, task="detect", **kwargs):
+        """Initializes the YOLODataset with optional configurations for segments and keypoints."""
+        self.use_segments = task == "segment"
+        self.use_keypoints = task == "pose"
+        self.use_obb = task == "obb"
         self.data = data
-        assert not (self.use_segments and self.use_keypoints), 'Can not use both segments and keypoints.'
+        assert not (self.use_segments and self.use_keypoints), "Can not use both segments and keypoints."
         super().__init__(*args, **kwargs)

-    def cache_labels(self, path=Path('./labels.cache')):
-        """Cache dataset labels, check images and read shapes.
+    def cache_labels(self, path=Path("./labels.cache")):
+        """
+        Cache dataset labels, check images and read shapes.
+
         Args:
-            path (Path): path where to save the cache file (default: Path('./labels.cache')).
+            path (Path): Path where to save the cache file. Default is Path('./labels.cache').
+
         Returns:
             (dict): labels.
         """
-        x = {'labels': []}
+        x = {"labels": []}
         nm, nf, ne, nc, msgs = 0, 0, 0, 0, []  # number missing, found, empty, corrupt, messages
-        desc = f'{self.prefix}Scanning {path.parent / path.stem}...'
+        desc = f"{self.prefix}Scanning {path.parent / path.stem}..."
         total = len(self.im_files)
-        nkpt, ndim = self.data.get('kpt_shape', (0, 0))
+        nkpt, ndim = self.data.get("kpt_shape", (0, 0))
         if self.use_keypoints and (nkpt <= 0 or ndim not in (2, 3)):
-            raise ValueError("'kpt_shape' in data.yaml missing or incorrect. Should be a list with [number of "
-                             "keypoints, number of dims (2 for x,y or 3 for x,y,visible)], i.e. 'kpt_shape: [17, 3]'")
+            raise ValueError(
+                "'kpt_shape' in data.yaml missing or incorrect. Should be a list with [number of "
+                "keypoints, number of dims (2 for x,y or 3 for x,y,visible)], i.e. 'kpt_shape: [17, 3]'"
+            )
         with ThreadPool(NUM_THREADS) as pool:
-            results = pool.imap(func=verify_image_label,
-                                iterable=zip(self.im_files, self.label_files, repeat(self.prefix),
-                                             repeat(self.use_keypoints), repeat(len(self.data['names'])), repeat(nkpt),
-                                             repeat(ndim)))
+            results = pool.imap(
+                func=verify_image_label,
+                iterable=zip(
+                    self.im_files,
+                    self.label_files,
+                    repeat(self.prefix),
+                    repeat(self.use_keypoints),
+                    repeat(len(self.data["names"])),
+                    repeat(nkpt),
+                    repeat(ndim),
+                ),
+            )
             pbar = TQDM(results, desc=desc, total=total)
             for im_file, lb, shape, segments, keypoint, nm_f, nf_f, ne_f, nc_f, msg in pbar:
                 nm += nm_f
@@ -66,7 +81,7 @@ class YOLODataset(BaseDataset):
                 ne += ne_f
                 nc += nc_f
                 if im_file:
-                    x['labels'].append(
+                    x["labels"].append(
                         dict(
                             im_file=im_file,
                             shape=shape,
@@ -75,60 +90,63 @@ class YOLODataset(BaseDataset):
                             segments=segments,
                             keypoints=keypoint,
                             normalized=True,
-                            bbox_format='xywh'))
+                            bbox_format="xywh",
+                        )
+                    )
                 if msg:
                     msgs.append(msg)
-                pbar.desc = f'{desc} {nf} images, {nm + ne} backgrounds, {nc} corrupt'
+                pbar.desc = f"{desc} {nf} images, {nm + ne} backgrounds, {nc} corrupt"
             pbar.close()

         if msgs:
-            LOGGER.info('\n'.join(msgs))
+            LOGGER.info("\n".join(msgs))
         if nf == 0:
-            LOGGER.warning(f'{self.prefix}WARNING ⚠️ No labels found in {path}. {HELP_URL}')
-        x['hash'] = get_hash(self.label_files + self.im_files)
-        x['results'] = nf, nm, ne, nc, len(self.im_files)
-        x['msgs'] = msgs  # warnings
+            LOGGER.warning(f"{self.prefix}WARNING ⚠️ No labels found in {path}. {HELP_URL}")
+        x["hash"] = get_hash(self.label_files + self.im_files)
+        x["results"] = nf, nm, ne, nc, len(self.im_files)
+        x["msgs"] = msgs  # warnings
         save_dataset_cache_file(self.prefix, path, x)
         return x

     def get_labels(self):
         """Returns dictionary of labels for YOLO training."""
         self.label_files = img2label_paths(self.im_files)
-        cache_path = Path(self.label_files[0]).parent.with_suffix('.cache')
+        cache_path = Path(self.label_files[0]).parent.with_suffix(".cache")
         try:
             cache, exists = load_dataset_cache_file(cache_path), True  # attempt to load a *.cache file
-            assert cache['version'] == DATASET_CACHE_VERSION  # matches current version
-            assert cache['hash'] == get_hash(self.label_files + self.im_files)  # identical hash
+            assert cache["version"] == DATASET_CACHE_VERSION  # matches current version
+            assert cache["hash"] == get_hash(self.label_files + self.im_files)  # identical hash
         except (FileNotFoundError, AssertionError, AttributeError):
             cache, exists = self.cache_labels(cache_path), False  # run cache ops

         # Display cache
-        nf, nm, ne, nc, n = cache.pop('results')  # found, missing, empty, corrupt, total
+        nf, nm, ne, nc, n = cache.pop("results")  # found, missing, empty, corrupt, total
         if exists and LOCAL_RANK in (-1, 0):
-            d = f'Scanning {cache_path}... {nf} images, {nm + ne} backgrounds, {nc} corrupt'
+            d = f"Scanning {cache_path}... {nf} images, {nm + ne} backgrounds, {nc} corrupt"
             TQDM(None, desc=self.prefix + d, total=n, initial=n)  # display results
-            if cache['msgs']:
-                LOGGER.info('\n'.join(cache['msgs']))  # display warnings
+            if cache["msgs"]:
+                LOGGER.info("\n".join(cache["msgs"]))  # display warnings

         # Read cache
-        [cache.pop(k) for k in ('hash', 'version', 'msgs')]  # remove items
-        labels = cache['labels']
+        [cache.pop(k) for k in ("hash", "version", "msgs")]  # remove items
+        labels = cache["labels"]
         if not labels:
-            LOGGER.warning(f'WARNING ⚠️ No images found in {cache_path}, training may not work correctly. {HELP_URL}')
-        self.im_files = [lb['im_file'] for lb in labels]  # update im_files
+            LOGGER.warning(f"WARNING ⚠️ No images found in {cache_path}, training may not work correctly. {HELP_URL}")
+        self.im_files = [lb["im_file"] for lb in labels]  # update im_files

         # Check if the dataset is all boxes or all segments
-        lengths = ((len(lb['cls']), len(lb['bboxes']), len(lb['segments'])) for lb in labels)
+        lengths = ((len(lb["cls"]), len(lb["bboxes"]), len(lb["segments"])) for lb in labels)
         len_cls, len_boxes, len_segments = (sum(x) for x in zip(*lengths))
         if len_segments and len_boxes != len_segments:
             LOGGER.warning(
-                f'WARNING ⚠️ Box and segment counts should be equal, but got len(segments) = {len_segments}, '
-                f'len(boxes) = {len_boxes}. To resolve this only boxes will be used and all segments will be removed. '
-                'To avoid this please supply either a detect or segment dataset, not a detect-segment mixed dataset.')
+                f"WARNING ⚠️ Box and segment counts should be equal, but got len(segments) = {len_segments}, "
+                f"len(boxes) = {len_boxes}. To resolve this only boxes will be used and all segments will be removed. "
+                "To avoid this please supply either a detect or segment dataset, not a detect-segment mixed dataset."
+            )
             for lb in labels:
-                lb['segments'] = []
+                lb["segments"] = []
         if len_cls == 0:
-            LOGGER.warning(f'WARNING ⚠️ No labels found in {cache_path}, training may not work correctly. {HELP_URL}')
+            LOGGER.warning(f"WARNING ⚠️ No labels found in {cache_path}, training may not work correctly. {HELP_URL}")
         return labels

     def build_transforms(self, hyp=None):
@@ -140,13 +158,18 @@ class YOLODataset(BaseDataset):
         else:
             transforms = Compose([LetterBox(new_shape=(self.imgsz, self.imgsz), scaleup=False)])
         transforms.append(
-            Format(bbox_format='xywh',
-                   normalize=True,
-                   return_mask=self.use_segments,
-                   return_keypoint=self.use_keypoints,
-                   batch_idx=True,
-                   mask_ratio=hyp.mask_ratio,
-                   mask_overlap=hyp.overlap_mask))
+            Format(
+                bbox_format="xywh",
+                normalize=True,
+                return_mask=self.use_segments,
+                return_keypoint=self.use_keypoints,
+                return_obb=self.use_obb,
+                batch_idx=True,
+                mask_ratio=hyp.mask_ratio,
+                mask_overlap=hyp.overlap_mask,
+                bgr=hyp.bgr if self.augment else 0.0,  # only affects training
+            )
+        )
         return transforms

     def close_mosaic(self, hyp):
@@ -157,15 +180,28 @@ class YOLODataset(BaseDataset):
         self.transforms = self.build_transforms(hyp)

     def update_labels_info(self, label):
-        """custom your label format here."""
-        # NOTE: cls is not with bboxes now, classification and semantic segmentation need an independent cls label
-        # we can make it also support classification and semantic segmentation by add or remove some dict keys there.
-        bboxes = label.pop('bboxes')
-        segments = label.pop('segments')
-        keypoints = label.pop('keypoints', None)
-        bbox_format = label.pop('bbox_format')
-        normalized = label.pop('normalized')
-        label['instances'] = Instances(bboxes, segments, keypoints, bbox_format=bbox_format, normalized=normalized)
+        """
+        Customize your label format here.
+
+        Note:
+            cls is not with bboxes now; classification and semantic segmentation need an independent cls label.
+            Classification and semantic segmentation can also be supported by adding or removing dict keys here.
+        """
+        bboxes = label.pop("bboxes")
+        segments = label.pop("segments", [])
+        keypoints = label.pop("keypoints", None)
+        bbox_format = label.pop("bbox_format")
+        normalized = label.pop("normalized")
+
+        # NOTE: do NOT resample oriented boxes
+        segment_resamples = 100 if self.use_obb else 1000
+        if len(segments) > 0:
+            # list[np.array(1000, 2)] * num_samples
+            # (N, 1000, 2)
+            segments = np.stack(resample_segments(segments, n=segment_resamples), axis=0)
+        else:
+            segments = np.zeros((0, segment_resamples, 2), dtype=np.float32)
+        label["instances"] = Instances(bboxes, segments, keypoints, bbox_format=bbox_format, normalized=normalized)
         return label

     @staticmethod
@@ -176,65 +212,75 @@ class YOLODataset(BaseDataset):
         values = list(zip(*[list(b.values()) for b in batch]))
         for i, k in enumerate(keys):
             value = values[i]
-            if k == 'img':
+            if k == "img":
                 value = torch.stack(value, 0)
-            if k in ['masks', 'keypoints', 'bboxes', 'cls']:
+            if k in ["masks", "keypoints", "bboxes", "cls", "segments", "obb"]:
                 value = torch.cat(value, 0)
             new_batch[k] = value
-        new_batch['batch_idx'] = list(new_batch['batch_idx'])
-        for i in range(len(new_batch['batch_idx'])):
-            new_batch['batch_idx'][i] += i  # add target image index for build_targets()
-        new_batch['batch_idx'] = torch.cat(new_batch['batch_idx'], 0)
+        new_batch["batch_idx"] = list(new_batch["batch_idx"])
+        for i in range(len(new_batch["batch_idx"])):
+            new_batch["batch_idx"][i] += i  # add target image index for build_targets()
+        new_batch["batch_idx"] = torch.cat(new_batch["batch_idx"], 0)
         return new_batch


 # Classification dataloaders -------------------------------------------------------------------------------------------
 class ClassificationDataset(torchvision.datasets.ImageFolder):
     """
-    YOLO Classification Dataset.
+    Extends torchvision ImageFolder to support YOLO classification tasks, offering functionalities like image
+    augmentation, caching, and verification. It's designed to efficiently handle large datasets for training deep
+    learning models, with optional image transformations and caching mechanisms to speed up training.

-    Args:
-        root (str): Dataset path.
+    This class allows for augmentations using both torchvision and Albumentations libraries, and supports caching images
+    in RAM or on disk to reduce IO overhead during training. Additionally, it implements a robust verification process
+    to ensure data integrity and consistency.

     Attributes:
-        cache_ram (bool): True if images should be cached in RAM, False otherwise.
-        cache_disk (bool): True if images should be cached on disk, False otherwise.
-        samples (list): List of samples containing file, index, npy, and im.
-        torch_transforms (callable): torchvision transforms applied to the dataset.
-        album_transforms (callable, optional): Albumentations transforms applied to the dataset if augment is True.
+        cache_ram (bool): Indicates if caching in RAM is enabled.
+        cache_disk (bool): Indicates if caching on disk is enabled.
+        samples (list): A list of tuples, each containing the path to an image, its class index, path to its .npy cache
+            file (if caching on disk), and optionally the loaded image array (if caching in RAM).
+        torch_transforms (callable): PyTorch transforms to be applied to the images.
     """

-    def __init__(self, root, args, augment=False, cache=False, prefix=''):
+    def __init__(self, root, args, augment=False, prefix=""):
         """
         Initialize YOLO object with root, image size, augmentations, and cache settings.

         Args:
-            root (str): Dataset path.
-            args (Namespace): Argument parser containing dataset related settings.
-            augment (bool, optional): True if dataset should be augmented, False otherwise. Defaults to False.
-            cache (bool | str | optional): Cache setting, can be True, False, 'ram' or 'disk'. Defaults to False.
+            root (str): Path to the dataset directory where images are stored in a class-specific folder structure.
+            args (Namespace): Configuration containing dataset-related settings such as image size, augmentation
+                parameters, and cache settings. It includes attributes like `imgsz` (image size), `fraction` (fraction
+                of data to use), `scale`, `fliplr`, `flipud`, `cache` (disk or RAM caching for faster training),
+                `auto_augment`, `hsv_h`, `hsv_s`, `hsv_v`, and `crop_fraction`.
+            augment (bool, optional): Whether to apply augmentations to the dataset. Default is False.
+            prefix (str, optional): Prefix for logging and cache filenames, aiding in dataset identification and
+                debugging. Default is an empty string.
         """
         super().__init__(root=root)
         if augment and args.fraction < 1.0:  # reduce training fraction
-            self.samples = self.samples[:round(len(self.samples) * args.fraction)]
-        self.prefix = colorstr(f'{prefix}: ') if prefix else ''
-        self.cache_ram = cache is True or cache == 'ram'
-        self.cache_disk = cache == 'disk'
+            self.samples = self.samples[: round(len(self.samples) * args.fraction)]
+        self.prefix = colorstr(f"{prefix}: ") if prefix else ""
+        self.cache_ram = args.cache is True or args.cache == "ram"  # cache images into RAM
+        self.cache_disk = args.cache == "disk"  # cache images on hard drive as uncompressed *.npy files
         self.samples = self.verify_images()  # filter out bad images
-        self.samples = [list(x) + [Path(x[0]).with_suffix('.npy'), None] for x in self.samples]  # file, index, npy, im
-        self.torch_transforms = classify_transforms(args.imgsz)
-        self.album_transforms = classify_albumentations(
-            augment=augment,
-            size=args.imgsz,
-            scale=(1.0 - args.scale, 1.0),  # (0.08, 1.0)
-            hflip=args.fliplr,
-            vflip=args.flipud,
-            hsv_h=args.hsv_h,  # HSV-Hue augmentation (fraction)
-            hsv_s=args.hsv_s,  # HSV-Saturation augmentation (fraction)
-            hsv_v=args.hsv_v,  # HSV-Value augmentation (fraction)
-            mean=(0.0, 0.0, 0.0),  # IMAGENET_MEAN
-            std=(1.0, 1.0, 1.0),  # IMAGENET_STD
-            auto_aug=False) if augment else None
+        self.samples = [list(x) + [Path(x[0]).with_suffix(".npy"), None] for x in self.samples]  # file, index, npy, im
+        scale = (1.0 - args.scale, 1.0)  # (0.08, 1.0)
+        self.torch_transforms = (
+            classify_augmentations(
+                size=args.imgsz,
+                scale=scale,
+                hflip=args.fliplr,
+                vflip=args.flipud,
+                erasing=args.erasing,
+                auto_augment=args.auto_augment,
+                hsv_h=args.hsv_h,
+                hsv_s=args.hsv_s,
+                hsv_v=args.hsv_v,
+            )
+            if augment
+            else classify_transforms(size=args.imgsz, crop_fraction=args.crop_fraction)
+        )

     def __getitem__(self, i):
         """Returns subset of data and targets corresponding to given indices."""
@@ -247,30 +293,30 @@ class ClassificationDataset(torchvision.datasets.ImageFolder):
             im = np.load(fn)
         else:  # read image
             im = cv2.imread(f)  # BGR
-        if self.album_transforms:
-            sample = self.album_transforms(image=cv2.cvtColor(im, cv2.COLOR_BGR2RGB))['image']
-        else:
-            sample = self.torch_transforms(im)
-        return {'img': sample, 'cls': j}
+        # Convert NumPy array to PIL image
+        im = Image.fromarray(cv2.cvtColor(im, cv2.COLOR_BGR2RGB))
+        sample = self.torch_transforms(im)
+        return {"img": sample, "cls": j}

     def __len__(self) -> int:
+        """Return the total number of samples in the dataset."""
         return len(self.samples)

     def verify_images(self):
         """Verify all images in dataset."""
-        desc = f'{self.prefix}Scanning {self.root}...'
+        desc = f"{self.prefix}Scanning {self.root}..."
+        path = Path(self.root).with_suffix(".cache")  # *.cache file path

         with contextlib.suppress(FileNotFoundError, AssertionError, AttributeError):
             cache = load_dataset_cache_file(path)  # attempt to load a *.cache file
-            assert cache['version'] == DATASET_CACHE_VERSION  # matches current version
-            assert cache['hash'] == get_hash([x[0] for x in self.samples])  # identical hash
-            nf, nc, n, samples = cache.pop('results')  # found, missing, empty, corrupt, total
+            assert cache["version"] == DATASET_CACHE_VERSION  # matches current version
+            assert cache["hash"] == get_hash([x[0] for x in self.samples])  # identical hash
+            nf, nc, n, samples = cache.pop("results")  # found, missing, empty, corrupt, total
             if LOCAL_RANK in (-1, 0):
-                d = f'{desc} {nf} images, {nc} corrupt'
+                d = f"{desc} {nf} images, {nc} corrupt"
                 TQDM(None, desc=d, total=n, initial=n)
-                if cache['msgs']:
-                    LOGGER.info('\n'.join(cache['msgs']))  # display warnings
+                if cache["msgs"]:
+                    LOGGER.info("\n".join(cache["msgs"]))  # display warnings
             return samples

         # Run scan if *.cache retrieval failed
@@ -285,13 +331,13 @@ class ClassificationDataset(torchvision.datasets.ImageFolder):
                 msgs.append(msg)
                 nf += nf_f
                 nc += nc_f
-                pbar.desc = f'{desc} {nf} images, {nc} corrupt'
+                pbar.desc = f"{desc} {nf} images, {nc} corrupt"
             pbar.close()
         if msgs:
-            LOGGER.info('\n'.join(msgs))
-        x['hash'] = get_hash([x[0] for x in self.samples])
-        x['results'] = nf, nc, len(samples), samples
-        x['msgs'] = msgs  # warnings
+            LOGGER.info("\n".join(msgs))
+        x["hash"] = get_hash([x[0] for x in self.samples])
+        x["results"] = nf, nc, len(samples), samples
+        x["msgs"] = msgs  # warnings
         save_dataset_cache_file(self.prefix, path, x)
         return samples

@@ -299,6 +345,7 @@ class ClassificationDataset(torchvision.datasets.ImageFolder):
 def load_dataset_cache_file(path):
     """Load an Ultralytics *.cache dictionary from path."""
     import gc
+
     gc.disable()  # reduce pickle load time https://github.com/ultralytics/ultralytics/pull/1585
     cache = np.load(str(path), allow_pickle=True).item()  # load dict
     gc.enable()
@@ -307,19 +354,29 @@ def load_dataset_cache_file(path):

 def save_dataset_cache_file(prefix, path, x):
     """Save an Ultralytics dataset *.cache dictionary x to path."""
-    x['version'] = DATASET_CACHE_VERSION  # add cache version
+    x["version"] = DATASET_CACHE_VERSION  # add cache version
     if is_dir_writeable(path.parent):
         if path.exists():
             path.unlink()  # remove *.cache file if exists
         np.save(str(path), x)  # save cache for next time
-        path.with_suffix('.cache.npy').rename(path)  # remove .npy suffix
-        LOGGER.info(f'{prefix}New cache created: {path}')
+        path.with_suffix(".cache.npy").rename(path)  # remove .npy suffix
+        LOGGER.info(f"{prefix}New cache created: {path}")
     else:
-        LOGGER.warning(f'{prefix}WARNING ⚠️ Cache directory {path.parent} is not writeable, cache not saved.')
+        LOGGER.warning(f"{prefix}WARNING ⚠️ Cache directory {path.parent} is not writeable, cache not saved.")
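
The cache helpers above are plain NumPy pickles with a renamed suffix; a small, self-contained round-trip (file name illustrative) shows the format:

```python
import numpy as np
from pathlib import Path

cache = {"labels": [], "results": (0, 0, 0, 0, 0), "msgs": [], "hash": "", "version": "1.0.3"}
path = Path("labels.cache")
np.save(str(path), cache)  # NumPy appends .npy -> labels.cache.npy
path.with_suffix(".cache.npy").rename(path)  # drop the .npy suffix, as save_dataset_cache_file does
loaded = np.load(str(path), allow_pickle=True).item()  # same trick load_dataset_cache_file uses
assert loaded["version"] == "1.0.3"
```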
+ """ def __init__(self): """Initialize a SemanticDataset object.""" diff --git a/ultralytics/data/explorer/__init__.py b/ultralytics/data/explorer/__init__.py new file mode 100644 index 0000000..ce594dc --- /dev/null +++ b/ultralytics/data/explorer/__init__.py @@ -0,0 +1,5 @@ +# Ultralytics YOLO 🚀, AGPL-3.0 license + +from .utils import plot_query_result + +__all__ = ["plot_query_result"] diff --git a/ultralytics/data/explorer/__pycache__/__init__.cpython-312.pyc b/ultralytics/data/explorer/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 0000000000000000000000000000000000000000..31a7d1849ce2941b9e2eaf03f7c170aafbf820ee GIT binary patch literal 240 zcmX@j%ge<81Q+5Tr56I}#~=<2FhLog#ej_I3@HpLj5!Rsj8Tk?3@J?Mj8ROL%$h7O z8G(|TjJE^}a`H>!3rkarD&vb%i%WA#ikN{iewxg;SW8PXbBe*-TkP@ii8(p(@hcfV zgG~Qrub+{ho2p-4n696anp%*TTB2W>pOar^XrK==A~C13Br~~KKP9mwQ9rc;Xh%_M zk$!x9W?p7Ve7s&k!ehxUVR7@@4;Va=S4ydsSFrNM%7HhJ zVVD__By)mT-=AfN|22w#Fce2Tw?dLcQbQ`5Q%WQ&Y0hMHCh6o&(M;*EY)#{#3(EFj zl?%q(F@H*d@m@H zSjur6cMRIjima0gT03Jp$&9H=J7H$qM&6_i)6BF=+vxxSv%uWQjkoKaX=XYtWqK!R zx_|Wh&V2xcAlg|w*=!;4+;h+Sp2zq7zVCeJ{O5`a3kAQl@4t_H={QCG4ZbKpz3f@F z&=hrzVkwpmQDNFk)9BZPG-0h*OZ?i9F0A+J!v?P*Z1ft#Ca;Nv>q2I)8Pe)P7O#~& zZC)EZ^`VMzrMHr#F@)^lDsMIM8$*t;)9WODQ>Z3fP`lQxTqf-kA*lM_~M*1O*{&?6w!hy#Sn~H^^aqyXs_~ZVg zyg!V=y7&uh7^9Cy`LG|Ot>;gjJcUUF4qvMSqg>#UkLBV%Rt?l)8mQTD^3<_oeP_fC z$KxFD9|~~;!7$X{4`mB6e}EGWB)1^i&kppR@f|+ZfAsh<-;v{Ieg&gTx%Fbr2$U-X z!@gjID93Y zPS5H%183w+V~*m&BtL}fIsLGv0bYy!!&)hXHR4l>AJ(y^S9D$rYh}%xby&k%UeSAP zNotbzRt!*Xn`rNwgiqu-Uha7uI~{g7zX=Kv4Llbg=OY2^HvHiemx>+!R=dFY27Qf+ zE3wp!ZkqamrYN_jpOom#_^)b2b~{``+P7QI)Muf zOEv2J{D>eLE=`c9+2;#{{DR=~30N%yzn6Zyzh^WW=6WVx80ld-E*9b9JyX$8^zx4F zJupDLKQt8&2815`iXKk>Jk{fgP4O*IRHDA*%RNea>LFHef%BV}sJo_$8Dqv&pK4E= zcFzxjvv4kJ+Pw;~lt>5N+W!9jfV^HXjC2mJsntJ&PPs-+Q)47}tusyI2tizNPC1Dh z2i;iulj5%Eu4u1lrZq3UFs&OimZqVon{p6e7dI=hW6C^K0^m8VyD1Oc6-}fgUZF&d zRVu#1b6Q(S3pH29tCcubGp&7#`ld#Sf0>@vl$*6M-?c940(|im{T0KsW?DaO7>2KS zaN1$*z^Cw|F0uDy)Xxgc4kn0{iW!Dg&B&C7Ayk3s40bXT z!T4x&JkIzdQw$IQSmaDJCV2W2RfNDX{n0pcG&&w(nP1^x!iv%c!-8l6(lQ(jae`=$ z#HM7gftv(cDTta#Ow@&-c2OI+yi+s;!#oEs)kh%%lt+r|c4->qwGM9!zTT){10r z-d=rmYG&$8*#duEXh?!+FT@?A)`dq0|er7iNwo50RuPP3q9>ZF-dN#4py4PQ}(NOI!$K zSzZQDF~veM{D6f>jg?t~tah3nGe99FKL|CI1i|Yq-A#Fc!j#h|_8f+R1}@3~k%|bz z!1)9Q+`_cOnJB1pj?A}UWW*!tCZ{H+jt;twqIGB}I_bl#{y5Ane1>3BidKnG`@(+V zl4zG-E^$+_XfOf^31jjF*-6oYDSW&?9*l}M;*VbD_>e#5*7HaLMBBIk?2j7>a}k)U z75JuGl3P?>4kR``&#H8qKz=$w7zj=kqZ|V7%qV*5W zA6dc2ZT9c*v0|vu_ofc7Q1F=VTj*HSeBbgt%k93Uwk2Vy>-|%A&ZY;>rJsE+J@DL5 zed+BNGTqOoeQcT=&-gB2!d#cl?`i6z^|7$~^pr=*8x*?3ZxjY=Bl9xw^5fC6&V~q^yUO8_bmSk9D&RUr1|R zP)ap?RWidri}H4EDaEvQ#p(EWRE!Fs)6VK+is8rUuDCeFn+!5@yek*k3@ z&noJEiERV7nQaG` znKHSziYA2sC2WCcAi87cBc3ULIFzsu`1nY~6N`%`PDYFp9MuEi;6kEyoDcF%Fi-je z#Y_SHgsmh9+A9Fza>x!@&4`MVKt|KDR7i70S02R`5Z5&2Q|pYw5|w__w1T+CY)V++ zIfeke@<#$3JGAfd?VjD9U5N^IJaCB}+6%9b zdUo>2nMB$LOo2#!?4-MjcVd`Bk~4Ykh4CQINlecumn9kl(MW^~#6^qj0qK^$b!H*F^I>W$MPItLZ`pM8 z7pA%oT`lkQT<=+E%ep*C-5d71uBKG{t!@mT&uV^WA#j>gC69ZM}N_D*X93-~nYwF=)&byjO z*4hRVSPze{RY)jJ4=@H~b-fS^1>*r+N6JryYkvu$50aw3Qa?=#b#XOcS-7UoG!Q~* zn8oFw8&wXvjj8ieX;eY7x=iuUK;36QrEZv|nbt+%Z-hoV zT1+G2&4ql^TGq^(q4yXitneJe^;Yt`Wx0vk2KZgkPwQDLX^G(h`4Re+{H6gCVIJ0e zt3qk1RLU3%WFWkP#S{W zk{`(DIDeUvV0aw??culRTh%vZuB6ljGuc!;2d8Ma!~gfd7%fB=1xFHpn3G7Uj~rqM z?PGwl#{HlPii|MvQI6rlLmbP(o4`beNeB^K1(3K zSSC2k1mkL*{tyb+Q;Z~gdzjOpO+uk7z&$?5k3$hr2E~~PJ{X4=Yyb-Bq&27r^jOrA z4uWE;SAD~8BE>LWL(yo+-OD^P%0Yhq(@JDO;EP1#9_9!)><4`nNP|)4s6Qk?T}nsc zlTbybONjGO_5hMdr9%`g1>}PdURzKn$XmfkXHm2JCjF=zUEA`D7ca)9K(i59Qzbwj 
z=5bX@>vESydICg#^%cfQ(w3H_gu>OF{fR?^*pE!>#j#vK(tr4#bt zWq&Y4z7^!<()r5xd5$>>s!q@hjrd}qK$pOP(LYWkZBZZa0}=tfIEd#GS~1Exq#K}o zhZ#B)6*$qX&eab%;@+XwW>3h-M<%Lk+B0At|g_pQJ@HNBN-S<&Qu|fTGvN zO$I{aED`VpcRe3~U>-GlqAnPL$$b&upT~DA=?Ma71kn_cc@~pMu;!dUG|myd z6_3~qR9v_-pu^4(EkPJ;A&v{5AHpm|Kqi>z5~kIYDaG$bzfGQHq)Hyyh^Y0mtXS!r z3UZ)PAi4}raGNEl#9?v6n97U`1C+|pa;!r%$_qu*lPse3GC;W~4+t>9k#7|cXQ*pV zQkHFaz#K1VFCMfC`k^r4FM!^?M6EPYwyLYWGrehR%Z-k^HpkT`XP!*gUs$$1pSL;k zR{PccGy7BQJJIXWT+^eOrbn|)k7cZn{kTs~n%WF%x8`@7)69uq)HBYw^hUPkrxHw&6(f$Qvi}8#lq@DY@iFXC6&kH{Nw@Om8}sb(~JyPJdL> zG0(mid^eb_*`74#tyL*=#@YfY@{amcZOWK#_AOrojY1^H2^mhva^u;Hm(#w9bk*dl zR^zN(p)?hhD<-P0IW?ZC?MPbgwryJ&`(gWX+x|K4(%904q$TfaN0fKh(X!Bc`}m#l z^aX!%H>kJDq;|Or~xiY0X#FT(i&Fb5&b2Ra+MZzw5i*NG3RYnIa_na)|{`}kgsaM%$7Nexaq=*nW|t`DSbudCt9P~ zvf`lX8Uc{X3X@l01o^L`GXYLO#sJ`@D8y22@V9ya2KY&eUZX@T;J~<=ry!r*gdF%& zo=2$XlZY(hDn0{@L8HW_q$pPkDrbPZ>PS)GOw-duOa@)L4oU<~_!Z-{p&V+RHnJK8 zh0xR+bvmk`>z@Wqyb=c`Y0JrkfMz7oTX##Zq-70=Z^qPJ0Hsw0j69|y%lqS?MB`rk z2Ez#bFX(GDwCXh-HKqA;?F8*MB@P|qBCxH8ic)#4Bj>}#Othsc_t^@(8tE6Z5h2C} zg2O=||Wy z5HyS=lH32lou`kgYroFDeR$sZ*2!$0`>(3IlLzl^Y?*JF_rKfm*2!DD7A9`)&1~F5 z7Jf}*>fqdoWZy4pTW0BeRqZwNoH^B!8vJ=xD~M)wtsm92&Ofx^&erTs_T`(}-nn%B z66hJtneN`DzSN~`(}_DfGEGnAng%mXgW0BYxu)kcP0weWe96Aqu9;JLXZ>vF>rdUY zQ;seFM?eAUqO6&xaoe3>asma&;s@;tfGl3xv=cnc!9akE z0fzQ7h$aDNO1pW0vHc7#R-_z~08s>q??>`DlL^*FbWRp|FQC%+D8fP%3d)p<7>;0f zHC|W?;K{mRx?o!Y0DS*A$Td8+20)pD`lW4}qK2IzyX_@XMptYKHX))Doj{~EKd59H97mW1doul!1Oz7z7a$#^I-fdm^_5N1LgX#Nyo0w;$&dxN|H zq72kAqDmhyYvVk!uQl^k3WLdg3}g+$$q4syS4#l>PhjQnFM=a#AtgVJK3#kYY$^yJ zLYBq9j)8gryTG<|Va^Zy4bXSs66`p~#zlQ(JRF+>`GNC80{)+4ViOECnN@C`#NOtx z91>+999Iw!d*IkhKv_@vGf}B>GxGj5NF)3=Z~z2vRYCA30!Ih3j={8T@UFFXmVNv1 zvXuc9;q1f7BX{i$srK*amhByB6PI>ByL|rn^p}RR=L6|y*>oqDg+O~pzPk3BYtEIc zZpl=)%(rK&JClZw`kuH`_Y>F|8~pdauaoGv`^d(yA-l0>u{vA5D_6ZIQ@tl!{b;Uw zf2Ml>a`nNq_hQ;Vl<@{~-jR%VB`cba%-3b@T{*ia zWA`kKXYCK=?E5nIeOdc~wCRAf!S)V(r@HfsBd|t^iQPrG;12-+f)W#KVlGpH1vwL{ z$iQR*65#J;FpU7SF^W)JAb_Pm;FKy)Wc3P2B;2b^>h(-QKuJcAsS5c*Jq6{T(ENZD zs!B&aUG_3Vf7tGtL zY5jWO6bwr=tW8b14pbeWzzD9w|8y~5Ix`Lgm>GEbV1CGx zBZkUSoJC*@0=r|%ROgv7RG(pl)JI^WM>LU=Vk}WtzCROoSUE(|OFE{P;lBk<<-ZM% z+X7M+*|00(ao@xbv5@8pXop2qQ~V@`YN3-!Zc)PyyA2Y@LUPQ{qw^j*0dPbOFA$MM zS}_vmGOHt$aH!l%4f}J*CK!NZ0|)D-sv1`)ZL{Sbr9}f1t@#`cI*iIE=sVV>D{1T5 zRU-+)*i{o{uKN&FJ4EfXhU1~yqZTy~xvI8IRa>^I12h%(nw-5kV{e`}E!*9T9l4!{ zGCL0~?OERWB+<;gxB|u`nm!FYzDys*NA)pHkvhge*{XBa#*DQwb!^$%p0C@waCYHX z8b!Rjrpl|<8S89xo|}IrU9)Z3vyRkUN+Lvs(kxhL5VvSVe3v{h%2RzZ3G_OGDF$R?ev=zv%CH%+tjt%8a{ zTaft*G6Tp?NVDq-e5KsB8LMs&!B(EWVA_aVMYjx!yaXFY>vENX(IdzkM*nZ1$?8fz z4Ys~TG?Q#VtWW3Fte=a5%M_-}J_1@FlNQNIV4>pHL4jtLr^Kg^pY$9j_Sd}(d6sUn zV7W!5{Na)L?}zf@XBF(SKaKnn`f-DnCG z$CBE+_8Q#DPYGFjTiVnn!C7nyeJ<`i_gHJTdA9EkdvKD<;qDIA8 zMWLTSegPF3K+AUj4oC~OqU_FG<>pM~=6PY^aJF)LQkw@-*ge;s8d%WJc4r-)r13)) zxi63Xl=rGC;oQW|pn0jY+X`F?zZYAlpbizw6Q zRha&Mh@B+9{SM%&Sh)VoI^6q6rP_7>R3Vx&O(Y^Ep%6*={>4!#i4=Vo)((+f`9<*c z^WOs(fYuK%_;qw}n~}(U1)#gJkS2sNgPScNbJgMn@2foZLXEXuLq~_8Em<(`)0iQxugr=zik!Fpg7gQV3m+2WAQUcZFD~nQ@ zu*Zr|xIy_X1slm3O~KZ&{21MoQE>vWtKyH;eAZmAY2vuqPJS~Q)~ul9*EHp8TW?#i z+~yq%r$a|Hw`@ww%5C8(qg$|te9a723l?nHYo-T`V2j-7q6gS;wkfYlnQX3Qr9MR*v$k(sED{8)hh&e+ zV3Al9ihtN23XF$PNsID=OaO!NDTyH>=Jtc7A;!tPQZ@mTmA+4KJhxSfCK#RACCcWF zB$${mkP}&`Rr41aDS~cIp$W)P10gW>B9??|WeUhwtslVT0ZuwIemI-Rj07)p5vi5y zGHtbvE@JHfdx;Pd43CGIlRIHEPcqN~bz^*jgYvL$wK}rZ2_GYXwH&`zL%iLybFFAV zh#I5TaT=_xK!J#-EM=uM`99DfuIUgkz>|w-CPtz909e`KrX^_8(X10R*#1~7GzFWD zU~eEL0cAMq6V(A!2D0cWS$r_kR#&0jup=9VExt85e^+zP`zJ^l%T6>hEgF;ZXh72H zepV5tv|bO>2j(dZ?g$5>JP0_kXar8SVu!(`Kt0e_@UajX8V^T=HQ!v+Q>r%@Ay$Dn 
zpWvJVQG+R9K%Vzl8qAI$kLnWG_KL&&Q|C`HZv4Y*mY}65!zj9V39-N$mQMPwW2q|X z&5b_LGjoDZ)cbH2C;A7}5@4;sX6@wXGC>`Y$k57u*#qW4oziMi3{sWFx81Xy4B8IQ zc4;Fx(V?zhB`q8o0b&JoViX2iPA2Ua&peOv)H!Hs2uaA%-NJDSfX79L5P~sms8=E+j^XVu$C7sMt|SCexG>unyg+PC4nfo!)n33h8bVz;JG94pwZ z`HK_~xOGv&OvtjrMWoPN=^U`Mx;%*r0uIqwmf8C_L&gzy<%!^p7HwcFjHFStk-07( zH17v{STt(mUqk10ba0sYx6vWu`upf3+Q_Tm1Aq-mcS(Mi4h9!UJ--!`{yTJ9(fJWL z0?reLc?o~gHi}}8E3df34i!#A2x>FbRp;3oiUz>KeV<1*;gz#+Fe<{EQ0{LelegN2 zw>!S0oj>tyd)Br6uWEOIF`KLDo$l-1IoFnqYs&&T=bd%+=3ECdt^-T8S=X_g>*8xuY=Q^Kpoljo~rpNenP)K{@S=ab1TAVU1`L-->JK&2?wX=aDL;0I4;?g&=BLmp z$R}vU$&-#gg+3exQ48yqm{*WfWyD7SgGdxC#7>5P2~*dh^A*fuCLM$z0!$Uj5Rk#Z zao{;j;E;PzzO96uJTnagH5dotC1odd@c#+3{1hD=Q^5kx#fujSsXuT)8po>A;Yu_- za3mkY5&J4Qe-F~p1MEBUOnZ)bIKw=gZr-yvaeMOi*d6^H?#?smO=okPp37`{F1zXZ zw8@pXZjy|gb1i!_Eqk*q`!d#jKOR?wsnKOy5VY3U_RsClIk#ot3INZ6tn*;b`DDiV zWY+mq+V<36gDG>{eC@Jp+d>;SXk^lSbZLBPB)zFW>o}FRoyuFCvw_$4k(eV(&n}%% zjiUd0`$J#X&h~+YrS3h;yOy_3Woou8S8e^_uG{e+CEic`V1H77x5_!&m)h}W1?<*A zLwe_WbI#2f=jN=lE9cyiaqhVN$kO?A@2RwNN7i{d=k#Wr-mLRN(v){LrjA2G@YQZg zjb>^)lNP*E;nduzT+Q}O&Gz(;!^3eb<_lWrWAV{#+rCLj127))yPWTv;_i20^YuX>7T}scg{^ zyP}j1B~Rg5EQ}VVtSO{Wd)>bi_PB1VRA;Q09(579r@5bJu!JuNHGkcG8DxLs`%$)*!05r$pK)liLFp zJ`-JTrj5BAs$pu#+A!`C*>@-EU{Qw$)0E>GYY`KF2Xhi!C4pz7EZvHRq4D8iuD@A8neBy>_jgU!tLWdeY-&V5oPkO^+LuiocdP5)Yyi6e2t$*gqw^2_+~m&720{nl zsDxswYS9$nZT_9f>yx+i-?iMZEIyp=>diJkmbLFqoAydLqZk{YVwvEKD)?5w8T3mP za4DFzaQ#K`d5&rND-ipY;#gID#LL3Ia;Bhaoz}#$Z1Pk1BpmV9z!7f^ocaJ!u^jd& zCsryvxtur&wx8M&8t!Joi3z{;gi7h?6WFS%-#<;4Z6gh43UD$7xTn59(bgv!SK=la z?x%r1LXp{(1E-!u*Qp3-vp@@lTCw#~5&@9^3A7Z1Hr4u@7=BBF$$x<;JUM!aOe0DdAh3fOp<+XV0PxT%@gs7~TC`@i49Dg39945l#Hnc0(%rFNwrBR1TJvW~-P z+u=NHf9y{QsV|ejW3VNXwmm@t?>4l}AHG$y+~ApOTO3&IOCFZDW^;~?jH4s#=*&5` zW*l1=25vukdrR7}HS0K$W6?wCUl+w#8a_ zNE1;WG-w@#C`FoxOW-NK)~g`VU(uNHyu3PmkWl^=j-`O%HQWi;MimCOo#Jbi7J`4+@u{UdM;&^UoQ7UWFj7z^r&!5Uh!4lSzd53l@#Th-t` z7MUgEJ!k%4SLXn*Sc$VrSF6F5f07k1(XWBtJ31n)bIv1i90p-TK9R`ECb05B@XK)d zTH(k?98N3wN8tFqWLyh6pdfM0olo&wHJHx+`f|(dfNsUqE|_YL&nT#&BX)8UO7{HyrB z5gj}^FWr)ao8SD)==7j-8XclZL*;{{ytJb47CLBf$^UC~h^fH8Mc*6f5TyCH=tEw| z|4(#Q(D@i0(tVxiLzKr4D9#@62tR=d@CtIh*6sOPFpzGY2dz%eJ#&rT^UC(Jko)Y^ysBbAhe2cEA`Zu=!^6{ZIk{Wov&#~wWauX64w(8yBD=L z9$jQ_@BC5k`@NZmkEUHmGd0Inq;$`CCC`Uu+o~ROt{kE%&%^h$TlJ0k+WHmp?Cd7b z56zXyDJV9cwzp->?W=l7bMFGZLEkk?|?{LHY zo%zPLd)9V+=Z6)J*R`iezx(s@8 zS4x)}xNe@^m#Ju7F=DU@d(w0 zr2g1duXlXB+o-Qtbr|##F(Omj-3@odMrFYXWyMf%NV>wz07pz<^OPq`i!3~(X3~P) zfxfrUA?xrO_(UxT=#r5$+#woA*$0b4jWREygS3GstJ8+b>(SI%zKhv|G|eMlbs}9e zdVjP`_xMW8U3XvZaX3;XNH>dYDvl%<^a&ok!p}qegp!EW#c8CxL`{$SKXUUMAy1-S ziXy6aROm?-MubU|3jhdxA|zrh*$_>8p)$BqbRSOxIfeg}a2}i$EltyZXQ1c}A5itb zq`I?I_uo*KUr>%;P__@K+7GDq52%gc`CDqs2h{ctsNEk>o?lYuveda>QjwoikxvYp ZXv^%SPbhf&`T*@W&=nt3=o0Yz{{T^i8Ych% literal 0 HcmV?d00001 diff --git a/ultralytics/data/explorer/__pycache__/explorer.cpython-39.pyc b/ultralytics/data/explorer/__pycache__/explorer.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c0d300d4780a22f3c6e204d3fe2a14e9962441ae GIT binary patch literal 16904 zcmds8TaX;rS?=rHcJ`{(YITX+mM!m&kw=o_#Ev34mL;vCM66^8bEcnZJZO(c%u zsJ>EFOR7qF&DW}WN$0%o8&$JpR#T-^HC;+qGo=id8-BKwMP0+sl?M1fU&`a(^ara$ zr6I19@`tMX>k{SQ=s$()}nOa)$jw)dxxsR39uoSUp@iTz#nYP<5&_ zr7GtYXT%wONpVKq$ql_Ujq5RI7p`}0Xr+QUU;Q31zFHx1irOVOEqFY%mJ8oEZl0t*(VC2-rOXts@yAtiW5W2#i^WCdn z730sM@1S8<+$hDZgJ}3uSI=E3pSd(UbK!jX?1d|@t27%k8ec%afmbbiHOE~o2dG}P zqY)|%slQIiC_Q`m#ilFP%EAqrei)53M7`QT>lb}_BN__qniqO4w>)QumBk9JSobFr zUjzafS1GBEQqmmN(Oywq9sl|(+AU>XbqvRRX{cnlrkirpZe}Tq)O5{xZLhMOo7d^e zN$pjr+)2NpITDSt~bl+qyfa3~r+w~C1fSH$Z}J7mzZ 
z7BsOR4;7_^8#YC)Ld!!R`ln++zhV4>eMmxOLs?SWc=WdFscrom>aX*mz!2Ij_fya! zGiWwk5oO9)S}!b@v0Fyq`tx!e!xjrclv=(?|F3~2%H@i02SK^~n)1fuXHQ%|>xOP6 zye90*vR7NUjs+LCzZQCx;5t3;bvK?!k|{RU#6djVBt4~uL{VFlebX%_52Me{&f+0e zPaHta3fHGL_@wbO@jHWG@C_twWl3qPd+8q&=6{X;bwyLwwRL^nprvRk;&$8E)VCDz zWN2)fZDUJa*J@Un+Dx}~%Ez@{2s4h>*1xK}s%@yB$1*NuY43j-yLeq&H`h~bt!=hb z^QgalXI{VB{T#XYIR@yT?iGyI$%}Q{39RFmM<>pj2X$KUiLscvttoHHy6J_B^=4?< zwKWTb3}UtFji5N&8sUR#&DKL}rrxYM*6Xw=g?yxY)ga2?h|GJw8${V!V=d06+*O?U zAku1$$nY_Gq*qoRjZ$7!xVRXdK?C&1-JKJnEq>I1)uWq-OBVYdNGt>Xa93MMHyRIy0j^0+6=w~>_oitY5Y(0Jk^8_=rK%2E- z9tG@81d$tdwQZR-@(KMC52ELsBSIg>O{r<)Ubru{6 z{GkSJT`otta=BV}n&7$RC|?Fkwf*>x;dqsh4>yRh7799{B1Op&BvHEN*h1KA;v-Zv zO34hB55_#D;`)BM+%=g)RE20s5I5U{UqHQO)P4FB!(6r_>wbzV{v9}CH|qxD*7xHG zy3DSbMT1on3xCCMQcfBSIPDyFGhCK?MUR%?~&~ z=Nx%SDIIhkaf7k$+ZyuiTE%tdo;-D|_*n6StwE<*S$5`5;_@TKM+J%YNG+E#68|`>g%Lq#IU19Q zOSmsKJ>g2ek&gR{(v^Cx2FV=d;tX(}(5ab0r!_MD`b~)aoJ3DtG|KqOSp_Nr^PwU^ zP(oa$gi%MXvRL;j#HN5Cxn-0Ax>^9o48X^jAyGxyo<-r6JB|W5O zw7hyi8+yyob81RWX(_ce+S&313(R0hVSBZL9+_ao0F7qQ@V!tX1DaarL(+jT9PjN( ztRHp0na9yZi<9T-7hQN`DAC*9W2TP7T6>zny4WkUtBFc?{& z)?Q`Z2nX9nIIy9K@3xH^C~rX}`okzg;V}MnC+lR<@5s`qle=TCn{Cq>Ks$P-k)^SA z%E^PVWSzk~8kH~Y;=5_wHI~Ma>f!EodP5}!c1K^&v@@F%DBS}LLo*#&fNt@1)fv45 zJcQYTqRUt_Q0SQbLo8v=ia%18S{I3C&pvH26==6 zx~N+u8*d6P#05P70UmxDQVxo_q^Hw>u_ux%ni6+oP0!Wqe&K}m+@g#2;z#vp0X@{} zVbMD4&fCDefEIOY#`Xh@)mbR|X9BgRgHWLFt3<7x1?eC@w8MmZJDOrTpXz$-bE`J# zOTABir^7trwjczixmb#D97@Yt29#19P(WhPV%(tOW)wMwIig*+EDLU;F|!Ayg(=y+d_PCMj{8^7)v`ct}BfKGJQ+iH6E+ zo(pWsz}O86St&Hi)YA>Bm8F9LRTNK7V?^orphPA&i3V06)anAVu;t3R_6nD1&)d-V zEP4x`M4}82sX5@^aePN0ZVX&ySRc{UiMI?RgYuL*p=Z>C>STYA|zYEFZeu?Pzlu#%IXGx&nlns*UeA|gID)JP{=#` z2|ySC6Tnnp`UYx^tf$*4sIX~>Gfh=q{BW3RLoMCVa9>}-J%_L+FfK#_3=o@{EoC#i zrJy{066u2aEDQ%PjyM|Fe5UpEdAA1Z3@J+S_7Jn8^=EA~+F zdqg5dKAp3|#gt~mJW_cIQgYJ@IWd5;$b=fU;6}Rd)*#BE?S)K&k>02`#9nH(O4U-Z zCE+|psjyB|Sjb3P*6B^AF&jnw6SwDDOK(;5eKC=_j6onRG|h~SXg+Ov%_dMNXC zdJaGh1LrXSTJowpAq_&1w6TmdzK9GYDIpC-jD^6Gj0Lc*p$@5jsI5Yx^Ja#|h88^e z37|}gDH9umLg=!s!U~~>nYIpTZ7drCxMDMhACSYA>X=&^;fKw^Eu}UDQ~+}5Tmx@l z{7sZ@shh)XV4Tg7Ek>F4H=#Bs4E-6hkFlWS^U2!_@CVK(q*Wp)TBj=&w*kRsTg2kQ z{H3`9g3q>SJBT)@@81m zP%$A@XPW>Uf}RGPvLlM6BHs02SJ+(EC;F!wYfwn*H;dPZb=65r2ivU!-mcq@EMvXL+T0ZhU}#n*1_KC=5%3O&{8A%JYtb1 zO>?3Z3wm3v(LHLFZ0Gxr4m!kg3ap?}WOM5@cKX!RVi-1p6GxAB*#V1{diAKJl%p-r zv1@K|sj)EC3ru?8#v70EhIAygWathchVMc%>4QvBp>H`P(x`HkW182Axv znH^pP4cPD%<|yDo;AqD}%`sVDfu)V?r_fH+O&CwM)ZjYj(v(B3Oq+BYX*H!9C>IUr zE?f!@Xl|w)SVpN1>l%8@^GrO+z_KR3(l$v{K{wN!Jg&cmYly9@5M>ZlgZ4AsyzEL7 zht|6K%*978ddsfG3u4u!p26f%v_2JjWULDkrKKTU93n_MtI-S=o%+pM7pu6Nhsi!W zDE^s?@gU$}qKTIB%*r&j0Mg2LVE;AB<0uoo%EM2*FXm9z8M{Uh)`htjTx?2iM5Tx?A&E?0`8Q6;m9=;? 
z16bpIr)#Tj$ep-BTXlmQ+&E&*!D7Eatc8p=v*Oo~mQr#)C6i~+@}FVh%eo5~hX~p3 zY{ipCXI!mlJzsBH6%a>E84Xf-iG*MthP9_d=fYYdC5Udw1%h{m^+~DTv?g|5%+@fD z4Xf+CDkm)Q<<9I2IRGyUZheTE6lAU&6zKxB!?q5Bz~8Y zuTpZIDr({`K9@~)=Z{b$cnFEIJEv#<8z`I*Im|)huv2H)FqBhA7^Rc{pf#3oz@&?7 z9rl-!L~^%XsK@3oh805K`5WU;w<$3 z0q$1r09ba_KD2f6 z$iWv2Ek(!Kfv*+%j3!RDai)+rk-rGpuCZ)S@ri=B_NS$%yw_?9R+E*KF3slxMK5%~ zfK+{A3zS5%IG|nH6;*csx4KV{Y4Xtz{j9`Qa|5`57$qX_4KMf zaaamz&hMQ9v<*e1SpEu*T0xa3Gm+D?Fiz|$OBWC+*<~2TKyiUo0kVmH*%8UiHC{tc z+b29q$CK&jCIem5`VTabzTJ9EJ6e zr;(A;m^>!rhr46I1O{VQERm1gC6S4nkVIyEf@~(tKVI$D4K8=&UuV;#8oF)Vkk@oT zIr4%I+dX75>6fGhbRFn-Ys&b^@CXT#-vJ=#R$+c@>&jgmCRw`wtFWR&CQ@~fLQV_2|n zY#SErPEK)m-GBSam+AApk0Br|Q?*yi4tWA#_-K@u1>5TxACM)X^C1L7CmbSN365Z_ z+j_q>6eZ*Z)o)X3+$PLm#&yIvOC#{#j&5)SCp@gF zTgol649ul)IQR zy>Km#5raX&hR2ld#Z0UlNEEG)yTUypZ!_j#|7z@}ahoMB0r&wB#cA1ySF;wp6}Kjz@}Ar@8DpAV05HFz#H%)|*2PC*|CEj< zDBR&q7yZ%L$#85P#$*C`{Y0;U`IqNFIn8J1DZ_wu+)(^0w%Q;QpvOJmrZ_U+Yoylcmc$E5ANbvQY`ow|qu0=ELASj0}p)uf62moN_d1j!2?Z@gHf zLJvtrK7~wy-qPRv%{G;RYh?gb;=G;fn&NWmqBa2(0z z;Xs4xc^kjIiu9hs%!42sGAko|8=*C^=aUS|X6yX6=hN=eKFHG5$9b6siQ+m8N=A}O zQY^LxSxSxT?WGv4f|AUP50bTN} zK1n&6bEJcy*j2(2Y$bA>2!m-$K%@2?0qU3XF+YCFDVRP#dU-a}<*Z4GC8% zj$cpV4xymbX2yXJm~&Za$3$69qAA3fDeLJlwUkHT073!ophY{qIp}1##n9$3jS&uQ zj!mj%@tDhh+p{Sc=+=T1=KL;l75hzL4ts`1vkYc?J5+OMaUVr>& z1&jP_TB3VawVO*Ic9vAASl)MQQwjI%6Z$)}F2dDxbGl$XV6AxW%{V{;9!t9VQ6m=v zd&QlG7cpEs8rF}})zR)Dh*$C>78^CV;4ubOA*>tHYX`Lb^!7;4Fx{?sxLA5wtoSb2 zRV%~T8M3Hko*@C(8o8PX4U0<<+f!RWv|0+dmXHfrsRFP2-{4P>P?K*6kmJpU_uS?H zp;#_7@D7(gXNxGc%0~1kwI=_~&b5q|O0`bC#UIL(uwdapAx=Ot3f%FNa7gh#z5c>(c`8@MF)qhH%B2gQ>ZxK0#xyNN zY?zp7YX~pa5R9Zj&;UtoDNE#f*B9XEzoQY7qPQ^_)?m8l7OQQD0196sv^3j#;GFbU zlRb<~XF#|e5ZDRDEViaE)u10iuOU6_UNspdi9f^4U;qj6#}ODDd?5kFU!ja*DwkdH z*HlZF$Yy)ZSa5Sff?IqG$=4HLqmMu)(~si%4r;9f$!6?eJnebW$V5N%;wi5@!I%(h zfS_)zHPpt-0u)}gu0N;LP8h(LYnN37M5s#ZP>2%O)2uf|%>?AT|N6Wp2UhR+1~WjRj|>HZG@5haZAPvuqFc&LwzB* zM-)=)0ybeGj)-((e|sD|yn>g%w&RgP#M0Rd2!EHJd}t6JaSt$5icH3#lKNb!>#9*sQl1A$gWrp8RX^t8Tn1y=Ohju5=#vd$b@lJtf^09gy7qu~y-9GIB>D z#H@u&!G0ZU5lK3{{?avnT8^Zhd}8qrc#Qsy8}1MjUQDJQzn?sCYWoG)Vqc|#HX~j@ zXY|7E%k-~uh)O}dRxoB|ku2z3Q53{?P;quH7nXRWz!fdm}%6cd7A4{{D%NPCRl%&Agc(%J1=Qn(PjqBzh~x?-%fkqC4DxsKsHxd38a5#VI1Kl_I^|+u& z@6Z)F&pREBcGr<@iBHKFHpeJOqnUfe1?~p#J dqegHDNgmi~5{^asQXXHt6(+xv>H&Sk`X4R6+j9T_ literal 0 HcmV?d00001 diff --git a/ultralytics/data/explorer/__pycache__/utils.cpython-312.pyc b/ultralytics/data/explorer/__pycache__/utils.cpython-312.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1303eee10fbdd0f0d8e394f6e2170d69f5e64676 GIT binary patch literal 10472 zcmb_CZEPFYk-PjBKSWB@r|mi``9q>@QMP0&{)!S=mL(^O9m`5=E6Fh{?n+vD$)$If zvLr9BOfS7kaR5n452kSe%3K4SjTndv+@bQJK;6Rur{D^dG}%hoIzVx4amYVk9pEng zb@Sftl9a^6NpVl&?%RFyX6C(_H*aR%{)floB%owM|2h8VR)Y9FdN7JfuRKmco&eJ z;vMngx}En$sz$1C-N9E!{3Cv-SG+}x)G$uC)-o=*)-jcg`z>;$p7Fp_!webleva5Y z6ChQ~AScOiuNY)ynH5h6)9_#$Jlo&jHv~1u`M%-d(`Wk6L%ljY!G zN!%ubt6n%2Dj#gnO;RQP7&(%GzcXU?&6&0n9gq=wVGZoZmr2xR?xa*jQ!uWspD z&bK4)*zp+f6XH{F37AzMj}(d-1x^?`3Vez#)QZL$u!!G5b^HQ>n0k=ohgu6QVqDVpOI@K+#u2yI%Sy_a( zs$e}4u%3v*dLk7PucyHweF@Q2ad8-HX-p6|Ehg$JCr?mPj16;R9LrFeL@6XueVvhb z%@RJZUD4EB1 z-JxlkxB9wst!_)IbBW74p3?eZB{7Lj2p>`dSOo=bz}inyni|xU8O1&|Bvl-4T7DVhfuv4cOKbHicxc z-YM8zLr*tQGQoR-!A6;}E5sBb`r#QH?a8khV-$EFC&x(yi+E%lg}#m_%}+^R2Ob}B z9{3>jQC~IcFe1GUJYFRN6|;lq=_n^7BTI47SX`z?X*oPWq4mr~$5@dCtF@0}qf%Vt zqT^Ip5Jj;1s69f3o~0z6$q?qQxN{j09neccD<(-~UVbHW;o^grFX5F1NX@Wg zsaCWJNum!xM8t|%1DAkRwU6;MxH3_^s8yRR@ZjO7Hc1d=mQl^KTr6N$t(dCnMjML+ zCIu!&P~EUChG2sX#b{1c&G4j>Av^&p$w^4fDXnM=w{rauL?onk_(^scagz9*zadq- 
zbbQs{lJ&Rb{DD<}Th`x}^S7`1yR!bSod0mLZ*JF{gAe_U@9#*l>4BTV>h^=#?FVz) zyOyoX>^*mGOW!BmdH+aq@YBGdd|lV=Z8xVDIOfbzRWC_06-N?(WF@J3ruW zo=Wwkq<4GM`~!cdhI#Es_j3GSw&m(h-P?AzXHiM_XY2RfI+v?&U*49jJEiqs+yC_P zq3r(W7GF+1z4TYBwR^I)d-ApWR!sNh`(pso^5&Vf!@c+8*~5dW+SIjd!EWtR5tjZ+fYeQjRJsVV!B`FJIF=#60IEi^HhsjdJ6eY(Auu|o* zSq*FqwnvKT^3$N}RZoJ(IKgI>< znQ$fC2~VO@_88Nu6z1wAv1oez8QsQv6FwPj-?&Vo6~3V;K4S!lsuD<~Y#WMFB_r<; zmq}zHHk5)itCXrquhrKUW5W<3d56(^a;H%%-W99XOKvvaO(OqiL%HKKMv|yiYGJ;d-!}oru5r_t zX&U|zU8QCgWD_=rwVm34@-kIl1LZC^k4kNE21>?fGv4ZB50$o+9s8LljNP_r?8v#^ zP>N&MR%fD4@s#8GYouRSl)f#W5(Zau6jvBNul=DUQ1Y?*BnE4X%v4;++MHWIT~zd)|4@LX^=`v5=^l(8 z378U%;PKbat~ilj`V!>cC@uF!jfp};ON?<5j;BQmoZO%jgY}5x(otMPoggqq?ICEV zT9Kz5I7+=VLyMwtm4bjF28lLWqJ+^&6e5I$s7!NF6gA+an~obf(Ug`!D&WP5KrQJ+ zwT=n`4|p$3u+R;(N1#wMr8ff^NFU*zss*zE8Aw6oAi;=(cpQCbE-#{*}=0zp%eW>9Yg&m zRI4;W#{f9OMO7Oei?LBgv;e<=S)`*`<+7$iE#{ zEkYFK`GD2%Wz{;%ih`uNK#E~tBMLv!fe=~&wzES-x)s=>x=K_+Pw48&Ll+x~$ul7> zYp6eHnM4>2O8FKiQtSt0TIf`PRRN`lR0|gwhkPTBy+V+qQJuI=!=csnlq7zSWFa0y z9Gbbvm}=r8DmkH&S5;CJkD%8U1P#$(3{s665;82IG!D}2@4$U zYuVt)+^e|#Vp`05_GI1|1(4RxkG;3O%l2IBbN8#Vt-bfJJ!l=8dtu@1{Ml50-n}z( zaU|Kdwzuo!XKz2dd@Q%Oe}&BMeSYQggT0sLjx9Vp|7_}5-qn%`(aDo*&f10c`S#SV zbal?TGuit)pFeqe?Z~m^q5IYMhgPaHM_$NUx{@d70*fQ*Ew?T`tlpYBwOYMn-Zn?h zbw2d?7DnbrQs(QSITLz#ZT_`X^ZVh|?d{p^?K#i>In$cAYGHbQI%P^#%_;L1=yl+s z+q+ni>RIxnW^Nt0naH`@=EyaNd%-pD%G7lI@N|m1Yt7emEf3{uo?ESXK3ntr%7GO+ zb8aYCb3X4FR$X-uz153d*RQ3{z5U$}JJVCQ=nrOo+l$uMEKQ`Q()7)l ze=_~l`4eZp?%1Mv&F6o&Cqp;0YFoFN8Yy^(K>W} zHr4a?Uw^nG&ED$yVB*JZcbgtGcRs9dyfM8rowlaw4;(XAuw*L5{WzDA} z$uyt*FmUViGMjHcxpFYy{8HX`4&gj_eI|9_?L@i;de>)~4jP=x>7JWc?~Hytd3!Qn zcSPs>-7Bf))PYp<;=p^JbXC^Z{GMmc*SPAVvOX&9&--?*IjUA24OvIS`_(_HzghpG z|HGkwy!gSzTVr?6-R6EK{(Sb{Y<}1CxuzF#je{#!vW+kO#&PaZ3*q1SMJwU1TsS;` z7?Rvq@~)=j$xruny>nt=V1583PXF57`m+;1AGkMg|J*M}e>L&RME>}NWZ$RzkG#{d z&^_Os3fyY?wQC;`n7%i?V$WZ=w0eQgUZC^Ojsl`*`rkSI!_gbjrD%HOE}gGCoOd7j z<(GTHagSAC;$sS-5eO5S%MdHSK#y%3xa-ji~Tz4xT$ zruUnF)Oxcuec;ZCj|XlK{ASCcWhvX(yV8_*4kmm5;Hm@tnd4Ia^vid5Ei?IZ!R%i;vB>GAOb4QF))Rr z6*xG9lP7dYmmt9dM~HA?EGUAJdWzZ2NqqdLH|sa15Pd=4LVP=k(qkJ6Q;1a(ref}N zvJj;f!69dF;!5~4LCm+2uRDeKAYo=8?q;0tn;4fu{5^qc<0h0XEf+ygmWh=oYm2mv z8skPQp-lkTDBMEsyCELWR5o!$P!l09(@UIu7ng5W-}=+F@R5Y&ti7YiNW zX4%rbkK(z@EcM3uzQMlUVQP=othc_Qd8@`0qPuq5SM*(tHvN$ECdqe|g7VTP>nI<(+XbshZXSTXn}q}nm&`_+y^U9W%R zHG}(=%Dczz|7Y6!<`Y5VrXsG8>Xt3{0|L{O7Ok9BKeg@c}dd{3&-*)Qs;Benit2QC~ zsA;OL8!pdKhD-zBz9&)ut=oZP>$!$6ge;-}pUT+L%)ln5TF@>z>lu&9ZJohGZSb+t zr~pt__!tI0A#%XsqlALqK&6Pzo*-p`of2^t9z^6l!bdm4ILKbLU!_HqG*Ru42#A5) zRa-m;NfQW&!?01Jh#W~4e?J3R3`h$=lRZ2kaAAnv?K&axyMQxbQ>|F$8kuW|*59DZ z_Ygq`QfWA~Ww9HKmb~$ife$7V|YYJ)U*6KRQdc z`fSO8N5drHt6R7>e{I#fJL}!O>TSz<+wPuSnaFtCa^4Hcfi;(Z)wMP2+M4Rgy0)*n zc4u9?S6#uZE10na_4DB2K%@A#X#6%v^uRph?>EFD^u`KFm)MN>DsI)1S>hmiXejx- zD2NhRK{`H;d>1XpQdQcFzc&)mF~9g0x?th1L!f<;gujoIG;OHegdixyBrV^vWpf+K zM<72GCt>h+K==y~e5Q<36IfpOP#q6iOJg{WYaeEHiz(m>o$W{`;PS{!lH_9>L3%$U z>VHo-e@j&Vmhk_P2!4kCcYj7a{TZ?MGos^9HV;|-C&~$zEfr+d_IQ%)BHJ;I F{{!vA)j|LO literal 0 HcmV?d00001 diff --git a/ultralytics/data/explorer/__pycache__/utils.cpython-39.pyc b/ultralytics/data/explorer/__pycache__/utils.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c17a0e42c74b77a92ea8036ccc541e0f2b9ad0af GIT binary patch literal 7285 zcmbVR-ESPpai5vpnf)f0UlOUiTpEmRX)mXZPxpaD<`JipC|LkaDI^oe8y(ieo!RA( zvooXaS&3w3fh3f14iEts;NIc~GVsd*es!Mm2l!veJSIpTh#UuU$g`+vTueIoH_yI8hSldLcxV5&u{cz*K7V5K|UAME}=I+yh$KAdg#DW$c93C9r zIyij3i$*2#!?^AB?Vc+-I^oEE#GpJLdW&~aI69c4_4Oeb^e>vOJoEbyYYYrg2lTo6QHSYv#`r-=Gd{lV+)tz<-1Kz 
diff --git a/ultralytics/data/explorer/explorer.py b/ultralytics/data/explorer/explorer.py
new file mode 100644
index 0000000..d21a5c2
--- /dev/null
+++ b/ultralytics/data/explorer/explorer.py
@@ -0,0 +1,472 @@
+# Ultralytics YOLO 🚀, AGPL-3.0 license
+
+from io import BytesIO
+from pathlib import Path
+from typing import Any, List, Tuple, Union
+
+import cv2
+import numpy as np
+import torch
+from PIL import Image
+from matplotlib import pyplot as plt
+from pandas import DataFrame
+from tqdm import tqdm
+
+from ultralytics.data.augment import Format
+from ultralytics.data.dataset import YOLODataset
+from ultralytics.data.utils import check_det_dataset
+from ultralytics.models.yolo.model import YOLO
+from ultralytics.utils import LOGGER, IterableSimpleNamespace, checks, USER_CONFIG_DIR
+from .utils import get_sim_index_schema, get_table_schema, plot_query_result, prompt_sql_query, sanitize_batch
+
+
+class ExplorerDataset(YOLODataset):
+    def __init__(self, *args, data: dict = None, **kwargs) -> None:
+        super().__init__(*args, data=data, **kwargs)
+
+    def load_image(self, i: int) -> Union[Tuple[np.ndarray, Tuple[int, int], Tuple[int, int]], Tuple[None, None, None]]:
+        """Loads 1 image from dataset index 'i' without any resize ops."""
+        im, f, fn = self.ims[i], self.im_files[i], self.npy_files[i]
+        if im is None:  # not cached in RAM
+            if fn.exists():  # load npy
+                im = np.load(fn)
+            else:  # read image
+                im = cv2.imread(f)  # BGR
+                if im is None:
+                    raise FileNotFoundError(f"Image Not Found {f}")
+            h0, w0 = im.shape[:2]  # orig hw
+            return im, (h0, w0), im.shape[:2]
+
+        return self.ims[i], self.im_hw0[i], self.im_hw[i]
+
+    def build_transforms(self, hyp: IterableSimpleNamespace = None):
+        """Creates transforms for dataset images without resizing."""
+        return Format(
+            bbox_format="xyxy",
+            normalize=False,
+            return_mask=self.use_segments,
+            return_keypoint=self.use_keypoints,
+            batch_idx=True,
+            mask_ratio=hyp.mask_ratio,
+            mask_overlap=hyp.overlap_mask,
+        )
+
+
+class Explorer:
+    def __init__(
+        self,
+        data: Union[str, Path] = "coco128.yaml",
+        model: str = "yolov8n.pt",
+        uri: str = USER_CONFIG_DIR / "explorer",
+    ) -> None:
+        # Note duckdb==0.10.0 bug https://github.com/ultralytics/ultralytics/pull/8181
+        checks.check_requirements(["lancedb>=0.4.3", "duckdb<=0.9.2"])
+        import lancedb
+
+        self.connection = lancedb.connect(uri)
+        self.table_name = Path(data).name.lower() + "_" + model.lower()
+        self.sim_idx_base_name = (
+            f"{self.table_name}_sim_idx".lower()
+        )  # Use this name and append thres and top_k to reuse the table
+        self.model = YOLO(model)
+        self.data = data  # None
+        self.choice_set = None
+
+        self.table = None
+        self.progress = 0
+
+    def create_embeddings_table(self, force: bool = False, split: str = "train") -> None:
+        """
+        Create LanceDB table containing the embeddings of the images in the dataset. The table will be reused if it
+        already exists. Pass force=True to overwrite the existing table.
+
+        Args:
+            force (bool): Whether to overwrite the existing table or not. Defaults to False.
+            split (str): Split of the dataset to use. Defaults to 'train'.
+
+        Example:
+            ```python
+            exp = Explorer()
+            exp.create_embeddings_table()
+            ```
+        """
+        if self.table is not None and not force:
+            LOGGER.info("Table already exists. Reusing it. Pass force=True to overwrite it.")
+            return
+        if self.table_name in self.connection.table_names() and not force:
+            LOGGER.info(f"Table {self.table_name} already exists. Reusing it. Pass force=True to overwrite it.")
+            self.table = self.connection.open_table(self.table_name)
+            self.progress = 1
+            return
+        if self.data is None:
+            raise ValueError("Data must be provided to create embeddings table")
+
+        data_info = check_det_dataset(self.data)
+        if split not in data_info:
+            raise ValueError(
+                f"Split {split} is not found in the dataset. Available keys in the dataset are {list(data_info.keys())}"
+            )
+
+        choice_set = data_info[split]
+        choice_set = choice_set if isinstance(choice_set, list) else [choice_set]
+        self.choice_set = choice_set
+        dataset = ExplorerDataset(img_path=choice_set, data=data_info, augment=False, cache=False, task=self.model.task)
+
+        # Create the table schema
+        batch = dataset[0]
+        vector_size = self.model.embed(batch["im_file"], verbose=False)[0].shape[0]
+        table = self.connection.create_table(self.table_name, schema=get_table_schema(vector_size), mode="overwrite")
+        table.add(
+            self._yield_batches(
+                dataset,
+                data_info,
+                self.model,
+                exclude_keys=["img", "ratio_pad", "resized_shape", "ori_shape", "batch_idx"],
+            )
+        )
+
+        self.table = table
+
+    def _yield_batches(self, dataset: ExplorerDataset, data_info: dict, model: YOLO, exclude_keys: List[str]):
+        """Generates batches of data for embedding, excluding specified keys."""
+        for i in tqdm(range(len(dataset))):
+            self.progress = float(i + 1) / len(dataset)
+            batch = dataset[i]
+            for k in exclude_keys:
+                batch.pop(k, None)
+            batch = sanitize_batch(batch, data_info)
+            batch["vector"] = model.embed(batch["im_file"], verbose=False)[0].detach().tolist()
+            yield [batch]
+
+    def query(
+        self, imgs: Union[str, np.ndarray, List[str], List[np.ndarray]] = None, limit: int = 25
+    ) -> Any:  # pyarrow.Table
+        """
+        Query the table for similar images. Accepts a single image or a list of images.
+
+        Args:
+            imgs (str or list): Path to the image or a list of paths to the images.
+            limit (int): Number of results to return.
+
+        Returns:
+            (pyarrow.Table): An arrow table containing the results. Supports converting to:
+                - pandas dataframe: `result.to_pandas()`
+                - dict of lists: `result.to_pydict()`
+
+        Example:
+            ```python
+            exp = Explorer()
+            exp.create_embeddings_table()
+            similar = exp.query(imgs='https://ultralytics.com/images/zidane.jpg')
+            ```
+        """
+        if self.table is None:
+            raise ValueError("Table is not created. Please create the table first.")
+        if isinstance(imgs, str):
+            imgs = [imgs]
+        assert isinstance(imgs, list), f"img must be a string or a list of strings. Got {type(imgs)}"
+        embeds = self.model.embed(imgs)
+        # Get avg if multiple images are passed (len > 1)
+        embeds = torch.mean(torch.stack(embeds), 0).cpu().numpy() if len(embeds) > 1 else embeds[0].cpu().numpy()
+        return self.table.search(embeds).limit(limit).to_arrow()
+
+    def sql_query(
+        self, query: str, return_type: str = "pandas"
+    ) -> Union[DataFrame, Any, None]:  # pandas.dataframe or pyarrow.Table
+        """
+        Run a SQL-Like query on the table. Utilizes LanceDB predicate pushdown.
+
+        Args:
+            query (str): SQL query to run.
+            return_type (str): Type of the result to return. Can be either 'pandas' or 'arrow'. Defaults to 'pandas'.
+
+        Returns:
+            (pyarrow.Table): An arrow table containing the results.
+
+        Example:
+            ```python
+            exp = Explorer()
+            exp.create_embeddings_table()
+            query = "SELECT * FROM 'table' WHERE labels LIKE '%person%'"
+            result = exp.sql_query(query)
+            ```
+        """
+        assert return_type in {
+            "pandas",
+            "arrow",
+        }, f"Return type should be either `pandas` or `arrow`, but got {return_type}"
+        import duckdb
+
+        if self.table is None:
+            raise ValueError("Table is not created. Please create the table first.")
+
+        # Note: using filter pushdown would be a better long term solution. Temporarily using duckdb for this.
+        table = self.table.to_arrow()  # noqa NOTE: Don't comment this. This line is used by DuckDB
+        if not query.startswith("SELECT") and not query.startswith("WHERE"):
+            raise ValueError(
+                f"Query must start with SELECT or WHERE. You can either pass the entire query or just the WHERE clause. Found {query}"
+            )
+        if query.startswith("WHERE"):
+            query = f"SELECT * FROM 'table' {query}"
+        LOGGER.info(f"Running query: {query}")
+
+        rs = duckdb.sql(query)
+        if return_type == "arrow":
+            return rs.arrow()
+        elif return_type == "pandas":
+            return rs.df()
+
+    def plot_sql_query(self, query: str, labels: bool = True) -> Image.Image:
+        """
+        Plot the results of a SQL-Like query on the table.
+
+        Args:
+            query (str): SQL query to run.
+            labels (bool): Whether to plot the labels or not.
+
+        Returns:
+            (PIL.Image): Image containing the plot.
+
+        Example:
+            ```python
+            exp = Explorer()
+            exp.create_embeddings_table()
+            query = "SELECT * FROM 'table' WHERE labels LIKE '%person%'"
+            result = exp.plot_sql_query(query)
+            ```
+        """
+        result = self.sql_query(query, return_type="arrow")
+        if len(result) == 0:
+            LOGGER.info("No results found.")
+            return None
+        img = plot_query_result(result, plot_labels=labels)
+        return Image.fromarray(img)
+
+    def get_similar(
+        self,
+        img: Union[str, np.ndarray, List[str], List[np.ndarray]] = None,
+        idx: Union[int, List[int]] = None,
+        limit: int = 25,
+        return_type: str = "pandas",
+    ) -> Union[DataFrame, Any]:  # pandas.dataframe or pyarrow.Table
+        """
+        Query the table for similar images. Accepts a single image or a list of images.
+
+        Args:
+            img (str or list): Path to the image or a list of paths to the images.
+            idx (int or list): Index of the image in the table or a list of indexes.
+            limit (int): Number of results to return. Defaults to 25.
+            return_type (str): Type of the result to return. Can be either 'pandas' or 'arrow'. Defaults to 'pandas'.
+
+        Returns:
+            (pandas.DataFrame): A dataframe containing the results.
+
+        Example:
+            ```python
+            exp = Explorer()
+            exp.create_embeddings_table()
+            similar = exp.get_similar(img='https://ultralytics.com/images/zidane.jpg')
+            ```
+        """
+        assert return_type in {
+            "pandas",
+            "arrow",
+        }, f"Return type should be either `pandas` or `arrow`, but got {return_type}"
+        img = self._check_imgs_or_idxs(img, idx)
+        similar = self.query(img, limit=limit)
+
+        if return_type == "arrow":
+            return similar
+        elif return_type == "pandas":
+            return similar.to_pandas()
+
+    def plot_similar(
+        self,
+        img: Union[str, np.ndarray, List[str], List[np.ndarray]] = None,
+        idx: Union[int, List[int]] = None,
+        limit: int = 25,
+        labels: bool = True,
+    ) -> Image.Image:
+        """
+        Plot the similar images. Accepts images or indexes.
+
+        Args:
+            img (str or list): Path to the image or a list of paths to the images.
+            idx (int or list): Index of the image in the table or a list of indexes.
+            labels (bool): Whether to plot the labels or not.
+            limit (int): Number of results to return. Defaults to 25.
+
+        Returns:
+            (PIL.Image): Image containing the plot.
+
+        Example:
+            ```python
+            exp = Explorer()
+            exp.create_embeddings_table()
+            similar = exp.plot_similar(img='https://ultralytics.com/images/zidane.jpg')
+            ```
+        """
+        similar = self.get_similar(img, idx, limit, return_type="arrow")
+        if len(similar) == 0:
+            LOGGER.info("No results found.")
+            return None
+        img = plot_query_result(similar, plot_labels=labels)
+        return Image.fromarray(img)
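
`get_similar` also accepts a row index instead of an image, which `_check_imgs_or_idxs` (further below) resolves to the stored `im_file`; a short sketch, assuming an embeddings table has been created as above:

```python
from ultralytics.data.explorer.explorer import Explorer

exp = Explorer(data="coco128.yaml", model="yolov8n.pt")
exp.create_embeddings_table()
tbl = exp.get_similar(idx=0, limit=5, return_type="arrow")  # query by table index
print(tbl.to_pydict()["im_file"])  # paths of the 5 nearest images
```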
Here, the index will contain the data points that
+        are max_dist or closer to the image in the embedding space at a given index.
+
+        Args:
+            max_dist (float): Maximum L2 distance between the embeddings to consider. Defaults to 0.2.
+            top_k (float): Percentage of the closest data points to consider when counting. Used to apply limit when
+                running vector search. Defaults to None.
+            force (bool): Whether to overwrite the existing similarity index or not. Defaults to False.
+
+        Returns:
+            (pandas.DataFrame): A dataframe containing the similarity index. Each row corresponds to an image, and columns
+                include indices of similar images and their respective distances.
+
+        Example:
+            ```python
+            exp = Explorer()
+            exp.create_embeddings_table()
+            sim_idx = exp.similarity_index()
+            ```
+        """
+        if self.table is None:
+            raise ValueError("Table is not created. Please create the table first.")
+        sim_idx_table_name = f"{self.sim_idx_base_name}_thres_{max_dist}_top_{top_k}".lower()
+        if sim_idx_table_name in self.connection.table_names() and not force:
+            LOGGER.info("Similarity matrix already exists. Reusing it. Pass force=True to overwrite it.")
+            return self.connection.open_table(sim_idx_table_name).to_pandas()
+
+        if top_k and not (1.0 >= top_k >= 0.0):
+            raise ValueError(f"top_k must be between 0.0 and 1.0. Got {top_k}")
+        if max_dist < 0.0:
+            raise ValueError(f"max_dist must be non-negative. Got {max_dist}")
+
+        top_k = int(top_k * len(self.table)) if top_k else len(self.table)
+        top_k = max(top_k, 1)
+        features = self.table.to_lance().to_table(columns=["vector", "im_file"]).to_pydict()
+        im_files = features["im_file"]
+        embeddings = features["vector"]
+
+        sim_table = self.connection.create_table(sim_idx_table_name, schema=get_sim_index_schema(), mode="overwrite")
+
+        def _yield_sim_idx():
+            """Generates a dataframe with similarity indices and distances for images."""
+            for i in tqdm(range(len(embeddings))):
+                sim_idx = self.table.search(embeddings[i]).limit(top_k).to_pandas().query(f"_distance <= {max_dist}")
+                yield [
+                    {
+                        "idx": i,
+                        "im_file": im_files[i],
+                        "count": len(sim_idx),
+                        "sim_im_files": sim_idx["im_file"].tolist(),
+                    }
+                ]
+
+        sim_table.add(_yield_sim_idx())
+        self.sim_index = sim_table
+        return sim_table.to_pandas()
+
+    def plot_similarity_index(self, max_dist: float = 0.2, top_k: float = None, force: bool = False) -> Image.Image:
+        """
+        Plot the similarity index of all the images in the table. Here, the index will contain the data points that are
+        max_dist or closer to the image in the embedding space at a given index.
+
+        Args:
+            max_dist (float): Maximum L2 distance between the embeddings to consider. Defaults to 0.2.
+            top_k (float): Percentage of closest data points to consider when counting. Used to apply limit when
+                running vector search. Defaults to None.
+            force (bool): Whether to overwrite the existing similarity index or not. Defaults to False.
+
+        Returns:
+            (PIL.Image): Image containing the plot.
+ + Example: + ```python + exp = Explorer() + exp.create_embeddings_table() + + similarity_idx_plot = exp.plot_similarity_index() + similarity_idx_plot.show() # view image preview + similarity_idx_plot.save('path/to/save/similarity_index_plot.png') # save contents to file + ``` + """ + sim_idx = self.similarity_index(max_dist=max_dist, top_k=top_k, force=force) + sim_count = sim_idx["count"].tolist() + sim_count = np.array(sim_count) + + indices = np.arange(len(sim_count)) + + # Create the bar plot + plt.bar(indices, sim_count) + + # Customize the plot (optional) + plt.xlabel("data idx") + plt.ylabel("Count") + plt.title("Similarity Count") + buffer = BytesIO() + plt.savefig(buffer, format="png") + buffer.seek(0) + + # Use Pillow to open the image from the buffer + return Image.fromarray(np.array(Image.open(buffer))) + + def _check_imgs_or_idxs( + self, img: Union[str, np.ndarray, List[str], List[np.ndarray], None], idx: Union[None, int, List[int]] + ) -> List[np.ndarray]: + if img is None and idx is None: + raise ValueError("Either img or idx must be provided.") + if img is not None and idx is not None: + raise ValueError("Only one of img or idx must be provided.") + if idx is not None: + idx = idx if isinstance(idx, list) else [idx] + img = self.table.to_lance().take(idx, columns=["im_file"]).to_pydict()["im_file"] + + return img if isinstance(img, list) else [img] + + def ask_ai(self, query): + """ + Ask AI a question. + + Args: + query (str): Question to ask. + + Returns: + (pandas.DataFrame): A dataframe containing filtered results to the SQL query. + + Example: + ```python + exp = Explorer() + exp.create_embeddings_table() + answer = exp.ask_ai('Show images with 1 person and 2 dogs') + ``` + """ + result = prompt_sql_query(query) + try: + df = self.sql_query(result) + except Exception as e: + LOGGER.error("AI generated query is not valid. Please try again with a different prompt") + LOGGER.error(e) + return None + return df + + def visualize(self, result): + """ + Visualize the results of a query. TODO. + + Args: + result (pyarrow.Table): Table containing the results of a query. + """ + pass + + def generate_report(self, result): + """ + Generate a report of the dataset. 
+
+        TODO
+        """
+        pass
diff --git a/ultralytics/data/explorer/gui/__init__.py b/ultralytics/data/explorer/gui/__init__.py
new file mode 100644
index 0000000..9e68dc1
--- /dev/null
+++ b/ultralytics/data/explorer/gui/__init__.py
@@ -0,0 +1 @@
+# Ultralytics YOLO 🚀, AGPL-3.0 license
diff --git a/ultralytics/data/explorer/gui/dash.py b/ultralytics/data/explorer/gui/dash.py
new file mode 100644
index 0000000..b082d49
--- /dev/null
+++ b/ultralytics/data/explorer/gui/dash.py
@@ -0,0 +1,268 @@
+# Ultralytics YOLO 🚀, AGPL-3.0 license
+
+import time
+from threading import Thread
+
+import pandas as pd
+
+from ultralytics import Explorer
+from ultralytics.utils import ROOT, SETTINGS
+from ultralytics.utils.checks import check_requirements
+
+check_requirements(("streamlit>=1.29.0", "streamlit-select>=0.3"))
+
+import streamlit as st
+from streamlit_select import image_select
+
+
+def _get_explorer():
+    """Initializes an Explorer instance and creates the embeddings table, with progress tracking."""
+    exp = Explorer(data=st.session_state.get("dataset"), model=st.session_state.get("model"))
+    thread = Thread(
+        target=exp.create_embeddings_table, kwargs={"force": st.session_state.get("force_recreate_embeddings")}
+    )
+    thread.start()
+    progress_bar = st.progress(0, text="Creating embeddings table...")
+    while exp.progress < 1:
+        time.sleep(0.1)
+        progress_bar.progress(exp.progress, text=f"Progress: {exp.progress * 100}%")
+    thread.join()
+    st.session_state["explorer"] = exp
+    progress_bar.empty()
+
+
+def init_explorer_form():
+    """Sets up a Streamlit form for dataset and model selection to initialize the Explorer."""
+    datasets = ROOT / "cfg" / "datasets"
+    ds = [d.name for d in datasets.glob("*.yaml")]
+    models = [
+        "yolov8n.pt",
+        "yolov8s.pt",
+        "yolov8m.pt",
+        "yolov8l.pt",
+        "yolov8x.pt",
+        "yolov8n-seg.pt",
+        "yolov8s-seg.pt",
+        "yolov8m-seg.pt",
+        "yolov8l-seg.pt",
+        "yolov8x-seg.pt",
+        "yolov8n-pose.pt",
+        "yolov8s-pose.pt",
+        "yolov8m-pose.pt",
+        "yolov8l-pose.pt",
+        "yolov8x-pose.pt",
+    ]
+    with st.form(key="explorer_init_form"):
+        col1, col2 = st.columns(2)
+        with col1:
+            st.selectbox("Select dataset", ds, key="dataset", index=ds.index("coco128.yaml"))
+        with col2:
+            st.selectbox("Select model", models, key="model")
+        st.checkbox("Force recreate embeddings", key="force_recreate_embeddings")
+
+        st.form_submit_button("Explore", on_click=_get_explorer)
+
+
+def query_form():
+    """Sets up a Streamlit form for running SQL-like queries against the embeddings table."""
+    with st.form("query_form"):
+        col1, col2 = st.columns([0.8, 0.2])
+        with col1:
+            st.text_input(
+                "Query",
+                "WHERE labels LIKE '%person%' AND labels LIKE '%dog%'",
+                label_visibility="collapsed",
+                key="query",
+            )
+        with col2:
+            st.form_submit_button("Query", on_click=run_sql_query)
+
+
+def ai_query_form():
+    """Sets up a Streamlit form for natural-language (AI) queries with custom user input."""
+    with st.form("ai_query_form"):
+        col1, col2 = st.columns([0.8, 0.2])
+        with col1:
+            st.text_input("Query", "Show images with 1 person and 1 dog", label_visibility="collapsed", key="ai_query")
+        with col2:
+            st.form_submit_button("Ask AI", on_click=run_ai_query)
+
+
+def find_similar_imgs(imgs):
+    """Runs a similarity search for the given images and stores the results in session state."""
+    exp = st.session_state["explorer"]
+    similar = exp.get_similar(img=imgs, limit=st.session_state.get("limit"), return_type="arrow")
+    paths = similar.to_pydict()["im_file"]
+    st.session_state["imgs"] = paths
+    st.session_state["res"] = similar
+
+
+def similarity_form(selected_imgs):
+    """Initializes a Streamlit form for running similarity searches on the selected images."""
+    st.write("Similarity Search")
+    with st.form("similarity_form"):
+        subcol1, subcol2 = st.columns([1, 1])
+        with subcol1:
+            st.number_input(
+                "limit", min_value=None, max_value=None, value=25, label_visibility="collapsed", key="limit"
+            )
+
+        with subcol2:
+            disabled = not len(selected_imgs)
+            st.write("Selected: ", len(selected_imgs))
+            st.form_submit_button(
+                "Search",
+                disabled=disabled,
+                on_click=find_similar_imgs,
+                args=(selected_imgs,),
+            )
+        if disabled:
+            st.error("Select at least one image to search.")
+
+
+# def persist_reset_form():
+#     with st.form("persist_reset"):
+#         col1, col2 = st.columns([1, 1])
+#         with col1:
+#             st.form_submit_button("Reset", on_click=reset)
+#
+#         with col2:
+#             st.form_submit_button("Persist", on_click=update_state, args=("PERSISTING", True))
+
+
+def run_sql_query():
+    """Executes the SQL query from session state and stores the results."""
+    st.session_state["error"] = None
+    query = st.session_state.get("query")
+    if query.strip():
+        exp = st.session_state["explorer"]
+        res = exp.sql_query(query, return_type="arrow")
+        st.session_state["imgs"] = res.to_pydict()["im_file"]
+        st.session_state["res"] = res
+
+
+def run_ai_query():
+    """Executes an AI-generated SQL query and updates session state with the results."""
+    if not SETTINGS["openai_api_key"]:
+        st.session_state["error"] = (
+            'OpenAI API key not found in settings. Please run yolo settings openai_api_key="..."'
+        )
+        return
+    st.session_state["error"] = None
+    query = st.session_state.get("ai_query")
+    if query.strip():
+        exp = st.session_state["explorer"]
+        res = exp.ask_ai(query)
+        if not isinstance(res, pd.DataFrame) or res.empty:
+            st.session_state["error"] = "No results found using AI generated query. Try another query or rerun it."
+            return
+        st.session_state["imgs"] = res["im_file"].to_list()
+        st.session_state["res"] = res
+
+
+def reset_explorer():
+    """Resets the explorer to its initial state by clearing session variables."""
+    st.session_state["explorer"] = None
+    st.session_state["imgs"] = None
+    st.session_state["error"] = None
+
+
+def utralytics_explorer_docs_callback():
+    """Displays the Ultralytics logo and a link to the Explorer API documentation."""
+    with st.container(border=True):
+        st.image(
+            "https://raw.githubusercontent.com/ultralytics/assets/main/logo/Ultralytics_Logotype_Original.svg",
+            width=100,
+        )
+        st.markdown(
+            "<p>This demo is built using Ultralytics Explorer API. Visit <a href='https://docs.ultralytics.com/datasets/explorer/'>API docs</a> to try examples & learn more</p>",
", + unsafe_allow_html=True, + help=None, + ) + st.link_button("Ultrlaytics Explorer API", "https://docs.ultralytics.com/datasets/explorer/") + + +def layout(): + """Resets explorer session variables and provides documentation with a link to API docs.""" + st.set_page_config(layout="wide", initial_sidebar_state="collapsed") + st.markdown("
<h1 style='text-align: center;'>Ultralytics Explorer Demo</h1>
", unsafe_allow_html=True) + + if st.session_state.get("explorer") is None: + init_explorer_form() + return + + st.button(":arrow_backward: Select Dataset", on_click=reset_explorer) + exp = st.session_state.get("explorer") + col1, col2 = st.columns([0.75, 0.25], gap="small") + imgs = [] + if st.session_state.get("error"): + st.error(st.session_state["error"]) + else: + if st.session_state.get("imgs"): + imgs = st.session_state.get("imgs") + else: + imgs = exp.table.to_lance().to_table(columns=["im_file"]).to_pydict()["im_file"] + st.session_state["res"] = exp.table.to_arrow() + total_imgs, selected_imgs = len(imgs), [] + with col1: + subcol1, subcol2, subcol3, subcol4, subcol5 = st.columns(5) + with subcol1: + st.write("Max Images Displayed:") + with subcol2: + num = st.number_input( + "Max Images Displayed", + min_value=0, + max_value=total_imgs, + value=min(500, total_imgs), + key="num_imgs_displayed", + label_visibility="collapsed", + ) + with subcol3: + st.write("Start Index:") + with subcol4: + start_idx = st.number_input( + "Start Index", + min_value=0, + max_value=total_imgs, + value=0, + key="start_index", + label_visibility="collapsed", + ) + with subcol5: + reset = st.button("Reset", use_container_width=False, key="reset") + if reset: + st.session_state["imgs"] = None + st.experimental_rerun() + + query_form() + ai_query_form() + if total_imgs: + labels, boxes, masks, kpts, classes = None, None, None, None, None + task = exp.model.task + if st.session_state.get("display_labels"): + labels = st.session_state.get("res").to_pydict()["labels"][start_idx : start_idx + num] + boxes = st.session_state.get("res").to_pydict()["bboxes"][start_idx : start_idx + num] + masks = st.session_state.get("res").to_pydict()["masks"][start_idx : start_idx + num] + kpts = st.session_state.get("res").to_pydict()["keypoints"][start_idx : start_idx + num] + classes = st.session_state.get("res").to_pydict()["cls"][start_idx : start_idx + num] + imgs_displayed = imgs[start_idx : start_idx + num] + selected_imgs = image_select( + f"Total samples: {total_imgs}", + images=imgs_displayed, + use_container_width=False, + # indices=[i for i in range(num)] if select_all else None, + labels=labels, + classes=classes, + bboxes=boxes, + masks=masks if task == "segment" else None, + kpts=kpts if task == "pose" else None, + ) + + with col2: + similarity_form(selected_imgs) + display_labels = st.checkbox("Labels", value=False, key="display_labels") + utralytics_explorer_docs_callback() + + +if __name__ == "__main__": + layout() diff --git a/ultralytics/data/explorer/utils.py b/ultralytics/data/explorer/utils.py new file mode 100644 index 0000000..d1c4b9b --- /dev/null +++ b/ultralytics/data/explorer/utils.py @@ -0,0 +1,166 @@ +# Ultralytics YOLO 🚀, AGPL-3.0 license + +import getpass +from typing import List + +import cv2 +import numpy as np +import pandas as pd + +from ultralytics.data.augment import LetterBox +from ultralytics.utils import LOGGER as logger +from ultralytics.utils import SETTINGS +from ultralytics.utils.checks import check_requirements +from ultralytics.utils.ops import xyxy2xywh +from ultralytics.utils.plotting import plot_images + + +def get_table_schema(vector_size): + """Extracts and returns the schema of a database table.""" + from lancedb.pydantic import LanceModel, Vector + + class Schema(LanceModel): + im_file: str + labels: List[str] + cls: List[int] + bboxes: List[List[float]] + masks: List[List[List[int]]] + keypoints: List[List[List[float]]] + vector: Vector(vector_size) + + return Schema + 
+ +def get_sim_index_schema(): + """Returns a LanceModel schema for a database table with specified vector size.""" + from lancedb.pydantic import LanceModel + + class Schema(LanceModel): + idx: int + im_file: str + count: int + sim_im_files: List[str] + + return Schema + + +def sanitize_batch(batch, dataset_info): + """Sanitizes input batch for inference, ensuring correct format and dimensions.""" + batch["cls"] = batch["cls"].flatten().int().tolist() + box_cls_pair = sorted(zip(batch["bboxes"].tolist(), batch["cls"]), key=lambda x: x[1]) + batch["bboxes"] = [box for box, _ in box_cls_pair] + batch["cls"] = [cls for _, cls in box_cls_pair] + batch["labels"] = [dataset_info["names"][i] for i in batch["cls"]] + batch["masks"] = batch["masks"].tolist() if "masks" in batch else [[[]]] + batch["keypoints"] = batch["keypoints"].tolist() if "keypoints" in batch else [[[]]] + return batch + + +def plot_query_result(similar_set, plot_labels=True): + """ + Plot images from the similar set. + + Args: + similar_set (list): Pyarrow or pandas object containing the similar data points + plot_labels (bool): Whether to plot labels or not + """ + similar_set = ( + similar_set.to_dict(orient="list") if isinstance(similar_set, pd.DataFrame) else similar_set.to_pydict() + ) + empty_masks = [[[]]] + empty_boxes = [[]] + images = similar_set.get("im_file", []) + bboxes = similar_set.get("bboxes", []) if similar_set.get("bboxes") is not empty_boxes else [] + masks = similar_set.get("masks") if similar_set.get("masks")[0] != empty_masks else [] + kpts = similar_set.get("keypoints") if similar_set.get("keypoints")[0] != empty_masks else [] + cls = similar_set.get("cls", []) + + plot_size = 640 + imgs, batch_idx, plot_boxes, plot_masks, plot_kpts = [], [], [], [], [] + for i, imf in enumerate(images): + im = cv2.imread(imf) + im = cv2.cvtColor(im, cv2.COLOR_BGR2RGB) + h, w = im.shape[:2] + r = min(plot_size / h, plot_size / w) + imgs.append(LetterBox(plot_size, center=False)(image=im).transpose(2, 0, 1)) + if plot_labels: + if len(bboxes) > i and len(bboxes[i]) > 0: + box = np.array(bboxes[i], dtype=np.float32) + box[:, [0, 2]] *= r + box[:, [1, 3]] *= r + plot_boxes.append(box) + if len(masks) > i and len(masks[i]) > 0: + mask = np.array(masks[i], dtype=np.uint8)[0] + plot_masks.append(LetterBox(plot_size, center=False)(image=mask)) + if len(kpts) > i and kpts[i] is not None: + kpt = np.array(kpts[i], dtype=np.float32) + kpt[:, :, :2] *= r + plot_kpts.append(kpt) + batch_idx.append(np.ones(len(np.array(bboxes[i], dtype=np.float32))) * i) + imgs = np.stack(imgs, axis=0) + masks = np.stack(plot_masks, axis=0) if plot_masks else np.zeros(0, dtype=np.uint8) + kpts = np.concatenate(plot_kpts, axis=0) if plot_kpts else np.zeros((0, 51), dtype=np.float32) + boxes = xyxy2xywh(np.concatenate(plot_boxes, axis=0)) if plot_boxes else np.zeros(0, dtype=np.float32) + batch_idx = np.concatenate(batch_idx, axis=0) + cls = np.concatenate([np.array(c, dtype=np.int32) for c in cls], axis=0) + + return plot_images( + imgs, batch_idx, cls, bboxes=boxes, masks=masks, kpts=kpts, max_subplots=len(images), save=False, threaded=False + ) + + +def prompt_sql_query(query): + """Plots images with optional labels from a similar data set.""" + check_requirements("openai>=1.6.1") + from openai import OpenAI + + if not SETTINGS["openai_api_key"]: + logger.warning("OpenAI API key not found in settings. 
Please enter your API key below.") + openai_api_key = getpass.getpass("OpenAI API key: ") + SETTINGS.update({"openai_api_key": openai_api_key}) + openai = OpenAI(api_key=SETTINGS["openai_api_key"]) + + messages = [ + { + "role": "system", + "content": """ + You are a helpful data scientist proficient in SQL. You need to output exactly one SQL query based on + the following schema and a user request. You only need to output the format with fixed selection + statement that selects everything from "'table'", like `SELECT * from 'table'` + + Schema: + im_file: string not null + labels: list not null + child 0, item: string + cls: list not null + child 0, item: int64 + bboxes: list> not null + child 0, item: list + child 0, item: double + masks: list>> not null + child 0, item: list> + child 0, item: list + child 0, item: int64 + keypoints: list>> not null + child 0, item: list> + child 0, item: list + child 0, item: double + vector: fixed_size_list[256] not null + child 0, item: float + + Some details about the schema: + - the "labels" column contains the string values like 'person' and 'dog' for the respective objects + in each image + - the "cls" column contains the integer values on these classes that map them the labels + + Example of a correct query: + request - Get all data points that contain 2 or more people and at least one dog + correct query- + SELECT * FROM 'table' WHERE ARRAY_LENGTH(cls) >= 2 AND ARRAY_LENGTH(FILTER(labels, x -> x = 'person')) >= 2 AND ARRAY_LENGTH(FILTER(labels, x -> x = 'dog')) >= 1; + """, + }, + {"role": "user", "content": f"{query}"}, + ] + + response = openai.chat.completions.create(model="gpt-3.5-turbo", messages=messages) + return response.choices[0].message.content diff --git a/ultralytics/data/loaders.py b/ultralytics/data/loaders.py index 6656596..4b89770 100644 --- a/ultralytics/data/loaders.py +++ b/ultralytics/data/loaders.py @@ -22,76 +22,114 @@ from ultralytics.utils.checks import check_requirements @dataclass class SourceTypes: - webcam: bool = False + """Class to represent various types of input sources for predictions.""" + + stream: bool = False screenshot: bool = False from_img: bool = False tensor: bool = False class LoadStreams: - """YOLOv8 streamloader, i.e. `yolo predict source='rtsp://example.com/media.mp4' # RTSP, RTMP, HTTP streams`.""" + """ + Stream Loader for various types of video streams, Supports RTSP, RTMP, HTTP, and TCP streams. - def __init__(self, sources='file.streams', imgsz=640, vid_stride=1, stream_buffer=False): + Attributes: + sources (str): The source input paths or URLs for the video streams. + vid_stride (int): Video frame-rate stride, defaults to 1. + buffer (bool): Whether to buffer input streams, defaults to False. + running (bool): Flag to indicate if the streaming thread is running. + mode (str): Set to 'stream' indicating real-time capture. + imgs (list): List of image frames for each stream. + fps (list): List of FPS for each stream. + frames (list): List of total frames for each stream. + threads (list): List of threads for each stream. + shape (list): List of shapes for each stream. + caps (list): List of cv2.VideoCapture objects for each stream. + bs (int): Batch size for processing. + + Methods: + __init__: Initialize the stream loader. + update: Read stream frames in daemon thread. + close: Close stream loader and release resources. + __iter__: Returns an iterator object for the class. + __next__: Returns source paths, transformed, and original images for processing. 
+ __len__: Return the length of the sources object. + + Example: + ```bash + yolo predict source='rtsp://example.com/media.mp4' + ``` + """ + + def __init__(self, sources="file.streams", vid_stride=1, buffer=False): """Initialize instance variables and check for consistent input stream shapes.""" torch.backends.cudnn.benchmark = True # faster for fixed-size inference - self.stream_buffer = stream_buffer # buffer input streams + self.buffer = buffer # buffer input streams self.running = True # running flag for Thread - self.mode = 'stream' - self.imgsz = imgsz + self.mode = "stream" self.vid_stride = vid_stride # video frame-rate stride + sources = Path(sources).read_text().rsplit() if os.path.isfile(sources) else [sources] n = len(sources) - self.sources = [ops.clean_str(x) for x in sources] # clean source names for later - self.imgs, self.fps, self.frames, self.threads, self.shape = [[]] * n, [0] * n, [0] * n, [None] * n, [None] * n + self.bs = n + self.fps = [0] * n # frames per second + self.frames = [0] * n + self.threads = [None] * n self.caps = [None] * n # video capture objects + self.imgs = [[] for _ in range(n)] # images + self.shape = [[] for _ in range(n)] # image shapes + self.sources = [ops.clean_str(x) for x in sources] # clean source names for later for i, s in enumerate(sources): # index, source # Start thread to read frames from video stream - st = f'{i + 1}/{n}: {s}... ' - if urlparse(s).hostname in ('www.youtube.com', 'youtube.com', 'youtu.be'): # if source is YouTube video - # YouTube format i.e. 'https://www.youtube.com/watch?v=Zgi9g1ksQHc' or 'https://youtu.be/Zgi9g1ksQHc' + st = f"{i + 1}/{n}: {s}... " + if urlparse(s).hostname in ("www.youtube.com", "youtube.com", "youtu.be"): # if source is YouTube video + # YouTube format i.e. 'https://www.youtube.com/watch?v=Zgi9g1ksQHc' or 'https://youtu.be/LNwODJXcvt4' s = get_best_youtube_url(s) s = eval(s) if s.isnumeric() else s # i.e. s = '0' local webcam if s == 0 and (is_colab() or is_kaggle()): - raise NotImplementedError("'source=0' webcam not supported in Colab and Kaggle notebooks. " - "Try running 'source=0' in a local environment.") + raise NotImplementedError( + "'source=0' webcam not supported in Colab and Kaggle notebooks. " + "Try running 'source=0' in a local environment." 
+ ) self.caps[i] = cv2.VideoCapture(s) # store video capture object if not self.caps[i].isOpened(): - raise ConnectionError(f'{st}Failed to open {s}') + raise ConnectionError(f"{st}Failed to open {s}") w = int(self.caps[i].get(cv2.CAP_PROP_FRAME_WIDTH)) h = int(self.caps[i].get(cv2.CAP_PROP_FRAME_HEIGHT)) fps = self.caps[i].get(cv2.CAP_PROP_FPS) # warning: may return 0 or nan self.frames[i] = max(int(self.caps[i].get(cv2.CAP_PROP_FRAME_COUNT)), 0) or float( - 'inf') # infinite stream fallback + "inf" + ) # infinite stream fallback self.fps[i] = max((fps if math.isfinite(fps) else 0) % 100, 0) or 30 # 30 FPS fallback success, im = self.caps[i].read() # guarantee first frame if not success or im is None: - raise ConnectionError(f'{st}Failed to read images from {s}') + raise ConnectionError(f"{st}Failed to read images from {s}") self.imgs[i].append(im) self.shape[i] = im.shape self.threads[i] = Thread(target=self.update, args=([i, self.caps[i], s]), daemon=True) - LOGGER.info(f'{st}Success ✅ ({self.frames[i]} frames of shape {w}x{h} at {self.fps[i]:.2f} FPS)') + LOGGER.info(f"{st}Success ✅ ({self.frames[i]} frames of shape {w}x{h} at {self.fps[i]:.2f} FPS)") self.threads[i].start() - LOGGER.info('') # newline - - # Check for common shapes - self.bs = self.__len__() + LOGGER.info("") # newline def update(self, i, cap, stream): """Read stream `i` frames in daemon thread.""" n, f = 0, self.frames[i] # frame number, frame array while self.running and cap.isOpened() and n < (f - 1): - # Only read a new frame if the buffer is empty - if not self.imgs[i] or not self.stream_buffer: + if len(self.imgs[i]) < 30: # keep a <=30-image buffer n += 1 cap.grab() # .read() = .grab() followed by .retrieve() if n % self.vid_stride == 0: success, im = cap.retrieve() if not success: im = np.zeros(self.shape[i], dtype=np.uint8) - LOGGER.warning('WARNING ⚠️ Video stream unresponsive, please check your IP camera connection.') + LOGGER.warning("WARNING ⚠️ Video stream unresponsive, please check your IP camera connection.") cap.open(stream) # re-open stream if signal was lost - self.imgs[i].append(im) # add image to buffer + if self.buffer: + self.imgs[i].append(im) + else: + self.imgs[i] = [im] else: time.sleep(0.01) # wait until the buffer is empty @@ -105,7 +143,7 @@ class LoadStreams: try: cap.release() # release video capture except Exception as e: - LOGGER.warning(f'WARNING ⚠️ Could not release VideoCapture object: {e}') + LOGGER.warning(f"WARNING ⚠️ Could not release VideoCapture object: {e}") cv2.destroyAllWindows() def __iter__(self): @@ -117,36 +155,62 @@ class LoadStreams: """Returns source paths, transformed and original images for processing.""" self.count += 1 - # Wait until a frame is available in each buffer - while not all(self.imgs): - if not all(x.is_alive() for x in self.threads) or cv2.waitKey(1) == ord('q'): # q to quit - self.close() - raise StopIteration - time.sleep(1 / min(self.fps)) + images = [] + for i, x in enumerate(self.imgs): + # Wait until a frame is available in each buffer + while not x: + if not self.threads[i].is_alive() or cv2.waitKey(1) == ord("q"): # q to quit + self.close() + raise StopIteration + time.sleep(1 / min(self.fps)) + x = self.imgs[i] + if not x: + LOGGER.warning(f"WARNING ⚠️ Waiting for stream {i}") - # Get and remove the next frame from imgs buffer - if self.stream_buffer: - images = [x.pop(0) for x in self.imgs] - else: - # Get the latest frame, and clear the rest from the imgs buffer - images = [] - for x in self.imgs: - images.append(x.pop(-1) if x else None) + 
# Get and remove the first frame from imgs buffer + if self.buffer: + images.append(x.pop(0)) + + # Get the last frame, and clear the rest from the imgs buffer + else: + images.append(x.pop(-1) if x else np.zeros(self.shape[i], dtype=np.uint8)) x.clear() - return self.sources, images, None, '' + return self.sources, images, [""] * self.bs def __len__(self): """Return the length of the sources object.""" - return len(self.sources) # 1E12 frames = 32 streams at 30 FPS for 30 years + return self.bs # 1E12 frames = 32 streams at 30 FPS for 30 years class LoadScreenshots: - """YOLOv8 screenshot dataloader, i.e. `yolo predict source=screen`.""" + """ + YOLOv8 screenshot dataloader. - def __init__(self, source, imgsz=640): - """source = [screen_number left top width height] (pixels).""" - check_requirements('mss') + This class manages the loading of screenshot images for processing with YOLOv8. + Suitable for use with `yolo predict source=screen`. + + Attributes: + source (str): The source input indicating which screen to capture. + screen (int): The screen number to capture. + left (int): The left coordinate for screen capture area. + top (int): The top coordinate for screen capture area. + width (int): The width of the screen capture area. + height (int): The height of the screen capture area. + mode (str): Set to 'stream' indicating real-time capture. + frame (int): Counter for captured frames. + sct (mss.mss): Screen capture object from `mss` library. + bs (int): Batch size, set to 1. + monitor (dict): Monitor configuration details. + + Methods: + __iter__: Returns an iterator object. + __next__: Captures the next screenshot and returns it. + """ + + def __init__(self, source): + """Source = [screen_number left top width height] (pixels).""" + check_requirements("mss") import mss # noqa source, *params = source.split() @@ -157,19 +221,19 @@ class LoadScreenshots: left, top, width, height = (int(x) for x in params) elif len(params) == 5: self.screen, left, top, width, height = (int(x) for x in params) - self.imgsz = imgsz - self.mode = 'stream' + self.mode = "stream" self.frame = 0 self.sct = mss.mss() self.bs = 1 + self.fps = 30 # Parse monitor shape monitor = self.sct.monitors[self.screen] - self.top = monitor['top'] if top is None else (monitor['top'] + top) - self.left = monitor['left'] if left is None else (monitor['left'] + left) - self.width = width or monitor['width'] - self.height = height or monitor['height'] - self.monitor = {'left': self.left, 'top': self.top, 'width': self.width, 'height': self.height} + self.top = monitor["top"] if top is None else (monitor["top"] + top) + self.left = monitor["left"] if left is None else (monitor["left"] + left) + self.width = width or monitor["width"] + self.height = height or monitor["height"] + self.monitor = {"left": self.left, "top": self.top, "width": self.width, "height": self.height} def __iter__(self): """Returns an iterator of the object.""" @@ -178,53 +242,75 @@ class LoadScreenshots: def __next__(self): """mss screen capture: get raw pixels from the screen as np array.""" im0 = np.asarray(self.sct.grab(self.monitor))[:, :, :3] # BGRA to BGR - s = f'screen {self.screen} (LTWH): {self.left},{self.top},{self.width},{self.height}: ' + s = f"screen {self.screen} (LTWH): {self.left},{self.top},{self.width},{self.height}: " self.frame += 1 - return [str(self.screen)], [im0], None, s # screen, img, vid_cap, string + return [str(self.screen)], [im0], [s] # screen, img, string -class LoadImages: - """YOLOv8 image/video dataloader, i.e. 
`yolo predict source=image.jpg/vid.mp4`.""" +class LoadImagesAndVideos: + """ + YOLOv8 image/video dataloader. - def __init__(self, path, imgsz=640, vid_stride=1): + This class manages the loading and pre-processing of image and video data for YOLOv8. It supports loading from + various formats, including single image files, video files, and lists of image and video paths. + + Attributes: + files (list): List of image and video file paths. + nf (int): Total number of files (images and videos). + video_flag (list): Flags indicating whether a file is a video (True) or an image (False). + mode (str): Current mode, 'image' or 'video'. + vid_stride (int): Stride for video frame-rate, defaults to 1. + bs (int): Batch size, set to 1 for this class. + cap (cv2.VideoCapture): Video capture object for OpenCV. + frame (int): Frame counter for video. + frames (int): Total number of frames in the video. + count (int): Counter for iteration, initialized at 0 during `__iter__()`. + + Methods: + _new_video(path): Create a new cv2.VideoCapture object for a given video path. + """ + + def __init__(self, path, batch=1, vid_stride=1): """Initialize the Dataloader and raise FileNotFoundError if file not found.""" parent = None - if isinstance(path, str) and Path(path).suffix == '.txt': # *.txt file with img/vid/dir on each line + if isinstance(path, str) and Path(path).suffix == ".txt": # *.txt file with img/vid/dir on each line parent = Path(path).parent path = Path(path).read_text().splitlines() # list of sources files = [] for p in sorted(path) if isinstance(path, (list, tuple)) else [path]: a = str(Path(p).absolute()) # do not use .resolve() https://github.com/ultralytics/ultralytics/issues/2912 - if '*' in a: + if "*" in a: files.extend(sorted(glob.glob(a, recursive=True))) # glob elif os.path.isdir(a): - files.extend(sorted(glob.glob(os.path.join(a, '*.*')))) # dir + files.extend(sorted(glob.glob(os.path.join(a, "*.*")))) # dir elif os.path.isfile(a): files.append(a) # files (absolute or relative to CWD) elif parent and (parent / p).is_file(): files.append(str((parent / p).absolute())) # files (relative to *.txt file parent) else: - raise FileNotFoundError(f'{p} does not exist') + raise FileNotFoundError(f"{p} does not exist") - images = [x for x in files if x.split('.')[-1].lower() in IMG_FORMATS] - videos = [x for x in files if x.split('.')[-1].lower() in VID_FORMATS] + images = [x for x in files if x.split(".")[-1].lower() in IMG_FORMATS] + videos = [x for x in files if x.split(".")[-1].lower() in VID_FORMATS] ni, nv = len(images), len(videos) - self.imgsz = imgsz self.files = images + videos self.nf = ni + nv # number of files + self.ni = ni # number of images self.video_flag = [False] * ni + [True] * nv - self.mode = 'image' + self.mode = "image" self.vid_stride = vid_stride # video frame-rate stride - self.bs = 1 + self.bs = batch if any(videos): self._new_video(videos[0]) # new video else: self.cap = None if self.nf == 0: - raise FileNotFoundError(f'No images or videos found in {p}. ' - f'Supported formats are:\nimages: {IMG_FORMATS}\nvideos: {VID_FORMATS}') + raise FileNotFoundError( + f"No images or videos found in {p}. 
" + f"Supported formats are:\nimages: {IMG_FORMATS}\nvideos: {VID_FORMATS}" + ) def __iter__(self): """Returns an iterator object for VideoStream or ImageFolder.""" @@ -232,71 +318,105 @@ class LoadImages: return self def __next__(self): - """Return next image, path and metadata from dataset.""" - if self.count == self.nf: - raise StopIteration - path = self.files[self.count] - - if self.video_flag[self.count]: - # Read video - self.mode = 'video' - for _ in range(self.vid_stride): - self.cap.grab() - success, im0 = self.cap.retrieve() - while not success: - self.count += 1 - self.cap.release() - if self.count == self.nf: # last video + """Returns the next batch of images or video frames along with their paths and metadata.""" + paths, imgs, info = [], [], [] + while len(imgs) < self.bs: + if self.count >= self.nf: # end of file list + if len(imgs) > 0: + return paths, imgs, info # return last partial batch + else: raise StopIteration - path = self.files[self.count] - self._new_video(path) - success, im0 = self.cap.read() - self.frame += 1 - # im0 = self._cv2_rotate(im0) # for use if cv2 autorotation is False - s = f'video {self.count + 1}/{self.nf} ({self.frame}/{self.frames}) {path}: ' + path = self.files[self.count] + if self.video_flag[self.count]: + self.mode = "video" + if not self.cap or not self.cap.isOpened(): + self._new_video(path) - else: - # Read image - self.count += 1 - im0 = cv2.imread(path) # BGR - if im0 is None: - raise FileNotFoundError(f'Image Not Found {path}') - s = f'image {self.count}/{self.nf} {path}: ' + for _ in range(self.vid_stride): + success = self.cap.grab() + if not success: + break # end of video or failure - return [path], [im0], self.cap, s + if success: + success, im0 = self.cap.retrieve() + if success: + self.frame += 1 + paths.append(path) + imgs.append(im0) + info.append(f"video {self.count + 1}/{self.nf} (frame {self.frame}/{self.frames}) {path}: ") + if self.frame == self.frames: # end of video + self.count += 1 + self.cap.release() + else: + # Move to the next file if the current video ended or failed to open + self.count += 1 + if self.cap: + self.cap.release() + if self.count < self.nf: + self._new_video(self.files[self.count]) + else: + self.mode = "image" + im0 = cv2.imread(path) # BGR + if im0 is None: + raise FileNotFoundError(f"Image Not Found {path}") + paths.append(path) + imgs.append(im0) + info.append(f"image {self.count + 1}/{self.nf} {path}: ") + self.count += 1 # move to the next file + if self.count >= self.ni: # end of image list + break + + return paths, imgs, info def _new_video(self, path): - """Create a new video capture object.""" + """Creates a new video capture object for the given path.""" self.frame = 0 self.cap = cv2.VideoCapture(path) + self.fps = int(self.cap.get(cv2.CAP_PROP_FPS)) + if not self.cap.isOpened(): + raise FileNotFoundError(f"Failed to open video {path}") self.frames = int(self.cap.get(cv2.CAP_PROP_FRAME_COUNT) / self.vid_stride) def __len__(self): - """Returns the number of files in the object.""" - return self.nf # number of files + """Returns the number of batches in the object.""" + return math.ceil(self.nf / self.bs) # number of files class LoadPilAndNumpy: + """ + Load images from PIL and Numpy arrays for batch processing. - def __init__(self, im0, imgsz=640): + This class is designed to manage loading and pre-processing of image data from both PIL and Numpy formats. + It performs basic validation and format conversion to ensure that the images are in the required format for + downstream processing. 
+ + Attributes: + paths (list): List of image paths or autogenerated filenames. + im0 (list): List of images stored as Numpy arrays. + mode (str): Type of data being processed, defaults to 'image'. + bs (int): Batch size, equivalent to the length of `im0`. + + Methods: + _single_check(im): Validate and format a single image to a Numpy array. + """ + + def __init__(self, im0): """Initialize PIL and Numpy Dataloader.""" if not isinstance(im0, list): im0 = [im0] - self.paths = [getattr(im, 'filename', f'image{i}.jpg') for i, im in enumerate(im0)] + self.paths = [getattr(im, "filename", f"image{i}.jpg") for i, im in enumerate(im0)] self.im0 = [self._single_check(im) for im in im0] - self.imgsz = imgsz - self.mode = 'image' - # Generate fake paths + self.mode = "image" self.bs = len(self.im0) @staticmethod def _single_check(im): """Validate and format an image to numpy array.""" - assert isinstance(im, (Image.Image, np.ndarray)), f'Expected PIL/np.ndarray image type, but got {type(im)}' + assert isinstance(im, (Image.Image, np.ndarray)), f"Expected PIL/np.ndarray image type, but got {type(im)}" if isinstance(im, Image.Image): - if im.mode != 'RGB': - im = im.convert('RGB') + if im.mode != "RGB": + im = im.convert("RGB") im = np.asarray(im)[:, :, ::-1] im = np.ascontiguousarray(im) # contiguous return im @@ -310,7 +430,7 @@ class LoadPilAndNumpy: if self.count == 1: # loop only once as it's batch inference raise StopIteration self.count += 1 - return self.paths, self.im0, None, '' + return self.paths, self.im0, [""] * self.bs def __iter__(self): """Enables iteration for class LoadPilAndNumpy.""" @@ -319,18 +439,36 @@ class LoadPilAndNumpy: class LoadTensor: + """ + Load images from torch.Tensor data. + + This class manages the loading and pre-processing of image data from PyTorch tensors for further processing. + + Attributes: + im0 (torch.Tensor): The input tensor containing the image(s). + bs (int): Batch size, inferred from the shape of `im0`. + mode (str): Current mode, set to 'image'. + paths (list): List of image paths or filenames. + count (int): Counter for iteration, initialized at 0 during `__iter__()`. + + Methods: + _single_check(im, stride): Validate and possibly modify the input tensor. + """ def __init__(self, im0) -> None: + """Initialize Tensor Dataloader.""" self.im0 = self._single_check(im0) self.bs = self.im0.shape[0] - self.mode = 'image' - self.paths = [getattr(im, 'filename', f'image{i}.jpg') for i, im in enumerate(im0)] + self.mode = "image" + self.paths = [getattr(im, "filename", f"image{i}.jpg") for i, im in enumerate(im0)] @staticmethod def _single_check(im, stride=32): """Validate and format an image to torch.Tensor.""" - s = f'WARNING ⚠️ torch.Tensor inputs should be BCHW i.e. shape(1, 3, 640, 640) ' \ - f'divisible by stride {stride}. Input shape{tuple(im.shape)} is incompatible.' + s = ( + f"WARNING ⚠️ torch.Tensor inputs should be BCHW i.e. shape(1, 3, 640, 640) " + f"divisible by stride {stride}. Input shape{tuple(im.shape)} is incompatible." + ) if len(im.shape) != 4: if len(im.shape) != 3: raise ValueError(s) @@ -338,9 +476,11 @@ class LoadTensor: im = im.unsqueeze(0) if im.shape[2] % stride or im.shape[3] % stride: raise ValueError(s) - if im.max() > 1.0: - LOGGER.warning(f'WARNING ⚠️ torch.Tensor inputs should be normalized 0.0-1.0 but max value is {im.max()}. 
' - f'Dividing input by 255.') + if im.max() > 1.0 + torch.finfo(im.dtype).eps: # torch.float32 eps is 1.2e-07 + LOGGER.warning( + f"WARNING ⚠️ torch.Tensor inputs should be normalized 0.0-1.0 but max value is {im.max()}. " + f"Dividing input by 255." + ) im = im.float() / 255.0 return im @@ -355,7 +495,7 @@ class LoadTensor: if self.count == 1: raise StopIteration self.count += 1 - return self.paths, self.im0, None, '' + return self.paths, self.im0, [""] * self.bs def __len__(self): """Returns the batch size.""" @@ -363,26 +503,23 @@ class LoadTensor: def autocast_list(source): - """ - Merges a list of source of different types into a list of numpy arrays or PIL images - """ + """Merges a list of source of different types into a list of numpy arrays or PIL images.""" files = [] for im in source: if isinstance(im, (str, Path)): # filename or uri - files.append(Image.open(requests.get(im, stream=True).raw if str(im).startswith('http') else im)) + files.append(Image.open(requests.get(im, stream=True).raw if str(im).startswith("http") else im)) elif isinstance(im, (Image.Image, np.ndarray)): # PIL or np Image files.append(im) else: - raise TypeError(f'type {type(im).__name__} is not a supported Ultralytics prediction source type. \n' - f'See https://docs.ultralytics.com/modes/predict for supported source types.') + raise TypeError( + f"type {type(im).__name__} is not a supported Ultralytics prediction source type. \n" + f"See https://docs.ultralytics.com/modes/predict for supported source types." + ) return files -LOADERS = LoadStreams, LoadPilAndNumpy, LoadImages, LoadScreenshots # tuple - - -def get_best_youtube_url(url, use_pafy=False): +def get_best_youtube_url(url, use_pafy=True): """ Retrieves the URL of the best quality MP4 video stream from a given YouTube video. @@ -397,16 +534,22 @@ def get_best_youtube_url(url, use_pafy=False): (str): The URL of the best quality MP4 video stream, or None if no suitable stream is found. 
""" if use_pafy: - check_requirements(('pafy', 'youtube_dl==2020.12.2')) + check_requirements(("pafy", "youtube_dl==2020.12.2")) import pafy # noqa - return pafy.new(url).getbestvideo(preftype='mp4').url + + return pafy.new(url).getbestvideo(preftype="mp4").url else: - check_requirements('yt-dlp') + check_requirements("yt-dlp") import yt_dlp - with yt_dlp.YoutubeDL({'quiet': True}) as ydl: + + with yt_dlp.YoutubeDL({"quiet": True}) as ydl: info_dict = ydl.extract_info(url, download=False) # extract info - for f in reversed(info_dict.get('formats', [])): # reversed because best is usually last + for f in reversed(info_dict.get("formats", [])): # reversed because best is usually last # Find a format with video codec, no audio, *.mp4 extension at least 1920x1080 size - good_size = (f.get('width') or 0) >= 1920 or (f.get('height') or 0) >= 1080 - if good_size and f['vcodec'] != 'none' and f['acodec'] == 'none' and f['ext'] == 'mp4': - return f.get('url') + good_size = (f.get("width") or 0) >= 1920 or (f.get("height") or 0) >= 1080 + if good_size and f["vcodec"] != "none" and f["acodec"] == "none" and f["ext"] == "mp4": + return f.get("url") + + +# Define constants +LOADERS = (LoadStreams, LoadPilAndNumpy, LoadImagesAndVideos, LoadScreenshots) diff --git a/ultralytics/data/scripts/get_coco.sh b/ultralytics/data/scripts/get_coco.sh index 126e7f0..764e280 100644 --- a/ultralytics/data/scripts/get_coco.sh +++ b/ultralytics/data/scripts/get_coco.sh @@ -1,6 +1,6 @@ #!/bin/bash # Ultralytics YOLO 🚀, AGPL-3.0 license -# Download COCO 2017 dataset http://cocodataset.org +# Download COCO 2017 dataset https://cocodataset.org # Example usage: bash data/scripts/get_coco.sh # parent # ├── ultralytics diff --git a/ultralytics/data/split_dota.py b/ultralytics/data/split_dota.py new file mode 100644 index 0000000..8a5469b --- /dev/null +++ b/ultralytics/data/split_dota.py @@ -0,0 +1,288 @@ +# Ultralytics YOLO 🚀, AGPL-3.0 license + +import itertools +from glob import glob +from math import ceil +from pathlib import Path + +import cv2 +import numpy as np +from PIL import Image +from tqdm import tqdm + +from ultralytics.data.utils import exif_size, img2label_paths +from ultralytics.utils.checks import check_requirements + +check_requirements("shapely") +from shapely.geometry import Polygon + + +def bbox_iof(polygon1, bbox2, eps=1e-6): + """ + Calculate iofs between bbox1 and bbox2. + + Args: + polygon1 (np.ndarray): Polygon coordinates, (n, 8). + bbox2 (np.ndarray): Bounding boxes, (n ,4). 
+ """ + polygon1 = polygon1.reshape(-1, 4, 2) + lt_point = np.min(polygon1, axis=-2) + rb_point = np.max(polygon1, axis=-2) + bbox1 = np.concatenate([lt_point, rb_point], axis=-1) + + lt = np.maximum(bbox1[:, None, :2], bbox2[..., :2]) + rb = np.minimum(bbox1[:, None, 2:], bbox2[..., 2:]) + wh = np.clip(rb - lt, 0, np.inf) + h_overlaps = wh[..., 0] * wh[..., 1] + + l, t, r, b = (bbox2[..., i] for i in range(4)) + polygon2 = np.stack([l, t, r, t, r, b, l, b], axis=-1).reshape(-1, 4, 2) + + sg_polys1 = [Polygon(p) for p in polygon1] + sg_polys2 = [Polygon(p) for p in polygon2] + overlaps = np.zeros(h_overlaps.shape) + for p in zip(*np.nonzero(h_overlaps)): + overlaps[p] = sg_polys1[p[0]].intersection(sg_polys2[p[-1]]).area + unions = np.array([p.area for p in sg_polys1], dtype=np.float32) + unions = unions[..., None] + + unions = np.clip(unions, eps, np.inf) + outputs = overlaps / unions + if outputs.ndim == 1: + outputs = outputs[..., None] + return outputs + + +def load_yolo_dota(data_root, split="train"): + """ + Load DOTA dataset. + + Args: + data_root (str): Data root. + split (str): The split data set, could be train or val. + + Notes: + The directory structure assumed for the DOTA dataset: + - data_root + - images + - train + - val + - labels + - train + - val + """ + assert split in ["train", "val"] + im_dir = Path(data_root) / "images" / split + assert im_dir.exists(), f"Can't find {im_dir}, please check your data root." + im_files = glob(str(Path(data_root) / "images" / split / "*")) + lb_files = img2label_paths(im_files) + annos = [] + for im_file, lb_file in zip(im_files, lb_files): + w, h = exif_size(Image.open(im_file)) + with open(lb_file) as f: + lb = [x.split() for x in f.read().strip().splitlines() if len(x)] + lb = np.array(lb, dtype=np.float32) + annos.append(dict(ori_size=(h, w), label=lb, filepath=im_file)) + return annos + + +def get_windows(im_size, crop_sizes=[1024], gaps=[200], im_rate_thr=0.6, eps=0.01): + """ + Get the coordinates of windows. + + Args: + im_size (tuple): Original image size, (h, w). + crop_sizes (List(int)): Crop size of windows. + gaps (List(int)): Gap between crops. + im_rate_thr (float): Threshold of windows areas divided by image ares. 
+ """ + h, w = im_size + windows = [] + for crop_size, gap in zip(crop_sizes, gaps): + assert crop_size > gap, f"invalid crop_size gap pair [{crop_size} {gap}]" + step = crop_size - gap + + xn = 1 if w <= crop_size else ceil((w - crop_size) / step + 1) + xs = [step * i for i in range(xn)] + if len(xs) > 1 and xs[-1] + crop_size > w: + xs[-1] = w - crop_size + + yn = 1 if h <= crop_size else ceil((h - crop_size) / step + 1) + ys = [step * i for i in range(yn)] + if len(ys) > 1 and ys[-1] + crop_size > h: + ys[-1] = h - crop_size + + start = np.array(list(itertools.product(xs, ys)), dtype=np.int64) + stop = start + crop_size + windows.append(np.concatenate([start, stop], axis=1)) + windows = np.concatenate(windows, axis=0) + + im_in_wins = windows.copy() + im_in_wins[:, 0::2] = np.clip(im_in_wins[:, 0::2], 0, w) + im_in_wins[:, 1::2] = np.clip(im_in_wins[:, 1::2], 0, h) + im_areas = (im_in_wins[:, 2] - im_in_wins[:, 0]) * (im_in_wins[:, 3] - im_in_wins[:, 1]) + win_areas = (windows[:, 2] - windows[:, 0]) * (windows[:, 3] - windows[:, 1]) + im_rates = im_areas / win_areas + if not (im_rates > im_rate_thr).any(): + max_rate = im_rates.max() + im_rates[abs(im_rates - max_rate) < eps] = 1 + return windows[im_rates > im_rate_thr] + + +def get_window_obj(anno, windows, iof_thr=0.7): + """Get objects for each window.""" + h, w = anno["ori_size"] + label = anno["label"] + if len(label): + label[:, 1::2] *= w + label[:, 2::2] *= h + iofs = bbox_iof(label[:, 1:], windows) + # Unnormalized and misaligned coordinates + return [(label[iofs[:, i] >= iof_thr]) for i in range(len(windows))] # window_anns + else: + return [np.zeros((0, 9), dtype=np.float32) for _ in range(len(windows))] # window_anns + + +def crop_and_save(anno, windows, window_objs, im_dir, lb_dir): + """ + Crop images and save new labels. + + Args: + anno (dict): Annotation dict, including `filepath`, `label`, `ori_size` as its keys. + windows (list): A list of windows coordinates. + window_objs (list): A list of labels inside each window. + im_dir (str): The output directory path of images. + lb_dir (str): The output directory path of labels. + + Notes: + The directory structure assumed for the DOTA dataset: + - data_root + - images + - train + - val + - labels + - train + - val + """ + im = cv2.imread(anno["filepath"]) + name = Path(anno["filepath"]).stem + for i, window in enumerate(windows): + x_start, y_start, x_stop, y_stop = window.tolist() + new_name = f"{name}__{x_stop - x_start}__{x_start}___{y_start}" + patch_im = im[y_start:y_stop, x_start:x_stop] + ph, pw = patch_im.shape[:2] + + cv2.imwrite(str(Path(im_dir) / f"{new_name}.jpg"), patch_im) + label = window_objs[i] + if len(label) == 0: + continue + label[:, 1::2] -= x_start + label[:, 2::2] -= y_start + label[:, 1::2] /= pw + label[:, 2::2] /= ph + + with open(Path(lb_dir) / f"{new_name}.txt", "w") as f: + for lb in label: + formatted_coords = ["{:.6g}".format(coord) for coord in lb[1:]] + f.write(f"{int(lb[0])} {' '.join(formatted_coords)}\n") + + +def split_images_and_labels(data_root, save_dir, split="train", crop_sizes=[1024], gaps=[200]): + """ + Split both images and labels. 
+ + Notes: + The directory structure assumed for the DOTA dataset: + - data_root + - images + - split + - labels + - split + and the output directory structure is: + - save_dir + - images + - split + - labels + - split + """ + im_dir = Path(save_dir) / "images" / split + im_dir.mkdir(parents=True, exist_ok=True) + lb_dir = Path(save_dir) / "labels" / split + lb_dir.mkdir(parents=True, exist_ok=True) + + annos = load_yolo_dota(data_root, split=split) + for anno in tqdm(annos, total=len(annos), desc=split): + windows = get_windows(anno["ori_size"], crop_sizes, gaps) + window_objs = get_window_obj(anno, windows) + crop_and_save(anno, windows, window_objs, str(im_dir), str(lb_dir)) + + +def split_trainval(data_root, save_dir, crop_size=1024, gap=200, rates=[1.0]): + """ + Split train and val set of DOTA. + + Notes: + The directory structure assumed for the DOTA dataset: + - data_root + - images + - train + - val + - labels + - train + - val + and the output directory structure is: + - save_dir + - images + - train + - val + - labels + - train + - val + """ + crop_sizes, gaps = [], [] + for r in rates: + crop_sizes.append(int(crop_size / r)) + gaps.append(int(gap / r)) + for split in ["train", "val"]: + split_images_and_labels(data_root, save_dir, split, crop_sizes, gaps) + + +def split_test(data_root, save_dir, crop_size=1024, gap=200, rates=[1.0]): + """ + Split test set of DOTA, labels are not included within this set. + + Notes: + The directory structure assumed for the DOTA dataset: + - data_root + - images + - test + and the output directory structure is: + - save_dir + - images + - test + """ + crop_sizes, gaps = [], [] + for r in rates: + crop_sizes.append(int(crop_size / r)) + gaps.append(int(gap / r)) + save_dir = Path(save_dir) / "images" / "test" + save_dir.mkdir(parents=True, exist_ok=True) + + im_dir = Path(data_root) / "images" / "test" + assert im_dir.exists(), f"Can't find {im_dir}, please check your data root." + im_files = glob(str(im_dir / "*")) + for im_file in tqdm(im_files, total=len(im_files), desc="test"): + w, h = exif_size(Image.open(im_file)) + windows = get_windows((h, w), crop_sizes=crop_sizes, gaps=gaps) + im = cv2.imread(im_file) + name = Path(im_file).stem + for window in windows: + x_start, y_start, x_stop, y_stop = window.tolist() + new_name = f"{name}__{x_stop - x_start}__{x_start}___{y_start}" + patch_im = im[y_start:y_stop, x_start:x_stop] + cv2.imwrite(str(save_dir / f"{new_name}.jpg"), patch_im) + + +if __name__ == "__main__": + split_trainval(data_root="DOTAv2", save_dir="DOTAv2-split") + split_test(data_root="DOTAv2", save_dir="DOTAv2-split") diff --git a/ultralytics/data/utils.py b/ultralytics/data/utils.py index 3b780f2..c0a0773 100644 --- a/ultralytics/data/utils.py +++ b/ultralytics/data/utils.py @@ -17,36 +17,47 @@ import numpy as np from PIL import Image, ImageOps from ultralytics.nn.autobackend import check_class_names -from ultralytics.utils import (DATASETS_DIR, LOGGER, NUM_THREADS, ROOT, SETTINGS_YAML, TQDM, clean_url, colorstr, - emojis, yaml_load) +from ultralytics.utils import ( + DATASETS_DIR, + LOGGER, + NUM_THREADS, + ROOT, + SETTINGS_YAML, + TQDM, + clean_url, + colorstr, + emojis, + yaml_load, + yaml_save, +) from ultralytics.utils.checks import check_file, check_font, is_ascii from ultralytics.utils.downloads import download, safe_download, unzip_file from ultralytics.utils.ops import segments2boxes -HELP_URL = 'See https://docs.ultralytics.com/datasets/detect for dataset formatting guidance.' 
-IMG_FORMATS = 'bmp', 'dng', 'jpeg', 'jpg', 'mpo', 'png', 'tif', 'tiff', 'webp', 'pfm' # image suffixes -VID_FORMATS = 'asf', 'avi', 'gif', 'm4v', 'mkv', 'mov', 'mp4', 'mpeg', 'mpg', 'ts', 'wmv', 'webm' # video suffixes -PIN_MEMORY = str(os.getenv('PIN_MEMORY', True)).lower() == 'true' # global pin_memory for dataloaders +HELP_URL = "See https://docs.ultralytics.com/datasets/detect for dataset formatting guidance." +IMG_FORMATS = {"bmp", "dng", "jpeg", "jpg", "mpo", "png", "tif", "tiff", "webp", "pfm"} # image suffixes +VID_FORMATS = {"asf", "avi", "gif", "m4v", "mkv", "mov", "mp4", "mpeg", "mpg", "ts", "wmv", "webm"} # video suffixes +PIN_MEMORY = str(os.getenv("PIN_MEMORY", True)).lower() == "true" # global pin_memory for dataloaders def img2label_paths(img_paths): """Define label paths as a function of image paths.""" - sa, sb = f'{os.sep}images{os.sep}', f'{os.sep}labels{os.sep}' # /images/, /labels/ substrings - return [sb.join(x.rsplit(sa, 1)).rsplit('.', 1)[0] + '.txt' for x in img_paths] + sa, sb = f"{os.sep}images{os.sep}", f"{os.sep}labels{os.sep}" # /images/, /labels/ substrings + return [sb.join(x.rsplit(sa, 1)).rsplit(".", 1)[0] + ".txt" for x in img_paths] def get_hash(paths): """Returns a single hash value of a list of paths (files or dirs).""" size = sum(os.path.getsize(p) for p in paths if os.path.exists(p)) # sizes h = hashlib.sha256(str(size).encode()) # hash sizes - h.update(''.join(paths).encode()) # hash paths + h.update("".join(paths).encode()) # hash paths return h.hexdigest() # return hash def exif_size(img: Image.Image): """Returns exif-corrected PIL size.""" s = img.size # (width, height) - if img.format == 'JPEG': # only support JPEG images + if img.format == "JPEG": # only support JPEG images with contextlib.suppress(Exception): exif = img.getexif() if exif: @@ -60,24 +71,24 @@ def verify_image(args): """Verify one image.""" (im_file, cls), prefix = args # Number (found, corrupt), message - nf, nc, msg = 0, 0, '' + nf, nc, msg = 0, 0, "" try: im = Image.open(im_file) im.verify() # PIL verify shape = exif_size(im) # image size shape = (shape[1], shape[0]) # hw - assert (shape[0] > 9) & (shape[1] > 9), f'image size {shape} <10 pixels' - assert im.format.lower() in IMG_FORMATS, f'invalid image format {im.format}' - if im.format.lower() in ('jpg', 'jpeg'): - with open(im_file, 'rb') as f: + assert (shape[0] > 9) & (shape[1] > 9), f"image size {shape} <10 pixels" + assert im.format.lower() in IMG_FORMATS, f"invalid image format {im.format}" + if im.format.lower() in ("jpg", "jpeg"): + with open(im_file, "rb") as f: f.seek(-2, 2) - if f.read() != b'\xff\xd9': # corrupt JPEG - ImageOps.exif_transpose(Image.open(im_file)).save(im_file, 'JPEG', subsampling=0, quality=100) - msg = f'{prefix}WARNING ⚠️ {im_file}: corrupt JPEG restored and saved' + if f.read() != b"\xff\xd9": # corrupt JPEG + ImageOps.exif_transpose(Image.open(im_file)).save(im_file, "JPEG", subsampling=0, quality=100) + msg = f"{prefix}WARNING ⚠️ {im_file}: corrupt JPEG restored and saved" nf = 1 except Exception as e: nc = 1 - msg = f'{prefix}WARNING ⚠️ {im_file}: ignoring corrupt image/label: {e}' + msg = f"{prefix}WARNING ⚠️ {im_file}: ignoring corrupt image/label: {e}" return (im_file, cls), nf, nc, msg @@ -85,21 +96,21 @@ def verify_image_label(args): """Verify one image-label pair.""" im_file, lb_file, prefix, keypoint, num_cls, nkpt, ndim = args # Number (missing, found, empty, corrupt), message, segments, keypoints - nm, nf, ne, nc, msg, segments, keypoints = 0, 0, 0, 0, '', [], None + nm, nf, ne, 
nc, msg, segments, keypoints = 0, 0, 0, 0, '', [], None
+    nm, nf, ne, nc, msg, segments, keypoints = 0, 0, 0, 0, "", [], None
     try:
         # Verify images
         im = Image.open(im_file)
         im.verify()  # PIL verify
         shape = exif_size(im)  # image size
         shape = (shape[1], shape[0])  # hw
-        assert (shape[0] > 9) & (shape[1] > 9), f'image size {shape} <10 pixels'
-        assert im.format.lower() in IMG_FORMATS, f'invalid image format {im.format}'
-        if im.format.lower() in ('jpg', 'jpeg'):
-            with open(im_file, 'rb') as f:
+        assert (shape[0] > 9) & (shape[1] > 9), f"image size {shape} <10 pixels"
+        assert im.format.lower() in IMG_FORMATS, f"invalid image format {im.format}"
+        if im.format.lower() in ("jpg", "jpeg"):
+            with open(im_file, "rb") as f:
                 f.seek(-2, 2)
-                if f.read() != b'\xff\xd9':  # corrupt JPEG
-                    ImageOps.exif_transpose(Image.open(im_file)).save(im_file, 'JPEG', subsampling=0, quality=100)
-                    msg = f'{prefix}WARNING ⚠️ {im_file}: corrupt JPEG restored and saved'
+                if f.read() != b"\xff\xd9":  # corrupt JPEG
+                    ImageOps.exif_transpose(Image.open(im_file)).save(im_file, "JPEG", subsampling=0, quality=100)
+                    msg = f"{prefix}WARNING ⚠️ {im_file}: corrupt JPEG restored and saved"

         # Verify labels
         if os.path.isfile(lb_file):
@@ -114,32 +125,32 @@ def verify_image_label(args):
             nl = len(lb)
             if nl:
                 if keypoint:
-                    assert lb.shape[1] == (5 + nkpt * ndim), f'labels require {(5 + nkpt * ndim)} columns each'
-                    assert (lb[:, 5::ndim] <= 1).all(), 'non-normalized or out of bounds coordinate labels'
-                    assert (lb[:, 6::ndim] <= 1).all(), 'non-normalized or out of bounds coordinate labels'
+                    assert lb.shape[1] == (5 + nkpt * ndim), f"labels require {(5 + nkpt * ndim)} columns each"
+                    points = lb[:, 5:].reshape(-1, ndim)[:, :2]
                 else:
-                    assert lb.shape[1] == 5, f'labels require 5 columns, {lb.shape[1]} columns detected'
-                    assert (lb[:, 1:] <= 1).all(), \
-                        f'non-normalized or out of bounds coordinates {lb[:, 1:][lb[:, 1:] > 1]}'
-                    assert (lb >= 0).all(), f'negative label values {lb[lb < 0]}'
+                    assert lb.shape[1] == 5, f"labels require 5 columns, {lb.shape[1]} columns detected"
+                    points = lb[:, 1:]
+                assert points.max() <= 1, f"non-normalized or out of bounds coordinates {points[points > 1]}"
+                assert lb.min() >= 0, f"negative label values {lb[lb < 0]}"
+
                 # All labels
-                max_cls = int(lb[:, 0].max())  # max label count
-                assert max_cls <= num_cls, \
-                    f'Label class {max_cls} exceeds dataset class count {num_cls}. ' \
-                    f'Possible class labels are 0-{num_cls - 1}'
+                max_cls = lb[:, 0].max()  # max label count
+                assert max_cls <= num_cls, (
+                    f"Label class {int(max_cls)} exceeds dataset class count {num_cls}. "
+                    f"Possible class labels are 0-{num_cls - 1}"
+                )
                 _, i = np.unique(lb, axis=0, return_index=True)
                 if len(i) < nl:  # duplicate row check
                     lb = lb[i]  # remove duplicates
                     if segments:
                         segments = [segments[x] for x in i]
-                    msg = f'{prefix}WARNING ⚠️ {im_file}: {nl - len(i)} duplicate labels removed'
+                    msg = f"{prefix}WARNING ⚠️ {im_file}: {nl - len(i)} duplicate labels removed"
             else:
                 ne = 1  # label empty
-                lb = np.zeros((0, (5 + nkpt * ndim)), dtype=np.float32) if keypoint else np.zeros(
-                    (0, 5), dtype=np.float32)
+                lb = np.zeros((0, (5 + nkpt * ndim) if keypoint else 5), dtype=np.float32)
         else:
             nm = 1  # label missing
-            lb = np.zeros((0, (5 + nkpt * ndim)), dtype=np.float32) if keypoint else np.zeros((0, 5), dtype=np.float32)
+            lb = np.zeros((0, (5 + nkpt * ndim) if keypoint else 5), dtype=np.float32)
         if keypoint:
             keypoints = lb[:, 5:].reshape(-1, nkpt, ndim)
             if ndim == 2:
@@ -149,42 +160,56 @@ def verify_image_label(args):
         return im_file, lb, shape, segments, keypoints, nm, nf, ne, nc, msg
     except Exception as e:
         nc = 1
-        msg = f'{prefix}WARNING ⚠️ {im_file}: ignoring corrupt image/label: {e}'
+        msg = f"{prefix}WARNING ⚠️ {im_file}: ignoring corrupt image/label: {e}"
         return [None, None, None, None, None, nm, nf, ne, nc, msg]


 def polygon2mask(imgsz, polygons, color=1, downsample_ratio=1):
     """
+    Convert a list of polygons to a binary mask of the specified image size.
+
     Args:
-        imgsz (tuple): The image size.
-        polygons (list[np.ndarray]): [N, M], N is the number of polygons, M is the number of points(Be divided by 2).
-        color (int): color
-        downsample_ratio (int): downsample ratio
+        imgsz (tuple): The size of the image as (height, width).
+        polygons (list[np.ndarray]): A list of polygons. Each polygon is an array with shape [N, M], where
+            N is the number of polygons, and M is the number of points such that M % 2 = 0.
+        color (int, optional): The color value to fill in the polygons on the mask. Defaults to 1.
+        downsample_ratio (int, optional): Factor by which to downsample the mask. Defaults to 1.
+
+    Returns:
+        (np.ndarray): A binary mask of the specified image size with the polygons filled in.
     """
     mask = np.zeros(imgsz, dtype=np.uint8)
     polygons = np.asarray(polygons, dtype=np.int32)
     polygons = polygons.reshape((polygons.shape[0], -1, 2))
     cv2.fillPoly(mask, polygons, color=color)
     nh, nw = (imgsz[0] // downsample_ratio, imgsz[1] // downsample_ratio)
-    # NOTE: fillPoly first then resize is trying to keep the same way of loss calculation when mask-ratio=1.
+    # Note: fillPoly first then resize is trying to keep the same loss calculation method when mask-ratio=1
     return cv2.resize(mask, (nw, nh))


 def polygons2masks(imgsz, polygons, color, downsample_ratio=1):
     """
+    Convert a list of polygons to a set of binary masks of the specified image size.
+
     Args:
-        imgsz (tuple): The image size.
-        polygons (list[np.ndarray]): each polygon is [N, M], N is number of polygons, M is number of points (M % 2 = 0)
-        color (int): color
-        downsample_ratio (int): downsample ratio
+        imgsz (tuple): The size of the image as (height, width).
+        polygons (list[np.ndarray]): A list of polygons. Each polygon is an array with shape [N, M], where
+            N is the number of polygons, and M is the number of points such that M % 2 = 0.
+        color (int): The color value to fill in the polygons on the masks.
+        downsample_ratio (int, optional): Factor by which to downsample each mask. Defaults to 1.
+
+    Returns:
+        (np.ndarray): A set of binary masks of the specified image size with the polygons filled in.
""" return np.array([polygon2mask(imgsz, [x.reshape(-1)], color, downsample_ratio) for x in polygons]) def polygons2masks_overlap(imgsz, segments, downsample_ratio=1): """Return a (640, 640) overlap mask.""" - masks = np.zeros((imgsz[0] // downsample_ratio, imgsz[1] // downsample_ratio), - dtype=np.int32 if len(segments) > 255 else np.uint8) + masks = np.zeros( + (imgsz[0] // downsample_ratio, imgsz[1] // downsample_ratio), + dtype=np.int32 if len(segments) > 255 else np.uint8, + ) areas = [] ms = [] for si in range(len(segments)): @@ -206,7 +231,7 @@ def find_dataset_yaml(path: Path) -> Path: Find and return the YAML file associated with a Detect, Segment or Pose dataset. This function searches for a YAML file at the root level of the provided directory first, and if not found, it - performs a recursive search. It prefers YAML files that have the samestem as the provided path. An AssertionError + performs a recursive search. It prefers YAML files that have the same stem as the provided path. An AssertionError is raised if no YAML file is found or if multiple YAML files are found. Args: @@ -215,7 +240,7 @@ def find_dataset_yaml(path: Path) -> Path: Returns: (Path): The path of the found YAML file. """ - files = list(path.glob('*.yaml')) or list(path.rglob('*.yaml')) # try root level first and then recursive + files = list(path.glob("*.yaml")) or list(path.rglob("*.yaml")) # try root level first and then recursive assert files, f"No YAML file found in '{path.resolve()}'" if len(files) > 1: files = [f for f in files if f.stem == path.stem] # prefer *.yaml files that match @@ -239,57 +264,57 @@ def check_det_dataset(dataset, autodownload=True): (dict): Parsed dataset information and paths. """ - data = check_file(dataset) + file = check_file(dataset) # Download (optional) - extract_dir = '' - if isinstance(data, (str, Path)) and (zipfile.is_zipfile(data) or is_tarfile(data)): - new_dir = safe_download(data, dir=DATASETS_DIR, unzip=True, delete=False) - data = find_dataset_yaml(DATASETS_DIR / new_dir) - extract_dir, autodownload = data.parent, False + extract_dir = "" + if zipfile.is_zipfile(file) or is_tarfile(file): + new_dir = safe_download(file, dir=DATASETS_DIR, unzip=True, delete=False) + file = find_dataset_yaml(DATASETS_DIR / new_dir) + extract_dir, autodownload = file.parent, False - # Read YAML (optional) - if isinstance(data, (str, Path)): - data = yaml_load(data, append_filename=True) # dictionary + # Read YAML + data = yaml_load(file, append_filename=True) # dictionary # Checks - for k in 'train', 'val': + for k in "train", "val": if k not in data: - if k == 'val' and 'validation' in data: - LOGGER.info("WARNING ⚠️ renaming data YAML 'validation' key to 'val' to match YOLO format.") - data['val'] = data.pop('validation') # replace 'validation' key with 'val' key - else: + if k != "val" or "validation" not in data: raise SyntaxError( - emojis(f"{dataset} '{k}:' key missing ❌.\n'train' and 'val' are required in all data YAMLs.")) - if 'names' not in data and 'nc' not in data: + emojis(f"{dataset} '{k}:' key missing ❌.\n'train' and 'val' are required in all data YAMLs.") + ) + LOGGER.info("WARNING ⚠️ renaming data YAML 'validation' key to 'val' to match YOLO format.") + data["val"] = data.pop("validation") # replace 'validation' key with 'val' key + if "names" not in data and "nc" not in data: raise SyntaxError(emojis(f"{dataset} key missing ❌.\n either 'names' or 'nc' are required in all data YAMLs.")) - if 'names' in data and 'nc' in data and len(data['names']) != data['nc']: + if 
"names" in data and "nc" in data and len(data["names"]) != data["nc"]: raise SyntaxError(emojis(f"{dataset} 'names' length {len(data['names'])} and 'nc: {data['nc']}' must match.")) - if 'names' not in data: - data['names'] = [f'class_{i}' for i in range(data['nc'])] + if "names" not in data: + data["names"] = [f"class_{i}" for i in range(data["nc"])] else: - data['nc'] = len(data['names']) + data["nc"] = len(data["names"]) - data['names'] = check_class_names(data['names']) + data["names"] = check_class_names(data["names"]) # Resolve paths - path = Path(extract_dir or data.get('path') or Path(data.get('yaml_file', '')).parent) # dataset root - + path = Path(extract_dir or data.get("path") or Path(data.get("yaml_file", "")).parent) # dataset root if not path.is_absolute(): path = (DATASETS_DIR / path).resolve() - data['path'] = path # download scripts - for k in 'train', 'val', 'test': + + # Set paths + data["path"] = path # download scripts + for k in "train", "val", "test": if data.get(k): # prepend path if isinstance(data[k], str): x = (path / data[k]).resolve() - if not x.exists() and data[k].startswith('../'): + if not x.exists() and data[k].startswith("../"): x = (path / data[k][3:]).resolve() data[k] = str(x) else: data[k] = [str((path / x).resolve()) for x in data[k]] # Parse YAML - train, val, test, s = (data.get(x) for x in ('train', 'val', 'test', 'download')) + val, s = (data.get(x) for x in ("val", "download")) if val: val = [Path(x).resolve() for x in (val if isinstance(val, list) else [val])] # val path if not all(x.exists() for x in val): @@ -302,22 +327,22 @@ def check_det_dataset(dataset, autodownload=True): raise FileNotFoundError(m) t = time.time() r = None # success - if s.startswith('http') and s.endswith('.zip'): # URL + if s.startswith("http") and s.endswith(".zip"): # URL safe_download(url=s, dir=DATASETS_DIR, delete=True) - elif s.startswith('bash '): # bash script - LOGGER.info(f'Running {s} ...') + elif s.startswith("bash "): # bash script + LOGGER.info(f"Running {s} ...") r = os.system(s) else: # python script - exec(s, {'yaml': data}) - dt = f'({round(time.time() - t, 1)}s)' - s = f"success ✅ {dt}, saved to {colorstr('bold', DATASETS_DIR)}" if r in (0, None) else f'failure {dt} ❌' - LOGGER.info(f'Dataset download {s}\n') - check_font('Arial.ttf' if is_ascii(data['names']) else 'Arial.Unicode.ttf') # download fonts + exec(s, {"yaml": data}) + dt = f"({round(time.time() - t, 1)}s)" + s = f"success ✅ {dt}, saved to {colorstr('bold', DATASETS_DIR)}" if r in (0, None) else f"failure {dt} ❌" + LOGGER.info(f"Dataset download {s}\n") + check_font("Arial.ttf" if is_ascii(data["names"]) else "Arial.Unicode.ttf") # download fonts return data # dictionary -def check_cls_dataset(dataset, split=''): +def check_cls_dataset(dataset, split=""): """ Checks a classification dataset such as Imagenet. 
@@ -338,54 +363,62 @@ def check_cls_dataset(dataset, split=''): """ # Download (optional if dataset=https://file.zip is passed directly) - if str(dataset).startswith(('http:/', 'https:/')): + if str(dataset).startswith(("http:/", "https:/")): dataset = safe_download(dataset, dir=DATASETS_DIR, unzip=True, delete=False) + elif Path(dataset).suffix in (".zip", ".tar", ".gz"): + file = check_file(dataset) + dataset = safe_download(file, dir=DATASETS_DIR, unzip=True, delete=False) dataset = Path(dataset) data_dir = (dataset if dataset.is_dir() else (DATASETS_DIR / dataset)).resolve() if not data_dir.is_dir(): - LOGGER.warning(f'\nDataset not found ⚠️, missing path {data_dir}, attempting download...') + LOGGER.warning(f"\nDataset not found ⚠️, missing path {data_dir}, attempting download...") t = time.time() - if str(dataset) == 'imagenet': + if str(dataset) == "imagenet": subprocess.run(f"bash {ROOT / 'data/scripts/get_imagenet.sh'}", shell=True, check=True) else: - url = f'https://github.com/ultralytics/yolov5/releases/download/v1.0/{dataset}.zip' + url = f"https://github.com/ultralytics/yolov5/releases/download/v1.0/{dataset}.zip" download(url, dir=data_dir.parent) s = f"Dataset download success ✅ ({time.time() - t:.1f}s), saved to {colorstr('bold', data_dir)}\n" LOGGER.info(s) - train_set = data_dir / 'train' - val_set = data_dir / 'val' if (data_dir / 'val').exists() else data_dir / 'validation' if \ - (data_dir / 'validation').exists() else None # data/test or data/val - test_set = data_dir / 'test' if (data_dir / 'test').exists() else None # data/val or data/test - if split == 'val' and not val_set: + train_set = data_dir / "train" + val_set = ( + data_dir / "val" + if (data_dir / "val").exists() + else data_dir / "validation" + if (data_dir / "validation").exists() + else None + ) # data/test or data/val + test_set = data_dir / "test" if (data_dir / "test").exists() else None # data/val or data/test + if split == "val" and not val_set: LOGGER.warning("WARNING ⚠️ Dataset 'split=val' not found, using 'split=test' instead.") - elif split == 'test' and not test_set: + elif split == "test" and not test_set: LOGGER.warning("WARNING ⚠️ Dataset 'split=test' not found, using 'split=val' instead.") - nc = len([x for x in (data_dir / 'train').glob('*') if x.is_dir()]) # number of classes - names = [x.name for x in (data_dir / 'train').iterdir() if x.is_dir()] # class names list + nc = len([x for x in (data_dir / "train").glob("*") if x.is_dir()]) # number of classes + names = [x.name for x in (data_dir / "train").iterdir() if x.is_dir()] # class names list names = dict(enumerate(sorted(names))) # Print to console - for k, v in {'train': train_set, 'val': val_set, 'test': test_set}.items(): + for k, v in {"train": train_set, "val": val_set, "test": test_set}.items(): prefix = f'{colorstr(f"{k}:")} {v}...' 
if v is None: LOGGER.info(prefix) else: - files = [path for path in v.rglob('*.*') if path.suffix[1:].lower() in IMG_FORMATS] + files = [path for path in v.rglob("*.*") if path.suffix[1:].lower() in IMG_FORMATS] nf = len(files) # number of files nd = len({file.parent for file in files}) # number of directories if nf == 0: - if k == 'train': + if k == "train": raise FileNotFoundError(emojis(f"{dataset} '{k}:' no training images found ❌ ")) else: - LOGGER.warning(f'{prefix} found {nf} images in {nd} classes: WARNING ⚠️ no images found') + LOGGER.warning(f"{prefix} found {nf} images in {nd} classes: WARNING ⚠️ no images found") elif nd != nc: - LOGGER.warning(f'{prefix} found {nf} images in {nd} classes: ERROR ❌️ requires {nc} classes, not {nd}') + LOGGER.warning(f"{prefix} found {nf} images in {nd} classes: ERROR ❌️ requires {nc} classes, not {nd}") else: - LOGGER.info(f'{prefix} found {nf} images in {nd} classes ✅ ') + LOGGER.info(f"{prefix} found {nf} images in {nd} classes ✅ ") - return {'train': train_set, 'val': val_set, 'test': test_set, 'nc': nc, 'names': names} + return {"train": train_set, "val": val_set, "test": test_set, "nc": nc, "names": names} class HUBDatasetStats: @@ -393,7 +426,7 @@ class HUBDatasetStats: A class for generating HUB dataset JSON and `-hub` dataset directory. Args: - path (str): Path to data.yaml or data.zip (with data.yaml inside data.zip). Default is 'coco128.yaml'. + path (str): Path to data.yaml or data.zip (with data.yaml inside data.zip). Default is 'coco8.yaml'. task (str): Dataset task. Options are 'detect', 'segment', 'pose', 'classify'. Default is 'detect'. autodownload (bool): Attempt to download dataset if not found locally. Default is False. @@ -413,39 +446,42 @@ class HUBDatasetStats: ``` """ - def __init__(self, path='coco128.yaml', task='detect', autodownload=False): + def __init__(self, path="coco8.yaml", task="detect", autodownload=False): """Initialize class.""" path = Path(path).resolve() - LOGGER.info(f'Starting HUB dataset checks for {path}....') + LOGGER.info(f"Starting HUB dataset checks for {path}....") self.task = task # detect, segment, pose, classify - if self.task == 'classify': + if self.task == "classify": unzip_dir = unzip_file(path) data = check_cls_dataset(unzip_dir) - data['path'] = unzip_dir + data["path"] = unzip_dir else: # detect, segment, pose - zipped, data_dir, yaml_path = self._unzip(Path(path)) + _, data_dir, yaml_path = self._unzip(Path(path)) try: - # data = yaml_load(check_yaml(yaml_path)) # data dict - data = check_det_dataset(yaml_path, autodownload) # data dict - if zipped: - data['path'] = data_dir + # Load YAML with checks + data = yaml_load(yaml_path) + data["path"] = "" # strip path since YAML should be in dataset root for all HUB datasets + yaml_save(yaml_path, data) + data = check_det_dataset(yaml_path, autodownload) # dict + data["path"] = data_dir # YAML path should be set to '' (relative) or parent (absolute) except Exception as e: - raise Exception('error/HUB/dataset_stats/init') from e + raise Exception("error/HUB/dataset_stats/init") from e self.hub_dir = Path(f'{data["path"]}-hub') - self.im_dir = self.hub_dir / 'images' - self.im_dir.mkdir(parents=True, exist_ok=True) # makes /images - self.stats = {'nc': len(data['names']), 'names': list(data['names'].values())} # statistics dictionary + self.im_dir = self.hub_dir / "images" + self.stats = {"nc": len(data["names"]), "names": list(data["names"].values())} # statistics dictionary self.data = data - def _unzip(self, path): + @staticmethod + def 
_unzip(path): """Unzip data.zip.""" - if not str(path).endswith('.zip'): # path is data.yaml + if not str(path).endswith(".zip"): # path is data.yaml return False, None, path unzip_dir = unzip_file(path, path=path.parent) - assert unzip_dir.is_dir(), f'Error unzipping {path}, {unzip_dir} not found. ' \ - f'path/to/abc.zip MUST unzip to path/to/abc/' + assert unzip_dir.is_dir(), ( + f"Error unzipping {path}, {unzip_dir} not found. " f"path/to/abc.zip MUST unzip to path/to/abc/" + ) return True, str(unzip_dir), find_dataset_yaml(unzip_dir) # zipped, data_dir, yaml_path def _hub_ops(self, f): @@ -457,31 +493,31 @@ class HUBDatasetStats: def _round(labels): """Update labels to integer class and 4 decimal place floats.""" - if self.task == 'detect': - coordinates = labels['bboxes'] - elif self.task == 'segment': - coordinates = [x.flatten() for x in labels['segments']] - elif self.task == 'pose': - n = labels['keypoints'].shape[0] - coordinates = np.concatenate((labels['bboxes'], labels['keypoints'].reshape(n, -1)), 1) + if self.task == "detect": + coordinates = labels["bboxes"] + elif self.task == "segment": + coordinates = [x.flatten() for x in labels["segments"]] + elif self.task == "pose": + n = labels["keypoints"].shape[0] + coordinates = np.concatenate((labels["bboxes"], labels["keypoints"].reshape(n, -1)), 1) else: - raise ValueError('Undefined dataset task.') - zipped = zip(labels['cls'], coordinates) + raise ValueError("Undefined dataset task.") + zipped = zip(labels["cls"], coordinates) return [[int(c[0]), *(round(float(x), 4) for x in points)] for c, points in zipped] - for split in 'train', 'val', 'test': + for split in "train", "val", "test": self.stats[split] = None # predefine path = self.data.get(split) # Check split if path is None: # no split continue - files = [f for f in Path(path).rglob('*.*') if f.suffix[1:].lower() in IMG_FORMATS] # image files in split + files = [f for f in Path(path).rglob("*.*") if f.suffix[1:].lower() in IMG_FORMATS] # image files in split if not files: # no images continue # Get dataset statistics - if self.task == 'classify': + if self.task == "classify": from torchvision.datasets import ImageFolder dataset = ImageFolder(self.data[split]) @@ -491,41 +527,36 @@ class HUBDatasetStats: x[im[1]] += 1 self.stats[split] = { - 'instance_stats': { - 'total': len(dataset), - 'per_class': x.tolist()}, - 'image_stats': { - 'total': len(dataset), - 'unlabelled': 0, - 'per_class': x.tolist()}, - 'labels': [{ - Path(k).name: v} for k, v in dataset.imgs]} + "instance_stats": {"total": len(dataset), "per_class": x.tolist()}, + "image_stats": {"total": len(dataset), "unlabelled": 0, "per_class": x.tolist()}, + "labels": [{Path(k).name: v} for k, v in dataset.imgs], + } else: from ultralytics.data import YOLODataset - dataset = YOLODataset(img_path=self.data[split], - data=self.data, - use_segments=self.task == 'segment', - use_keypoints=self.task == 'pose') - x = np.array([ - np.bincount(label['cls'].astype(int).flatten(), minlength=self.data['nc']) - for label in TQDM(dataset.labels, total=len(dataset), desc='Statistics')]) # shape(128x80) + dataset = YOLODataset(img_path=self.data[split], data=self.data, task=self.task) + x = np.array( + [ + np.bincount(label["cls"].astype(int).flatten(), minlength=self.data["nc"]) + for label in TQDM(dataset.labels, total=len(dataset), desc="Statistics") + ] + ) # shape(128x80) self.stats[split] = { - 'instance_stats': { - 'total': int(x.sum()), - 'per_class': x.sum(0).tolist()}, - 'image_stats': { - 'total': len(dataset), - 
'unlabelled': int(np.all(x == 0, 1).sum()), - 'per_class': (x > 0).sum(0).tolist()}, - 'labels': [{ - Path(k).name: _round(v)} for k, v in zip(dataset.im_files, dataset.labels)]} + "instance_stats": {"total": int(x.sum()), "per_class": x.sum(0).tolist()}, + "image_stats": { + "total": len(dataset), + "unlabelled": int(np.all(x == 0, 1).sum()), + "per_class": (x > 0).sum(0).tolist(), + }, + "labels": [{Path(k).name: _round(v)} for k, v in zip(dataset.im_files, dataset.labels)], + } # Save, print and return if save: - stats_path = self.hub_dir / 'stats.json' - LOGGER.info(f'Saving {stats_path.resolve()}...') - with open(stats_path, 'w') as f: + self.hub_dir.mkdir(parents=True, exist_ok=True) # makes dataset-hub/ + stats_path = self.hub_dir / "stats.json" + LOGGER.info(f"Saving {stats_path.resolve()}...") + with open(stats_path, "w") as f: json.dump(self.stats, f) # save stats.json if verbose: LOGGER.info(json.dumps(self.stats, indent=2, sort_keys=False)) @@ -535,22 +566,23 @@ class HUBDatasetStats: """Compress images for Ultralytics HUB.""" from ultralytics.data import YOLODataset # ClassificationDataset - for split in 'train', 'val', 'test': + self.im_dir.mkdir(parents=True, exist_ok=True) # makes dataset-hub/images/ + for split in "train", "val", "test": if self.data.get(split) is None: continue dataset = YOLODataset(img_path=self.data[split], data=self.data) with ThreadPool(NUM_THREADS) as pool: - for _ in TQDM(pool.imap(self._hub_ops, dataset.im_files), total=len(dataset), desc=f'{split} images'): + for _ in TQDM(pool.imap(self._hub_ops, dataset.im_files), total=len(dataset), desc=f"{split} images"): pass - LOGGER.info(f'Done. All images saved to {self.im_dir}') + LOGGER.info(f"Done. All images saved to {self.im_dir}") return self.im_dir def compress_one_image(f, f_new=None, max_dim=1920, quality=50): """ - Compresses a single image file to reduced size while preserving its aspect ratio and quality using either the - Python Imaging Library (PIL) or OpenCV library. If the input image is smaller than the maximum dimension, it will - not be resized. + Compresses a single image file to reduced size while preserving its aspect ratio and quality using either the Python + Imaging Library (PIL) or OpenCV library. If the input image is smaller than the maximum dimension, it will not be + resized. Args: f (str): The path to the input image file. @@ -573,9 +605,9 @@ def compress_one_image(f, f_new=None, max_dim=1920, quality=50): r = max_dim / max(im.height, im.width) # ratio if r < 1.0: # image too large im = im.resize((int(im.width * r), int(im.height * r))) - im.save(f_new or f, 'JPEG', quality=quality, optimize=True) # save + im.save(f_new or f, "JPEG", quality=quality, optimize=True) # save except Exception as e: # use OpenCV - LOGGER.info(f'WARNING ⚠️ HUB ops PIL failure {f}: {e}') + LOGGER.info(f"WARNING ⚠️ HUB ops PIL failure {f}: {e}") im = cv2.imread(f) im_height, im_width = im.shape[:2] r = max_dim / max(im_height, im_width) # ratio @@ -584,7 +616,7 @@ def compress_one_image(f, f_new=None, max_dim=1920, quality=50): cv2.imwrite(str(f_new or f), im) -def autosplit(path=DATASETS_DIR / 'coco8/images', weights=(0.9, 0.1, 0.0), annotated_only=False): +def autosplit(path=DATASETS_DIR / "coco8/images", weights=(0.9, 0.1, 0.0), annotated_only=False): """ Automatically split a dataset into train/val/test splits and save the resulting splits into autosplit_*.txt files. 
@@ -602,18 +634,18 @@ def autosplit(path=DATASETS_DIR / 'coco8/images', weights=(0.9, 0.1, 0.0), annot
     """
     path = Path(path)  # images dir
-    files = sorted(x for x in path.rglob('*.*') if x.suffix[1:].lower() in IMG_FORMATS)  # image files only
+    files = sorted(x for x in path.rglob("*.*") if x.suffix[1:].lower() in IMG_FORMATS)  # image files only
     n = len(files)  # number of files
     random.seed(0)  # for reproducibility
     indices = random.choices([0, 1, 2], weights=weights, k=n)  # assign each image to a split

-    txt = ['autosplit_train.txt', 'autosplit_val.txt', 'autosplit_test.txt']  # 3 txt files
+    txt = ["autosplit_train.txt", "autosplit_val.txt", "autosplit_test.txt"]  # 3 txt files
     for x in txt:
         if (path.parent / x).exists():
             (path.parent / x).unlink()  # remove existing

-    LOGGER.info(f'Autosplitting images from {path}' + ', using *.txt labeled images only' * annotated_only)
+    LOGGER.info(f"Autosplitting images from {path}" + ", using *.txt labeled images only" * annotated_only)
     for i, img in TQDM(zip(indices, files), total=n):
         if not annotated_only or Path(img2label_paths([str(img)])[0]).exists():  # check label
-            with open(path.parent / txt[i], 'a') as f:
-                f.write(f'./{img.relative_to(path.parent).as_posix()}' + '\n')  # add image to txt file
+            with open(path.parent / txt[i], "a") as f:
+                f.write(f"./{img.relative_to(path.parent).as_posix()}" + "\n")  # add image to txt file
diff --git a/ultralytics/engine/__init__.py b/ultralytics/engine/__init__.py
index e69de29..9e68dc1 100644
--- a/ultralytics/engine/__init__.py
+++ b/ultralytics/engine/__init__.py
@@ -0,0 +1 @@
+# Ultralytics YOLO 🚀, AGPL-3.0 license
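For reviewers, the reworked helpers in ultralytics/data/utils.py can be sanity-checked directly from a Python shell. A minimal sketch, assuming this patch is applied to an ultralytics checkout; the coco8 dataset name and the example paths are illustrative only, not part of the patch:

    # Sketch: exercising the dataset helpers touched above (illustrative paths)
    from ultralytics.data.utils import autosplit, check_det_dataset, img2label_paths

    # img2label_paths swaps the /images/ directory for /labels/ and the suffix for .txt
    print(img2label_paths(["datasets/coco8/images/train/000000000009.jpg"]))
    # -> ['datasets/coco8/labels/train/000000000009.txt']

    # check_det_dataset validates the train/val keys and names/nc consistency,
    # resolves paths against DATASETS_DIR, and optionally downloads the data
    data = check_det_dataset("coco8.yaml", autodownload=True)
    print(data["nc"], data["names"])

    # autosplit writes autosplit_train/val/test.txt next to the images directory
    autosplit("datasets/coco8/images", weights=(0.9, 0.1, 0.0), annotated_only=False)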
ztY%&BNv2oo0^A7Nq(lYOP6@1=nGTlmnloLHXsc`^x2>O2(66X!nek!MH1{7;rq5lz z6f}@UNa9l}t;%?+Te1&0Ps!d*UuvwBt#V_zye0n{U}t67c|310n!)eB06I%+gU~I% zLCrl-Cy3vt_W-hwFM%x%viiUKbOB&D)*DWxUe=qIPH1%QRsB3x!CgRn-E1^YEm-U2 zk1c*1B$5hHybs(yHhWQlo-OS||Kx1CvGDQPJ3yyf!am)HV>9)|$Ck1U0!e9GN(q44 z$CuO#HT+g#G4}wXk1wJNEc^o4CBmO{51a*P47S#?sTYg-fde{F5q-JVY?OZxaxzO8 zdRQj)*~8^~i}kfyxyYO-03|&kR$9DYZ>-e=5aox|8v0=ybW9}>U5TF|_tSuU?FH`g}Azvz^?2jP!iUvzHaH-Q7Cw7jwkZ-xNx z{GzjHmmRzgEZ-_G-!7SmH~qkH5V%}hwCz%3v0k>lA+x-)2mn~>2`ogt$hjjcUi^I7 zDK9%--;IUy7Zz@uFE0Ztv%J)u`8gF+hbqrxOU^*%cc3!{881b8s^@106_Gl$mzPwB^3OH z;mP4ShsUnLk!w_FGqf5qBVW_tM#YVZ8y7brZc^NoxM^`S;%3F|6E`PrKis}etre3VBZT12+Z`HTwbQ@ zE7bN;y4X2OnBCpcb3y82v0kPJJ@vA^rv_W+4)x>;pu87eTY7{cSdtX|Jt+viy|{w$ zXr1y%?Y-p$(Ai7)nB=4WZ4h0u^mPjc^z@g_QGl}uE4q1hviCKQsHoH zrV%AX_M!yV*ri>VEprf87eFt0#=quy@tf-_E0ueIG1p%)A}?KkrGT}MO}t)jaF3v( zhPjBnOJH`<0+cF*tt(h5o_6npkaZotM|0;-yr}@sf(0N@X{^54g95&(=I~9m+{#vQ zZNtl{my*BsK7&$h9Zo2mjKsC?^Y0I${nibnS>{3w{z&hLF+>Erew$_SN!w)qr)C#vF@JH!?EAI&1h_z6LjZUmFeG*Usa)uVKSP z+(X^@(SxO-P({7Djw^(tHvo!t0pZ0ht>ac_3vE7Aa9W|yjAL7BZO~>OYIeA#eFfY9 zLHN}WS`!wkKm;?`ytd|sPt5VqNy{N*6Sw;y?1hVS)-{wP1FxJ<;nho>TrD>+sMhHX zy#4{6kh^QuEUEU{#gnxrV6=U@*hyDNVvV5LUVL#4IG>3(OfGiZJ%?|xN&_A9;-EA* zWkl90w&O*3nnk_vs_CVex+I}#Nh*HpbyU2)ZRu`1WE_Nm6;5kuEvw};i?H#rL5m=e zk39~@3B7}<`0>z>g?>z15dzlRNe-e+&cUtF%*3&1EdnAP(ksPcS%1;7HcD!>6L17jD96*##&Ll$DL>^dLP6zgd`p$Wf?|Wv6b*^ zp_?epiEaE{EwmZeLT<5BMzT{zsullA=s`?!#lN7vhPuq~X8iN-`_q0pI3V+&=q|F7Q!bwDoB>GOf=% zit1O+7PjPDS*+C7t+M{D2mgicM(h)MA#NQ&xfToeqB!%Kp5|El`IfqHZ@IiiyrYot zqH_MnZ{OvwWimc&I-a)T#iX5{R>tDSLTZ$)lBD(hE9;Hro@JiKdPWL{{s;=O2jKu_ zXkjg>&1r|Qq;lGB&E4HI>7A9|&05f`3FHGHv+!eaQK~xD=#%}F?v~I(piK~iio&_9 z<-oUPB{7Pd{6#$W>i|yW5Xh||l3I;OIcj2CuS86&hqpB&R*v5fna9kS8GjpV+DMd> z<&>GYudQfi(o9KMTEfy2mNByumXWYN3Co%N5|)*)yo3#ygA&#!VM7u|q9p6cN!W;l z?KDRvtY5;$B#gvKrprs%q=fApCJr9q&H=$FV;}TWd!KW zl};L+%a!GUb_*&P@q3$O-K)3|sS>X)bNvK=^b_`-!(*R?;{XQXSi*r;5{#~jBkUod z)t!)azzjRlYRrtB47DO_b56XSc!)!$6o)U_!Vv=^brg^xw)ip@W~cd=8WpFqSOeyZ zmY{>dsH)NQ$Bq3wgK<*VIll1^_Zxi%`2ChH^8&;!E$Yi#2M`GK)=u|aG`ge7a+^Tn zt%`{T&5#tcrTDG0(n-0szjBGU<))*7)=)FmG`s9rP^TR#EDX*&K+U+jR=qPR_D_#V%<)u0U z2Bfl+efue$ETOE5Q!4!pRB6i@oyux?EgjBj?m)0Wib1O7Fp>p-Iy}4ZaMju;ak92T zKyWm!KAdOTL!7EW#=_M|JNhv6<2-4@Si1+!i1?%M10w!7&OwLJF+LVD6K3*9!(%|f zHX}b40&Gl~>9=tr5-Hn^0!ia(nz$XYKJO&W%sU~RvDsDxrzFC^ptYiJW;bJ-@y*0$ zax=A=-pp)fH~ThooBf-4)E&7Uw!SItNtwy@ko8R`-OiYOvR85pFQuGpPyBC~{f)SK zzmNTg*uIu#=I@8?-*Y&+wJIpt!Ye4E6 zl)45a{Sf1RC}aY1e?fDGC2R-E93gAz-3V&kAz?d_?hCCv+!|5?rlYq#<=bCC=uQdk zLz$)EX`gnS0>Q%6kO9h@_m8RR6Pt&LecbF(cteB>Z*fXe-^yw6d+fR&FJB zUwbcXzup?IK7*9cqD8Np$;}{y~g3shOM*xWj(4QTM`O?l+%$ z$dlxPtl|Sw)~}&ttr~VNsyF)0>9J5FZ0_3JY0e=31?;dwYbWacZS!DjCw5wxvD|T+ zqpeZ%Y3HT(%MU~COAkZN5AlR|KIvTM`sF1GzgJL3xVjrIS9QNxd_RLWTxEoEr;OuOZb{bN*xqYxKH##QBtY^ggdi&$I^NK8t=2u;1_>L&~%Gea^Yj8U#c* z4*dH02N~I2yH+*lW-D@E8xC!bwPNo@?f0>7qSiM6n-(61-VC?K%o8U=n9(Fc{~5yonfrAJxy8FT5L( z6fa7OKBV}0r1&gWz?gZGFcfbuRL^0Y&O8WDgv_&kiRUCe>jE@vm;82|m(YHMVtuzi z=Z9X9&^$ta521^G=tYK_F99}(T4Ms6$IxqFmG332qO&9|z>IPYU=A?%=7}+!4fsur zg>Hv8Ct4Hl#jPXeuEwbOGQ-ivOISDX$^8D%gV^Szqx_PaUm$w1D{fc>AR{I`VbDvVDRx3Oj zk~MV$Yw#u@*>c-_7`o3J-U92w?<=h_xUaUb6URJ3Vgl7OwD)eRHmW4frnDltB(37CIb zUnc?UwKF%aUb^~{{;e;!|KtDgr4Hvx_X*6l4qBLB-XRKDG$DOaVH-AZN46`gGVF@z zduqecwr<1P*M7a*yc4etnR(!kO+A2k0&Fgkq4~oC& zp6+IEG{H^TmfPGwy_IDtpOgud}QnZ zT+1$38teCp`pt5epG2Zht+-g3HJi)!tWRy7@UG&`W~adB^cmgu;k#tM+&KcO`gB39 z1HbH`MpQ7+%P`9!u z8Fg;(CmD;5DK+*d)a6};pTNYJ9xB+Ovki4G_3%YPf+yhCP9yP^55)n6V4G8`fv6GUsE34v&owc}jf9WdZP?xm~svZN<3wko2`e#QNL# z@gj}7?PV9SzH9*865c)}vDO_2-47>#+;~9*d~1EN;rN^vEW@ng!T9ARk>NuK*M&ah 
zP4>eKoX3_Y8*>wnJqkyTtUQk_D@uCHeXZx%;$a2A`NYNc6Ucl#SZwR3@wN>pG@6aW zfdCv1sqXn=qX#&QDs@O+0US=t1f6pCahxG-L%;)Yal5F$f>opr^iHB3(|Tn^XK?^g z{)uH4A8ERmuocVKI*F#ZF|OiMB+WXxJo!*&7EKT7mQk%0k+ zyy#N1X1e>+*Ohe42|f7ZUeN=Y87{I@!PcP1?}wh2gBPo`lTq@q(JD_|m)4<%S1a~9 zPHDH3G}sZ1vI89%U0O?iDYB1J3Kip^qUkOZFN>?YxBl>cy= zIUEsaM&~_JMe;uK0n65FYD~Qh7I5cf@O&972WJL=S&$%Hsap-!9p!;zy$`STfbUra zRD1vxbyIL?NMP5|tIo@dD!7HKr^dZ~)tREUek?F~$}0pu4QwSJDT zNetd1SD*C_R<=rqtR}^~h}6mrZ%_?UspmBI`us3;2zwd-m1nfqAH4WSbtkbpsjYv& zdV@lHdC@69*bU<5&mMG$AbNark_^K>rK z`E5GC1IN?WJgx3&8=h9O+6;Y2r%p#8d|ZJ(PqSs`7+D{dh;WRjy`>KHA7^^u#(*}d zkbmwK`z()=NE$4kES{)5BibO|hPBDaFql9|ZMPr4Tiyn>5n&CDNRBM>gg=TVr@%Iv z`6wF6YUxNGG0ew*LiGd_n*7OZSPy5zQTo7G8jK8r!IV=h>PY&dTsZ&TXlzQG(Wb&- zl$OTBf2<*nC;CUpzN|Kg{JXX2M`-Cs@sFbM9OCnc-}(&Wovj!~%p{(Jy(uC&l%3ax zBjJyd;GCkI?}pnW9hNC2CLai^o>X6|6>P?EM~;z}Qyp*y$)ImV%?#;h zjX`iphqi{b!e(}b+|WKZkf`tJmXE=yEmyRD0bOw?ySR=DSBQ>meDX(&Me8uqT?85+ z_orpYO)EY}(YfciW5WEw9ZHZTFrw;^wFln=*|<`Hn+yeq0tFs`p)i^#3$E!vBujFs zzgifUqe#Ib9uNY!)P~BC{a^Br0q#*?!vbPu8*}4%(W(vXFlMgT*KF(mVRkYXtbYo} zn3S#**Bv7Is(Q%X_~(?>k5+o)A`m0(qinTcSr!2-xH1Arq6AqW8VK|-Sv2=E4lbJz zsnZQ9Z978#2S|AxwFq$_6dA|18o_c-a>0j3!-JYTaY0cytfCJLrNs*Cbz|U|yuNik z=1?L}Gg*((a0uLnRbS+zs#%Ay2Et~9>yjJ?Ys$nm!;I|=t%jciF9H-qtRZm5;%1^2 zhj<7CMXHIlm!0JO5V#AecG^tJI!s}8Ww5f-!o|-#NRUP&yn()U&cx~kT`T60;-7a0 zaH094*=Ocf!;Prfht`m15ULJtCRz#WkIjBFZ-Pr}4!s}2nosoP`YptO%Kv`2l`#8G zLRO1vBut0*+{sX5+)q1%8vb}|TG?O2tpq6gS+=6R<6)@5{gjt59lnuH*hnj8j$-GG znfCdMjy&%7oUO=6b z?Oj6QKLAM!@M~Jh>hAU)(Ej^c8H{-Lz7`I(`bs*Uz3*vn{TkXk)k>npfC7LGN!jtc z+#z2Yge%6!nTwCIJfBaWnlH{DDb790^UbbR>ZeZ473Ygb=e-O=fJ_pBbfbq?*Ve7S zhP9;x;uMIOcru7!MqUd=5>U6YSmV|lvKk46xXgLJXC4=3!SfZm7r#}mtlo0G*xiZ= z(b0%hWqpa2{uMZ0VzIFdg&qiCbz_W79hi1W(x0h^{5xn17yV!)->%hm{LF66qP&&&2op-LC5DYlLv(RzwLHVl&l0jpX`me(PM zHd(87+tcoNQC7MgQ2r6(7m?8Bl_dl`1DFhP67XPkEgv3-KPp&4PVk*vI1YSa*vC4; zh=U-^pTzspQDP#=@TiuH=e2{tTGHA`IIFqC+rccG=B5I{%&jnX08L1;8F=g(V5|cu zD{vFAwFY@C2XY8zRCajm9h~}rlyQXvI5GQ0a3T{`Qv3%?u>HAcuvtIq>?XoPPKr@zNgPFSx2%XQ(<% zZWQSMQK#S8LC!Ga+9TxXR(DR2V;qwbM(y8q#;T)h|HRDky)=sUg9L?Ev($8T6u!KI zYv?Jsd^jQ4(}~TvQuG7Vrtlzh40m$u@N-P65Y(1*cD0h`2sj<=*=`d&3c&NxccXx# zds-=^NVn4F*zFKtY_-#dal%M*!iTHzw%$s$66jUNjBI9|y(lSJO`4NqA!iC)I?Z}y z?rQII_A|Ww6u1%^<5*Yw0JtGg;xu=YI|A6;*NRuC!5i&E`TB#1@7vq)O?7^y-a~=_eikl1JX)xjfNZxkYG73NXCQUkP10<%Izby#n*-^n*h#lqE=qoL|Fki z6GSGrpv~AcA(FdD3$T9`j<9;5k!wa)v1HNR5LiDH5sUS_$MK%%c~4>*!e*))f&nCp zHf*N5VJ1bzxUC@D+K%xk#gYgE5s`W%JZ|P7GVxPVXFuZdejK(X5p-~CSZ)e};t`-Z zX(vN67`Ei#1k3H?^q50HtcTs=&9b9IXOR+r5F9L9N|;FKfW#B^0HW$|oVjwjXvEc2 zdOkp8wg5PinzCSwSY=W&iFaZQOUt(wAqrTtKcs-3m%{C~B#@Z5%TVawG+#8bHtsBC z6$%BG=dO2ubTVLJaF(pU*BB}fSVF8m03g+b2{ zc3-DY9NtN-pMdkr3>QAt3y%R-f3Uaic4;)5z ztQsdMpAZq4YQl^{bm^iKgJ_>-0`y%;m%t3=7)lo@2`E(cpdc(OU_=xZMt7|fT`ov5r z2JBDVKR3Xnuo>lr8*88)k`#IL2n5W*a1`$+Om88NX2CkEQDcWwC~Kf+YJFwvM9K!qb+HAL|I6ZP@rU!$_Pk`f#fN z$aeglFQ7!wZy`GfUQR?@=(s}r?7`#-TdjXV$Av?+f;9zI1jRQP(WWyItk!l8j_rCWG@Nx(!9w&dr5!I zmyN;RZFF4*$cSCJ^!kPKg@G>iZ=K^txCqAxue@c%R^BQB-hGyVpQ6*2W^wIsfVGA( z+`Fj)1wiXf7LdPH#@VLm^Fqir27}f>c_RR;yjB>|6;>nr`K$>NmkDA)oIWAg{3Up{UM4n)%|vVA zI>>{-JxD-|38EcufrJReHQ`?@gqVm6Knf&-WEF%OCsWM^7XYA{;CdhX=2>lXO48nf_ zy>h<~^7;A^zj#F>xPYYAAm$B{x^Xr8UQ!NQi1v6%@KEG*H*yLc@)1niI?XJDYJ0B2 zZ{&kkfx|D`(Yl4;?You)JavcOy7R2hFtKc1*{j{-T~78`&zfg#s%*LAZUxiUcTvF~ zB88oYN0uQL)VQ`AC;G7Fj%;5rY9QShHPyB*6W%$wOyth_(~sRbNm1_JIk7ah+&LXm z+@MQA$#9dy16zGD`wAhTbV(lC#g-$Ehh=bl1 z!;u?r#Tf<(+Rvg^NcdtIW}JSwS^v&AVEv5Q*BC@e-!OBHv@>Y^GGv8;SmSOyq^N!G zhC$DQ9C9mZeOn}%sg;2@g`Pv-DAmx-5s_ft`5=NDVo=j)D;3bpAP)?r9CW*J+>O)D z4j=#{4@0fA+yg%gY9@MsenFOOGlM%C+74PC5C<&%QPAf^9@0-WE>gAe_Jldfcym_^ 
z_rwXvPd^CbUKlZT-@h0BDdk_q{nOls_x*m{Q}7?~{nPNz`2GU?2mL(J%|6UV913-D zZav5qLr1j*4OK{rPGTN*Rd-Xu??Lz{HS-YO_f&QN?Xbk|HH#vPJoPn>`s{BHz}^11R3ZFs2Kk3^rU;GqfFt$4=&~a zom?0E@H&Z*0Vli19M2kvF0*vH8W4(uh8+}KV!sc;Q7Ta|M7b zweD-v7%|w1ptgj%BH3d3CXB^ z(tXgq_yY&tg339t3rK?cYVNZngv=c|a!l6M;o$AC^%mCD`bo6zi3{STiu3aN66G1o z&`Ihnk3$eXlvz^RN;+?+)Og2zxQX(}OqTkEOkVj2L2jBo5vhbzZeGZqMd_Oaxk>u5 zwobXZo;U=!3CXts+KNPozA`j~-83g{Yk9KD{qpGVEl=X4JjvHBPi5Lri-6S5Q*OU% zg2V{@GsaspBv*VnL^ms^t_U)nnvxsOBW(%?ezJTI5~`)XvKkbf1}rOF?{MAomsI$Xc1jR`&>xmi;`bFeda<*jQf7H+eYME0 zLrjcbH9~25IU*5R-!Cy(rD^|)i=HTqqM$H}i)i9@)Il!$KNx=yq{WgkHjafs~~rP60WqDz*__<)l8!V;3|*@h_*At#hh##$UN~BSg-*8 zuo*M^K#PxoqZ+E_phc1cIppt1d-B#Vf+I-j0LBcphe;8CsG&qZ^=7j1vY!i@Sk|}w zT)zd50dn9OxevZA{YXhI0q{_*@1PGmr3ZN>k)Yz8(dt-ryp?QE01HD-VtT;L$Ss}( z)jm?)WezbM`Yv(sT!-I{g5L_m0-)NTHFuy*;O~Ndo|annAoO)}RCEzSRUL0D<`~k9 z`+U%hIU$^a-!ms$S!h*h)?aoa<}Qx3l=!o#Z*O&~x(`^8Xfiznj@Y=^%9y)FCHam>#NS7w2ZmO)Y0x7-V4B&_P(oa_QQV=<@B3Vp!=&9=D~UC89RBS z=9TU|?Hn?p0c<`6E&(oD80&lke2g}aaHOAq zFTC+REwqW-mV2TzpR=M%H0N_$qTu+aW#w&YyYQ-Gn}eSp1g~0tg;yPIrG?u(-?=)f zeG^?5u$=X-qt^bWZSWdX-MmeCIRGAZ6)Bo81Q=qrBil70_pf#<#t)`wE|bX zXkmepGfQqc7~>)a7!zVY zZjZ7$LaB0^Kw$oRr1#>t>t)bL3*irWVFh{vVeUl>uR30@T0hO6|5Z3H*bJ5173VO9 z&}vBD3KX|kVU&XgdMC_k+D87a@bkMY2+-yGR%^|r#hTv=AP2Bl!}hqblShm5^DfNn z&~Ba%q*RU`isxFUyNmQJYA#Ud6;ecMqGweFHcs!1@8v0^1na(rMq7W2&dq9Ov2U>?(wD4%2_NnG$PO@4m$qi z5k!*;{IG$wAXQLR7D)mi>x)*i~(>0L7eWY0~x`S{Z2f-pQK#G#}Sf-EGlJ8bJ{5I zOUR!>t^{w8BJpuZkb}eE#}VTMhXqn3`=3C)Q)BI>lp@ZJg{}DFFd9mHk&wrJvx6^! zU9H`Y*dtvw7g=0@L*dPco27LW;EYk6*$2fQ4$P?VpA)w(I|<5-nhBxh`03t&Zt`5n zNw%V(^1*Hj2)!U13)weag1lI}&jj-t7-yC#@Z0BOo;kw1cK^eW{j+EZ-bi)w@z8vG z!0ZPW0GmDSLF5~(4&jY7#dLMp`rjmEM-{r>rJYDIN(vig``TlWQX|DN4f;`j7?LBW zA^B07F!P{vMHQv%H0XRt3F(iAkefkn{hbi<6T?ck_qHJI3M%7qX9_yKofZkTaL62f zJL&9$&};X69v(5@HCR<@M^E$a~IZp50|_YvT72k^TSzti{~#qSLID^iS* zp{upBk|Wp2G1kiYIdXoE+?E_)m0lKr`x)?ss?)$1A+G_sll*N;41NbCL5Cl8jr;g= zqD}q%ytM5iBvJD1Lu?g94>aV=n^DTBkPhb@HuHX26XVda-5%fSe+Uc{bUDyY+r?I1 zaLb^lldVK`7OkRYbhH=H=V1fkd0_JT{qUD=z?Q*@_9sxrNzDIHYp{LFIo*QYhUyFE zUg+Vc%zf{Qq$6WmL+uwIiWL@l8)%A10^})X%%!W z5#Xv_z)tCrUqb2M2X#{VB|WH9N7ecNNAtWio2LX*t#5F6f054CM*VW9QD8-jw7`^` zol>HoLJ<>smzxUI{ifEhpex&Px#?bgRL%yyHNq9!5QvHk@+PqG{APwX`9p zbylHAU9!<1A&d1k^JOlHeQKqjMN)^NgzcqI#7~9lG|k$1uqnAz(!{7i1!ADxi`89j zuCAim=%uR{uUUJM$4K5Js#wcQCm%TAUpW5=>p2HH`QPIvDv zDc*6!IF&I-Ev?S0m@O5;n6c}sx(dOz6fO9GOn#1K;rAkRdW_tK*RNl`bnenZ>Fk+> zb1xe?zor1gOJ4(3^!g%%uPmb4weg2q2%mgu>LCYY50t>MlP$>en*2v0;mwb(` zC?I9S8j|^@BM4>eE~c<-iMT-$LRMU1Hf@uN1zrjQKzHNwXoa$D7YCyrLn>56>qi+( zWD!eLEsumh#e)Aq`wDFqEiH_{^&Xq&i;RQvhz;mPFudv(obZQCX=UAF!OyadvVmnQ z3(ok5h~Rz!2l+@G zX^x;yYUhD(Mnzu=`bO}jBjcb+j>CU7{5XeK4W~Pzfb=#q+*=VwQixM{&f&3(Ko}jL zya-c_Suu?ZnE;=d6Wyr#h+)h+1kvF^Ac)UExq}46lV%J@ejKdbxb?F56Hv~8At@?v z2oj*klaPw6p+p~rj9d4R2Bb(Jn+X#F$7YsF98}jpneUkcD06TnX#x=wgFO&lHetdB zh{Xs9Zxq?x>7$kzzC)cN$!~vvRSTqETA$sFJADpGj1YYV8yUaV0WfQUb_)792vX;j8+6#sS$sym8uc9!50{RA2fGgnX-*#ow9 z21X&N46?I24n*h~QwQ3`w+Xx*chG)m{azJ@a;JQzDcQ0}g}QSPV9uHQAi&I4?H-VsOZ+*KpNaI0m-JCHC5U-3-C>e-S z9*8awYDx2TAj4BcvY_&{4+KF7VYc@ZQFNYaAHY~;pyHKpPXk#Z1NUGn0|M~VFlns* zh0H@8KS-;aSt#5b2I3p^tq3t?H8WAxC&J4qlnT)RBEiznadQ@8%yaL8)bA89a`VFI zJt(pv!PtGPm8m|B5kEqfFj{=b1e%C`AJ`mj4Yv>DDZ*6LY&&nlNFj_W4Oa(t0@;&3 zhTm#Sj$@J|Cpmx$3kv%i67yV8mU&#Y4aD7u1+^Ta%ne*9+H!r~yh zcNa{U`XY4o#{g^GQ9`yO4n!QGP4jH=84xw$d9L`Z+iy2kpJ6Pcpjcy#GyNDtsF=KV z%U-uu%gx4VXx$tw9y#Xae1(0VXz7jt5;{xJbcP%-Bq)y-pGB%eC+FwpTv)W}DCmfK z&IHTGS7$`=>^}{~v_2rLBtrRU?~|#l-7YI4>ws^E)ASn0^zJ&3Eya}+0;$V?U+Ya7DC1!5Bu z-V@c82zAWmq4YFv`{C4t$Rv9+rX!cJ$`z0y$qyFS%UyJY5HU0 
zrG-eP#HjWzL)H~2jbHPcNZOSeVB}HwlywOo)cOnPn>%^16zD*SvilZ86A(FmLZlHB zL1g!-gZ%Bb2eR8x+eFa(vC`slV64JT9`wytxD%?8PfSKx1qV9Wzz0Afg9=ySs7PK~ z#9^!goqtB>zoWxLRIyog`ntHPAhCN9hGNv#Rg#uZV(C(l(E1TNf053w(3xU!T_jjA z=YPPkHk|~WI2~carpmwvVbz0%NigSi8tZ+7v2UX&aC{dX>o@s&g(V9e@00XBq%+9~ zqFdG%m^fyUd~W?P{V5qAGK#Gak-!@+5l0N{eFTd9Cz$tXq%Z8~B4aXGP*Pn{;-;jJ zTmJ{EBT8n7Vjz~NVp{(y!*L$KIso^UV5bRxkyyXU$TqW^^l_h9zd+~f4Et?5ze7im zvF?@XPZ9Q;M8@8Shf3=T6`KUIMOAXjX@*60Tq%yzP5)^)9sZ+uj-)se^5oy2#}h=` zK=gx*XBvv&K;7^F4dWvu!EyMA#*xrR)GiNI`2FAnD{^35$`SX^;;B4ZLIiL>o-EkH z`@@fG;8GmX|KO85+=IuxdZn;sCT&3@imnC%MJ>}V-8mu$ll9NJ zpt5vMVFb5a^$|$Mlafy2V1qOT7B}pw{SCaxvIEiN8vXxm5a1h6I9Xe)FPV#{FJNvI zy`gIoN%gI?w$w{&P{}VAqQ;<{+$AWjD-WpUMjZDTvYvRv8429P9Gt?~a|A}lT=x)# z6c4#B#T8mA5koe?DBud!>AUr9c6S@RlAT1c5-%CpPBz2{F0$b+iEDBv*#Gj(&9(Bf z+@2I)gBMUQCT)f#A+e0Ic8j!rFI;IF8M)7?E-9&WIo{ZzLQ=twzs)g}1M*vJpEQyn zLrC>r1{Y+`I9s81yo|YAzy1uVPGY2eU3XrwsP8@~M0(0%yMx;qWot`rzPoI|n1#ul zWpeF;zhHBnLxBBP;P{&>Mr-k~Hcu&7*pnA43^D&Y0tr_jy4VzLN=w9{I|zC+wg~Cj zscIVbCFzTcHg|${AvY6^qQt?(c~2Z{F-Y8CPgqKYp$4MVY9poH-H&gi9-P464ciyG zU`AOvK7&KCUtvG<$F>C=J>mxx`qas{ZPk!0^F$>BIlko3;_+d96V8@-Cd*B1t@O;e zoF+2k+aUTQB)-h~rE&s9o#ir3X?r}hWbl}VpR09@%{K^|Azuze7G3rNTWdHPuw72W@@&$ zPGn^}EwfYLZQ(0}XWXG=|EFII=I4(;>yG;-d;tBFT|$t@fzk3KO73)Uyga#`L5^8> zzkeM4K(mMol!&k$6G*cB0$r!UymGCkP^l&bqKSat_Ia@krNI+11R?3+%bFI zY<4@KpPmInV738?-48VD|Hc|FMBQr;`|Y9otB*|DMSKcu9rE4H<|^bHP}S_>nnS~l z{5=e{`{L@_D!zp%75G6qFh2dT`y*wTMbM`pQP-IR|I?LEE|pRMi?zf4xF5!GOM9Dj z*zTJODsW&$a<=S9<+C=FBBAb9B)Pq4QUaixTLRsoPOCsn*A=hWeQHZ!Fit_O!C<;E zNhTL=$b4{&tPKuSMEqWE8J)%FH3AYn<3t+No$21Q{MnZa)??G9?yc4fm|g1(r+CJ1 zL(ht{^}`s++xq+*CJ*a_f(ZK5qsWv#W!+(o_vrAhc(ssM2lKD7JRy$%DC7PT9H?Dg zc=hTlmoIy1oX5~46;kLb6aFrp|BGpGRh8f+=8_ACfM_{v|1H8`1z)J9zrq}H*#2dN z8cD3vwGyQ+qZ}|#TZaA}R`8Rc@t-5>rwNUDPXU7*gh?7inxvqkmP|}(I`H2-9^%B% zUD~<*?DYr8&2TzSM%! 
z$p{7d!2HaBE=q>_fY2l%az_x~qLG~82PqgTK(B?qxT6KFEOW#M-#e?LptjKn0_b^< zF*h}Pp|{mfh!^Iq6mdZZn4 zhXdYO_p4DSba(LZVIc-kvs8=$zuc zR^h2~%F$qp?06<&)j|caPr**-l=b`EqFcBBg7qI5`1jEK^_SpmKNmLuD}ByheQ~2> zu96^^x*afaEI>+_SG1a-lg^?~TQ9LxQDobtm~8_bAl@FVKc=${t9ufmLaqiexo=tj zo_W3r$Lkj+7A|A;n=o<=GdEE6STsc>Ts|_@BS9|6;8U!(1JRVYx=sE)a|WPOhBa$Wgf*o3SJS42?BX3gggZJ=JouyV1K9+66{Yzsz#km6q6jtt@_HrTu{JPw@jO;LY^4542+K=@!h@fMzjMEj+}GAoL(g zrOMk(`)Pivf#XGS8wk)q*$pZBA!ZHJE>NzMk;HpZ=pOO*EPu@mECUR>Z=M0-6bQFb zI3l1u1|qni({*sEyLayvyRfC$v{q-qS1->hEE(vKK$_X0xQ>0hOb_WER0!ERnl~Aw zgKqh)avg-$jS}rGlRju#53v|UUbQQi=k~!2z?z8S(XgJU-MwnPi>2yLTbf-$%)0P>l};KGHgH?Q3&oq7Gz&C=yFXD?j7 zDLOAk|AL%Q-MSy5OfPw^3CeTB!6#bLDf*1O2zy?KNh?armhA;&z>n&N8M$-(Uh7SM z{)%FuUgXA0XAMY92Z6?bUvN*J{%r+ActKU!O~bN`ekjfK^waK2xf%noUm)dMf{ze)g4`CdG9!Kw@(=bcevfzgwlNSyUT?0ggSFJOjL|C>7S5bM zvv8(#@zUiBrAz1WL8eO=FX1D%@oQqe+UOTjv#fVT03N1%P)9lx;u+*m89OAj+dWBZ z8%ao>vZ}Jsuz^0y=^@7h*N>O#6l7#O-qOlaU<;>ClVJaj>x4&)mzJff+Tnj>QBq8@ zEMH3C2qYi;!SJ*tNSF9ak}ZW|QqcA7htI!75kJqvX8{$3T@h5zGC&AMLVxmcsybvdR zawlW`6v~U$vh}{r3?OL+#vH+SZN*n;Mht|CcE6cuWh*{>#%YmDZ-hnr9n0mOz0NHYV{2+l3EVo2!M!#It=)!*{IQ$7+j*vg@m zvCSc8lq$)sA)Y`Kn^o_?sk5yi;O>xRfbvuhZ5xI%5-moBe>9Abz~L0z780$FS0`X9 z5w@V9b_3I?RebQNy&EGljPq=`w8zo$DJSx^7dH8Ep1mBa)(&v+!J)_JafaE-_C7gV z#4mCCTO)D`F(lm+t8)6WkOnYuu1XsAyOhW6LA_zTO8M3hEm&>t?6kl|3nZR(upj)m zP!;Ojssv&Os7ntTYcvkg@8K5dp=z|;)NI^zuXq)2nzkYZfTvAk_hfzj$o!8bnz)(Yw=?V*d> z2CFZ$Uv$o(&vQ`n=kGj7!DH_Wxl-DXwpLFEtIp)N9?qKhw30c&JXpt*4`50UVS@CLkQcA*mGt-e0#)~#|NQ;-LL!N#k793w__;I2PuJ4fKPFD_jj*qL6o|h@Wl5u zvD94+teTjdZ5B<+3htrU;_m;N2|E*UgY|v%T@-j_;5w}E*RhH zYY7S^{P!%1_^_A?R0v#38YN0NdszE!IutI1AyP<`RL)q|;)XTvY^6dra4kgi5 zS)b4%5=w%+Tvi79;&;Swh4pDp>WiG9WU#D^) zT@VBlHfb?$Muv<2yjt_t&vI6HN$>_LVhOQFfvF=S(-T&hR5+%n9=*@ZCfoIM^d;Ch z?UqHpv9+5$i~?B9d35Jx#VYBaUq?EFUsj$|-&qjNZ=@~wiL5U*R2V*X&Yzlzd}2{O`P=0=mI~sJSuVy zR7Rp&yi!$yNn6M+8~~=dA7*U_2lG&ar}`4j|MMAvN>dJ)D^2WE!hyMvj*3yhN)^sZ z3|3*W$N>%>73*`~4aee-;xLS#j!c19J0;lgFck1*vm@ZD4Tq0)`j7o^!XoZx$7p#_nj1rBW^ImXr9(P@8giY?S4HktUi zG+Bp-O{Q`FRY+t4Q_MnnG)(yeump2{^I_YhybOsS9c}J3Tz#2&QCHoOvFY~ZH&C!O z{e+e9j}WoM7PAeZtz1>;sC0T|7x1yD2@ju#;CWms;rq6MEwvJ?>r-;8=Eo#TC2*KZ zC1q3Y3#|Dd9pL}4G(g`toj;%>LQa27-*@SJkItXa`9nGs>9NS=x5x^%$bqoFLx*=N zi;GzCItuIRg@GRiz!*d?b%Xlv0 zG4b@{$%VDyUHF|#o)~maup$>>|wKjp)CnPY22 zDBTI&$6}HTMYtNqg$?YIO00Nn%I0pACO^@J8pUY-WxA0siplxXayn6G+= z8=b)JVVv@>MnObFD*0M?#C^H@ooo5b^;V@>pB^Ms>P7ifgx@oM&B`~qrVxmtkB9yK z3s(ziFjCiXKv=Sdh#)Ak1`lwhpt~Ov@1^i{v^q?NLecdri1Fee$>0l&!3PusUW?Qi zp|quQtq@Lspn*tG5_|`1wYywc*=_EK96TZ1wQ~aXC|!b&A8yeFe)1=pxs&*u!(&q$ z3QTm-V3J#a#``s7uqjLus`5sHH{3%(5f;i1;mnG}}l9seD^XeN$?9 zTZ#PQm#r-I_cLXl1rGEkQq37)Bt>f!!&xx%9ekzsowbvWh_uRKC?I=+VI&xNTSh>N zD~dfPTPcq3X2LymbUJ(CcoUDcpct6cvX$)XJ|^4wcruEI+VZ(5i;N_wlXtJ}ia z@H!KpL}WaR*qQwhli65KW{&aN`wh<7WSB4eWtsW%WthCS8_6S0hdJxnaZcjzobN<7 z-g9Or=Op?4@2z{Qs=ENsm`Q=Es;h3@=l}lS|9k)4k|pH|j%?(w2P?j%D8Hm1;_w+a zcmFMJt|+Rax)Mqszq!YO8QD!dVilES8t+hxV*1? 
zxT3FOIM5ehX?%$#!L{ck90nN~BjM>N2^zUSfheXGY@ z?d6})kDhi{-g`2dIfJY3bTXbwwtMnryPSQS~?vWPN`ZV zl8L5Y$-9%ud~hr}oG>2J7wu*FlCD#y4)vbOS9Kpc+;#eRZ{*Q=Q9HK$s8)gbb&SGTEE@<}zGG^mfLHE($P zma31cwYV-*wH?>cA%H=WEq~UP?x`<^ff*7 zK59hXD~)m5^$`1h1@c{GK*@l$s-%unZwX3ZX z0@^U*8!W%u@p~g6Vgs%nfUAw#CX~GiBidofF|2N4@EJy`PF9aifYwe+ip`VCc~{?- zQ%d`uyzc}}`{;Y@JK=%){>ccaxnD#Q^%hQF+`E> z2#qG=17q~G>*UdNC>qmK_(Y8l3}`x%^JJmS*oc-6m$STk&&1P4&Lev2Y+TjwStyno z9!@1g@nlBR2cj{Jm3lU+$5W%}P-;ZeqZuH*bVx@vGnyL8r0ms5kH*e~qUlgZkH(Xz zP6sRNY%~#9*_R!m5nWT`G0LSQq@5p0>C~2vP&BEA`n6>2%y3kH1y91EqnXedJWZfp zW;<95La71S?szhm7*(m^nKP-h#)>06hPn;tso_u}1*{huh$EYh(CGx~l^Dy!W9bkO zaz_YV+N;MiXz?jcKdb5CQ12Ph-{>CnNqjKLo7s=PNNDHd{RxdK%L>xYXSAdmrAIV6 zmTCfVqdU5SeHB0BRD>NrB36-~77 zW>2Wj$>^{~osdCg`F9uxYBjx#=^BQHT0#RHO`=;dux3BV5~l2(KH{D^oSM%)SKPVnwDi!{O^(TrvR8A;DWZ8W6-AYFh#)|{w5MgSsG zga$^F>^VmxVnS~(n&srQ#wkdQ8Zs0y~+lNX2fum z=CVjYn8VD}0Qh7wd?KZeCNv|n(WI*B35Lo}j`gN=;1ib8{E*=m&8SN+AsS88M7%1j z2YYVob~C%!Dx7IiI%o)t z$&4vw=ExC?+0zV@5MCsNLmk6m3}Z$F4S)rrkkZZX+3N_ydY17j!mh>?JfVTasFwK` ziO~FtL@@6XaS%P~K`Pal57Yzrz`z7H=tF(dnP?L96(b1zO!*MDFsl|xYUg%`jwa)o zIEd#sBhGkaw20UOAp0bv*cQ@K060v#*d00!%0R@O_Xfl7p!6n`+hHcc-bCW*$jDfR z<{y_kc83ln)1x{nk~tH_h+BI%8oI|+MRLeNT(n8=4n0jjDFYMt8Skhc@Hh?@hc`Kd z{s9$}_?)JRXbEqJ_|j+(CkAmEV%roTA#8W(6wbWh*r?7pB9^~>HlhyEg0JJrffN-x z2rR9G>&cAj;C|?-KFkSAv>%AwY?4{y0bs%1p~E<*LkCmIvs+ch5FChRVrP2rIesUi zW17z4hN%27##yDziD!#h8gBp;7yxi(_!N~XAYTIJ>?js!FZN3DSL`w1(X1<`?e5Ss zoJ%wMiFYysF~WivI5W9q)OLps2%gO{tOS^n#4r|*rUv@Ug8ez`2cTl_tsi3)Oh&5c zJBGiEltR02mCiX@Do6 zL7><`Ch3%JX--sC88RU88X2f;z7+6IQjO_c1+PRa?7OsF;t40nf|b11G0OK#RAV)PFY&?vFn0tH#HAD)jALqy?<8-b7rp#gUE)QC4}-8uDv=M2bT%{e1zzBGDh(nq}akMNKCrhl&8tMq@?wFX-ByypIh`-b)aaFEvDw zd>N+_D~t3teEzXIadgd6Xch+mnN;%>tA zo{QfPtrpMBt7yjspb;RgLsR3ISTeZ8gpejWh%3mHj8KGRrQ=z z^m>%N&3q>ACS1tX^S)Q8(p}0J_2r}Dy?LdS@B7{};Dyq)mqcBtoFoXvys%`V4ONJ# zFW`zTgKsBQ09S@U{>tjYT0r=tBPQc!$JT~QOz3Zzu!XNkG_Yx=1e?S05bD9^ zo2+v>HL3%DLV%HKDpL_ttdQbg<2h-Ldaa5n;|jN%F`qV{osBe9=$eU(Xa zs7N}j+BUmNirxxc(?d~98Io9h+z@mCFzM0$G)ObJrJ~+9VdvgTEUIE5n?lcicdQEK z{_xoq%JqL6%hEGm2}XERT76~T-p%37;qC42abK9!MC0D@*l?oVt&?d?CsSIxi`}=c z$os$wXnNisiO?`cB6%d}AW zGO0AmmjV}&QYc?)hz+d#ya#v|Rluu}e89@d>$Q}B4OcN3JQHKS^HqfcNWPq*ga}RR ze6T>K5HATK>%_L?OVXnQ1M%~DFKJHlKBoF;_vO85Eiu4aW%g3O44NCVsYD`aB7ZcT zumAed&NHcDt@GTggPp3TjU=^9C(~wc+1v^Bwy+y?YRN$;96MRv!Xslk*(%2?Pmtm) zEIPXv&(cf5S^VmC-DoMc1(7^c^qY4Iez*0eBH)e z-NuWD@O@>?{E{`fC2MBZ%`ItL*tj_x*o=fvO{?D?T3FVE+oKDCn$M}6Dc{!~o3B`z zt5}&`vo%+-bsFC9yQ`$_+_0q|s9|m@QgeS)?9-A7Sd1Ug~hk>@c=pPy}Mjdg@ zz+^ETqhS;W4GZydcQ@d41*Up%4dR{UN?gN$8+@SWJFY))!G`9F{vooF$f*k&vWKRc zlescx!XUzFP})R%fW#A$Ccrn4yhM7XV%>MBe~g6s)Hy=<7TKd%7f6y1;tL*Z?LQ{%_=&~dZojmE{KyOAaypr$;uu{B2L`V)5mc}tzh>=tqi^>IXXOo zex#~d)EB(02hm3kY4@|*5GbO4G&=Dl*=vl}LcL4YQL?;_k}cH8$=>k~sE=XK5mlw@ zh7GTrBgI6hEt(vI795%*Xne`s7kWh-JD1X-G_;Mf5Um9WnY}Au&ocVZ;E=x3)Co|L zZO0+szhFUWVUE(mUavI zrvmAK3rwj|)pxx*NecQ7I_<zPm-i0pBeA^q zOeQk|<|2TXiFkh)LR4DQyD32l&o=KRK?J0Yl_y4srSMRWwh;>B!|{4PLIr%zb-YS% z#fcBlhF?@ROpSgmHR)Lh)?6CDIDR{LbUOCdnX6}Tb8XxAcE7tjyX$CnFLo?k)IU7|ky#XL>hT2K0cg*)gQmtdwL?~_o z^j#gk3yOElRmk^-UoGjD>MSwu2R!2L6zb(-bvviDmyT~dNcwAG2QE+zK=T%5sz=Td zj{7-lnC{HGBA*cV@<|;|MVN?C;NA5&{gLZQ#f7n$P+kI%L)GNE=sxTEKU`OUm)c0}3nmwfDURJZu_uY)%8vWNv-2W}Z$A7P~+BNP8cfQ!}&wD_l^5rm` z>6tWfBRb7Wu=zAS@}7j2G&pglK~5uif`k*2D0IdV(+v6w$L>1y%y><4O>f|N`s+9m z>s+E#)qYe}&l%hA9G}^DeOY$R?%Arx7S^nPXT{qquAQ1&vt_nwYi`Y!>8@|qPW_!f zYRInHlB?Q!UB9vQ`k7qSK7%!$?gwXlyGnb%?wa=Aiuv}(a_x`Jw(rfZ+lz<)x_Op0 z9_QHu+G449N#$KnNmcoxQc_m_x1W_L)!Ueu@IlRiV7cCmy2Yf@3_s|e&8lh4ChMFs zVf})S=u1S-wh8xnSGodlDf5}|3D`qAITJybJV;s1l!kH+p*-VIrDATvH{k`Wmm$w` z`IgP8_dP{P-@Z{T%XU 
z&!xIkq*j!OZ0$YEb^1#79P4k{6JtIo6FbrU<)Dh}C{`x2>B|&v%mB+-K&Ip&-5$5b zm^?dVXr>-v9xXE0Vl5aXXW_KL3lRJp*QpS7n=FUT-B11}4k#8#zrbYZ zp7sizI-gAiojREd{S_9=m`)?lr~W#+qoL1xiPP0dt=wM1c|e*>TrOu=f{Mud(P!|Q zj%CsWZ)~&-oUsMJ$IDr3!zAVZKlq*g6P$o5>Xf?XnQgPRt&{$R+6E>#*DbqUw|%au zp@18^QDIGf=%Ql*rmU_e~y_6df*z8_ySLjLr^v+ zYHeZzz23wujy`G9wJ=$R`LVD67;Cxz|$|7<9kM5GxLb#D4f4HO-|15hnfnifuE?W zND`XG!Kbf8TNN@ z)mJ_23(CD(q#g6NmyG{JqL43(XwPU-*okF!#TZt&0T~Wl94#{aB6o;`F#!%%gw`|@ zv3+q=+wL3p6Stp+$S__?ckBinkhmFUHzk=EU4QL?J0QoB4Ewd$1XuAF^I*4mO z%USKXUbB(kvs$w!5ie zb4{(@LMa)BW?0w5v2Ij+Zx=&I%owjQ&?|M=&+s7a$IO7BU8MwSFYUd!_iOv-E7s*I zph$dauHxl84Ndb6t+|HQYo)Udn$ZSh@F0ckV#ADOmZgzop7gT4VW#hHp ztgWQm%XZC$HeX*l8`{OnS=mB4F03U)eQ!(;X;@-!8!J!9lOgRYbd!zwb7Zhgfhr>> z{(xIBXkg&hf`OBw6{e{JlO|A=6S`JiBrE}`qc;3-&ckF9oe;RF4=F;@O%ty4aub!S zKt@irf`I7xV<|v=)ive?BJhoG;pU!@^t&KW1wKa&mxThCi?AL;#m7ron!T;jKS4`c zL+LZA(F6qugbasUo4+A>#^|G@f1=C{r%U)sQ(4g>XR@%}S&{q5GlNG5)RJ)KHuO}C z7vzRcLwnH5DivmF+sG+yZ#W!oZ|w+CSv(~aw@?X^s~fkVKGoCnTt}#vxr;sB8)9DN zsbpeIcxr@NjnZ0KajmF8E9)O9JDJ9UdDP1)@m>*5_Nip@JlyPidFv0J?(QO!>4}r4 zpt>}C2t)8Bf$tuytWZi5`>JYZ<1uujp;VM=W_%JFgFfE!DaC@oN-QeE8*fy%jjKhe z13HX6_ik=)?SYh79#0FyJjYppNX3H8h-T+YPB0fK#Lfwa9Oy_$CeD`tcY*(fO)5jq zU+|-{VX6Gmd-$i0uSAw)gW(tzdK8=cDw!P88Ju2MKCWId-Tl_FtH<82eP6v+FmHpf z`&rfUtIyo2S+P*Fe7fbW4OcfzH!UjO^+PV4t6yxJ5Az-rWg&Mq#Q&1?&7!$)3Ff5D3YhcRJ5uhkCBpCrG zyn`Dq4dzpd6aAG%1K|D^4*{N8=Lk4eVEwaE# zkr=VAG?b@g^f0~Y%wE3-m5^=CR~7bgZ+oep0$?)a0|Pb3A^FlX(KPwO=&#~kzTE8Q z{1RIa3q0f+&bvnJs0Rs&*)m!+Ud#G|%qHeTsPdFFQ82JfO-k94d4EgJ-!kW4zffI& zrSWp(e06iKy7{No>n6MI1RJJa%{F#?7~Hf_RXty|HdnRw!>X1=pHj0Pl4VT;Prp7_ zz5b`wZIfLKtJY3-ef=q*l$!RBg0+(s9III8r^G7%Cu;Cutdj8{89OFi_vcLXT2nP^ ziWnr!k#C&EhfHzeCx)cxAudEIvd8Gj ziS#=}23+_OooH#SlP}Ph!UpYrKJ|wHXu8RFF!^aMJtH+!-b)pNW}2BHqj-|0No3d^ z^>N$_EfXbdpefyrhnUhK>y*AzQ9m{K9nW`qr+;s*V&k1PJ8vwVUDK5dteQMB)%(ro z7yJRz1YHVV3{IEM`9lkG&gT6Du>>ME1(2M*{}F{fc=j>_rfb-J!GmQFU_?oHhCLU& zm`&dM(ekC$%kk-a4sPpw9K-k9;{xEk2+A_Oj@|K{rZZ zD-OqDvlhlWkSqdg5r-qcu+C!6YO{I7WeG%NFs_eF{B0x+w~NVPq6^XK762@sZ6cA? zLBb$+Y)%`bj-edXlcI~vd5kjc&Tp;sUgbdy_Lx{Adh4e)tuxR^< z-9QkNHomp%$jRfIwuLu`jziW+Qkbkzp;-9}$QPaPcccXE<4q!knu?{v1<7C-_fqc6 zgPjV@ml_}9Inq<1bB1{zi*bXW7)DAb z93gbmzek_^Jvx~mmvW{mU8I_xL^^>qD5nlW8ZY1>kVc~wY5d&3@#p?^pzrl7$>Hi7 zJ@a)Pxw?+oy70o%wF^t1T&UanX_>FKe6sZO0362%gJf$rPN`Som*cpZ*@4rw=WhCc zY8ZIY?OFyw1e$8YC0-Qm!Qb5%ae6Q=xd)%`AYZV#E#(N%ZFh9WPPgr$a(rGtm=8Cs zsu0+dA#yJ~8%MAsQDUQYR#rY#5OYToD20j8Y=H@b$f}TUi9Un9w1%2wSWrB<^5`ju z3P{W?Bk>FinWhIkRNFF#it&W3mzcYVt>HX5bQR}l=>R?|_p7%ZS2%bF@roHy=D-iK z48$2xz!KHf+O?E`Wz!);6^oKsS%E@K;pi?z9zZDj8DkL$vW7ALHcP=@U_e?$hX_Rn z51?KEpA8G>RP9_I8E;y(+Za7W9nME8q?qFNzk~OSbnQSuRU85Rv?>MsCK2<-;FpKH5@SyjRg(ON4g@_I zXHzotQh@HeF2)6>(NBtDW&8u&iqWPd&(mm=moG-U#x~j}gZH6-IUi&iRB;gYByQ{O zG)@>~kCTqaG}@)tTjVjt(E*LHdcpA?)f|WF9*3TP&;=fjnd_Oytkf~{G;}8XZS$D* zL|;UuWv3s37YSF0aK}z^U8Q4d7~`P_U33 zgs+9xv#nidL^{CY#5f26g_AD}EtUd+vQ&UadK2i7kE$q`I4L@?UJUdngnN+>vx9R+ z$>7*zK2&w)qboE%!WXs3;nLEmVgg|Jg_79`QrZ0+Xbtw!_`s-+={D=wR=eq*n%9eN zGqRA2x-c%3N4&t*&EhKC5wdv6LREJE#Y`xBGy^`^k`uL18nIjtNHc;Mc{JyyXLi-D$-`4SEsTQl! zVe!9WC%_9nwM;Fa@TnDQ0CI2%lXS^@rQm>+TY^9DcU>1sCrXE;`P)pUE;04(7yRI` zYs`1zF5Sx-{?M?J3f&)HrNz0zJjq!FhIrAX_(Vc4&qSAKE$&^*n#X>5}+Y$qGamtR?dX9 zl5w4u91x74Ei@Uzo~92YPL6#RH={-IQNKo|P$&l(%SrD}*&4=XSk05v+hX}a8i%Ml z1cr`+l<8xb*y891tV@C5803^dfRY(qjhZs9(SQoG+ev}OK#~@dAe9|D?o0|k1x!hg zJW(f+xn)GY4Buy47LJDYUL0U?q?kEOVsbp}z$qL1#+Q<{7iwh)?JfCo5mL5-9r#8Q z)a`4{+3J9}KH+0P!AXeBVl2O9o^V?vjrteqU9h|4ZQH>>(ZCrH{b*Vg;TJpSTO5P^ zr5Rrn_tE4cQH9Mds8LO=7DKlZZwMfv<_?VNm`?z9Cn3m^wF9ueQh%4>VzNb! 
zOt@HC#cn4y0mj5g5I4!k2Bz}S5fiL?M#;tP*{-L1upV;gufH|(-)>y8;j>{NLm+~1 z=rq%17^rUVxVx3dZRNe>dZWLATx?!o9y}mp(%r{#lHnZS)hL|9Y@J~WYX?N4E0!=xlEsh^6C_xnL!jnMmUY9&bct4*t1?OvUP5@^P0`LOW-5xc!c_TwxldoO!4h(tKD z`#&Ka1p9^*^@L^fzJ4-~>ZBIWdnp47|6}w{58>InZk;%=e7PBfjz*e3;FOrw=gfP_ z=#sC%NJfnB;fYR>gUCS!5VqIr#MkR&W`=Jyr(^mRdPJ(1ydSz@9mlNq={!2GuRORsQ<^8*ar6R(kOl%Cy0NCT}q&4a^Ic$6_bbW z1ZpnzT}*Z=cFpNO zYyN)Q2W>aDe6VqT>*3ti!#6L?Zaw{(;;t#5d*S6z1$5l?5SZLm+%LMyCrcLn%8J$3 zs;?iuc{EquGg-0_d~~YiO89a(8+>%;FizJ7aGLZkz-xgD-8G}KC_?xtTzc&vzP7M( z{q2={cEgctE%O_8=QixV@!0H!BiW~)$;JlfW3T37ug-1InI9$s#nG7g!1`QZ{k7ij z^}XA7vmqN;|6$;XkJq+eYx!RI-SGT|y}1p0Z)9fIc29Ow>bbz`h2XO3?p!c5bNHJ2 zcF&y++plN7|Jnzy&F?st+i`4m!;@3qE0vck=c-zNR#EfKbMWVS?dogSF3c_6OR?&* zz?pM%fzF@pIRI(-wTrLK2itSO_UjGVVEb%vXV#y&<^9XbpHyZMB{iJBRXO`a2G5w* z53o&kuyn?@pc26jQ~!O0n+Lmwn!;i_sJda!#VYw1urfXrdAD}z)x6-OGyxXD2x)|svJ41R-&6Vw`Wp+ZxRutbd` zOx#!xTWac<+(N5u!*`XKOlCNIj;%ft60jS#)w8j8Y9e3 zt|*+Tr*L|L6cOy32rfCJqkU6~nE@yH+fh9kqI!5eICrR!p8SSu01r+<*|0 z6JT5bV{>3EqxDj~H*rO@=mcR@5MEsH^*<-uVaeJUd?pqnnxDZP7g|J4OjIMe1`YrP+2{)(>RX zIo@+vP~jwh!xaV+-1@c5ISrZ!bKcPSA`v-5q2q;jq}532AR~Nf7F&+EF@YW$gDo@u z-LluR7)Y^T&B+_~!D}(dN%31T*K&PG=Hn_*i48pqCMFNM$nlGC6{!;Gu`nomqyHn& zl)-OZrQ-+t^>5>s{w+FPrPCkN={t0K8YehpFm@}1`+S{M5|PDRr?lUo6LC($gTrLQ z{*+!3Mbjx|k@p4_Ds=5E&wokG)~k4m$ijU})zYcVmDeu6HZwjK-1_st-d_Zorh8|0 z%mp?sEMNUrgcr(3DzV^rzeMmK7hIGZQc0D(nG{Z61`h%*#pR^( zhl*O}?wkUOO@H(}ED;i1v3?ir9sy~b7z0Mj4ee#r;tYxuv`8`2=fFkW*l)tV6zM%U zh(i4)FT#?Efh8Py@E$JnI9>&A@gbmt@Pc zs1Icq%MP*)waIw!rDx6chfIwMTh*MYRSHJaFxWb^H*^eK1?tjCgumR;6FES!TZ zW#4fAk?rS!4yDcnhsZ;lbPcY-!e_BnbSAT4lg~ffya9Xzn*wloV=ET3x0Z$$Ga<4& z_{I&-aJWiga}mMJ!8m;{Wt#c1sZ0~S0Nz(lMsO1;Gyjd%X?lMgI2!Je zMf@1x0|P<&aN^Aiz|YpD4)k6(hfcHs1H^W?5wn}XneT)XrTIb94cCc_UV(5Upxk5) zowaJlh8@qCGhIM?5pY|kic|IvJ)*)(3y<|OBAoZ2Y56iaSkSt#;L{o4e7Me#!lvEF z6e^|~g=((sHCR7$Yxkew*?1XOc%Zaj68$}kXHa(RR%#Jt zHMNSg4&6fQ&`ny0Zqhn*Umv)k<$~Rl-jC|{Pd$ITe*a9%{MxO#wOg-0HoJEJO*Q-M z3-ixvxo5Sxx&ef4`KzbB6mI&Vzj}yc^?6Ou{TF*8^y_Y$_J-fhkQcFJ~v^MYz;9Di4Ex+PcNSlIA!W z+?%Xy>`V%t;ouCoRTgU;v$)(CVoQN!sVhK0jaXJhta19tc@*i*8xA?pXStvq=(1Uw z;Ki`ay+IlO_t57eLH0hxjd(1>ro%%HoO;wGY|Nxeyq?Mo=(4SXxH$qFT77RYBL>h= z;20lURdV+wb6uFKa$hP;2&*fWWa>CAb!?S|nm%lDCq&dTSuk@FKFo#C9{~BW*n?=^ z7atx>k7Mn(Xks7_x8cmrdYLY0bFApZj-zRI_6G zbhf&g2yA4oW^FbYnNi;vdVA=d)Y~cixB(S-Q)`^SK_ouw2s$WoEh`&&KqdbSDZ{9BxVzP66-YebqVo9{tsD`8ioDven zEWkf)wksI4MHF*C`5K!?-r9oc+Ws0 zHuV8!@kLnM-tsR_qtLr-8y3?EfPy@J7eeEgB1(nL>qU$#IW82CBjW1hND7j~h8|p= z9CM~~_)SR;*QFay*StEQfgI8e0ot@aad?L~40R3ykGQeQvbkw7tXd$!QI86q^|uaY zXDUz%W;?`i0agtnYSMEU7I{Win3|MP8KNj$LM!1-)Zd~f-^WQ18o$wYUH>7yVibze z6S99ZgSU;SutfJHDnG(Ei@Sp6C26i!-CCnBS7x2%>X*G0v; zY*Juz3ad3&P=)@~v4&1Tw-?-*TG0}x)zaQGULsHT&{C1oysA>*)7NB{i|3X+3D^n_ zSby0XY=WGLYqA3MZYoHOpBEqmXDEn<9V&DDhDI7Ywr4`dLu&C_ac^F!LJ*(5w8%R| zVK`hf-L?>geG@*%Hc25^4K?C?%o?^ua`M7nT4 zs9xz`BexA^IpS_;lejalNEtSNi90=j=MGx~^|+9kd-ujX2;fX>In^Lu>n(0a8}Ef>M>=~5bt2h|RFJN10XQnthUp@ZVRc{S&H zrdvE!gK(+;j&m79heay$I&{>$7k46GY$4@XSEqcEv?-KRy^o1p&Fh7d%n9*pLVBhc zt3^s!La2o^T=$4?%`19chHcnJ(Wyo!&F{pWT4jzR@{&&br~w%z3yxDtvyvfn!j8;7 zaW~*@!P-|Y<$2}J|8Y(^?|EK1=W4Hxegj}5*`Hp$U@uG4am8d*tmZAKzu8hgQngfQ z2(aqWtO5XMg5>9hZK>FbMbd@fd>IcWbXcq)&f^PXm25^~wp>}k*J6TcqrhM#Qwwty zS(=!(!r;+3+sCNJBF`+24Y>5IboFW!>NJ7YDNxCiJ zd4uJOU|}?tPGJLQ^G-$tcnTYYvCY3|<3mxXtgI=5klv=q%O9okoyaWdz@(ef62v&R zrUu`3aU?z>O>~r>AxFs|0a>gaQiRZEsx9qOn_UTMR)wS{DT)yfE}AC|AR%T&l*Nko z&bCB0R1`iSGUag0B}ZWigR>9Thl1f0I%fbJSspDT*hRaC%Hl|3VMPM4nVWhq0h#v2 z>}uje)2b$FZ|b_qre?aN+sdV~jS0b2*QOtpk@4@;q9&_q>q=AYDE3l?#8*^nDP>DF zrcgT+*wbVohJL^@4)<0wnk#1aiWzSVV+g=5*GgfqAPaz^1PFtC><2~@kA+fzHT@j+ 
zQSD%zZ!Py>>m6wr;UvXe&I^`4!#qU2qGatbUi10OBcW(I(2O?_hH2)f93i2>5P>~J z9kzWn_D7|>DR9SOcT^@Mjy?tARp2DgQI{3ZSk(~m!6a}Ia2v-cvUBBp4(rXxQ)iMZ z`Svi^@_3T=AqA4w^fmOB1lS&Ufk){FrO$h?e?f2F9jCRLUZSVONK_TxmrL_~7mE!8^g4sqJt6-kquja>>rF9+|6pb)kOwl@~9+IA7n9tM8aR z{IS1s-rt_{x6k=Eesu8Yk9YrY_bqMiVDC4NetTekRcCHh=k=27qaQ4pUA1qnZhyA# z<@vrdxxO=#WeYXS<{CF#+n#IOdb{!1pT%!_e_Zy%vfTD#+3F`r^UyWbo(* z=xp=8TP@iaBJ(fAb1%f_mJV^_aqXq^7tc@o=Ynf!o%p7=o95T-$gSCNOa06EzomMV z|EN6sQZzgG%KYG~xxrWG2hZjP&t})`m|b%YUhKy&9-j-W!d7o@HC=6*U%oN7d?Vc9 zk9{0mI_wNRhT=UKwb$_w?2dihBk4|-e^EkGK zyE5^I6SSIg;A&ug*_Pb0E!SJ-mhJd>_4=92I}>kD%(w2!weGs{$j!RB)rS{0c20MH z`}ktH($e2l!CW*S6fQZGnG$;3t9kC!Wte@%&`>ou+NqyJwqrT|7Z8nRXeAHNO?P z8X-^rJ1Zz?9p%n9@5(jry0Pn~I=kXYyg5#bs9-;*tbfI?K3}dZZ<}l1lU=$O0SeX2 ze^Iw=(TD4=)1>hDadTJa@%_rb-yb|t?fDN?E}U&^AkkzcJaG3%I6YW7G=Z&Rq}^UR z-qVU2|HSpDC8}q_9f$gXyStO^0PC$GKHPYwE3VNu#hgwt$BEif z9~3Xr!I~jElI%i`V`OIx9sH{KVz|shz70KM=+Lp0NIwa1u+Q>wv?N1w44({p=36$K zT~1_V19||P+*#I=VkRuCf;foaZqYs5I8XJrI4X#j;KnH@ar_2W3X?l^utM+&Qe<@y za*CA&bX*?GaOC<8EZhZSlh&f}PO+BruvtxE;tp6RQNCh0dOnhl(q8jvVR-N{=;x+y z#$NmjAT)4aWIVva-tgPxl9RA8*tV@}k?^1%^mq5~}wKO@&)O3~c?_ei#0%;U4Dl02lle ziq}`-lrM=u$&(nM6AP|SZLWjkklW+Z)451*Ez@eS9l@}PW*xQd8n>w7&S(jh6h3`V{m!I3bVQX5`o(L`x^3la0IHk1h^BlOIY7zSql6&-X1_ez7y+<*+5%3yQAmsJsS*{ zKj=yMN`eHG3|or@-%U{ zM^2t*4lis=4?=QmA85`iC(O`Mi9yAp6^xGt*qr!cFwWOBGum#0b>mqZ>eGqJvBy)W&Mh|Zk%b7?m)`|2!tPHo$iTXKSk)?Hllh5NqQAS3lJd%8bFYo|b{)dwoc1%&`D^nVSnyODtc@S6j8zu2|6InHf8(^`fRkngk^Cmk|88 zwS@B&qD!yTCsYfmxxnE z6iaF_2?LpB@n_O6lMs?G!Gr~qAr}7qgq~qR*ksJ$Lb={EAAlSxmnae2)moo@N^rXZ zUqkkw{XW>43g6W?h20ywvy|Z3nX#S3Wo==rxW;c zbZRl6q%FP3bU+eDKJ^_AVFqq}CP-&61^YgO zQ!+M@$8Q0;B(xgMV51R>@e#8XkdL@PEE_kPPK=rOi81;_u^2|EHtHb96^kz+DkiP} zH}s`#{DZJs!O$b>nigBP7d#FU1dt;LNLr=RuzJ3JbFO~#eEp-j`bU3Szhm;yops^) zb&u!PJ$|!3x9-R-S8m<0YztK2v>?s&qqhTVKJ_RKI~lYv{fJF&C3zGDD0k1`bVWG> z-ua)pXfqzCMW#*gxa7VBC3)P~nVoImgAGLB(C)(Wo31x2aOe33-WQ$#Lv;1v_@v?w%|qI}8M+?=;N? 
z_r2Y8wQjn4y6bxN^}&DEH07Gwa@jl6bUU~&>)-dcv%!5#%`_bdmweY3F3Ec^?lCK~ zNP2qK*?gBr=A?#4g@-eXNe$;SjyVZYkFW!dK`MMu?wlhWXV$8;%m5}(8%^Leeh zsoV=*bLYkk86w#MTZRWK^?E0~IFnPOwXH@Pyeau(=|uS+$PXP8?ytJl(hKg_+~-`< zoykAmdK}}=<*-i1ZD@KYQw>BRxpZRgk&-l}hnbr36Y9xPvRf7ynOnyju^Korh9SAw zgkgaQwT=h)J4Vx5$17U-R@q}(CX5|Fu;K*v$Bqm~NBF*|WrxnkG-l~ws-?U?5+T+g z5+ML%vAfu|9=;y0X`^lhZX|Jui+gCfVZ@)exY{hmA#kuLVCw0(m8D6aWGfAIUCsgt2@DEpD8|` zVBh6^^R*jtwHshRt8Jw8-`GF1C0Dy{CYq~l!^4_&Fq~$WV<{{cFguUlYPr>LYa;vN zSLR>TaxZGx7Y4EeuV$C(vsGy<;8k1sQEeRzW=rb-`(i0d_;s30+h1L=O8LiS2YjA) zx|SbMJU>!=xVPaJ8XyMl#Kl{sEXY#^Abb@}$@q6AQ8*DD3DHiyL(`m3Q@9|tI+3+x1Kciz~qr29B?QD;xO2r zmn@q#SXmC9dc_jU|3EK{SMmWCmFc9>B^U;BuTp|*vumH6tvsIfA7|*g-Sg{mu}SAh z2LAhQ{CRqM4jXIx>BKP-FG^RPT(VI%Oo@bGEA3kb^|reY_u|{bAXGhvk<;N%NZH%I zRacN63+Ry}0W4eq?LZ{*>S#0}06h}HIsuW0P6)@`3k$visZ>H=g@<_$Y&_Tu3+rr= zsvdJ7?@o^7OOt9;hnY$L2b89SucWT;r5^#>pD7;Wk_`80AirdN1D!}|rEjGZ)BK*N zyKy>A(&c^OtNYEBVT+K`n{5V%KQ_r*RC)Ajx=zw3np?vCo z+~xBxKH*lHI~VGfEi7C4Y1u9xw%n_mI)ym&=X1f;i#|NS9zMaE#ZtQSW4FFVN(5_q zdF9_!(A~I8IqB+kEi7&Nv~;O&`$tvvQ?cnsW@=~DYlp6H{r>I`cHbENi-{jhWE-B$ zRUKdSAq{e@u3x0oET6j-bhlM0sabUUd|j>?5AX!8cRljaVx@6=%r%`}#M@)8nGB^m zW~MskGE(_{<Hdr^Mn<^%w64y#YTC1?;Br&FOP5bOmig9BpITIKx$eD7 zm%EQt_$u!<*7@o_?Z9dJ&`k48W~M#Y)IlHC-7KXK@op)KpFTaa?V5LHS8nB|MTH(@ zd!M_@Za&>w<2&e@?p;*qdcFHDyMESI?rVeyCHt)L3-lS%7=1=})c1EQ=x&e520@9R zmhJS7x*pj5l*q`7-3BPLCG7UR97zTX>wVo1&W;ip*|A#=A?%jt%x-&KO6}6SzGmN= z&yKlTedV7$?ON$O?OHtG0wCP=J>l{-Ek5T~sv8z+>+hDX^Yy#_rpHy{tHW;GC6$Zr zfbS*O6o#9wG?vCA^djAs`(mzXANhiOrqI2Sf^H#^lvIA|F7x$@r*ym6Q0dz^)4ZtQ zLL-jLVkqEio&nI{at$Me%iSiw@3?ERjsc#>w|_dbsNizV`x#vfkiy+!U74?bI(=8c zWwC+PsML4DHQn-=LRVIs&)sy(Ib4PXL%NGF@D-Z9rL?g{BHquo4$hZgfjzK|44Xj@ zKRP@zrUxl4!%{)3IzBcq$Q`waN7hRzuX^iy_)vwJty1^XJKKs<(8&^tPje3{!nAq? zornX>S6g%8IV_=<%rz`XcSI4m!xzaE%-@VL6FL!Irb%V!hAnHwn@uch-oJ+<@qPMA zG>H&Y`VdZw9+%5?w?uIT|C3Vwb7dL+|5B;>PfFmwD0_0so}Vk>zfvNzO60GU%)FBM bP|18=G7xZ`a7`Y#boAoUzfsr^R?Pnb?dzCD literal 25138 zcmc(HdvH|OndiOzQmfUdCAHoUxJUx25i~aD9Uc}4@i3MhBy1zwbQ;~3)S@5qy)A?~ z682_1vm%3om}G-UoFF`QS*)2g-kREwt<(%jWh!`Ps?tr%X}Jd{R3`iI{3(DIPY!Y1KhlkMS=7vf1Tq&n zkrVk4*RTBY{XFhXAye4gZ|14Y9I}M1{Z^K@glyrW{-UtG-ySaRFJ@)dkRx2uU&8XX zkTblje_6P+zclRXcZJLP%UF3)s61TJUlDfqyTg_JmEo%Xs&I9Gb-1R#CS2QJ8?NiG zWA*Hz`fx*kL%6ZOG2GPO6kguHoRt@cR)mFqf#n^cmEq?8W}Y)~`#G`XRZeuiV^ZFz zzh#2=Ec=*ldOUo_^0YrTj9=@~NH7}lm@~zFoxM+N=<4k2%oGg;V!naFp^TFJMIss3gg+co3#o<2p0RcI_U`NJ&6IWR>+U>yxX-t@d%v&i;NHI6k|*{Z z>&@5>Ked1Vz9SjSkdk|1@dQKxjsU0MB=Y@cu~jq&ECFl4HfX9tHPIrP z1}&oXRcn8dxJtCW%Jm56PkgWZ+j&M7)y<@%SI zxIn2^UtE^e)|K<5T`YZ-*Xs3`i5}5~@yf+A^ih#hbB$P@RnskYh!v<=nNz1tbZ6D6 z%6V3qHHT`k3jNn$-qq-%R$MFA;I~e!MQMGYL9ENF+lc%6totV1H)P!}7aQ5TH@#}< zU!mrgBQIdhR%F#%DGKPlSzL*iZJ z?1wtjTM7i5R6-Cdpx*&00C4o`oe<&(?VXW{wL)|>1~m1DJllmnDveEy2H0Gvw_{Ho zeo7h9D|7`0{bQjRn|2Q{Qkz{YCIwH7#R6G#9F+oMa3F?(X$!y2h$vzLkvs*6W&Z;U>0ax|7bPlm*!(O@JdDBTLd z!K|gFB@KwepcD-YYrJSmYn|2G;E+I@qYkWYcQi5>92%4SwA9pHZYM+(^1H#Le4`+w z*_~z0jBPMIH%5-03P@5=49G$|Ej}jjlwLxxMf>@%^$G{Di34b9>;dSPpTU9pOx^A$ z1F_+#n6=kFACXI+uU(FgNdtkM1R84vtU$mY-r4OB$+%my=H$2^Q0y_ln4v@{=vUyL z=BC1jfblSb@m{`2V7whXV><(RywZuN9N2y$8V&92lg0ubVcjl)2F>mJU?dm|`a{7u z*26EL3w7`P5m8|D6>v-oVt_ntC>1GNCI$wB)I~G`G^EMW#kf(s!ii*MKGXr_cDA+s z3(aIW;LlzSmEp*~(Fq{fz_7yg+8;9b5r0s|3iJV~_em0=K@y(t;0b%_B`mGh2O~oa#Ine$LgJ4EVq;Py3#&pHwf(b4%_Xfgh&C;@hdmi9+nS6` zVgn4ARnAX8B#+YlgJ&SeUF2dAD3ohNmFu*7`qfzbB{*d^7vxaWuHCU`#W{COzNV5%6Ka-d&tfF;uxpZkM}3o#2CdBa)!t(Y zGa6|!%C(%)TJne{Na2vCry!A=xJk3|3?m!!;6^I7*2eEhvys^?RKM?a;G?{v0$rpS`Fh@v}o%jTBa;m4~U9r@5Z{UH)?6QNq*9# z$@@GvL96$u_RRRrujR4%p}{Ef57EzTqD-WA2_8_T&Kz-vN)+(v-lBNcLi^R?$p~b#Y1}FwPXI 
ztUY5Pc_w2fC99aSOp3@9>(W7{NM-1mB9KiQJ!7Fa&6u&W87q?DXvP_oeZylX6yB1l z$tFZ7_Nomffkt+)?P2PWMEIYv$zy|q!P6NFNN`vp#LSe82V=uNt(-V|#>$wf$BJr! z&>(vWsRMOcrxF2|OcaBy-S0N8AC86t>&IUjS}z7-5FYWi11GV7>$9e|UZKqE*?hdC z6B)Y?s#GxM^TkV_Bw^gE-EXJf|2L9XxL=oX^)2bTt;xErsk-gcmUE?xbxrK2st!Mn z#hM2EI2S8x@l(9$uECF8t5QeLO0_CEHSd(UXZC&FkuDRGWkRZ~dD?o<#yQKW9 z?L$}9>#eh6-|k4)uSwRgS@3oy>pSl>3UgK8ej?rYShDf4>-$oT+i&cf**v{(v7|gv zwK`d{deK>S{@~ezGlAK@51h?+ad+tKp_#E+@dIbe11tcS!xCu0iKIRtigJ;N9yfwq z{zYg$XacD=zsY}%?*&lDd-g&IRG4s^3Yl%f;8?h;soi)i(q>EmP{rg+{4&upV^c&*nV1X#iL`}kVapY9)Yx+6{y3h> z^+;af?z$SLW9KK&PR@4Ebthe$<_D1^?3<)*cs!u9LEI4QVJxy8rT;pINuK3KbY_QI z>J`&j-nYT#zGKozENbg4POA$IU4P!io#zKkL)?Jr#bq1^Mxir3gHc>CopV`GV~Q8e zqGiZ*60E?ecb3z}5-s%PG%r2Eb1|Jk8qY^CLjI+(MJKhZL_3#OW11V}VMDNswk}-= z&_)_GYq?&`i)V8i=R8I6j=f|d(PYh0DS8TeiKx>`5KWjS0f0Mt+^V~Uyg`431!aUAj<$JDj zFg}^Qk7W4MjJ(LyjUc?DKuYnBp+F>XdQ{pKU!_ooY?|lY5sCtE8WPM2M5D~T z=qyh=8}SmrgGu-tSfHaXBQ~v;kfE!wt!Pv=}1{J+E%}bH~z-)v`W?q1*rL?05IW?8y_B}A~G4w>uy<>7L8iw&fC3?An zrK%DO?PSD2Z7~Rn;GfXwBNSf&njG1P^l=zb`1t?eBG#yOz~rSKk5i)MXJ{r7D6?Xa z9cD|WUL*hkr;wF)$`Mp$tU*X;vP6VFV`0XbV#a8deme9PN2r1ws|QQ{KukJHxBSTr ze@Z6wqLpG7BdXFy6vZ8E6}%+nk@O&wzXP3u3YtY-4zIJtmIVEz8n%)+2)0c=7ek0ocMO+N(7l{%i`Kc z-aT~n(6z%0^+&EAzS1?xY3sbs6RAtIMX%=~6 zOwz+QlQI=yDxM<0vs zMs&)ojY=*SmChHSe-CM4q81Zok2U_jj@*AJ2~;eEdUs1{p9-3J^p9jT#m)^C`9wf9 zO6%PaT7NzwD6PahDa43T8P75%*DxhcA}XQKo>i11`k@f0G9PSJZJCD&%Jq?&2jX`60^p3JT=EMZy$4C{#mVZ%UW1 zN|vvhwn0alIr`PzcUr%2V}Gi(E9qPbHSEx=<@Msl+Q!R{OODH~ORjlavUWYn4=%2M zEWN%fxxVXW-%t8~*#8sX4}GciM-$GYc-S-Bce(#k|LeZRrj?XQH?2=Lq19_Wzp57I zYEspn>HUkX9rJyOoH1_KZd(u@xn7eJwy_bKTB)zaW}-C@A!@hn%a>{?BoZ+q1Fvr` zktry$7&=N!2lqkUxC4<0PTrWVtpJQcZ1OJ+(MNP7H)<$^3p$KS3>X0^(1#&*6%sl$ z5p2pdX_6Z!dAZKeNkpJ`p;x7#w8o+!%AMd2?;;vM}I}P$#fqi_C{d zmZ7!6DSv3KW;+(hX8}zmD#g|cL2tk-Fq@THq`bh+EGw2Ww$gw+Rj@<*p>KzRCj-J$ zJw4B@75V}ZIVv6L69TaTFJz6-gdmTN(g1>?z0;^|m zK=E+|Ve5w810S}6`)dIoSwx`(w;3A{6%GT$2WZ|2k6G3AK?BGdRg7N@PF@89a1GBh zHNL`Lka!ciBs)*x$KOOB@;DMu13OpIINNpk(4|Ay+}FhUl2rM|+vUf8Pyht| zq_!PQZ+kkq?de3{vE;U6)3!U6%OR7*FU7BIcz4^?ZK=vl)5RZ_)m?h#PDR7)5TuKd zDgVeBqgY6gcKt8Ny;xnYUY-=&J=$N>i##(id5PW9cfUYUYcCbbeaN)G@|Q7 zS_{Ko-^IPSf*BMHgyx|Gsl7l4D4XO9=}E?z=T=&1UWLY`UCg_*b{H3gf64?@H5pv> zTnY6BgONFYO2;iYPzw5ygi*f)V=mk{uq#Cc3Gf&V15(MPr+D^6LKB38tOe_0z7^&^ zS52$r`(cnz_9T6R-P_vPL?%)P}X|S%2lj}pj%O`)L)$}U;VT4 z_UX>W<;~NbuRR6OtMJ@)xu;7M=*!+mopJm-%;2-3FGe`Xu1WsWnTX^z3{!?|#`I8< z40U(~TosUz)X(E^-YB}JtBj~!rNwm46`|c@C^);*Nxl)useaI$;19p zWSRfIP2HO|0x;}6NQd63$F6_{84e|bd@*rm6_qiwyM?3-*lfr|$z&5|v(e;{2ud|| zh$_B|d$|iq{&wFfshSyj)BI-N?3WixI_|98dZQ+_vNP#iKD~dY?;FP!?M_lg&b!XK zW{Vc=!lJRC`ktUy3;yW-!3#(}dp{q7)%xzB6ZIm4?=_ujVD(M$xAcW#ep|M94CT}~ z0TRlV3Zk&^A|MJ(NmG4$hoj?yUYdong8xJm95ioJsMmb$d2cillcF#_D-VLPiL6aD zW$aq-5^1tPS3);+^JqL3fu&I@rYbo|EWLyhZ40UTf20~Dnqmvfaxm8AJ|90DKmYRC zm*?#_+NNJlx%MRNdzR9C!a%l5&mr^Axl0=DCI1|DsDu3Cy0n2+I<-M$7_f(sq6f4Q z_b@vEZ@{y2)`h(7S9u21GBG4oyq1yG)`F`14esSeB(SO5od)ePp!EZLow5J~n%YGG zMy%t(n^*=c0RPI0nD&fp-?TXF8p&>@mVu%0xpE$)Rb7rPD34l5*BdFotVG>}2$La6 zz5mpHZ11RS8`5(H`q3@y6m2Kkms-xJtYYz`Rdl>=nzE70yRe~e(gq`-!ENPpUzUm^2s6?KP_Sf z{Yu{0O0BoTEk_EBrms`MmGnGtDm6Z& zr6;c$ZYTL`m0H_gg1iETWJd0*w0VKtSNrEdwH!o!=~9DRH2nku)~Cg`P>ZFUK=I}= ziv$d;6l^ysJbmym(#WVcBKjrCuLTaORaK{cFzg>opU`qj5sxf7(sGSpHRp_&U_G!5RK&q zPO<=~+-a&!5WB!k$WLprleuPOy;g2xEwWD7E_8|_i&yi9gp+}Z@u&o9uRB{u5pSw0 zn8qxlQf_BiZ>u=)=#AQP?Bs2N4DUPbcre0NONz7i!NvnQS?#cq_lsxSWS+?F*=$v9h3wxRYeAoofGa7TvLvJsCnJ zT#4P7t%aZhp&N+c&seB|(gmFMQAF*_QVS|xH7OQ7hY_}-c{fSUA`gF*0)aCYlFlDR4=qQ_t z)JtU*RF8%k8&x7(6x#%GeZ?+WL>27Xu>^@;SbCK;QkTOw5DI!!KZ!mwWrST+&jued 
zs$^^f%mJkI`ZAsra+c7d$Cu@7Cz&ec83+@QE(Ok6F}>?fRl{`m9cRV)p0hoPrl%55 zKXdDJ;@M+~hW-WT^S`R7y4Zf9efH4{9qEeI$%@r;r&1N(+Z9LuvgLd2@3r4}^u3Ps zhVJBs?weDo4M#uW_=;!wh3Ajo*J0v;nL)<`j(-6_GHrt**{}jO&+_%KkGc-bv|jXH z@FrXb=DLy04gu~c){bQjfLaIRQ%)y{S$UBX=Vym+PO&YI2FW8XXT-kJ24L&+_N zQfr==v0N;@P`Xgo_F+lIH^yPHIdkdE{M16tPKtxk2b>#UaIXJw#~wt1oH=_Y?eZjD zp6k^KmnY@gny|-iS$c}7C|fGU64Wp$_Bt{-uAPF$5<^0hT`aH= zM}4Lc90#e)Itiz>#%;mRErkp+Mz|hiRm56|a9=C#2hoOH5kVu1*Kr_|cR|*K0w>yE zH%(g9h#iN2s1Qe21KzdhDsn>-4+us^p^K?p(+Fdg0z-&fz)=-gE-4<1Gzlf1l}P%h z^8B3GJtF8iLl7Iun~GQ1s~AcVlum~mSZq9T8-Zd=G#;GG&$2mo&VV}a&L5Z=Y;!uy zi4`owz46jm1Vt<;JV;bPoD-`=YpJc28XbefA0dRwixV*CqMy%{7IcQ+o^^2JQ&~SP zramy)LO^2&5c!w2RsSq3SqGXW3^@b*QNTlsfh^NN$EQA#CIPcU-?~KC_jgNQ#+`JA zl0T-T8;QrJ&|~5(naV8I?A1G;p+{!KOEPAJ2Fxm>(`hgqWgjG>)e_Z~35y9>HH!@m zta*@FzxSwCKN8rC>$$R;nb^fM7tYMZ7hD@Ya_;=l**M!bw`IZEakp;8<@!tYbKR-B zj@xw`=H+SI^r^E5CKS|+7Icb0ak#cg1l%7 z@>2-tfV_@J`4_ZAGyY7_6cHK5G)0S{MN?oeDPyATnxQ43Or;pS9xg+AdEO=Jdd8#$ zW7BHoUDt{=$~DD<{*a@zfKTX}L(@4x-}pkV;~cYK!5Yv*O5(rMm5^s}xP#8vfdY{H z5h7aUSAz`c>SJh;F{59EaL}>CkID8xQ6XXf zX~bxRGsXItke`^p^^Y=Z11m^0l1RNbq#{KSB6ZPYRj}q;^qk?zYm}pIjCfqd z+xktE#fugB1w&G#w>*t9$SxI}yKc61W;sa_U78fpMN&i;NfBL;A_7TQ*RtkgjP@)*u!Hak{SBv`c`5kgy9&O>cx#46lBT%x9b42=79C*fd4_ zmSGm*pkXCqAzB5Xy%2n>I0`F{XccV=jsjoc$pk*=_99G6y~H97V}QSQ!e9IrtG^EQ zw}kx`Zm;`6uQ}f#xfT~NXj%M+Iza8g5z}D=37yPAApoc1I@BD8pNd8QKM+A&{VrUNM-vnizUcC?@Ft()fP6c8C7N*gZa|dv-Q0}i*5*X-@ubT)ZTZku3xR#H zyoS6)v)!rk7TnH^Uswl6)6C$7M(xL4Gi$%FYq7Ha;-L$NW+zgWo?p9)$~x|GMa3OU z@kzJj-o;FRKYS|8O$zZ2(M(VdTm;V&ZLeEYK3G7`&BC!F(XQbb!!qJt1zb%A_bNOe zgKshJV>mtbF?YP?p3qThDVzEH2%>Zukp78$!eCI+EY=GQ!LIelV7#XrdOpJl>j`oI zD9}%ku5c8ag7wlXRPs9Yp!h|hShIJ<;nKJ05yKn?=Zue#PMrhntkOe){cTi_I~8D8 z`dvqmPErjZy)@gQ@{xV@-G&vHw_ne_Sm^jz!pjo;h#-mV4Lo?Deat$)9M!PS?r_bnB;bY<@65WPMd zkO3^@#mT#*%~`Q}dT75?Wdzd{1n(TrW}$uyv6Cs2;beG6od-lKQz74}am+xfiRs4a zm~QxmYRDo1HHM@8MoRFWm3c3T+bOeXj*j^2RTrEJRid9TEXr!imAeQ=l+I zOY5Yy;7oN#isVAS1)T+=ZHeGYAenb%u}`Sig^c*u?3&;T(X&iz{#o;qW%MR!wT<5q zQcLnK)NIWw!mZ>)`Rb+R5guxK!6zexXn7Y1lL~=_=i@VWxA1%m%mC|<-KqeDKV+;b zZE}U5kF3+48@~q3#=KgoSOSO4n-A&Z5v`W-JMx&3*K%6FQuXKWCsOs0x*N4#jo&F- zY>Rd`sY9z8kE^d9i=;2G!XsO?w#F}3U1w!6G6}NlAu}S-&$~pYSp1GzDMVjn_+=M* zAmJnLVj|!_>invYf@QMLPXC@t6jWs8lMEF6&N>U=vlLmDi7Zu0t!wZp(zT26?Zg9_G$Yz$x)hC(#>}#=1S4M{=Ko_P`C{&3iR=81ukN_x zs+ie)?#p+|s>!aKSTVX#_R?Zi-NhF!ypXP1o2*(3+qk`X!QOgz$DY4F^!-CO#};-x z`Hh3GkIYwJw_a^ZE#Iz^)Otf+n6GuQR@!7B%o_1=VH=I)&FBwU@d zr;yA;bnd)feWNDn>Vyn|No+m4b>_fANfU+k9=~*ax@)HPY|q`=owG%^Yj@6Ve0%qm z-PgCJT6U6Pay;EXoa`T7s0k{nsQdisv!`e63$B%PHlguKV|wM5hfXC{Zb_{ihsFEw*~1IY>om;n!55c)?ZjN$YQVvek#$}EeNPHZV=ETV<4Zy@FPol=$R11M|~ng1{p-m zqC$C&;EYe-7``1L^ui3ZRIfxsi2u@9#0TfMkM7t9SV*bJQo;)xYt>J_bmA}(^UwOF z2_lUMw_;S1NTzC+)lSFf%~}rl@dXfrfNEN{GkZRn5;8K${_rS5ay31}O7A3f!z*YV z4MpMno5JcV$~Yr3-^MMdAR|N$oH|lIgClGDMb7DRMZsnZA^j!VfyPqj(tn^X4TVD? 
zLYX22A<Z+t0%r)um9#el+D#+KHbv9f0rddD2igdfsPN@7T|v zxPVi_#^Gei#&@@0-F|(1p=CFGnutP7yH+G!D-!mDFr3;CE;ZcfOEQm6K6^=wXx~CE z0F5+C>$0ML3u?%Pg77qnT?5KOW*RImeP+NE*Mdh7GD?O#@}cD%yFeggkwn|9m?=sq z=R<-JgTU#SG)(tzP?EpIRH)e{l)hh~HQtV5w#Hlc@Hbn3vgU_t{{F;IgYO3ywmf$( zHq%8$vUH^6zc&Q_%v?uo&GZ+d*j)sjqMxfOHKjMnrly(F z0cvZ!M5VHKxusvD*C0g%uOT$6C(y=j+nbgGu?@A@>GE*Kj+y8mDHt2v3_aj zjYH|C^Jeu>`ZD!Ux(Nqe5zosGqn99O#$j~&#)WyxJOyg19vep<$HqBP z!oKEU=q}HrC|<2BUH-|%KcmIlgbJ^4pV&C}_UR%L`0KCKzuCCp+I6MzQsr#qGy(afY9H^nzus@wRJM!oKUbDc3G0pUgVFwzsTaTTY-Ey#iZlQUgJzkt$a~Ga*Ua zd6%*JFM?elwpxE)s0RhDM21|Dx(oW66_`BNj_@@TxZMhMPvAAuT>4;e8n($wJRK1g zg3wI)((f2q5UVIle}|_zFsmO9Rq0|dL&u(NV;lMvTD=M zs-$3k0tMVq2>FvqabP{zT-P;}huzz27W@!=f$ zD5=j!-_Ri`1G&;pZ4CDDz)X?P2bZ+ZrxHzsE7A$6Kf){J63r5y4^{|zh;J{It2kp6 zeTVjJ8(Pt~cn~#4F<1zQSPge@Y2)lmbCq+p#OltQtq2L*Ka>~_rH5n5;aFm5EYWl- zRXU!qk1yRYa_+DVQx3rvKa8XnluL+wE5P5&wQz>C)5=N5aR9a8#uc*VPnnGm?i7+^ zG;~x@OtHF8S&W0NhIJmwt{I?%@4z7?3wAd9z-;Nl6We!mH?hX|CQV;4iA7VUGp2EV zoI7nk#*OnHd%W#1=%gZfu4hK3fV5tZ6Iic?HzFXIDRm#yN{$gX$g`le#p{*hpdmqw zB6ga-9;u%kjXRZROlfV4m*7+teO+?`ffzlWVgv!<&KLHND#yQz_noGIY((%%Kf^#$ z8cC)I*2qx`3B`BF5HXZ-_e5g{N!g@NMg>F$g%X)rRhLu74DylT0}KY4%9$UeK-Mu$ zS~0$e@diQQ3?4zHu-FhDhYx#Q+<9T=TqS)%BI)j!-()BPdJNOW7maU zY4@6>dkyR*?s{tQ_1$xiCf%#%{7H8^3M*E@RF$Yh1S2d->kr;)y;Xf{GV#Kf(k}#( zF9Z_L4<-g*O4LZHG8vJV?lFEb{^fgSe%U1dzwQ;$AitBVFsZLJEa$#kyvJ&OyR&W& zXZ{gqMLvfy(Ec!JC#zt#{BC^T2E-)$l-85}QGvb_pr3SGt5`V6UaP~wT6R822L?rj zp)Y#sDqKP1gSN5yhiChsGGe1i+6=SU90myL89Sp_Vy<+>+n$Dmg2PIPOrzjC1#YMV zid~z)cfXWwR)S~$5-+?&djB~JwGV!2letD)tOzvt#+$1-(fmZJ^l-v{n9&Z8>30s} z$TWmPxJS)BJ$*_vHZT2W^yo2l_o1*G?5n36=8f);$+F~Gt$?b-ryflPopkzqFOB&_ z3e}W8q$v{X&N?=@m2P(<$v6So_>QRZm0R{zC-&78iNpuRFidVQDMSgKA7m%cB$;yL z9Z*8!zDc?7QbG<@iJ4h`K)Js{lCfhQVUdbWNH?kM4N87Y$@`T2JtZW~NGVGGffB+J ziB^$?+OZWw`TMn;tv+RK zxNmY;E2&lEJ<4gtpP1}=G36?Yt$X=Ts?FB=`wpeyC#Dwb7#j~QS@sa`ws!IN*J#gO z)|G0<$lY(&#&cUc)MDg5S>v=GUN7x4O8Zc1vUbej(|h=#e9PQk%D1R%Ww!37F0fR{ec~`%5A&3s zMry|wvF=r}k&3KODt+EJQC8V5+9Xe_bQeD?Djv~}_#|d1ctZWg7!JEY_GNIHab?>c z@o684L>7sp6R8MErYie6eejW)<&>|N<%C{FB3{BbS#cQ0{OFE}y9i63!a_=wx}KWzgx{z?85j{UG<{x47$yzu}4 diff --git a/ultralytics/engine/__pycache__/model.cpython-39.pyc b/ultralytics/engine/__pycache__/model.cpython-39.pyc index 01b202591a680b89a0bafe88025063ee7e73f903..b1d5c97e33f2de9df78a58b1917d69d2a080a4fe 100644 GIT binary patch literal 35770 zcmchATaX;rdEWHgcJ_kBg#ZYEw-!lhcd5kyqGVZtNk~h8AZY93zYLk ziT)`Le$IDVxtjWRY7TzqTlsdOR&aQ&&?>e|wUV4Wt#WNj%2#R?d>32O?U~w4d$u;) zo~zACnNn*{d%iX==jGPk_P*M__Ws)b_JP`g_I9=zJbZ}t%;Twh8BffJvw>0w}`kh-{ za)!l=Uc80x(o3CYx3g3jPOq(8{``|?SJqaBQ=36tZ)|K1=Xer%SA)9W3{iOQ`Inxl zzw+!e^-C|lxVBUr-tWb6(C)?cR@d|E-A+)y7Bn}v;^Dr{eh@|VcGnMD_1KH94D+4N zaPN-SZt07BVrhCt5qZG%Po^0{ITw-R)*HoR<1obA!W7|4r*3(QZDM3v!&S zTHbeR1^;qT3`#+HBY!l9?o-FwHq1-|L5b8SQzbsb{+LinaTJMEv!j{}6s1@n1Q2Fb6n7r;AIF_z{;TN!l>bp&`Ix_eD-ZaK{?c2y+Jk=Wt|NZJ z|2Up_$p7M9OD_AzQSxE`wL6p~9INjX{S*F4lzqg1-T#FD503kvNBQ&q z1*xryCsxtoi~dEFIDz(#OKU&jUy}PLzmhBFYM%fJ{^YQ9fn?!5yDyiUaq+j}u6t47 zHdCvG>ovkI9`TzS8$pQT zs+b$^^n$25BXzHBH6vYfFYI1z`T-tu8{Kxh+i{znI0!eqMj)+T^}=SiAGzIL5PC6~ zLga?%W*qo#+|70)>NmDrFLLA1Yj)6`WohfG*J}FmH)7WZz;T2q|x-Z1;7Ou9?uVfrD0=K(i$KC8ST791bkGHx}AkC4^(YKAT+jd)B za0PdxiE5VJms;poYbS0tA{Pv6*#$_~!e)#SUk<{nL0EOywhX)h9>Aoz*-^t>2P|5_ zo6YrBz|KlT!JBc=@jYJR?4-I0#s+X=Ofo+JYiFam+1Jq1Gq4hFMyI46&b2DIC`|W@ zQ5-J2sq8<0ox=rBbsrKT9s%?mD-d$Yj>|_Q* zNXs40DR3t0HkyJ{*P8K`3@t(M>}dLtnv$p&G=TL97#1SiCI{#BdaY(d@XqzSZD8cG zYY?=%z8y4T%y5h^LB=T9BxPwd4dO<2XsZxn$&i36>p(YmalPAZWrp>_2Ef!0fZQD- zM&z2J-G1Ebqs4xxuvJgpLYl6!O`h{w5!#e7t;BKIT<^z03T3Rka~DZcK#&#SpbWXO2wgvgW8YMKRdEj|N=bWxFTH$;n^#pJezs!R|m 
zqV18EO#yO_q*9IWolf;a*YCFiU0J{52VqNC*~Oi;ZU}xNC6fn*w*4~{D z+7G#&0eezi<4P~K02K14?$GOk2fAQdp4&_l8fYrbUOe{+;JWaaRE-;(?jlH7k0?X@ zS+YgdqqR{}8LbaJtdmmHwaRah9C?6fJF3UzbV*OTK)z5|`|U6!DIxPk`IF^VNoDN- zCy>H4gj`nFG^9qjj;o(o<`igR21z9~ zD#nsPS~v;$A{h<=qi_s!Mu7&zf>7v&$@6l%E?h4)Ut`==li-B_BF0bAU%gKHs@Fkx zttNyXA;?xWJrDwTKy!i&_Mtyf>~$btF@xaGtcTnp=~}%LTs!5y&}qg^2+x5CXI#=F zqFMlE-w}zO5-lT$Bii>V_dFy6g}Z`=@OMc11k1BHQoL(#H2r^m!EQ{Cz+U$sBxGl z^>~ypXmW}(BtRqOlzSOR)o`O9ibf>$XQ>f=@J60)b~d_fb`xAWgw_-HLufy|8n%^S z@z%lElOZKN-T)Uo<(|Vaa?f--SD*AnA$Z1%8(XV*9KT!MP7o@*p_Ff9o_$tMovo1? zyhbP}0MN?tC^|DDz6{P~SuEIIAcGd08a)*SL1KfMMBOko9MAWK8PNPh1-{VR znDiA60gX)LZUW9mEAYb9<7fCA&9}gcjY)f(M-_qDQkKNB&syU0m9A#Tl^Q{v7X4O# zy{?%qs{+4AR|WNzR!N<9REsuTIvEdG2cCphZ8*mnvBrgqCoFlgI%x?Wm9RI0hNs-g zHH`N~iYGm(Yxq!rdQ`U!3rcZ6T<=ryi`;8lK}X~UBF}@rYe$ z4Z2HS6H}KM6z9MOSG)1KE;OoV!>}7doG48uVU?ZzC+i$>NQ6MKc)=OP*#4BnmBK4=u~pp1avme z75hH|+V9RJ_Sot)dj}pNse3JmBYVIS!t>c^|K#?A&f{dv5#Sb*JOv>LDrlJG)MiH6O;Oq-vxeGrpg- zZXfXrS99UF@!Zi}2X_m8@oo5CJM*}5j92{9+j;m+>G>>s4hCxZ&bZ2Dbv12qCuXVg zpJ?@v7N@egVdlhPzX9b5yb*k*-%Ip^EK|^)7vm-nogPn71I;)DW_fSvcIvo^iY5#}5qud}0ca1_OI%ouO01t|782%{Gtp`gfjrM7K ziA_L#fzSm3H(@DZc!`PW3gL=O2g}Pkhfq){2`TtyT_%LMVmxvuiOAaTy;Nkae`Aty zbubTo>v-f}IsNp>>dES-m-2&BmEMR!vAWZ4E#<=_5Ch>+9F`n8UwU9zg2V~JVWnQ@ zG}i0ke%!7dqAsa*O{>=;3S%8b;r%E!oD#yv<3;*lh6M(ergJ8`83S)SH^&e1{tQO6p;|=$X>bZ!uNi2$FJ={B@x(@YCGUO2P zq1><>^*1(}Zw`wz(1#_lupwBBQPA3uQ6*4%*BMU3ML_3$z5Z_QyT{L-dhM+E8)3aP z=ootqepBO+el6&1!jtuy^rqU|2_HmrYxqPbamXE;ab}$Ignx5|nf#11=gbs%EIY-* zymKHwn7_bLRn5F0j8|4t0^cz_#O?4HYHd)wn?H(tem;*+bPxv!%N&K}Mjk@E@Y~KW zP+U6RFXJIgmn-l=+%xBgtyScxmOl}MOsly;1(5n0L?$&-TJGcCMR$FNW>fbXd0k4o z8~3(Qw}gh-h+8|hRqJ?8lsRh~P;P1Tg^hhjk)+@(zB+eT`RsQgMPvSW;e7RlwznDJ zywj^9b`XZ%xLaJ;p!JQWJird90o(qPKkzTJWZDH~R zirfw+-9J$%wYJfwg)B2_cEL>NxtL!64fghQPBDh$my|LcI@ zH((0-dB5-$ufd4B<(vg>`6a)+nZJ^ci(loh@|Aq|3vr3(H_Nwjb2- zCkFc8aDFNOM)9)3GGKC{6?ERs5A)TNL+8yAs10YHBK;%m-aGwoQO{rClN)@@g3Y?X zs!xeU7oEn5d0^<&-y_uDV-LpqM}U&bqr*6$ck{5bov-GuIxoY9e8=!>U8g=^cslyQLfaRoE@`7U64vZv!5P(r&T)skr0=<^00i z^j?&q8ll`aJRMI_O)AKF@u^%h7gxS1cjQ>EPwTs7)XAD>{F3+_P5|$%ARqTR2-1mt zF5v(rF2xp<7?=146a5CNwpdXTU$wQ48_Q6zO$bpnU}T*#y{H-3rzumE@d@q&b%jyFWr+@7pyG*=XZlgxRUr+zn>8{sCL~JxO`wIhqw8mYg;50B=e8YTyzhM< zf3QDjhKQ=Yx-=Wk@`uXRQFq`tzhI@Hg`1Yeb+k?ZfNUb3JI8K+!N?y9WCq|AG zWfQduV1^vGMm$2e;c?vk_xMCKHF7f*sNVCA3l)5>Z~&_MU`F6jrJ1mlA5Q6L)wtG8 zGUZ3mk}P1?xtY5K4oK7sfs!|YIJY2KaaPo4Kv_7s%0$R8jHmePHKJt&A0k|3d&Vyu z1!C4`fy8v5;mR%LFx$gl^%=PacUXOIXE|wO*YS&aKBsER_x?c)$DEMFtf22+9=G@S zB|&@Cj`IS}iQ4;pFukSn;E(K#eNepK3OqQ8ZFS=aZDN2rdaJNdtUeVDhl>Z%Rbo+) z;L-cX?4{D6LNOUZ)()n4CMRHL=A!Op(n!77K>SsnaB+-P}$Gn zM_5t_LXfzcUEDZ9v6>(kY0b!lPQWm~EwtX0@SjG!uy>5HqYUs!h6Bno+H&6Q-OZl|?l0s!uU zZf!xDTR;aE1U|6vM2VC{E!!mp6%#Ayb~)2fSLh*vaJ7kS24JDY(yVQbN8BBFr&EtI#SmU$(-Adz-Q7iH3lad!y3VIhE-kD= znV4xt#u2Zm;iH()6qztRdsx08kvF6)n>Y{DR@y?fGN=NAtTv5rG>P!s2>uW6zfI!9 zU*K`2dE{MCK*h;BbKf64gqG-EQ6vlnH$In4>{cMLJ2Q2hAc=)OPCT_r#z%Ma@tSR zL`WUh7V#nM_BW9Risu!mnK2<~f}hAQVoX3-m*5U^K2m*E$yPf^{k#fvvcp?YtS%`C zbQX$~FbVSHy0Dc~=_zbGDXeNk9THWH>YJhGgZcyHZFFII+H5^-^0#fXxDDpe?|~}m zFw&`e%srr~nnXZoUEcHn>lKM88Q=wn3x8{PEn<7X2ND zwqh7NM%;vt$IzUpBQey7aA9t{d=pxe3NwP19PhJfS~?G@7Nyb0s@qBA+-M%+O@nh! 
zCE}@>P4FU3A38Rx@&}1&whHDctHdKRTH}^f2>_lgrc!#&1h4gq>U-rsa6DVrZ`QSU<} zAEo;lAf^U8DM0jr=x3>-h~+1Ng<&F`M12#@jXK=0C@NDvrHhHhZO!hmg;Iw}4isp) zeI$o@s$ANh;N#z&XHuK5a$}%!uED^l5D_IzA<7tKj`^eEOGEe!KG7NuhDO9g<+StH z3k-#0SyG{0xL$-@DpLO3gxq1^oZgmqiq}hVd3%b%{B0VzC43l0pJG_uuYiW8y>Fw} zc1p(+?u0f}S|5?eWZKy$(zK$q9>G?n)*@?k(A#${c6XtIZF|L}V$lQe(X5sB9dEv-LXcbSCZA>u_XMCo5`C*hy)52UYI`Ld`6&Gt))(37e(kGNO95i8Nx$8QsOSYUhq*vr-9X z0v53p?idOUqvBKgK;1|uam7ei>_`YF>_T-!Vddp%#Z5znoV=5r~ng%TkNHxng33#kXTF{MB=)^D) z9l2=xWxc%0CikQoejgLEU;tulJ+(AtA*H~i;C=6T#V&9ds$K(MN>MDb|62JAuk*4v z>qg|yZV$I8EZJ~OM?Sz$gpf_b!t zN15ACNS#mVA_(SS(w?ZQpy4!0bDhn_*i{1B1JAIWVx&>f6(d^Crk z;JpSmXh;4W;ta}kRH5#my&fP++X26`3mr&pHdLd$1S#01CrrFE*Ft1DLC$(GMXCSd zY2y-ETr#%fGK6kJ%_Eo_qZeGDhgnT^KvaL3KSu3M!<6m(!x}|cn&e8TN4(uibZZ^L z*2At#)`GE(O>}-N1f3exIBV3Aa(X+Vjo&GyKr1x*0o|P>kznFjvkrO=Zw+A6RoI1u zq8F+3H-sir$pg!7DsKQzg*z{xWAzvY5>c#fl;Xi=i712?5@%I85KBf0n!wdRaIAl9 z!cx;Q(MVCDeo-*0${m*q>NA-^xeAm#kvtCD1X39+1>AGmAC{z@_h=tOw<;itts-vX zNd7dghCj-~2vhztt_5TSv>9?oW}L&;*DLj9QiSPRrd=Vtz{OMaU-fh0!!c5NV|cdU zuhOntTH0{|&felt*cE;W{Ni-#clnmnU5&A-9`RaiOrYYm*GvA4Kf7Dvnt9kCWwAY` zV0V=9DSp$rKD9e_v*OQ#`{b`9Ua>tL{T{-0zY2Y7#@~yu-4w3P`uikc$Ee8kHX|W> zP~(0k|8|~43No+m-JQBYH}iCTuYX{7>S`|fj(;D&!#_jxLrO3Lg8Tb-E8+KIdgZ45 zgYeXSk7r0zu81e0^fd>*JC;8PNE~__(PsVZs zk$CwB_Gi5sNfTH|!46O z$ZMh>QdNaK7z18`eiglojLg*wqEQ+aAeK8V!JEac|Y-cEjwXC5Zgq(}qK*n6Caml1wy zbChhOX>pnz$p_20%QQSe>L#e3T^*k3!wNT*NSxfr^b>_CMJLHF7w-qUP!^bxuq>#k zc?OhKlEoN*$?6T`>Fgg^ZAf?@#v_WJ&=%YxiM1^3*76O5Bt-I!eh7L3vQJVWQ&mqu z_B-e#Dc{J4nSN+TG*N{yYoW67hy-DDOaefIvd-`&LNfhcg4U~jDk!h4Tw28rKklD> z{pSDq)-PuGSsTm{jG*eiB<7fAbx#lS3o4OtSY%W)yoy>fHYN3<0zMHx$_w-QJ|;-T z@WNcpB`H$jizosAY9ZQ*BxU4kD9KVIiql1u`duncthne5+zS#O)FVwLZpPtsm2aLM$6C2j{OttVCmMbF=iz3Ja6;?F!?6$&%Bf0E$S5gS+9(SEoGj;a#5^CgZ`x!jSF5;dTLcO@tkxqpI0VbOU5X z%dR^xY(-JzXp%+aiH4ZUFGoVT-kZo;u%eN2=OcLe#S}&QJf*%CCq}ikfSkynEi^Uf zr=lX^Z=+U^>(3mF86@;mds)6}oAoAWjwx_yyi zI|X}FlPKZTSP=?BB%q0P8X_IE$8H>`iB5h;4KSmW5DXcd=h#N5M2v>pdOuJ;jX;Uy zqJaOv3R$M=yQ*P|2`P6=P(m%%{2gstrpNKa|>J*Xm1_jA11i_G}c)T1EN?3@|~x zeWpA%X%Nm6{yYTJ=GT9Xf4~3P@bBVR_*EXh!o%O=;g@kh)Jz02<$3L3TD`O_{2I&N z;DPehgxRd141blkD5ygzIU>jiNa<=`qR;FJF*!rOka>A-D-RFGa3P9iKLfRm^Q7fs$Tzs=MTI(Z5RX*-a@ z$V^142+D3VjD$O;*97WW?Qx!%bE`zH!{eWcc3#m0$}rn#{mtpO1K8B z69rt%RFvejbGB0bvJ7Ws1KLgM4*V0eLq?_;N(*?RY=I>(U~QV_AvEaeMGuU7#DdrY zC^OY#N(Ne2n=Gb=zIufGH#_7C=StBHEsqg=VPI!bWLPvcn(% zN?<}yQdzNKfP^8FW3ga>De(>F&dGLike)&U2+>R4r`$~qbX*i$aHy^1URHF`#de2A zr9tT8cFuz2;lx>Bpa2cf5+iVT!w%bU!n?T(Y~??&yC7W0dKT`uH>Q;UKQIyW_fbO~ zLB9-0CrG_8nL0BDOpxrkX{ej%tkH(_K^nK{XNf2EOMdlnuwxd0Ye*3+sT57`UMJs4ofAmJrj#l8JUl%TVgrB!?GfV z{vprg`4L(4>$vgfltr|8a}QOVV|gUOQxZ)pI}jbnI20&{n6V|n5GIzGv<{nO#r;M^ z22nP}luEhn;x-ehcn-+B{M7cLYQR0VzmonGHX@FcDAwAMOqicqzb`cZx~tM&_IqpI1WJpX`J6EAgt_aXb7pKHM4szcA?T@;%`P+joCJL;Pg~Dv;$Y=3r>P)!sR7? 
zLdD0IXqg?W;q()c4Wqt2no%3lo(uhO+Dmgsp1%T8wcEymAktB5tKlfYGI3NX(Q>bO zzp+}TZNrjHbWYhNzX3)oiG$v-)NF4?11xLyS{p;8AjO{^g& z;Wv5sEgt9%)v4~o2k*p4Lbexj^`NRzBnRys2py3uB2=W%Z6youR^TO$=q$_~Ln0wo zo4b-o$TfuOU*-P|_9{7Jhdm~3tY5K)dL~I5S~Cc_V<8}9kV)fJc2QdDAn0zT4y~cG zKh2OiETI{Ou$N}NxwK??cG8B8@E_T&#+5vQw+-=U5-fg$74%x&9qz!AVO-YB;EozR zT3~2xMv$JWs1aR8W-SKQ&FZo{CPtPxVt;+pMgcM6{?u1>r(!uXV~C-3`kR zl|-QA7@!<04AFMb*l`)?j*A6^ zjA14vMq<+Z828GK{QdK4Kv#APOiVFmOnw-a#0O%|qr+RnatcQ4M%Xg78#|MRHOtgP z+p%*lhUgETg8QM^NQ%{YHbnXMy8T)^49%4 z2w!CcNkT>$<|>75%Upj4&wPT+RTfViA#**9K+a(90;3L9)l|lR#N`=GWD>poJJ9$I zCspZ;vLNp>;D*?;W4joBCdOWaH=OWfj0HqypF-TLT+heTH)mu~U;gGS_44qwcn;V0 zAT|RIe9O7vbjV%jufqn2_c9Tx7fX84{-ZhgtBTxW33dPyo5WjXY=P2l3H8Aa2!9(- zA3*Zb6yiQk@AuHp`+OuGF>W8>nl?cd*LK$-SEEhHbE*8iB-wjaCs_$~hWy86{A%lAjGm*bD^mi2=70+PTuq6M@!Fa0=#mwD`CKX7FU z_m&{R_ItmQj)$#W?c>^WHtwUNm^3V0 zn29k;q@k=U&a$J?YL)g$|30+JF?XCTZJcrRqlx;dnT_IKO8jVErgN}ti;0Ynu%?5K z2PmY*M4+CC{2_Qb47610Y--&O(|+Ori!a3+BG7m#*#J$wkpo(+ZBIOKR@jaM#`VN+ zXBtegJS?;6Kw9$Iu@-bz6wOx}D<-;zMUC*E>g8fGahco_tPAL3r%c%elN-&NMr~&k zSxQBGEJq8isx3KF0TxZ)$V&6@j@J*6^LO#U*JDm+#ykn!^&podkYa0|nU0EDAoH8q1P z+^yL*N6{-y03!n7pvN#tD9W_UwlzVB!kENLb;Td5ztDCy z1IomXG<#P=bs6imRTr&YYV>C>>n*g6XhkM!)rAZWp)~UcDhWd)3#37Z=$A+LmAZHl8< z;97dFs?Bgj1!Me9vW%J?ZuAT$9fS|#nrLFHNCdU+ebto4; zhQrbmYJHCc03YX7(bN73&m`T4A-nKba2U==cVxG9wd0B2Hm_;Nbs7-TkHeuDWtPi%+YxxG>~W^U0E*vdlw;AlWs)U0#*(qL*K#nSJ%}VVLBL?s}FUSZ_5)0Lt(ePONF!SSEznM zF@~c7djrmhli`zTC&L+&vNZ9H4PxArEVY9Z&L)& z0ydecW3SCR-&XPdeA_H3ds)-1ZTbF-2{&c@5_l{1OKvvzyIdeRvOW&=28?vp)2OBa z5et_sULu4vY!TqLIjwM7u0y~ZDWzPX=qB0JxKtUJ?&vLHFuoW_hG79oXVSzu$_n8L zc`9mq63IAP1VpsqjU`L#RF>h~RDDeq!*Fy!BU||=P9`9y40r+%!1ki4)tut@!gBmg zEQ2uMQJ!t~Kjq~&H=+5kCiC|LxHb|^V1nyH5y+UU!svu4%DBClXE^r#yN5^SCc=wM z827iMyL7L`y73RsrHQCE+xFhX>q+?@g~)g-N`n2$Z!E0*8(?>kbd+S+T?iQfN=seU zuHfueqbjBZBmph_b39Qisq=qcw5{VA_DkiD36F7$TEJs zmI2j&ot&RCEcXDjQs?r=@^kqjofvw3EDL2^7ExV0gU%vXj$8Y-5`Mm+NC|;Be7lh@S_uqiM zFhv3S6BV3fBN#GBlVt>_);hM#Mq)vARov}J3u>T~;R84f%j%swDuwsAc==HrFe?P( zFu&0WPx7Jw>XSVCmpJ?(K_ng?xX!ufHPUmnheuVH@s%6!oWYaPA`T+f^k%i#@sXZx zWZs64tNfpByq*<8sR-bdystt7N^qA(io6epvPW4H%$$bmIN{}mX-hIi03w3G5W!in zDn3-}!i7zzD^xbBN(CLY)5Vj;`D!ENc$24y)UkC><{gr4Vz-T48-ZBHv7a^LtdjSx-5$Ag=oIb=wSt^;ER@F}d$O??DUS(4vdi&p zkqpWQ*pcLo*i~M9vkl=an0K3+(fXN-FOim?yZDl%gUJ?ia3IOsu^=Ah!6Yl&as#u{ zM^q~Ng2z+`C!>s7Myto3K^=W<5&b!V&McWU7wG`aP3Svf!eut_yFD(E$NwbeNtrQZ zC837{Rzl#Ft|8Z4sL0^Y@N5Jx$ff?2ctOUJn>CzAb@BFSaRh~v?Eml}QDU3wFSq(A zc#qy58DLOh^Cq+V8n}&2fXpZHLM1TMUumH71GApd7VgS^QUZPA&K5dCk(oo=h>2hP z5IjJvO9hOG+Z!a*{09JU_%l3=Oc!tbZ#!KxgMAl-f!f|gSlm+rSMTWM8>D0N$8&zp z;3L~SW76P!#~Nlh>FN5`l>DF5lXvHrzoqg+rO^~oq^%Q4`%~&WXjjtkj6ij5?j`fJ z$(56I7@Q>J<) z{REV#v>>yryV>E37n3g0D?)828}Z2F;)hn0J|P14r!o2a1fcWSP0&aT004;iBd;SPI{~D*L|?y#ga>;zD41G z#NE-E*b8SS{C~FHy_^cs)u!g1fqwJANEr~e7-P)6$;=FhEU}^JvfIKe{Fd|cIeCu& zLbz{XhY9XF#JwpXu#1D^G6~kazsuVrPqSm;O&qWxJ|Nc$uud@w#Y0QYAk!HPM9}0u ze9LakT+8tKVQycEy^*QL;nFX8N{orwYx zX3dK<^i-HvaiXFE<~G9(OL&+Gw`cH)-o#8W!7*E9&_$dq6I=Z-9#F1fg?nYAn#uVQMv77%{1gKxl`BT^P zKb7yysPB`xlemKUtqc~;hqrL7=-5S*4^*~aG4;%%cRA<@E)hEPpf^zk-soF zhTVp!aU?I;G6_2N#e6`V%=?h0_TcyP`TX|=`!&iWFGJ0sFzJchPG*u*8e?u`18-4x2j8^3?1I;G zxz9WKmjDW3SE?cbmIR0y>jBCta%h3BjBoqqQ>qXXEeZp`%qz}xI6d}I~W@ZT}Yi5lp4aN`7lNr2=k<^?;Zb0FHoxQ_Ih zJU)e0930lii{Mvnd|`vnbNI;ZbKsV%=TsReD(Av~#&S=F{}o?Y>diO@%yVGHT)qBA z-)k8hsn@Yc121SJrY75yB9zhXw&Y!7!vfr*h-YA_FXBagpwBSh=?$m!ORB;hHc(cZ z9EE?2KjtVsnhg~kRUsD&HF;l$ydNQ~@@IpGD?GG#81V2@Jp42dU*_S{Jjil0Cf=(w z%irOdIO=|%XLS4wE5>so*159L{(yJ?0}tQk;XmPklu*90fxJk?j5r0uJ^E#AjexH_ zN{JJ3F$woNgmRQb(Qn`Y_uE;dr8v32$|Im6J3ap8zNhi~uk%<804n&)xubb!-xR2W 
zpP2)P4;-7{Gk^Hl;bTV+9IVV|Ne z)3Z1m?oZdEY9KFA8MG2IWH?Pl>hOE{H?vdU8m*H06rg;P}! RDC4q0eCH41?1-~m`2SHd7ApV% literal 17216 zcmc(GU2q&%c3yYS{{RF*kOY4uQ7wrQ04;YYYqj>8OKL@tgd~fC6cSolV``@wa~r^b z^QYSlkQfi_ttEFY+a)Klo%Px)mBCF^v0SbsmC8es2bcZehxo})dFY2c#8s)%Q&N>D ztIDNgSKs?N%lb=RjQ{0u@e+>U z&(SDL*=?(Ce(k!AYp3mWa`l|ezFa%sDbx#co^KaBrFyATu9rKNdPVvQ?P_PDJ|XAD z_GIT!{ZMDBKGiu~KirwFPfLHPeWWu}pXnT}AMG5gAL|^iAMec8XFDhACpstVCp)L= zr(|BaeY*2V{gKY2^+!98)gSAesh^SlO8fDSTX*HW+J2(*Wc^9odc{%`YVy9NChs}* zr<8ruS~&C>-p5*6u%rB2Ubv3m!rR?eue*?oD$AE|zW)5x%gdLe(wZMOnyYKkBu@fw z%WtTb#^lLY-+sCA&b5~tH{N`6c_AO2@WRmV^utEG=cz`o>o@NBt+n+qnqJ%VgP_sr zDZkwaz2J7_bi2{vZLiagJ9)=KIVxVhdGp%x&1m}SwZ+SCUtexqS$w5&^|dR@*`C+0 zy?Zk%UVrnISFYWN@;5Foy&hGXUc0^GHE#z|(eLy&T0vA1_yRc}Rh#R6^LAsk)%K$a zbKdH#1%v3cx#+k0jm^IDLcif{d9Aj$f{F1>goD>T`pRp|d}Fi5#?9qxZ`?%pv7qDW zu+i$S`r7X{eR-oM5j*~u$HhxH0%cp4Z;@>3jt(;9s%qlCb)iuAM4t->+DRmgLCe$LXr*VBm&ER@c9aYEfTlGW! z)ICcbSF>{0VYzxmolqz5+ki5yUR9^mX}~_B9#M~C{*26cO+BX0$c&@v=hWlM#f)Pz z^16CLJt-r{<@zagRv^!+r`0pK|Ae}(=F~jqpHy$CXVsT*c1oR7U&h#J{}EMF=Vkt* zIDbz4l$<|?^XJu9i?^(dQtX`HmPpce=dnqcsL4xzPi@dDo!GIX15F9_VQ=Wcde ztJ|*EZw2mZPrD!^H(ZZr1+}WVQ?CUV76bqjXk747jT=} z2XWO>AY=n~{&IKwoZIV%U^=h8aKT+>Z@As}Wi5pE?wi-&G*7I#SN&CQvmMH^m%v}i z>cUXBRyIR_Y>j>Gt5!3_gZT_X2CPwMZ)L-8hNIQQuQZx!I`&Qrwb$zU8JJ1ezNzwl zz)}2cd8yuNj6A;m=;_Vd{cwDp!jk=7s~frotlL^0LoHG2D|c1*I_|SI+)7-JL0esO zxjFHJAGq^GJ{It1 z+C$pnem)Fe9Uq(6#GRu(fHd-i4&t$RyWjA`^`08r>qdjZuhEzfdYiiGUm|IobAbWh z>s(s&+5xVfefIVp4^+G`B4c5RcFQw#&$Y$$;o^CC!sxvkUH{HJG{6NRdA!m}FYqs{ z^m^?}%X-saaKHSb%ZFxRzt(MqEw9}g03V(U5b@r7UFC|j1RWD0Ba-Kavde6l@>g2~ z(d&W@xmf;*ZkacFB{1tD0G?f%oBJnP$-3{2ViUYy-Zj!tEca){lE_&3Yvokiw0$AH{1Rgx`5~2d}hOh9v-sP z>%FycXuNrZ`PI$9f7n76F*bfDiG9@zLWqh~FODF^WcCx0D@p5mD+qr9a!-5h?GQXt z^Sf&hLp9)r(y(^J{JxhZzqfpiWpsjss>%18L94qaBsL~ijflVDhnu=P##L=Rb^dRE zYF4!JA<4324?nMN_u9R!uXQ1(mKL0-AR84GwLGp#%QE>Z;Dlzv6&%4|pb4#A`++sI z8n8Ig!dyU`lQu7HLE56Ur50zF<-8(ob?BgHBAgsTc){BJDg$9#?hVpFdzJ3e03~5zGdn7YIj!UAJ{7I04isx0>=IX z@J|fwq4U78t?neQP9|4kZx%h+#YPvCm&XMJ*?!-iaMISd21^?9*We?cS?mGFBR zJ<1+x-LBX1U1*(=RN1eRGir5iv|V!}isfTPPW^)wHX-qENdprW%*Pp+l*?4Bpv;@F zl0iYA=RhnA&Y)1ES{meQ+ps@R=_9zIew;QqBqQwr5&OpGt)!}5KZS@Q`$7TJ{PwE6gr3J-0jkg8 z>{ClWkDt$aDR=-7*5=mMo>#-;T-+*ypXV(YOEz0ijvt zs$D$be==9KC+(_T#_x=M%r4tg&S2^d8j`gH$YO?V$+A3N#}OPu117Ra_Nxw9GWYN7 z-=M2y50^YYps)&f zQ)qnv(G)sAgkbvEff#@=xz9fMFAwai@Kfl+tU0%xF#iFsinpEK&xZw`?-n0elNPQ@ zzhbT0s-(*I;pOn2d-iJX9&uG2eCZ0kwM0|)b#Dt+m5MnZD${epHL)-9`7uVVM4K#zNzi?^E{6VgE{uW&B zoY&osszC@JHXspd?lc&`-)@DP>=JXae;4l*V0yc)ud-VfA!l5P{t}uM9D#&Va*jP^ zpSEj%J($_Qn&kysKZU7HYP{r+kL7+{!4Z59joGK20$9mvfSvj$LuXhJJ(U~g`t#5( z(1H)_VPTl>y$}`@$W7%wq|PgX1##C#N!b_4jyXBcU$k0QSpGytKCyR-;%HmL;%?=E zMFPaEqUb=*FGACnK#XPY{KItCKiH^Et-Sq84gx#zJA}I3X2yY`^O4?7ePXl|7~dU= zDGsy5+{b~;Ow&F8GF)NV8qB=18HBwK4E31BvWhP@AKiHx{Ftp;AP#Rk;YJvVF03B< zQ|WVrOh5ZE{wStm03y#bJV5$Sd6e3NHq3jow;b3M&Jdgmn<7t9`dj1Ns;4mX)11DD zpUA!)*<1TLmhP8u><%Uwwm2hiTcfrai&pS1)5n5jSgw`X=uc?n(JrpGV&Yj$j6jRV_m^ zP1!Cq(PVDQp0x+n(c+2G*#Ft5hlo&NpdaAGFwlm*19nmNZAUMIfgt*Bkwrk_jXYUn z1B`PDXeZO`mQ2qBJI|gnuNnpEtKhoYeKwpR1d1t@b2!Jqar&V#GX)MAwo10$C@U}> ze#ww!oJVDq{0DAa!qpL*wznM)95lZgqlJhj#F4SW+Ob4I8FG?qjI)@4!cpEq^&h z?tX_jNMy-@&Tx98;CfK%&=|Wk=OG4e&AA=OF7BXbLO~ff`kafXm@D>zYOP9qHk9aE zE$eOvZqw@$2;5jDVLTe-=iDu?ea?-IESF&mXxg3*&$+FdUvtG(h`Y=STpDv8%(Kl6 z3|7Gv^jb1>u$L!DDy|E$D;iKb#P+0tV!g^#BGD$!gc3 zs(x=4zZf7EK#gAHiN;DCu|c zUa}|fzf8;~Dy@3~5-3{NF&0%v^P@xBe}A(DFpV{hG>O@s$nHdTfA5Z9D&`>Y!*dhIGK*tUds`=0X2Ashs>0%|gCNCg_5(jgsY4$5ExOvDyL2M*8*Uk0b9XIVA zBQ)1r=|RTrK@6epHQTQAx}omD12qG!aC;1qPE<~yYYKl5*@#5*d|pg;x`|PlvWN7v 
z!oYvW5fC+28B8A=ExbF!;q)g-VM1NT^~rKL5|{J?oxztFSWIO#-eBu_B^2^9iDn7h zhHKcOpesnc%$galgug_`c<)Mm^!*tQvr=fsWTm{PDa*$w|7ZplaENa|k9AY#Y$7NT zBJ34H8bQXys*Y42=Aaz&r>xzADm<`*7ssxP5A5DjSQLNPO8niOf~pP+YT_ejr%2Ox zqXg%qh$H`rttQo>mJK(hG%W3wk?pe|I6Jh+D#3q&`}3QixvDx0_lGvygqjw|2Uglf zWdnh?jYEKY$r+yyE%p(P3G*yMUh4a?defoDzQ=j7^j&B;*&x zVoARHnZiUj#E^rN!{pG=ee1RBXuJJdS9w}{Nun#B8VCMb9dAu?aUBoJH`bb1LI9;Z z;sHK1`**q7^!w1+6!I}dX0gyA17OdU?R~-IaS$dBlQQu;>&UAhBo09qk1#BmSq~zX zy)c{TlYo78nei=%Z;BiDR+8(>x^n5P?9E9g$i3iRR!Z_KUfaFxZ{O)@FjpFU>LWjj zSHxn+R@u{8U}JANO?4Wm0iW6%fH`);&&#CaG6GVP9;X zo%7cbkoZZ`0>H|02G7|DJz*_vQYE}|`Nq;~ORu>9`}cPL?|=Q5?wr~4IU10i7Gk5_ zwVM03sQq~8(!iOkNgy}M)BLJWj0GVxB9G#ZY9rubRK|PLbfZG3j4f*v@6i;B$P?s( zZR1pKX-0nbim+?w`(K1bzWJ$SRY$>SsM^%+WsI`NRCAQ4 zdXFY(oy7Me3iQ(k0)O<+pjkMsCwYDt4ZK_v;MMEwYO$;Aca@1!Ewb-v_T~D$zAQ!m z3-(EroR8V;n|QTZ45henz)%eUKuH;kzQqA-uBcm&>iuf&wOI}!& zAhjxgr-E^T1R*?|ra1kQsoP zl+i%~8ZQkb7|DO|LwYVk`!b2ROfVIPyit$RM)qE9!@UHYzf~cmGJSrmmgEbiJCT__ zn41cwY$5?PwpITqQa2?UI2#a-W2xz=oV;|B3Y$(yrxB0n+o1Ec-}_zu{kLyu2FP@W zO^9ZpXn5DfdHvX!;H{-_e-T4b4jD5MT)083!IY;FZ!ue-$=8~*_p&e_`TuYP6ie12 z#N+HL7#1dD97vI8WqcmnR0Frv}$IQFy^> zHn0LEcbiP>#R(?Xl|Vp_ZS{VxgrCd%K_y~h#Nc`?su_zGr?avwmXae(jPADE1EGN@ z5$4)wIi&!Cn!5~s^|4?Y1Dq?df-f>AU`Pb%3rBekHM1=A5#<0Eky#e_eMWDCG1HEB z7xC9lR7sx+-E|X#y3gh+rxi_L_c%LJZDJJ2GaN4%3i=Mmg=V&SCP4Pc{$IiXlikMs zu9QX6BNj)}sTe7bb!2P=5)MN$LO9eXiCakmYTF$|+29xn(U6!zk`YK@qY#r6oCRhy z3`sHfkH82BisYlp2=V4sMcS&g6KFY}L;7y=LuNV-4gNI6=F6yLTu0#hc7}eiGbW&w zgtNYog#J0GAQ2^%mE;A)S6wt><35lYf%z~0)!?w9sPy6c=qj}w7+gAtu0Tn2UULvah}e>jKFY@;Yn;B2 zu1)ESLa9ih#Cs`_L z*D?Le+;j>9tHe#8h4>gunoTy3%BY+&=H+pS%fgF&*FK4p(<_Du=X>wJ~|hf*Q-Sr~wiOAqr7q zpcSEr!mt1+2y^HUFqh$nqA0Q7##*LC3Nev%Sd}GEF`di~BJ@sg96>vy{}}DjuA>;n zIJ$8RG)3B)ZX9L1fmY5a^`;xg+3wCV@Ee{`6I&KS5--WTle?!L@QPuN>5Y?JhAyTz zPVW@MM~1~X{dE*!6+YuprmDinWX2hUTq^gN(0Ux#kBdb7%am-F8RZ?Xar-+eR2T}T zbe4%%0p_E8c+6ZIQ>*dZ5xLTGPPrtm5T$1AZgy6nS`p`f#*B+bv=2WfK>Y}Y{84c?5j(O_0tW-`AM)EV7Wb5LEgE@C1D z^%Lk5E<#D1NozfY9-NbcMTTq9x2}mo&aroq4IM+hiKbqag_-2pEPE+h^-(q_*a+v5 zDNM%fEYC(NoeQW}SW6|=k$#fHWLr4$sJW_7u-qxBYx73B6(8=yYjTSE@z7-W z$m#NxgoN+KIFQwsjkO>zE5csZG~#=C=u5bHuhq#1ejRKa)Nn$vUxc`4La*pBpK_9s zeCIUUvpmCj)jlKf{h4vQwAKBL5+su(P9z1lJRrtWNnz{fQ#&K}BG+K5{>Z)on_~zA zHt`av9oo50ey0d)q)3w$;Y+DL6ib95OolH})FW~sH!PyQkeWic8Pa8f0YWvohre!Y zg1gocUejrUzztEG-(4GV334%pFmIszV0jZIpzcjrjQh1PZiJH1o81Otf(>3t9W_ATok>t6 zG*+Ad;G~ScaxgcC;|h*A8aAE)$fS@~Qwe;N!5_iD@N5#9G(~g9dK=r^{bq)lk`w}% zs}y)9dEkUSpjn(nvH7xZt$%=J_xaLAoQQ(92W6w2!~L0#%!`hKHu@Nt0@)(40y9Oq z!F)`P@s}wk8x}{fnSw$*Mc%s>6=aP0j$3%Xu{M5}XRB=XES3Ym$i))rRxa^d1w_V7 z#GKKlP&6e402>G%<~z}`i66(s;gtlDH~lGrSgQ#79$zsRKEk(MdxZvvWGCIymy z2MywZ{@qZ&$7{jyL2Kv0x5w5VTR`Sf4`2LA-1Y^CekMUQGzfB%NFpVSoJBJ=j2Q=q zjpO=&MG6NNXTe8}XP6Pnq>z+Kj^P5BhI58I5=<=STVh-vMnuQ; zP^)0RbOUM~u`Q#b*YATlut=fVj{qXdv$S*%F+`J|QfZ*}*V2_bNtNFC@WVNl8D&;e za*>EB8bUaIr46|!-n+bEx=d!~Y+2{0F;8)QdsjBqd*b@tHki?k!^RfhiC zK4QOtaSZ199&{Hni{TKlI>Z8ki9iTV^ROKstuUGfqU^IB3&T%bOAxQZ0Sh9glJ~2zn0q|NJYoq+xDS#oq3OV+Q^;@hn(%qz zzEbx*QdNQ8#pqrdN~@}31_^$UzMC_(@7qlC^0ktpy6X@%4CH_Vql@B$GwgCLB9)u`)0Ru6CcJ}O5-ye!Q``ChN~xji>{WNy#I zbl9C6JU(Yk9En+tZVXVY)b8D>RhRJDZ3Xq*{Ft%PU}-l6JIbiU)f zW4&vs(vI^>PPb})pR=Ar5BR%0n7a;EHv00sI10W#pT_;p$B7;!GsW`yjJv)`3uccO zG&eYH>aNLKsTFSXJVt-+)j6U%aLFo6OUe#y3t!0OUM1<`F>H9B5j{Ew(|?Jkqa z$U5tPj|b^LKtoOs`b>`DI}2n=qN7W_@HJY5EdTeF?3|v#%s6@wfiqN<*Qby68 zH@v>f;>)xw=bZ5a>D8u~J}1Q*DY}hm!|&5lyX!z3JV9Oh8RarkBtRwgbVs7ug6g;+ zXatpRGXDm1Z$X=&as){Y=55T*5CGF2+xSYtfO&$N=|AHw57g%W0Rw+TT#DsXPQC(3 z76;|~2xdSDBW#pgT0&Jj!Le__SzJcfBE;Cz;-d6p+M@m~jy+@czS@Y3>cFg%jmG<% zUfZy){zJ^tB+0RorWbhmvuL78kP1G6GoNTm)x8uC>*qM*T{frKcx+bKC^kNuud>-- 
z!|1e$Pcqn~#ku$$p3$_8%0L_Zuy9+`cr@mj*gU__%Lih3akNL!d%QeVeX2TBp1@qr zmt!hF#ot1i|5XQdC;+-}R{tmbNTwJmXMFtCZmq;0E}&!sdRq`$KRoU#q99vSk7)`n zO=^!uxfC>SKX*+?l=*Tz}1A(k$V!F!XYr#ar{~Pf z?pId`JIS0gXUq7z^{%g~zWVC-eWlqyB^UpU)c%D=qa+~2+mxK;n{+sX9`ZR49|VkHj|7r1)#193=P?&vyjTP*3N(2vA9=!!&Ua|io^L3bd;c>RuaC~$Nf=;*lZ zmA6skyXdBB=Zi*0{yOPWGBl%ruZmH=PPo(}!mIfpH7Wrms@KVM%B5vYj2dd{Fmo+=ru>I6 zzzo5W=t>!V+o1E4dLI!X+?vf9!2Y61eU?rNdHQtT0D!GWasaH#KGQsVZ*2OToD+ZwCRh63KIi{}Tb z7_WCI=;f+LLjKT&nrieD<9=x;3?Key^n2rEq0K%nzN$-P<&KNj<(924ZGvn?BGU>V zHCm)=HKv@ISEgTCGZjIO^7d!+-;>IDVMp+;-2T{{OG@*hTQ(Q{F4LM*3(=&=7f33D zA7LOWJL5Q&M}ChyPB^8b&mS8Oh8@0O zznAp}`@D`&(BW|m_%3*Z4qw1C;B^d#VGSMJS+65J0iCxUsm6V>l2*ZbkKEo2ofz`w7hszY3GeZB%mKFUY$O$7L zSBMQcp{FNDCPw1A{8e54Vr#s-i7#)8mmgRyKk$SiG+DFt*Z0irxmmv~{<-p}%9Wa* zYF0da^}e;TmUvkk4}V#0A2Xk-k3TS&H)Mn&V?#;kZR6bJ=)~x3eoQwU>mTHG!}H7y z--0h*+`tz%0d08x*Y#t_f7n7nPj&(ug*-cE6m*5n80$#V!B%DPtQ-q5A5?Izm_O-Hj(PEJ<&6hy-u5I7e!k!f4nzhf8CfZi|5RVdR*^hsIEj&#D&;Uw8X=r6ruems=0mec?& zGwFVIjZkBGS>^kfg#sWTRKX`nm#r()Ocdf6F!%e4R|>Z z$FZy&*Biz}7!?bZpcUB*5KC&HcdsYl-s~Ljx1dj1EQ?8HxQ~UaFnl(|v8Qkb$N~0UvO*J|B>cp$_wpCr(6BVJM z6MA|^IjxNA9lYL=(A)3p3+7vIys+@X;`xOa@6sPx?^)w5-F!>;y1wU$Osvs@AZD01 zbc8~4Rre%=uv8?B)X+{SB++ny*S z7A3x3H`H&|=0rho<7zvvw#U^4tLlOTh_^16*X71_+g5ek?&~t9>Svm!o7QxW`TA8| z31qz00GhmMu4&P}B3Zt;W^7*9?fXKVG1)QE@%G+z^|l1Q+y7X{rcNhJJsq%gR>A+r zi)w=SBjON25;N#d?OW*E4x@ZKq$Y?#9iSB>^3>(vgMvUL(GTma=(lx7^hToaAopTC8`Xf37#8FY2vW z{*ivWg3K3AgFWAf_HCDu#pwC%B@J7rhfxd=jHr*AAl!7Cd5D{yiJQ}Lt_n38?D@bt z-u$nHo67Y0`2VU_`fv5tPmM{S&UQT6Mpb$YVF581`dVlurbMTY5U{-NEw4)Dc;_&X@xIaz`F|xZ6la6##}P42Iy~i!tgMQ=&7C zndC0yDKJ`VIDGJh5K(e_)A7N5099elkISP}L^(hP$%yhGto3CW^#|e^(RU;gkOKwe z;D2c?NfK9YelrjF(^#S7P02Zs9^>gZs*EatdFUGlnEuw{FrtjF4v=1t7HxX2B*8tG zY6u$l=?bX0_O0Blm2V|Z{~auwdV;t_qVxGQ`b~pEyCph!A+6}%u1o`?8wEHOQDKpV zTtNCUwqko&4K(}_9^bs|lvKs?EA>buPxs5b=0Agiig^-5hAqr>WXh}!9;?r-K zy!u}w@`(D9=#mI`22($ZPhEqW!*Eat5ov0m^kTp|Ze2g4a8CNDJ`n~eSN~p2Y_b3+ zvJimM8LM5)7zR%W=h%G0AQhw%ODes!T#$t)Q@OSCRC?#9L|2cA|CRj*9#7&EOZH$> z#{uhI0qd=KM#wZAhCxCY1lTSJnM@9I&539R-MAHSa<%FyyA(S-frj!;Ca~f>>89gE zKgk$hmq*dJ_Ry3zH6JxYbA9O=!fgM0zL69fqvqpsIU^eoMHPb#K8*$qBeD_rSX?9C z5k<74QZ!XpASkd`HWe6FWbVCYdP}-Ee8UJqP-5uay(Y@^0f|05o{40Q7Od@Z*c`?X z(Oo0JXw8i17%9G{u`EH3C8Y;Zd9#L+r#RLzRxXh4l(D6{1y;@1h=?2*+XA9nn`0!i z(7$iaE}*W)FHt?jMY4gIqON|YfT$tD64k zeV8i$33ZVKlat=}-0mdNk(By9y1ta$T_1c%UnoU`znU7q|qHa=|gc zUE)%<8McYpY+Nh_qbo2x#J(aV0P1j|AX|VFqTX<7Ibaem5iHmwo4`enOJ$vGsNd)J zvRJz%HSBQE-RJT9dp&*U!Yn@jsnx<~n{CEqA0{oB;1y)IVR8yxIbbf!foM`AI4az% zH$3d;!h-Rb6c2`cK>_6?r58N@VQ(1y&jb0+=?wTZ2o3w*v16R=5vG z?~KNqJ6D@KmkhC!uK3Ab{$y|bq?bSGjSZZQm0pOQ3&qb3^Y908ecrnMVxpjAO1!Rj zJgBW(KDI)~)K%lHSKFt!w<7ah@43Fv8)sD0s`>gGyBBt^>#Oc>rK4ZZNToMqVykw% z9n5TvA+FBh)j6@;nss$;A}9a4{~iC7_$?){AJ^ye`utb{blH^1E4pD@uuW^HBvafh zUNCd+?0Ix5Va>jtJD0miuURXn_J2tc=6rAmY?j%#MQ2;Dcg%Gx*2e73%i>jg^NN(W zH%}eh1gSI8H>39}cdnTJo8%+)J@tC!i}ME;8e+%0lnLYom zWvV4XTV~qdY@atwcYMG7L2=bGx!}JMS_rLBeDQAZO%yjLiYftspSs~*a4(1XqFpOT z`J%mvlImr{LgYqtA-ZDZOPW^B^CkNp%ET4ssn(gp(}$tIBkQyi*41JcKzKB>bTzYd zvGrE_Qv0or_d8aY4}Euh@x7g^dpn=;U-YhZ^(HLV*}QAwd2!rP%R6e9YktP8Y`<%` zv-5WFZtur=YmURbG!W&Bd%HSMtxvv9W z-adZaJ?H*ZSCP;gW)#zk@2fT>M8>uaCHn07ZS9E+=02S{E55FnQ(V{0=@#i*)+Os2 zUAKHRW^7x@_|S64ayRd8{)(M9w#71@+n`88JDE6i_#s4IB%e?eSTb@#tw-D2-;E~Z z>bTs-%Wbi&vQOpZUwTQ{?CDeg2>WgAA^-5DES3BcwqMBJMi@%}@MV~U2~4>DDSQo1 z{A(?yo(%GyRm(=%P9wct=Bhj7Ie38 zBT!5*$U2~i#mIHj57<8pmlv+z;Z(5-oG&P4><3{na21-eKf^HxCNE&(!~}Pe6no(o za8fD7E;NF^Oz8_XqzAapaDX-&dlnz2q>XL6=5x$ zz=P9+adQce{^N_K)q*{Wy_xBN7gyMWPH+XllM6W};ef?dh)~!mh%Hdq3+x7DB@v3D 
zFkl{|&~Gcu=&aR*%np}M5+MPFiQf@nU_2&4HF=pRT4$7wNrMfX0Rjt)GsyxhJsARU65`xf7G}r`Iriqdd?!kSM zIcT!LB-R7q7CLq8q%}7U>7~XFTNp|Nwr8qMi@--|6|Klft=6V*47_Zbo5rs-L&A(~ z;4!N}(?^Us2eug5aucU_YI7jG9#VD(v^a=cfMu3lmD3av4YsA5KK))I!+@b0>*pB9VYK*Ljn9;#CG963UF&lUuZbUB}FWF=)ztP+jlle`5;3wz=i@6g~ZO|0zn(i;Fy3(#rTDay#s0R#GMUYbb~J&FzDl= z;wW2$Yhks*{wXF)xDo~ZB~1b(-LwpZ*DZK<0fdPs=v1V1*DYL|97jLfZp)9 zB@xya9$m0Sk(6%2VXP{@f;XiEy8wA;sW@A|6Dv6Uk_0YpgK=Dn{<^p=nlWpbrDkiV z&H}(1Z^M`aFhZOz=IP>zgJ00*nfB@S+0f$oWy2D;M%RxY{H(0{tpl@?>#8}`e9JZM zI$g5Nj30bpv0vBBX%^+Y1$^*7Z~JN6-PVr|-#Z*{J;Ar0i1nOVZ9Q|RH+I$+8w|yK zL$Utzd_Bus!ckC$nuJ_CRXTg@diPxSyl1|5_7tDt(b?cJ_R z&vZ}RRLYx5xBQmnd}etfx8VB7+(tYMud7cbRJt@n;N_fbpJ;!pZ#ED0udB1aS`=3o^XlUF z!i)8{_AKpLY5B12PTOkJk-I*=taGj8C_vG5bG z(kkFWeF|h3JO4NRYEhabPFLGZX1 z7!?v+m`)jl>Gg|lYSK>|Fu_a-i9Jg=hW5FnWDy=k`P5^9s`D)Peig&ifzABvYzcTZ zM6gKOci{c?>2ybQBf3-#19&Q*&3Eci{6y{cy)au)Tf*zS;N@pOg^d3#C>*jfrBZQ~ zQ8|8;2zbL)UF_?~b}~Qu8Ezcco#qc>h%4O!;(qJR^TKBmZO3_<=-rF~)|A$CvZgvg?Udq%)+0yhssIa*PD%e~~jpY3>$p7;GIH=ftT=QXXIT+3@+%RYEND<_^+!)Mhj?}%mXN?5WI zxp~(w&0Sg~=PobqzxDjm^UFt<4#jeI!ZS)`#*}zQH?5mj@cL~VdN89Wk4zkyZ3kTv zSC{kZ^2KB8>S}mVNj-}fuIdWGD6ha)`t0S|z~aHhhULPTy6Lqp(` zW8cFG6+SD>OTte|&|h{|lc&-D?KGX-Iu>cO;#YVaEUiy}z=3n{@f^+gnj``whyWkq z_o47Ba)-_zt&7gKyd93`R`a>lKdXz^J;%de?sMzv=kKd^skg7$qw}I0@&);#VL`Qc z>{jUM09MS@@L#!Tt`&R(psi#r?y~T^Z z9c@lg2fTfEL4V(dtj$MPA&yKhZ;v+M%%|?cR-6?4g8>{nkI5BGu*o7kpbPe$0Df4k z9fXZxe~Dx7VDgWc^kITINNJHw9K!_rA;Yf$d7XGD(n{nTjwoR-SuVU=|Cq>=)IMq> zGb9<0N@bD@R6CsNpLtYG{Wx3b@tu9e9BI?MsZx2;#a4gls5t59rkp#gCf^QvW%j_?$qWp03w*NXW8B1f~LO{|`ECw7LKQ delta 10267 zcma)i2~bfLH{W-R!2#V!UC%HpWI?u*?-$paoyaYedU$;=Dk{ zi6N&G#B}!{GF>r_J!3rXlJH(kkl-90l^(FHovQ#=!=US3sRRVG8*p6XdDlljjT z0^G0rRgTSn&wl=M&VT;%{r}P8dt~TGq~h=7av1^P_0I1NKiWN`&;{2ZHA~Qhji5=I z=OS$+eEBY(n{VTji9CT#z?BJYBJL};iQy}7N!(JK)Gf2gxOzfYikq@gTwLUmyA?Kt zTWM2rX|XHSon}kp;u2T7TV+$Z)i$+TV>_*JYi(M0hAqRbv+3M=o1UvAbs5~5woJFt zX5`W`SC%{5md(XeTqd{KW_IV;av)9Fa(TpFf|lPPXvJL~o*Y5g@~-l%O1u4GIyuEp zv5hzzMk>dAhxZMk?+d&S4MIEr;a^Mk@X0?xBvl`j+#qZ+T1885khT}kT-alarzyMxoGH8Rarl-bDYb7mRt%wRpjU3%Cy~YAN-<@aRK@dMHV^wP_O(t znNEvN0dE0fP`)vB0xhLxDoMymP3F)fil+w267<_r+ZJd(f>$4(at!*fjyuqCRXh4i)joZF`FXF~QGWTt zNIC8BJBIvz#y&LW^o$_2x=qv#Z20~eol@VjSgTn{!KC1(eA@p;FqUbW6s$>fF?|+f zDUwN~)Kop}(fkuh&OYqSFp;7>XwR6@m`5j`hUjLxq;qiGHA0IMd#h4oO zJ)KqV#iQK^a-Dcdp_uMoW-TO!HiQkL1R;h$CH%c8Li75c3j)Nb3gu*}cw_*jl~{uT z!9C$!+$n^kgjLkd;KqGcGA?wF(N3n*%8QHb7f0LM^~T zSb@Yv2?iMz&M{(wdu57onZW0FxEV7}_W-d989X=U06~}tuAjJQ?6RF1@p1k6gir`` z*>dF*`Ldm@-?v)5KU&?*!vDPPn03d$@JUU1%+0Lx32FK>rRvtTscWI! zFn!y(;EWd3u?2O@4U^Yam4_mdLrm>K82ObY6+s4`+>74;`8qjE_(_OxVUipMHFZNW zMRpL$19^9&bb{ySqaRqZr2*a*lF^bx0L@r5sJK;wexIjB<$F}@HtR*=GG2XkO{d1NyJOCjRw05z+5@ zlDl*`lrvvAR~SCBNG=?^eRARCCx(hAx&QV;0QJ6FU?tJtXB&c|IN$DaF<6Y5TujVB z;=FM`a~dMsM2UdoYIi#ZxoP;~QkMgcG;_{-g&BZ~386a62^A!nHXzqB|39<%I&O4^ z&;ZMDxYR_3VHGnCWL3Kh-7@X#Tv3+$uqb?xo!VL{7{Xj9!={AjK$d4=_fCPlOb8}~ z6L3*Yh_~b=AVd+18l6>2&|h>&r9zqyyG2Y0(Q?Pvl;QySCP53{=3N7IHz7uSdrE{7 zPli7Y89K95X(262p4#MEu<&8m{>_73?ito<>oTu6**-lrBZHPCJ+)^+H1fkq4~sd$^0C_b=HEx)Hs z-pTkNVn+S@G{R%hrh#h%D=DK>xs@bF)6x6d0+idC@&7L3y|iuph(U#_(8__zx;Vi! 
z07Ko^c#Y<`aF_wf#9OcSK70(AJGT9gG*W*Chf zEDB;p8kgdmbC9quUxGyoCj!{wdO&_{4m;P%oE25&`NdIpCh4@1wMNkWSF3YIgg1G2-@3K z5R41(g~=4+HWHux5*+?3er60OrCf4|@s7h4!QEhdCw+w}!R@)bfE?#E3p0Xi%Us@( zL0|`NPsV{WM2y3C(FHdR9$*k(2|f^D0*-AmfGXM#73CbZ7)(_6ef}iBQurO{z^8|6 z4XzKBZb~vqPF77`h-oq=Wyo}>GpG$64ebf1hl*LsGI=1T*3EX!bVbz#th!)o&#%;( zp^o{kxvs^+rOH+H&dEJ7y#e3gPj$H9(A=TLeXOx&>B6H+4=%CBU8}m5j|4xZKBl7k z2H1TAk<;|bKKg^)h|3-E_#$q9Wb`7dyM$3eOp-o*KAe6#s9Vr2<}BuhjjVPDE7>tA zi>2v8su|;?2v-@N^~`uywFMYU$eyJrZ#7Ic+-mtoOQ>&tU~XXk%%7ZjmYqBAoO4FA ztJv%+z!&rSIsLq8&J@!b=cRMfsIG+7mBb9$^VT_Q)KJbE%0HC~^p%tQ*JT7uwxVIC zA*!;jsI0MqqD9rh$h0(6#HQxOOvRx~^Iw_!3Tvuc8~`%eu|^qz1N1+;d*!jhlH0E@ zydEuVWD6V7dxvrih4?y^XT2myxu0AY5vhiVM9*M6g4Ru^rHlt+RsmCij+!lG4Z313 zNq>xY25WZM8KruXVrtP3&7bbz#730BUJ2Cj4XBd<97k~ztX>oH061thC!hQg`nQ|` zvKAf9?Khu2p((C5oRwu`dS*aH~j1-^|N`5%1>dB_}|+V-SFrTTSS9Ju38{c|NMn zzll*3_k3Be8hzhVRGPfRle*hO(jw4c66lxy#A5x0#^&=Ojj9LqO`*7s=9)8iVc1j7MegWtq;02dIL7h@fG zhJHLpF8Jk8XyT{P6k0COka@^eYSj4)lEulHfUzSQA-SGG?Mtq~owq$B7iSFdfIqlQ z69MAdWc;egDo47){2-?RdEB)g7rULFK^m~HWXNUr`JBU7nNn!50agSIw_g|zByc=d zA+#edhp)r#9z1t(c-X-(SZey5zlPo6YB{`v!!G+sr!|8~!+mi8b`w%C4H!?v1%A74 zjPc=eyd#NGvAi)@jWFNE#DWRdz05^SyeO}zATQ24=VS758m|xFDh}deK7F8iKwLpo zhl+NH{ubv{pzjtH1)rJn=C96Ooqv7q^(DiKsR6It$Q$fmETnUDx-9%Gge*qsPRE#tiJhu=@V(RzKgBz ziZ#9Ve#4W7Xwwn4>BvhlU)M*1-l5hdgwZ_TJlBj4TQh=7mPe%zN~2ZnY*qX6m6fWV zr&T+b=tr&xu2smOSE>#`#=9er>GxevTpx8sPM(d}?5nls)~be{Qt7w4r@G&2S)+<$ z#bt}yL|>1}AC$A!*5!`n+K)PZ+VyeQPkTP@VcSnelE%qBaK#IB@~czcn*N`7v@r2u6lc2TKN_qrcsZV?yv$c=Abz_A^>A9 z2g=)$d0XF6(50UbAa^`ZdO(tBH@X*t>ePwU38fz(O(F!8$gnd9{obnL`P$G@aWNVP z$^Tu28YxS3D5pXtVe$ep81fKiNeGLPU{0fiT}d70C-;Se>N2i_jU&TnN;i&24<1*L zY$;AmONQR7HsJxXnJg&kO)|9g{cmZIe-IB#Dp7My4nB_sKHfBW^T%J@;5sg2q3kGb zq>u&_{$t7RFe_TVavM8lkIq(Qig>fU|1K1w zUltqi;)#GS-p$X@^iD9hWp5I26cT&ho7i1C?M(rl{`RIt4FmFTS_FiDV3R>&)Rv5I zeQ6bz0spCF&gdDmywhOjmh4NbkJ@3uji|FG16WhH9)ekYf%r15{-I>IlLWXC)HlEu z`T{pX8qCVHLqm(O+6tzO(#cj^UmA|DfxrhE+j@aCR9}A32t7!!XF?mlV4DB z?n_JdUrFN{)e#`7|H*kove7qc_tr3YRlAvCpncc{Hg0FY1LliM99Nt^|Dbn_X@nxq z#setHp$HzL>dMF_)Kphe)rAZDFqs4rS9?8!8xVBxoZUZke$e5eukp(&hvPiAkEw?` z=$m!E>USX04a1Rs@R>Or9xIfjt^qn&&9lrYjBY2GL8i>!CIZ^#v~g^Tm=3( z_r-DMESCVd<@b6LKiyx3l4?hr%}l9!zx+I!iTF#hDsMb9AkhFs^s zW$I&IU8RJP=*67~!Iw1Ri1TPaxTVG&4mtt14&ySxkazrQ!nw$qreEbo9mHnj;9$Zg zm2pPsIHPmWJ_b7u;{t3wGx%QN;2ApEm>W#k&=WZCCRb;~U#V5Kh2FU3+Oj>s zGToTp#*_q{H(B6xSNOnw2JiBaeVn-5y=(1{g!{<8YRtPirH)G?Mi z7CCb+;=T}}j;&D)w&K)LrD;WJ+CZUEDl5oRS&NqY#dnJD@A$!v$2IRaK52|Lb*(gY zEgX&<>5m>c!yY*kJz{5%*ds%9#KJ^I#-bw^SojZdDr=4M#Y}k#W3r*~aoe)sC%zBA z^1)YrJh9e45bd|K{r1)Vp*6?(sAHUUjITPFNT)B-;A17V0AHok!J0Rk!|HboivuwU z6_prRi6N4?V@*;P)97bUediQd=u~Qy%4VtTh^cOks*mZjCp&H)h~Hwls8jM)4i#v)q!!)`imBLo74xkv(}G!QhJp^2YpMA#9Yc1=K9{j*i554q z#f{P8)|KMc1{@B;iuCqHdk5IwfoSiUmEJQkwI-yF>PuKXu)pZPAGi~EY+p`a zKKl67s{R10J}})HOY}P1^6i#z-|c~gfoOi+N`76ey!!scor!392V35;THZO`f&DMx zw3(hYb?NptbhMzBEvOAf3fi!fX8+9oko}FW%^GjCgg0ueBr5dG^n~{=9)6d<)bZ%h zgG0-$A9jAw$<`cLtvL8Ib=P_hq0L^m5c0I7tn6H)YGWChvw@jFl*)@xd7Sv)x;k|= zsx+@D%}-Ocvnewv$3quFLt*Qyo5YXgG(T>O7+?EY>hAU9B%#fHd5R<`?KC~>o$-dx zv0CfB>ie(Vd2KDF2_L1Nsh&_9OPQinAxjmmQPy=4`f*EfRm9+m8M8tcBAFH8s@wGo z^{|+2;bu0oB4Vgq=ac&C*v{Hd;ai9OEcMtMQ~TPV2A^%6gpyArrJs$FAjUJsfBy_5 zdHZ+0ijZ6W!-sY3SNGQ2%E%9!(rp6aM{+sDKQd(4(u5yXa`6_uEnE0;#@-4jV5JnM z8Le9Qtn46dC8Cuw1H@NlosIB`HwuI{g)}ae;6$3xmMM*Exp=nFRwRw*NgTAKa3$z?e7azVquM@x0n8TWtoCeU4);rew<#)=T7GZi?PO~^BfZDhicZAxt@s4oKD}@{R#1%Joo;&;Jg-J$){2*Mk+ur7o@_l8m3LGd zYhjEpeu`@0p|IY7%KFR61$3rAlZO&sr|JF{g&*H(TnLcx7VztS3;Jz;3L0orA?mmu zSu0iASF3DP=AmabYSepNihfz6LayVPXe9qjtEL{;aL)1X7i5S97$n1+!83!pC<)pa 
zF+5jnn0Nr-#9&))OIamIdtz5VhpDi;gacNbkKFkZs&!jaH@wZ`F1sH;GQ~x{tK6&N zL#Q1W*~iBLQ87Qli4=U-)5kg7!QK97{zL_N97RrODlqPj%Y1%2<=HsJHB{f;nY&~qlO0!j}Jd+UZZwDm89H~P08MH_kPN?Q^@LnbmA&_0rz8%;xF#Pb3}7IY2lcitd$HCo!Drne|sC0o0qNP3Pe)3-HJ42tdNjX{dy=dAw$% zH{?kIT(Mt-*87cs*+oniydx{HRmbD$5!;)APiYZ6q>#*2|CX1J9NlzsK<^s?dO4GC z#7><}SLlTB4`_66|K}Qs{(wdmom`_$gK%$>PTzKQ3%~g~tr7pn>PGg^iiN!O8c409b{` zBXEfO<5YWV?_lqdp5DRE-lK8#R_aLWp*@4g_jmN|i)*&$njm}Mp8b3G^(A07_Np=9 zo*}ps$?(x{j#`3oA;$oyY)`+!rS=$I0_>?!me1yb>1; z_;&&mQxl=1tTHD&u&OMJ<(C1@lF!IPLu_j9Q&m=op7+jq7l+^( z=H2#3Jr8=;R2>+pU?9@Ys*9rP3RYdQrmp&5D#KGlRwy_!KQ=dZFE4@0R#DJBRo6b#`pldgZoj>MVgI6ip=&7* z|3Kn#>x0rzYbb5H{U3HVvN@HX^qpjP_eSg&W)#z+km#w=^o}5Wdf?BLQF9Y(ZhG9g zYHnXOc1%m378c(QE(DjZ{vYM~eAt(;MY`E05s ziDuea${eK%SgIiG`-Ccir=0k}69w~2*ovf*{ViIsX+_+P`Y!q#o0&X~{?WDr9#N>1 zXZK~xlDpVS)8?d&xS0u7F{6OK7uU}a1Z%htrnK9m-2;~;p!IzeAZ^AI zVReErtA+UtGx<{>*!kf$E)l+G!)%@+OXRGkbNmaY-|H u87{gUEev>Tz*PfN#)dzjS(5xzkVy(&)DtB7KWA!`V*;||Ie{s+SpNk;z^3K^ diff --git a/ultralytics/engine/__pycache__/predictor.cpython-39.pyc b/ultralytics/engine/__pycache__/predictor.cpython-39.pyc index ec432e9a6cd6b8e3d23eabe63da2fcc87c6d2114..958802e8f5df62784f2960f6fbd15ce68c01f7ba 100644 GIT binary patch delta 6648 zcmZ`-d2Afld7n2kJ3BjjaJgI_%X39aqE?hBN=Qm0J|)CihC+WuptgEmd$0&Rf?FpvUu z8awXqdrL}|>h5B`cYW`B*LVEh`&#YYMxq#xTMB-|&-}D}`JUfSjJAJ_5?AsHXI!nZ zJd>%KS7oZ@HKbZiuZQv>*5xtshHMMxO_@gW5u|#}sz>wDdMqE4e4$#rp2#O;-l+A| zllf%5H{UDE;aaNRm+zDLvRO;l`}6(vf&4&yFh5uy$`92u`AmH{KU^Qlk4UCSZL~g? zAFGe&$7R{7P1LjbtjtGiJL;49$@)}&3guXS=lu$g^TZ{EC)U;cF0P(Zay`XAe1m;O z8+>s7{6i;BezCOT-a4JlPG!$DmrpM**#5s56aG(F%Kv-)kiIW_`rfN*XhHLTYV6K5 zN{vRL-sE;IyZxKW&fhyff3;+u)Y$9(>(RT~yLp25Tw-~PPxB=2MJ~!yyzi2dkJ<5c zh40~M-hYX$vwVW*_#htwM-R{NVQ?h*UOvXhQPaz3_yo@)m$Li#jw_5$@~KODKD|}i z$#>?A7(yJM2^EMa8v6b`O}{+>z`F zB$Z~PC@yB3i`i$K;)-2p72UH(vMUvCHw#_%ec9RB*){*w#4i86#0Y!akM-1tD~hKS z;+%mdB<`Ae(U{J8!sQ(Yir~JEnGb2%_xhzU{ z!M)hB1KqLhMi4$@kw^3WB{(_UpSFleH&`oN>h4JTV^EZV|ul`@_G&@6xLl^z zAhJ?%mW#E@svU&v`hv}KF)@gliU)~I`RDp)zW6!HejX$j60{NG0V2c|7)$3dkkjS9 zX+I*@=Kd^Lodk%Y8y~66_-Lo9eyE20pZDLHBG+(9@z@Dv9g5GjCDlJRkjdzmwXZ18 zDvOMVxN*tswqswyerw&SD>*wAPr%s><)E=k!zEL!kvbXNxWSH8oIY z=K^-&<^ziyb!@lPthbK6-~$M8w)I`e!EYeq_6l zozF30(`2_4rA&fpus|!rep%t>QKGaOc$q?}R{0kA9I_O}@c(sU&I%)UFV(&6k7swc z?{ncms#;a!x*JAL<)I8&5UZNpxU8;g4V9z55OFOJZU^;INn?`6xp^6OR!E?fsP-V& zL-KfpTkD$h7HWH{N#_lj_BwC4DdK}ZMwfJy`dni-Rqz;(m)W|u&cs`8y3_9tY%-=u zUpL6ZP|t+_-`QQ}otSk_6=(dNJF>Sz591}#Y!w#ag~*-G<0$3E>;_w4!Ic&qSHPhL z#=`O<%w2H5v1713B$8n0p3peu2|SR*b<(lhAASX0D4ZHvi)AK`daPkQN6yov{g=6j^O#I6 zabYVxi!{o$5v8G1t{USy{-MjN>=Ad~M6F&;NZR9kO{PilyWEhnd#foPF00SAm1-aP zYUmG!;t++f7AY%S)f5#&7t;9p@eR0x)gcJUaO*Pr|MH94S_lgn=FzQ*_nuHT*pu&3 ze_0>fsvmelTQkag5_5ih*S)*_E%cwNzzb&V;F)Io@F9-LE{{GS0u z9|L`!mMb0gA_zDdEdrXVQ-E2sFj1vCw!y@A+;PuxC#uR4tIL;~V?yt_`oHQ}%#XAQ! 
zaXuT`xyh};H9@OMqxFhACqri+Pa|}Ox}kL~A*a^w9+do0w`zJFF0-t9k?J8pVf9OD zMjWBz~c zzCRu)^MO$-F4#5a^lScI)BPEW+2Sw=LXh;95>7HP;g_dp*`5B2(=)xFC7wr!lt2Pt zy}z9v@P9si{8PVy@_bGWm@9Tt`zesuSl8`ccXpAZh+_7ImN+&F>D^#|(0^fO-2d{P zW&c-u?vDiF6@mzk?VrqzxA&8%3q;6u0>lD92d0G4?gdv2QOijp!_;g5O4XNJq96;1 z2yU|>vQ$tPobH)v~)G5oM-E>>!`n4nTzEH`A}UMQB@OOCj{WCC#FQ$&su z`7{yQ1~E-Uy8e(THp+IOau?94W!qegpe=|?oL>;Ovs`l>feQrIQf*cmGCBmtO0l+V z1Co?h4h7N2=w|S^yhr$zy~%c<)xr2E^@+%pQ!EsVQbxJfTo8l{bIFfI7#ZfS;gWm= zFh;J=0rtV=!^!KF`kW)4MZYc*eVT+Zz>7-2R>UPLsf}6}YP7#ZY!Xx1$o(BYhfYM% zOlE0-`>Lf5Ye_bV{2^wj18NMhDfXcrvRHy4PH7g~i*E?|aW;qC9Qv8yNvKJN|A&dt z9<~SdNfm8c0=;5diVZLw@ow^?B$AKRk92*Ur7^%L^PitNWYN`HLYoEu2Qvp|0WAo{ zsGiET%O_c!ll;YBfR}1&Mk(mOruY%;DdJgwr__S0cMoK8sV4r?2rwLyVzG ze1?bw82aL}V+-P+&qe*seZvcXL~WAmzHKhaALurYavP({W1wy~VK(tR>ZYOp4r#5T zs@NI2ICcMs^6#5bKfOQO4pH;gFM>3TPcHhq=nS(Dxr(P%3G}N=su=b(u4R-pO)gk- z4t2L+D-DU0UdUr`ANnimVP!3ZR-+nTGa9>HvlHPVT%uS;*;LoUURcxs0mJQ1lpExe z;A0R=(ILje2o!2HE;}ctO5|FT@YE!Eg7*@xk@+lmRy^~GY8J&9Ptlry zZsWY~8VgP21yAV=xH~*`UB)TdwIB6wqJ9$f=!?CY+F-3e=IIfNdB!FU5dzHZ_kT2e za9&)*iAtY4zp5U|uEv0H>vnd}6MQCjKwhH6DskkAR6$maUF8eA%2`1GSvulXZT8?| zpw^vS$iHhO!&d$L$ap&+A}8Shcan-Fj=b+A8OEN{=P|%E!R0xhR@- zx)6zZ8X-%)L#5_2F1o4!zIN=QD4oUS2^FETXu{h{C;SjG>U4P)dllt)*Cz(y0s-?v zK^l*Ekt9mDbeVF}GwQ7xVDq=Bw*>@!zJS}I?fA*j-EA>RTrUuLoyhMKlLmP~fA<}w zd-1V3ri+ST%_v=|A4qztLxv$(_!w22zcuV$xX&xzUX2lM%O9dY@%OR+KO@q z*(mvXMb%-UCVUt?8obtkx)1(|F!(q;6`}O*KmAKHVzFVgkhdf6H?FRZ-1ts(V;F@(-jc^nH2r0H}i5j;WmKzl=kG3H$_Jm3p+KpA?ZUP

b5X z6<9moZOAqnFXwreZ2kEL$Vv6!0;rUK-TvF|=tNC?Ua3H`Fma=7y?aeC9m}F0w8HZnbK0#08lm zXl-h2Ke0!tX^V}X9pZQ~&@sRY4?!JOiD3=-i1qINPd|7E<|5zbq=T&N!>Z?VJ@ViJ zvsEj)i%n6-W?gh#yDoNtJ1~l^mfhguB`QQo5%}ZsNz`ANooh>wWmM|qi2w|W758)Y zMd<{k4r~YZ@6)*QXl~kja%mpzJwY<;J@!2Y)G`dZn!3>q&=B;%X5%^1-P&%y|Io@D zJ@mNSBmFbP1SUNr0gW!*eAGT#zwH0np{KqOznk;yHrp(4xQL@o{0QU*QGhay(oJH5 zSdsH>#JlucG<|vcs^7k4e@gAfDAdxufZUb;?OSeZ-$vrEL*u)sSR!(k$P$qYMAAfT zA_RKG*NKpZO7AbeM>$%Bc$dia-IO_rIs4IY9G^1@Vlq9MoHEnq7b0fD)GgDp%u$`i z9i_@% delta 6242 zcmZu#Yj7OLao*Y8+uPfF;s6|82k;02Bu}JB5TafTMTrtAQZJa6Xfe7_Ivp?z@DBF? zXBPxT@8w(uEm3x;z_m{Nkd!(oN+n8cC1obDV=I;XNTrfgB|lP1Dzz1-qDp?q<;3M5 za=D_4!+bppfE3C=ZBKVkPfyQGPj}D#NAoN7R3(|T6#UIcKjzJiVmLzDUWCZL}0ctJlp&tQ2clB}>Xh>hVURl#sYlPc~AeR3lwV zOFCN5G0tzOFKa)O5>*#p5&>k3Qw)7rCnToMk%H%l^YrMs+KD)RPSFn zwCH^L^xU38VOQa~*7DiqS^wXo1^*{3>wln6-x!Qkv>ShBKBlqX_qD`B9jU8i@d=*c zJy%&N&L?@6_X11sKHh&-DJAXHs={~k93QyKR#_>{_wXS;42cXM;dw~(@KHX7Hf#6t z9d9sR;5)DCrM?YnobLkFFD>00BpYUpWm0U+v6sv&0Pw;R2Ao0)aiKU8r zp|IC+Me#`C4B>98aH-1eR$;zcw-3l+G!(q@im1A_DD0iPG=1P%k~t?C7%3_L$EnGk zB+DyqrTmK&Q-_~S?_}@!2h)vVZk$&=rJTf<;tVh?VTLpRPt*If{_fz7AE&d-D5^6v z#UVf1^TH96>>$;Fb<3h4zEp3yuFcEU#zGL=nm&j-tz|K1m))0^Z2#*$kN6+;9M4a| zJ3qeX#F6tSY1 zasTfA2TyuRO{rO)&Lh_~_?s~!7&^}uSrBO~@~U{Ws0Pu>@(k^l(=#^OpvEI6UyHY;P*=DaQJ=A8X`h&ovSMKx}! z%($6n>P-Wms`}mB{n=NPdAiQ(Rr8v1LRp2c`ajAI4Qbc4SJ^8JeXxJCk5=K24*U<( z{lA|We2gnYSmL7ktiqIwDre|c^NE_j<9d#8iQp3)1bx{=)UW98yN;`t{S8JMcRJQs?8xd!{sK;Jc zxc;VkuhNd9*QiC?W;5lQg!2eDR<*Y2>9{^I*8(rz;n5Mr(`$(l9FQE4v7(PzDzB<#>BA1 zdL`_WuwTL)?t^?$PO z(7k~o4&zj2uBb-}L1bRwRHY9FM!hm?*B$XJdIMIzVoWYvo?Ix1dq5X;zqNmeJ@3D| zf2k2VJObu~ZsufQVsW)2O7IzRDy+khU*kVXtX5FcPF@hdZCh#JFZ{NAQ z6X+BQ#V*i+NkV1!vMXkZj7gG=N6e71ORMtBVh{SnZW3Tip-aC2qGZ)u3uTyt*eK3Z zuOVz_x$c%?G(lO0l5J5WxguteAj+Yj^BWlD905>Ni|H)IOg0E;sR*NNise8j)U3J@ zVE@;MSS-P^kR4`(jo&mh|I-IX{eK*K(WIG*y4dd@86J@V{IHC1ll zE|_l2qYF#E&yWiU#(DHQ-EH#oM2%8q(uFL?MPAjMcO^GP*V}p9O%sKRG9*_^ z;A$rpFvltr@47wRtlPWJP~{j4G*hCsqp$B40bHfHfSX*-aRav|>HqQQq$w|fDn8?X zFk0x$;2`8ijA1!p8YVyCc92ACR~qHn<$0tNx#?%CA8H#?cB?#JuPi)T91t0d43C3! zghxanH_+XRvnZ;>j1t&OfI_MG0)dwSdgT3)7fdV?)As*jZ1;Si&N^Zz(c=KPoihDN zJGY&7!m-`urE=&Ve+OZQW>M5Z)w&5MmDx^Lb+{~C^XH~+!EF=Bf7$<2HUG?xeW_kZ zQ^k7_pEKjXx?@kLYj_Cf%S=prtZ7`MR34>mmYC^0DchK6ZnQ6;wYWB-GN1dlB!Ty=Z9ee25Y za9*3&&yz$yz8t=RcI2AU9pu)AnVA#Hn)>`~8^(hhC$0y)Jm4AU^)@n2JBFAZ;jtm* z*N4f>?vQ7$vv&L(=IiK+uB@WEZ)OiE2>FMnkmKq9IeZvoV9;K8wFcwz8zVcnPyR=@50$G)(rVmM&s=5gq&wy%R~c5z zp1-G^x(35*{oJ@0jt{q&>#ObHsXoO^BEv}2KS9HgTQpC<0W)g(j+Yuz$_3<0v|#M4 zjjyTV2v?hyXA-ZwYaJ7`)9s8q;iX|l8dA94YpQcaV&)bW+rlQH<13zpCG7TKC=y>9 zE6>bp>l(5&_|QP(e(zd`Pz)5`1Jmm6Stld%(x_03R0ZJeB_|E>?xdH#<)X-)?{D=> zyj%2QUdp|LGnT^r;lS+rHr*YB{#hS#2uG-Z+>-A4;AMbfx z=h-(k8g%hPcRytB(k7=nwZ5eb{J#XhpIY~jEI#SMgpwjGKHEJs z7w;fg(UtJxwP~Ko7i1zx_HPPbq|AYb?@c_>!{6e zMA;2FT@?7Bmqk^A7nzsk#-iQ_&3uI9=@P%L9DdV42xo0eT9bG0;iJ^Ks$vhY58

  • KQ;L^Ent@aHcUz%T%6po(ZLFNu;QkSE+$GmPNBMly0mY3hrf5iP2v#8)M?Qf>0qiW62QNwM6N zDSEatx9Et?RVTw`hb4^df3Q3E6xCg_$_n(k*3wG?FJC3BlUAhKGSxKYa#$c)vLd1l zb3~20b*kgUeKdx$h(Eq(yc5=Q4)K0f$}H4dvmynw*n3-&k!8kKg@Gz*V>KHq(B+#| zuQsL})N**k&Q+GgH%RG80+&fMYX+<;mI>7$D1MW$j@qn;HOALSP@W5wBmq9h1yD4T zS(<@wSj(_+VAIS{_sbf}LJCW07P|xAefXvzWugX4sTqdPiTQ>S;JItIaMmRJ3!_fA_eOfJir^Zsjl4awo7j-RRUV0uATW7;JKOsQ#I-D7lp-ALDP@hV$oZlq@7i#@EYBXxssg?vv% zN=FQmEnasNH_@Be?A?$?HCIb<9qC~p%_Hkfyo?O5a#h9%db5~*e)@q?83C|{Fbv2S zO7$bqGsTS9#S7W3!9rto+$o=z$Ysxd7mDjO{H6a9}3_i2^2|VEaM-%tG_d+ z6MN&23MDG#q@$P^BzKwIg4GZ*vC9RQc2@Ir+Lh*iq9@|f-f4@P_y~MSA5~;E8>Jps ztC_2+_+|7+x*MZ};W6BA!;|)Zc5uA&*Vwa?{?y}VPR*cT-UTX* znzH1TM;xs-!XM;zQs-9C65k~9TL6e6cwyqD;s%LFj-M%?dHPdl%8#CTCg{6GJ$?MC zljU=doj7|s=)ax21MJf$A3JsWY#9IIq%=W*0;N#>x27iW6qW`QtuPLLn|dfDZ9Fw8 z~2i40ZW1fDRo7?VM?_l^Ta;*XoxE9@5JQyv6rxYr33R_t< z4`<7=!U(%E_=#UM58R69{)xL@o)Uix9h=_#Prw6{-g%Tnw$J<9=zZS*_^ub`$F_d* zJkEJxQ+|tyv?-Dh7Jt57H^NqP`+9;zg;T5#C!M8*QASd9|67Oe^#A+t;m$Pa+DvHS zk4S=2l}za3bA)}KfK0W0gi-XDaV^Yiv}56+&>F%NXB@y}dN#Au z>@}Z=m??7?9yz9Em>HcV&RQwcGIObjpaI1R@vmsa_W`IlaWB-Xvtd$|J{V~(H;`pX zuekrlPw&pNnJ*DeV}Fqqfq4|aX)M?6W3&wpIlZ2z-vLbg8ek$TC2+tAHX&g~)sz1Z DEE&Vm diff --git a/ultralytics/engine/__pycache__/results.cpython-312.pyc b/ultralytics/engine/__pycache__/results.cpython-312.pyc index af95d4fbaf6f4431aaa1786676abf61aa0e8ad6a..b892a86e547c1dd16900cda2b25eceda4b388ada 100644 GIT binary patch delta 14444 zcmbVz3v^V+mEe2*|9{kfeV@Llg=irV0`nIL@fV1numFpeUQ2Y->K3nCASvAz#28D7 z9fZppMC>eboHGGKV!X_mIN9VFoJ?@yO`?v-O6d@rB;I5^GnpVDnIt=7@4fZ9TMfp^ z*?B;9z5A+e-MUqEe|35NdC31;FQ?eGnH*aEW@+BDro94 zx0!o#+H!g=Imz%^pW%`eaWG7qokDd0)n`E+6lwsd(Pz)J zbyBDapyn*7i$d4t0N9cR&ZS^0Ky6vjJPNe~)R6_vACSA9gQrLZyM^@-feo=z@=MlE z)RH+)H=DFZdb~ok-5=`m3BFLLue~SC`^bDrAG?cOlI)T;cS?y@Z6$fqA{ zqI5Z%ySV01xW`v>>_~SF@ALJBe9@YLa4>wdc0RcyTcLLX4C9B1yEt}d0IQrm0@ zS`EcX=4Y}e?#VRr@;gp4W!Po^768vOKBkT3nKlW}wn=$A?;s6E3+p7ijg92AQPIY` zc>PJX&At?+AuUmN_| z2PE!-q;iKZ(ie+wbor|URkx*qQ6pe@`>x!tr^gbRz4@7$Mz~O4PwxQd6$I}<#FN=0oqfEQ z)E>))RvvQeu#On^=a6eB?C@^YMSY=2Sb*_EoR{=|y4Z;u3r4L3+IIM&eL^V0c@aQn z3TW!?%VsHy917Ec zcEWE~tJE}+&yh~S=L==^@E-M5BFjVG9v=+66W+6X4v8GXuGng}<58c`5svsCoK`U0 z%|(FWKxS81;C$XrSX@}MYUH^Wb_aaOa3A;(;>GRb@A7wgaYeGY905w&qeK1C%4mPI z9S5)GC=B*#*NP@@Fp{P>GK(|4qDZfoi~9Pb50Ncm5W>Bcw8r~|YTBY5=f47XV`YiI z#}nx7E^#9}hJcC)*9jeZf!!I}MaE}Q8x02{;ZS8LEcAGT{y|@cz_sBJuqyfqG;Z3t ztr=O>&e;u?}4 zN((^1tS5r0j(o|qnEZ=JOKzJE5O1y($B+ zn%zol0mqrVYpPO(1-~x@+ngxP^`zY~kCl_>9Go&Kg$C{%@|t6#Lb`3u8lj2Ycer!Z z0?K7!Jr>)rXeKpIhgN8UdIY)3k4)UJWow5S~&N46~wBw!mnm7_MuNqdO}GTOPPVl zPUJ^33lfv1aA;UH`S-j%DvhuKS_*2iJ>Rx+4OTZ|u@;L)EVf{Q^LNWd=Fmp7-zjU5 zevXXg7ki$8S|k9)aptzxG@;GE1rka zoIj`V*-g`0!|#DFC@xgJgvbX7Yc{w`FeVzAaZBLHVO$*`eg~u!=TJTXuy%2ZRz259r8yqn>B;n zAK_rP`GgP*F~D^~3EesdxE^2hP?(Q!LH}VN=ZS_Z-PN22WxH7E?ZaPRv=cY>P*;en z1jz;>VHapB6c${#i=KkaI5-KI+Q6)g#JybQ6}lr!xjkON@9hZsIFPj6poak3*8`gy zMvp{MTJ@q9go2zM`%3y&16&IEp{iFQ&i6W9obrR3F`liiNnUZHp;an?@zeK!Gab$j(mKap|SJGdA5&@n`34{N8cqOEjiO{SH!e&OFW0HF1Kr9y0|TFjWK~jQmeIC^qrE=l*aAR zVi=4*(Nglw@)e6q5mP@afINxWM@lX$E~Ugp+!3=1_hUBUZcKT6Ou|IV(>-I>Kn3|_ z*lOb0T;>wa6`q(Q;3h{_=Q=Md)1bH_nh{-rDx^ZHh{}k+p+IU4c*x7EE##F7i|n%M zl9aru$R!_E7}PsL+@Hr!S)+B>AaaT-vS(A1&fI&GY{yk<9 z{t#Uuwu4z`%;iSrz;6L#Zgf@VTkgZulaE&Ea$>Ati#B96T0>@5<*GuUSYx??M)G8( zOP`IS5gEm<2i}%$&5yJ3yjcEm39k*wd0j03MR1V5C><$@<>5gXgn#jZmqxZ zc9GYXmYL#Q6puQ6VE66?0F$Lm?)e|*%X=sDQg~wIheM{56-FpBrFY1>!PNV z;f{SMLeYhSH-A?O5aD(SeLm0CFVq~8kP8dki?y&;Nrk_sy}P$B$qY*U{L&=L{YC0o z(DlJb@W7@3=_yIdJHg`>l1f2zHOLGC!rUL=8LRg$9A%$#4|t12Ef=FUbs>*@E!soaPAo`GUAbzZfC5!mPxjX}6GJn+9lT`H&h<|`qU*wRt*OydAj`aC_gTAB;za-V* 
zL$(LK9ljvhw8-6*l;QGH8xg9oKJGHnI{GXSmDGTj;%h(V=c9*`>Mrn!+aqAlCuLYo zN_+i5Af~5V=pipJs?Hn3X`I7us`T0bZzJt!!6p`U=Ov{uwD5UsMHM8SJIaAb8ELLB zCI{4u#@%$Q}(3^`_h~C z%KW_eh^W>KOqta=UYsyrbFqMw4y_c9{b$-85N^5P3vr@n@5_*>drrQ?y;M?@);OB16*=bS9aUzoH7-#b%k45|!+RHHX`uOA9@7$PM+OHGT>Div8QwJKj^z9@;a5@OOi zhGG(40vbd5k|ZV-=MrOg!Xbp&pqQDB3&26Z_)jW6kc5Lo-`jV&C3L$AFT-$PBB22w zI!l+ahLZ)T6YJPQsKR0{6cLmPR78sN$334le;BP?iPxQx>(2Il`95P{1w1#}4qCkxh>|8-kY_p0;$wae_e7nI`p7|zD zFT0GJVuo%eX3C7FmIM}}6-8>+sOK1BhM2AkqyyS7F+Esss*oh6?_yC-4?-9PCV}e) zlg(2{7o`!$3~9z=zrbR|H$P?=gcF_qfGhUwC>SUzVON$Z%%;BwmQt3jpgj0+jfq`F zjB6co@EZaR2$SniLSM3WZ4Eg5k+nArsCot5=)w^w$cDy~?B~f38y^Fusa!WlS`3j8 z2zdil zW6#!(`6Z*Wv#RG+7i6z#E|OQ8-l*95`kt|t^ZU;2n<|)>D42Jl<7PqaHS-O}b;phT z>-leYOg6M696L#9{WD9JjCGzreD3gtk{82Mg>{L-y2--&ME;V5b;)Ssw8=VV9aE0G z6VAC;c3f$@C|v8fwmae6oG@)B^ENzZDq6BBNA~?1*CvPb2M+lrj{IOlk>nH$QY4qi zH#Wk*VngOsgAx)&h!q@@5Nt7NR15Q?PYmuQR@sO(W|IWWxT~C5r#-WGz#z7g8Kp(* z$m(a_L{Is0JS2mFFsq{^z3IT~kbO~tUwJvWlPK5W0dJsJ21L1)SJ!P)vW`BGS4?Ze zj^>Tc>$zWjVf^=RoaP|ZM&@qbyr~I3iOZ+M+8HdU^L+zrRHcX_7GH6XrB%vysDn)f zMk@`s3a^sun`;yyT+XxPkDE(|w8qm*MwU!yi*D(1& zI_I>>d{%Kr0Sj_ge?~uLDoL11CQW6xY_73N^29j*mB7n^zqedz`TFkHcVEp-R4h-} zmVdYG+Q4^eCK`53?ASB0sx@J2owhpvsFv#uGg2leUsO9%v}n^$M(@<4BETi|$UxXj zAp~b9myTG;K5+XTTl2}bt!i>|m1PLLHz^gpa&W9O@q-7%v*ZAm63@!GDr;?2ybM{anpu>Msf%10sLRWHfn>IVj+QZ_{hd3WmyT}&I(#MHbN zTIwhV8k%dBI$n?Z9wQqP;UxSlHp;prvoi@;P}h;-Ipy@5JoSyvA@i+5ZUAdWN7}a) zus)AmgA$DqG5jP~gKkLNg6qGaMkPL1F6W@p4o68o)pJPjKHr;tJl*!|i?^ zFkQsf>_Ac#@pWUgF>;YjN_l^eTS`9(AK-UoT7a*EH@8KM94nzN{2U7^r|qR!Eym&p zSfJ1m-ooO)VL`P?Gx_w9LOYscAhP2(-+HeFCn+G}t9mmo80xy3NN_P&m>y2dPHJI*=AD<>^;hZ|`i zd`vjEn8d)+Ei5@7I2X9!OcXA7WADW7#}yMDzJ#rdjO?r&x}95izUo}n`FZE&U0@Tr zRj<}w@Ls4(j4DP4K}6nB zF(&KjCr6%~FuJF?GUD8|5NOC7^PVd|UvRGA#iA*fC*ks3=$~{gnR2a4xK>TN8m9kd zgQAM_@pJJPho*|^5=C`a_D&Y9oGMzEC|WmJw0=e@n}frD3Vgpi8b)uWj$Qr#)|K3; zWgHcE7S=H)$1p^1V1+xSP^2HQp+uv92UN~tj)EizvKJ1l6oWkE(@$B3VqmddmZoJT zFS{&H*Wv6c21`HT>}KD%33$Gm;hfCCuW%QOl~6$PfWjYwa}{+A(WX;o&``9 zJXq|*0z>TL@ucSuM;9uw!1%W4!=lG3`hFN}5T>x8y80Jb!dlgx}H zU#_~p+$CSj-mRC&YwoJ#^1^!twY>JOJx6Z7TS-pr&I=lHrY(-?9Q(B1Iju8KgQIS= zOk1p=uCW4N#lQNTY4~fl-7Af<@{+qjsYLF%2MC>a4SIR*y}}at@|g`RGq3iF^HTFY z#ws`5t1go}?r&jb@-~)oNLWs4hg|ksK-@F1B_7D9(2;^YZw(DI{g6Qci?65GV&3n- z>@?u8b6dS3$U4Zrfu%VG)tIz!)X(Fkw9gxXOQ+6GA6&xVg;S3=0*ckC} zJ4J;M;d;PZ_xHl}kf==}^!of`gZe|A!9GfFM;PyrIwIj9q&DCP8ZHNBnh~|4S|UI1 z%O}PA3YLjU30WB)SvT2~7nDd)OMZa^%@XeP)9eDszwnalISYnB(E*%;8}&XUvJx70 zABt8(N(Ds8;FhcpVsw<`Gy#-xv$0Evh|!5>0-+m2Jl^DCl43vesX;B-{T$^)!rNp7mDkU`TBQ{SJhQ#))Mamvx&}RJOH9<48VGi zLdpYP`jP*5yk=Pzi9lJ3ch#K8$~2v@vAn@f+;}i?NFm{UuoHQ*wr&2h=5%TS|M5w} zh`1>QAMRKP1c8lm5H8QV+i5uTAo*(B{M8Tie-s6O3rYLFQY~CBg6%?ElIQkSmS(-< zppe)yn=zASePW;8^-#ODyBiz0%JtiKyUESN#g26D&ekv{Tg;A7v;SWW-mil=r3;Qn znG_tzBR`Vnlb@ONGIJY-+EW`~D~P{A^ZFlwH3j zUv9@<+JjXK(6oH?c&D(JS{$!482Kj{PS8U^xRZJ0$6X3Cd~gjQdFNog>bKDJqD1&N z@?qR%!AQcOO6=$7e@CfY*n~VR{*f#_R8Cez z)y~&(glq8iPpD9xa@8N|Opq8@Mvfo4th+Ch%dMhr+dy?&6dHa^w<*b|{B1c@x2Zw5 zsc4FaH^D+)Up?6!;I4iZFQRcroy zmx1b4*^zQ~0Y%;_WIf#K8LmJ?EnF(nU5P?vD zUkVBPjI&(}YZuuDK+0Vma5rBjufC&W6o#M44WmbH%3U)Oxms@3fKq$RkRytv=7h(&jrmmUUVPOw7g zpE}08{4h*CF69*fQSy*f`m`kFTuafhB|qI?jPYsmlH8ilt7cI^dKa2}^npZCHOU>= z*NR8(qQpZ@UtC3aZm*)Iu}74Y3W07f6nn78prAqzgC)Y-P^5E<)*%4TS5k&~n&hJ4mLrlBjn}EH^M*od+6SVgp!M#|0b<^=vk}}pU8SHu};}2}E4#eQ) z3Cv#9eNXkUBjo!}Eno-8e?0ZQWgkI5h+^O-Oe!%c3gRkaXyRd=`2sW<)KJPH$*XtK z5c$uOue0^!wWsH?Pm^Cf{of@oFyzY94)Ve&oB6XqlYrMjA_WXfzIkfirg{V*9$_ig zR$w8@A*f|90Wwj*g2%a`s-R0?kgNX4NkBBINbNJjWd2a@U-4tiS$@nGz~KG%^R4p= zwxv=b!P4ymBLo}>s;0O~2+F$2^48B=7Q4}OsRRS8y+bqymyrL{8m|`}5HzZ~{9WKR 
Binary files a/ultralytics/engine/__pycache__/trainer.cpython-39.pyc and b/ultralytics/engine/__pycache__/trainer.cpython-39.pyc differ
Binary files a/ultralytics/engine/__pycache__/validator.cpython-312.pyc and b/ultralytics/engine/__pycache__/validator.cpython-312.pyc differ
Binary files a/ultralytics/engine/__pycache__/validator.cpython-39.pyc and b/ultralytics/engine/__pycache__/validator.cpython-39.pyc differ
zg#UPqZtO*A6_S^c+(cp^xrGEb7sLI+eu)Homtk9qZe!R#3|oR>!C3(bR*PXhEKiQX z3CxUj_ABK52FXPvxE}T%k{6{>pkhV$$RtXtf#3tuo=z@^qOlV4uOdnC#)&p^mQPP?5sSY&@nru!FX9bdS)N(=m$sT#4`Ipm MCLl@;pPg*{FDg`U&;S4c diff --git a/ultralytics/engine/exporter.py b/ultralytics/engine/exporter.py index 5c43edc..6ac170c 100644 --- a/ultralytics/engine/exporter.py +++ b/ultralytics/engine/exporter.py @@ -16,7 +16,7 @@ TensorFlow Lite | `tflite` | yolov8n.tflite TensorFlow Edge TPU | `edgetpu` | yolov8n_edgetpu.tflite TensorFlow.js | `tfjs` | yolov8n_web_model/ PaddlePaddle | `paddle` | yolov8n_paddle_model/ -ncnn | `ncnn` | yolov8n_ncnn_model/ +NCNN | `ncnn` | yolov8n_ncnn_model/ Requirements: $ pip install "ultralytics[export]" @@ -41,6 +41,7 @@ Inference: yolov8n.tflite # TensorFlow Lite yolov8n_edgetpu.tflite # TensorFlow Edge TPU yolov8n_paddle_model # PaddlePaddle + yolov8n_ncnn_model # NCNN TensorFlow.js: $ cd .. && git clone https://github.com/zldrobit/tfjs-yolov5-example.git && cd tfjs-yolov5-example @@ -48,6 +49,7 @@ TensorFlow.js: $ ln -s ../../yolov5/yolov8n_web_model public/yolov8n_web_model $ npm start """ + import json import os import shutil @@ -64,36 +66,50 @@ import torch from ultralytics.cfg import get_cfg from ultralytics.data.dataset import YOLODataset from ultralytics.data.utils import check_det_dataset -from ultralytics.nn.autobackend import check_class_names -from ultralytics.nn.modules import C2f, Detect, RTDETRDecoder -from ultralytics.nn.tasks import DetectionModel, SegmentationModel -from ultralytics.utils import (ARM64, DEFAULT_CFG, LINUX, LOGGER, MACOS, ROOT, WINDOWS, __version__, callbacks, - colorstr, get_default_args, yaml_save) -from ultralytics.utils.checks import check_imgsz, check_requirements, check_version +from ultralytics.nn.autobackend import check_class_names, default_class_names +from ultralytics.nn.modules import C2f, Detect, RTDETRDecoder, v10Detect +from ultralytics.nn.tasks import DetectionModel, SegmentationModel, WorldModel +from ultralytics.utils import ( + ARM64, + DEFAULT_CFG, + LINUX, + LOGGER, + MACOS, + ROOT, + WINDOWS, + __version__, + callbacks, + colorstr, + get_default_args, + yaml_save, +) +from ultralytics.utils.checks import PYTHON_VERSION, check_imgsz, check_is_path_safe, check_requirements, check_version from ultralytics.utils.downloads import attempt_download_asset, get_github_assets from ultralytics.utils.files import file_size, spaces_in_path from ultralytics.utils.ops import Profile -from ultralytics.utils.torch_utils import get_latest_opset, select_device, smart_inference_mode +from ultralytics.utils.torch_utils import TORCH_1_13, get_latest_opset, select_device, smart_inference_mode def export_formats(): """YOLOv8 export formats.""" import pandas + x = [ - ['PyTorch', '-', '.pt', True, True], - ['TorchScript', 'torchscript', '.torchscript', True, True], - ['ONNX', 'onnx', '.onnx', True, True], - ['OpenVINO', 'openvino', '_openvino_model', True, False], - ['TensorRT', 'engine', '.engine', False, True], - ['CoreML', 'coreml', '.mlpackage', True, False], - ['TensorFlow SavedModel', 'saved_model', '_saved_model', True, True], - ['TensorFlow GraphDef', 'pb', '.pb', True, True], - ['TensorFlow Lite', 'tflite', '.tflite', True, False], - ['TensorFlow Edge TPU', 'edgetpu', '_edgetpu.tflite', True, False], - ['TensorFlow.js', 'tfjs', '_web_model', True, False], - ['PaddlePaddle', 'paddle', '_paddle_model', True, True], - ['ncnn', 'ncnn', '_ncnn_model', True, True], ] - return pandas.DataFrame(x, columns=['Format', 'Argument', 'Suffix', 'CPU', 'GPU']) + ["PyTorch", 
"-", ".pt", True, True], + ["TorchScript", "torchscript", ".torchscript", True, True], + ["ONNX", "onnx", ".onnx", True, True], + ["OpenVINO", "openvino", "_openvino_model", True, False], + ["TensorRT", "engine", ".engine", False, True], + ["CoreML", "coreml", ".mlpackage", True, False], + ["TensorFlow SavedModel", "saved_model", "_saved_model", True, True], + ["TensorFlow GraphDef", "pb", ".pb", True, True], + ["TensorFlow Lite", "tflite", ".tflite", True, False], + ["TensorFlow Edge TPU", "edgetpu", "_edgetpu.tflite", True, False], + ["TensorFlow.js", "tfjs", "_web_model", True, False], + ["PaddlePaddle", "paddle", "_paddle_model", True, True], + ["NCNN", "ncnn", "_ncnn_model", True, True], + ] + return pandas.DataFrame(x, columns=["Format", "Argument", "Suffix", "CPU", "GPU"]) def gd_outputs(gd): @@ -102,7 +118,7 @@ def gd_outputs(gd): for node in gd.node: # tensorflow.core.framework.node_def_pb2.NodeDef name_list.append(node.name) input_list.extend(node.input) - return sorted(f'{x}:0' for x in list(set(name_list) - set(input_list)) if not x.startswith('NoOp')) + return sorted(f"{x}:0" for x in list(set(name_list) - set(input_list)) if not x.startswith("NoOp")) def try_export(inner_func): @@ -111,14 +127,14 @@ def try_export(inner_func): def outer_func(*args, **kwargs): """Export a model.""" - prefix = inner_args['prefix'] + prefix = inner_args["prefix"] try: with Profile() as dt: f, model = inner_func(*args, **kwargs) LOGGER.info(f"{prefix} export success ✅ {dt.t:.1f}s, saved as '{f}' ({file_size(f):.1f} MB)") return f, model except Exception as e: - LOGGER.info(f'{prefix} export failure ❌ {dt.t:.1f}s: {e}') + LOGGER.info(f"{prefix} export failure ❌ {dt.t:.1f}s: {e}") raise e return outer_func @@ -140,53 +156,65 @@ class Exporter: Args: cfg (str, optional): Path to a configuration file. Defaults to DEFAULT_CFG. overrides (dict, optional): Configuration overrides. Defaults to None. - _callbacks (list, optional): List of callback functions. Defaults to None. + _callbacks (dict, optional): Dictionary of callback functions. Defaults to None. """ self.args = get_cfg(cfg, overrides) + if self.args.format.lower() in ("coreml", "mlmodel"): # fix attempt for protobuf<3.20.x errors + os.environ["PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION"] = "python" # must run before TensorBoard callback + self.callbacks = _callbacks or callbacks.get_default_callbacks() callbacks.add_integration_callbacks(self) @smart_inference_mode() def __call__(self, model=None): """Returns list of exported files/dirs after running callbacks.""" - self.run_callbacks('on_export_start') + self.run_callbacks("on_export_start") t = time.time() - format = self.args.format.lower() # to lowercase - if format in ('tensorrt', 'trt'): # 'engine' aliases - format = 'engine' - if format in ('mlmodel', 'mlpackage', 'mlprogram', 'apple', 'ios', 'coreml'): # 'coreml' aliases - os.environ['PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION'] = 'python' # fix attempt for protobuf<3.20.x errors - format = 'coreml' - fmts = tuple(export_formats()['Argument'][1:]) # available export formats - flags = [x == format for x in fmts] + fmt = self.args.format.lower() # to lowercase + if fmt in ("tensorrt", "trt"): # 'engine' aliases + fmt = "engine" + if fmt in ("mlmodel", "mlpackage", "mlprogram", "apple", "ios", "coreml"): # 'coreml' aliases + fmt = "coreml" + fmts = tuple(export_formats()["Argument"][1:]) # available export formats + flags = [x == fmt for x in fmts] if sum(flags) != 1: - raise ValueError(f"Invalid export format='{format}'. 
Valid formats are {fmts}") + raise ValueError(f"Invalid export format='{fmt}'. Valid formats are {fmts}") jit, onnx, xml, engine, coreml, saved_model, pb, tflite, edgetpu, tfjs, paddle, ncnn = flags # export booleans # Device - if format == 'engine' and self.args.device is None: - LOGGER.warning('WARNING ⚠️ TensorRT requires GPU export, automatically assigning device=0') - self.args.device = '0' - self.device = select_device('cpu' if self.args.device is None else self.args.device) + if fmt == "engine" and self.args.device is None: + LOGGER.warning("WARNING ⚠️ TensorRT requires GPU export, automatically assigning device=0") + self.args.device = "0" + self.device = select_device("cpu" if self.args.device is None else self.args.device) # Checks + if not hasattr(model, "names"): + model.names = default_class_names() model.names = check_class_names(model.names) - if self.args.half and onnx and self.device.type == 'cpu': - LOGGER.warning('WARNING ⚠️ half=True only compatible with GPU export, i.e. use device=0') + if self.args.half and onnx and self.device.type == "cpu": + LOGGER.warning("WARNING ⚠️ half=True only compatible with GPU export, i.e. use device=0") self.args.half = False - assert not self.args.dynamic, 'half=True not compatible with dynamic=True, i.e. use only one.' + assert not self.args.dynamic, "half=True not compatible with dynamic=True, i.e. use only one." self.imgsz = check_imgsz(self.args.imgsz, stride=model.stride, min_dim=2) # check image size if self.args.optimize: assert not ncnn, "optimize=True not compatible with format='ncnn', i.e. use optimize=False" - assert self.device.type == 'cpu', "optimize=True not compatible with cuda devices, i.e. use device='cpu'" + assert self.device.type == "cpu", "optimize=True not compatible with cuda devices, i.e. use device='cpu'" if edgetpu and not LINUX: - raise SystemError('Edge TPU export only supported on Linux. See https://coral.ai/docs/edgetpu/compiler/') + raise SystemError("Edge TPU export only supported on Linux. See https://coral.ai/docs/edgetpu/compiler/") + if isinstance(model, WorldModel): + LOGGER.warning( + "WARNING ⚠️ YOLOWorld (original version) export is not supported to any format.\n" + "WARNING ⚠️ YOLOWorldv2 models (i.e. 'yolov8s-worldv2.pt') only support export to " + "(torchscript, onnx, openvino, engine, coreml) formats. " + "See https://docs.ultralytics.com/models/yolo-world for details." 
+ ) # Input im = torch.zeros(self.args.batch, 3, *self.imgsz).to(self.device) file = Path( - getattr(model, 'pt_path', None) or getattr(model, 'yaml_file', None) or model.yaml.get('yaml_file', '')) - if file.suffix in ('.yaml', '.yml'): + getattr(model, "pt_path", None) or getattr(model, "yaml_file", None) or model.yaml.get("yaml_file", "") + ) + if file.suffix in {".yaml", ".yml"}: file = Path(file.name) # Update model @@ -197,10 +225,13 @@ class Exporter: model.float() model = model.fuse() for m in model.modules(): - if isinstance(m, (Detect, RTDETRDecoder)): # Segment and Pose use Detect base class + if isinstance(m, (Detect, RTDETRDecoder)): # includes all Detect subclasses like Segment, Pose, OBB m.dynamic = self.args.dynamic m.export = True m.format = self.args.format + if isinstance(m, v10Detect): + m.max_det = self.args.max_det + elif isinstance(m, C2f) and not any((saved_model, pb, tflite, edgetpu, tfjs)): # EdgeTPU does not support FlexSplitV while split provides cleaner ONNX graph m.forward = m.forward_split @@ -208,47 +239,54 @@ class Exporter: y = None for _ in range(2): y = model(im) # dry runs - if self.args.half and (engine or onnx) and self.device.type != 'cpu': + if self.args.half and onnx and self.device.type != "cpu": im, model = im.half(), model.half() # to FP16 # Filter warnings - warnings.filterwarnings('ignore', category=torch.jit.TracerWarning) # suppress TracerWarning - warnings.filterwarnings('ignore', category=UserWarning) # suppress shape prim::Constant missing ONNX warning - warnings.filterwarnings('ignore', category=DeprecationWarning) # suppress CoreML np.bool deprecation warning + warnings.filterwarnings("ignore", category=torch.jit.TracerWarning) # suppress TracerWarning + warnings.filterwarnings("ignore", category=UserWarning) # suppress shape prim::Constant missing ONNX warning + warnings.filterwarnings("ignore", category=DeprecationWarning) # suppress CoreML np.bool deprecation warning # Assign self.im = im self.model = model self.file = file - self.output_shape = tuple(y.shape) if isinstance(y, torch.Tensor) else \ - tuple(tuple(x.shape if isinstance(x, torch.Tensor) else []) for x in y) - self.pretty_name = Path(self.model.yaml.get('yaml_file', self.file)).stem.replace('yolo', 'YOLO') - data = model.args['data'] if hasattr(model, 'args') and isinstance(model.args, dict) else '' + self.output_shape = ( + tuple(y.shape) + if isinstance(y, torch.Tensor) + else tuple(tuple(x.shape if isinstance(x, torch.Tensor) else []) for x in y) + ) + self.pretty_name = Path(self.model.yaml.get("yaml_file", self.file)).stem.replace("yolo", "YOLO") + data = model.args["data"] if hasattr(model, "args") and isinstance(model.args, dict) else "" description = f'Ultralytics {self.pretty_name} model {f"trained on {data}" if data else ""}' self.metadata = { - 'description': description, - 'author': 'Ultralytics', - 'license': 'AGPL-3.0 https://ultralytics.com/license', - 'date': datetime.now().isoformat(), - 'version': __version__, - 'stride': int(max(model.stride)), - 'task': model.task, - 'batch': self.args.batch, - 'imgsz': self.imgsz, - 'names': model.names} # model metadata - if model.task == 'pose': - self.metadata['kpt_shape'] = model.model[-1].kpt_shape + "description": description, + "author": "Ultralytics", + "date": datetime.now().isoformat(), + "version": __version__, + "license": "AGPL-3.0 License (https://ultralytics.com/license)", + "docs": "https://docs.ultralytics.com", + "stride": int(max(model.stride)), + "task": model.task, + "batch": self.args.batch, + 
"imgsz": self.imgsz, + "names": model.names, + } # model metadata + if model.task == "pose": + self.metadata["kpt_shape"] = model.model[-1].kpt_shape - LOGGER.info(f"\n{colorstr('PyTorch:')} starting from '{file}' with input shape {tuple(im.shape)} BCHW and " - f'output shape(s) {self.output_shape} ({file_size(file):.1f} MB)') + LOGGER.info( + f"\n{colorstr('PyTorch:')} starting from '{file}' with input shape {tuple(im.shape)} BCHW and " + f'output shape(s) {self.output_shape} ({file_size(file):.1f} MB)' + ) # Exports - f = [''] * len(fmts) # exported filenames + f = [""] * len(fmts) # exported filenames if jit or ncnn: # TorchScript f[0], _ = self.export_torchscript() if engine: # TensorRT required before ONNX f[1], _ = self.export_engine() - if onnx or xml: # OpenVINO requires ONNX + if onnx: # ONNX f[2], _ = self.export_onnx() if xml: # OpenVINO f[3], _ = self.export_openvino() @@ -262,12 +300,12 @@ class Exporter: if tflite: f[7], _ = self.export_tflite(keras_model=keras_model, nms=False, agnostic_nms=self.args.agnostic_nms) if edgetpu: - f[8], _ = self.export_edgetpu(tflite_model=Path(f[5]) / f'{self.file.stem}_full_integer_quant.tflite') + f[8], _ = self.export_edgetpu(tflite_model=Path(f[5]) / f"{self.file.stem}_full_integer_quant.tflite") if tfjs: f[9], _ = self.export_tfjs() if paddle: # PaddlePaddle f[10], _ = self.export_paddle() - if ncnn: # ncnn + if ncnn: # NCNN f[11], _ = self.export_ncnn() # Finish @@ -275,58 +313,65 @@ class Exporter: if any(f): f = str(Path(f[-1])) square = self.imgsz[0] == self.imgsz[1] - s = '' if square else f"WARNING ⚠️ non-PyTorch val requires square images, 'imgsz={self.imgsz}' will not " \ - f"work. Use export 'imgsz={max(self.imgsz)}' if val is required." - imgsz = self.imgsz[0] if square else str(self.imgsz)[1:-1].replace(' ', '') - predict_data = f'data={data}' if model.task == 'segment' and format == 'pb' else '' - q = 'int8' if self.args.int8 else 'half' if self.args.half else '' # quantization - LOGGER.info(f'\nExport complete ({time.time() - t:.1f}s)' - f"\nResults saved to {colorstr('bold', file.parent.resolve())}" - f'\nPredict: yolo predict task={model.task} model={f} imgsz={imgsz} {q} {predict_data}' - f'\nValidate: yolo val task={model.task} model={f} imgsz={imgsz} data={data} {q} {s}' - f'\nVisualize: https://netron.app') + s = ( + "" + if square + else f"WARNING ⚠️ non-PyTorch val requires square images, 'imgsz={self.imgsz}' will not " + f"work. Use export 'imgsz={max(self.imgsz)}' if val is required." 
     @try_export
-    def export_torchscript(self, prefix=colorstr('TorchScript:')):
+    def export_torchscript(self, prefix=colorstr("TorchScript:")):
         """YOLOv8 TorchScript model export."""
-        LOGGER.info(f'\n{prefix} starting export with torch {torch.__version__}...')
-        f = self.file.with_suffix('.torchscript')
+        LOGGER.info(f"\n{prefix} starting export with torch {torch.__version__}...")
+        f = self.file.with_suffix(".torchscript")

         ts = torch.jit.trace(self.model, self.im, strict=False)
-        extra_files = {'config.txt': json.dumps(self.metadata)}  # torch._C.ExtraFilesMap()
+        extra_files = {"config.txt": json.dumps(self.metadata)}  # torch._C.ExtraFilesMap()
         if self.args.optimize:  # https://pytorch.org/tutorials/recipes/mobile_interpreter.html
-            LOGGER.info(f'{prefix} optimizing for mobile...')
+            LOGGER.info(f"{prefix} optimizing for mobile...")
             from torch.utils.mobile_optimizer import optimize_for_mobile
+
             optimize_for_mobile(ts)._save_for_lite_interpreter(str(f), _extra_files=extra_files)
         else:
             ts.save(str(f), _extra_files=extra_files)
         return f, None

     @try_export
-    def export_onnx(self, prefix=colorstr('ONNX:')):
+    def export_onnx(self, prefix=colorstr("ONNX:")):
         """YOLOv8 ONNX export."""
-        requirements = ['onnx>=1.12.0']
+        requirements = ["onnx>=1.12.0"]
         if self.args.simplify:
-            requirements += ['onnxsim>=0.4.33', 'onnxruntime-gpu' if torch.cuda.is_available() else 'onnxruntime']
+            requirements += ["onnxslim==0.1.31", "onnxruntime" + ("-gpu" if torch.cuda.is_available() else "")]
         check_requirements(requirements)
         import onnx  # noqa

         opset_version = self.args.opset or get_latest_opset()
-        LOGGER.info(f'\n{prefix} starting export with onnx {onnx.__version__} opset {opset_version}...')
-        f = str(self.file.with_suffix('.onnx'))
+        LOGGER.info(f"\n{prefix} starting export with onnx {onnx.__version__} opset {opset_version}...")
+        f = str(self.file.with_suffix(".onnx"))

-        output_names = ['output0', 'output1'] if isinstance(self.model, SegmentationModel) else ['output0']
+        output_names = ["output0", "output1"] if isinstance(self.model, SegmentationModel) else ["output0"]
         dynamic = self.args.dynamic
         if dynamic:
-            dynamic = {'images': {0: 'batch', 2: 'height', 3: 'width'}}  # shape(1,3,640,640)
+            dynamic = {"images": {0: "batch", 2: "height", 3: "width"}}  # shape(1,3,640,640)
             if isinstance(self.model, SegmentationModel):
-                dynamic['output0'] = {0: 'batch', 2: 'anchors'}  # shape(1, 116, 8400)
-                dynamic['output1'] = {0: 'batch', 2: 'mask_height', 3: 'mask_width'}  # shape(1,32,160,160)
+                dynamic["output0"] = {0: "batch", 2: "anchors"}  # shape(1, 116, 8400)
+                dynamic["output1"] = {0: "batch", 2: "mask_height", 3: "mask_width"}  # shape(1,32,160,160)
             elif isinstance(self.model, DetectionModel):
-                dynamic['output0'] = {0: 'batch', 2: 'anchors'}  # shape(1, 84, 8400)
+                dynamic["output0"] = {0: "batch", 2: "anchors"}  # shape(1, 84, 8400)

         torch.onnx.export(
             self.model.cpu() if dynamic else self.model,  # dynamic=True only compatible with cpu
@@ -335,9 +380,10 @@ class Exporter:
             verbose=False,
             opset_version=opset_version,
             do_constant_folding=True,  # WARNING: DNN inference with torch>=1.12 may require do_constant_folding=False
-            input_names=['images'],
+            input_names=["images"],
             output_names=output_names,
-            dynamic_axes=dynamic or None)
+            dynamic_axes=dynamic or None,
+        )

         # Checks
         model_onnx = onnx.load(f)  # load onnx model
@@ -346,14 +392,17 @@ class Exporter:

         # Simplify
         if self.args.simplify:
             try:
-                import onnxsim
+                import onnxslim

-                LOGGER.info(f'{prefix} simplifying with onnxsim {onnxsim.__version__}...')
-                # subprocess.run(f'onnxsim "{f}" "{f}"', shell=True)
-                model_onnx, check = onnxsim.simplify(model_onnx)
-                assert check, 'Simplified ONNX model could not be validated'
+                LOGGER.info(f"{prefix} simplifying with onnxslim {onnxslim.__version__}...")
+                model_onnx = onnxslim.slim(model_onnx)
+
+                # ONNX Simplifier (deprecated as must be compiled with 'cmake' in aarch64 and Conda CI environments)
+                # import onnxsim
+                # model_onnx, check = onnxsim.simplify(model_onnx)
+                # assert check, "Simplified ONNX model could not be validated"
             except Exception as e:
-                LOGGER.info(f'{prefix} simplifier failure: {e}')
+                LOGGER.warning(f"{prefix} simplifier failure: {e}")

         # Metadata
         for k, v in self.metadata.items():
@@ -364,162 +413,193 @@ class Exporter:
         return f, model_onnx
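A quick smoke test for the file export_onnx writes (a sketch, not part of the patch; assumes onnxruntime is installed and a static 640x640 detection export, with the 'images' input name set by torch.onnx.export above):

    import numpy as np
    import onnxruntime as ort

    sess = ort.InferenceSession("yolov8n.onnx", providers=["CPUExecutionProvider"])
    print(sess.get_modelmeta().custom_metadata_map)  # metadata written in the loop above
    im = np.zeros((1, 3, 640, 640), dtype=np.float32)  # BCHW at the export imgsz
    outputs = sess.run(None, {"images": im})
    print([o.shape for o in outputs])  # e.g. [(1, 84, 8400)] for a detection model
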
     @try_export
-    def export_openvino(self, prefix=colorstr('OpenVINO:')):
+    def export_openvino(self, prefix=colorstr("OpenVINO:")):
         """YOLOv8 OpenVINO export."""
-        check_requirements('openvino-dev>=2023.0')  # requires openvino-dev: https://pypi.org/project/openvino-dev/
-        import openvino.runtime as ov  # noqa
-        from openvino.tools import mo  # noqa
+        check_requirements("openvino>=2024.0.0")  # requires openvino: https://pypi.org/project/openvino/
+        import openvino as ov

-        LOGGER.info(f'\n{prefix} starting export with openvino {ov.__version__}...')
-        f = str(self.file).replace(self.file.suffix, f'_openvino_model{os.sep}')
-        fq = str(self.file).replace(self.file.suffix, f'_int8_openvino_model{os.sep}')
-        f_onnx = self.file.with_suffix('.onnx')
-        f_ov = str(Path(f) / self.file.with_suffix('.xml').name)
-        fq_ov = str(Path(fq) / self.file.with_suffix('.xml').name)
+        LOGGER.info(f"\n{prefix} starting export with openvino {ov.__version__}...")
+        assert TORCH_1_13, f"OpenVINO export requires torch>=1.13.0 but torch=={torch.__version__} is installed"
+        ov_model = ov.convert_model(
+            self.model.cpu(),
+            input=None if self.args.dynamic else [self.im.shape],
+            example_input=self.im,
+        )

         def serialize(ov_model, file):
             """Set RT info, serialize and save metadata YAML."""
-            ov_model.set_rt_info('YOLOv8', ['model_info', 'model_type'])
-            ov_model.set_rt_info(True, ['model_info', 'reverse_input_channels'])
-            ov_model.set_rt_info(114, ['model_info', 'pad_value'])
-            ov_model.set_rt_info([255.0], ['model_info', 'scale_values'])
-            ov_model.set_rt_info(self.args.iou, ['model_info', 'iou_threshold'])
-            ov_model.set_rt_info([v.replace(' ', '_') for v in self.model.names.values()], ['model_info', 'labels'])
-            if self.model.task != 'classify':
-                ov_model.set_rt_info('fit_to_window_letterbox', ['model_info', 'resize_type'])
+            ov_model.set_rt_info("YOLOv8", ["model_info", "model_type"])
+            ov_model.set_rt_info(True, ["model_info", "reverse_input_channels"])
+            ov_model.set_rt_info(114, ["model_info", "pad_value"])
+            ov_model.set_rt_info([255.0], ["model_info", "scale_values"])
+            ov_model.set_rt_info(self.args.iou, ["model_info", "iou_threshold"])
+            ov_model.set_rt_info([v.replace(" ", "_") for v in self.model.names.values()], ["model_info", "labels"])
+            if self.model.task != "classify":
+                ov_model.set_rt_info("fit_to_window_letterbox", ["model_info", "resize_type"])

-            ov.serialize(ov_model, file)  # save
-            yaml_save(Path(file).parent / 'metadata.yaml', self.metadata)  # add metadata.yaml
-
-        ov_model = mo.convert_model(f_onnx,
-                                    model_name=self.pretty_name,
-                                    framework='onnx',
-                                    compress_to_fp16=self.args.half)  # export
+            ov.runtime.save_model(ov_model, file, compress_to_fp16=self.args.half)
+            yaml_save(Path(file).parent / "metadata.yaml", self.metadata)  # add metadata.yaml

         if self.args.int8:
-            assert self.args.data, "INT8 export requires a data argument for calibration, i.e. 'data=coco8.yaml'"
-            check_requirements('nncf>=2.5.0')
+            fq = str(self.file).replace(self.file.suffix, f"_int8_openvino_model{os.sep}")
+            fq_ov = str(Path(fq) / self.file.with_suffix(".xml").name)
+            if not self.args.data:
+                self.args.data = DEFAULT_CFG.data or "coco128.yaml"
+                LOGGER.warning(
+                    f"{prefix} WARNING ⚠️ INT8 export requires a missing 'data' arg for calibration. "
+                    f"Using default 'data={self.args.data}'."
+                )
+            check_requirements("nncf>=2.8.0")
             import nncf

             def transform_fn(data_item):
                 """Quantization transform function."""
-                im = data_item['img'].numpy().astype(np.float32) / 255.0  # uint8 to fp16/32 and 0 - 255 to 0.0 - 1.0
+                assert (
+                    data_item["img"].dtype == torch.uint8
+                ), "Input image must be uint8 for the quantization preprocessing"
+                im = data_item["img"].numpy().astype(np.float32) / 255.0  # uint8 to fp16/32 and 0 - 255 to 0.0 - 1.0
                 return np.expand_dims(im, 0) if im.ndim == 3 else im

             # Generate calibration data for integer quantization
             LOGGER.info(f"{prefix} collecting INT8 calibration images from 'data={self.args.data}'")
             data = check_det_dataset(self.args.data)
-            dataset = YOLODataset(data['val'], data=data, imgsz=self.imgsz[0], augment=False)
+            dataset = YOLODataset(data["val"], data=data, imgsz=self.imgsz[0], augment=False)
+            n = len(dataset)
+            if n < 300:
+                LOGGER.warning(f"{prefix} WARNING ⚠️ >300 images recommended for INT8 calibration, found {n} images.")
             quantization_dataset = nncf.Dataset(dataset, transform_fn)
-            ignored_scope = nncf.IgnoredScope(types=['Multiply', 'Subtract', 'Sigmoid'])  # ignore operation
-            quantized_ov_model = nncf.quantize(ov_model,
-                                               quantization_dataset,
-                                               preset=nncf.QuantizationPreset.MIXED,
-                                               ignored_scope=ignored_scope)
+
+            ignored_scope = None
+            if isinstance(self.model.model[-1], Detect):
+                # Includes all Detect subclasses like Segment, Pose, OBB, WorldDetect
+                head_module_name = ".".join(list(self.model.named_modules())[-1][0].split(".")[:2])
+
+                ignored_scope = nncf.IgnoredScope(  # ignore operations
+                    patterns=[
+                        f".*{head_module_name}/.*/Add",
+                        f".*{head_module_name}/.*/Sub*",
+                        f".*{head_module_name}/.*/Mul*",
+                        f".*{head_module_name}/.*/Div*",
+                        f".*{head_module_name}\\.dfl.*",
+                    ],
+                    types=["Sigmoid"],
+                )
+
+            quantized_ov_model = nncf.quantize(
+                ov_model, quantization_dataset, preset=nncf.QuantizationPreset.MIXED, ignored_scope=ignored_scope
+            )
             serialize(quantized_ov_model, fq_ov)
             return fq, None

+        f = str(self.file).replace(self.file.suffix, f"_openvino_model{os.sep}")
+        f_ov = str(Path(f) / self.file.with_suffix(".xml").name)
+        serialize(ov_model, f_ov)
         return f, None
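Correspondingly, the _openvino_model directory written above can be consumed like this (a sketch, assuming the openvino package and a static 640x640 export; file names follow the f_ov convention above):

    import numpy as np
    import openvino as ov

    core = ov.Core()
    ov_model = core.read_model("yolov8n_openvino_model/yolov8n.xml")
    compiled = core.compile_model(ov_model, "CPU")
    results = compiled(np.zeros((1, 3, 640, 640), dtype=np.float32))  # dict-like, keyed by output nodes
    print([v.shape for v in results.values()])
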
     @try_export
-    def export_paddle(self, prefix=colorstr('PaddlePaddle:')):
+    def export_paddle(self, prefix=colorstr("PaddlePaddle:")):
         """YOLOv8 Paddle export."""
-        check_requirements(('paddlepaddle', 'x2paddle'))
+        check_requirements(("paddlepaddle", "x2paddle"))
         import x2paddle  # noqa
         from x2paddle.convert import pytorch2paddle  # noqa

-        LOGGER.info(f'\n{prefix} starting export with X2Paddle {x2paddle.__version__}...')
-        f = str(self.file).replace(self.file.suffix, f'_paddle_model{os.sep}')
+        LOGGER.info(f"\n{prefix} starting export with X2Paddle {x2paddle.__version__}...")
+        f = str(self.file).replace(self.file.suffix, f"_paddle_model{os.sep}")

-        pytorch2paddle(module=self.model, save_dir=f, jit_type='trace', input_examples=[self.im])  # export
-        yaml_save(Path(f) / 'metadata.yaml', self.metadata)  # add metadata.yaml
+        pytorch2paddle(module=self.model, save_dir=f, jit_type="trace", input_examples=[self.im])  # export
+        yaml_save(Path(f) / "metadata.yaml", self.metadata)  # add metadata.yaml
         return f, None
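Several exporters in this file persist self.metadata as a metadata.yaml beside the weights (see the yaml_save calls); a reader sketch, assuming PyYAML and the Paddle output directory name used above:

    from pathlib import Path
    import yaml

    meta = yaml.safe_load(Path("yolov8n_paddle_model/metadata.yaml").read_text(encoding="utf-8"))
    print(meta["task"], meta["stride"], meta["imgsz"])  # keys written by Exporter.__call__ above
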
export.""" - check_requirements(('paddlepaddle', 'x2paddle')) + check_requirements(("paddlepaddle", "x2paddle")) import x2paddle # noqa from x2paddle.convert import pytorch2paddle # noqa - LOGGER.info(f'\n{prefix} starting export with X2Paddle {x2paddle.__version__}...') - f = str(self.file).replace(self.file.suffix, f'_paddle_model{os.sep}') + LOGGER.info(f"\n{prefix} starting export with X2Paddle {x2paddle.__version__}...") + f = str(self.file).replace(self.file.suffix, f"_paddle_model{os.sep}") - pytorch2paddle(module=self.model, save_dir=f, jit_type='trace', input_examples=[self.im]) # export - yaml_save(Path(f) / 'metadata.yaml', self.metadata) # add metadata.yaml + pytorch2paddle(module=self.model, save_dir=f, jit_type="trace", input_examples=[self.im]) # export + yaml_save(Path(f) / "metadata.yaml", self.metadata) # add metadata.yaml return f, None @try_export - def export_ncnn(self, prefix=colorstr('ncnn:')): + def export_ncnn(self, prefix=colorstr("NCNN:")): """ - YOLOv8 ncnn export using PNNX https://github.com/pnnx/pnnx. + YOLOv8 NCNN export using PNNX https://github.com/pnnx/pnnx. """ - check_requirements('git+https://github.com/Tencent/ncnn.git' if ARM64 else 'ncnn') # requires ncnn + check_requirements("ncnn") import ncnn # noqa - LOGGER.info(f'\n{prefix} starting export with ncnn {ncnn.__version__}...') - f = Path(str(self.file).replace(self.file.suffix, f'_ncnn_model{os.sep}')) - f_ts = self.file.with_suffix('.torchscript') + LOGGER.info(f"\n{prefix} starting export with NCNN {ncnn.__version__}...") + f = Path(str(self.file).replace(self.file.suffix, f"_ncnn_model{os.sep}")) + f_ts = self.file.with_suffix(".torchscript") - pnnx_filename = 'pnnx.exe' if WINDOWS else 'pnnx' - if Path(pnnx_filename).is_file(): - pnnx = pnnx_filename - elif (ROOT / pnnx_filename).is_file(): - pnnx = ROOT / pnnx_filename - else: + name = Path("pnnx.exe" if WINDOWS else "pnnx") # PNNX filename + pnnx = name if name.is_file() else ROOT / name + if not pnnx.is_file(): LOGGER.warning( - f'{prefix} WARNING ⚠️ PNNX not found. Attempting to download binary file from ' - 'https://github.com/pnnx/pnnx/.\nNote PNNX Binary file must be placed in current working directory ' - f'or in {ROOT}. See PNNX repo for full installation instructions.') - _, assets = get_github_assets(repo='pnnx/pnnx', retry=True) - system = 'macos' if MACOS else 'ubuntu' if LINUX else 'windows' # operating system - asset = [x for x in assets if system in x][0] if assets else \ - f'https://github.com/pnnx/pnnx/releases/download/20230816/pnnx-20230816-{system}.zip' # fallback - asset = attempt_download_asset(asset, repo='pnnx/pnnx', release='latest') - unzip_dir = Path(asset).with_suffix('') - pnnx = ROOT / pnnx_filename # new location - (unzip_dir / pnnx_filename).rename(pnnx) # move binary to ROOT - shutil.rmtree(unzip_dir) # delete unzip dir - Path(asset).unlink() # delete zip - pnnx.chmod(0o777) # set read, write, and execute permissions for everyone + f"{prefix} WARNING ⚠️ PNNX not found. Attempting to download binary file from " + "https://github.com/pnnx/pnnx/.\nNote PNNX Binary file must be placed in current working directory " + f"or in {ROOT}. See PNNX repo for full installation instructions." 
     @try_export
-    def export_coreml(self, prefix=colorstr('CoreML:')):
+    def export_coreml(self, prefix=colorstr("CoreML:")):
         """YOLOv8 CoreML export."""
-        mlmodel = self.args.format.lower() == 'mlmodel'  # legacy *.mlmodel export format requested
-        check_requirements('coremltools>=6.0,<=6.2' if mlmodel else 'coremltools>=7.0.b1')
+        mlmodel = self.args.format.lower() == "mlmodel"  # legacy *.mlmodel export format requested
+        check_requirements("coremltools>=6.0,<=6.2" if mlmodel else "coremltools>=7.0")
         import coremltools as ct  # noqa

-        LOGGER.info(f'\n{prefix} starting export with coremltools {ct.__version__}...')
-        f = self.file.with_suffix('.mlmodel' if mlmodel else '.mlpackage')
+        LOGGER.info(f"\n{prefix} starting export with coremltools {ct.__version__}...")
+        assert not WINDOWS, "CoreML export is not supported on Windows, please run on macOS or Linux."
+        f = self.file.with_suffix(".mlmodel" if mlmodel else ".mlpackage")
         if f.is_dir():
             shutil.rmtree(f)

         bias = [0.0, 0.0, 0.0]
         scale = 1 / 255
         classifier_config = None
-        if self.model.task == 'classify':
+        if self.model.task == "classify":
             classifier_config = ct.ClassifierConfig(list(self.model.names.values())) if self.args.nms else None
             model = self.model
-        elif self.model.task == 'detect':
+        elif self.model.task == "detect":
             model = IOSDetectModel(self.model, self.im) if self.args.nms else self.model
         else:
             if self.args.nms:
@@ -528,67 +608,71 @@ class Exporter:
             model = self.model

         ts = torch.jit.trace(model.eval(), self.im, strict=False)  # TorchScript model
-        ct_model = ct.convert(ts,
-                              inputs=[ct.ImageType('image', shape=self.im.shape, scale=scale, bias=bias)],
-                              classifier_config=classifier_config,
-                              convert_to='neuralnetwork' if mlmodel else 'mlprogram')
-        bits, mode = (8, 'kmeans') if self.args.int8 else (16, 'linear') if self.args.half else (32, None)
+        ct_model = ct.convert(
+            ts,
+            inputs=[ct.ImageType("image", shape=self.im.shape, scale=scale, bias=bias)],
+            classifier_config=classifier_config,
+            convert_to="neuralnetwork" if mlmodel else "mlprogram",
+        )
+        bits, mode = (8, "kmeans") if self.args.int8 else (16, "linear") if self.args.half else (32, None)
         if bits < 32:
-            if 'kmeans' in mode:
-                check_requirements('scikit-learn')  # scikit-learn package required for k-means quantization
+            if "kmeans" in mode:
+                check_requirements("scikit-learn")  # scikit-learn package required for k-means quantization
             if mlmodel:
                 ct_model = ct.models.neural_network.quantization_utils.quantize_weights(ct_model, bits, mode)
             elif bits == 8:  # mlprogram already quantized to FP16
                 import coremltools.optimize.coreml as cto
-                op_config = cto.OpPalettizerConfig(mode='kmeans', nbits=bits, weight_threshold=512)
+
+                op_config = cto.OpPalettizerConfig(mode="kmeans", nbits=bits, weight_threshold=512)
                 config = cto.OptimizationConfig(global_config=op_config)
                 ct_model = cto.palettize_weights(ct_model, config=config)
-        if self.args.nms and self.model.task == 'detect':
+        if self.args.nms and self.model.task == "detect":
             if mlmodel:
-                import platform
-
-                # coremltools<=6.2 NMS export requires Python<3.11
-                check_version(platform.python_version(), '<3.11', name='Python ', hard=True)
+                check_version(PYTHON_VERSION, "<3.11", name="Python ", hard=True)
                 weights_dir = None
             else:
                 ct_model.save(str(f))  # save otherwise weights_dir does not exist
-                weights_dir = str(f / 'Data/com.apple.CoreML/weights')
+                weights_dir = str(f / "Data/com.apple.CoreML/weights")
             ct_model = self._pipeline_coreml(ct_model, weights_dir=weights_dir)

         m = self.metadata  # metadata dict
-        ct_model.short_description = m.pop('description')
-        ct_model.author = m.pop('author')
-        ct_model.license = m.pop('license')
-        ct_model.version = m.pop('version')
+        ct_model.short_description = m.pop("description")
+        ct_model.author = m.pop("author")
+        ct_model.license = m.pop("license")
+        ct_model.version = m.pop("version")
         ct_model.user_defined_metadata.update({k: str(v) for k, v in m.items()})
         try:
             ct_model.save(str(f))  # save *.mlpackage
         except Exception as e:
             LOGGER.warning(
-                f'{prefix} WARNING ⚠️ CoreML export to *.mlpackage failed ({e}), reverting to *.mlmodel export. '
-                f'Known coremltools Python 3.11 and Windows bugs https://github.com/apple/coremltools/issues/1928.')
-            f = f.with_suffix('.mlmodel')
+                f"{prefix} WARNING ⚠️ CoreML export to *.mlpackage failed ({e}), reverting to *.mlmodel export. "
+                f"Known coremltools Python 3.11 and Windows bugs https://github.com/apple/coremltools/issues/1928."
+            )
+            f = f.with_suffix(".mlmodel")
            ct_model.save(str(f))
         return f, ct_model
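A small check of the resulting CoreML package (a sketch; coremltools prediction only runs on macOS, as _pipeline_coreml below also notes, and the 'image' input name comes from ct.ImageType above):

    import coremltools as ct
    from PIL import Image

    mlmodel = ct.models.MLModel("yolov8n.mlpackage")
    img = Image.new("RGB", (640, 640))  # blank RGB image at the export size
    out = mlmodel.predict({"image": img})
    print(list(out))  # output feature names
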
     @try_export
-    def export_engine(self, prefix=colorstr('TensorRT:')):
+    def export_engine(self, prefix=colorstr("TensorRT:")):
         """YOLOv8 TensorRT export https://developer.nvidia.com/tensorrt."""
-        assert self.im.device.type != 'cpu', "export running on CPU but must be on GPU, i.e. use 'device=0'"
+        assert self.im.device.type != "cpu", "export running on CPU but must be on GPU, i.e. use 'device=0'"
+        f_onnx, _ = self.export_onnx()  # run before TRT import https://github.com/ultralytics/ultralytics/issues/7016
+
         try:
             import tensorrt as trt  # noqa
         except ImportError:
             if LINUX:
-                check_requirements('nvidia-tensorrt', cmds='-U --index-url https://pypi.ngc.nvidia.com')
+                check_requirements("nvidia-tensorrt", cmds="-U --index-url https://pypi.ngc.nvidia.com")
             import tensorrt as trt  # noqa
-        check_version(trt.__version__, '7.0.0', hard=True)  # require tensorrt>=7.0.0
-        self.args.simplify = True
-        f_onnx, _ = self.export_onnx()
+        check_version(trt.__version__, "7.0.0", hard=True)  # require tensorrt>=7.0.0

-        LOGGER.info(f'\n{prefix} starting export with TensorRT {trt.__version__}...')
-        assert Path(f_onnx).exists(), f'failed to export ONNX file: {f_onnx}'
-        f = self.file.with_suffix('.engine')  # TensorRT engine file
+        self.args.simplify = True
+
+        LOGGER.info(f"\n{prefix} starting export with TensorRT {trt.__version__}...")
+        assert Path(f_onnx).exists(), f"failed to export ONNX file: {f_onnx}"
+        f = self.file.with_suffix(".engine")  # TensorRT engine file
         logger = trt.Logger(trt.Logger.INFO)
         if self.args.verbose:
             logger.min_severity = trt.Logger.Severity.VERBOSE

@@ -598,11 +682,11 @@ class Exporter:
         config.max_workspace_size = self.args.workspace * 1 << 30
         # config.set_memory_pool_limit(trt.MemoryPoolType.WORKSPACE, workspace << 30)  # fix TRT 8.4 deprecation notice

-        flag = (1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH))
+        flag = 1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)
         network = builder.create_network(flag)
         parser = trt.OnnxParser(network, logger)
         if not parser.parse_from_file(f_onnx):
-            raise RuntimeError(f'failed to load ONNX file: {f_onnx}')
+            raise RuntimeError(f"failed to load ONNX file: {f_onnx}")

         inputs = [network.get_input(i) for i in range(network.num_inputs)]
         outputs = [network.get_output(i) for i in range(network.num_outputs)]
@@ -621,15 +705,19 @@ class Exporter:
             config.add_optimization_profile(profile)

         LOGGER.info(
-            f'{prefix} building FP{16 if builder.platform_has_fast_fp16 and self.args.half else 32} engine as {f}')
+            f"{prefix} building FP{16 if builder.platform_has_fast_fp16 and self.args.half else 32} engine as {f}"
+        )
         if builder.platform_has_fast_fp16 and self.args.half:
             config.set_flag(trt.BuilderFlag.FP16)

+        del self.model
+        torch.cuda.empty_cache()
+
         # Write file
-        with builder.build_engine(network, config) as engine, open(f, 'wb') as t:
+        with builder.build_engine(network, config) as engine, open(f, "wb") as t:
             # Metadata
             meta = json.dumps(self.metadata)
-            t.write(len(meta).to_bytes(4, byteorder='little', signed=True))
+            t.write(len(meta).to_bytes(4, byteorder="little", signed=True))
             t.write(meta.encode())
             # Model
             t.write(engine.serialize())
@@ -637,83 +725,114 @@ class Exporter:
         return f, None
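Because export_engine writes a 4-byte little-endian length followed by JSON metadata before the serialized engine, the file is self-describing; a reader sketch that mirrors the writer above (assumes tensorrt is installed; file name illustrative):

    import json
    import tensorrt as trt

    with open("yolov8n.engine", "rb") as t:
        meta_len = int.from_bytes(t.read(4), byteorder="little")  # length prefix written above
        metadata = json.loads(t.read(meta_len).decode("utf-8"))   # model metadata dict
        engine_bytes = t.read()                                   # remaining bytes: serialized engine
    print(metadata["task"], metadata["imgsz"])
    runtime = trt.Runtime(trt.Logger(trt.Logger.INFO))
    engine = runtime.deserialize_cuda_engine(engine_bytes)
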
" + f"Known coremltools Python 3.11 and Windows bugs https://github.com/apple/coremltools/issues/1928." + ) + f = f.with_suffix(".mlmodel") ct_model.save(str(f)) return f, ct_model @try_export - def export_engine(self, prefix=colorstr('TensorRT:')): + def export_engine(self, prefix=colorstr("TensorRT:")): """YOLOv8 TensorRT export https://developer.nvidia.com/tensorrt.""" - assert self.im.device.type != 'cpu', "export running on CPU but must be on GPU, i.e. use 'device=0'" + assert self.im.device.type != "cpu", "export running on CPU but must be on GPU, i.e. use 'device=0'" + f_onnx, _ = self.export_onnx() # run before TRT import https://github.com/ultralytics/ultralytics/issues/7016 + try: import tensorrt as trt # noqa except ImportError: if LINUX: - check_requirements('nvidia-tensorrt', cmds='-U --index-url https://pypi.ngc.nvidia.com') + check_requirements("nvidia-tensorrt", cmds="-U --index-url https://pypi.ngc.nvidia.com") import tensorrt as trt # noqa - check_version(trt.__version__, '7.0.0', hard=True) # require tensorrt>=7.0.0 - self.args.simplify = True - f_onnx, _ = self.export_onnx() + check_version(trt.__version__, "7.0.0", hard=True) # require tensorrt>=7.0.0 - LOGGER.info(f'\n{prefix} starting export with TensorRT {trt.__version__}...') - assert Path(f_onnx).exists(), f'failed to export ONNX file: {f_onnx}' - f = self.file.with_suffix('.engine') # TensorRT engine file + self.args.simplify = True + + LOGGER.info(f"\n{prefix} starting export with TensorRT {trt.__version__}...") + assert Path(f_onnx).exists(), f"failed to export ONNX file: {f_onnx}" + f = self.file.with_suffix(".engine") # TensorRT engine file logger = trt.Logger(trt.Logger.INFO) if self.args.verbose: logger.min_severity = trt.Logger.Severity.VERBOSE @@ -598,11 +682,11 @@ class Exporter: config.max_workspace_size = self.args.workspace * 1 << 30 # config.set_memory_pool_limit(trt.MemoryPoolType.WORKSPACE, workspace << 30) # fix TRT 8.4 deprecation notice - flag = (1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)) + flag = 1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH) network = builder.create_network(flag) parser = trt.OnnxParser(network, logger) if not parser.parse_from_file(f_onnx): - raise RuntimeError(f'failed to load ONNX file: {f_onnx}') + raise RuntimeError(f"failed to load ONNX file: {f_onnx}") inputs = [network.get_input(i) for i in range(network.num_inputs)] outputs = [network.get_output(i) for i in range(network.num_outputs)] @@ -621,15 +705,19 @@ class Exporter: config.add_optimization_profile(profile) LOGGER.info( - f'{prefix} building FP{16 if builder.platform_has_fast_fp16 and self.args.half else 32} engine as {f}') + f"{prefix} building FP{16 if builder.platform_has_fast_fp16 and self.args.half else 32} engine as {f}" + ) if builder.platform_has_fast_fp16 and self.args.half: config.set_flag(trt.BuilderFlag.FP16) + del self.model + torch.cuda.empty_cache() + # Write file - with builder.build_engine(network, config) as engine, open(f, 'wb') as t: + with builder.build_engine(network, config) as engine, open(f, "wb") as t: # Metadata meta = json.dumps(self.metadata) - t.write(len(meta).to_bytes(4, byteorder='little', signed=True)) + t.write(len(meta).to_bytes(4, byteorder="little", signed=True)) t.write(meta.encode()) # Model t.write(engine.serialize()) @@ -637,83 +725,114 @@ class Exporter: return f, None @try_export - def export_saved_model(self, prefix=colorstr('TensorFlow SavedModel:')): + def export_saved_model(self, prefix=colorstr("TensorFlow SavedModel:")): """YOLOv8 
     @try_export
-    def export_pb(self, keras_model, prefix=colorstr('TensorFlow GraphDef:')):
+    def export_pb(self, keras_model, prefix=colorstr("TensorFlow GraphDef:")):
         """YOLOv8 TensorFlow GraphDef *.pb export https://github.com/leimao/Frozen_Graph_TensorFlow."""
         import tensorflow as tf  # noqa
         from tensorflow.python.framework.convert_to_constants import convert_variables_to_constants_v2  # noqa

-        LOGGER.info(f'\n{prefix} starting export with tensorflow {tf.__version__}...')
-        f = self.file.with_suffix('.pb')
+        LOGGER.info(f"\n{prefix} starting export with tensorflow {tf.__version__}...")
+        f = self.file.with_suffix(".pb")

         m = tf.function(lambda x: keras_model(x))  # full model
         m = m.get_concrete_function(tf.TensorSpec(keras_model.inputs[0].shape, keras_model.inputs[0].dtype))
@@ -723,40 +842,43 @@ class Exporter:
         return f, None

     @try_export
-    def export_tflite(self, keras_model, nms, agnostic_nms, prefix=colorstr('TensorFlow Lite:')):
+    def export_tflite(self, keras_model, nms, agnostic_nms, prefix=colorstr("TensorFlow Lite:")):
         """YOLOv8 TensorFlow Lite export."""
         import tensorflow as tf  # noqa

-        LOGGER.info(f'\n{prefix} starting export with tensorflow {tf.__version__}...')
-        saved_model = Path(str(self.file).replace(self.file.suffix, '_saved_model'))
+        LOGGER.info(f"\n{prefix} starting export with tensorflow {tf.__version__}...")
+        saved_model = Path(str(self.file).replace(self.file.suffix, "_saved_model"))
         if self.args.int8:
-            f = saved_model / f'{self.file.stem}_int8.tflite'  # fp32 in/out
+            f = saved_model / f"{self.file.stem}_int8.tflite"  # fp32 in/out
         elif self.args.half:
-            f = saved_model / f'{self.file.stem}_float16.tflite'  # fp32 in/out
+            f = saved_model / f"{self.file.stem}_float16.tflite"  # fp32 in/out
         else:
-            f = saved_model / f'{self.file.stem}_float32.tflite'
+            f = saved_model / f"{self.file.stem}_float32.tflite"
         return str(f), None
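Driving one of the .tflite files named above is straightforward; a sketch assuming tensorflow is installed and a float32 export (paths follow the naming scheme in export_tflite):

    import numpy as np
    import tensorflow as tf

    interpreter = tf.lite.Interpreter(model_path="yolov8n_saved_model/yolov8n_float32.tflite")
    interpreter.allocate_tensors()
    inp = interpreter.get_input_details()[0]
    interpreter.set_tensor(inp["index"], np.zeros(inp["shape"], dtype=np.float32))
    interpreter.invoke()
    out = interpreter.get_output_details()[0]
    print(interpreter.get_tensor(out["index"]).shape)
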
f'onnx2tf -i "{f_onnx}" -o "{f}" -nuo {verbosity} {int8}'.strip() - LOGGER.info(f"{prefix} running '{cmd}'") - subprocess.run(cmd, shell=True) - yaml_save(f / 'metadata.yaml', self.metadata) # add metadata.yaml + LOGGER.info(f"{prefix} starting TFLite export with onnx2tf {onnx2tf.__version__}...") + onnx2tf.convert( + input_onnx_file_path=f_onnx, + output_folder_path=str(f), + not_use_onnxsim=True, + verbosity=verbosity, + output_integer_quantized_tflite=self.args.int8, + quant_type="per-tensor", # "per-tensor" (faster) or "per-channel" (slower but more accurate) + custom_input_op_name_np_data_path=np_data, + ) + yaml_save(f / "metadata.yaml", self.metadata) # add metadata.yaml # Remove/rename TFLite models if self.args.int8: tmp_file.unlink(missing_ok=True) - for file in f.rglob('*_dynamic_range_quant.tflite'): - file.rename(file.with_name(file.stem.replace('_dynamic_range_quant', '_int8') + file.suffix)) - for file in f.rglob('*_integer_quant_with_int16_act.tflite'): + for file in f.rglob("*_dynamic_range_quant.tflite"): + file.rename(file.with_name(file.stem.replace("_dynamic_range_quant", "_int8") + file.suffix)) + for file in f.rglob("*_integer_quant_with_int16_act.tflite"): file.unlink() # delete extra fp16 activation TFLite files # Add TFLite metadata - for file in f.rglob('*.tflite'): - f.unlink() if 'quant_with_int16_act.tflite' in str(f) else self._add_tflite_metadata(file) + for file in f.rglob("*.tflite"): + f.unlink() if "quant_with_int16_act.tflite" in str(f) else self._add_tflite_metadata(file) return str(f), tf.saved_model.load(f, tags=None, options=None) # load saved_model as Keras model @try_export - def export_pb(self, keras_model, prefix=colorstr('TensorFlow GraphDef:')): + def export_pb(self, keras_model, prefix=colorstr("TensorFlow GraphDef:")): """YOLOv8 TensorFlow GraphDef *.pb export https://github.com/leimao/Frozen_Graph_TensorFlow.""" import tensorflow as tf # noqa from tensorflow.python.framework.convert_to_constants import convert_variables_to_constants_v2 # noqa - LOGGER.info(f'\n{prefix} starting export with tensorflow {tf.__version__}...') - f = self.file.with_suffix('.pb') + LOGGER.info(f"\n{prefix} starting export with tensorflow {tf.__version__}...") + f = self.file.with_suffix(".pb") m = tf.function(lambda x: keras_model(x)) # full model m = m.get_concrete_function(tf.TensorSpec(keras_model.inputs[0].shape, keras_model.inputs[0].dtype)) @@ -723,40 +842,43 @@ class Exporter: return f, None @try_export - def export_tflite(self, keras_model, nms, agnostic_nms, prefix=colorstr('TensorFlow Lite:')): + def export_tflite(self, keras_model, nms, agnostic_nms, prefix=colorstr("TensorFlow Lite:")): """YOLOv8 TensorFlow Lite export.""" import tensorflow as tf # noqa - LOGGER.info(f'\n{prefix} starting export with tensorflow {tf.__version__}...') - saved_model = Path(str(self.file).replace(self.file.suffix, '_saved_model')) + LOGGER.info(f"\n{prefix} starting export with tensorflow {tf.__version__}...") + saved_model = Path(str(self.file).replace(self.file.suffix, "_saved_model")) if self.args.int8: - f = saved_model / f'{self.file.stem}_int8.tflite' # fp32 in/out + f = saved_model / f"{self.file.stem}_int8.tflite" # fp32 in/out elif self.args.half: - f = saved_model / f'{self.file.stem}_float16.tflite' # fp32 in/out + f = saved_model / f"{self.file.stem}_float16.tflite" # fp32 in/out else: - f = saved_model / f'{self.file.stem}_float32.tflite' + f = saved_model / f"{self.file.stem}_float32.tflite" return str(f), None @try_export - def export_edgetpu(self, 
     @try_export
-    def export_tfjs(self, prefix=colorstr('TensorFlow.js:')):
+    def export_tfjs(self, prefix=colorstr("TensorFlow.js:")):
         """YOLOv8 TensorFlow.js export."""
-        check_requirements('tensorflowjs')
+        check_requirements("tensorflowjs")
+        if ARM64:
+            # Fix error: `np.object` was a deprecated alias for the builtin `object` when exporting to TF.js on ARM64
+            check_requirements("numpy==1.23.5")
         import tensorflow as tf
         import tensorflowjs as tfjs  # noqa

-        LOGGER.info(f'\n{prefix} starting export with tensorflowjs {tfjs.__version__}...')
-        f = str(self.file).replace(self.file.suffix, '_web_model')  # js dir
-        f_pb = str(self.file.with_suffix('.pb'))  # *.pb path
+        LOGGER.info(f"\n{prefix} starting export with tensorflowjs {tfjs.__version__}...")
+        f = str(self.file).replace(self.file.suffix, "_web_model")  # js dir
+        f_pb = str(self.file.with_suffix(".pb"))  # *.pb path

         gd = tf.Graph().as_graph_def()  # TF GraphDef
-        with open(f_pb, 'rb') as file:
+        with open(f_pb, "rb") as file:
             gd.ParseFromString(file.read())
-        outputs = ','.join(gd_outputs(gd))
-        LOGGER.info(f'\n{prefix} output node names: {outputs}')
+        outputs = ",".join(gd_outputs(gd))
+        LOGGER.info(f"\n{prefix} output node names: {outputs}")

+        quantization = "--quantize_float16" if self.args.half else "--quantize_uint8" if self.args.int8 else ""
         with spaces_in_path(f_pb) as fpb_, spaces_in_path(f) as f_:  # exporter can not handle spaces in path
-            cmd = f'tensorflowjs_converter --input_format=tf_frozen_model --output_node_names={outputs} "{fpb_}" "{f_}"'
+            cmd = (
+                "tensorflowjs_converter "
+                f'--input_format=tf_frozen_model {quantization} --output_node_names={outputs} "{fpb_}" "{f_}"'
+            )
             LOGGER.info(f"{prefix} running '{cmd}'")
             subprocess.run(cmd, shell=True)

-        if ' ' in str(f):
+        if " " in f:
             LOGGER.warning(f"{prefix} WARNING ⚠️ your model may not work correctly with spaces in path '{f}'.")

         # f_json = Path(f) / 'model.json'  # *.json path
@@ -803,7 +932,7 @@ class Exporter:
         #         f_json.read_text(),
         #     )
         #     j.write(subst)
-        yaml_save(Path(f) / 'metadata.yaml', self.metadata)  # add metadata.yaml
+        yaml_save(Path(f) / "metadata.yaml", self.metadata)  # add metadata.yaml
         return f, None
     def _add_tflite_metadata(self, file):
@@ -814,14 +943,14 @@ class Exporter:

         # Create model info
         model_meta = _metadata_fb.ModelMetadataT()
-        model_meta.name = self.metadata['description']
-        model_meta.version = self.metadata['version']
-        model_meta.author = self.metadata['author']
-        model_meta.license = self.metadata['license']
+        model_meta.name = self.metadata["description"]
+        model_meta.version = self.metadata["version"]
+        model_meta.author = self.metadata["author"]
+        model_meta.license = self.metadata["license"]

         # Label file
-        tmp_file = Path(file).parent / 'temp_meta.txt'
-        with open(tmp_file, 'w') as f:
+        tmp_file = Path(file).parent / "temp_meta.txt"
+        with open(tmp_file, "w") as f:
             f.write(str(self.metadata))

         label_file = _metadata_fb.AssociatedFileT()
@@ -830,8 +959,8 @@ class Exporter:

         # Create input info
         input_meta = _metadata_fb.TensorMetadataT()
-        input_meta.name = 'image'
-        input_meta.description = 'Input image to be detected.'
+        input_meta.name = "image"
+        input_meta.description = "Input image to be detected."
         input_meta.content = _metadata_fb.ContentT()
         input_meta.content.contentProperties = _metadata_fb.ImagePropertiesT()
         input_meta.content.contentProperties.colorSpace = _metadata_fb.ColorSpaceType.RGB
@@ -839,19 +968,19 @@ class Exporter:

         # Create output info
         output1 = _metadata_fb.TensorMetadataT()
-        output1.name = 'output'
-        output1.description = 'Coordinates of detected objects, class labels, and confidence score'
+        output1.name = "output"
+        output1.description = "Coordinates of detected objects, class labels, and confidence score"
         output1.associatedFiles = [label_file]
-        if self.model.task == 'segment':
+        if self.model.task == "segment":
             output2 = _metadata_fb.TensorMetadataT()
-            output2.name = 'output'
-            output2.description = 'Mask protos'
+            output2.name = "output"
+            output2.description = "Mask protos"
             output2.associatedFiles = [label_file]

         # Create subgraph info
         subgraph = _metadata_fb.SubGraphMetadataT()
         subgraph.inputTensorMetadata = [input_meta]
-        subgraph.outputTensorMetadata = [output1, output2] if self.model.task == 'segment' else [output1]
+        subgraph.outputTensorMetadata = [output1, output2] if self.model.task == "segment" else [output1]
         model_meta.subgraphMetadata = [subgraph]

         b = flatbuffers.Builder(0)
@@ -864,11 +993,11 @@ class Exporter:
         populator.populate()
         tmp_file.unlink()
-    def _pipeline_coreml(self, model, weights_dir=None, prefix=colorstr('CoreML Pipeline:')):
+    def _pipeline_coreml(self, model, weights_dir=None, prefix=colorstr("CoreML Pipeline:")):
         """YOLOv8 CoreML pipeline."""
         import coremltools as ct  # noqa

-        LOGGER.info(f'{prefix} starting pipeline with coremltools {ct.__version__}...')
+        LOGGER.info(f"{prefix} starting pipeline with coremltools {ct.__version__}...")
         _, _, h, w = list(self.im.shape)  # BCHW

         # Output shapes
@@ -876,8 +1005,9 @@ class Exporter:
         out0, out1 = iter(spec.description.output)
         if MACOS:
             from PIL import Image
-            img = Image.new('RGB', (w, h))  # w=192, h=320
-            out = model.predict({'image': img})
+
+            img = Image.new("RGB", (w, h))  # w=192, h=320
+            out = model.predict({"image": img})
             out0_shape = out[out0.name].shape  # (3780, 80)
             out1_shape = out[out1.name].shape  # (3780, 4)
         else:  # linux and windows can not run model.predict(), get sizes from PyTorch model output y
@@ -885,11 +1015,11 @@ class Exporter:
             out1_shape = self.output_shape[2], 4  # (3780, 4)

         # Checks
-        names = self.metadata['names']
+        names = self.metadata["names"]
         nx, ny = spec.description.input[0].type.imageType.width, spec.description.input[0].type.imageType.height
         _, nc = out0_shape  # number of anchors, number of classes
         # _, nc = out0.type.multiArrayType.shape
-        assert len(names) == nc, f'{len(names)} names found for nc={nc}'  # check
+        assert len(names) == nc, f"{len(names)} names found for nc={nc}"  # check

         # Define output shapes (missing)
         out0.type.multiArrayType.shape[:] = out0_shape  # (3780, 80)
@@ -923,8 +1053,8 @@ class Exporter:
             nms_spec.description.output.add()
             nms_spec.description.output[i].ParseFromString(decoder_output)

-        nms_spec.description.output[0].name = 'confidence'
-        nms_spec.description.output[1].name = 'coordinates'
+        nms_spec.description.output[0].name = "confidence"
+        nms_spec.description.output[1].name = "coordinates"

         output_sizes = [nc, 4]
         for i in range(2):
@@ -940,10 +1070,10 @@ class Exporter:
         nms = nms_spec.nonMaximumSuppression
         nms.confidenceInputFeatureName = out0.name  # 1x507x80
         nms.coordinatesInputFeatureName = out1.name  # 1x507x4
-        nms.confidenceOutputFeatureName = 'confidence'
-        nms.coordinatesOutputFeatureName = 'coordinates'
-        nms.iouThresholdInputFeatureName = 'iouThreshold'
-        nms.confidenceThresholdInputFeatureName = 'confidenceThreshold'
+        nms.confidenceOutputFeatureName = "confidence"
+        nms.coordinatesOutputFeatureName = "coordinates"
+        nms.iouThresholdInputFeatureName = "iouThreshold"
+        nms.confidenceThresholdInputFeatureName = "confidenceThreshold"
         nms.iouThreshold = 0.45
         nms.confidenceThreshold = 0.25
         nms.pickTop.perClass = True
@@ -951,10 +1081,14 @@ class Exporter:
         nms_model = ct.models.MLModel(nms_spec)

         # 4. Pipeline models together
-        pipeline = ct.models.pipeline.Pipeline(input_features=[('image', ct.models.datatypes.Array(3, ny, nx)),
-                                                               ('iouThreshold', ct.models.datatypes.Double()),
-                                                               ('confidenceThreshold', ct.models.datatypes.Double())],
-                                               output_features=['confidence', 'coordinates'])
+        pipeline = ct.models.pipeline.Pipeline(
+            input_features=[
+                ("image", ct.models.datatypes.Array(3, ny, nx)),
+                ("iouThreshold", ct.models.datatypes.Double()),
+                ("confidenceThreshold", ct.models.datatypes.Double()),
+            ],
+            output_features=["confidence", "coordinates"],
+        )
         pipeline.add_model(model)
         pipeline.add_model(nms_model)
@@ -965,25 +1099,24 @@ class Exporter:

         # Update metadata
         pipeline.spec.specificationVersion = 5
-        pipeline.spec.description.metadata.userDefined.update({
-            'IoU threshold': str(nms.iouThreshold),
-            'Confidence threshold': str(nms.confidenceThreshold)})
+        pipeline.spec.description.metadata.userDefined.update(
+            {"IoU threshold": str(nms.iouThreshold), "Confidence threshold": str(nms.confidenceThreshold)}
+        )

         # Save the model
         model = ct.models.MLModel(pipeline.spec, weights_dir=weights_dir)
-        model.input_description['image'] = 'Input image'
-        model.input_description['iouThreshold'] = f'(optional) IOU threshold override (default: {nms.iouThreshold})'
-        model.input_description['confidenceThreshold'] = \
-            f'(optional) Confidence threshold override (default: {nms.confidenceThreshold})'
-        model.output_description['confidence'] = 'Boxes × Class confidence (see user-defined metadata "classes")'
-        model.output_description['coordinates'] = 'Boxes × [x, y, width, height] (relative to image size)'
-        LOGGER.info(f'{prefix} pipeline success')
+        model.input_description["image"] = "Input image"
+        model.input_description["iouThreshold"] = f"(optional) IoU threshold override (default: {nms.iouThreshold})"
+        model.input_description["confidenceThreshold"] = (
+            f"(optional) Confidence threshold override (default: {nms.confidenceThreshold})"
+        )
+        model.output_description["confidence"] = 'Boxes × Class confidence (see user-defined metadata "classes")'
+        model.output_description["coordinates"] = "Boxes × [x, y, width, height] (relative to image size)"
+        LOGGER.info(f"{prefix} pipeline success")
         return model
     def add_callback(self, event: str, callback):
-        """
-        Appends the given callback.
-        """
+        """Appends the given callback."""
         self.callbacks[event].append(callback)

     def run_callbacks(self, event: str):
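The run_callbacks hook above fires events such as 'on_export_end' with the Exporter instance as the single argument; a usage sketch through the model API (callback body and file names are illustrative):

    from ultralytics import YOLO

    def on_export_end(exporter):
        print(f"export finished: {exporter.file}")

    model = YOLO("yolov8n.pt")
    model.add_callback("on_export_end", on_export_end)
    model.export(format="torchscript")
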
- """ + """Appends the given callback.""" self.callbacks[event].append(callback) def run_callbacks(self, event: str): diff --git a/ultralytics/engine/model.py b/ultralytics/engine/model.py index ac57323..ef5c93c 100644 --- a/ultralytics/engine/model.py +++ b/ultralytics/engine/model.py @@ -5,64 +5,109 @@ import sys from pathlib import Path from typing import Union +import numpy as np +import torch + from ultralytics.cfg import TASK2DATA, get_cfg, get_save_dir from ultralytics.hub.utils import HUB_WEB_ROOT from ultralytics.nn.tasks import attempt_load_one_weight, guess_model_task, nn, yaml_model_load -from ultralytics.utils import ASSETS, DEFAULT_CFG_DICT, DEFAULT_CFG_KEYS, LOGGER, RANK, callbacks, emojis, yaml_load -from ultralytics.utils.checks import check_file, check_imgsz, check_pip_update_available, check_yaml -from ultralytics.utils.downloads import GITHUB_ASSETS_STEMS -from ultralytics.utils.torch_utils import smart_inference_mode +from ultralytics.utils import ASSETS, DEFAULT_CFG_DICT, LOGGER, RANK, SETTINGS, callbacks, checks, emojis, yaml_load -class Model: +class Model(nn.Module): """ - A base model class to unify apis for all the models. + A base class for implementing YOLO models, unifying APIs across different model types. + + This class provides a common interface for various operations related to YOLO models, such as training, + validation, prediction, exporting, and benchmarking. It handles different types of models, including those + loaded from local files, Ultralytics HUB, or Triton Server. The class is designed to be flexible and + extendable for different tasks and model configurations. Args: - model (str, Path): Path to the model file to load or create. - task (Any, optional): Task type for the YOLO model. Defaults to None. + model (Union[str, Path], optional): Path or name of the model to load or create. This can be a local file + path, a model name from Ultralytics HUB, or a Triton Server model. Defaults to 'yolov8n.pt'. + task (Any, optional): The task type associated with the YOLO model. This can be used to specify the model's + application domain, such as object detection, segmentation, etc. Defaults to None. + verbose (bool, optional): If True, enables verbose output during the model's operations. Defaults to False. Attributes: - predictor (Any): The predictor object. - model (Any): The model object. - trainer (Any): The trainer object. - task (str): The type of model task. - ckpt (Any): The checkpoint object if the model loaded from *.pt file. - cfg (str): The model configuration if loaded from *.yaml file. - ckpt_path (str): The checkpoint file path. - overrides (dict): Overrides for the trainer object. - metrics (Any): The data for metrics. + callbacks (dict): A dictionary of callback functions for various events during model operations. + predictor (BasePredictor): The predictor object used for making predictions. + model (nn.Module): The underlying PyTorch model. + trainer (BaseTrainer): The trainer object used for training the model. + ckpt (dict): The checkpoint data if the model is loaded from a *.pt file. + cfg (str): The configuration of the model if loaded from a *.yaml file. + ckpt_path (str): The path to the checkpoint file. + overrides (dict): A dictionary of overrides for model configuration. + metrics (dict): The latest training/validation metrics. + session (HUBTrainingSession): The Ultralytics HUB session, if applicable. + task (str): The type of task the model is intended for. + model_name (str): The name of the model. 
-    def __init__(self, model: Union[str, Path] = 'yolov8n.pt', task=None) -> None:
+    def __init__(
+        self,
+        model: Union[str, Path] = "yolov8n.pt",
+        task: str = None,
+        verbose: bool = False,
+    ) -> None:
         """
-        Initializes the YOLO model.
+        Initializes a new instance of the YOLO model class.
+
+        This constructor sets up the model based on the provided model path or name. It handles various types of model
+        sources, including local files, Ultralytics HUB models, and Triton Server models. The method initializes several
+        important attributes of the model and prepares it for operations like training, prediction, or export.

         Args:
-            model (Union[str, Path], optional): Path or name of the model to load or create. Defaults to 'yolov8n.pt'.
-            task (Any, optional): Task type for the YOLO model. Defaults to None.
+            model (Union[str, Path], optional): The path or model file to load or create. This can be a local
+                file path, a model name from Ultralytics HUB, or a Triton Server model. Defaults to 'yolov8n.pt'.
+            task (Any, optional): The task type associated with the YOLO model, specifying its application domain.
+                Defaults to None.
+            verbose (bool, optional): If True, enables verbose output during the model's initialization and subsequent
+                operations. Defaults to False.
+
+        Raises:
+            FileNotFoundError: If the specified model file does not exist or is inaccessible.
+            ValueError: If the model file or configuration is invalid or unsupported.
+            ImportError: If required dependencies for specific model types (like HUB SDK) are not installed.
         """
+        super().__init__()
         self.callbacks = callbacks.get_default_callbacks()
         self.predictor = None  # reuse predictor
         self.model = None  # model object
@@ -74,36 +119,80 @@ class Model:
         self.metrics = None  # validation/training metrics
         self.session = None  # HUB session
         self.task = task  # task type
-        model = str(model).strip()  # strip spaces
+        model = str(model).strip()

         # Check if Ultralytics HUB model from https://hub.ultralytics.com
         if self.is_hub_model(model):
-            from ultralytics.hub.session import HUBTrainingSession
-            self.session = HUBTrainingSession(model)
+            # Fetch model from HUB
+            checks.check_requirements("hub-sdk>=0.0.6")
+            self.session = self._get_hub_session(model)
             model = self.session.model_file

-        # Load or create new YOLO model
-        suffix = Path(model).suffix
-        if not suffix and Path(model).stem in GITHUB_ASSETS_STEMS:
-            model, suffix = Path(model).with_suffix('.pt'), '.pt'  # add suffix, i.e. yolov8n -> yolov8n.pt
-        if suffix in ('.yaml', '.yml'):
-            self._new(model, task)
-        else:
-            self._load(model, task)
+        # Check if Triton Server model
+        elif self.is_triton_model(model):
+            self.model_name = self.model = model
+            self.task = task
+            return

-    def __call__(self, source=None, stream=False, **kwargs):
-        """Calls the 'predict' function with given arguments to perform object detection."""
+        # Load or create new YOLO model
+        if Path(model).suffix in (".yaml", ".yml"):
+            self._new(model, task=task, verbose=verbose)
+        else:
+            self._load(model, task=task)
+    def __call__(
+        self,
+        source: Union[str, Path, int, list, tuple, np.ndarray, torch.Tensor] = None,
+        stream: bool = False,
+        **kwargs,
+    ) -> list:
+        """
+        An alias for the predict method, enabling the model instance to be callable.
+
+        This method simplifies the process of making predictions by allowing the model instance to be called directly
+        with the required arguments for prediction.
+
+        Args:
+            source (str | Path | int | PIL.Image | np.ndarray, optional): The source of the image for making
+                predictions. Accepts various types, including file paths, URLs, PIL images, and numpy arrays.
+                Defaults to None.
+            stream (bool, optional): If True, treats the input source as a continuous stream for predictions.
+                Defaults to False.
+            **kwargs (any): Additional keyword arguments for configuring the prediction process.
+
+        Returns:
+            (List[ultralytics.engine.results.Results]): A list of prediction results, encapsulated in the Results class.
+        """
         return self.predict(source, stream, **kwargs)

     @staticmethod
-    def is_hub_model(model):
-        """Check if the provided model is a HUB model."""
-        return any((
-            model.startswith(f'{HUB_WEB_ROOT}/models/'),  # i.e. https://hub.ultralytics.com/models/MODEL_ID
-            [len(x) for x in model.split('_')] == [42, 20],  # APIKEY_MODELID
-            len(model) == 20 and not Path(model).exists() and all(x not in model for x in './\\')))  # MODELID
+    def _get_hub_session(model: str):
+        """Creates a session for Hub Training."""
+        from ultralytics.hub.session import HUBTrainingSession
+
+        session = HUBTrainingSession(model)
+        return session if session.client.authenticated else None
+
+    @staticmethod
+    def is_triton_model(model: str) -> bool:
+        """Is model a Triton Server URL string, i.e. <scheme>://<netloc>/<endpoint>/<task_name>"""
+        from urllib.parse import urlsplit
+
+        url = urlsplit(model)
+        return url.netloc and url.path and url.scheme in {"http", "grpc"}
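The accepted Triton reference is a plain URL whose scheme is http or grpc; a sketch of the same check outside the class (host and endpoint are placeholders):

    from urllib.parse import urlsplit

    url = urlsplit("http://localhost:8000/yolov8n")  # scheme://netloc/path
    print(bool(url.netloc and url.path and url.scheme in {"http", "grpc"}))  # True
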
+    @staticmethod
+    def is_hub_model(model: str) -> bool:
+        """Check if the provided model is a HUB model."""
+        return any(
+            (
+                model.startswith(f"{HUB_WEB_ROOT}/models/"),  # i.e. https://hub.ultralytics.com/models/MODEL_ID
+                [len(x) for x in model.split("_")] == [42, 20],  # APIKEY_MODEL
+                len(model) == 20 and not Path(model).exists() and all(x not in model for x in "./\\"),  # MODEL
+            )
+        )

-    def _new(self, cfg: str, task=None, model=None, verbose=True):
+    def _new(self, cfg: str, task=None, model=None, verbose=False) -> None:
         """
         Initializes a new model and infers the task type from the model definitions.
@@ -116,16 +205,16 @@ class Model:
         cfg_dict = yaml_model_load(cfg)
         self.cfg = cfg
         self.task = task or guess_model_task(cfg_dict)
-        self.model = (model or self.smart_load('model'))(cfg_dict, verbose=verbose and RANK == -1)  # build model
-        self.overrides['model'] = self.cfg
-        self.overrides['task'] = self.task
+        self.model = (model or self._smart_load("model"))(cfg_dict, verbose=verbose and RANK == -1)  # build model
+        self.overrides["model"] = self.cfg
+        self.overrides["task"] = self.task

         # Below added to allow export from YAMLs
-        args = {**DEFAULT_CFG_DICT, **self.overrides}  # combine model and default args, preferring model args
-        self.model.args = {k: v for k, v in args.items() if k in DEFAULT_CFG_KEYS}  # attach args to model
+        self.model.args = {**DEFAULT_CFG_DICT, **self.overrides}  # combine default and model args (prefer model args)
         self.model.task = self.task
+        self.model_name = cfg

-    def _load(self, weights: str, task=None):
+    def _load(self, weights: str, task=None) -> None:
         """
         Initializes a new model and infers the task type from the model head.
@@ -133,49 +222,74 @@ class Model:
             weights (str): model checkpoint to be loaded
             task (str | None): model task
         """
-        suffix = Path(weights).suffix
-        if suffix == '.pt':
+        if weights.lower().startswith(("https://", "http://", "rtsp://", "rtmp://", "tcp://")):
+            weights = checks.check_file(weights)  # automatically download and return local filename
+        weights = checks.check_model_file_from_stem(weights)  # add suffix, i.e. yolov8n -> yolov8n.pt
+
+        if Path(weights).suffix == ".pt":
             self.model, self.ckpt = attempt_load_one_weight(weights)
-            self.task = self.model.args['task']
+            self.task = self.model.args["task"]
             self.overrides = self.model.args = self._reset_ckpt_args(self.model.args)
             self.ckpt_path = self.model.pt_path
         else:
-            weights = check_file(weights)
+            weights = checks.check_file(weights)  # runs in all cases, not redundant with above call
             self.model, self.ckpt = weights, None
             self.task = task or guess_model_task(weights)
             self.ckpt_path = weights
-        self.overrides['model'] = weights
-        self.overrides['task'] = self.task
+        self.overrides["model"] = weights
+        self.overrides["task"] = self.task
+        self.model_name = weights
yolov8n -> yolov8n.pt
+
+        if Path(weights).suffix == ".pt":
            self.model, self.ckpt = attempt_load_one_weight(weights)
-            self.task = self.model.args['task']
+            self.task = self.model.args["task"]
            self.overrides = self.model.args = self._reset_ckpt_args(self.model.args)
            self.ckpt_path = self.model.pt_path
        else:
-            weights = check_file(weights)
+            weights = checks.check_file(weights)  # runs in all cases, not redundant with above call
            self.model, self.ckpt = weights, None
            self.task = task or guess_model_task(weights)
            self.ckpt_path = weights
-        self.overrides['model'] = weights
-        self.overrides['task'] = self.task
+        self.overrides["model"] = weights
+        self.overrides["task"] = self.task
+        self.model_name = weights

-    def _check_is_pytorch_model(self):
-        """
-        Raises TypeError is model is not a PyTorch model
-        """
-        pt_str = isinstance(self.model, (str, Path)) and Path(self.model).suffix == '.pt'
+    def _check_is_pytorch_model(self) -> None:
+        """Raises TypeError if the model is not a PyTorch model."""
+        pt_str = isinstance(self.model, (str, Path)) and Path(self.model).suffix == ".pt"
        pt_module = isinstance(self.model, nn.Module)
        if not (pt_module or pt_str):
-            raise TypeError(f"model='{self.model}' must be a *.pt PyTorch model, but is a different type. "
-                            f'PyTorch models can be used to train, val, predict and export, i.e. '
-                            f"'yolo export model=yolov8n.pt', but exported formats like ONNX, TensorRT etc. only "
-                            f"support 'predict' and 'val' modes, i.e. 'yolo predict model=yolov8n.onnx'.")
+            raise TypeError(
+                f"model='{self.model}' should be a *.pt PyTorch model to run this method, but is a different format. "
+                f"PyTorch models can train, val, predict and export, i.e. 'model.train(data=...)', but exported "
+                f"formats like ONNX, TensorRT etc. only support 'predict' and 'val' modes, "
+                f"i.e. 'yolo predict model=yolov8n.onnx'.\nTo run CUDA or MPS inference please pass the device "
+                f"argument directly in your inference command, i.e. 'model.predict(source=..., device=0)'"
+            )

-    @smart_inference_mode()
-    def reset_weights(self):
+    def reset_weights(self) -> "Model":
        """
-        Resets the model modules parameters to randomly initialized values, losing all training information.
+        Resets the model parameters to randomly initialized values, effectively discarding all training information.
+
+        This method iterates through all modules in the model and resets their parameters if they have a
+        'reset_parameters' method. It also ensures that all parameters have 'requires_grad' set to True, enabling them
+        to be updated during training.
+
+        Returns:
+            self (ultralytics.engine.model.Model): The instance of the class with reset weights.
+
+        Raises:
+            AssertionError: If the model is not a PyTorch model.
        """
        self._check_is_pytorch_model()
        for m in self.model.modules():
-            if hasattr(m, 'reset_parameters'):
+            if hasattr(m, "reset_parameters"):
                m.reset_parameters()
        for p in self.model.parameters():
            p.requires_grad = True
        return self

-    @smart_inference_mode()
-    def load(self, weights='yolov8n.pt'):
+    def load(self, weights: Union[str, Path] = "yolov8n.pt") -> "Model":
        """
-        Transfers parameters with matching names and shapes from 'weights' to model.
+        Loads parameters from the specified weights file into the model.
+
+        This method supports loading weights from a file or directly from a weights object. It matches parameters by
+        name and shape and transfers them to the model.
+
+        Args:
+            weights (str | Path): Path to the weights file or a weights object. Defaults to 'yolov8n.pt'.
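# --- Illustrative sketch: the load() workflow documented above. Assumes the
# --- stock 'yolov8n.yaml'/'yolov8n.pt' assets; the pairing is an example only.
from ultralytics import YOLO

model = YOLO("yolov8n.yaml")  # build a new, randomly initialized model from a config
model.load("yolov8n.pt")      # transfer parameters whose names and shapes match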
+ + Returns: + self (ultralytics.engine.model.Model): The instance of the class with loaded weights. + + Raises: + AssertionError: If the model is not a PyTorch model. """ self._check_is_pytorch_model() if isinstance(weights, (str, Path)): @@ -183,160 +297,362 @@ class Model: self.model.load(weights) return self - def info(self, detailed=False, verbose=True): + def save(self, filename: Union[str, Path] = "saved_model.pt", use_dill=True) -> None: """ - Logs model info. + Saves the current model state to a file. + + This method exports the model's checkpoint (ckpt) to the specified filename. Args: - detailed (bool): Show detailed information about model. - verbose (bool): Controls verbosity. + filename (str | Path): The name of the file to save the model to. Defaults to 'saved_model.pt'. + use_dill (bool): Whether to try using dill for serialization if available. Defaults to True. + + Raises: + AssertionError: If the model is not a PyTorch model. + """ + self._check_is_pytorch_model() + from ultralytics import __version__ + from datetime import datetime + + updates = { + "date": datetime.now().isoformat(), + "version": __version__, + "license": "AGPL-3.0 License (https://ultralytics.com/license)", + "docs": "https://docs.ultralytics.com", + } + torch.save({**self.ckpt, **updates}, filename, use_dill=use_dill) + + def info(self, detailed: bool = False, verbose: bool = True): + """ + Logs or returns model information. + + This method provides an overview or detailed information about the model, depending on the arguments passed. + It can control the verbosity of the output. + + Args: + detailed (bool): If True, shows detailed information about the model. Defaults to False. + verbose (bool): If True, prints the information. If False, returns the information. Defaults to True. + + Returns: + (list): Various types of information about the model, depending on the 'detailed' and 'verbose' parameters. + + Raises: + AssertionError: If the model is not a PyTorch model. """ self._check_is_pytorch_model() return self.model.info(detailed=detailed, verbose=verbose) def fuse(self): - """Fuse PyTorch Conv2d and BatchNorm2d layers.""" + """ + Fuses Conv2d and BatchNorm2d layers in the model. + + This method optimizes the model by fusing Conv2d and BatchNorm2d layers, which can improve inference speed. + + Raises: + AssertionError: If the model is not a PyTorch model. + """ self._check_is_pytorch_model() self.model.fuse() - @smart_inference_mode() - def predict(self, source=None, stream=False, predictor=None, **kwargs): + def embed( + self, + source: Union[str, Path, int, list, tuple, np.ndarray, torch.Tensor] = None, + stream: bool = False, + **kwargs, + ) -> list: """ - Perform prediction using the YOLO model. + Generates image embeddings based on the provided source. + + This method is a wrapper around the 'predict()' method, focusing on generating embeddings from an image source. + It allows customization of the embedding process through various keyword arguments. Args: - source (str | int | PIL | np.ndarray): The source of the image to make predictions on. - Accepts all source types accepted by the YOLO model. - stream (bool): Whether to stream the predictions or not. Defaults to False. - predictor (BasePredictor): Customized predictor. - **kwargs : Additional keyword arguments passed to the predictor. - Check the 'configuration' section in the documentation for all available options. + source (str | int | PIL.Image | np.ndarray): The source of the image for generating embeddings. 
+ The source can be a file path, URL, PIL image, numpy array, etc. Defaults to None. + stream (bool): If True, predictions are streamed. Defaults to False. + **kwargs (any): Additional keyword arguments for configuring the embedding process. Returns: - (List[ultralytics.engine.results.Results]): The prediction results. + (List[torch.Tensor]): A list containing the image embeddings. + + Raises: + AssertionError: If the model is not a PyTorch model. + """ + if not kwargs.get("embed"): + kwargs["embed"] = [len(self.model.model) - 2] # embed second-to-last layer if no indices passed + return self.predict(source, stream, **kwargs) + + def predict( + self, + source: Union[str, Path, int, list, tuple, np.ndarray, torch.Tensor] = None, + stream: bool = False, + predictor=None, + **kwargs, + ) -> list: + """ + Performs predictions on the given image source using the YOLO model. + + This method facilitates the prediction process, allowing various configurations through keyword arguments. + It supports predictions with custom predictors or the default predictor method. The method handles different + types of image sources and can operate in a streaming mode. It also provides support for SAM-type models + through 'prompts'. + + The method sets up a new predictor if not already present and updates its arguments with each call. + It also issues a warning and uses default assets if the 'source' is not provided. The method determines if it + is being called from the command line interface and adjusts its behavior accordingly, including setting defaults + for confidence threshold and saving behavior. + + Args: + source (str | int | PIL.Image | np.ndarray, optional): The source of the image for making predictions. + Accepts various types, including file paths, URLs, PIL images, and numpy arrays. Defaults to ASSETS. + stream (bool, optional): Treats the input source as a continuous stream for predictions. Defaults to False. + predictor (BasePredictor, optional): An instance of a custom predictor class for making predictions. + If None, the method uses a default predictor. Defaults to None. + **kwargs (any): Additional keyword arguments for configuring the prediction process. These arguments allow + for further customization of the prediction behavior. + + Returns: + (List[ultralytics.engine.results.Results]): A list of prediction results, encapsulated in the Results class. + + Raises: + AttributeError: If the predictor is not properly set up. """ if source is None: source = ASSETS LOGGER.warning(f"WARNING ⚠️ 'source' is missing. 
Using 'source={source}'.") - is_cli = (sys.argv[0].endswith('yolo') or sys.argv[0].endswith('ultralytics')) and any( - x in sys.argv for x in ('predict', 'track', 'mode=predict', 'mode=track')) + is_cli = (sys.argv[0].endswith("yolo") or sys.argv[0].endswith("ultralytics")) and any( + x in sys.argv for x in ("predict", "track", "mode=predict", "mode=track") + ) - custom = {'conf': 0.25, 'save': is_cli} # method defaults - args = {**self.overrides, **custom, **kwargs, 'mode': 'predict'} # highest priority args on the right - prompts = args.pop('prompts', None) # for SAM-type models + custom = {"conf": 0.25, "batch": 1, "save": is_cli, "mode": "predict"} # method defaults + args = {**self.overrides, **custom, **kwargs} # highest priority args on the right + prompts = args.pop("prompts", None) # for SAM-type models if not self.predictor: - self.predictor = (predictor or self.smart_load('predictor'))(overrides=args, _callbacks=self.callbacks) + self.predictor = predictor or self._smart_load("predictor")(overrides=args, _callbacks=self.callbacks) self.predictor.setup_model(model=self.model, verbose=is_cli) else: # only update args if predictor is already setup self.predictor.args = get_cfg(self.predictor.args, args) - if 'project' in args or 'name' in args: + if "project" in args or "name" in args: self.predictor.save_dir = get_save_dir(self.predictor.args) - if prompts and hasattr(self.predictor, 'set_prompts'): # for SAM-type models + if prompts and hasattr(self.predictor, "set_prompts"): # for SAM-type models self.predictor.set_prompts(prompts) return self.predictor.predict_cli(source=source) if is_cli else self.predictor(source=source, stream=stream) - def track(self, source=None, stream=False, persist=False, **kwargs): + def track( + self, + source: Union[str, Path, int, list, tuple, np.ndarray, torch.Tensor] = None, + stream: bool = False, + persist: bool = False, + **kwargs, + ) -> list: """ - Perform object tracking on the input source using the registered trackers. + Conducts object tracking on the specified input source using the registered trackers. + + This method performs object tracking using the model's predictors and optionally registered trackers. It is + capable of handling different types of input sources such as file paths or video streams. The method supports + customization of the tracking process through various keyword arguments. It registers trackers if they are not + already present and optionally persists them based on the 'persist' flag. + + The method sets a default confidence threshold specifically for ByteTrack-based tracking, which requires low + confidence predictions as input. The tracking mode is explicitly set in the keyword arguments. Args: - source (str, optional): The input source for object tracking. Can be a file path or a video stream. - stream (bool, optional): Whether the input source is a video stream. Defaults to False. - persist (bool, optional): Whether to persist the trackers if they already exist. Defaults to False. - **kwargs (optional): Additional keyword arguments for the tracking process. + source (str, optional): The input source for object tracking. It can be a file path, URL, or video stream. + stream (bool, optional): Treats the input source as a continuous video stream. Defaults to False. + persist (bool, optional): Persists the trackers between different calls to this method. Defaults to False. + **kwargs (any): Additional keyword arguments for configuring the tracking process. 
These arguments allow + for further customization of the tracking behavior. Returns: - (List[ultralytics.engine.results.Results]): The tracking results. + (List[ultralytics.engine.results.Results]): A list of tracking results, encapsulated in the Results class. + + Raises: + AttributeError: If the predictor does not have registered trackers. """ - if not hasattr(self.predictor, 'trackers'): + if not hasattr(self.predictor, "trackers"): from ultralytics.trackers import register_tracker + register_tracker(self, persist) - # ByteTrack-based method needs low confidence predictions as input - kwargs['conf'] = kwargs.get('conf') or 0.1 - kwargs['mode'] = 'track' + kwargs["conf"] = kwargs.get("conf") or 0.1 # ByteTrack-based method needs low confidence predictions as input + kwargs["batch"] = kwargs.get("batch") or 1 # batch-size 1 for tracking in videos + kwargs["mode"] = "track" return self.predict(source=source, stream=stream, **kwargs) - @smart_inference_mode() - def val(self, validator=None, **kwargs): + def val( + self, + validator=None, + **kwargs, + ): """ - Validate a model on a given dataset. + Validates the model using a specified dataset and validation configuration. + + This method facilitates the model validation process, allowing for a range of customization through various + settings and configurations. It supports validation with a custom validator or the default validation approach. + The method combines default configurations, method-specific defaults, and user-provided arguments to configure + the validation process. After validation, it updates the model's metrics with the results obtained from the + validator. + + The method supports various arguments that allow customization of the validation process. For a comprehensive + list of all configurable options, users should refer to the 'configuration' section in the documentation. Args: - validator (BaseValidator): Customized validator. - **kwargs : Any other args accepted by the validators. To see all args check 'configuration' section in docs - """ - custom = {'rect': True} # method defaults - args = {**self.overrides, **custom, **kwargs, 'mode': 'val'} # highest priority args on the right - args['imgsz'] = check_imgsz(args['imgsz'], max_dim=1) + validator (BaseValidator, optional): An instance of a custom validator class for validating the model. If + None, the method uses a default validator. Defaults to None. + **kwargs (any): Arbitrary keyword arguments representing the validation configuration. These arguments are + used to customize various aspects of the validation process. - validator = (validator or self.smart_load('validator'))(args=args, _callbacks=self.callbacks) + Returns: + (dict): Validation metrics obtained from the validation process. + + Raises: + AssertionError: If the model is not a PyTorch model. + """ + custom = {"rect": True} # method defaults + args = {**self.overrides, **custom, **kwargs, "mode": "val"} # highest priority args on the right + + validator = (validator or self._smart_load("validator"))(args=args, _callbacks=self.callbacks) validator(model=self.model) self.metrics = validator.metrics return validator.metrics - @smart_inference_mode() - def benchmark(self, **kwargs): + def benchmark( + self, + **kwargs, + ): """ - Benchmark a model on all export formats. + Benchmarks the model across various export formats to evaluate performance. + + This method assesses the model's performance in different export formats, such as ONNX, TorchScript, etc. 
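# --- Illustrative sketch: a benchmark() call as described above. The dataset,
# --- image size and device are example choices, not defaults from this patch.
from ultralytics import YOLO

model = YOLO("yolov8n.pt")
model.benchmark(data="coco8.yaml", imgsz=640, half=False, device="cpu")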
+ It uses the 'benchmark' function from the ultralytics.utils.benchmarks module. The benchmarking is configured + using a combination of default configuration values, model-specific arguments, method-specific defaults, and + any additional user-provided keyword arguments. + + The method supports various arguments that allow customization of the benchmarking process, such as dataset + choice, image size, precision modes, device selection, and verbosity. For a comprehensive list of all + configurable options, users should refer to the 'configuration' section in the documentation. Args: - **kwargs : Any other args accepted by the validators. To see all args check 'configuration' section in docs + **kwargs (any): Arbitrary keyword arguments to customize the benchmarking process. These are combined with + default configurations, model-specific arguments, and method defaults. + + Returns: + (dict): A dictionary containing the results of the benchmarking process. + + Raises: + AssertionError: If the model is not a PyTorch model. """ self._check_is_pytorch_model() from ultralytics.utils.benchmarks import benchmark - custom = {'verbose': False} # method defaults - args = {**DEFAULT_CFG_DICT, **self.model.args, **custom, **kwargs, 'mode': 'benchmark'} + custom = {"verbose": False} # method defaults + args = {**DEFAULT_CFG_DICT, **self.model.args, **custom, **kwargs, "mode": "benchmark"} return benchmark( model=self, - data=kwargs.get('data'), # if no 'data' argument passed set data=None for default datasets - imgsz=args['imgsz'], - half=args['half'], - int8=args['int8'], - device=args['device'], - verbose=kwargs.get('verbose')) + data=kwargs.get("data"), # if no 'data' argument passed set data=None for default datasets + imgsz=args["imgsz"], + half=args["half"], + int8=args["int8"], + device=args["device"], + verbose=kwargs.get("verbose"), + ) - def export(self, **kwargs): + def export( + self, + **kwargs, + ): """ - Export model. + Exports the model to a different format suitable for deployment. + + This method facilitates the export of the model to various formats (e.g., ONNX, TorchScript) for deployment + purposes. It uses the 'Exporter' class for the export process, combining model-specific overrides, method + defaults, and any additional arguments provided. The combined arguments are used to configure export settings. + + The method supports a wide range of arguments to customize the export process. For a comprehensive list of all + possible arguments, refer to the 'configuration' section in the documentation. Args: - **kwargs : Any other args accepted by the Exporter. To see all args check 'configuration' section in docs. + **kwargs (any): Arbitrary keyword arguments to customize the export process. These are combined with the + model's overrides and method defaults. + + Returns: + (object): The exported model in the specified format, or an object related to the export process. + + Raises: + AssertionError: If the model is not a PyTorch model. 
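# --- Illustrative sketch: exporting via export(). The format and image size
# --- are example arguments; the return value is typically the exported file path.
from ultralytics import YOLO

model = YOLO("yolov8n.pt")
onnx_file = model.export(format="onnx", imgsz=640)  # e.g. 'yolov8n.onnx'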
""" self._check_is_pytorch_model() from .exporter import Exporter - custom = {'imgsz': self.model.args['imgsz'], 'batch': 1, 'data': None, 'verbose': False} # method defaults - args = {**self.overrides, **custom, **kwargs, 'mode': 'export'} # highest priority args on the right + custom = {"imgsz": self.model.args["imgsz"], "batch": 1, "data": None, "verbose": False} # method defaults + args = {**self.overrides, **custom, **kwargs, "mode": "export"} # highest priority args on the right return Exporter(overrides=args, _callbacks=self.callbacks)(model=self.model) - def train(self, trainer=None, **kwargs): + def train( + self, + trainer=None, + **kwargs, + ): """ - Trains the model on a given dataset. + Trains the model using the specified dataset and training configuration. + + This method facilitates model training with a range of customizable settings and configurations. It supports + training with a custom trainer or the default training approach defined in the method. The method handles + different scenarios, such as resuming training from a checkpoint, integrating with Ultralytics HUB, and + updating model and configuration after training. + + When using Ultralytics HUB, if the session already has a loaded model, the method prioritizes HUB training + arguments and issues a warning if local arguments are provided. It checks for pip updates and combines default + configurations, method-specific defaults, and user-provided arguments to configure the training process. After + training, it updates the model and its configurations, and optionally attaches metrics. Args: - trainer (BaseTrainer, optional): Customized trainer. - **kwargs (Any): Any number of arguments representing the training configuration. + trainer (BaseTrainer, optional): An instance of a custom trainer class for training the model. If None, the + method uses a default trainer. Defaults to None. + **kwargs (any): Arbitrary keyword arguments representing the training configuration. These arguments are + used to customize various aspects of the training process. + + Returns: + (dict | None): Training metrics if available and training is successful; otherwise, None. + + Raises: + AssertionError: If the model is not a PyTorch model. + PermissionError: If there is a permission issue with the HUB session. + ModuleNotFoundError: If the HUB SDK is not installed. 
""" self._check_is_pytorch_model() - if self.session: # Ultralytics HUB session + if hasattr(self.session, "model") and self.session.model.id: # Ultralytics HUB session with loaded model if any(kwargs): - LOGGER.warning('WARNING ⚠️ using HUB training arguments, ignoring local training arguments.') - kwargs = self.session.train_args - check_pip_update_available() + LOGGER.warning("WARNING ⚠️ using HUB training arguments, ignoring local training arguments.") + kwargs = self.session.train_args # overwrite kwargs - overrides = yaml_load(check_yaml(kwargs['cfg'])) if kwargs.get('cfg') else self.overrides - custom = {'data': TASK2DATA[self.task]} # method defaults - args = {**overrides, **custom, **kwargs, 'mode': 'train'} # highest priority args on the right - if args.get('resume'): - args['resume'] = self.ckpt_path + checks.check_pip_update_available() - self.trainer = (trainer or self.smart_load('trainer'))(overrides=args, _callbacks=self.callbacks) - if not args.get('resume'): # manually set model only if not resuming + overrides = yaml_load(checks.check_yaml(kwargs["cfg"])) if kwargs.get("cfg") else self.overrides + custom = {"data": DEFAULT_CFG_DICT["data"] or TASK2DATA[self.task]} # method defaults + args = {**overrides, **custom, **kwargs, "mode": "train"} # highest priority args on the right + if args.get("resume"): + args["resume"] = self.ckpt_path + + self.trainer = (trainer or self._smart_load("trainer"))(overrides=args, _callbacks=self.callbacks) + if not args.get("resume"): # manually set model only if not resuming self.trainer.model = self.trainer.get_model(weights=self.model if self.ckpt else None, cfg=self.model.yaml) self.model = self.trainer.model + + if SETTINGS["hub"] is True and not self.session: + # Create a model in HUB + try: + self.session = self._get_hub_session(self.model_name) + if self.session: + self.session.create_model(args) + # Check model was created + if not getattr(self.session.model, "id", None): + self.session = None + except (PermissionError, ModuleNotFoundError): + # Ignore PermissionError and ModuleNotFoundError which indicates hub-sdk not installed + pass + self.trainer.hub_session = self.session # attach optional HUB session self.trainer.train() # Update model and cfg after training @@ -344,78 +660,148 @@ class Model: ckpt = self.trainer.best if self.trainer.best.exists() else self.trainer.last self.model, _ = attempt_load_one_weight(ckpt) self.overrides = self.model.args - self.metrics = getattr(self.trainer.validator, 'metrics', None) # TODO: no metrics returned by DDP + self.metrics = getattr(self.trainer.validator, "metrics", None) # TODO: no metrics returned by DDP return self.metrics - def tune(self, use_ray=False, iterations=10, *args, **kwargs): + def tune( + self, + use_ray=False, + iterations=10, + *args, + **kwargs, + ): """ - Runs hyperparameter tuning, optionally using Ray Tune. See ultralytics.utils.tuner.run_ray_tune for Args. + Conducts hyperparameter tuning for the model, with an option to use Ray Tune. + + This method supports two modes of hyperparameter tuning: using Ray Tune or a custom tuning method. + When Ray Tune is enabled, it leverages the 'run_ray_tune' function from the ultralytics.utils.tuner module. + Otherwise, it uses the internal 'Tuner' class for tuning. The method combines default, overridden, and + custom arguments to configure the tuning process. + + Args: + use_ray (bool): If True, uses Ray Tune for hyperparameter tuning. Defaults to False. + iterations (int): The number of tuning iterations to perform. 
Defaults to 10. + *args (list): Variable length argument list for additional arguments. + **kwargs (any): Arbitrary keyword arguments. These are combined with the model's overrides and defaults. Returns: (dict): A dictionary containing the results of the hyperparameter search. + + Raises: + AssertionError: If the model is not a PyTorch model. """ self._check_is_pytorch_model() if use_ray: from ultralytics.utils.tuner import run_ray_tune + return run_ray_tune(self, max_samples=iterations, *args, **kwargs) else: from .tuner import Tuner - custom = {'plots': False, 'save': False} # method defaults - args = {**self.overrides, **custom, **kwargs, 'mode': 'train'} # highest priority args on the right + custom = {} # method defaults + args = {**self.overrides, **custom, **kwargs, "mode": "train"} # highest priority args on the right return Tuner(args=args, _callbacks=self.callbacks)(model=self, iterations=iterations) - def to(self, device): - """ - Sends the model to the given device. - - Args: - device (str): device - """ + def _apply(self, fn) -> "Model": + """Apply to(), cpu(), cuda(), half(), float() to model tensors that are not parameters or registered buffers.""" self._check_is_pytorch_model() - self.model.to(device) + self = super()._apply(fn) # noqa + self.predictor = None # reset predictor as device may have changed + self.overrides["device"] = self.device # was str(self.device) i.e. device(type='cuda', index=0) -> 'cuda:0' return self @property - def names(self): - """Returns class names of the loaded model.""" - return self.model.names if hasattr(self.model, 'names') else None + def names(self) -> list: + """ + Retrieves the class names associated with the loaded model. + + This property returns the class names if they are defined in the model. It checks the class names for validity + using the 'check_class_names' function from the ultralytics.nn.autobackend module. + + Returns: + (list | None): The class names of the model if available, otherwise None. + """ + from ultralytics.nn.autobackend import check_class_names + + return check_class_names(self.model.names) if hasattr(self.model, "names") else None @property - def device(self): - """Returns device if PyTorch model.""" + def device(self) -> torch.device: + """ + Retrieves the device on which the model's parameters are allocated. + + This property is used to determine whether the model's parameters are on CPU or GPU. It only applies to models + that are instances of nn.Module. + + Returns: + (torch.device | None): The device (CPU/GPU) of the model if it is a PyTorch model, otherwise None. + """ return next(self.model.parameters()).device if isinstance(self.model, nn.Module) else None @property def transforms(self): - """Returns transform of the loaded model.""" - return self.model.transforms if hasattr(self.model, 'transforms') else None + """ + Retrieves the transformations applied to the input data of the loaded model. - def add_callback(self, event: str, func): - """Add a callback.""" + This property returns the transformations if they are defined in the model. + + Returns: + (object | None): The transform object of the model if available, otherwise None. + """ + return self.model.transforms if hasattr(self.model, "transforms") else None + + def add_callback(self, event: str, func) -> None: + """ + Adds a callback function for a specified event. + + This method allows the user to register a custom callback function that is triggered on a specific event during + model training or inference. 
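# --- Illustrative sketch: registering a callback. 'on_train_start' is a
# --- standard Ultralytics event name; the handler body is an example.
from ultralytics import YOLO

def log_start(trainer):
    print(f"training started, save_dir={trainer.save_dir}")

model = YOLO("yolov8n.pt")
model.add_callback("on_train_start", log_start)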
+ + Args: + event (str): The name of the event to attach the callback to. + func (callable): The callback function to be registered. + + Raises: + ValueError: If the event name is not recognized. + """ self.callbacks[event].append(func) - def clear_callback(self, event: str): - """Clear all event callbacks.""" + def clear_callback(self, event: str) -> None: + """ + Clears all callback functions registered for a specified event. + + This method removes all custom and default callback functions associated with the given event. + + Args: + event (str): The name of the event for which to clear the callbacks. + + Raises: + ValueError: If the event name is not recognized. + """ self.callbacks[event] = [] - @staticmethod - def _reset_ckpt_args(args): - """Reset arguments when loading a PyTorch model.""" - include = {'imgsz', 'data', 'task', 'single_cls'} # only remember these arguments when loading a PyTorch model - return {k: v for k, v in args.items() if k in include} + def reset_callbacks(self) -> None: + """ + Resets all callbacks to their default functions. - def _reset_callbacks(self): - """Reset all registered callbacks.""" + This method reinstates the default callback functions for all events, removing any custom callbacks that were + added previously. + """ for event in callbacks.default_callbacks.keys(): self.callbacks[event] = [callbacks.default_callbacks[event][0]] - def __getattr__(self, attr): - """Raises error if object has no requested attribute.""" - name = self.__class__.__name__ - raise AttributeError(f"'{name}' object has no attribute '{attr}'. See valid attributes below.\n{self.__doc__}") + @staticmethod + def _reset_ckpt_args(args: dict) -> dict: + """Reset arguments when loading a PyTorch model.""" + include = {"imgsz", "data", "task", "single_cls"} # only remember these arguments when loading a PyTorch model + return {k: v for k, v in args.items() if k in include} - def smart_load(self, key): + # def __getattr__(self, attr): + # """Raises error if object has no requested attribute.""" + # name = self.__class__.__name__ + # raise AttributeError(f"'{name}' object has no attribute '{attr}'. See valid attributes below.\n{self.__doc__}") + + def _smart_load(self, key: str): """Load model/trainer/validator/predictor.""" try: return self.task_map[self.task][key] @@ -423,14 +809,15 @@ class Model: name = self.__class__.__name__ mode = inspect.stack()[1][3] # get the function name. raise NotImplementedError( - emojis(f"WARNING ⚠️ '{name}' model does not support '{mode}' mode for '{self.task}' task yet.")) from e + emojis(f"WARNING ⚠️ '{name}' model does not support '{mode}' mode for '{self.task}' task yet.") + ) from e @property - def task_map(self): + def task_map(self) -> dict: """ Map head to model, trainer, validator, and predictor classes. Returns: task_map (dict): The map of model task to mode classes. 
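# --- Illustrative sketch: the task_map a concrete subclass provides. The
# --- detect-task classes mirror the Ultralytics YOLO mapping; the subclass
# --- name is hypothetical and other tasks are omitted.
from ultralytics.engine.model import Model
from ultralytics.models import yolo
from ultralytics.nn.tasks import DetectionModel

class DetectOnlyModel(Model):
    @property
    def task_map(self) -> dict:
        return {
            "detect": {
                "model": DetectionModel,
                "trainer": yolo.detect.DetectionTrainer,
                "validator": yolo.detect.DetectionValidator,
                "predictor": yolo.detect.DetectionPredictor,
            }
        }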
""" - raise NotImplementedError('Please provide task map for your model!') + raise NotImplementedError("Please provide task map for your model!") diff --git a/ultralytics/engine/predictor.py b/ultralytics/engine/predictor.py index c649090..9ec803a 100644 --- a/ultralytics/engine/predictor.py +++ b/ultralytics/engine/predictor.py @@ -11,8 +11,8 @@ Usage - sources: list.txt # list of images list.streams # list of streams 'path/*.jpg' # glob - 'https://youtu.be/Zgi9g1ksQHc' # YouTube - 'rtsp://example.com/media.mp4' # RTSP, RTMP, HTTP stream + 'https://youtu.be/LNwODJXcvt4' # YouTube + 'rtsp://example.com/media.mp4' # RTSP, RTMP, HTTP, TCP stream Usage - formats: $ yolo mode=predict model=yolov8n.pt # PyTorch @@ -26,8 +26,12 @@ Usage - formats: yolov8n.tflite # TensorFlow Lite yolov8n_edgetpu.tflite # TensorFlow Edge TPU yolov8n_paddle_model # PaddlePaddle + yolov8n_ncnn_model # NCNN """ + import platform +import re +import threading from pathlib import Path import cv2 @@ -58,7 +62,7 @@ Example: class BasePredictor: """ - BasePredictor + BasePredictor. A base class for creating predictors. @@ -70,9 +74,7 @@ class BasePredictor: data (dict): Data configuration. device (torch.device): Device used for prediction. dataset (Dataset): Dataset used for prediction. - vid_path (str): Path to video file. - vid_writer (cv2.VideoWriter): Video writer for saving video output. - data_path (str): Path to data. + vid_writer (dict): Dictionary of {save_path: video_writer, ...} writer for saving video output. """ def __init__(self, cfg=DEFAULT_CFG, overrides=None, _callbacks=None): @@ -97,19 +99,22 @@ class BasePredictor: self.imgsz = None self.device = None self.dataset = None - self.vid_path, self.vid_writer = None, None + self.vid_writer = {} # dict of {save_path: video_writer, ...} self.plotted_img = None - self.data_path = None self.source_type = None + self.seen = 0 + self.windows = [] self.batch = None self.results = None self.transforms = None self.callbacks = _callbacks or callbacks.get_default_callbacks() self.txt_path = None + self._lock = threading.Lock() # for automatic thread-safe inference callbacks.add_integration_callbacks(self) def preprocess(self, im): - """Prepares input image before inference. + """ + Prepares input image before inference. Args: im (torch.Tensor | List(np.ndarray)): BCHW for tensor, [(HWC) x B] for list. @@ -128,9 +133,13 @@ class BasePredictor: return im def inference(self, im, *args, **kwargs): - visualize = increment_path(self.save_dir / Path(self.batch[0][0]).stem, - mkdir=True) if self.args.visualize and (not self.source_type.tensor) else False - return self.model(im, augment=self.args.augment, visualize=visualize) + """Runs inference on a given image using the specified model and arguments.""" + visualize = ( + increment_path(self.save_dir / Path(self.batch[0][0]).stem, mkdir=True) + if self.args.visualize and (not self.source_type.tensor) + else False + ) + return self.model(im, augment=self.args.augment, visualize=visualize, embed=self.args.embed, *args, **kwargs) def pre_transform(self, im): """ @@ -142,45 +151,10 @@ class BasePredictor: Returns: (list): A list of transformed images. 
""" - same_shapes = all(x.shape == im[0].shape for x in im) + same_shapes = len({x.shape for x in im}) == 1 letterbox = LetterBox(self.imgsz, auto=same_shapes and self.model.pt, stride=self.model.stride) return [letterbox(image=x) for x in im] - def write_results(self, idx, results, batch): - """Write inference results to a file or directory.""" - p, im, _ = batch - log_string = '' - if len(im.shape) == 3: - im = im[None] # expand for batch dim - if self.source_type.webcam or self.source_type.from_img or self.source_type.tensor: # batch_size >= 1 - log_string += f'{idx}: ' - frame = self.dataset.count - else: - frame = getattr(self.dataset, 'frame', 0) - self.data_path = p - self.txt_path = str(self.save_dir / 'labels' / p.stem) + ('' if self.dataset.mode == 'image' else f'_{frame}') - log_string += '%gx%g ' % im.shape[2:] # print string - result = results[idx] - log_string += result.verbose() - - if self.args.save or self.args.show: # Add bbox to image - plot_args = { - 'line_width': self.args.line_width, - 'boxes': self.args.boxes, - 'conf': self.args.show_conf, - 'labels': self.args.show_labels} - if not self.args.retina_masks: - plot_args['im_gpu'] = im[idx] - self.plotted_img = result.plot(**plot_args) - # Write - if self.args.save_txt: - result.save_txt(f'{self.txt_path}.txt', save_conf=self.args.save_conf) - if self.args.save_crop: - result.save_crop(save_dir=self.save_dir / 'crops', - file_name=self.data_path.stem + ('' if self.dataset.mode == 'image' else f'_{frame}')) - - return log_string - def postprocess(self, preds, img, orig_imgs): """Post-processes predictions for an image and returns them.""" return preds @@ -194,157 +168,224 @@ class BasePredictor: return list(self.stream_inference(source, model, *args, **kwargs)) # merge list of Result into one def predict_cli(self, source=None, model=None): - """Method used for CLI prediction. It uses always generator as outputs as not required by CLI mode.""" + """ + Method used for CLI prediction. + + It uses always generator as outputs as not required by CLI mode. 
+ """ gen = self.stream_inference(source, model) - for _ in gen: # running CLI inference without accumulating any outputs (do not modify) + for _ in gen: # noqa, running CLI inference without accumulating any outputs (do not modify) pass def setup_source(self, source): """Sets up source and inference mode.""" self.imgsz = check_imgsz(self.args.imgsz, stride=self.model.stride, min_dim=2) # check image size - self.transforms = getattr(self.model.model, 'transforms', classify_transforms( - self.imgsz[0])) if self.args.task == 'classify' else None - self.dataset = load_inference_source(source=source, - imgsz=self.imgsz, - vid_stride=self.args.vid_stride, - stream_buffer=self.args.stream_buffer) + self.transforms = ( + getattr( + self.model.model, + "transforms", + classify_transforms(self.imgsz[0], crop_fraction=self.args.crop_fraction), + ) + if self.args.task == "classify" + else None + ) + self.dataset = load_inference_source( + source=source, + batch=self.args.batch, + vid_stride=self.args.vid_stride, + buffer=self.args.stream_buffer, + ) self.source_type = self.dataset.source_type - if not getattr(self, 'stream', True) and (self.dataset.mode == 'stream' or # streams - len(self.dataset) > 1000 or # images - any(getattr(self.dataset, 'video_flag', [False]))): # videos + if not getattr(self, "stream", True) and ( + self.source_type.stream + or self.source_type.screenshot + or len(self.dataset) > 1000 # many images + or any(getattr(self.dataset, "video_flag", [False])) + ): # videos LOGGER.warning(STREAM_WARNING) - self.vid_path, self.vid_writer = [None] * self.dataset.bs, [None] * self.dataset.bs + self.vid_writer = {} @smart_inference_mode() def stream_inference(self, source=None, model=None, *args, **kwargs): """Streams real-time inference on camera feed and saves results to file.""" if self.args.verbose: - LOGGER.info('') + LOGGER.info("") # Setup model if not self.model: self.setup_model(model) - # Setup source every time predict is called - self.setup_source(source if source is not None else self.args.source) + with self._lock: # for thread-safe inference + # Setup source every time predict is called + self.setup_source(source if source is not None else self.args.source) - # Check if save_dir/ label file exists - if self.args.save or self.args.save_txt: - (self.save_dir / 'labels' if self.args.save_txt else self.save_dir).mkdir(parents=True, exist_ok=True) + # Check if save_dir/ label file exists + if self.args.save or self.args.save_txt: + (self.save_dir / "labels" if self.args.save_txt else self.save_dir).mkdir(parents=True, exist_ok=True) - # Warmup model - if not self.done_warmup: - self.model.warmup(imgsz=(1 if self.model.pt or self.model.triton else self.dataset.bs, 3, *self.imgsz)) - self.done_warmup = True + # Warmup model + if not self.done_warmup: + self.model.warmup(imgsz=(1 if self.model.pt or self.model.triton else self.dataset.bs, 3, *self.imgsz)) + self.done_warmup = True - self.seen, self.windows, self.batch, profilers = 0, [], None, (ops.Profile(), ops.Profile(), ops.Profile()) - self.run_callbacks('on_predict_start') - for batch in self.dataset: - self.run_callbacks('on_predict_batch_start') - self.batch = batch - path, im0s, vid_cap, s = batch + self.seen, self.windows, self.batch = 0, [], None + profilers = ( + ops.Profile(device=self.device), + ops.Profile(device=self.device), + ops.Profile(device=self.device), + ) + self.run_callbacks("on_predict_start") + for self.batch in self.dataset: + self.run_callbacks("on_predict_batch_start") + paths, im0s, s = self.batch - 
# Preprocess - with profilers[0]: - im = self.preprocess(im0s) + # Preprocess + with profilers[0]: + im = self.preprocess(im0s) - # Inference - with profilers[1]: - preds = self.inference(im, *args, **kwargs) + # Inference + with profilers[1]: + preds = self.inference(im, *args, **kwargs) + if self.args.embed: + yield from [preds] if isinstance(preds, torch.Tensor) else preds # yield embedding tensors + continue - # Postprocess - with profilers[2]: - self.results = self.postprocess(preds, im, im0s) - self.run_callbacks('on_predict_postprocess_end') + # Postprocess + with profilers[2]: + self.results = self.postprocess(preds, im, im0s) + self.run_callbacks("on_predict_postprocess_end") - # Visualize, save, write results - n = len(im0s) - for i in range(n): - self.seen += 1 - self.results[i].speed = { - 'preprocess': profilers[0].dt * 1E3 / n, - 'inference': profilers[1].dt * 1E3 / n, - 'postprocess': profilers[2].dt * 1E3 / n} - p, im0 = path[i], None if self.source_type.tensor else im0s[i].copy() - p = Path(p) + # Visualize, save, write results + n = len(im0s) + for i in range(n): + self.seen += 1 + self.results[i].speed = { + "preprocess": profilers[0].dt * 1e3 / n, + "inference": profilers[1].dt * 1e3 / n, + "postprocess": profilers[2].dt * 1e3 / n, + } + if self.args.verbose or self.args.save or self.args.save_txt or self.args.show: + s[i] += self.write_results(i, Path(paths[i]), im, s) - if self.args.verbose or self.args.save or self.args.save_txt or self.args.show: - s += self.write_results(i, self.results, (p, im, im0)) - if self.args.save or self.args.save_txt: - self.results[i].save_dir = self.save_dir.__str__() - if self.args.show and self.plotted_img is not None: - self.show(p) - if self.args.save and self.plotted_img is not None: - self.save_preds(vid_cap, i, str(self.save_dir / p.name)) + # Print batch results + if self.args.verbose: + LOGGER.info("\n".join(s)) - self.run_callbacks('on_predict_batch_end') - yield from self.results - - # Print time (inference-only) - if self.args.verbose: - LOGGER.info(f'{s}{profilers[1].dt * 1E3:.1f}ms') + self.run_callbacks("on_predict_batch_end") + yield from self.results # Release assets - if isinstance(self.vid_writer[-1], cv2.VideoWriter): - self.vid_writer[-1].release() # release final video writer + for v in self.vid_writer.values(): + if isinstance(v, cv2.VideoWriter): + v.release() - # Print results + # Print final results if self.args.verbose and self.seen: - t = tuple(x.t / self.seen * 1E3 for x in profilers) # speeds per image - LOGGER.info(f'Speed: %.1fms preprocess, %.1fms inference, %.1fms postprocess per image at shape ' - f'{(1, 3, *im.shape[2:])}' % t) + t = tuple(x.t / self.seen * 1e3 for x in profilers) # speeds per image + LOGGER.info( + f"Speed: %.1fms preprocess, %.1fms inference, %.1fms postprocess per image at shape " + f"{(min(self.args.batch, self.seen), 3, *im.shape[2:])}" % t + ) if self.args.save or self.args.save_txt or self.args.save_crop: - nl = len(list(self.save_dir.glob('labels/*.txt'))) # number of labels - s = f"\n{nl} label{'s' * (nl > 1)} saved to {self.save_dir / 'labels'}" if self.args.save_txt else '' + nl = len(list(self.save_dir.glob("labels/*.txt"))) # number of labels + s = f"\n{nl} label{'s' * (nl > 1)} saved to {self.save_dir / 'labels'}" if self.args.save_txt else "" LOGGER.info(f"Results saved to {colorstr('bold', self.save_dir)}{s}") - - self.run_callbacks('on_predict_end') + self.run_callbacks("on_predict_end") def setup_model(self, model, verbose=True): """Initialize YOLO model with given 
parameters and set it to evaluation mode.""" - self.model = AutoBackend(model or self.args.model, - device=select_device(self.args.device, verbose=verbose), - dnn=self.args.dnn, - data=self.args.data, - fp16=self.args.half, - fuse=True, - verbose=verbose) + self.model = AutoBackend( + weights=model or self.args.model, + device=select_device(self.args.device, verbose=verbose), + dnn=self.args.dnn, + data=self.args.data, + fp16=self.args.half, + batch=self.args.batch, + fuse=True, + verbose=verbose, + ) self.device = self.model.device # update device self.args.half = self.model.fp16 # update half self.model.eval() - def show(self, p): - """Display an image in a window using OpenCV imshow().""" - im0 = self.plotted_img - if platform.system() == 'Linux' and p not in self.windows: - self.windows.append(p) - cv2.namedWindow(str(p), cv2.WINDOW_NORMAL | cv2.WINDOW_KEEPRATIO) # allow window resize (Linux) - cv2.resizeWindow(str(p), im0.shape[1], im0.shape[0]) - cv2.imshow(str(p), im0) - cv2.waitKey(500 if self.batch[3].startswith('image') else 1) # 1 millisecond + def write_results(self, i, p, im, s): + """Write inference results to a file or directory.""" + string = "" # print string + if len(im.shape) == 3: + im = im[None] # expand for batch dim + if self.source_type.stream or self.source_type.from_img or self.source_type.tensor: # batch_size >= 1 + string += f"{i}: " + frame = self.dataset.count + else: + match = re.search(r"frame (\d+)/", s[i]) + frame = int(match.group(1)) if match else None # 0 if frame undetermined - def save_preds(self, vid_cap, idx, save_path): + self.txt_path = self.save_dir / "labels" / (p.stem + ("" if self.dataset.mode == "image" else f"_{frame}")) + string += "%gx%g " % im.shape[2:] + result = self.results[i] + result.save_dir = self.save_dir.__str__() # used in other locations + string += result.verbose() + f"{result.speed['inference']:.1f}ms" + + # Add predictions to image + if self.args.save or self.args.show: + self.plotted_img = result.plot( + line_width=self.args.line_width, + boxes=self.args.show_boxes, + conf=self.args.show_conf, + labels=self.args.show_labels, + im_gpu=None if self.args.retina_masks else im[i], + ) + + # Save results + if self.args.save_txt: + result.save_txt(f"{self.txt_path}.txt", save_conf=self.args.save_conf) + if self.args.save_crop: + result.save_crop(save_dir=self.save_dir / "crops", file_name=self.txt_path.stem) + if self.args.show: + self.show(str(p)) + if self.args.save: + self.save_predicted_images(str(self.save_dir / (p.name or "tmp.jpg")), frame) + + return string + + def save_predicted_images(self, save_path="", frame=0): """Save video predictions as mp4 at specified path.""" - im0 = self.plotted_img - # Save imgs - if self.dataset.mode == 'image': - cv2.imwrite(save_path, im0) - else: # 'video' or 'stream' - if self.vid_path[idx] != save_path: # new video - self.vid_path[idx] = save_path - if isinstance(self.vid_writer[idx], cv2.VideoWriter): - self.vid_writer[idx].release() # release previous video writer - if vid_cap: # video - fps = int(vid_cap.get(cv2.CAP_PROP_FPS)) # integer required, floats produce error in MP4 codec - w = int(vid_cap.get(cv2.CAP_PROP_FRAME_WIDTH)) - h = int(vid_cap.get(cv2.CAP_PROP_FRAME_HEIGHT)) - else: # stream - fps, w, h = 30, im0.shape[1], im0.shape[0] - suffix, fourcc = ('.mp4', 'avc1') if MACOS else ('.avi', 'WMV2') if WINDOWS else ('.avi', 'MJPG') - save_path = str(Path(save_path).with_suffix(suffix)) - self.vid_writer[idx] = cv2.VideoWriter(save_path, cv2.VideoWriter_fourcc(*fourcc), fps, (w, 
h)) - self.vid_writer[idx].write(im0) + im = self.plotted_img + + # Save videos and streams + if self.dataset.mode in {"stream", "video"}: + fps = self.dataset.fps if self.dataset.mode == "video" else 30 + frames_path = f'{save_path.split(".", 1)[0]}_frames/' + if save_path not in self.vid_writer: # new video + if self.args.save_frames: + Path(frames_path).mkdir(parents=True, exist_ok=True) + suffix, fourcc = (".mp4", "avc1") if MACOS else (".avi", "WMV2") if WINDOWS else (".avi", "MJPG") + self.vid_writer[save_path] = cv2.VideoWriter( + filename=str(Path(save_path).with_suffix(suffix)), + fourcc=cv2.VideoWriter_fourcc(*fourcc), + fps=fps, # integer required, floats produce error in MP4 codec + frameSize=(im.shape[1], im.shape[0]), # (width, height) + ) + + # Save video + self.vid_writer[save_path].write(im) + if self.args.save_frames: + cv2.imwrite(f"{frames_path}{frame}.jpg", im) + + # Save images + else: + cv2.imwrite(save_path, im) + + def show(self, p=""): + """Display an image in a window using OpenCV imshow().""" + im = self.plotted_img + if platform.system() == "Linux" and p not in self.windows: + self.windows.append(p) + cv2.namedWindow(p, cv2.WINDOW_NORMAL | cv2.WINDOW_KEEPRATIO) # allow window resize (Linux) + cv2.resizeWindow(p, im.shape[1], im.shape[0]) # (width, height) + cv2.imshow(p, im) + cv2.waitKey(300 if self.dataset.mode == "image" else 1) # 1 millisecond def run_callbacks(self, event: str): """Runs all registered callbacks for a specific event.""" @@ -352,7 +393,5 @@ class BasePredictor: callback(self) def add_callback(self, event: str, func): - """ - Add callback - """ + """Add callback.""" self.callbacks[event].append(func) diff --git a/ultralytics/engine/results.py b/ultralytics/engine/results.py index d6763ff..85849c3 100644 --- a/ultralytics/engine/results.py +++ b/ultralytics/engine/results.py @@ -1,6 +1,6 @@ # Ultralytics YOLO 🚀, AGPL-3.0 license """ -Ultralytics Results, Boxes and Masks classes for handling inference results +Ultralytics Results, Boxes and Masks classes for handling inference results. Usage: See https://docs.ultralytics.com/modes/predict/ """ @@ -13,17 +13,17 @@ import numpy as np import torch from ultralytics.data.augment import LetterBox -from ultralytics.utils import LOGGER, SimpleClass, deprecation_warn, ops +from ultralytics.utils import LOGGER, SimpleClass, ops from ultralytics.utils.plotting import Annotator, colors, save_one_box +from ultralytics.utils.torch_utils import smart_inference_mode class BaseTensor(SimpleClass): - """ - Base tensor class with additional methods for easy manipulation and device handling. - """ + """Base tensor class with additional methods for easy manipulation and device handling.""" def __init__(self, data, orig_shape) -> None: - """Initialize BaseTensor with data and original shape. + """ + Initialize BaseTensor with data and original shape. Args: data (torch.Tensor | np.ndarray): Predictions, such as bboxes, masks and keypoints. @@ -67,45 +67,63 @@ class Results(SimpleClass): """ A class for storing and manipulating inference results. - Args: - orig_img (numpy.ndarray): The original image as a numpy array. - path (str): The path to the image file. - names (dict): A dictionary of class names. - boxes (torch.tensor, optional): A 2D tensor of bounding box coordinates for each detection. - masks (torch.tensor, optional): A 3D tensor of detection masks, where each mask is a binary image. - probs (torch.tensor, optional): A 1D tensor of probabilities of each class for classification task. 
- keypoints (List[List[float]], optional): A list of detected keypoints for each object. - Attributes: - orig_img (numpy.ndarray): The original image as a numpy array. - orig_shape (tuple): The original image shape in (height, width) format. - boxes (Boxes, optional): A Boxes object containing the detection bounding boxes. - masks (Masks, optional): A Masks object containing the detection masks. - probs (Probs, optional): A Probs object containing probabilities of each class for classification task. - keypoints (Keypoints, optional): A Keypoints object containing detected keypoints for each object. - speed (dict): A dictionary of preprocess, inference, and postprocess speeds in milliseconds per image. - names (dict): A dictionary of class names. - path (str): The path to the image file. - _keys (tuple): A tuple of attribute names for non-empty attributes. + orig_img (numpy.ndarray): Original image as a numpy array. + orig_shape (tuple): Original image shape in (height, width) format. + boxes (Boxes, optional): Object containing detection bounding boxes. + masks (Masks, optional): Object containing detection masks. + probs (Probs, optional): Object containing class probabilities for classification tasks. + keypoints (Keypoints, optional): Object containing detected keypoints for each object. + speed (dict): Dictionary of preprocess, inference, and postprocess speeds (ms/image). + names (dict): Dictionary of class names. + path (str): Path to the image file. + + Methods: + update(boxes=None, masks=None, probs=None, obb=None): Updates object attributes with new detection results. + cpu(): Returns a copy of the Results object with all tensors on CPU memory. + numpy(): Returns a copy of the Results object with all tensors as numpy arrays. + cuda(): Returns a copy of the Results object with all tensors on GPU memory. + to(*args, **kwargs): Returns a copy of the Results object with tensors on a specified device and dtype. + new(): Returns a new Results object with the same image, path, and names. + plot(...): Plots detection results on an input image, returning an annotated image. + show(): Show annotated results to screen. + save(filename): Save annotated results to file. + verbose(): Returns a log string for each task, detailing detections and classifications. + save_txt(txt_file, save_conf=False): Saves detection results to a text file. + save_crop(save_dir, file_name=Path("im.jpg")): Saves cropped detection images. + tojson(normalize=False): Converts detection results to JSON format. """ - def __init__(self, orig_img, path, names, boxes=None, masks=None, probs=None, keypoints=None) -> None: - """Initialize the Results class.""" + def __init__(self, orig_img, path, names, boxes=None, masks=None, probs=None, keypoints=None, obb=None) -> None: + """ + Initialize the Results class. + + Args: + orig_img (numpy.ndarray): The original image as a numpy array. + path (str): The path to the image file. + names (dict): A dictionary of class names. + boxes (torch.tensor, optional): A 2D tensor of bounding box coordinates for each detection. + masks (torch.tensor, optional): A 3D tensor of detection masks, where each mask is a binary image. + probs (torch.tensor, optional): A 1D tensor of probabilities of each class for classification task. + keypoints (torch.tensor, optional): A 2D tensor of keypoint coordinates for each detection. + obb (torch.tensor, optional): A 2D tensor of oriented bounding box coordinates for each detection. 
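# --- Illustrative sketch: consuming a Results object, including the new 'obb'
# --- attribute added by this patch. 'bus.jpg' is a stock Ultralytics asset.
from ultralytics import YOLO

model = YOLO("yolov8n.pt")
for r in model("bus.jpg"):
    print(r.boxes.xyxy)  # (N, 4) boxes in native image coordinates
    print(r.obb)         # None for axis-aligned detection models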
+ """ self.orig_img = orig_img self.orig_shape = orig_img.shape[:2] self.boxes = Boxes(boxes, self.orig_shape) if boxes is not None else None # native size boxes self.masks = Masks(masks, self.orig_shape) if masks is not None else None # native size or imgsz masks self.probs = Probs(probs) if probs is not None else None self.keypoints = Keypoints(keypoints, self.orig_shape) if keypoints is not None else None - self.speed = {'preprocess': None, 'inference': None, 'postprocess': None} # milliseconds per image + self.obb = OBB(obb, self.orig_shape) if obb is not None else None + self.speed = {"preprocess": None, "inference": None, "postprocess": None} # milliseconds per image self.names = names self.path = path self.save_dir = None - self._keys = 'boxes', 'masks', 'probs', 'keypoints' + self._keys = "boxes", "masks", "probs", "keypoints", "obb" def __getitem__(self, idx): """Return a Results object for the specified index.""" - return self._apply('__getitem__', idx) + return self._apply("__getitem__", idx) def __len__(self): """Return the number of detections in the Results object.""" @@ -114,17 +132,30 @@ class Results(SimpleClass): if v is not None: return len(v) - def update(self, boxes=None, masks=None, probs=None): + def update(self, boxes=None, masks=None, probs=None, obb=None): """Update the boxes, masks, and probs attributes of the Results object.""" if boxes is not None: - ops.clip_boxes(boxes, self.orig_shape) # clip boxes - self.boxes = Boxes(boxes, self.orig_shape) + self.boxes = Boxes(ops.clip_boxes(boxes, self.orig_shape), self.orig_shape) if masks is not None: self.masks = Masks(masks, self.orig_shape) if probs is not None: self.probs = probs + if obb is not None: + self.obb = OBB(obb, self.orig_shape) def _apply(self, fn, *args, **kwargs): + """ + Applies a function to all non-empty attributes and returns a new Results object with modified attributes. This + function is internally called by methods like .to(), .cuda(), .cpu(), etc. + + Args: + fn (str): The name of the function to apply. + *args: Variable length argument list to pass to the function. + **kwargs: Arbitrary keyword arguments to pass to the function. + + Returns: + Results: A new Results object with attributes modified by the applied function. 
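# --- Illustrative sketch: the device/dtype helpers all funnel through
# --- _apply(); 'r' stands for an example Results object.
from ultralytics import YOLO

r = YOLO("yolov8n.pt")("bus.jpg")[0]
r_cpu = r.cpu()        # same as r._apply("cpu")
r_np = r.numpy()       # same as r._apply("numpy")
r_gpu = r.to("cuda")   # same as r._apply("to", "cuda"); needs a CUDA device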
+ """ r = self.new() for k in self._keys: v = getattr(self, k) @@ -134,40 +165,42 @@ class Results(SimpleClass): def cpu(self): """Return a copy of the Results object with all tensors on CPU memory.""" - return self._apply('cpu') + return self._apply("cpu") def numpy(self): """Return a copy of the Results object with all tensors as numpy arrays.""" - return self._apply('numpy') + return self._apply("numpy") def cuda(self): """Return a copy of the Results object with all tensors on GPU memory.""" - return self._apply('cuda') + return self._apply("cuda") def to(self, *args, **kwargs): """Return a copy of the Results object with tensors on the specified device and dtype.""" - return self._apply('to', *args, **kwargs) + return self._apply("to", *args, **kwargs) def new(self): """Return a new Results object with the same image, path, and names.""" return Results(orig_img=self.orig_img, path=self.path, names=self.names) def plot( - self, - conf=True, - line_width=None, - font_size=None, - font='Arial.ttf', - pil=False, - img=None, - im_gpu=None, - kpt_radius=5, - kpt_line=True, - labels=True, - boxes=True, - masks=True, - probs=True, - **kwargs # deprecated args TODO: remove support in 8.2 + self, + conf=True, + line_width=None, + font_size=None, + font="Arial.ttf", + pil=False, + img=None, + im_gpu=None, + kpt_radius=5, + kpt_line=True, + labels=True, + boxes=True, + masks=True, + probs=True, + show=False, + save=False, + filename=None, ): """ Plots the detection results on an input RGB image. Accepts a numpy array (cv2) or a PIL Image. @@ -186,6 +219,9 @@ class Results(SimpleClass): boxes (bool): Whether to plot the bounding boxes. masks (bool): Whether to plot the masks. probs (bool): Whether to plot classification probability + show (bool): Whether to display the annotated image directly. + save (bool): Whether to save the annotated image to `filename`. + filename (str): Filename to save image to if save is True. Returns: (numpy.ndarray): A numpy array of the annotated image. 
@@ -207,19 +243,9 @@ class Results(SimpleClass): if img is None and isinstance(self.orig_img, torch.Tensor): img = (self.orig_img[0].detach().permute(1, 2, 0).contiguous() * 255).to(torch.uint8).cpu().numpy() - # Deprecation warn TODO: remove in 8.2 - if 'show_conf' in kwargs: - deprecation_warn('show_conf', 'conf') - conf = kwargs['show_conf'] - assert isinstance(conf, bool), '`show_conf` should be of boolean type, i.e, show_conf=True/False' - - if 'line_thickness' in kwargs: - deprecation_warn('line_thickness', 'line_width') - line_width = kwargs['line_thickness'] - assert isinstance(line_width, int), '`line_width` should be of int type, i.e, line_width=3' - names = self.names - pred_boxes, show_boxes = self.boxes, boxes + is_obb = self.obb is not None + pred_boxes, show_boxes = self.obb if is_obb else self.boxes, boxes pred_masks, show_masks = self.masks, masks pred_probs, show_probs = self.probs, probs annotator = Annotator( @@ -228,28 +254,35 @@ class Results(SimpleClass): font_size, font, pil or (pred_probs is not None and show_probs), # Classify tasks default to pil=True - example=names) + example=names, + ) # Plot Segment results if pred_masks and show_masks: if im_gpu is None: img = LetterBox(pred_masks.shape[1:])(image=annotator.result()) - im_gpu = torch.as_tensor(img, dtype=torch.float16, device=pred_masks.data.device).permute( - 2, 0, 1).flip(0).contiguous() / 255 + im_gpu = ( + torch.as_tensor(img, dtype=torch.float16, device=pred_masks.data.device) + .permute(2, 0, 1) + .flip(0) + .contiguous() + / 255 + ) idx = pred_boxes.cls if pred_boxes else range(len(pred_masks)) annotator.masks(pred_masks.data, colors=[colors(x, True) for x in idx], im_gpu=im_gpu) # Plot Detect results - if pred_boxes and show_boxes: + if pred_boxes is not None and show_boxes: for d in reversed(pred_boxes): c, conf, id = int(d.cls), float(d.conf) if conf else None, None if d.id is None else int(d.id.item()) - name = ('' if id is None else f'id:{id} ') + names[c] - label = (f'{name} {conf:.2f}' if conf else name) if labels else None - annotator.box_label(d.xyxy.squeeze(), label, color=colors(c, True)) + name = ("" if id is None else f"id:{id} ") + names[c] + label = (f"{name} {conf:.2f}" if conf else name) if labels else None + box = d.xyxyxyxy.reshape(-1, 4, 2).squeeze() if is_obb else d.xyxy.squeeze() + annotator.box_label(box, label, color=colors(c, True), rotated=is_obb) # Plot Classify results if pred_probs is not None and show_probs: - text = ',\n'.join(f'{names[j] if names else j} {pred_probs.data[j]:.2f}' for j in pred_probs.top5) + text = ",\n".join(f"{names[j] if names else j} {pred_probs.data[j]:.2f}" for j in pred_probs.top5) x = round(self.orig_shape[0] * 0.03) annotator.text([x, x], text, txt_color=(255, 255, 255)) # TODO: allow setting colors @@ -258,17 +291,34 @@ class Results(SimpleClass): for k in reversed(self.keypoints.data): annotator.kpts(k, self.orig_shape, radius=kpt_radius, kpt_line=kpt_line) + # Show results + if show: + annotator.show(self.path) + + # Save results + if save: + annotator.save(filename) + return annotator.result() + def show(self, *args, **kwargs): + """Show annotated results image.""" + self.plot(show=True, *args, **kwargs) + + def save(self, filename=None, *args, **kwargs): + """Save annotated results image.""" + if not filename: + filename = f"results_{Path(self.path).name}" + self.plot(save=True, filename=filename, *args, **kwargs) + return filename + def verbose(self): - """ - Return log string for each task. 
- """ - log_string = '' + """Return log string for each task.""" + log_string = "" probs = self.probs boxes = self.boxes if len(self) == 0: - return log_string if probs is not None else f'{log_string}(no detections), ' + return log_string if probs is not None else f"{log_string}(no detections), " if probs is not None: log_string += f"{', '.join(f'{self.names[j]} {probs.data[j]:.2f}' for j in probs.top5)}, " if boxes: @@ -285,34 +335,35 @@ class Results(SimpleClass): txt_file (str): txt file path. save_conf (bool): save confidence score or not. """ - boxes = self.boxes + is_obb = self.obb is not None + boxes = self.obb if is_obb else self.boxes masks = self.masks probs = self.probs kpts = self.keypoints texts = [] if probs is not None: # Classify - [texts.append(f'{probs.data[j]:.2f} {self.names[j]}') for j in probs.top5] + [texts.append(f"{probs.data[j]:.2f} {self.names[j]}") for j in probs.top5] elif boxes: # Detect/segment/pose for j, d in enumerate(boxes): c, conf, id = int(d.cls), float(d.conf), None if d.id is None else int(d.id.item()) - line = (c, *d.xywhn.view(-1)) + line = (c, *(d.xyxyxyxyn.view(-1) if is_obb else d.xywhn.view(-1))) if masks: seg = masks[j].xyn[0].copy().reshape(-1) # reversed mask.xyn, (n,2) to (n*2) line = (c, *seg) if kpts is not None: kpt = torch.cat((kpts[j].xyn, kpts[j].conf[..., None]), 2) if kpts[j].has_visible else kpts[j].xyn - line += (*kpt.reshape(-1).tolist(), ) - line += (conf, ) * save_conf + (() if id is None else (id, )) - texts.append(('%g ' * len(line)).rstrip() % line) + line += (*kpt.reshape(-1).tolist(),) + line += (conf,) * save_conf + (() if id is None else (id,)) + texts.append(("%g " * len(line)).rstrip() % line) if texts: Path(txt_file).parent.mkdir(parents=True, exist_ok=True) # make directory - with open(txt_file, 'a') as f: - f.writelines(text + '\n' for text in texts) + with open(txt_file, "a") as f: + f.writelines(text + "\n" for text in texts) - def save_crop(self, save_dir, file_name=Path('im.jpg')): + def save_crop(self, save_dir, file_name=Path("im.jpg")): """ Save cropped predictions to `save_dir/cls/file_name.jpg`. @@ -321,79 +372,105 @@ class Results(SimpleClass): file_name (str | pathlib.Path): File name. 
""" if self.probs is not None: - LOGGER.warning('WARNING ⚠️ Classify task do not support `save_crop`.') + LOGGER.warning("WARNING ⚠️ Classify task do not support `save_crop`.") + return + if self.obb is not None: + LOGGER.warning("WARNING ⚠️ OBB task do not support `save_crop`.") return for d in self.boxes: - save_one_box(d.xyxy, - self.orig_img.copy(), - file=Path(save_dir) / self.names[int(d.cls)] / f'{Path(file_name).stem}.jpg', - BGR=True) + save_one_box( + d.xyxy, + self.orig_img.copy(), + file=Path(save_dir) / self.names[int(d.cls)] / f"{Path(file_name)}.jpg", + BGR=True, + ) - def tojson(self, normalize=False): - """Convert the object to JSON format.""" + def summary(self, normalize=False, decimals=5): + """Convert the results to a summarized format.""" if self.probs is not None: - LOGGER.warning('Warning: Classify task do not support `tojson` yet.') + LOGGER.warning("Warning: Classify results do not support the `summary()` method yet.") return - import json - # Create list of detection dictionaries results = [] data = self.boxes.data.cpu().tolist() h, w = self.orig_shape if normalize else (1, 1) for i, row in enumerate(data): # xyxy, track_id if tracking, conf, class_id - box = {'x1': row[0] / w, 'y1': row[1] / h, 'x2': row[2] / w, 'y2': row[3] / h} - conf = row[-2] + box = { + "x1": round(row[0] / w, decimals), + "y1": round(row[1] / h, decimals), + "x2": round(row[2] / w, decimals), + "y2": round(row[3] / h, decimals), + } + conf = round(row[-2], decimals) class_id = int(row[-1]) - name = self.names[class_id] - result = {'name': name, 'class': class_id, 'confidence': conf, 'box': box} + result = {"name": self.names[class_id], "class": class_id, "confidence": conf, "box": box} if self.boxes.is_track: - result['track_id'] = int(row[-3]) # track ID + result["track_id"] = int(row[-3]) # track ID if self.masks: - x, y = self.masks.xy[i][:, 0], self.masks.xy[i][:, 1] # numpy array - result['segments'] = {'x': (x / w).tolist(), 'y': (y / h).tolist()} + result["segments"] = { + "x": (self.masks.xy[i][:, 0] / w).round(decimals).tolist(), + "y": (self.masks.xy[i][:, 1] / h).round(decimals).tolist(), + } if self.keypoints is not None: x, y, visible = self.keypoints[i].data[0].cpu().unbind(dim=1) # torch Tensor - result['keypoints'] = {'x': (x / w).tolist(), 'y': (y / h).tolist(), 'visible': visible.tolist()} + result["keypoints"] = { + "x": (x / w).numpy().round(decimals).tolist(), # decimals named argument required + "y": (y / h).numpy().round(decimals).tolist(), + "visible": visible.numpy().round(decimals).tolist(), + } results.append(result) - # Convert detections to JSON - return json.dumps(results, indent=2) + return results + + def tojson(self, normalize=False, decimals=5): + """Convert the results to JSON format.""" + import json + + return json.dumps(self.summary(normalize=normalize, decimals=decimals), indent=2) class Boxes(BaseTensor): """ - A class for storing and manipulating detection boxes. - - Args: - boxes (torch.Tensor | numpy.ndarray): A tensor or numpy array containing the detection boxes, - with shape (num_boxes, 6) or (num_boxes, 7). The last two columns contain confidence and class values. - If present, the third last column contains track IDs. - orig_shape (tuple): Original image size, in the format (height, width). + Manages detection boxes, providing easy access and manipulation of box coordinates, confidence scores, class + identifiers, and optional tracking IDs. Supports multiple formats for box coordinates, including both absolute and + normalized forms. 
Attributes: - xyxy (torch.Tensor | numpy.ndarray): The boxes in xyxy format. - conf (torch.Tensor | numpy.ndarray): The confidence values of the boxes. - cls (torch.Tensor | numpy.ndarray): The class values of the boxes. - id (torch.Tensor | numpy.ndarray): The track IDs of the boxes (if available). - xywh (torch.Tensor | numpy.ndarray): The boxes in xywh format. - xyxyn (torch.Tensor | numpy.ndarray): The boxes in xyxy format normalized by original image size. - xywhn (torch.Tensor | numpy.ndarray): The boxes in xywh format normalized by original image size. - data (torch.Tensor): The raw bboxes tensor (alias for `boxes`). + data (torch.Tensor): The raw tensor containing detection boxes and their associated data. + orig_shape (tuple): The original image size as a tuple (height, width), used for normalization. + is_track (bool): Indicates whether tracking IDs are included in the box data. + + Properties: + xyxy (torch.Tensor | numpy.ndarray): Boxes in [x1, y1, x2, y2] format. + conf (torch.Tensor | numpy.ndarray): Confidence scores for each box. + cls (torch.Tensor | numpy.ndarray): Class labels for each box. + id (torch.Tensor | numpy.ndarray, optional): Tracking IDs for each box, if available. + xywh (torch.Tensor | numpy.ndarray): Boxes in [x, y, width, height] format, calculated on demand. + xyxyn (torch.Tensor | numpy.ndarray): Normalized [x1, y1, x2, y2] boxes, relative to `orig_shape`. + xywhn (torch.Tensor | numpy.ndarray): Normalized [x, y, width, height] boxes, relative to `orig_shape`. Methods: - cpu(): Move the object to CPU memory. - numpy(): Convert the object to a numpy array. - cuda(): Move the object to CUDA memory. - to(*args, **kwargs): Move the object to the specified device. + cpu(): Moves the boxes to CPU memory. + numpy(): Converts the boxes to a numpy array format. + cuda(): Moves the boxes to CUDA (GPU) memory. + to(device, dtype=None): Moves the boxes to the specified device. """ def __init__(self, boxes, orig_shape) -> None: - """Initialize the Boxes class.""" + """ + Initialize the Boxes class. + + Args: + boxes (torch.Tensor | numpy.ndarray): A tensor or numpy array containing the detection boxes, with + shape (num_boxes, 6) or (num_boxes, 7). The last two columns contain confidence and class values. + If present, the third last column contains track IDs. + orig_shape (tuple): Original image size, in the format (height, width). + """ if boxes.ndim == 1: boxes = boxes[None, :] n = boxes.shape[-1] - assert n in (6, 7), f'expected `n` in [6, 7], but got {n}' # xyxy, track_id, conf, cls + assert n in (6, 7), f"expected 6 or 7 values but got {n}" # xyxy, track_id, conf, cls super().__init__(boxes, orig_shape) self.is_track = n == 7 self.orig_shape = orig_shape @@ -442,19 +519,12 @@ class Boxes(BaseTensor): xywh[..., [1, 3]] /= self.orig_shape[0] return xywh - @property - def boxes(self): - """Return the raw bboxes tensor (deprecated).""" - LOGGER.warning("WARNING ⚠️ 'Boxes.boxes' is deprecated. Use 'Boxes.data' instead.") - return self.data - class Masks(BaseTensor): """ A class for storing and manipulating detection masks. Attributes: - segments (list): Deprecated property for segments (normalized). xy (list): A list of segments in pixel coordinates. xyn (list): A list of normalized segments. @@ -471,22 +541,14 @@ class Masks(BaseTensor): masks = masks[None, :] super().__init__(masks, orig_shape) - @property - @lru_cache(maxsize=1) - def segments(self): - """Return segments (normalized). 
Deprecated; use xyn property instead."""
-        LOGGER.warning(
-            "WARNING ⚠️ 'Masks.segments' is deprecated. Use 'Masks.xyn' for segments (normalized) and 'Masks.xy' for segments (pixels) instead."
-        )
-        return self.xyn
-
     @property
     @lru_cache(maxsize=1)
     def xyn(self):
         """Return normalized segments."""
         return [
             ops.scale_coords(self.data.shape[1:], x, self.orig_shape, normalize=True)
-            for x in ops.masks2segments(self.data)]
+            for x in ops.masks2segments(self.data)
+        ]
 
     @property
     @lru_cache(maxsize=1)
@@ -494,13 +556,8 @@
         """Return segments in pixel coordinates."""
         return [
             ops.scale_coords(self.data.shape[1:], x, self.orig_shape, normalize=False)
-            for x in ops.masks2segments(self.data)]
-
-    @property
-    def masks(self):
-        """Return the raw masks tensor. Deprecated; use data attribute instead."""
-        LOGGER.warning("WARNING ⚠️ 'Masks.masks' is deprecated. Use 'Masks.data' instead.")
-        return self.data
+            for x in ops.masks2segments(self.data)
+        ]
 
 
 class Keypoints(BaseTensor):
@@ -519,10 +576,14 @@
         to(device, dtype): Returns a copy of the keypoints tensor with the specified device and dtype.
     """
 
+    @smart_inference_mode()  # avoid keypoints < conf in-place error
     def __init__(self, keypoints, orig_shape) -> None:
         """Initializes the Keypoints object with detection keypoints and original image size."""
         if keypoints.ndim == 2:
             keypoints = keypoints[None, :]
+        if keypoints.shape[2] == 3:  # x, y, conf
+            mask = keypoints[..., 2] < 0.5  # points with conf < 0.5 (not visible)
+            keypoints[..., :2][mask] = 0
         super().__init__(keypoints, orig_shape)
         self.has_visible = self.data.shape[-1] == 3
@@ -566,6 +627,7 @@ class Probs(BaseTensor):
     """
 
     def __init__(self, probs, orig_shape=None) -> None:
+        """Initialize the Probs class with classification probabilities and optional original shape of the image."""
         super().__init__(probs, orig_shape)
 
     @property
@@ -591,3 +653,91 @@
     def top5conf(self):
         """Return the confidences of top 5."""
         return self.data[self.top5]
+
+
+class OBB(BaseTensor):
+    """
+    A class for storing and manipulating Oriented Bounding Boxes (OBB).
+
+    Args:
+        boxes (torch.Tensor | numpy.ndarray): A tensor or numpy array containing the detection boxes,
+            with shape (num_boxes, 7) or (num_boxes, 8). The last two columns contain confidence and class values.
+            If present, the third last column contains track IDs, and the fifth column from the left contains rotation.
+        orig_shape (tuple): Original image size, in the format (height, width).
+
+    Attributes:
+        xywhr (torch.Tensor | numpy.ndarray): The boxes in [x_center, y_center, width, height, rotation] format.
+        conf (torch.Tensor | numpy.ndarray): The confidence values of the boxes.
+        cls (torch.Tensor | numpy.ndarray): The class values of the boxes.
+        id (torch.Tensor | numpy.ndarray): The track IDs of the boxes (if available).
+        xyxyxyxyn (torch.Tensor | numpy.ndarray): The rotated boxes in xyxyxyxy format normalized by orig image size.
+        xyxyxyxy (torch.Tensor | numpy.ndarray): The rotated boxes in xyxyxyxy format.
+        xyxy (torch.Tensor | numpy.ndarray): The horizontal boxes in xyxy format.
+        data (torch.Tensor): The raw OBB tensor (alias for `boxes`).
+
+    Methods:
+        cpu(): Move the object to CPU memory.
+        numpy(): Convert the object to a numpy array.
+        cuda(): Move the object to CUDA memory.
+        to(*args, **kwargs): Move the object to the specified device.
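# --- Illustrative geometry sketch (not part of the patch) --------------------
# A self-contained sketch of how the OBB views described above relate: rotate
# the xywhr box to its four corners (conceptually what ops.xywhr2xyxyxyxy
# computes), then reduce to the axis-aligned xyxy hull exactly as the xyxy
# property below does with min/max. All numbers are arbitrary placeholders.
import math

import numpy as np

xc, yc, w, h, r = 100.0, 50.0, 40.0, 20.0, math.pi / 6  # xywhr, rotation in radians
cos_r, sin_r = math.cos(r), math.sin(r)
offsets = np.array([[-w / 2, -h / 2], [w / 2, -h / 2], [w / 2, h / 2], [-w / 2, h / 2]])
rot = np.array([[cos_r, -sin_r], [sin_r, cos_r]])
corners = offsets @ rot.T + np.array([xc, yc])  # xyxyxyxy corners, shape (4, 2)
x1, y1 = corners.min(axis=0)  # axis-aligned hull, mirroring OBB.xyxy
x2, y2 = corners.max(axis=0)
print(corners.round(2), [x1, y1, x2, y2])
# ------------------------------------------------------------------------------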
+ """ + + def __init__(self, boxes, orig_shape) -> None: + """Initialize the Boxes class.""" + if boxes.ndim == 1: + boxes = boxes[None, :] + n = boxes.shape[-1] + assert n in (7, 8), f"expected 7 or 8 values but got {n}" # xywh, rotation, track_id, conf, cls + super().__init__(boxes, orig_shape) + self.is_track = n == 8 + self.orig_shape = orig_shape + + @property + def xywhr(self): + """Return the rotated boxes in xywhr format.""" + return self.data[:, :5] + + @property + def conf(self): + """Return the confidence values of the boxes.""" + return self.data[:, -2] + + @property + def cls(self): + """Return the class values of the boxes.""" + return self.data[:, -1] + + @property + def id(self): + """Return the track IDs of the boxes (if available).""" + return self.data[:, -3] if self.is_track else None + + @property + @lru_cache(maxsize=2) + def xyxyxyxy(self): + """Return the boxes in xyxyxyxy format, (N, 4, 2).""" + return ops.xywhr2xyxyxyxy(self.xywhr) + + @property + @lru_cache(maxsize=2) + def xyxyxyxyn(self): + """Return the boxes in xyxyxyxy format, (N, 4, 2).""" + xyxyxyxyn = self.xyxyxyxy.clone() if isinstance(self.xyxyxyxy, torch.Tensor) else np.copy(self.xyxyxyxy) + xyxyxyxyn[..., 0] /= self.orig_shape[1] + xyxyxyxyn[..., 1] /= self.orig_shape[0] + return xyxyxyxyn + + @property + @lru_cache(maxsize=2) + def xyxy(self): + """ + Return the horizontal boxes in xyxy format, (N, 4). + + Accepts both torch and numpy boxes. + """ + x1 = self.xyxyxyxy[..., 0].min(1).values + x2 = self.xyxyxyxy[..., 0].max(1).values + y1 = self.xyxyxyxy[..., 1].min(1).values + y2 = self.xyxyxyxy[..., 1].max(1).values + xyxy = [x1, y1, x2, y2] + return np.stack(xyxy, axis=-1) if isinstance(self.data, np.ndarray) else torch.stack(xyxy, dim=-1) diff --git a/ultralytics/engine/trainer.py b/ultralytics/engine/trainer.py index 4ff4229..2e7a7db 100644 --- a/ultralytics/engine/trainer.py +++ b/ultralytics/engine/trainer.py @@ -1,6 +1,6 @@ # Ultralytics YOLO 🚀, AGPL-3.0 license """ -Train a model on a dataset +Train a model on a dataset. 
Usage: $ yolo mode=train model=yolov8n.pt data=coco128.yaml imgsz=640 epochs=100 batch=16 @@ -19,31 +19,45 @@ import numpy as np import torch from torch import distributed as dist from torch import nn, optim -from torch.cuda import amp -from torch.nn.parallel import DistributedDataParallel as DDP from ultralytics.cfg import get_cfg, get_save_dir from ultralytics.data.utils import check_cls_dataset, check_det_dataset from ultralytics.nn.tasks import attempt_load_one_weight, attempt_load_weights -from ultralytics.utils import (DEFAULT_CFG, LOGGER, RANK, TQDM, __version__, callbacks, clean_url, colorstr, emojis, - yaml_save) +from ultralytics.utils import ( + DEFAULT_CFG, + LOGGER, + RANK, + TQDM, + __version__, + callbacks, + clean_url, + colorstr, + emojis, + yaml_save, +) from ultralytics.utils.autobatch import check_train_batch_size -from ultralytics.utils.checks import check_amp, check_file, check_imgsz, print_args +from ultralytics.utils.checks import check_amp, check_file, check_imgsz, check_model_file_from_stem, print_args from ultralytics.utils.dist import ddp_cleanup, generate_ddp_command from ultralytics.utils.files import get_latest_run -from ultralytics.utils.torch_utils import (EarlyStopping, ModelEMA, de_parallel, init_seeds, one_cycle, select_device, - strip_optimizer) +from ultralytics.utils.torch_utils import ( + EarlyStopping, + ModelEMA, + de_parallel, + init_seeds, + one_cycle, + select_device, + strip_optimizer, +) class BaseTrainer: """ - BaseTrainer + BaseTrainer. A base class for creating trainers. Attributes: args (SimpleNamespace): Configuration for the trainer. - check_resume (method): Method to check if training should be resumed from a saved checkpoint. validator (BaseValidator): Validator instance. model (nn.Module): Model instance. callbacks (defaultdict): Dictionary of callbacks. @@ -62,6 +76,7 @@ class BaseTrainer: trainset (torch.utils.data.Dataset): Training dataset. testset (torch.utils.data.Dataset): Testing dataset. ema (nn.Module): EMA (Exponential Moving Average) of the model. + resume (bool): Resume training from a checkpoint. lf (nn.Module): Loss function. scheduler (torch.optim.lr_scheduler._LRScheduler): Learning rate scheduler. best_fitness (float): The best fitness value achieved. 
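# --- Illustrative subclass sketch (not part of the patch) --------------------
# BaseTrainer is abstract over a few task-specific hooks; the names and
# signatures below come from the stubs and call sites later in this file,
# while the subclass itself is hypothetical.
from ultralytics.engine.trainer import BaseTrainer


class MyTaskTrainer(BaseTrainer):
    def get_model(self, cfg=None, weights=None, verbose=True):
        raise NotImplementedError  # return the nn.Module to train

    def get_dataloader(self, dataset_path, batch_size=16, rank=0, mode="train"):
        raise NotImplementedError  # return a torch DataLoader for this split

    def get_validator(self):
        raise NotImplementedError  # return the validator used by validate()
# ------------------------------------------------------------------------------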
@@ -84,19 +99,19 @@ class BaseTrainer: self.check_resume(overrides) self.device = select_device(self.args.device, self.args.batch) self.validator = None - self.model = None self.metrics = None self.plots = {} init_seeds(self.args.seed + 1 + RANK, deterministic=self.args.deterministic) # Dirs self.save_dir = get_save_dir(self.args) - self.wdir = self.save_dir / 'weights' # weights dir + self.args.name = self.save_dir.name # update name for loggers + self.wdir = self.save_dir / "weights" # weights dir if RANK in (-1, 0): self.wdir.mkdir(parents=True, exist_ok=True) # make dir self.args.save_dir = str(self.save_dir) - yaml_save(self.save_dir / 'args.yaml', vars(self.args)) # save run args - self.last, self.best = self.wdir / 'last.pt', self.wdir / 'best.pt' # checkpoint paths + yaml_save(self.save_dir / "args.yaml", vars(self.args)) # save run args + self.last, self.best = self.wdir / "last.pt", self.wdir / "best.pt" # checkpoint paths self.save_period = self.args.save_period self.batch_size = self.args.batch @@ -106,18 +121,23 @@ class BaseTrainer: print_args(vars(self.args)) # Device - if self.device.type in ('cpu', 'mps'): + if self.device.type in ("cpu", "mps"): self.args.workers = 0 # faster CPU training as time dominated by inference, not dataloading # Model and Dataset - self.model = self.args.model + self.model = check_model_file_from_stem(self.args.model) # add suffix, i.e. yolov8n -> yolov8n.pt try: - if self.args.task == 'classify': + if self.args.task == "classify": self.data = check_cls_dataset(self.args.data) - elif self.args.data.split('.')[-1] in ('yaml', 'yml') or self.args.task in ('detect', 'segment', 'pose'): + elif self.args.data.split(".")[-1] in ("yaml", "yml") or self.args.task in ( + "detect", + "segment", + "pose", + "obb", + ): self.data = check_det_dataset(self.args.data) - if 'yaml_file' in self.data: - self.args.data = self.data['yaml_file'] # for validating 'yolo train data=url.zip' usage + if "yaml_file" in self.data: + self.args.data = self.data["yaml_file"] # for validating 'yolo train data=url.zip' usage except Exception as e: raise RuntimeError(emojis(f"Dataset '{clean_url(self.args.data)}' error ❌ {e}")) from e @@ -133,8 +153,8 @@ class BaseTrainer: self.fitness = None self.loss = None self.tloss = None - self.loss_names = ['Loss'] - self.csv = self.save_dir / 'results.csv' + self.loss_names = ["Loss"] + self.csv = self.save_dir / "results.csv" self.plot_idx = [0, 1, 2] # Callbacks @@ -143,15 +163,11 @@ class BaseTrainer: callbacks.add_integration_callbacks(self) def add_callback(self, event: str, callback): - """ - Appends the given callback. - """ + """Appends the given callback.""" self.callbacks[event].append(callback) def set_callback(self, event: str, callback): - """ - Overrides the existing callbacks with the given callback. - """ + """Overrides the existing callbacks with the given callback.""" self.callbacks[event] = [callback] def run_callbacks(self, event: str): @@ -162,7 +178,7 @@ class BaseTrainer: def train(self): """Allow device='', device=None on Multi-GPU systems to default to device=0.""" if isinstance(self.args.device, str) and len(self.args.device): # i.e. device='0' or device='0,1,2,3' - world_size = len(self.args.device.split(',')) + world_size = len(self.args.device.split(",")) elif isinstance(self.args.device, (tuple, list)): # i.e. device=[0, 1, 2, 3] (multi-GPU from CLI is list) world_size = len(self.args.device) elif torch.cuda.is_available(): # i.e. 
device=None or device='' or device=number @@ -171,14 +187,16 @@ class BaseTrainer: world_size = 0 # Run subprocess if DDP training, else train normally - if world_size > 1 and 'LOCAL_RANK' not in os.environ: + if world_size > 1 and "LOCAL_RANK" not in os.environ: # Argument checks if self.args.rect: LOGGER.warning("WARNING ⚠️ 'rect=True' is incompatible with Multi-GPU training, setting 'rect=False'") self.args.rect = False if self.args.batch == -1: - LOGGER.warning("WARNING ⚠️ 'batch=-1' for AutoBatch is incompatible with Multi-GPU training, setting " - "default 'batch=16'") + LOGGER.warning( + "WARNING ⚠️ 'batch=-1' for AutoBatch is incompatible with Multi-GPU training, setting " + "default 'batch=16'" + ) self.args.batch = 16 # Command @@ -194,42 +212,56 @@ class BaseTrainer: else: self._do_train(world_size) + def _setup_scheduler(self): + """Initialize training learning rate scheduler.""" + if self.args.cos_lr: + self.lf = one_cycle(1, self.args.lrf, self.epochs) # cosine 1->hyp['lrf'] + else: + self.lf = lambda x: max(1 - x / self.epochs, 0) * (1.0 - self.args.lrf) + self.args.lrf # linear + self.scheduler = optim.lr_scheduler.LambdaLR(self.optimizer, lr_lambda=self.lf) + def _setup_ddp(self, world_size): """Initializes and sets the DistributedDataParallel parameters for training.""" torch.cuda.set_device(RANK) - self.device = torch.device('cuda', RANK) + self.device = torch.device("cuda", RANK) # LOGGER.info(f'DDP info: RANK {RANK}, WORLD_SIZE {world_size}, DEVICE {self.device}') - os.environ['NCCL_BLOCKING_WAIT'] = '1' # set to enforce timeout + os.environ["NCCL_BLOCKING_WAIT"] = "1" # set to enforce timeout dist.init_process_group( - 'nccl' if dist.is_nccl_available() else 'gloo', + backend="nccl" if dist.is_nccl_available() else "gloo", timeout=timedelta(seconds=10800), # 3 hours rank=RANK, - world_size=world_size) + world_size=world_size, + ) def _setup_train(self, world_size): - """ - Builds dataloaders and optimizer on correct rank process. - """ + """Builds dataloaders and optimizer on correct rank process.""" # Model - self.run_callbacks('on_pretrain_routine_start') + self.run_callbacks("on_pretrain_routine_start") ckpt = self.setup_model() self.model = self.model.to(self.device) self.set_model_attributes() # Freeze layers - freeze_list = self.args.freeze if isinstance( - self.args.freeze, list) else range(self.args.freeze) if isinstance(self.args.freeze, int) else [] - always_freeze_names = ['.dfl'] # always freeze these layers - freeze_layer_names = [f'model.{x}.' for x in freeze_list] + always_freeze_names + freeze_list = ( + self.args.freeze + if isinstance(self.args.freeze, list) + else range(self.args.freeze) + if isinstance(self.args.freeze, int) + else [] + ) + always_freeze_names = [".dfl"] # always freeze these layers + freeze_layer_names = [f"model.{x}." for x in freeze_list] + always_freeze_names for k, v in self.model.named_parameters(): # v.register_hook(lambda x: torch.nan_to_num(x)) # NaN to 0 (commented for erratic training results) if any(x in k for x in freeze_layer_names): LOGGER.info(f"Freezing layer '{k}'") v.requires_grad = False - elif not v.requires_grad: - LOGGER.info(f"WARNING ⚠️ setting 'requires_grad=True' for frozen layer '{k}'. " - 'See ultralytics.engine.trainer for customization of frozen layers.') + elif not v.requires_grad and v.dtype.is_floating_point: # only floating point Tensor can require gradients + LOGGER.info( + f"WARNING ⚠️ setting 'requires_grad=True' for frozen layer '{k}'. 
" + "See ultralytics.engine.trainer for customization of frozen layers." + ) v.requires_grad = True # Check AMP @@ -241,13 +273,14 @@ class BaseTrainer: if RANK > -1 and world_size > 1: # DDP dist.broadcast(self.amp, src=0) # broadcast the tensor from rank 0 to all other ranks (returns None) self.amp = bool(self.amp) # as boolean - self.scaler = amp.GradScaler(enabled=self.amp) + self.scaler = torch.cuda.amp.GradScaler(enabled=self.amp) if world_size > 1: - self.model = DDP(self.model, device_ids=[RANK]) + self.model = nn.parallel.DistributedDataParallel(self.model, device_ids=[RANK]) # Check imgsz - gs = max(int(self.model.stride.max() if hasattr(self.model, 'stride') else 32), 32) # grid size (max stride) + gs = max(int(self.model.stride.max() if hasattr(self.model, "stride") else 32), 32) # grid size (max stride) self.args.imgsz = check_imgsz(self.args.imgsz, stride=gs, floor=gs, max_dim=1) + self.stride = gs # for multiscale training # Batch size if self.batch_size == -1 and RANK == -1: # single-GPU only, estimate best batch size @@ -255,11 +288,14 @@ class BaseTrainer: # Dataloaders batch_size = self.batch_size // max(world_size, 1) - self.train_loader = self.get_dataloader(self.trainset, batch_size=batch_size, rank=RANK, mode='train') + self.train_loader = self.get_dataloader(self.trainset, batch_size=batch_size, rank=RANK, mode="train") if RANK in (-1, 0): - self.test_loader = self.get_dataloader(self.testset, batch_size=batch_size * 2, rank=-1, mode='val') + # Note: When training DOTA dataset, double batch size could get OOM on images with >2000 objects. + self.test_loader = self.get_dataloader( + self.testset, batch_size=batch_size if self.args.task == "obb" else batch_size * 2, rank=-1, mode="val" + ) self.validator = self.get_validator() - metric_keys = self.validator.metrics.keys + self.label_loss_items(prefix='val') + metric_keys = self.validator.metrics.keys + self.label_loss_items(prefix="val") self.metrics = dict(zip(metric_keys, [0] * len(metric_keys))) self.ema = ModelEMA(self.model) if self.args.plots: @@ -269,22 +305,20 @@ class BaseTrainer: self.accumulate = max(round(self.args.nbs / self.batch_size), 1) # accumulate loss before optimizing weight_decay = self.args.weight_decay * self.batch_size * self.accumulate / self.args.nbs # scale weight_decay iterations = math.ceil(len(self.train_loader.dataset) / max(self.batch_size, self.args.nbs)) * self.epochs - self.optimizer = self.build_optimizer(model=self.model, - name=self.args.optimizer, - lr=self.args.lr0, - momentum=self.args.momentum, - decay=weight_decay, - iterations=iterations) + self.optimizer = self.build_optimizer( + model=self.model, + name=self.args.optimizer, + lr=self.args.lr0, + momentum=self.args.momentum, + decay=weight_decay, + iterations=iterations, + ) # Scheduler - if self.args.cos_lr: - self.lf = one_cycle(1, self.args.lrf, self.epochs) # cosine 1->hyp['lrf'] - else: - self.lf = lambda x: (1 - x / self.epochs) * (1.0 - self.args.lrf) + self.args.lrf # linear - self.scheduler = optim.lr_scheduler.LambdaLR(self.optimizer, lr_lambda=self.lf) + self._setup_scheduler() self.stopper, self.stop = EarlyStopping(patience=self.args.patience), False self.resume_training(ckpt) self.scheduler.last_epoch = self.start_epoch - 1 # do not move - self.run_callbacks('on_pretrain_routine_end') + self.run_callbacks("on_pretrain_routine_end") def _do_train(self, world_size=1): """Train completed, evaluate and plot if specified by arguments.""" @@ -292,35 +326,33 @@ class BaseTrainer: self._setup_ddp(world_size) 
self._setup_train(world_size) - self.epoch_time = None - self.epoch_time_start = time.time() - self.train_time_start = time.time() nb = len(self.train_loader) # number of batches nw = max(round(self.args.warmup_epochs * nb), 100) if self.args.warmup_epochs > 0 else -1 # warmup iterations last_opt_step = -1 - self.run_callbacks('on_train_start') - LOGGER.info(f'Image sizes {self.args.imgsz} train, {self.args.imgsz} val\n' - f'Using {self.train_loader.num_workers * (world_size or 1)} dataloader workers\n' - f"Logging results to {colorstr('bold', self.save_dir)}\n" - f'Starting training for {self.epochs} epochs...') + self.epoch_time = None + self.epoch_time_start = time.time() + self.train_time_start = time.time() + self.run_callbacks("on_train_start") + LOGGER.info( + f'Image sizes {self.args.imgsz} train, {self.args.imgsz} val\n' + f'Using {self.train_loader.num_workers * (world_size or 1)} dataloader workers\n' + f"Logging results to {colorstr('bold', self.save_dir)}\n" + f'Starting training for ' + (f"{self.args.time} hours..." if self.args.time else f"{self.epochs} epochs...") + ) if self.args.close_mosaic: base_idx = (self.epochs - self.args.close_mosaic) * nb self.plot_idx.extend([base_idx, base_idx + 1, base_idx + 2]) - epoch = self.epochs # predefine for resume fully trained model edge cases - for epoch in range(self.start_epoch, self.epochs): + epoch = self.start_epoch + while True: self.epoch = epoch - self.run_callbacks('on_train_epoch_start') + self.run_callbacks("on_train_epoch_start") self.model.train() if RANK != -1: self.train_loader.sampler.set_epoch(epoch) pbar = enumerate(self.train_loader) # Update dataloader attributes (optional) if epoch == (self.epochs - self.args.close_mosaic): - LOGGER.info('Closing dataloader mosaic') - if hasattr(self.train_loader.dataset, 'mosaic'): - self.train_loader.dataset.mosaic = False - if hasattr(self.train_loader.dataset, 'close_mosaic'): - self.train_loader.dataset.close_mosaic(hyp=self.args) + self._close_dataloader_mosaic() self.train_loader.reset() if RANK in (-1, 0): @@ -329,18 +361,19 @@ class BaseTrainer: self.tloss = None self.optimizer.zero_grad() for i, batch in pbar: - self.run_callbacks('on_train_batch_start') + self.run_callbacks("on_train_batch_start") # Warmup ni = i + nb * epoch if ni <= nw: xi = [0, nw] # x interp - self.accumulate = max(1, np.interp(ni, xi, [1, self.args.nbs / self.batch_size]).round()) + self.accumulate = max(1, int(np.interp(ni, xi, [1, self.args.nbs / self.batch_size]).round())) for j, x in enumerate(self.optimizer.param_groups): # Bias lr falls from 0.1 to lr0, all other lrs rise from 0.0 to lr0 - x['lr'] = np.interp( - ni, xi, [self.args.warmup_bias_lr if j == 0 else 0.0, x['initial_lr'] * self.lf(epoch)]) - if 'momentum' in x: - x['momentum'] = np.interp(ni, xi, [self.args.warmup_momentum, self.args.momentum]) + x["lr"] = np.interp( + ni, xi, [self.args.warmup_bias_lr if j == 0 else 0.0, x["initial_lr"] * self.lf(epoch)] + ) + if "momentum" in x: + x["momentum"] = np.interp(ni, xi, [self.args.warmup_momentum, self.args.momentum]) # Forward with torch.cuda.amp.autocast(self.amp): @@ -348,8 +381,9 @@ class BaseTrainer: self.loss, self.loss_items = self.model(batch) if RANK != -1: self.loss *= world_size - self.tloss = (self.tloss * i + self.loss_items) / (i + 1) if self.tloss is not None \ - else self.loss_items + self.tloss = ( + (self.tloss * i + self.loss_items) / (i + 1) if self.tloss is not None else self.loss_items + ) # Backward self.scaler.scale(self.loss).backward() @@ -359,115 +393,137 @@ 
class BaseTrainer: self.optimizer_step() last_opt_step = ni + # Timed stopping + if self.args.time: + self.stop = (time.time() - self.train_time_start) > (self.args.time * 3600) + if RANK != -1: # if DDP training + broadcast_list = [self.stop if RANK == 0 else None] + dist.broadcast_object_list(broadcast_list, 0) # broadcast 'stop' to all ranks + self.stop = broadcast_list[0] + if self.stop: # training time exceeded + break + # Log - mem = f'{torch.cuda.memory_reserved() / 1E9 if torch.cuda.is_available() else 0:.3g}G' # (GB) - loss_len = self.tloss.shape[0] if len(self.tloss.size()) else 1 + mem = f"{torch.cuda.memory_reserved() / 1E9 if torch.cuda.is_available() else 0:.3g}G" # (GB) + loss_len = self.tloss.shape[0] if len(self.tloss.shape) else 1 losses = self.tloss if loss_len > 1 else torch.unsqueeze(self.tloss, 0) if RANK in (-1, 0): pbar.set_description( - ('%11s' * 2 + '%11.4g' * (2 + loss_len)) % - (f'{epoch + 1}/{self.epochs}', mem, *losses, batch['cls'].shape[0], batch['img'].shape[-1])) - self.run_callbacks('on_batch_end') + ("%11s" * 2 + "%11.4g" * (2 + loss_len)) + % (f"{epoch + 1}/{self.epochs}", mem, *losses, batch["cls"].shape[0], batch["img"].shape[-1]) + ) + self.run_callbacks("on_batch_end") if self.args.plots and ni in self.plot_idx: self.plot_training_samples(batch, ni) - self.run_callbacks('on_train_batch_end') - - self.lr = {f'lr/pg{ir}': x['lr'] for ir, x in enumerate(self.optimizer.param_groups)} # for loggers - - with warnings.catch_warnings(): - warnings.simplefilter('ignore') # suppress 'Detected lr_scheduler.step() before optimizer.step()' - self.scheduler.step() - self.run_callbacks('on_train_epoch_end') + self.run_callbacks("on_train_batch_end") + self.lr = {f"lr/pg{ir}": x["lr"] for ir, x in enumerate(self.optimizer.param_groups)} # for loggers + self.run_callbacks("on_train_epoch_end") if RANK in (-1, 0): + final_epoch = epoch + 1 == self.epochs + self.ema.update_attr(self.model, include=["yaml", "nc", "args", "names", "stride", "class_weights"]) # Validation - self.ema.update_attr(self.model, include=['yaml', 'nc', 'args', 'names', 'stride', 'class_weights']) - final_epoch = (epoch + 1 == self.epochs) or self.stopper.possible_stop - - if self.args.val or final_epoch: + if (self.args.val and (((epoch+1) % self.args.val_period == 0) or (self.epochs - epoch) <= 10)) \ + or final_epoch or self.stopper.possible_stop or self.stop: self.metrics, self.fitness = self.validate() self.save_metrics(metrics={**self.label_loss_items(self.tloss), **self.metrics, **self.lr}) - self.stop = self.stopper(epoch + 1, self.fitness) + self.stop |= self.stopper(epoch + 1, self.fitness) or final_epoch + if self.args.time: + self.stop |= (time.time() - self.train_time_start) > (self.args.time * 3600) # Save model - if self.args.save or (epoch + 1 == self.epochs): + if self.args.save or final_epoch: self.save_model() - self.run_callbacks('on_model_save') + self.run_callbacks("on_model_save") - tnow = time.time() - self.epoch_time = tnow - self.epoch_time_start - self.epoch_time_start = tnow - self.run_callbacks('on_fit_epoch_end') - torch.cuda.empty_cache() # clears GPU vRAM at end of epoch, can help with out of memory errors + # Scheduler + t = time.time() + self.epoch_time = t - self.epoch_time_start + self.epoch_time_start = t + with warnings.catch_warnings(): + warnings.simplefilter("ignore") # suppress 'Detected lr_scheduler.step() before optimizer.step()' + if self.args.time: + mean_epoch_time = (t - self.train_time_start) / (epoch - self.start_epoch + 1) + self.epochs = 
self.args.epochs = math.ceil(self.args.time * 3600 / mean_epoch_time) + self._setup_scheduler() + self.scheduler.last_epoch = self.epoch # do not move + self.stop |= epoch >= self.epochs # stop if exceeded epochs + self.scheduler.step() + self.run_callbacks("on_fit_epoch_end") + torch.cuda.empty_cache() # clear GPU memory at end of epoch, may help reduce CUDA out of memory errors # Early Stopping if RANK != -1: # if DDP training broadcast_list = [self.stop if RANK == 0 else None] dist.broadcast_object_list(broadcast_list, 0) # broadcast 'stop' to all ranks - if RANK != 0: - self.stop = broadcast_list[0] + self.stop = broadcast_list[0] if self.stop: break # must break all DDP ranks + epoch += 1 if RANK in (-1, 0): # Do final val with best.pt - LOGGER.info(f'\n{epoch - self.start_epoch + 1} epochs completed in ' - f'{(time.time() - self.train_time_start) / 3600:.3f} hours.') + LOGGER.info( + f"\n{epoch - self.start_epoch + 1} epochs completed in " + f"{(time.time() - self.train_time_start) / 3600:.3f} hours." + ) self.final_eval() if self.args.plots: self.plot_metrics() - self.run_callbacks('on_train_end') + self.run_callbacks("on_train_end") torch.cuda.empty_cache() - self.run_callbacks('teardown') + self.run_callbacks("teardown") def save_model(self): - """Save model checkpoints based on various conditions.""" + """Save model training checkpoints with additional metadata.""" + import pandas as pd # scope for faster startup + + metrics = {**self.metrics, **{"fitness": self.fitness}} + results = {k.strip(): v for k, v in pd.read_csv(self.csv).to_dict(orient="list").items()} ckpt = { - 'epoch': self.epoch, - 'best_fitness': self.best_fitness, - 'model': deepcopy(de_parallel(self.model)).half(), - 'ema': deepcopy(self.ema.ema).half(), - 'updates': self.ema.updates, - 'optimizer': self.optimizer.state_dict(), - 'train_args': vars(self.args), # save as dict - 'date': datetime.now().isoformat(), - 'version': __version__} + "epoch": self.epoch, + "best_fitness": self.best_fitness, + "model": deepcopy(de_parallel(self.model)).half(), + "ema": deepcopy(self.ema.ema).half(), + "updates": self.ema.updates, + "optimizer": self.optimizer.state_dict(), + "train_args": vars(self.args), # save as dict + "train_metrics": metrics, + "train_results": results, + "date": datetime.now().isoformat(), + "version": __version__, + "license": "AGPL-3.0 (https://ultralytics.com/license)", + "docs": "https://docs.ultralytics.com", + } - # Use dill (if exists) to serialize the lambda functions where pickle does not do this - try: - import dill as pickle - except ImportError: - import pickle - - # Save last, best and delete - torch.save(ckpt, self.last, pickle_module=pickle) + # Save last and best + torch.save(ckpt, self.last) if self.best_fitness == self.fitness: - torch.save(ckpt, self.best, pickle_module=pickle) - if (self.epoch > 0) and (self.save_period > 0) and (self.epoch % self.save_period == 0): - torch.save(ckpt, self.wdir / f'epoch{self.epoch}.pt', pickle_module=pickle) - del ckpt + torch.save(ckpt, self.best) + if (self.save_period > 0) and (self.epoch > 0) and (self.epoch % self.save_period == 0): + torch.save(ckpt, self.wdir / f"epoch{self.epoch}.pt") @staticmethod def get_dataset(data): """ - Get train, val path from data dict if it exists. Returns None if data format is not recognized. + Get train, val path from data dict if it exists. + + Returns None if data format is not recognized. 
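# --- Illustrative arithmetic sketch (not part of the patch) ------------------
# Worked example of the time-based re-planning in the scheduler block above:
# with args.time = 1 (hours) and a measured mean epoch time of 90 s, training
# is re-planned to ceil(1 * 3600 / 90) = 40 epochs and the LR schedule is
# rebuilt over that horizon. The numbers are illustrative only.
import math

time_hours = 1.0  # stands in for self.args.time
mean_epoch_time = 90.0  # seconds per epoch, averaged over epochs so far
print(math.ceil(time_hours * 3600 / mean_epoch_time))  # 40
# ------------------------------------------------------------------------------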
""" - return data['train'], data.get('val') or data.get('test') + return data["train"], data.get("val") or data.get("test") def setup_model(self): - """ - load/create/download model for any task. - """ + """Load/create/download model for any task.""" if isinstance(self.model, torch.nn.Module): # if model is loaded beforehand. No setup needed return model, weights = self.model, None ckpt = None - if str(model).endswith('.pt'): + if str(model).endswith(".pt"): weights, ckpt = attempt_load_one_weight(model) - cfg = ckpt['model'].yaml + cfg = ckpt["model"].yaml else: cfg = model self.model = self.get_model(cfg=cfg, weights=weights, verbose=RANK == -1) # calls Model(cfg, weights) @@ -484,17 +540,17 @@ class BaseTrainer: self.ema.update(self.model) def preprocess_batch(self, batch): - """ - Allows custom preprocessing model inputs and ground truths depending on task type. - """ + """Allows custom preprocessing model inputs and ground truths depending on task type.""" return batch def validate(self): """ - Runs validation on test set using self.validator. The returned dict is expected to contain "fitness" key. + Runs validation on test set using self.validator. + + The returned dict is expected to contain "fitness" key. """ metrics = self.validator(self) - fitness = metrics.pop('fitness', -self.loss.detach().cpu().numpy()) # use loss as fitness measure if not found + fitness = metrics.pop("fitness", -self.loss.detach().cpu().numpy()) # use loss as fitness measure if not found if not self.best_fitness or self.best_fitness < fitness: self.best_fitness = fitness return metrics, fitness @@ -505,30 +561,28 @@ class BaseTrainer: def get_validator(self): """Returns a NotImplementedError when the get_validator function is called.""" - raise NotImplementedError('get_validator function not implemented in trainer') + raise NotImplementedError("get_validator function not implemented in trainer") - def get_dataloader(self, dataset_path, batch_size=16, rank=0, mode='train'): - """ - Returns dataloader derived from torch.data.Dataloader. - """ - raise NotImplementedError('get_dataloader function not implemented in trainer') + def get_dataloader(self, dataset_path, batch_size=16, rank=0, mode="train"): + """Returns dataloader derived from torch.data.Dataloader.""" + raise NotImplementedError("get_dataloader function not implemented in trainer") - def build_dataset(self, img_path, mode='train', batch=None): - """Build dataset""" - raise NotImplementedError('build_dataset function not implemented in trainer') + def build_dataset(self, img_path, mode="train", batch=None): + """Build dataset.""" + raise NotImplementedError("build_dataset function not implemented in trainer") - def label_loss_items(self, loss_items=None, prefix='train'): + def label_loss_items(self, loss_items=None, prefix="train"): """ - Returns a loss dict with labelled training loss items tensor + Returns a loss dict with labelled training loss items tensor. + + Note: + This is not needed for classification but necessary for segmentation & detection """ - # Not needed for classification but necessary for segmentation & detection - return {'loss': loss_items} if loss_items is not None else ['loss'] + return {"loss": loss_items} if loss_items is not None else ["loss"] def set_model_attributes(self): - """ - To set or update model parameters before training. 
- """ - self.model.names = self.data['names'] + """To set or update model parameters before training.""" + self.model.names = self.data["names"] def build_targets(self, preds, targets): """Builds target tensors for training YOLO model.""" @@ -536,11 +590,11 @@ class BaseTrainer: def progress_string(self): """Returns a string describing training progress.""" - return '' + return "" # TODO: may need to put these following functions into callback def plot_training_samples(self, batch, ni): - """Plots training samples during YOLOv5 training.""" + """Plots training samples during YOLO training.""" pass def plot_training_labels(self): @@ -551,9 +605,9 @@ class BaseTrainer: """Saves training metrics to a CSV file.""" keys, vals = list(metrics.keys()), list(metrics.values()) n = len(metrics) + 1 # number of cols - s = '' if self.csv.exists() else (('%23s,' * n % tuple(['epoch'] + keys)).rstrip(',') + '\n') # header - with open(self.csv, 'a') as f: - f.write(s + ('%23.5g,' * n % tuple([self.epoch + 1] + vals)).rstrip(',') + '\n') + s = "" if self.csv.exists() else (("%23s," * n % tuple(["epoch"] + keys)).rstrip(",") + "\n") # header + with open(self.csv, "a") as f: + f.write(s + ("%23.5g," * n % tuple([self.epoch + 1] + vals)).rstrip(",") + "\n") def plot_metrics(self): """Plot and display metrics visually.""" @@ -562,7 +616,7 @@ class BaseTrainer: def on_plot(self, name, data=None): """Registers plots (e.g. to be consumed in callbacks)""" path = Path(name) - self.plots[path] = {'data': data, 'timestamp': time.time()} + self.plots[path] = {"data": data, "timestamp": time.time()} def final_eval(self): """Performs final evaluation and validation for object detection YOLO model.""" @@ -570,11 +624,11 @@ class BaseTrainer: if f.exists(): strip_optimizer(f) # strip optimizers if f is self.best: - LOGGER.info(f'\nValidating {f}...') + LOGGER.info(f"\nValidating {f}...") self.validator.args.plots = self.args.plots self.metrics = self.validator(model=f) - self.metrics.pop('fitness', None) - self.run_callbacks('on_fit_epoch_end') + self.metrics.pop("fitness", None) + self.run_callbacks("on_fit_epoch_end") def check_resume(self, overrides): """Check if resume checkpoint exists and update arguments accordingly.""" @@ -586,56 +640,62 @@ class BaseTrainer: # Check that resume data YAML exists, otherwise strip to force re-download of dataset ckpt_args = attempt_load_weights(last).args - if not Path(ckpt_args['data']).exists(): - ckpt_args['data'] = self.args.data + if not Path(ckpt_args["data"]).exists(): + ckpt_args["data"] = self.args.data resume = True self.args = get_cfg(ckpt_args) - self.args.model = str(last) # reinstate model - for k in 'imgsz', 'batch': # allow arg updates to reduce memory on resume if crashed due to CUDA OOM + self.args.model = self.args.resume = str(last) # reinstate model + for k in "imgsz", "batch", "device": # allow arg updates to reduce memory or update device on resume if k in overrides: setattr(self.args, k, overrides[k]) except Exception as e: - raise FileNotFoundError('Resume checkpoint not found. Please pass a valid checkpoint to resume from, ' - "i.e. 'yolo train resume model=path/to/last.pt'") from e + raise FileNotFoundError( + "Resume checkpoint not found. Please pass a valid checkpoint to resume from, " + "i.e. 
'yolo train resume model=path/to/last.pt'" + ) from e self.resume = resume def resume_training(self, ckpt): """Resume YOLO training from given epoch and best fitness.""" - if ckpt is None: + if ckpt is None or not self.resume: return best_fitness = 0.0 - start_epoch = ckpt['epoch'] + 1 - if ckpt['optimizer'] is not None: - self.optimizer.load_state_dict(ckpt['optimizer']) # optimizer - best_fitness = ckpt['best_fitness'] - if self.ema and ckpt.get('ema'): - self.ema.ema.load_state_dict(ckpt['ema'].float().state_dict()) # EMA - self.ema.updates = ckpt['updates'] - if self.resume: - assert start_epoch > 0, \ - f'{self.args.model} training to {self.epochs} epochs is finished, nothing to resume.\n' \ - f"Start a new training without resuming, i.e. 'yolo train model={self.args.model}'" - LOGGER.info( - f'Resuming training from {self.args.model} from epoch {start_epoch + 1} to {self.epochs} total epochs') + start_epoch = ckpt["epoch"] + 1 + if ckpt["optimizer"] is not None: + self.optimizer.load_state_dict(ckpt["optimizer"]) # optimizer + best_fitness = ckpt["best_fitness"] + if self.ema and ckpt.get("ema"): + self.ema.ema.load_state_dict(ckpt["ema"].float().state_dict()) # EMA + self.ema.updates = ckpt["updates"] + assert start_epoch > 0, ( + f"{self.args.model} training to {self.epochs} epochs is finished, nothing to resume.\n" + f"Start a new training without resuming, i.e. 'yolo train model={self.args.model}'" + ) + LOGGER.info(f"Resuming training {self.args.model} from epoch {start_epoch + 1} to {self.epochs} total epochs") if self.epochs < start_epoch: LOGGER.info( - f"{self.model} has been trained for {ckpt['epoch']} epochs. Fine-tuning for {self.epochs} more epochs.") - self.epochs += ckpt['epoch'] # finetune additional epochs + f"{self.model} has been trained for {ckpt['epoch']} epochs. Fine-tuning for {self.epochs} more epochs." + ) + self.epochs += ckpt["epoch"] # finetune additional epochs self.best_fitness = best_fitness self.start_epoch = start_epoch if start_epoch > (self.epochs - self.args.close_mosaic): - LOGGER.info('Closing dataloader mosaic') - if hasattr(self.train_loader.dataset, 'mosaic'): - self.train_loader.dataset.mosaic = False - if hasattr(self.train_loader.dataset, 'close_mosaic'): - self.train_loader.dataset.close_mosaic(hyp=self.args) + self._close_dataloader_mosaic() - def build_optimizer(self, model, name='auto', lr=0.001, momentum=0.9, decay=1e-5, iterations=1e5): + def _close_dataloader_mosaic(self): + """Update dataloaders to stop using mosaic augmentation.""" + if hasattr(self.train_loader.dataset, "mosaic"): + self.train_loader.dataset.mosaic = False + if hasattr(self.train_loader.dataset, "close_mosaic"): + LOGGER.info("Closing dataloader mosaic") + self.train_loader.dataset.close_mosaic(hyp=self.args) + + def build_optimizer(self, model, name="auto", lr=0.001, momentum=0.9, decay=1e-5, iterations=1e5): """ - Constructs an optimizer for the given model, based on the specified optimizer name, learning rate, - momentum, weight decay, and number of iterations. + Constructs an optimizer for the given model, based on the specified optimizer name, learning rate, momentum, + weight decay, and number of iterations. Args: model (torch.nn.Module): The model for which to build an optimizer. @@ -652,38 +712,45 @@ class BaseTrainer: """ g = [], [], [] # optimizer parameter groups - bn = tuple(v for k, v in nn.__dict__.items() if 'Norm' in k) # normalization layers, i.e. 
BatchNorm2d()
-        if name == 'auto':
-            nc = getattr(model, 'nc', 10)  # number of classes
+        bn = tuple(v for k, v in nn.__dict__.items() if "Norm" in k)  # normalization layers, i.e. BatchNorm2d()
+        if name == "auto":
+            LOGGER.info(
+                f"{colorstr('optimizer:')} 'optimizer=auto' found, "
+                f"ignoring 'lr0={self.args.lr0}' and 'momentum={self.args.momentum}' and "
+                f"determining best 'optimizer', 'lr0' and 'momentum' automatically... "
+            )
+            nc = getattr(model, "nc", 10)  # number of classes
             lr_fit = round(0.002 * 5 / (4 + nc), 6)  # lr0 fit equation to 6 decimal places
-            name, lr, momentum = ('SGD', 0.01, 0.9) if iterations > 10000 else ('AdamW', lr_fit, 0.9)
+            name, lr, momentum = ("SGD", 0.01, 0.9) if iterations > 10000 else ("AdamW", lr_fit, 0.9)
             self.args.warmup_bias_lr = 0.0  # no higher than 0.01 for Adam
 
         for module_name, module in model.named_modules():
             for param_name, param in module.named_parameters(recurse=False):
-                fullname = f'{module_name}.{param_name}' if module_name else param_name
-                if 'bias' in fullname:  # bias (no decay)
+                fullname = f"{module_name}.{param_name}" if module_name else param_name
+                if "bias" in fullname:  # bias (no decay)
                     g[2].append(param)
                 elif isinstance(module, bn):  # weight (no decay)
                     g[1].append(param)
                 else:  # weight (with decay)
                     g[0].append(param)
 
-        if name in ('Adam', 'Adamax', 'AdamW', 'NAdam', 'RAdam'):
+        if name in ("Adam", "Adamax", "AdamW", "NAdam", "RAdam"):
             optimizer = getattr(optim, name, optim.Adam)(g[2], lr=lr, betas=(momentum, 0.999), weight_decay=0.0)
-        elif name == 'RMSProp':
+        elif name == "RMSProp":
             optimizer = optim.RMSprop(g[2], lr=lr, momentum=momentum)
-        elif name == 'SGD':
+        elif name == "SGD":
             optimizer = optim.SGD(g[2], lr=lr, momentum=momentum, nesterov=True)
         else:
             raise NotImplementedError(
                 f"Optimizer '{name}' not found in list of available optimizers "
-                f'[Adam, AdamW, NAdam, RAdam, RMSProp, SGD, auto].'
-                'To request support for addition optimizers please visit https://github.com/ultralytics/ultralytics.')
+                f"[Adam, AdamW, NAdam, RAdam, RMSProp, SGD, auto]. "
+                "To request support for additional optimizers please visit https://github.com/ultralytics/ultralytics."
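# --- Illustrative arithmetic sketch (not part of the patch) ------------------
# Worked example of the 'auto' optimizer heuristic above: with nc = 80 classes,
# lr_fit = round(0.002 * 5 / (4 + 80), 6) = 0.000119, so short runs
# (iterations <= 10000) get AdamW at that LR while long runs fall back to
# SGD(lr=0.01, momentum=0.9). This just re-traces the arithmetic.
nc = 80
iterations = 8000
lr_fit = round(0.002 * 5 / (4 + nc), 6)
name, lr, momentum = ("SGD", 0.01, 0.9) if iterations > 10000 else ("AdamW", lr_fit, 0.9)
print(name, lr, momentum)  # AdamW 0.000119 0.9
# ------------------------------------------------------------------------------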
+ ) - optimizer.add_param_group({'params': g[0], 'weight_decay': decay}) # add g0 with weight_decay - optimizer.add_param_group({'params': g[1], 'weight_decay': 0.0}) # add g1 (BatchNorm2d weights) + optimizer.add_param_group({"params": g[0], "weight_decay": decay}) # add g0 with weight_decay + optimizer.add_param_group({"params": g[1], "weight_decay": 0.0}) # add g1 (BatchNorm2d weights) LOGGER.info( f"{colorstr('optimizer:')} {type(optimizer).__name__}(lr={lr}, momentum={momentum}) with parameter groups " - f'{len(g[1])} weight(decay=0.0), {len(g[0])} weight(decay={decay}), {len(g[2])} bias(decay=0.0)') + f'{len(g[1])} weight(decay=0.0), {len(g[0])} weight(decay={decay}), {len(g[2])} bias(decay=0.0)' + ) return optimizer diff --git a/ultralytics/engine/tuner.py b/ultralytics/engine/tuner.py index 0702690..f4fe57e 100644 --- a/ultralytics/engine/tuner.py +++ b/ultralytics/engine/tuner.py @@ -13,48 +13,59 @@ Example: from ultralytics import YOLO model = YOLO('yolov8n.pt') - model.tune(data='coco8.yaml', imgsz=640, epochs=100, iterations=10) + model.tune(data='coco8.yaml', epochs=10, iterations=300, optimizer='AdamW', plots=False, save=False, val=False) ``` """ + import random +import shutil +import subprocess import time -from copy import deepcopy import numpy as np +import torch -from ultralytics import YOLO from ultralytics.cfg import get_cfg, get_save_dir -from ultralytics.utils import DEFAULT_CFG, LOGGER, callbacks, colorstr, yaml_print, yaml_save +from ultralytics.utils import DEFAULT_CFG, LOGGER, callbacks, colorstr, remove_colorstr, yaml_print, yaml_save +from ultralytics.utils.plotting import plot_tune_results class Tuner: """ - Class responsible for hyperparameter tuning of YOLO models. + Class responsible for hyperparameter tuning of YOLO models. - The class evolves YOLO model hyperparameters over a given number of iterations - by mutating them according to the search space and retraining the model to evaluate their performance. + The class evolves YOLO model hyperparameters over a given number of iterations + by mutating them according to the search space and retraining the model to evaluate their performance. - Attributes: - space (dict): Hyperparameter search space containing bounds and scaling factors for mutation. - tune_dir (Path): Directory where evolution logs and results will be saved. - evolve_csv (Path): Path to the CSV file where evolution logs are saved. + Attributes: + space (dict): Hyperparameter search space containing bounds and scaling factors for mutation. + tune_dir (Path): Directory where evolution logs and results will be saved. + tune_csv (Path): Path to the CSV file where evolution logs are saved. - Methods: - _mutate(hyp: dict) -> dict: - Mutates the given hyperparameters within the bounds specified in `self.space`. + Methods: + _mutate(hyp: dict) -> dict: + Mutates the given hyperparameters within the bounds specified in `self.space`. - __call__(): - Executes the hyperparameter evolution across multiple iterations. + __call__(): + Executes the hyperparameter evolution across multiple iterations. - Example: - Tune hyperparameters for YOLOv8n on COCO8 at imgsz=640 and epochs=30 for 300 tuning iterations. - ```python - from ultralytics import YOLO + Example: + Tune hyperparameters for YOLOv8n on COCO8 at imgsz=640 and epochs=30 for 300 tuning iterations. 
+ ```python + from ultralytics import YOLO - model = YOLO('yolov8n.pt') - model.tune(data='coco8.yaml', imgsz=640, epochs=100, iterations=10, val=False, cache=True) - ``` - """ + model = YOLO('yolov8n.pt') + model.tune(data='coco8.yaml', epochs=10, iterations=300, optimizer='AdamW', plots=False, save=False, val=False) + ``` + + Tune with custom search space. + ```python + from ultralytics import YOLO + + model = YOLO('yolov8n.pt') + model.tune(space={key1: val1, key2: val2}) # custom search space dictionary + ``` + """ def __init__(self, args=DEFAULT_CFG, _callbacks=None): """ @@ -63,37 +74,44 @@ class Tuner: Args: args (dict, optional): Configuration for hyperparameter evolution. """ - self.args = get_cfg(overrides=args) - self.space = { # key: (min, max, gain(optionaL)) + self.space = args.pop("space", None) or { # key: (min, max, gain(optional)) # 'optimizer': tune.choice(['SGD', 'Adam', 'AdamW', 'NAdam', 'RAdam', 'RMSProp']), - 'lr0': (1e-5, 1e-1), - 'lrf': (0.01, 1.0), # final OneCycleLR learning rate (lr0 * lrf) - 'momentum': (0.6, 0.98, 0.3), # SGD momentum/Adam beta1 - 'weight_decay': (0.0, 0.001), # optimizer weight decay 5e-4 - 'warmup_epochs': (0.0, 5.0), # warmup epochs (fractions ok) - 'warmup_momentum': (0.0, 0.95), # warmup initial momentum - 'box': (0.02, 0.2), # box loss gain - 'cls': (0.2, 4.0), # cls loss gain (scale with pixels) - 'hsv_h': (0.0, 0.1), # image HSV-Hue augmentation (fraction) - 'hsv_s': (0.0, 0.9), # image HSV-Saturation augmentation (fraction) - 'hsv_v': (0.0, 0.9), # image HSV-Value augmentation (fraction) - 'degrees': (0.0, 45.0), # image rotation (+/- deg) - 'translate': (0.0, 0.9), # image translation (+/- fraction) - 'scale': (0.0, 0.9), # image scale (+/- gain) - 'shear': (0.0, 10.0), # image shear (+/- deg) - 'perspective': (0.0, 0.001), # image perspective (+/- fraction), range 0-0.001 - 'flipud': (0.0, 1.0), # image flip up-down (probability) - 'fliplr': (0.0, 1.0), # image flip left-right (probability) - 'mosaic': (0.0, 1.0), # image mixup (probability) - 'mixup': (0.0, 1.0), # image mixup (probability) - 'copy_paste': (0.0, 1.0)} # segment copy-paste (probability) - self.tune_dir = get_save_dir(self.args, name='_tune') - self.evolve_csv = self.tune_dir / 'evolve.csv' + "lr0": (1e-5, 1e-1), # initial learning rate (i.e. 
SGD=1E-2, Adam=1E-3)
+            "lrf": (0.0001, 0.1),  # final OneCycleLR learning rate (lr0 * lrf)
+            "momentum": (0.7, 0.98, 0.3),  # SGD momentum/Adam beta1
+            "weight_decay": (0.0, 0.001),  # optimizer weight decay 5e-4
+            "warmup_epochs": (0.0, 5.0),  # warmup epochs (fractions ok)
+            "warmup_momentum": (0.0, 0.95),  # warmup initial momentum
+            "box": (1.0, 20.0),  # box loss gain
+            "cls": (0.2, 4.0),  # cls loss gain (scale with pixels)
+            "dfl": (0.4, 6.0),  # dfl loss gain
+            "hsv_h": (0.0, 0.1),  # image HSV-Hue augmentation (fraction)
+            "hsv_s": (0.0, 0.9),  # image HSV-Saturation augmentation (fraction)
+            "hsv_v": (0.0, 0.9),  # image HSV-Value augmentation (fraction)
+            "degrees": (0.0, 45.0),  # image rotation (+/- deg)
+            "translate": (0.0, 0.9),  # image translation (+/- fraction)
+            "scale": (0.0, 0.95),  # image scale (+/- gain)
+            "shear": (0.0, 10.0),  # image shear (+/- deg)
+            "perspective": (0.0, 0.001),  # image perspective (+/- fraction), range 0-0.001
+            "flipud": (0.0, 1.0),  # image flip up-down (probability)
+            "fliplr": (0.0, 1.0),  # image flip left-right (probability)
+            "bgr": (0.0, 1.0),  # image channel bgr (probability)
+            "mosaic": (0.0, 1.0),  # image mosaic (probability)
+            "mixup": (0.0, 1.0),  # image mixup (probability)
+            "copy_paste": (0.0, 1.0),  # segment copy-paste (probability)
+        }
+        self.args = get_cfg(overrides=args)
+        self.tune_dir = get_save_dir(self.args, name="tune")
+        self.tune_csv = self.tune_dir / "tune_results.csv"
         self.callbacks = _callbacks or callbacks.get_default_callbacks()
+        self.prefix = colorstr("Tuner: ")
         callbacks.add_integration_callbacks(self)
-        LOGGER.info(f"Initialized Tuner instance with 'tune_dir={self.tune_dir}'.")
+        LOGGER.info(
+            f"{self.prefix}Initialized Tuner instance with 'tune_dir={self.tune_dir}'\n"
+            f"{self.prefix}💡 Learn about tuning at https://docs.ultralytics.com/guides/hyperparameter-tuning"
+        )

-    def _mutate(self, parent='single', n=5, mutation=0.8, sigma=0.2):
+    def _mutate(self, parent="single", n=5, mutation=0.8, sigma=0.2):
         """
         Mutates the hyperparameters based on bounds and scaling factors specified in `self.space`.

@@ -106,17 +124,17 @@ class Tuner:
         Returns:
             (dict): A dictionary containing mutated hyperparameters.
         """
-        if self.evolve_csv.exists():  # if evolve.csv exists: select best hyps and mutate
+        if self.tune_csv.exists():  # if CSV file exists: select best hyps and mutate
             # Select parent(s)
-            x = np.loadtxt(self.evolve_csv, ndmin=2, delimiter=',', skiprows=1)
+            x = np.loadtxt(self.tune_csv, ndmin=2, delimiter=",", skiprows=1)
             fitness = x[:, 0]  # first column
             n = min(n, len(x))  # number of previous results to consider
             x = x[np.argsort(-fitness)][:n]  # top n mutations
-            w = x[:, 0] - x[:, 0].min() + 1E-6  # weights (sum > 0)
-            if parent == 'single' or len(x) == 1:
+            w = x[:, 0] - x[:, 0].min() + 1e-6  # weights (sum > 0)
+            if parent == "single" or len(x) == 1:
                 # x = x[random.randint(0, n - 1)]  # random selection
                 x = x[random.choices(range(n), weights=w)[0]]  # weighted selection
-            elif parent == 'weighted':
+            elif parent == "weighted":
                 x = (x * w.reshape(n, 1)).sum(0) / w.sum()  # weighted combination

             # Mutate
@@ -139,7 +157,7 @@ class Tuner:

         return hyp

-    def __call__(self, model=None, iterations=10, prefix=colorstr('Tuner:')):
+    def __call__(self, model=None, iterations=10, cleanup=True):
         """
         Executes the hyperparameter evolution process when the Tuner instance is called.

@@ -152,54 +170,73 @@ class Tuner:
         Args:
             model (Model): A pre-initialized YOLO model to be used for training.
iterations (int): The number of generations to run the evolution for. + cleanup (bool): Whether to delete iteration weights to reduce storage space used during tuning. Note: - The method utilizes the `self.evolve_csv` Path object to read and log hyperparameters and fitness scores. + The method utilizes the `self.tune_csv` Path object to read and log hyperparameters and fitness scores. Ensure this path is set correctly in the Tuner instance. """ t0 = time.time() best_save_dir, best_metrics = None, None - self.tune_dir.mkdir(parents=True, exist_ok=True) + (self.tune_dir / "weights").mkdir(parents=True, exist_ok=True) for i in range(iterations): # Mutate hyperparameters mutated_hyp = self._mutate() - LOGGER.info(f'{prefix} Starting iteration {i + 1}/{iterations} with hyperparameters: {mutated_hyp}') + LOGGER.info(f"{self.prefix}Starting iteration {i + 1}/{iterations} with hyperparameters: {mutated_hyp}") + metrics = {} + train_args = {**vars(self.args), **mutated_hyp} + save_dir = get_save_dir(get_cfg(train_args)) + weights_dir = save_dir / "weights" try: - # Train YOLO model with mutated hyperparameters - train_args = {**vars(self.args), **mutated_hyp} - results = (deepcopy(model) or YOLO(self.args.model)).train(**train_args) - fitness = results.fitness + # Train YOLO model with mutated hyperparameters (run in subprocess to avoid dataloader hang) + cmd = ["yolo", "train", *(f"{k}={v}" for k, v in train_args.items())] + return_code = subprocess.run(cmd, check=True).returncode + ckpt_file = weights_dir / ("best.pt" if (weights_dir / "best.pt").exists() else "last.pt") + metrics = torch.load(ckpt_file)["train_metrics"] + assert return_code == 0, "training failed" + except Exception as e: - LOGGER.warning(f'WARNING ❌️ training failure for hyperparameter tuning iteration {i}\n{e}') - fitness = 0.0 + LOGGER.warning(f"WARNING ❌️ training failure for hyperparameter tuning iteration {i + 1}\n{e}") - # Save results and mutated_hyp to evolve_csv + # Save results and mutated_hyp to CSV + fitness = metrics.get("fitness", 0.0) log_row = [round(fitness, 5)] + [mutated_hyp[k] for k in self.space.keys()] - headers = '' if self.evolve_csv.exists() else (','.join(['fitness_score'] + list(self.space.keys())) + '\n') - with open(self.evolve_csv, 'a') as f: - f.write(headers + ','.join(map(str, log_row)) + '\n') + headers = "" if self.tune_csv.exists() else (",".join(["fitness"] + list(self.space.keys())) + "\n") + with open(self.tune_csv, "a") as f: + f.write(headers + ",".join(map(str, log_row)) + "\n") - # Print tuning results - x = np.loadtxt(self.evolve_csv, ndmin=2, delimiter=',', skiprows=1) + # Get best results + x = np.loadtxt(self.tune_csv, ndmin=2, delimiter=",", skiprows=1) fitness = x[:, 0] # first column best_idx = fitness.argmax() best_is_current = best_idx == i if best_is_current: - best_save_dir = results.save_dir - best_metrics = {k: round(v, 5) for k, v in results.results_dict.items()} - header = (f'{prefix} {i + 1} iterations complete ✅ ({time.time() - t0:.2f}s)\n' - f'{prefix} Results saved to {colorstr("bold", self.tune_dir)}\n' - f'{prefix} Best fitness={fitness[best_idx]} observed at iteration {best_idx + 1}\n' - f'{prefix} Best fitness metrics are {best_metrics}\n' - f'{prefix} Best fitness model is {best_save_dir}\n' - f'{prefix} Best fitness hyperparameters are printed below.\n') + best_save_dir = save_dir + best_metrics = {k: round(v, 5) for k, v in metrics.items()} + for ckpt in weights_dir.glob("*.pt"): + shutil.copy2(ckpt, self.tune_dir / "weights") + elif cleanup: + 
shutil.rmtree(ckpt_file.parent) # remove iteration weights/ dir to reduce storage space - LOGGER.info('\n' + header) + # Plot tune results + plot_tune_results(self.tune_csv) - # Save turning results - data = {k: float(x[0, i + 1]) for i, k in enumerate(self.space.keys())} - header = header.replace(prefix, '#').replace('/', '').replace('', '') + '\n' - yaml_save(self.tune_dir / 'best.yaml', data=data, header=header) - yaml_print(self.tune_dir / 'best.yaml') + # Save and print tune results + header = ( + f'{self.prefix}{i + 1}/{iterations} iterations complete ✅ ({time.time() - t0:.2f}s)\n' + f'{self.prefix}Results saved to {colorstr("bold", self.tune_dir)}\n' + f'{self.prefix}Best fitness={fitness[best_idx]} observed at iteration {best_idx + 1}\n' + f'{self.prefix}Best fitness metrics are {best_metrics}\n' + f'{self.prefix}Best fitness model is {best_save_dir}\n' + f'{self.prefix}Best fitness hyperparameters are printed below.\n' + ) + LOGGER.info("\n" + header) + data = {k: float(x[best_idx, i + 1]) for i, k in enumerate(self.space.keys())} + yaml_save( + self.tune_dir / "best_hyperparameters.yaml", + data=data, + header=remove_colorstr(header.replace(self.prefix, "# ")) + "\n", + ) + yaml_print(self.tune_dir / "best_hyperparameters.yaml") diff --git a/ultralytics/engine/validator.py b/ultralytics/engine/validator.py index 9730c9b..e6d3f67 100644 --- a/ultralytics/engine/validator.py +++ b/ultralytics/engine/validator.py @@ -17,7 +17,9 @@ Usage - formats: yolov8n.tflite # TensorFlow Lite yolov8n_edgetpu.tflite # TensorFlow Edge TPU yolov8n_paddle_model # PaddlePaddle + yolov8n_ncnn_model # NCNN """ + import json import time from pathlib import Path @@ -36,7 +38,7 @@ from ultralytics.utils.torch_utils import de_parallel, select_device, smart_infe class BaseValidator: """ - BaseValidator + BaseValidator. A base class for creating validators. @@ -77,7 +79,7 @@ class BaseValidator: self.args = get_cfg(overrides=args) self.dataloader = dataloader self.pbar = pbar - self.model = None + self.stride = None self.data = None self.device = None self.batch_i = None @@ -89,20 +91,20 @@ class BaseValidator: self.nc = None self.iouv = None self.jdict = None - self.speed = {'preprocess': 0.0, 'inference': 0.0, 'loss': 0.0, 'postprocess': 0.0} + self.speed = {"preprocess": 0.0, "inference": 0.0, "loss": 0.0, "postprocess": 0.0} self.save_dir = save_dir or get_save_dir(self.args) - (self.save_dir / 'labels' if self.args.save_txt else self.save_dir).mkdir(parents=True, exist_ok=True) + (self.save_dir / "labels" if self.args.save_txt else self.save_dir).mkdir(parents=True, exist_ok=True) if self.args.conf is None: self.args.conf = 0.001 # default conf=0.001 + self.args.imgsz = check_imgsz(self.args.imgsz, max_dim=1) self.plots = {} self.callbacks = _callbacks or callbacks.get_default_callbacks() @smart_inference_mode() def __call__(self, trainer=None, model=None): - """ - Supports validation of a pre-trained model if passed or a model being trained if trainer is passed (trainer + """Supports validation of a pre-trained model if passed or a model being trained if trainer is passed (trainer gets priority). 
""" self.training = trainer is not None @@ -110,7 +112,7 @@ class BaseValidator: if self.training: self.device = trainer.device self.data = trainer.data - self.args.half = self.device.type != 'cpu' # force FP16 val during training + # self.args.half = self.device.type != "cpu" # force FP16 val during training model = trainer.ema.ema or trainer.model model = model.half() if self.args.half else model.float() # self.model = model @@ -119,12 +121,13 @@ class BaseValidator: model.eval() else: callbacks.add_integration_callbacks(self) - self.run_callbacks('on_val_start') - model = AutoBackend(model or self.args.model, - device=select_device(self.args.device, self.args.batch), - dnn=self.args.dnn, - data=self.args.data, - fp16=self.args.half) + model = AutoBackend( + weights=model or self.args.model, + device=select_device(self.args.device, self.args.batch), + dnn=self.args.dnn, + data=self.args.data, + fp16=self.args.half, + ) # self.model = model self.device = model.device # update device self.args.half = model.fp16 # update half @@ -134,30 +137,37 @@ class BaseValidator: self.args.batch = model.batch_size elif not pt and not jit: self.args.batch = 1 # export.py models default to batch-size 1 - LOGGER.info(f'Forcing batch=1 square inference (1,3,{imgsz},{imgsz}) for non-PyTorch models') + LOGGER.info(f"Forcing batch=1 square inference (1,3,{imgsz},{imgsz}) for non-PyTorch models") - if isinstance(self.args.data, str) and self.args.data.split('.')[-1] in ('yaml', 'yml'): + if str(self.args.data).split(".")[-1] in ("yaml", "yml"): self.data = check_det_dataset(self.args.data) - elif self.args.task == 'classify': + elif self.args.task == "classify": self.data = check_cls_dataset(self.args.data, split=self.args.split) else: raise FileNotFoundError(emojis(f"Dataset '{self.args.data}' for task={self.args.task} not found ❌")) - if self.device.type in ('cpu', 'mps'): + if self.device.type in ("cpu", "mps"): self.args.workers = 0 # faster CPU val as time dominated by inference, not dataloading if not pt: self.args.rect = False + self.stride = model.stride # used in get_dataloader() for padding self.dataloader = self.dataloader or self.get_dataloader(self.data.get(self.args.split), self.args.batch) model.eval() model.warmup(imgsz=(1 if pt else self.args.batch, 3, imgsz, imgsz)) # warmup - dt = Profile(), Profile(), Profile(), Profile() + self.run_callbacks("on_val_start") + dt = ( + Profile(device=self.device), + Profile(device=self.device), + Profile(device=self.device), + Profile(device=self.device), + ) bar = TQDM(self.dataloader, desc=self.get_desc(), total=len(self.dataloader)) self.init_metrics(de_parallel(model)) self.jdict = [] # empty before each val for batch_i, batch in enumerate(bar): - self.run_callbacks('on_val_batch_start') + self.run_callbacks("on_val_batch_start") self.batch_i = batch_i # Preprocess with dt[0]: @@ -165,7 +175,7 @@ class BaseValidator: # Inference with dt[1]: - preds = model(batch['img'], augment=augment) + preds = model(batch["img"], augment=augment) # Loss with dt[2]: @@ -181,23 +191,32 @@ class BaseValidator: self.plot_val_samples(batch, batch_i) self.plot_predictions(batch, preds, batch_i) - self.run_callbacks('on_val_batch_end') + self.run_callbacks("on_val_batch_end") stats = self.get_stats() self.check_stats(stats) - self.speed = dict(zip(self.speed.keys(), (x.t / len(self.dataloader.dataset) * 1E3 for x in dt))) + self.speed = dict(zip(self.speed.keys(), (x.t / len(self.dataloader.dataset) * 1e3 for x in dt))) self.finalize_metrics() - self.print_results() - 
self.run_callbacks('on_val_end') + if not (self.args.save_json and self.is_coco and len(self.jdict)): + self.print_results() + self.run_callbacks("on_val_end") if self.training: model.float() - results = {**stats, **trainer.label_loss_items(self.loss.cpu() / len(self.dataloader), prefix='val')} + if self.args.save_json and self.jdict: + with open(str(self.save_dir / "predictions.json"), "w") as f: + LOGGER.info(f"Saving {f.name}...") + json.dump(self.jdict, f) # flatten and save + stats = self.eval_json(stats) # update stats + stats['fitness'] = stats['metrics/mAP50-95(B)'] + results = {**stats, **trainer.label_loss_items(self.loss.cpu() / len(self.dataloader), prefix="val")} return {k: round(float(v), 5) for k, v in results.items()} # return results as 5 decimal place floats else: - LOGGER.info('Speed: %.1fms preprocess, %.1fms inference, %.1fms loss, %.1fms postprocess per image' % - tuple(self.speed.values())) + LOGGER.info( + "Speed: %.1fms preprocess, %.1fms inference, %.1fms loss, %.1fms postprocess per image" + % tuple(self.speed.values()) + ) if self.args.save_json and self.jdict: - with open(str(self.save_dir / 'predictions.json'), 'w') as f: - LOGGER.info(f'Saving {f.name}...') + with open(str(self.save_dir / "predictions.json"), "w") as f: + LOGGER.info(f"Saving {f.name}...") json.dump(self.jdict, f) # flatten and save stats = self.eval_json(stats) # update stats if self.args.plots or self.args.save_json: @@ -227,6 +246,7 @@ class BaseValidator: if use_scipy: # WARNING: known issue that reduces mAP in https://github.com/ultralytics/ultralytics/pull/4708 import scipy # scope import to avoid importing for all commands + cost_matrix = iou * (iou >= threshold) if cost_matrix.any(): labels_idx, detections_idx = scipy.optimize.linear_sum_assignment(cost_matrix, maximize=True) @@ -256,11 +276,11 @@ class BaseValidator: def get_dataloader(self, dataset_path, batch_size): """Get data loader from dataset path and batch size.""" - raise NotImplementedError('get_dataloader function not implemented for this validator') + raise NotImplementedError("get_dataloader function not implemented for this validator") def build_dataset(self, img_path): - """Build dataset""" - raise NotImplementedError('build_dataset function not implemented in validator') + """Build dataset.""" + raise NotImplementedError("build_dataset function not implemented in validator") def preprocess(self, batch): """Preprocesses an input batch.""" @@ -305,7 +325,7 @@ class BaseValidator: def on_plot(self, name, data=None): """Registers plots (e.g. to be consumed in callbacks)""" - self.plots[Path(name)] = {'data': data, 'timestamp': time.time()} + self.plots[Path(name)] = {"data": data, "timestamp": time.time()} # TODO: may need to put these following functions into callback def plot_val_samples(self, batch, ni): diff --git a/ultralytics/hub/__init__.py b/ultralytics/hub/__init__.py index daed439..4ea2fff 100644 --- a/ultralytics/hub/__init__.py +++ b/ultralytics/hub/__init__.py @@ -5,24 +5,51 @@ import requests from ultralytics.data.utils import HUBDatasetStats from ultralytics.hub.auth import Auth from ultralytics.hub.utils import HUB_API_ROOT, HUB_WEB_ROOT, PREFIX -from ultralytics.utils import LOGGER, SETTINGS +from ultralytics.utils import LOGGER, SETTINGS, checks -def login(api_key=''): +def login(api_key: str = None, save=True) -> bool: """ Log in to the Ultralytics HUB API using the provided API key. 
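Note on the validator hunk above, before the HUB changes: this patch adds a line after `eval_json()` that copies box mAP50-95 into a `fitness` key, so COCO-style validation results can be ranked during training. A sketch of that bookkeeping with made-up metric values; the keys follow the Ultralytics metric naming shown in the hunk:

```python
# Illustrative stats dict; only the 'fitness' assignment mirrors the patch.
stats = {"metrics/mAP50(B)": 0.71, "metrics/mAP50-95(B)": 0.52}
stats["fitness"] = stats["metrics/mAP50-95(B)"]  # line added by this patch
results = {k: round(float(v), 5) for k, v in stats.items()}  # 5-decimal floats, as returned
print(results)  # {'metrics/mAP50(B)': 0.71, 'metrics/mAP50-95(B)': 0.52, 'fitness': 0.52}
```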
+ The session is not stored; a new session is created when needed using the saved SETTINGS or the HUB_API_KEY + environment variable if successfully authenticated. + Args: - api_key (str, optional): May be an API key or a combination API key and model ID, i.e. key_id + api_key (str, optional): API key to use for authentication. + If not provided, it will be retrieved from SETTINGS or HUB_API_KEY environment variable. + save (bool, optional): Whether to save the API key to SETTINGS if authentication is successful. - Example: - ```python - from ultralytics import hub - - hub.login('API_KEY') - ``` + Returns: + (bool): True if authentication is successful, False otherwise. """ - Auth(api_key, verbose=True) + checks.check_requirements("hub-sdk>=0.0.6") + from hub_sdk import HUBClient + + api_key_url = f"{HUB_WEB_ROOT}/settings?tab=api+keys" # set the redirect URL + saved_key = SETTINGS.get("api_key") + active_key = api_key or saved_key + credentials = {"api_key": active_key} if active_key and active_key != "" else None # set credentials + + client = HUBClient(credentials) # initialize HUBClient + + if client.authenticated: + # Successfully authenticated with HUB + + if save and client.api_key != saved_key: + SETTINGS.update({"api_key": client.api_key}) # update settings with valid API key + + # Set message based on whether key was provided or retrieved from settings + log_message = ( + "New authentication successful ✅" if client.api_key == api_key or not credentials else "Authenticated ✅" + ) + LOGGER.info(f"{PREFIX}{log_message}") + + return True + else: + # Failed to authenticate with HUB + LOGGER.info(f"{PREFIX}Get API key from {api_key_url} and then run 'yolo hub login API_KEY'") + return False def logout(): @@ -36,52 +63,53 @@ def logout(): hub.logout() ``` """ - SETTINGS['api_key'] = '' + SETTINGS["api_key"] = "" SETTINGS.save() LOGGER.info(f"{PREFIX}logged out ✅. 
To log in again, use 'yolo hub login'.") -def reset_model(model_id=''): +def reset_model(model_id=""): """Reset a trained model to an untrained state.""" - r = requests.post(f'{HUB_API_ROOT}/model-reset', json={'apiKey': Auth().api_key, 'modelId': model_id}) + r = requests.post(f"{HUB_API_ROOT}/model-reset", json={"modelId": model_id}, headers={"x-api-key": Auth().api_key}) if r.status_code == 200: - LOGGER.info(f'{PREFIX}Model reset successfully') + LOGGER.info(f"{PREFIX}Model reset successfully") return - LOGGER.warning(f'{PREFIX}Model reset failure {r.status_code} {r.reason}') + LOGGER.warning(f"{PREFIX}Model reset failure {r.status_code} {r.reason}") def export_fmts_hub(): """Returns a list of HUB-supported export formats.""" from ultralytics.engine.exporter import export_formats - return list(export_formats()['Argument'][1:]) + ['ultralytics_tflite', 'ultralytics_coreml'] + + return list(export_formats()["Argument"][1:]) + ["ultralytics_tflite", "ultralytics_coreml"] -def export_model(model_id='', format='torchscript'): +def export_model(model_id="", format="torchscript"): """Export a model to all formats.""" assert format in export_fmts_hub(), f"Unsupported export format '{format}', valid formats are {export_fmts_hub()}" - r = requests.post(f'{HUB_API_ROOT}/v1/models/{model_id}/export', - json={'format': format}, - headers={'x-api-key': Auth().api_key}) - assert r.status_code == 200, f'{PREFIX}{format} export failure {r.status_code} {r.reason}' - LOGGER.info(f'{PREFIX}{format} export started ✅') + r = requests.post( + f"{HUB_API_ROOT}/v1/models/{model_id}/export", json={"format": format}, headers={"x-api-key": Auth().api_key} + ) + assert r.status_code == 200, f"{PREFIX}{format} export failure {r.status_code} {r.reason}" + LOGGER.info(f"{PREFIX}{format} export started ✅") -def get_export(model_id='', format='torchscript'): +def get_export(model_id="", format="torchscript"): """Get an exported model dictionary with download URL.""" assert format in export_fmts_hub(), f"Unsupported export format '{format}', valid formats are {export_fmts_hub()}" - r = requests.post(f'{HUB_API_ROOT}/get-export', - json={ - 'apiKey': Auth().api_key, - 'modelId': model_id, - 'format': format}) - assert r.status_code == 200, f'{PREFIX}{format} get_export failure {r.status_code} {r.reason}' + r = requests.post( + f"{HUB_API_ROOT}/get-export", + json={"apiKey": Auth().api_key, "modelId": model_id, "format": format}, + headers={"x-api-key": Auth().api_key}, + ) + assert r.status_code == 200, f"{PREFIX}{format} get_export failure {r.status_code} {r.reason}" return r.json() -def check_dataset(path='', task='detect'): +def check_dataset(path="", task="detect"): """ - Function for error-checking HUB dataset Zip file before upload. It checks a dataset for errors before it is - uploaded to the HUB. Usage examples are given below. + Function for error-checking HUB dataset Zip file before upload. It checks a dataset for errors before it is uploaded + to the HUB. Usage examples are given below. Args: path (str, optional): Path to data.zip (with data.yaml inside data.zip). Defaults to ''. @@ -97,4 +125,4 @@ def check_dataset(path='', task='detect'): ``` """ HUBDatasetStats(path=path, task=task).get_json() - LOGGER.info(f'Checks completed correctly ✅. Upload this dataset to {HUB_WEB_ROOT}/datasets/.') + LOGGER.info(f"Checks completed correctly ✅. 
Upload this dataset to {HUB_WEB_ROOT}/datasets/.")
diff --git a/ultralytics/hub/__pycache__/__init__.cpython-312.pyc b/ultralytics/hub/__pycache__/__init__.cpython-312.pyc
index 998422799e8aac402447aa292b39cf251ba7dc65..bef16036f08e36609d31997901b1f61d96060ea0 100644
Binary files a/ultralytics/hub/__pycache__/__init__.cpython-312.pyc and b/ultralytics/hub/__pycache__/__init__.cpython-312.pyc differ
diff --git a/ultralytics/hub/__pycache__/__init__.cpython-39.pyc b/ultralytics/hub/__pycache__/__init__.cpython-39.pyc
index 3fbf98712f52e63deeb786e8e87a5ead3d8807c8..3bb4effd0808cc8607dc9cdc64c05904f5e026af 100644
Binary files a/ultralytics/hub/__pycache__/__init__.cpython-39.pyc and b/ultralytics/hub/__pycache__/__init__.cpython-39.pyc differ
diff --git a/ultralytics/hub/__pycache__/auth.cpython-39.pyc b/ultralytics/hub/__pycache__/auth.cpython-39.pyc
index 2a60626433e2aa728a923d8aa3b844ca41e501ed..1f909c76ec39150c669dd595bcc78ac805fdedaf 100644
Binary files a/ultralytics/hub/__pycache__/auth.cpython-39.pyc and b/ultralytics/hub/__pycache__/auth.cpython-39.pyc differ
diff --git a/ultralytics/hub/__pycache__/utils.cpython-312.pyc b/ultralytics/hub/__pycache__/utils.cpython-312.pyc
index b082510884e334f918d75abcaef05b90b2837c7d..099777d67cbe2c0d2b579345256f721b8035a31f 100644
Binary files a/ultralytics/hub/__pycache__/utils.cpython-312.pyc and b/ultralytics/hub/__pycache__/utils.cpython-312.pyc differ
diff --git a/ultralytics/hub/auth.py b/ultralytics/hub/auth.py
index 9963d79..6ede303 100644
--- a/ultralytics/hub/auth.py
+++ b/ultralytics/hub/auth.py
@@ -5,13 +5,27 @@ import requests
 from ultralytics.hub.utils import HUB_API_ROOT, HUB_WEB_ROOT, PREFIX, request_with_credentials
 from ultralytics.utils import LOGGER, SETTINGS, emojis, is_colab

-API_KEY_URL = f'{HUB_WEB_ROOT}/settings?tab=api+keys'
+API_KEY_URL = f"{HUB_WEB_ROOT}/settings?tab=api+keys"


 class Auth:
+    """
+    Manages authentication processes including API key handling, cookie-based authentication, and header generation.
+
+    The class supports different methods of authentication:
+    1. Directly using an API key.
+    2. Authenticating using browser cookies (specifically in Google Colab).
+    3. Prompting the user to enter an API key.
+
+    Attributes:
+        id_token (str or bool): Token used for identity verification, initialized as False.
+        api_key (str or bool): API key for authentication, initialized as False.
+        model_key (bool): Placeholder for model key, initialized as False.
+    """
+
     id_token = api_key = model_key = False

-    def __init__(self, api_key='', verbose=False):
+    def __init__(self, api_key="", verbose=False):
         """
         Initialize the Auth class with an optional API key.

@@ -19,18 +33,18 @@
             api_key (str, optional): May be an API key or a combination API key and model ID, i.e.
key_id """ # Split the input API key in case it contains a combined key_model and keep only the API key part - api_key = api_key.split('_')[0] + api_key = api_key.split("_")[0] # Set API key attribute as value passed or SETTINGS API key if none passed - self.api_key = api_key or SETTINGS.get('api_key', '') + self.api_key = api_key or SETTINGS.get("api_key", "") # If an API key is provided if self.api_key: # If the provided API key matches the API key in the SETTINGS - if self.api_key == SETTINGS.get('api_key'): + if self.api_key == SETTINGS.get("api_key"): # Log that the user is already logged in if verbose: - LOGGER.info(f'{PREFIX}Authenticated ✅') + LOGGER.info(f"{PREFIX}Authenticated ✅") return else: # Attempt to authenticate with the provided API key @@ -45,62 +59,65 @@ class Auth: # Update SETTINGS with the new API key after successful authentication if success: - SETTINGS.update({'api_key': self.api_key}) + SETTINGS.update({"api_key": self.api_key}) # Log that the new login was successful if verbose: - LOGGER.info(f'{PREFIX}New authentication successful ✅') + LOGGER.info(f"{PREFIX}New authentication successful ✅") elif verbose: - LOGGER.info(f'{PREFIX}Retrieve API key from {API_KEY_URL}') + LOGGER.info(f"{PREFIX}Get API key from {API_KEY_URL} and then run 'yolo hub login API_KEY'") def request_api_key(self, max_attempts=3): """ - Prompt the user to input their API key. Returns the model ID. + Prompt the user to input their API key. + + Returns the model ID. """ import getpass + for attempts in range(max_attempts): - LOGGER.info(f'{PREFIX}Login. Attempt {attempts + 1} of {max_attempts}') - input_key = getpass.getpass(f'Enter API key from {API_KEY_URL} ') - self.api_key = input_key.split('_')[0] # remove model id if present + LOGGER.info(f"{PREFIX}Login. Attempt {attempts + 1} of {max_attempts}") + input_key = getpass.getpass(f"Enter API key from {API_KEY_URL} ") + self.api_key = input_key.split("_")[0] # remove model id if present if self.authenticate(): return True - raise ConnectionError(emojis(f'{PREFIX}Failed to authenticate ❌')) + raise ConnectionError(emojis(f"{PREFIX}Failed to authenticate ❌")) def authenticate(self) -> bool: """ Attempt to authenticate with the server using either id_token or API key. Returns: - bool: True if authentication is successful, False otherwise. + (bool): True if authentication is successful, False otherwise. """ try: if header := self.get_auth_header(): - r = requests.post(f'{HUB_API_ROOT}/v1/auth', headers=header) - if not r.json().get('success', False): - raise ConnectionError('Unable to authenticate.') + r = requests.post(f"{HUB_API_ROOT}/v1/auth", headers=header) + if not r.json().get("success", False): + raise ConnectionError("Unable to authenticate.") return True - raise ConnectionError('User has not authenticated locally.') + raise ConnectionError("User has not authenticated locally.") except ConnectionError: self.id_token = self.api_key = False # reset invalid - LOGGER.warning(f'{PREFIX}Invalid API key ⚠️') + LOGGER.warning(f"{PREFIX}Invalid API key ⚠️") return False def auth_with_cookies(self) -> bool: """ - Attempt to fetch authentication via cookies and set id_token. - User must be logged in to HUB and running in a supported browser. + Attempt to fetch authentication via cookies and set id_token. User must be logged in to HUB and running in a + supported browser. Returns: - bool: True if authentication is successful, False otherwise. + (bool): True if authentication is successful, False otherwise. 
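Note: the `authenticate()` hunk above and the `get_auth_header()` hunk below share one contract: a Bearer header when a browser `id_token` exists, otherwise an `x-api-key` header. A condensed sketch of that round-trip, assuming the default `HUB_API_ROOT` and the `/v1/auth` endpoint shown in this diff; the standalone function names are ours, not the library's:

```python
import requests

HUB_API_ROOT = "https://api.ultralytics.com"  # default from hub/utils.py

def get_auth_header(id_token=None, api_key=None):
    """Mirror of Auth.get_auth_header(): Bearer token takes priority over API key."""
    if id_token:
        return {"authorization": f"Bearer {id_token}"}
    if api_key:
        return {"x-api-key": api_key}
    return None  # not authenticated locally

def authenticate(id_token=None, api_key=None) -> bool:
    """Sketch of Auth.authenticate(): POST the header and read the 'success' flag."""
    if header := get_auth_header(id_token, api_key):
        r = requests.post(f"{HUB_API_ROOT}/v1/auth", headers=header)
        return r.json().get("success", False)
    return False
```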
""" if not is_colab(): return False # Currently only works with Colab try: - authn = request_with_credentials(f'{HUB_API_ROOT}/v1/auth/auto') - if authn.get('success', False): - self.id_token = authn.get('data', {}).get('idToken', None) + authn = request_with_credentials(f"{HUB_API_ROOT}/v1/auth/auto") + if authn.get("success", False): + self.id_token = authn.get("data", {}).get("idToken", None) self.authenticate() return True - raise ConnectionError('Unable to fetch browser authentication details.') + raise ConnectionError("Unable to fetch browser authentication details.") except ConnectionError: self.id_token = False # reset invalid return False @@ -113,7 +130,7 @@ class Auth: (dict): The authentication header if id_token or API key is set, None otherwise. """ if self.id_token: - return {'authorization': f'Bearer {self.id_token}'} + return {"authorization": f"Bearer {self.id_token}"} elif self.api_key: - return {'x-api-key': self.api_key} + return {"x-api-key": self.api_key} # else returns None diff --git a/ultralytics/hub/session.py b/ultralytics/hub/session.py index 595de29..ebde7aa 100644 --- a/ultralytics/hub/session.py +++ b/ultralytics/hub/session.py @@ -1,29 +1,26 @@ # Ultralytics YOLO 🚀, AGPL-3.0 license -import signal -import sys +import threading +import time +from http import HTTPStatus from pathlib import Path -from time import sleep import requests -from ultralytics.hub.utils import HUB_API_ROOT, HUB_WEB_ROOT, PREFIX, smart_request -from ultralytics.utils import LOGGER, __version__, checks, emojis, is_colab, threaded +from ultralytics.hub.utils import HUB_WEB_ROOT, HELP_MSG, PREFIX, TQDM +from ultralytics.utils import LOGGER, SETTINGS, __version__, checks, emojis, is_colab from ultralytics.utils.errors import HUBModelError -AGENT_NAME = f'python-{__version__}-colab' if is_colab() else f'python-{__version__}-local' +AGENT_NAME = f"python-{__version__}-colab" if is_colab() else f"python-{__version__}-local" class HUBTrainingSession: """ HUB training session for Ultralytics HUB YOLO models. Handles model initialization, heartbeats, and checkpointing. - Args: - url (str): Model identifier used to initialize the HUB training session. - Attributes: agent_id (str): Identifier for the instance communicating with the server. - model_id (str): Identifier for the YOLOv5 model being trained. + model_id (str): Identifier for the YOLO model being trained. model_url (str): URL for the model in Ultralytics HUB. api_url (str): API URL for the model in Ultralytics HUB. auth_header (dict): Authentication header for the Ultralytics HUB API requests. @@ -34,110 +31,287 @@ class HUBTrainingSession: alive (bool): Indicates if the heartbeat loop is active. """ - def __init__(self, url): + def __init__(self, identifier): """ Initialize the HUBTrainingSession with the provided model identifier. Args: - url (str): Model identifier used to initialize the HUB training session. - It can be a URL string or a model key with specific format. + identifier (str): Model identifier used to initialize the HUB training session. + It can be a URL string or a model key with specific format. Raises: ValueError: If the provided model identifier is invalid. ConnectionError: If connecting with global API key is not supported. + ModuleNotFoundError: If hub-sdk package is not installed. 
""" + from hub_sdk import HUBClient - from ultralytics.hub.auth import Auth + self.rate_limits = { + "metrics": 3.0, + "ckpt": 900.0, + "heartbeat": 300.0, + } # rate limits (seconds) + self.metrics_queue = {} # holds metrics for each epoch until upload + self.metrics_upload_failed_queue = {} # holds metrics for each epoch if upload failed + self.timers = {} # holds timers in ultralytics/utils/callbacks/hub.py # Parse input - if url.startswith(f'{HUB_WEB_ROOT}/models/'): - url = url.split(f'{HUB_WEB_ROOT}/models/')[-1] - if [len(x) for x in url.split('_')] == [42, 20]: - key, model_id = url.split('_') - elif len(url) == 20: - key, model_id = '', url + api_key, model_id, self.filename = self._parse_identifier(identifier) + + # Get credentials + active_key = api_key or SETTINGS.get("api_key") + credentials = {"api_key": active_key} if active_key else None # set credentials + + # Initialize client + self.client = HUBClient(credentials) + + if model_id: + self.load_model(model_id) # load existing model else: - raise HUBModelError(f"model='{url}' not found. Check format is correct, i.e. " - f"model='{HUB_WEB_ROOT}/models/MODEL_ID' and try again.") + self.model = self.client.model() # load empty model - # Authorize - auth = Auth(key) - self.agent_id = None # identifies which instance is communicating with server - self.model_id = model_id - self.model_url = f'{HUB_WEB_ROOT}/models/{model_id}' - self.api_url = f'{HUB_API_ROOT}/v1/models/{model_id}' - self.auth_header = auth.get_auth_header() - self.rate_limits = {'metrics': 3.0, 'ckpt': 900.0, 'heartbeat': 300.0} # rate limits (seconds) - self.timers = {} # rate limit timers (seconds) - self.metrics_queue = {} # metrics queue - self.model = self._get_model() - self.alive = True - self._start_heartbeat() # start heartbeats - self._register_signal_handlers() - LOGGER.info(f'{PREFIX}View model at {self.model_url} 🚀') + def load_model(self, model_id): + """Loads an existing model from Ultralytics HUB using the provided model identifier.""" + self.model = self.client.model(model_id) + if not self.model.data: # then model does not exist + raise ValueError(emojis("❌ The specified HUB model does not exist")) # TODO: improve error handling - def _register_signal_handlers(self): - """Register signal handlers for SIGTERM and SIGINT signals to gracefully handle termination.""" - signal.signal(signal.SIGTERM, self._handle_signal) - signal.signal(signal.SIGINT, self._handle_signal) + self.model_url = f"{HUB_WEB_ROOT}/models/{self.model.id}" - def _handle_signal(self, signum, frame): + self._set_train_args() + + # Start heartbeats for HUB to monitor agent + self.model.start_heartbeat(self.rate_limits["heartbeat"]) + LOGGER.info(f"{PREFIX}View model at {self.model_url} 🚀") + + def create_model(self, model_args): + """Initializes a HUB training session with the specified model identifier.""" + payload = { + "config": { + "batchSize": model_args.get("batch", -1), + "epochs": model_args.get("epochs", 300), + "imageSize": model_args.get("imgsz", 640), + "patience": model_args.get("patience", 100), + "device": model_args.get("device", ""), + "cache": model_args.get("cache", "ram"), + }, + "dataset": {"name": model_args.get("data")}, + "lineage": { + "architecture": { + "name": self.filename.replace(".pt", "").replace(".yaml", ""), + }, + "parent": {}, + }, + "meta": {"name": self.filename}, + } + + if self.filename.endswith(".pt"): + payload["lineage"]["parent"]["name"] = self.filename + + self.model.create_model(payload) + + # Model could not be created + # TODO: 
improve error handling + if not self.model.id: + return + + self.model_url = f"{HUB_WEB_ROOT}/models/{self.model.id}" + + # Start heartbeats for HUB to monitor agent + self.model.start_heartbeat(self.rate_limits["heartbeat"]) + + LOGGER.info(f"{PREFIX}View model at {self.model_url} 🚀") + + def _parse_identifier(self, identifier): """ - Handle kill signals and prevent heartbeats from being sent on Colab after termination. - This method does not use frame, it is included as it is passed by signal. - """ - if self.alive is True: - LOGGER.info(f'{PREFIX}Kill signal received! ❌') - self._stop_heartbeat() - sys.exit(signum) + Parses the given identifier to determine the type of identifier and extract relevant components. - def _stop_heartbeat(self): - """Terminate the heartbeat loop.""" - self.alive = False + The method supports different identifier formats: + - A HUB URL, which starts with HUB_WEB_ROOT followed by '/models/' + - An identifier containing an API key and a model ID separated by an underscore + - An identifier that is solely a model ID of a fixed length + - A local filename that ends with '.pt' or '.yaml' + + Args: + identifier (str): The identifier string to be parsed. + + Returns: + (tuple): A tuple containing the API key, model ID, and filename as applicable. + + Raises: + HUBModelError: If the identifier format is not recognized. + """ + + # Initialize variables + api_key, model_id, filename = None, None, None + + # Check if identifier is a HUB URL + if identifier.startswith(f"{HUB_WEB_ROOT}/models/"): + # Extract the model_id after the HUB_WEB_ROOT URL + model_id = identifier.split(f"{HUB_WEB_ROOT}/models/")[-1] + else: + # Split the identifier based on underscores only if it's not a HUB URL + parts = identifier.split("_") + + # Check if identifier is in the format of API key and model ID + if len(parts) == 2 and len(parts[0]) == 42 and len(parts[1]) == 20: + api_key, model_id = parts + # Check if identifier is a single model ID + elif len(parts) == 1 and len(parts[0]) == 20: + model_id = parts[0] + # Check if identifier is a local filename + elif identifier.endswith(".pt") or identifier.endswith(".yaml"): + filename = identifier + else: + raise HUBModelError( + f"model='{identifier}' could not be parsed. Check format is correct. " + f"Supported formats are Ultralytics HUB URL, apiKey_modelId, modelId, local pt or yaml file." + ) + + return api_key, model_id, filename + + def _set_train_args(self): + """ + Initializes training arguments and creates a model entry on the Ultralytics HUB. + + This method sets up training arguments based on the model's state and updates them with any additional + arguments provided. It handles different states of the model, such as whether it's resumable, pretrained, + or requires specific file setup. + + Raises: + ValueError: If the model is already trained, if required dataset information is missing, or if there are + issues with the provided training arguments. 
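Note: `_parse_identifier()` above distinguishes four identifier shapes (HUB URL, `apiKey_modelId` pair, bare 20-character model ID, local `.pt`/`.yaml` file). A self-contained sketch of the same dispatch, with constants inlined and a plain `ValueError` standing in for `HUBModelError`:

```python
HUB_WEB_ROOT = "https://hub.ultralytics.com"  # default from hub/utils.py

def parse_identifier(identifier: str):
    """Return (api_key, model_id, filename) for the four supported formats."""
    api_key = model_id = filename = None
    if identifier.startswith(f"{HUB_WEB_ROOT}/models/"):
        model_id = identifier.split(f"{HUB_WEB_ROOT}/models/")[-1]  # HUB URL
    else:
        parts = identifier.split("_")
        if len(parts) == 2 and len(parts[0]) == 42 and len(parts[1]) == 20:
            api_key, model_id = parts  # apiKey_modelId pair
        elif len(parts) == 1 and len(parts[0]) == 20:
            model_id = parts[0]  # bare model ID
        elif identifier.endswith(".pt") or identifier.endswith(".yaml"):
            filename = identifier  # local weights or config file
        else:
            raise ValueError(f"model='{identifier}' could not be parsed")
    return api_key, model_id, filename

print(parse_identifier("yolov8n.pt"))  # (None, None, 'yolov8n.pt')
```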
+ """ + if self.model.is_trained(): + raise ValueError(emojis(f"Model is already trained and uploaded to {self.model_url} 🚀")) + + if self.model.is_resumable(): + # Model has saved weights + self.train_args = {"data": self.model.get_dataset_url(), "resume": True} + self.model_file = self.model.get_weights_url("last") + else: + # Model has no saved weights + self.train_args = self.model.data.get("train_args") # new response + + # Set the model file as either a *.pt or *.yaml file + self.model_file = ( + self.model.get_weights_url("parent") if self.model.is_pretrained() else self.model.get_architecture() + ) + + if "data" not in self.train_args: + # RF bug - datasets are sometimes not exported + raise ValueError("Dataset may still be processing. Please wait a minute and try again.") + + self.model_file = checks.check_yolov5u_filename(self.model_file, verbose=False) # YOLOv5->YOLOv5u + self.model_id = self.model.id + + def request_queue( + self, + request_func, + retry=3, + timeout=30, + thread=True, + verbose=True, + progress_total=None, + *args, + **kwargs, + ): + def retry_request(): + """Attempts to call `request_func` with retries, timeout, and optional threading.""" + t0 = time.time() # Record the start time for the timeout + for i in range(retry + 1): + if (time.time() - t0) > timeout: + LOGGER.warning(f"{PREFIX}Timeout for request reached. {HELP_MSG}") + break # Timeout reached, exit loop + + response = request_func(*args, **kwargs) + if response is None: + LOGGER.warning(f"{PREFIX}Received no response from the request. {HELP_MSG}") + time.sleep(2**i) # Exponential backoff before retrying + continue # Skip further processing and retry + + if progress_total: + self._show_upload_progress(progress_total, response) + + if HTTPStatus.OK <= response.status_code < HTTPStatus.MULTIPLE_CHOICES: + # if request related to metrics upload + if kwargs.get("metrics"): + self.metrics_upload_failed_queue = {} + return response # Success, no need to retry + + if i == 0: + # Initial attempt, check status code and provide messages + message = self._get_failure_message(response, retry, timeout) + + if verbose: + LOGGER.warning(f"{PREFIX}{message} {HELP_MSG} ({response.status_code})") + + if not self._should_retry(response.status_code): + LOGGER.warning(f"{PREFIX}Request failed. {HELP_MSG} ({response.status_code}") + break # Not an error that should be retried, exit loop + + time.sleep(2**i) # Exponential backoff for retries + + # if request related to metrics upload and exceed retries + if response is None and kwargs.get("metrics"): + self.metrics_upload_failed_queue.update(kwargs.get("metrics", None)) + + return response + + if thread: + # Start a new thread to run the retry_request function + threading.Thread(target=retry_request, daemon=True).start() + else: + # If running in the main thread, call retry_request directly + return retry_request() + + def _should_retry(self, status_code): + """Determines if a request should be retried based on the HTTP status code.""" + retry_codes = { + HTTPStatus.REQUEST_TIMEOUT, + HTTPStatus.BAD_GATEWAY, + HTTPStatus.GATEWAY_TIMEOUT, + } + return status_code in retry_codes + + def _get_failure_message(self, response: requests.Response, retry: int, timeout: int): + """ + Generate a retry message based on the response status code. + + Args: + response: The HTTP response object. + retry: The number of retry attempts allowed. + timeout: The maximum timeout duration. + + Returns: + (str): The retry message. 
+ """ + if self._should_retry(response.status_code): + return f"Retrying {retry}x for {timeout}s." if retry else "" + elif response.status_code == HTTPStatus.TOO_MANY_REQUESTS: # rate limit + headers = response.headers + return ( + f"Rate limit reached ({headers['X-RateLimit-Remaining']}/{headers['X-RateLimit-Limit']}). " + f"Please retry after {headers['Retry-After']}s." + ) + else: + try: + return response.json().get("message", "No JSON message.") + except AttributeError: + return "Unable to read JSON." def upload_metrics(self): """Upload model metrics to Ultralytics HUB.""" - payload = {'metrics': self.metrics_queue.copy(), 'type': 'metrics'} - smart_request('post', self.api_url, json=payload, headers=self.auth_header, code=2) + return self.request_queue(self.model.upload_metrics, metrics=self.metrics_queue.copy(), thread=True) - def _get_model(self): - """Fetch and return model data from Ultralytics HUB.""" - api_url = f'{HUB_API_ROOT}/v1/models/{self.model_id}' - - try: - response = smart_request('get', api_url, headers=self.auth_header, thread=False, code=0) - data = response.json().get('data', None) - - if data.get('status', None) == 'trained': - raise ValueError(emojis(f'Model is already trained and uploaded to {self.model_url} 🚀')) - - if not data.get('data', None): - raise ValueError('Dataset may still be processing. Please wait a minute and try again.') # RF fix - self.model_id = data['id'] - - if data['status'] == 'new': # new model to start training - self.train_args = { - # TODO: deprecate 'batch_size' key for 'batch' in 3Q23 - 'batch': data['batch' if ('batch' in data) else 'batch_size'], - 'epochs': data['epochs'], - 'imgsz': data['imgsz'], - 'patience': data['patience'], - 'device': data['device'], - 'cache': data['cache'], - 'data': data['data']} - self.model_file = data.get('cfg') or data.get('weights') # cfg for pretrained=False - self.model_file = checks.check_yolov5u_filename(self.model_file, verbose=False) # YOLOv5->YOLOv5u - elif data['status'] == 'training': # existing model to resume training - self.train_args = {'data': data['data'], 'resume': True} - self.model_file = data['resume'] - - return data - except requests.exceptions.ConnectionError as e: - raise ConnectionRefusedError('ERROR: The HUB server is not online. Please try again later.') from e - except Exception: - raise - - def upload_model(self, epoch, weights, is_best=False, map=0.0, final=False): + def upload_model( + self, + epoch: int, + weights: str, + is_best: bool = False, + map: float = 0.0, + final: bool = False, + ) -> None: """ Upload a model checkpoint to Ultralytics HUB. @@ -149,42 +323,33 @@ class HUBTrainingSession: final (bool): Indicates if the model is the final model after training. """ if Path(weights).is_file(): - with open(weights, 'rb') as f: - file = f.read() + progress_total = Path(weights).stat().st_size if final else None # Only show progress if final + self.request_queue( + self.model.upload_model, + epoch=epoch, + weights=weights, + is_best=is_best, + map=map, + final=final, + retry=10, + timeout=3600, + thread=not final, + progress_total=progress_total, + ) else: - LOGGER.warning(f'{PREFIX}WARNING ⚠️ Model upload issue. 
Missing model {weights}.') - file = None - url = f'{self.api_url}/upload' - # url = 'http://httpbin.org/post' # for debug - data = {'epoch': epoch} - if final: - data.update({'type': 'final', 'map': map}) - smart_request('post', - url, - data=data, - files={'best.pt': file}, - headers=self.auth_header, - retry=10, - timeout=3600, - thread=False, - progress=True, - code=4) - else: - data.update({'type': 'epoch', 'isBest': bool(is_best)}) - smart_request('post', url, data=data, files={'last.pt': file}, headers=self.auth_header, code=3) + LOGGER.warning(f"{PREFIX}WARNING ⚠️ Model upload issue. Missing model {weights}.") - @threaded - def _start_heartbeat(self): - """Begin a threaded heartbeat loop to report the agent's status to Ultralytics HUB.""" - while self.alive: - r = smart_request('post', - f'{HUB_API_ROOT}/v1/agent/heartbeat/models/{self.model_id}', - json={ - 'agent': AGENT_NAME, - 'agentId': self.agent_id}, - headers=self.auth_header, - retry=0, - code=5, - thread=False) # already in a thread - self.agent_id = r.json().get('data', {}).get('agentId', None) - sleep(self.rate_limits['heartbeat']) + def _show_upload_progress(self, content_length: int, response: requests.Response) -> None: + """ + Display a progress bar to track the upload progress of a file download. + + Args: + content_length (int): The total size of the content to be downloaded in bytes. + response (requests.Response): The response object from the file download request. + + Returns: + None + """ + with TQDM(total=content_length, unit="B", unit_scale=True, unit_divisor=1024) as pbar: + for data in response.iter_content(chunk_size=1024): + pbar.update(len(data)) diff --git a/ultralytics/hub/utils.py b/ultralytics/hub/utils.py index 07da970..5c00076 100644 --- a/ultralytics/hub/utils.py +++ b/ultralytics/hub/utils.py @@ -10,14 +10,29 @@ from pathlib import Path import requests -from ultralytics.utils import (ENVIRONMENT, LOGGER, ONLINE, RANK, SETTINGS, TESTS_RUNNING, TQDM, TryExcept, __version__, - colorstr, get_git_origin_url, is_colab, is_git_dir, is_pip_package) +from ultralytics.utils import ( + ENVIRONMENT, + LOGGER, + ONLINE, + RANK, + SETTINGS, + TESTS_RUNNING, + TQDM, + TryExcept, + __version__, + colorstr, + get_git_origin_url, + is_colab, + is_git_dir, + is_pip_package, +) from ultralytics.utils.downloads import GITHUB_ASSETS_NAMES -PREFIX = colorstr('Ultralytics HUB: ') -HELP_MSG = 'If this issue persists please visit https://github.com/ultralytics/hub/issues for assistance.' -HUB_API_ROOT = os.environ.get('ULTRALYTICS_HUB_API', 'https://api.ultralytics.com') -HUB_WEB_ROOT = os.environ.get('ULTRALYTICS_HUB_WEB', 'https://hub.ultralytics.com') +HUB_API_ROOT = os.environ.get("ULTRALYTICS_HUB_API", "https://api.ultralytics.com") +HUB_WEB_ROOT = os.environ.get("ULTRALYTICS_HUB_WEB", "https://hub.ultralytics.com") + +PREFIX = colorstr("Ultralytics HUB: ") +HELP_MSG = "If this issue persists please visit https://github.com/ultralytics/hub/issues for assistance." def request_with_credentials(url: str) -> any: @@ -34,11 +49,13 @@ def request_with_credentials(url: str) -> any: OSError: If the function is not run in a Google Colab environment. 
""" if not is_colab(): - raise OSError('request_with_credentials() must run in a Colab environment') + raise OSError("request_with_credentials() must run in a Colab environment") from google.colab import output # noqa from IPython import display # noqa + display.display( - display.Javascript(""" + display.Javascript( + """ window._hub_tmp = new Promise((resolve, reject) => { const timeout = setTimeout(() => reject("Failed authenticating existing browser session"), 5000) fetch("%s", { @@ -53,8 +70,11 @@ def request_with_credentials(url: str) -> any: reject(err); }); }); - """ % url)) - return output.eval_js('_hub_tmp') + """ + % url + ) + ) + return output.eval_js("_hub_tmp") def requests_with_progress(method, url, **kwargs): @@ -64,22 +84,23 @@ def requests_with_progress(method, url, **kwargs): Args: method (str): The HTTP method to use (e.g. 'GET', 'POST'). url (str): The URL to send the request to. - **kwargs (dict): Additional keyword arguments to pass to the underlying `requests.request` function. + **kwargs (any): Additional keyword arguments to pass to the underlying `requests.request` function. Returns: (requests.Response): The response object from the HTTP request. Note: - If 'progress' is set to True, the progress bar will display the download progress - for responses with a known content length. + - If 'progress' is set to True, the progress bar will display the download progress for responses with a known + content length. + - If 'progress' is a number then progress bar will display assuming content length = progress. """ - progress = kwargs.pop('progress', False) + progress = kwargs.pop("progress", False) if not progress: return requests.request(method, url, **kwargs) response = requests.request(method, url, stream=True, **kwargs) - total = int(response.headers.get('content-length', 0)) # total size + total = int(response.headers.get("content-length", 0) if isinstance(progress, bool) else progress) # total size try: - pbar = TQDM(total=total, unit='B', unit_scale=True, unit_divisor=1024) + pbar = TQDM(total=total, unit="B", unit_scale=True, unit_divisor=1024) for data in response.iter_content(chunk_size=1024): pbar.update(len(data)) pbar.close() @@ -101,7 +122,7 @@ def smart_request(method, url, retry=3, timeout=30, thread=True, code=-1, verbos code (int, optional): An identifier for the request, used for logging purposes. Default is -1. verbose (bool, optional): A flag to determine whether to print out to console or not. Default is True. progress (bool, optional): Whether to show a progress bar during the request. Default is False. - **kwargs (dict): Keyword arguments to be passed to the requests function specified in method. + **kwargs (any): Keyword arguments to be passed to the requests function specified in method. Returns: (requests.Response): The HTTP response object. If the request is executed in a separate thread, returns None. @@ -120,25 +141,27 @@ def smart_request(method, url, retry=3, timeout=30, thread=True, code=-1, verbos if r.status_code < 300: # return codes in the 2xx range are generally considered "good" or "successful" break try: - m = r.json().get('message', 'No JSON message.') + m = r.json().get("message", "No JSON message.") except AttributeError: - m = 'Unable to read JSON.' + m = "Unable to read JSON." if i == 0: if r.status_code in retry_codes: - m += f' Retrying {retry}x for {timeout}s.' if retry else '' + m += f" Retrying {retry}x for {timeout}s." 
if retry else "" elif r.status_code == 429: # rate limit h = r.headers # response headers - m = f"Rate limit reached ({h['X-RateLimit-Remaining']}/{h['X-RateLimit-Limit']}). " \ + m = ( + f"Rate limit reached ({h['X-RateLimit-Remaining']}/{h['X-RateLimit-Limit']}). " f"Please retry after {h['Retry-After']}s." + ) if verbose: - LOGGER.warning(f'{PREFIX}{m} {HELP_MSG} ({r.status_code} #{code})') + LOGGER.warning(f"{PREFIX}{m} {HELP_MSG} ({r.status_code} #{code})") if r.status_code not in retry_codes: return r - time.sleep(2 ** i) # exponential standoff + time.sleep(2**i) # exponential standoff return r args = method, url - kwargs['progress'] = progress + kwargs["progress"] = progress if thread: threading.Thread(target=func, args=args, kwargs=kwargs, daemon=True).start() else: @@ -157,29 +180,29 @@ class Events: enabled (bool): A flag to enable or disable Events based on certain conditions. """ - url = 'https://www.google-analytics.com/mp/collect?measurement_id=G-X8NCJYTQXM&api_secret=QLQrATrNSwGRFRLE-cbHJw' + url = "https://www.google-analytics.com/mp/collect?measurement_id=G-X8NCJYTQXM&api_secret=QLQrATrNSwGRFRLE-cbHJw" def __init__(self): - """ - Initializes the Events object with default values for events, rate_limit, and metadata. - """ + """Initializes the Events object with default values for events, rate_limit, and metadata.""" self.events = [] # events list self.rate_limit = 60.0 # rate limit (seconds) self.t = 0.0 # rate limit timer (seconds) self.metadata = { - 'cli': Path(sys.argv[0]).name == 'yolo', - 'install': 'git' if is_git_dir() else 'pip' if is_pip_package() else 'other', - 'python': '.'.join(platform.python_version_tuple()[:2]), # i.e. 3.10 - 'version': __version__, - 'env': ENVIRONMENT, - 'session_id': round(random.random() * 1E15), - 'engagement_time_msec': 1000} - self.enabled = \ - SETTINGS['sync'] and \ - RANK in (-1, 0) and \ - not TESTS_RUNNING and \ - ONLINE and \ - (is_pip_package() or get_git_origin_url() == 'https://github.com/ultralytics/ultralytics.git') + "cli": Path(sys.argv[0]).name == "yolo", + "install": "git" if is_git_dir() else "pip" if is_pip_package() else "other", + "python": ".".join(platform.python_version_tuple()[:2]), # i.e. 
3.10 + "version": __version__, + "env": ENVIRONMENT, + "session_id": round(random.random() * 1e15), + "engagement_time_msec": 1000, + } + self.enabled = ( + SETTINGS["sync"] + and RANK in (-1, 0) + and not TESTS_RUNNING + and ONLINE + and (is_pip_package() or get_git_origin_url() == "https://github.com/ultralytics/ultralytics.git") + ) def __call__(self, cfg): """ @@ -195,11 +218,13 @@ class Events: # Attempt to add to events if len(self.events) < 25: # Events list limited to 25 events (drop any events past this) params = { - **self.metadata, 'task': cfg.task, - 'model': cfg.model if cfg.model in GITHUB_ASSETS_NAMES else 'custom'} - if cfg.mode == 'export': - params['format'] = cfg.format - self.events.append({'name': cfg.mode, 'params': params}) + **self.metadata, + "task": cfg.task, + "model": cfg.model if cfg.model in GITHUB_ASSETS_NAMES else "custom", + } + if cfg.mode == "export": + params["format"] = cfg.format + self.events.append({"name": cfg.mode, "params": params}) # Check rate limit t = time.time() @@ -208,10 +233,10 @@ class Events: return # Time is over rate limiter, send now - data = {'client_id': SETTINGS['uuid'], 'events': self.events} # SHA-256 anonymized UUID hash and events list + data = {"client_id": SETTINGS["uuid"], "events": self.events} # SHA-256 anonymized UUID hash and events list # POST equivalent to requests.post(self.url, json=data) - smart_request('post', self.url, json=data, retry=0, verbose=False) + smart_request("post", self.url, json=data, retry=0, verbose=False) # Reset events and rate limit timer self.events = [] diff --git a/ultralytics/models/__init__.py b/ultralytics/models/__init__.py index e96f893..42de3fb 100644 --- a/ultralytics/models/__init__.py +++ b/ultralytics/models/__init__.py @@ -2,6 +2,7 @@ from .rtdetr import RTDETR from .sam import SAM -from .yolo import YOLO +from .yolo import YOLO, YOLOWorld +from .yolov10 import YOLOv10 -__all__ = 'YOLO', 'RTDETR', 'SAM' # allow simpler import +__all__ = "YOLO", "RTDETR", "SAM", "YOLOWorld", "YOLOv10" # allow simpler import diff --git a/ultralytics/models/__pycache__/__init__.cpython-312.pyc b/ultralytics/models/__pycache__/__init__.cpython-312.pyc index 0024398af833dfb360de2e8670abf852ac88d727..d0c12fb8ffa56cc11cfd1cd2c42e4ed9665ad99c 100644 GIT binary patch delta 266 zcmZ3^^p1)5G%qg~0}xz@f0X`qBCjN)%|vze`dqdsHb#aNh7^_@_FRrA4j`K~inEeU zll>(lP_ZWCEw-Q#7uS#=Ad5NJ(N~k{7E7eRkN+)B5E-6dl#>FKVFz)_3=K3{i&%lW zikLwJ3y5F?5$r(1Pm}8wTTw|$YDp1T`WAC>Vs6pIx%_ruHMiI+^KhUZmNEHVY+@wYHC4VYKcBn{lsfpS^_{NAlDX40ErLGjEsz5 b*%%n5?=pzoXHdM$AoGCBvXQ-r4=4!$Yo|nZ delta 177 zcmaFIw490eG%qg~0}#}?ElOKEkynyYXQH~g4I@JeOB7ort0vn^Mxdl7<1MzJ5Es{w zARvo5*wGhAu|)d&_-is3u>$3bm_Y;!knq!FpSVj&>lS-_d}2;ceEdp=&p=UzU*`H5 z`MIh3<%Q|`DXAr?$t5L4iOJcSdFhj-7`4=Sfl5I36pI3h56p~=j9=Lp7^Uwrh}>lm Od%&gG$X>(^lmP%Vf-7wR diff --git a/ultralytics/models/__pycache__/__init__.cpython-39.pyc b/ultralytics/models/__pycache__/__init__.cpython-39.pyc index 213a9b8ed8c3d5d72ce64842c1b97da55a00ba10..4b37be53cc036b130007f2f58bbb27676c571930 100644 GIT binary patch delta 244 zcmbQrbd`xWk(ZZ?0SKh3AEm#Y$ScceGf`W$K9?F?uzixWhK=NILq0A<)goH9cLP1Yh7 zpspe&5Wx&0SV06Eknq#wief7&Nl7g!0!v3R7bi}f&*pZEr7}M!AEFJU_!c{e4>Sv` z>lS-_d}2;ceEdp=B9QyR#4iIEs~DHmlGNmqkfOxo?99CMiFfsQIDqmzj6942j66&L Dn>IRJ delta 164 zcmcc0G?j@rk(ZZ?0SMCAf2FLQ$ScdJGf`XBj*%gSC5kPDHJCw@?Ij~nT9fe>TTqCL zYe*1~#T@MD3#3>g{eAp3nTuF}@G%qg~0}xz@f0RCv=Z}^;1$) z3-VG+^egjo^2-bjCU0W4W)z?Nj=3+kDmo~&q_ilnSRqj%B{R7sGe0k}s8S&}v7jI` zFI}NHH9a>quSB6Fu{c|yBwrypzo;m+xFA0-1teQglnT<5U!;(jm!eRXn3I{32;wE@ 
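With `YOLOWorld` and `YOLOv10` exported from `ultralytics.models` as above, the new classes plug into the familiar Ultralytics `Model` API. A minimal usage sketch; the checkpoint file names below are assumptions for illustration, not files shipped by this patch:

```python
# Hypothetical usage of the newly exported classes; weight names are assumed.
from ultralytics.models import YOLOv10, YOLOWorld

detector = YOLOv10("yolov10n.pt")  # nano variant, matching cfg/models/v10/yolov10n.yaml
results = detector.predict("bus.jpg")  # standard Ultralytics predict() API

world = YOLOWorld("yolov8s-world.pt")  # open-vocabulary detector (assumed checkpoint)
world.set_classes(["person", "backpack"])  # prompt with custom class names
results = world.predict("bus.jpg")
```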
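For orientation, the retry behaviour shared by `request_queue` (hub/session.py) and `smart_request` (hub/utils.py) above reduces to one pattern: exponential backoff between attempts, an overall wall-clock budget, and retries only for transient HTTP status codes (408/502/504). A minimal self-contained sketch of that pattern, not the actual HUB code:

```python
import time

import requests

RETRY_CODES = {408, 502, 504}  # request timeout, bad gateway, gateway timeout


def get_with_backoff(url, retry=3, timeout=30):
    """Retry a GET with exponential backoff (2**i seconds) under a wall-clock budget."""
    t0 = time.time()
    response = None
    for i in range(retry + 1):
        if time.time() - t0 > timeout:
            break  # overall time budget exhausted, give up
        response = requests.get(url)
        if response.status_code < 300:
            return response  # 2xx success, stop retrying
        if response.status_code not in RETRY_CODES:
            return response  # permanent failure, retrying will not help
        time.sleep(2**i)  # back off 1s, 2s, 4s, ... between attempts
    return response
```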
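The `Events` class above also implements a simple client-side rate limiter: buffer events, flush at most once per `rate_limit` seconds, then reset. The same idea in isolation, as a sketch with `print` standing in for the actual `smart_request` POST:

```python
import time


class RateLimitedSender:
    """Minimal sketch of the Events rate limiting: buffer up to 25 events and
    flush at most once per `rate_limit` seconds."""

    def __init__(self, rate_limit=60.0):
        self.events = []  # buffered events
        self.rate_limit = rate_limit  # seconds between sends
        self.t = 0.0  # timestamp of the last send

    def __call__(self, event):
        if len(self.events) < 25:  # cap the buffer, as Events does
            self.events.append(event)
        t = time.time()
        if (t - self.t) < self.rate_limit:
            return  # still inside the rate-limit window; keep buffering
        print(f"sending {len(self.events)} events")  # stand-in for the telemetry POST
        self.events = []  # reset buffer and timer, as Events does
        self.t = t
```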
diff --git a/ultralytics/models/fastsam/prompt.py b/ultralytics/models/fastsam/prompt.py
--- a/ultralytics/models/fastsam/prompt.py
+++ b/ultralytics/models/fastsam/prompt.py
 class FastSAMPrompt:
+    """
+    Fast Segment Anything Model class for image annotation and visualization.
+
+    Attributes:
+        device (str): Computing device ('cuda' or 'cpu').
+        results: Object detection or segmentation results.
+        source: Source image or image path.
+        clip: CLIP model for linear assignment.
+    """
 
-    def __init__(self, source, results, device='cuda') -> None:
+    def __init__(self, source, results, device="cuda") -> None:
+        """Initializes FastSAMPrompt with given source, results and device, and assigns clip for linear assignment."""
         self.device = device
         self.results = results
         self.source = source
 
         # Import and assign clip
         try:
-            import clip  # for linear_assignment
+            import clip
         except ImportError:
             from ultralytics.utils.checks import check_requirements
-            check_requirements('git+https://github.com/openai/CLIP.git')
+
+            check_requirements("git+https://github.com/openai/CLIP.git")
             import clip
         self.clip = clip
 
     @staticmethod
     def _segment_image(image, bbox):
+        """Segments the given image according to the provided bounding box coordinates."""
         image_array = np.array(image)
         segmented_image_array = np.zeros_like(image_array)
         x1, y1, x2, y2 = bbox
         segmented_image_array[y1:y2, x1:x2] = image_array[y1:y2, x1:x2]
         segmented_image = Image.fromarray(segmented_image_array)
-        black_image = Image.new('RGB', image.size, (255, 255, 255))
+        black_image = Image.new("RGB", image.size, (255, 255, 255))
         # transparency_mask = np.zeros_like((), dtype=np.uint8)
         transparency_mask = np.zeros((image_array.shape[0], image_array.shape[1]), dtype=np.uint8)
         transparency_mask[y1:y2, x1:x2] = 255
-        transparency_mask_image = Image.fromarray(transparency_mask, mode='L')
+        transparency_mask_image = Image.fromarray(transparency_mask, mode="L")
         black_image.paste(segmented_image, mask=transparency_mask_image)
         return black_image
 
     @staticmethod
     def _format_results(result, filter=0):
+        """Formats detection results into list of annotations each containing ID, segmentation, bounding box, score and
+        area.
+ """ annotations = [] n = len(result.masks.data) if result.masks is not None else 0 for i in range(n): mask = result.masks.data[i] == 1.0 if torch.sum(mask) >= filter: annotation = { - 'id': i, - 'segmentation': mask.cpu().numpy(), - 'bbox': result.boxes.data[i], - 'score': result.boxes.conf[i]} - annotation['area'] = annotation['segmentation'].sum() + "id": i, + "segmentation": mask.cpu().numpy(), + "bbox": result.boxes.data[i], + "score": result.boxes.conf[i], + } + annotation["area"] = annotation["segmentation"].sum() annotations.append(annotation) return annotations @staticmethod def _get_bbox_from_mask(mask): + """Applies morphological transformations to the mask, displays it, and if with_contours is True, draws + contours. + """ mask = mask.astype(np.uint8) contours, hierarchy = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE) x1, y1, w, h = cv2.boundingRect(contours[0]) @@ -74,22 +93,38 @@ class FastSAMPrompt: y2 = max(y2, y_t + h_t) return [x1, y1, x2, y2] - def plot(self, - annotations, - output, - bbox=None, - points=None, - point_label=None, - mask_random_color=True, - better_quality=True, - retina=False, - with_contours=True): + def plot( + self, + annotations, + output, + bbox=None, + points=None, + point_label=None, + mask_random_color=True, + better_quality=True, + retina=False, + with_contours=True, + ): + """ + Plots annotations, bounding boxes, and points on images and saves the output. + + Args: + annotations (list): Annotations to be plotted. + output (str or Path): Output directory for saving the plots. + bbox (list, optional): Bounding box coordinates [x1, y1, x2, y2]. Defaults to None. + points (list, optional): Points to be plotted. Defaults to None. + point_label (list, optional): Labels for the points. Defaults to None. + mask_random_color (bool, optional): Whether to use random color for masks. Defaults to True. + better_quality (bool, optional): Whether to apply morphological transformations for better mask quality. Defaults to True. + retina (bool, optional): Whether to use retina mask. Defaults to False. + with_contours (bool, optional): Whether to plot contours. Defaults to True. + """ pbar = TQDM(annotations, total=len(annotations)) for ann in pbar: result_name = os.path.basename(ann.path) - image = ann.orig_img + image = ann.orig_img[..., ::-1] # BGR to RGB original_h, original_w = ann.orig_shape - # for macOS only + # For macOS only # plt.switch_backend('TkAgg') plt.figure(figsize=(original_w / 100, original_h / 100)) # Add subplot with no margin. @@ -134,19 +169,13 @@ class FastSAMPrompt: contour_mask = temp / 255 * color.reshape(1, 1, -1) plt.imshow(contour_mask) - plt.axis('off') - fig = plt.gcf() - - # Check if the canvas has been drawn - if fig.canvas.get_renderer() is None: # macOS requires this or tests fail - fig.canvas.draw() - + # Save the figure save_path = Path(output) / result_name save_path.parent.mkdir(exist_ok=True, parents=True) - image = Image.frombytes('RGB', fig.canvas.get_width_height(), fig.canvas.tostring_rgb()) - image.save(save_path) + plt.axis("off") + plt.savefig(save_path, bbox_inches="tight", pad_inches=0, transparent=True) plt.close() - pbar.set_description(f'Saving {result_name} to {save_path}') + pbar.set_description(f"Saving {result_name} to {save_path}") @staticmethod def fast_show_mask( @@ -160,6 +189,20 @@ class FastSAMPrompt: target_height=960, target_width=960, ): + """ + Quickly shows the mask annotations on the given matplotlib axis. + + Args: + annotation (array-like): Mask annotation. 
+ ax (matplotlib.axes.Axes): Matplotlib axis. + random_color (bool, optional): Whether to use random color for masks. Defaults to False. + bbox (list, optional): Bounding box coordinates [x1, y1, x2, y2]. Defaults to None. + points (list, optional): Points to be plotted. Defaults to None. + pointlabel (list, optional): Labels for the points. Defaults to None. + retinamask (bool, optional): Whether to use retina mask. Defaults to True. + target_height (int, optional): Target height for resizing. Defaults to 960. + target_width (int, optional): Target width for resizing. Defaults to 960. + """ n, h, w = annotation.shape # batch, height, width areas = np.sum(annotation, axis=(1, 2)) @@ -175,26 +218,26 @@ class FastSAMPrompt: mask_image = np.expand_dims(annotation, -1) * visual show = np.zeros((h, w, 4)) - h_indices, w_indices = np.meshgrid(np.arange(h), np.arange(w), indexing='ij') + h_indices, w_indices = np.meshgrid(np.arange(h), np.arange(w), indexing="ij") indices = (index[h_indices, w_indices], h_indices, w_indices, slice(None)) show[h_indices, w_indices, :] = mask_image[indices] if bbox is not None: x1, y1, x2, y2 = bbox - ax.add_patch(plt.Rectangle((x1, y1), x2 - x1, y2 - y1, fill=False, edgecolor='b', linewidth=1)) + ax.add_patch(plt.Rectangle((x1, y1), x2 - x1, y2 - y1, fill=False, edgecolor="b", linewidth=1)) # Draw point if points is not None: plt.scatter( [point[0] for i, point in enumerate(points) if pointlabel[i] == 1], [point[1] for i, point in enumerate(points) if pointlabel[i] == 1], s=20, - c='y', + c="y", ) plt.scatter( [point[0] for i, point in enumerate(points) if pointlabel[i] == 0], [point[1] for i, point in enumerate(points) if pointlabel[i] == 0], s=20, - c='m', + c="m", ) if not retinamask: @@ -203,6 +246,7 @@ class FastSAMPrompt: @torch.no_grad() def retrieve(self, model, preprocess, elements, search_text: str, device) -> int: + """Processes images and text with a model, calculates similarity, and returns softmax score.""" preprocessed_images = [preprocess(image).to(device) for image in elements] tokenized_text = self.clip.tokenize([search_text]).to(device) stacked_images = torch.stack(preprocessed_images) @@ -214,12 +258,13 @@ class FastSAMPrompt: return probs[:, 0].softmax(dim=0) def _crop_image(self, format_results): + """Crops an image based on provided annotation format and returns cropped images and related data.""" if os.path.isdir(self.source): raise ValueError(f"'{self.source}' is a directory, not a valid source for this function.") image = Image.fromarray(cv2.cvtColor(self.results[0].orig_img, cv2.COLOR_BGR2RGB)) ori_w, ori_h = image.size annotations = format_results - mask_h, mask_w = annotations[0]['segmentation'].shape + mask_h, mask_w = annotations[0]["segmentation"].shape if ori_w != mask_w or ori_h != mask_h: image = image.resize((mask_w, mask_h)) cropped_boxes = [] @@ -227,18 +272,19 @@ class FastSAMPrompt: not_crop = [] filter_id = [] for _, mask in enumerate(annotations): - if np.sum(mask['segmentation']) <= 100: + if np.sum(mask["segmentation"]) <= 100: filter_id.append(_) continue - bbox = self._get_bbox_from_mask(mask['segmentation']) # mask 的 bbox - cropped_boxes.append(self._segment_image(image, bbox)) # 保存裁剪的图片 - cropped_images.append(bbox) # 保存裁剪的图片的bbox + bbox = self._get_bbox_from_mask(mask["segmentation"]) # bbox from mask + cropped_boxes.append(self._segment_image(image, bbox)) # save cropped image + cropped_images.append(bbox) # save cropped image bbox return cropped_boxes, cropped_images, not_crop, filter_id, annotations def 
box_prompt(self, bbox): + """Modifies the bounding box properties and calculates IoU between masks and bounding box.""" if self.results[0].masks is not None: - assert (bbox[2] != 0 and bbox[3] != 0) + assert bbox[2] != 0 and bbox[3] != 0 if os.path.isdir(self.source): raise ValueError(f"'{self.source}' is a directory, not a valid source for this function.") masks = self.results[0].masks.data @@ -250,7 +296,8 @@ class FastSAMPrompt: int(bbox[0] * w / target_width), int(bbox[1] * h / target_height), int(bbox[2] * w / target_width), - int(bbox[3] * h / target_height), ] + int(bbox[3] * h / target_height), + ] bbox[0] = max(round(bbox[0]), 0) bbox[1] = max(round(bbox[1]), 0) bbox[2] = min(round(bbox[2]), w) @@ -259,29 +306,30 @@ class FastSAMPrompt: # IoUs = torch.zeros(len(masks), dtype=torch.float32) bbox_area = (bbox[3] - bbox[1]) * (bbox[2] - bbox[0]) - masks_area = torch.sum(masks[:, bbox[1]:bbox[3], bbox[0]:bbox[2]], dim=(1, 2)) + masks_area = torch.sum(masks[:, bbox[1] : bbox[3], bbox[0] : bbox[2]], dim=(1, 2)) orig_masks_area = torch.sum(masks, dim=(1, 2)) union = bbox_area + orig_masks_area - masks_area - IoUs = masks_area / union - max_iou_index = torch.argmax(IoUs) + iou = masks_area / union + max_iou_index = torch.argmax(iou) self.results[0].masks.data = torch.tensor(np.array([masks[max_iou_index].cpu().numpy()])) return self.results - def point_prompt(self, points, pointlabel): # numpy 处理 + def point_prompt(self, points, pointlabel): # numpy + """Adjusts points on detected masks based on user input and returns the modified results.""" if self.results[0].masks is not None: if os.path.isdir(self.source): raise ValueError(f"'{self.source}' is a directory, not a valid source for this function.") masks = self._format_results(self.results[0], 0) target_height, target_width = self.results[0].orig_shape - h = masks[0]['segmentation'].shape[0] - w = masks[0]['segmentation'].shape[1] + h = masks[0]["segmentation"].shape[0] + w = masks[0]["segmentation"].shape[1] if h != target_height or w != target_width: points = [[int(point[0] * w / target_width), int(point[1] * h / target_height)] for point in points] onemask = np.zeros((h, w)) for annotation in masks: - mask = annotation['segmentation'] if isinstance(annotation, dict) else annotation + mask = annotation["segmentation"] if isinstance(annotation, dict) else annotation for i, point in enumerate(points): if mask[point[1], point[0]] == 1 and pointlabel[i] == 1: onemask += mask @@ -292,16 +340,18 @@ class FastSAMPrompt: return self.results def text_prompt(self, text): + """Processes a text prompt, applies it to existing results and returns the updated results.""" if self.results[0].masks is not None: format_results = self._format_results(self.results[0], 0) cropped_boxes, cropped_images, not_crop, filter_id, annotations = self._crop_image(format_results) - clip_model, preprocess = self.clip.load('ViT-B/32', device=self.device) + clip_model, preprocess = self.clip.load("ViT-B/32", device=self.device) scores = self.retrieve(clip_model, preprocess, cropped_boxes, text, device=self.device) max_idx = scores.argsort() max_idx = max_idx[-1] max_idx += sum(np.array(filter_id) <= int(max_idx)) - self.results[0].masks.data = torch.tensor(np.array([ann['segmentation'] for ann in annotations])) + self.results[0].masks.data = torch.tensor(np.array([annotations[max_idx]["segmentation"]])) return self.results def everything_prompt(self): + """Returns the processed results from the previous methods in the class.""" return self.results diff --git 
a/ultralytics/models/fastsam/utils.py b/ultralytics/models/fastsam/utils.py index e99fd62..480e903 100644 --- a/ultralytics/models/fastsam/utils.py +++ b/ultralytics/models/fastsam/utils.py @@ -42,23 +42,23 @@ def bbox_iou(box1, boxes, iou_thres=0.9, image_shape=(640, 640), raw_output=Fals high_iou_indices (torch.Tensor): Indices of boxes with IoU > thres """ boxes = adjust_bboxes_to_image_border(boxes, image_shape) - # obtain coordinates for intersections + # Obtain coordinates for intersections x1 = torch.max(box1[0], boxes[:, 0]) y1 = torch.max(box1[1], boxes[:, 1]) x2 = torch.min(box1[2], boxes[:, 2]) y2 = torch.min(box1[3], boxes[:, 3]) - # compute the area of intersection + # Compute the area of intersection intersection = (x2 - x1).clamp(0) * (y2 - y1).clamp(0) - # compute the area of both individual boxes + # Compute the area of both individual boxes box1_area = (box1[2] - box1[0]) * (box1[3] - box1[1]) box2_area = (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1]) - # compute the area of union + # Compute the area of union union = box1_area + box2_area - intersection - # compute the IoU + # Compute the IoU iou = intersection / union # Should be shape (n, ) if raw_output: return 0 if iou.numel() == 0 else iou diff --git a/ultralytics/models/fastsam/val.py b/ultralytics/models/fastsam/val.py index fa25e49..9014b27 100644 --- a/ultralytics/models/fastsam/val.py +++ b/ultralytics/models/fastsam/val.py @@ -5,10 +5,36 @@ from ultralytics.utils.metrics import SegmentMetrics class FastSAMValidator(SegmentationValidator): + """ + Custom validation class for fast SAM (Segment Anything Model) segmentation in Ultralytics YOLO framework. + + Extends the SegmentationValidator class, customizing the validation process specifically for fast SAM. This class + sets the task to 'segment' and uses the SegmentMetrics for evaluation. Additionally, plotting features are disabled + to avoid errors during validation. + + Attributes: + dataloader: The data loader object used for validation. + save_dir (str): The directory where validation results will be saved. + pbar: A progress bar object. + args: Additional arguments for customization. + _callbacks: List of callback functions to be invoked during validation. + """ def __init__(self, dataloader=None, save_dir=None, pbar=None, args=None, _callbacks=None): - """Initialize SegmentationValidator and set task to 'segment', metrics to SegmentMetrics.""" + """ + Initialize the FastSAMValidator class, setting the task to 'segment' and metrics to SegmentMetrics. + + Args: + dataloader (torch.utils.data.DataLoader): Dataloader to be used for validation. + save_dir (Path, optional): Directory to save results. + pbar (tqdm.tqdm): Progress bar for displaying progress. + args (SimpleNamespace): Configuration for the validator. + _callbacks (dict): Dictionary to store various callback functions. + + Notes: + Plots for ConfusionMatrix and other related metrics are disabled in this class to avoid errors. 
+ """ super().__init__(dataloader, save_dir, pbar, args, _callbacks) - self.args.task = 'segment' + self.args.task = "segment" self.args.plots = False # disable ConfusionMatrix and other plots to avoid errors self.metrics = SegmentMetrics(save_dir=self.save_dir, on_plot=self.on_plot) diff --git a/ultralytics/models/nas/__init__.py b/ultralytics/models/nas/__init__.py index eec3837..b095a05 100644 --- a/ultralytics/models/nas/__init__.py +++ b/ultralytics/models/nas/__init__.py @@ -4,4 +4,4 @@ from .model import NAS from .predict import NASPredictor from .val import NASValidator -__all__ = 'NASPredictor', 'NASValidator', 'NAS' +__all__ = "NASPredictor", "NASValidator", "NAS" diff --git a/ultralytics/models/nas/__pycache__/__init__.cpython-312.pyc b/ultralytics/models/nas/__pycache__/__init__.cpython-312.pyc index 84e39329f4886d3c2ed9fa2058ac587379a8bc07..5c155c19e6e57d1545fa7bd473ba25ec575f2fac 100644 GIT binary patch delta 50 zcmdnZw1k|yoSm7MKJl#$0P+J3jsO4v delta 47 zcmZ3&w33M@k(ZZ?0SMCAf2B<1IVEoCVin_(np)tKnpl*VnU@|@nV*wiW;*eO4FERs B4{QJc diff --git a/ultralytics/models/nas/__pycache__/model.cpython-312.pyc b/ultralytics/models/nas/__pycache__/model.cpython-312.pyc index e33e05227fe801836005710f73d0acd123c81fda..b7b209ce50cdfa542ed233bdc5e7412fc0e99e20 100644 GIT binary patch delta 1335 zcmZWo&2Jk;6rb7kuI+VV6QGuuQ;)hsT+Og7O%KtL+}lPyT+LNbA-8Lnmn!~e7D66@b?x%< zWdf1`X$#6i5{Q_H^|x3<63QeTTtr3BK;LNbgfv1wNk}Ia8=SI)_z~feWO3j(n9-Fj zW^B%b&5=nBqJ;G%i)cdhe|<%}s*i02v1pUEK`3KC+>*SJ5EeCg#EQo0B}u?5xZ=%- zQ6dEaS3Km>m&_QGe)2XE!T(5;Zo;D`FlF;#8SGI4p*6aR6zwC4j()`SX-z2^NBT<7 zZ+Aj=-Z0Qvs@LnCE!h&0k=(d_=Ekp2C>LKCj6X*ST9Pv6>s`r`e>F#0J)6k5aGqT8 zWlM*klQ2na1Oj@aOXRm%k-W?TzZ*(OnILmpA{4dAPijfABj?66%VJ7SkW^n=3!o-Q zB;;X*$hD=FWzrB)z?rIu*3(&jtO5@oCtFwQ#nMmHFNoEeSuFsp7k53Ao& zXT3}l?_l$0pZ<-QGd+eN#nhG+=2;(mfJs9)R;UQ%zS z??1Q*kmMWKAPWaB%07Pg%Ub5RgzA2cD8Bm9s~o$cy`F&WI@)v51EZ zqM<&>t+h^SSXq-BqJ8VQllkK*+bQjP$esDYd1TKz<~;dvWe>q|)Bar_2R3paHS&Yg z8D|aeoj`7W-?E%ltiSU2%r$-pOvX9b3iC delta 521 zcmX|6J!lj`6n<}ZXJ&Vgor_U4mncUx3M;}9Vj);a5EKg+yv1e<7unf6Lx|DKp;nq= z!Lt&4g+HZ25MrTN*jZ^I7AjVTC^j}$mt0OQon3z(%=gW__kG`EzDzzCS8sja15?*e zJZxQ&&(u@Aan3w%!d=K9Mh!93Qae3pp~l>gEe4srS~yH;mT_%myk%=U|KE*;b~Vp2 zmYJhP4*%4#+kw=q;X+!A!}T{ z+-~Tso2A`u=X^Rpcd<=PG2>4d)6!uBzm{R0?g))QKgBdM3gj`gXilE2+fVrF-(|NT z;YM7R_R4<=dM9U5(l+P5<>WvgG5(QH^jVefyBGDr&|_<}pn(NqciA88b@*BA?*O4T81g0-BehjtJ3CEU kr(z&bYcfgnVwxnRMqbpu!Q^Myvn8gmiM{Fb7g$tmsw_Z80fb1xBd)4gNcP~FYmak1W6g~7 zaE*+VNU*8Y=x$kn!~(G^Z1@BG09KXQl?D6$1I~;?aC;XX-!td&o$q|-{$2iap+t(s zya&(1?;krC=l&|aMw2?cGOvNC*YGK71oSu!X!sO8_98@6G`(MJgeLE*R&+d3g-a#_g5c7%aFi zDT{F=6_yWr6V^7EG6M&fqMCQE^)A=A9TTnbK*=pmnZ`t5E(}u6I5e+-&(b`T8FtJCHwf-*eDY38s65JiOz%zR&_;h8VpsT6z>Q|v5^=`@t7N8 zm`i4e-o!GpsQ&{R4>cDZK*}yaYRE@1^wu6~Bl|}d+x-mE!$z;|d{6p=m@PR3mZes! zHP|&>DV*it-PwarKO`Z#JUP6a5>zur@drc0^w-EKt>?8-l_h+Kn67QXk}wNw>ICqn ze+22XDqdz08O8=ElX!7g#Q}28g%0h3zoAu{$Q|6yE=#ODgV5@bbr}Nsjf(S6NZv3250B0pl}^p+55?FL2bj2y@!E8W3-2Q{=;w_ z(qPk9=S}b|9HZyxiPubt(_jt3x;zeg>0YLn9lK@t!?(Az=$6ASLhgyzOEq&4o2U8Y z-{8z-4y>ND9_Q0@ehfLmt4s^-{D%6#Luu4mkpy%K!e%>M8>^uiCK&H@VI)>bih%^d z5ev3G>te}*;eHX_whcK6F+7?b46P~zy{{!#!X0!uc^e8;OBPfA6fap`7A!A8)_-#% zeG8%M3CUbtAKksj%uoqil5)40MD5}}84N5)y9Z7yis^yf+byRt>j`M>3U0x0DY0*E zi9rL&i_DF4cWgoU96-TQ0a~rvOif5zLCSuXD*p9)rI5^Lf69EwG@DemlS|oqKYnM! 
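Similarly, the `bbox_iou` helper touched in fastsam/utils.py keeps its signature; a small usage sketch inferred from that signature (it returns matching indices by default, or raw IoU values with `raw_output=True`):

```python
import torch

from ultralytics.models.fastsam.utils import bbox_iou

box1 = torch.tensor([100, 100, 200, 200])  # reference box, xyxy
boxes = torch.tensor([[110, 110, 210, 210], [300, 300, 400, 400]])

idx = bbox_iou(box1, boxes, iou_thres=0.9, image_shape=(640, 640))  # indices with IoU > 0.9
ious = bbox_iou(box1, boxes, iou_thres=0.9, image_shape=(640, 640), raw_output=True)  # IoU values
```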
diff --git a/ultralytics/models/nas/model.py b/ultralytics/models/nas/model.py
index f848cc4..7997e96 100644
--- a/ultralytics/models/nas/model.py
+++ b/ultralytics/models/nas/model.py
@@ -17,26 +17,47 @@ import torch
 from ultralytics.engine.model import Model
 from ultralytics.utils.torch_utils import model_info, smart_inference_mode
-
 from .predict import NASPredictor
 from .val import NASValidator
 
 
 class NAS(Model):
+    """
+    YOLO NAS model for object detection.
 
-    def __init__(self, model='yolo_nas_s.pt') -> None:
-        assert Path(model).suffix not in ('.yaml', '.yml'), 'YOLO-NAS models only support pre-trained models.'
-        super().__init__(model, task='detect')
+    This class provides an interface for the YOLO-NAS models and extends the `Model` class from Ultralytics engine.
+    It is designed to facilitate the task of object detection using pre-trained or custom-trained YOLO-NAS models.
+
+    Example:
+        ```python
+        from ultralytics import NAS
+
+        model = NAS('yolo_nas_s')
+        results = model.predict('ultralytics/assets/bus.jpg')
+        ```
+
+    Attributes:
+        model (str): Path to the pre-trained model or model name. Defaults to 'yolo_nas_s.pt'.
+
+    Note:
+        YOLO-NAS models only support pre-trained models. Do not provide YAML configuration files.
+    """
+
+    def __init__(self, model="yolo_nas_s.pt") -> None:
+        """Initializes the NAS model with the provided or default 'yolo_nas_s.pt' model."""
+        assert Path(model).suffix not in (".yaml", ".yml"), "YOLO-NAS models only support pre-trained models."
+        super().__init__(model, task="detect")
 
     @smart_inference_mode()
     def _load(self, weights: str, task: str):
-        # Load or create new NAS model
+        """Loads an existing NAS model weights or creates a new NAS model with pretrained weights if not provided."""
         import super_gradients
+
         suffix = Path(weights).suffix
-        if suffix == '.pt':
+        if suffix == ".pt":
             self.model = torch.load(weights)
-        elif suffix == '':
-            self.model = super_gradients.training.models.get(weights, pretrained_weights='coco')
+        elif suffix == "":
+            self.model = super_gradients.training.models.get(weights, pretrained_weights="coco")
         # Standardize model
         self.model.fuse = lambda verbose=True: self.model
         self.model.stride = torch.tensor([32])
@@ -44,7 +65,7 @@ class NAS(Model):
         self.model.is_fused = lambda: False  # for info()
         self.model.yaml = {}  # for info()
         self.model.pt_path = weights  # for export()
-        self.model.task = 'detect'  # for export()
+        self.model.task = "detect"  # for export()
 
     def info(self, detailed=False, verbose=True):
         """
@@ -58,4 +79,5 @@ class NAS(Model):
 
     @property
     def task_map(self):
-        return {'detect': {'predictor': NASPredictor, 'validator': NASValidator}}
+        """Returns a dictionary mapping tasks to respective predictor and validator classes."""
+        return {"detect": {"predictor": NASPredictor, "validator": NASValidator}}
diff --git a/ultralytics/models/nas/predict.py b/ultralytics/models/nas/predict.py
index fe06c29..2e48546 100644
--- a/ultralytics/models/nas/predict.py
+++ b/ultralytics/models/nas/predict.py
@@ -8,6 +8,29 @@ from ultralytics.utils import ops
 
 
 class NASPredictor(BasePredictor):
+    """
+    Ultralytics YOLO NAS Predictor for object detection.
+
+    This class extends the `BasePredictor` from Ultralytics engine and is responsible for post-processing the
+    raw predictions generated by the YOLO NAS models. It applies operations like non-maximum suppression and
+    scaling the bounding boxes to fit the original image dimensions.
+
+    Attributes:
+        args (Namespace): Namespace containing various configurations for post-processing.
+
+    Example:
+        ```python
+        from ultralytics import NAS
+
+        model = NAS('yolo_nas_s')
+        predictor = model.predictor
+        # Assumes that raw_preds, img, orig_imgs are available
+        results = predictor.postprocess(raw_preds, img, orig_imgs)
+        ```
+
+    Note:
+        Typically, this class is not instantiated directly. It is used internally within the `NAS` class.
+    """
 
     def postprocess(self, preds_in, img, orig_imgs):
         """Postprocess predictions and returns a list of Results objects."""
@@ -16,12 +39,14 @@ class NASPredictor(BasePredictor):
         boxes = ops.xyxy2xywh(preds_in[0][0])
         preds = torch.cat((boxes, preds_in[0][1]), -1).permute(0, 2, 1)
-        preds = ops.non_max_suppression(preds,
-                                        self.args.conf,
-                                        self.args.iou,
-                                        agnostic=self.args.agnostic_nms,
-                                        max_det=self.args.max_det,
-                                        classes=self.args.classes)
+        preds = ops.non_max_suppression(
+            preds,
+            self.args.conf,
+            self.args.iou,
+            agnostic=self.args.agnostic_nms,
+            max_det=self.args.max_det,
+            classes=self.args.classes,
+        )
 
         if not isinstance(orig_imgs, list):  # input images are a torch.Tensor, not a list
             orig_imgs = ops.convert_torch2numpy_batch(orig_imgs)
diff --git a/ultralytics/models/nas/val.py b/ultralytics/models/nas/val.py
index 5c39171..a4a4f99 100644
--- a/ultralytics/models/nas/val.py
+++ b/ultralytics/models/nas/val.py
@@ -5,20 +5,46 @@ import torch
 from ultralytics.models.yolo.detect import DetectionValidator
 from ultralytics.utils import ops
 
-__all__ = ['NASValidator']
+__all__ = ["NASValidator"]
 
 
 class NASValidator(DetectionValidator):
+    """
+    Ultralytics YOLO NAS Validator for object detection.
+
+    Extends `DetectionValidator` from the Ultralytics models package and is designed to post-process the raw predictions
+    generated by YOLO NAS models. It performs non-maximum suppression to remove overlapping and low-confidence boxes,
+    ultimately producing the final detections.
+
+    Attributes:
+        args (Namespace): Namespace containing various configurations for post-processing, such as confidence and IoU thresholds.
+        lb (torch.Tensor): Optional tensor for multilabel NMS.
+
+    Example:
+        ```python
+        from ultralytics import NAS
+
+        model = NAS('yolo_nas_s')
+        validator = model.validator
+        # Assumes that raw_preds are available
+        final_preds = validator.postprocess(raw_preds)
+        ```
+
+    Note:
+        This class is generally not instantiated directly but is used internally within the `NAS` class.
+ """ def postprocess(self, preds_in): """Apply Non-maximum suppression to prediction outputs.""" boxes = ops.xyxy2xywh(preds_in[0][0]) preds = torch.cat((boxes, preds_in[0][1]), -1).permute(0, 2, 1) - return ops.non_max_suppression(preds, - self.args.conf, - self.args.iou, - labels=self.lb, - multi_label=False, - agnostic=self.args.single_cls, - max_det=self.args.max_det, - max_time_img=0.5) + return ops.non_max_suppression( + preds, + self.args.conf, + self.args.iou, + labels=self.lb, + multi_label=False, + agnostic=self.args.single_cls, + max_det=self.args.max_det, + max_time_img=0.5, + ) diff --git a/ultralytics/models/rtdetr/__init__.py b/ultralytics/models/rtdetr/__init__.py index 4d12115..172c74b 100644 --- a/ultralytics/models/rtdetr/__init__.py +++ b/ultralytics/models/rtdetr/__init__.py @@ -4,4 +4,4 @@ from .model import RTDETR from .predict import RTDETRPredictor from .val import RTDETRValidator -__all__ = 'RTDETRPredictor', 'RTDETRValidator', 'RTDETR' +__all__ = "RTDETRPredictor", "RTDETRValidator", "RTDETR" diff --git a/ultralytics/models/rtdetr/__pycache__/__init__.cpython-312.pyc b/ultralytics/models/rtdetr/__pycache__/__init__.cpython-312.pyc index e62c4190cf2642a68f4314a782c7c80abfb15d6b..dc53474c1a772e0fdfe6f41539fea6168be108da 100644 GIT binary patch delta 50 zcmX@kbb^WJG%qg~0}xz@f0RCv=aiJaenx(7s(yK4x_(M(YC&FViGF2%PJWr8!Nm6w E0H1yl(f|Me delta 49 zcmX@XbexIjG%qg~0}#}?ElQinb4t=qKO;XkRlmG2T|XtYBsICDq$n{tJ2NkR;)e(T Dd4dr` diff --git a/ultralytics/models/rtdetr/__pycache__/__init__.cpython-39.pyc b/ultralytics/models/rtdetr/__pycache__/__init__.cpython-39.pyc index 2522cd6a3f7c1b1c85bcaaaa8a2c7274ce8f1204..1224ec82bdbb612a57cffc1b5910ccfe25b9c2af 100644 GIT binary patch delta 42 wcmdnWw1J5yk(ZZ?0SKh3AEi&^xh!PvVin_(T9TSv5>k|yoSm7MKJlA90QUY3z5oCK delta 47 zcmdnMw3Ue`k(ZZ?0SMCAf2B<1xh!tuVin_(np)tKnpl*VnU@|@nV*wiW;*esJpeqR B51ar1 diff --git a/ultralytics/models/rtdetr/__pycache__/model.cpython-312.pyc b/ultralytics/models/rtdetr/__pycache__/model.cpython-312.pyc index 78dfea753839c44a5f7b12a8d700985999d5bdad..7713b7b4a3a9d4876f3cf34e40beb1d442099c79 100644 GIT binary patch literal 2513 zcmb_e&u<$=6rNqLe{JeeN~#7b&E#h^67L}+%1X55AL{;ka;LB>VGq#uRug;Fs zI;v0)k!TMbQG4JI0F^(38<%q6vT83y9Jr;VMHMIBn_b5)RXH)TW@g{c%zN+qzIp4P z8x5O4yYD_4{8T06H+-11axppR!Q>9%q(`{H&AtcwLzHO8MEnxv|}vV_zAB*z1uJvd=t0`>~{-FrSZ_ zDc$V0*DiE7mnfr`{nU>m+LbIyVM!=tdz+<#Qz=-`R(>dGy#1x{6y-v}5KCvWoyPsX zkm+=#oq#VmgGJzvMVt=(!H_b~8%gGkm#Dbzi6HQ!0rev^i$F-Gz}GhOu84@IH1Mwo z`svo%D!t~bA?=DNjpb(7q3eqF1yds_QaG84e&6?e5h;5(-j+V6BJyG`r1rWVZ?)Mq zCI!7Z5^_va@Z%}W5-||b5aZ)TK4_jq=-BpWup*3Ms~_o1^fp*>h}aI`rB>)rsU%(L zbeO#E?>Mm>bQ0e0y!Za{8E5(2@&_L}@M!;me6)wi@2|8B*C+`{{cMV zs#o%t7p34n;tZxnZUS=rNT8uubD0G`XRyZ08P9vGcsMu@ldnNsg7gd!n8}S^iI;ih z7U_e$_8L7)l({J?eUn>z*1cj|uX>AIH+r>8q*cu-xo6fz5m9LWDI|al5_D*H=%+7N z)W38@6B+OL90;9CZAL)at5(5tD$-I&M@bS(l}>d)XLSGyTcR&j0jM6N$v8_C+YU6- zLV|t-O%owFhye3lRZ9BXBPG%m{fsu9B|V)g*;=6&83RipqlN1j=rj{_2jclh8mbl)0xh=MV1x6K>m8>k&O3~u~r z-Ya$)WL#=l?81a3401j4l@I#Z1z<6@Lb*lo8c;M3{2dX!7&z_wfpF-h3BVAshRYeV zFu?m0GL;q(p|@2zm;nea{}V`upl>oioh1Q%Y{Y6kOac)?1rYp#l(Af)>wTSX?jO^D zdZVC#;0*^^MzNwzz<-H0F&7x&syD|l%NfCQvr1m%T4q+(=VX>*>6Hz92F(eX)m+yH zfL*s$$}A`>eK`*VWp&r}0+yz(n<9af-u(M~XBdZ~bM5M&!-Ys9p*rI@hk!vF9^|6&_clc@_6!31vHpBaSb&rpZ%{nV8knX6|Bo=a3exhbwedkH98_ znhw5`i5nWR5XBxpceW0(aG=9{;WA8lmkmg|Wo31^$a(S2>N}Gfn^p6xEj86GOCm_J zh8=v|iA3~ThCBvCir7=R0DV@&09=Ukkj9Cji7VfR?k3qkMjFQ-yt#zGlcyiN-hNy! 
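The hunks that follow document Baidu's RT-DETR interface. Per the docstrings being added, the intended top-level workflow is simply the following (a sketch assuming an installed `ultralytics` package with downloadable `rtdetr-l.pt` weights; it is not part of the patch itself):

```python
from ultralytics import RTDETR

# Load a pre-trained RT-DETR model; only *.pt, *.yaml, or *.yml sources are accepted
model = RTDETR("rtdetr-l.pt")

# Run inference; each element of results is a Results object
results = model.predict("ultralytics/assets/bus.jpg")
print(results[0].boxes.xyxy)  # detections in native image coordinates
```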
zKK#?!+9!vw-@|qd10Wd$lGE9WPDj4GGdFH|-KKPxhT-4G?#NcUUNYmFc3|$A3u9p}tU7Nov&4H<8ydvt$Htzquoq5g6E+W=*R_RRG!9&b zl7l8|92+BqV3)K-qiB`(k-n{c@-ce2i)PV&+$oz+^hL9DO81)fc#T<4bkwyLZH%y9 zHp>=6dD$j8GOO7Zow8H54ldB;;#j|?seGe!-_SnIO1fmU=dZpE>YP23II-{8F5)7h z(R^A=riJM3aEi8{(%F3a6&^$`CW}*qc&eQ2g(8PV591JlUvGzEn-*>iVRirLZpc;) z0v1J(g^{$9SQOGOrXW_t&*NZ1A251x3Jg@|9D$JyT;+n(1xq%gY??-hZ6Z#tRyHYU zdgX9-IFly4fHXq{O%0+v5H5Tr1)I_)eS|ayLh8OWGA130u_8p}0%VVGqgGSJrP88W zEQjhaVj`qPBb*0dn_NrnS$xc+t{mee@J1$ z@*xLL=RpDRZ%EK>1{`D`W0ON;#~rLMiKE9?>@ z0HlB!TR*k|vJ3NfKj0BL0I*t3M?^A;0+p}lxMV3>3vn%-DQ`Zbijz38V!0ltXpsMk z#7{|l03n+>kNpZXCb;M+Vj*n@7Z2(K#%8?=lnR+-*sfiT@F9)xF$p3r{#&YY5P^R> zjUs@I>@CeC$`rQvFDPr#LFhIsAQ>X&uc=0ev!6NSd7P~KKo8l%S3Fi=fQqS%j(!m^ zsiX_2yokSLNPEguXS<(43LQrZ+7(jvktp@Y+7k^Zfup25%*lQ!j3{g}MZEPrW1#5l;O)ejKr#Hnzh`e`Bw22mn< z)n4&F7GG`69;H5kz%|+q6S$yNN*Lk>SS)?SM`#7-qRd*I^6b3v*G{ZQNBH;Xpkj2* zaP>|6ecdmdX%Bj%Fda7tzJ)LEk@z8rmr1-%;@2cX5@Qml*8`PO*8@7xRAB5z%;m|78+P+B{>Ar!{aesP~PL?2obphl{fboW5nH!t` E2ix;-_y7O^ literal 4774 zcmZu#&vVse@+Hv6xy_8!=_UU_PyI7`+G|gm>Af?#q~BXmznnnEdn|geTlPA zj85wivl|^#*&m0+&N>Rs&p*2J(OOu9GA=rna^KIhQ7&UuyPb9-|kqVg^sEO5N#aIlbeTxw5ulG_wmXpL}?5R@u8Nh zkfvNM%>f%?uca9<-Tty}e;Mu8yxl77l14g!1>@G9J!8XqQQI~inKM?heYdD{o73n9 zUJoU6(8Sx~&YpV!@RVf7+VuH*G2BRmV)v?_57S*A-s8tOy>UpM)7{1b^>Ytsb>TK~ z5$z~@vis#u$5qX(t$Y%Y!@&U?L;6i&V%4nd9%P}suNLQ(BP^=MRtP0Q!Y9h39)X~_ zR7;ohhp|Y*QKzlkaIB%MY&s=X%W@eek#e$RG9C%?5#=P=wwwwAi>igAQ9R_rK}EDl zERcACzP09B^@8T-s;r&ek@8L`qdM8+<5f#r<0R#$IEBtz`Va=InXC9)vFfIcKXxL& zv%k0N(@STO@T8)SdAnn-!+!9^VVK21puB)!I8LeD3W5jYFs;657t&A@*D$kc&;2rr z(=-T9tyzOpheQAF zy=z3qJ~*VJZeP<%*VFTSFbT6!8ei8Hns{?_bF^FRH=mr~ z3hBK`;zzn)IcdXwv^r%$*?M#od0=c!h{v??8X5|tG>H}lth7KCC{=P`jV^HpdSHJ= zp#FwEqHhkj_Zm1kC%kiXXtdq;2;r68wb|Dy2A!LmdfdXAPwY)CaJgWFnCxEx8+5Mw z6rcSf*GJyZQyvRH%7;aWAOj$1CY9Y|BasF&?9u2=_+K*ItlD6@O>DfXUG)*bgv61= zez=v!G)FX+MV^6Sa6p@Y|_r4;2(E^?30*mFgrYws-6*7QKqHvdMtT(G#2^ z0P=m+uDE6%3U8oE5~CRn29~PfC9YzoXSPiHf772l;RxL`qtApbVpQT29k%|70_7tY z5V|(38Jk%%dtOI4DiL^AjXuH>P{%gd%$e0n2Pk9%>n&sE6gKeal+K{WYkO=PVSUAz zxrJLgkIXgWi_b8kK5G>9{YKe1FlUXjR@N^V`yRIrOnmcZO=xJAjnd=p1*2?|+_tLO zX05VSdI)n!G54@OU3bg+4dcOQr3(-6rmx)^jS%Q?jMuVol4Rq|M`nb>kdo*KCyX*N z0M+twF&Y=LD=0TRB!@iw9uI<{#ghzKgKj6oAZM!a_+b(um*}ojxb%1|qlyp_&K$WD zj&N*plqSW=K`RG&DMG3Poa9ox15K(C564-Is2Gceu2Z;uWwJZjRTh@4tO()OB{~6_ zb=n#l%Ar+D#kQm}1I311)hIg0Na|#iV|U7(Z$@V{%BHVa4t;~hR4SV^JPD{KlI&t6 z@(AYn7>lXk#ForUfY5n!2~b+LoD1VWg3ZxG(TR4Z!Spe*BO)_8{IA&; z@8XCFPK@AHTK54Tlo`qnj>$=AmE?}dY0%l^9n!?Krse3N(4cI zoIPun;M0N2-8T$o1a8^j_I)P)UDib%eF%8XZIi}ws@DJ|_IIXb%i1uKCH;=ya)*dsB2Q;!dSQP)|HXNeKn}1@w-Dfwj+Wn;i4NSg(FuSfA-+EhLKQ+d?0gFP-W``#syI{7DDNZ$HpieJj}h z3S13qe8Vmpvqg;l2}aYp zE|p%ncz_$}gF0`*&7Ypue~WtvE+$>^Qy@)zLSmf+d8}&WA_=rd(#?n$ySN&W?;Xz zQMNxO%0gC)olaDH?xul`49b%Tz(^>`wVfzM)T$y2Ii9#p;y0vjAvinC>Eq&pY+i;B z<6t23p=uE?1Vvue>iPrrqpA{5gdl6-GFil#YU$6IETArx7Fua%sfu79(!_64euP}R29x%){1zc3yl4#q)N z0bvWu<$DkYTLyR0-+8>N<^rQM{w1@HIzvjogwl)Vi&hQX_8k6}*b?*11@jflGgt8K z1@kDu|MciT7P@DrLR@$S^B~0+UHpn)ka!tFxk1452#Nb!)bx82eAaUR06}={tv=b@ou^Ft-c;R}D1fO;r#sH` zX1eHiE6@v None: - if model and model.split('.')[-1] not in ('pt', 'yaml', 'yml'): - raise NotImplementedError('RT-DETR only supports creating from *.pt file or *.yaml file.') - super().__init__(model=model, task='detect') + def __init__(self, model="rtdetr-l.pt") -> None: + """ + Initializes the RT-DETR model with the given pre-trained model file. Supports .pt and .yaml formats. + + Args: + model (str): Path to the pre-trained model. Defaults to 'rtdetr-l.pt'. 
+ + Raises: + NotImplementedError: If the model file extension is not 'pt', 'yaml', or 'yml'. + """ + super().__init__(model=model, task="detect") @property - def task_map(self): + def task_map(self) -> dict: + """ + Returns a task map for RT-DETR, associating tasks with corresponding Ultralytics classes. + + Returns: + dict: A dictionary mapping task names to Ultralytics task classes for the RT-DETR model. + """ return { - 'detect': { - 'predictor': RTDETRPredictor, - 'validator': RTDETRValidator, - 'trainer': RTDETRTrainer, - 'model': RTDETRDetectionModel}} + "detect": { + "predictor": RTDETRPredictor, + "validator": RTDETRValidator, + "trainer": RTDETRTrainer, + "model": RTDETRDetectionModel, + } + } diff --git a/ultralytics/models/rtdetr/predict.py b/ultralytics/models/rtdetr/predict.py index 33d5d7a..7fc918b 100644 --- a/ultralytics/models/rtdetr/predict.py +++ b/ultralytics/models/rtdetr/predict.py @@ -10,7 +10,11 @@ from ultralytics.utils import ops class RTDETRPredictor(BasePredictor): """ - A class extending the BasePredictor class for prediction based on an RT-DETR detection model. + RT-DETR (Real-Time Detection Transformer) Predictor extending the BasePredictor class for making predictions using + Baidu's RT-DETR model. + + This class leverages the power of Vision Transformers to provide real-time object detection while maintaining + high accuracy. It supports key features like efficient hybrid encoding and IoU-aware query selection. Example: ```python @@ -21,10 +25,30 @@ class RTDETRPredictor(BasePredictor): predictor = RTDETRPredictor(overrides=args) predictor.predict_cli() ``` + + Attributes: + imgsz (int): Image size for inference (must be square and scale-filled). + args (dict): Argument overrides for the predictor. """ def postprocess(self, preds, img, orig_imgs): - """Postprocess predictions and returns a list of Results objects.""" + """ + Postprocess the raw predictions from the model to generate bounding boxes and confidence scores. + + The method filters detections based on confidence and class if specified in `self.args`. + + Args: + preds (list): List of [predictions, extra] from the model. + img (torch.Tensor): Processed input images. + orig_imgs (list or torch.Tensor): Original, unprocessed images. + + Returns: + (list[Results]): A list of Results objects containing the post-processed bounding boxes, confidence scores, + and class labels. + """ + if not isinstance(preds, (list, tuple)): # list for PyTorch inference but list[0] Tensor for export inference + preds = [preds, None] + nd = preds[0].shape[-1] bboxes, scores = preds[0].split((4, nd - 4), dim=-1) @@ -48,15 +72,15 @@ class RTDETRPredictor(BasePredictor): return results def pre_transform(self, im): - """Pre-transform input image before inference. + """ + Pre-transforms the input images before feeding them into the model for inference. The input images are + letterboxed to ensure a square aspect ratio and scale-filled. The size must be square(640) and scaleFilled. Args: - im (List(np.ndarray)): (N, 3, h, w) for tensor, [(h, w, 3) x N] for list. - - Notes: The size must be square(640) and scaleFilled. + im (list[np.ndarray] |torch.Tensor): Input images of shape (N,3,h,w) for tensor, [(h,w,3) x N] for list. Returns: - (list): A list of transformed imgs. + (list): List of pre-transformed images ready for model inference. 
""" letterbox = LetterBox(self.imgsz, auto=False, scaleFill=True) return [letterbox(image=x) for x in im] diff --git a/ultralytics/models/rtdetr/train.py b/ultralytics/models/rtdetr/train.py index 1e58668..10a8f9b 100644 --- a/ultralytics/models/rtdetr/train.py +++ b/ultralytics/models/rtdetr/train.py @@ -7,16 +7,17 @@ import torch from ultralytics.models.yolo.detect import DetectionTrainer from ultralytics.nn.tasks import RTDETRDetectionModel from ultralytics.utils import RANK, colorstr - from .val import RTDETRDataset, RTDETRValidator class RTDETRTrainer(DetectionTrainer): """ - A class extending the DetectionTrainer class for training based on an RT-DETR detection model. + Trainer class for the RT-DETR model developed by Baidu for real-time object detection. Extends the DetectionTrainer + class for YOLO to adapt to the specific features and architecture of RT-DETR. This model leverages Vision + Transformers and has capabilities like IoU-aware query selection and adaptable inference speed. Notes: - - F.grid_sample used in rt-detr does not support the `deterministic=True` argument. + - F.grid_sample used in RT-DETR does not support the `deterministic=True` argument. - AMP training can lead to NaN outputs and may produce errors during bipartite graph matching. Example: @@ -30,43 +31,71 @@ class RTDETRTrainer(DetectionTrainer): """ def get_model(self, cfg=None, weights=None, verbose=True): - """Return a YOLO detection model.""" - model = RTDETRDetectionModel(cfg, nc=self.data['nc'], verbose=verbose and RANK == -1) + """ + Initialize and return an RT-DETR model for object detection tasks. + + Args: + cfg (dict, optional): Model configuration. Defaults to None. + weights (str, optional): Path to pre-trained model weights. Defaults to None. + verbose (bool): Verbose logging if True. Defaults to True. + + Returns: + (RTDETRDetectionModel): Initialized model. + """ + model = RTDETRDetectionModel(cfg, nc=self.data["nc"], verbose=verbose and RANK == -1) if weights: model.load(weights) return model - def build_dataset(self, img_path, mode='val', batch=None): - """Build RTDETR Dataset + def build_dataset(self, img_path, mode="val", batch=None): + """ + Build and return an RT-DETR dataset for training or validation. Args: img_path (str): Path to the folder containing images. - mode (str): `train` mode or `val` mode, users are able to customize different augmentations for each mode. - batch (int, optional): Size of batches, this is for `rect`. Defaults to None. + mode (str): Dataset mode, either 'train' or 'val'. + batch (int, optional): Batch size for rectangle training. Defaults to None. + + Returns: + (RTDETRDataset): Dataset object for the specific mode. """ return RTDETRDataset( img_path=img_path, imgsz=self.args.imgsz, batch_size=batch, - augment=mode == 'train', # no augmentation + augment=mode == "train", hyp=self.args, - rect=False, # no rect + rect=False, cache=self.args.cache or None, - prefix=colorstr(f'{mode}: '), - data=self.data) + prefix=colorstr(f"{mode}: "), + data=self.data, + ) def get_validator(self): - """Returns a DetectionValidator for RTDETR model validation.""" - self.loss_names = 'giou_loss', 'cls_loss', 'l1_loss' + """ + Returns a DetectionValidator suitable for RT-DETR model validation. + + Returns: + (RTDETRValidator): Validator object for model validation. 
+ """ + self.loss_names = "giou_loss", "cls_loss", "l1_loss" return RTDETRValidator(self.test_loader, save_dir=self.save_dir, args=copy(self.args)) def preprocess_batch(self, batch): - """Preprocesses a batch of images by scaling and converting to float.""" + """ + Preprocess a batch of images. Scales and converts the images to float format. + + Args: + batch (dict): Dictionary containing a batch of images, bboxes, and labels. + + Returns: + (dict): Preprocessed batch. + """ batch = super().preprocess_batch(batch) - bs = len(batch['img']) - batch_idx = batch['batch_idx'] + bs = len(batch["img"]) + batch_idx = batch["batch_idx"] gt_bbox, gt_class = [], [] for i in range(bs): - gt_bbox.append(batch['bboxes'][batch_idx == i].to(batch_idx.device)) - gt_class.append(batch['cls'][batch_idx == i].to(device=batch_idx.device, dtype=torch.long)) + gt_bbox.append(batch["bboxes"][batch_idx == i].to(batch_idx.device)) + gt_class.append(batch["cls"][batch_idx == i].to(device=batch_idx.device, dtype=torch.long)) return batch diff --git a/ultralytics/models/rtdetr/val.py b/ultralytics/models/rtdetr/val.py index 9b984be..88bb0ae 100644 --- a/ultralytics/models/rtdetr/val.py +++ b/ultralytics/models/rtdetr/val.py @@ -1,7 +1,5 @@ # Ultralytics YOLO 🚀, AGPL-3.0 license -from pathlib import Path - import torch from ultralytics.data import YOLODataset @@ -9,16 +7,22 @@ from ultralytics.data.augment import Compose, Format, v8_transforms from ultralytics.models.yolo.detect import DetectionValidator from ultralytics.utils import colorstr, ops -__all__ = 'RTDETRValidator', # tuple or list +__all__ = ("RTDETRValidator",) # tuple or list -# TODO: Temporarily RT-DETR does not need padding. class RTDETRDataset(YOLODataset): + """ + Real-Time DEtection and TRacking (RT-DETR) dataset class extending the base YOLODataset class. + + This specialized dataset class is designed for use with the RT-DETR object detection model and is optimized for + real-time detection and tracking tasks. + """ def __init__(self, *args, data=None, **kwargs): - super().__init__(*args, data=data, use_segments=False, use_keypoints=False, **kwargs) + """Initialize the RTDETRDataset class by inheriting from the YOLODataset class.""" + super().__init__(*args, data=data, **kwargs) - # NOTE: add stretch version load_image for rtdetr mosaic + # NOTE: add stretch version load_image for RTDETR mosaic def load_image(self, i, rect_mode=False): """Loads 1 image from dataset index 'i', returns (im, resized hw).""" return super().load_image(i=i, rect_mode=rect_mode) @@ -33,19 +37,26 @@ class RTDETRDataset(YOLODataset): # transforms = Compose([LetterBox(new_shape=(self.imgsz, self.imgsz), auto=False, scaleFill=True)]) transforms = Compose([]) transforms.append( - Format(bbox_format='xywh', - normalize=True, - return_mask=self.use_segments, - return_keypoint=self.use_keypoints, - batch_idx=True, - mask_ratio=hyp.mask_ratio, - mask_overlap=hyp.overlap_mask)) + Format( + bbox_format="xywh", + normalize=True, + return_mask=self.use_segments, + return_keypoint=self.use_keypoints, + batch_idx=True, + mask_ratio=hyp.mask_ratio, + mask_overlap=hyp.overlap_mask, + ) + ) return transforms class RTDETRValidator(DetectionValidator): """ - A class extending the DetectionValidator class for validation based on an RT-DETR detection model. + RTDETRValidator extends the DetectionValidator class to provide validation capabilities specifically tailored for + the RT-DETR (Real-Time DETR) object detection model. 
+ + The class allows building of an RTDETR-specific dataset for validation, applies Non-maximum suppression for + post-processing, and updates evaluation metrics accordingly. Example: ```python @@ -55,9 +66,12 @@ class RTDETRValidator(DetectionValidator): validator = RTDETRValidator(args=args) validator() ``` + + Note: + For further details on the attributes and methods, refer to the parent DetectionValidator class. """ - def build_dataset(self, img_path, mode='val', batch=None): + def build_dataset(self, img_path, mode="val", batch=None): """ Build an RTDETR Dataset. @@ -74,11 +88,15 @@ class RTDETRValidator(DetectionValidator): hyp=self.args, rect=False, # no rect cache=self.args.cache or None, - prefix=colorstr(f'{mode}: '), - data=self.data) + prefix=colorstr(f"{mode}: "), + data=self.data, + ) def postprocess(self, preds): """Apply Non-maximum suppression to prediction outputs.""" + if not isinstance(preds, (list, tuple)): # list for PyTorch inference but list[0] Tensor for export inference + preds = [preds, None] + bs, _, nd = preds[0].shape bboxes, scores = preds[0].split((4, nd - 4), dim=-1) bboxes *= self.args.imgsz @@ -86,56 +104,32 @@ class RTDETRValidator(DetectionValidator): for i, bbox in enumerate(bboxes): # (300, 4) bbox = ops.xywh2xyxy(bbox) score, cls = scores[i].max(-1) # (300, ) - # Do not need threshold for evaluation as only got 300 boxes here. + # Do not need threshold for evaluation as only got 300 boxes here # idx = score > self.args.conf pred = torch.cat([bbox, score[..., None], cls[..., None]], dim=-1) # filter - # sort by confidence to correctly get internal metrics. + # Sort by confidence to correctly get internal metrics pred = pred[score.argsort(descending=True)] outputs[i] = pred # [idx] return outputs - def update_metrics(self, preds, batch): - """Metrics.""" - for si, pred in enumerate(preds): - idx = batch['batch_idx'] == si - cls = batch['cls'][idx] - bbox = batch['bboxes'][idx] - nl, npr = cls.shape[0], pred.shape[0] # number of labels, predictions - shape = batch['ori_shape'][si] - correct_bboxes = torch.zeros(npr, self.niou, dtype=torch.bool, device=self.device) # init - self.seen += 1 + def _prepare_batch(self, si, batch): + """Prepares a batch for training or inference by applying transformations.""" + idx = batch["batch_idx"] == si + cls = batch["cls"][idx].squeeze(-1) + bbox = batch["bboxes"][idx] + ori_shape = batch["ori_shape"][si] + imgsz = batch["img"].shape[2:] + ratio_pad = batch["ratio_pad"][si] + if len(cls): + bbox = ops.xywh2xyxy(bbox) # target boxes + bbox[..., [0, 2]] *= ori_shape[1] # native-space pred + bbox[..., [1, 3]] *= ori_shape[0] # native-space pred + return dict(cls=cls, bbox=bbox, ori_shape=ori_shape, imgsz=imgsz, ratio_pad=ratio_pad) - if npr == 0: - if nl: - self.stats.append((correct_bboxes, *torch.zeros((2, 0), device=self.device), cls.squeeze(-1))) - if self.args.plots: - self.confusion_matrix.process_batch(detections=None, labels=cls.squeeze(-1)) - continue - - # Predictions - if self.args.single_cls: - pred[:, 5] = 0 - predn = pred.clone() - predn[..., [0, 2]] *= shape[1] / self.args.imgsz # native-space pred - predn[..., [1, 3]] *= shape[0] / self.args.imgsz # native-space pred - - # Evaluate - if nl: - tbox = ops.xywh2xyxy(bbox) # target boxes - tbox[..., [0, 2]] *= shape[1] # native-space pred - tbox[..., [1, 3]] *= shape[0] # native-space pred - labelsn = torch.cat((cls, tbox), 1) # native-space labels - # NOTE: To get correct metrics, the inputs of `_process_batch` should always be float32 type. 
- correct_bboxes = self._process_batch(predn.float(), labelsn) - # TODO: maybe remove these `self.` arguments as they already are member variable - if self.args.plots: - self.confusion_matrix.process_batch(predn, labelsn) - self.stats.append((correct_bboxes, pred[:, 4], pred[:, 5], cls.squeeze(-1))) # (conf, pcls, tcls) - - # Save - if self.args.save_json: - self.pred_to_json(predn, batch['im_file'][si]) - if self.args.save_txt: - file = self.save_dir / 'labels' / f'{Path(batch["im_file"][si]).stem}.txt' - self.save_one_txt(predn, self.args.save_conf, shape, file) + def _prepare_pred(self, pred, pbatch): + """Prepares and returns a batch with transformed bounding boxes and class labels.""" + predn = pred.clone() + predn[..., [0, 2]] *= pbatch["ori_shape"][1] / self.args.imgsz # native-space pred + predn[..., [1, 3]] *= pbatch["ori_shape"][0] / self.args.imgsz # native-space pred + return predn.float() diff --git a/ultralytics/models/sam/__init__.py b/ultralytics/models/sam/__init__.py index 35f4efa..8701fcc 100644 --- a/ultralytics/models/sam/__init__.py +++ b/ultralytics/models/sam/__init__.py @@ -3,6 +3,4 @@ from .model import SAM from .predict import Predictor -# from .build import build_sam - -__all__ = 'SAM', 'Predictor' # tuple or list +__all__ = "SAM", "Predictor" # tuple or list diff --git a/ultralytics/models/sam/__pycache__/__init__.cpython-312.pyc b/ultralytics/models/sam/__pycache__/__init__.cpython-312.pyc index c91bc2dd52764a38c377219517508bbb38061d66..8b41d71f676c0e6ad3ccf9901150c726d045b6e1 100644 GIT binary patch delta 79 zcmeBS>Sp3S&CAQh00bA}AEi&2$U8~OMn5AzH&ws9FkL?-HMJlwwM4%%KPSJ;&|u;Y gb7>)<(htmxjErB|7#PLxGRQsPl4)cw;sgo<0Ld2?lK=n! delta 80 zcmeBX>S5wN&CAQh00ebzi_$hs+-0k(ZZ?0SMCAf2C}g$U9ry(#0yqB{j9cCpEDsFEcMarZPV#zsz*vE=xuh E0El=HVE_OC diff --git a/ultralytics/models/sam/__pycache__/amg.cpython-312.pyc b/ultralytics/models/sam/__pycache__/amg.cpython-312.pyc index ca3781858b67bc5730740c0c21c64d7d3cadfe1e..06b5fc46a5f3cd12627a990674705c1176a6804a 100644 GIT binary patch delta 1039 zcmYk3QEb~p7{`4{owZ49x;8CSx<%J^jEZ$hw-uH`Nm;v0w9`V{q)4Gz8z*sFlhp2P zH&v#IYzT=J3CQFJG6`wv(=ut&Hu-@d?IDwf)CnZ+mP{#o0U>zzlu2kWfbHu^ z-S>TW=l|VZ+Fho9q$oR~H?;iw>OY?Q^s3E$%C{!0v%am1tG*P=Zt$$^%Ly4FdwfLs z%61mCt50puEWoYY>v#+fD3|QXk>GTVo_QFlB(F(@k z2&QLh?K97<1nqhGjGj>`$5lWwbATX}g7Z|j-468@S!1CjGYKxk$vNd0=ad4efxH=F z_!P_tTQJLHHd)Er$`1)LE4^*Tjq+pg1D=KJjFg6)1ewehlapXl;AMv6*%ah>iA%9i z)(;W%LG=EwFwA82N_Jfkb1;#E8zP%!d70x^;iiY@*(57Tj2=odk_=@*?_zSz%klDC zqslz>X&&nSNnhy06+-JJdi#nz@=ajR1nzmS;R<3`u$rjPE;MEr@$4dU1#7NY!xhJ_ zIAW6Z^hP6{#px_^#cQsuE9dvFFUK??3i(Z60u0&?n~p7-0pXR3E_ zk_08?;?N0q>nZ11c~j)!;>fW1U4Scss6H6_22e#~(_hU^U(jm&jXM_5r%=ZZ<=csQ z@F93^wS(b3N9Wh$+vA$agRhsZJ7X>Ok5|IqceHpqWa@9E{qxw>Sd-N2G&c#(fuf`l zhL*WXn~`1}-E|@VdF%}}&2_Dl0t2OmTd|#(M(9|5(~$fCiZSLKt)v?(B3==jMpT0~ zFbPWC8ewR;>#qB@yJ@o4WdTW0p3(?ID<@lu<|6+=^a5InHEmqj@K}Gq)F;oT1c7q fKIk6=K~UVhdHKd=jWGBg`JMB=^M%P`zwYwCa+6z{ delta 861 zcmXYvO>7%Q6vyYS9n;wBNocZ3o2>ELe6%L19fveYkstvl#z88INg7qIo8pg6Y?6M& z>(CFPByi$JU@J8sP;EHG2PA6kA+E&191xDE)SWu5tA5Z(a6sZRl`GMnV0XhD=Kp*1 z=DqpPXey_EoW{S|?N+3X<=}TSUptF<*0R6fmpXe|^qpUr^(Doun8;=`vBX^Z%&h#m zB?wmKj^!Q$opP~p3--$&TN6Ejcd9t_b6+X&nY89#yY$7IIe%^;f4J~2arfV98YuS> z9IEaKJvJfy!n1ogq+r`8z+C!OzRV8n6(&=nAZ26m^n5yd(JBan{5u;k8{o3Ln}yh! 
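`RTDETRValidator.postprocess` above keeps the generic NMS docstring, but its body never thresholds or suppresses anything: RT-DETR emits a fixed set of 300 queries, so each query is decoded, arg-maxed over classes, and sorted by confidence, and `_prepare_pred` then rescales boxes from the square inference size back to the original image. A standalone sketch of that pipeline (random tensors and the local `xywh2xyxy` helper stand in for the real model output and `ultralytics.utils.ops`):

```python
import torch


def xywh2xyxy(x: torch.Tensor) -> torch.Tensor:
    """Convert (cx, cy, w, h) center boxes to (x1, y1, x2, y2) corner boxes."""
    y = torch.empty_like(x)
    y[..., 0] = x[..., 0] - x[..., 2] / 2
    y[..., 1] = x[..., 1] - x[..., 3] / 2
    y[..., 2] = x[..., 0] + x[..., 2] / 2
    y[..., 3] = x[..., 1] + x[..., 3] / 2
    return y


bs, nq, nc, imgsz = 2, 300, 80, 640
raw = torch.rand(bs, nq, 4 + nc)  # hypothetical model output: 300 queries per image
bboxes, scores = raw.split((4, nc), dim=-1)
bboxes = bboxes * imgsz  # normalized xywh -> inference-space pixels

outputs = []
for i, bbox in enumerate(bboxes):  # one image at a time
    bbox = xywh2xyxy(bbox)  # (300, 4)
    score, cls = scores[i].max(-1)  # best class per query; no NMS or threshold needed
    pred = torch.cat([bbox, score[..., None], cls[..., None]], dim=-1)
    outputs.append(pred[score.argsort(descending=True)])  # highest confidence first

# _prepare_pred-style rescale from the square inference space to the original image
ori_shape = (480, 720)  # (height, width), made up for the demo
predn = outputs[0].clone()
predn[..., [0, 2]] *= ori_shape[1] / imgsz  # x coords -> native width
predn[..., [1, 3]] *= ori_shape[0] / imgsz  # y coords -> native height
print(predn.shape)  # torch.Size([300, 6]) as (x1, y1, x2, y2, conf, cls)
```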
[GIT binary patch data for compiled __pycache__ *.pyc files omitted]
diff --git a/ultralytics/models/sam/amg.py b/ultralytics/models/sam/amg.py
--- a/ultralytics/models/sam/amg.py
+++ b/ultralytics/models/sam/amg.py
-def is_box_near_crop_edge(boxes: torch.Tensor,
-                          crop_box: List[int],
-                          orig_box: List[int],
-                          atol: float = 20.0) -> torch.Tensor:
+def is_box_near_crop_edge(
+    boxes: torch.Tensor, crop_box: List[int], orig_box: List[int], atol: float = 20.0
+) -> torch.Tensor:
     """Return a boolean tensor indicating if boxes are near the crop edge."""
     crop_box_torch = torch.as_tensor(crop_box, dtype=torch.float, device=boxes.device)
     orig_box_torch = torch.as_tensor(orig_box, dtype=torch.float, device=boxes.device)
@@ -24,23 +23,25 @@ def is_box_near_crop_edge(boxes: torch.Tensor,
 def batch_iterator(batch_size: int, *args) -> Generator[List[Any], None, None]:
     """Yield batches of data from the input arguments."""
-    assert args and all(len(a) == len(args[0]) for a in args), 'Batched iteration must have same-size inputs.'
+    assert args and all(len(a) == len(args[0]) for a in args), "Batched iteration must have same-size inputs."
     n_batches = len(args[0]) // batch_size + int(len(args[0]) % batch_size != 0)
     for b in range(n_batches):
-        yield [arg[b * batch_size:(b + 1) * batch_size] for arg in args]
+        yield [arg[b * batch_size : (b + 1) * batch_size] for arg in args]
 
 
 def calculate_stability_score(masks: torch.Tensor, mask_threshold: float, threshold_offset: float) -> torch.Tensor:
     """
-    Computes the stability score for a batch of masks. The stability
-    score is the IoU between the binary masks obtained by thresholding
-    the predicted mask logits at high and low values.
+    Computes the stability score for a batch of masks.
+
+    The stability score is the IoU between the binary masks obtained by thresholding the predicted mask logits at high
+    and low values.
+
+    Notes:
+        - One mask is always contained inside the other.
+        - Save memory by preventing unnecessary cast to torch.int64
     """
-    # One mask is always contained inside the other.
-    # Save memory by preventing unnecessary cast to torch.int64
-    intersections = ((masks > (mask_threshold + threshold_offset)).sum(-1, dtype=torch.int16).sum(-1,
-                                                                                                   dtype=torch.int32))
-    unions = ((masks > (mask_threshold - threshold_offset)).sum(-1, dtype=torch.int16).sum(-1, dtype=torch.int32))
+    intersections = (masks > (mask_threshold + threshold_offset)).sum(-1, dtype=torch.int16).sum(-1, dtype=torch.int32)
+    unions = (masks > (mask_threshold - threshold_offset)).sum(-1, dtype=torch.int16).sum(-1, dtype=torch.int32)
     return intersections / unions
 
 
@@ -55,12 +56,17 @@ def build_point_grid(n_per_side: int) -> np.ndarray:
 
 def build_all_layer_point_grids(n_per_side: int, n_layers: int, scale_per_layer: int) -> List[np.ndarray]:
     """Generate point grids for all crop layers."""
-    return [build_point_grid(int(n_per_side / (scale_per_layer ** i))) for i in range(n_layers + 1)]
+    return [build_point_grid(int(n_per_side / (scale_per_layer**i))) for i in range(n_layers + 1)]
 
 
-def generate_crop_boxes(im_size: Tuple[int, ...], n_layers: int,
-                        overlap_ratio: float) -> Tuple[List[List[int]], List[int]]:
-    """Generates a list of crop boxes of different sizes. Each layer has (2**i)**2 boxes for the ith layer."""
+def generate_crop_boxes(
+    im_size: Tuple[int, ...], n_layers: int, overlap_ratio: float
+) -> Tuple[List[List[int]], List[int]]:
+    """
+    Generates a list of crop boxes of different sizes.
+
+    Each layer has (2**i)**2 boxes for the ith layer.
+    """
     crop_boxes, layer_idxs = [], []
     im_h, im_w = im_size
     short_side = min(im_h, im_w)
@@ -127,8 +133,8 @@ def remove_small_regions(mask: np.ndarray, area_thresh: float, mode: str) -> Tuple[np.ndarray, bool]:
     """Remove small disconnected regions or holes in a mask, returning the mask and a modification indicator."""
     import cv2  # type: ignore
 
-    assert mode in {'holes', 'islands'}
-    correct_holes = mode == 'holes'
+    assert mode in {"holes", "islands"}
+    correct_holes = mode == "holes"
     working_mask = (correct_holes ^ mask).astype(np.uint8)
     n_labels, regions, stats, _ = cv2.connectedComponentsWithStats(working_mask, 8)
     sizes = stats[:, -1][1:]  # Row 0 is background label
@@ -145,8 +151,9 @@ def remove_small_regions(mask: np.ndarray, area_thresh: float, mode: str) -> Tuple[np.ndarray, bool]:
 
 def batched_mask_to_box(masks: torch.Tensor) -> torch.Tensor:
     """
-    Calculates boxes in XYXY format around masks. Return [0,0,0,0] for
-    an empty mask. For input shape C1xC2x...xHxW, the output shape is C1xC2x...x4.
+    Calculates boxes in XYXY format around masks.
+
+    Return [0,0,0,0] for an empty mask. For input shape C1xC2x...xHxW, the output shape is C1xC2x...x4.
     """
     # torch.max below raises an error on empty inputs, just skip in this case
     if torch.numel(masks) == 0:
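[Editor's illustration, not part of the patch: a minimal usage sketch of the amg.py helpers reformatted above. It assumes only the import path ultralytics/models/sam/amg.py shown in this diff; shapes and values are arbitrary.]

import torch

from ultralytics.models.sam.amg import batch_iterator, calculate_stability_score

# batch_iterator slices all of its array arguments identically: 10 items in
# batch_size=4 chunks yields batches of 4, 4, and 2.
points = torch.rand(10, 2)
labels = torch.ones(10)
for pts, lbls in batch_iterator(4, points, labels):
    print(pts.shape, lbls.shape)

# calculate_stability_score thresholds the same logits at mask_threshold +/- offset
# and returns the IoU of the two resulting binary masks, one score per mask.
logits = torch.randn(3, 256, 256)  # three hypothetical mask logit maps
scores = calculate_stability_score(logits, mask_threshold=0.0, threshold_offset=1.0)
print(scores.shape)  # torch.Size([3])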
""" # torch.max below raises an error on empty inputs, just skip in this case if torch.numel(masks) == 0: diff --git a/ultralytics/models/sam/build.py b/ultralytics/models/sam/build.py index 21da265..266587e 100644 --- a/ultralytics/models/sam/build.py +++ b/ultralytics/models/sam/build.py @@ -11,7 +11,6 @@ from functools import partial import torch from ultralytics.utils.downloads import attempt_download_asset - from .modules.decoders import MaskDecoder from .modules.encoders import ImageEncoderViT, PromptEncoder from .modules.sam import Sam @@ -64,46 +63,47 @@ def build_mobile_sam(checkpoint=None): ) -def _build_sam(encoder_embed_dim, - encoder_depth, - encoder_num_heads, - encoder_global_attn_indexes, - checkpoint=None, - mobile_sam=False): +def _build_sam( + encoder_embed_dim, encoder_depth, encoder_num_heads, encoder_global_attn_indexes, checkpoint=None, mobile_sam=False +): """Builds the selected SAM model architecture.""" prompt_embed_dim = 256 image_size = 1024 vit_patch_size = 16 image_embedding_size = image_size // vit_patch_size - image_encoder = (TinyViT( - img_size=1024, - in_chans=3, - num_classes=1000, - embed_dims=encoder_embed_dim, - depths=encoder_depth, - num_heads=encoder_num_heads, - window_sizes=[7, 7, 14, 7], - mlp_ratio=4.0, - drop_rate=0.0, - drop_path_rate=0.0, - use_checkpoint=False, - mbconv_expand_ratio=4.0, - local_conv_size=3, - layer_lr_decay=0.8, - ) if mobile_sam else ImageEncoderViT( - depth=encoder_depth, - embed_dim=encoder_embed_dim, - img_size=image_size, - mlp_ratio=4, - norm_layer=partial(torch.nn.LayerNorm, eps=1e-6), - num_heads=encoder_num_heads, - patch_size=vit_patch_size, - qkv_bias=True, - use_rel_pos=True, - global_attn_indexes=encoder_global_attn_indexes, - window_size=14, - out_chans=prompt_embed_dim, - )) + image_encoder = ( + TinyViT( + img_size=1024, + in_chans=3, + num_classes=1000, + embed_dims=encoder_embed_dim, + depths=encoder_depth, + num_heads=encoder_num_heads, + window_sizes=[7, 7, 14, 7], + mlp_ratio=4.0, + drop_rate=0.0, + drop_path_rate=0.0, + use_checkpoint=False, + mbconv_expand_ratio=4.0, + local_conv_size=3, + layer_lr_decay=0.8, + ) + if mobile_sam + else ImageEncoderViT( + depth=encoder_depth, + embed_dim=encoder_embed_dim, + img_size=image_size, + mlp_ratio=4, + norm_layer=partial(torch.nn.LayerNorm, eps=1e-6), + num_heads=encoder_num_heads, + patch_size=vit_patch_size, + qkv_bias=True, + use_rel_pos=True, + global_attn_indexes=encoder_global_attn_indexes, + window_size=14, + out_chans=prompt_embed_dim, + ) + ) sam = Sam( image_encoder=image_encoder, prompt_encoder=PromptEncoder( @@ -129,7 +129,7 @@ def _build_sam(encoder_embed_dim, ) if checkpoint is not None: checkpoint = attempt_download_asset(checkpoint) - with open(checkpoint, 'rb') as f: + with open(checkpoint, "rb") as f: state_dict = torch.load(f) sam.load_state_dict(state_dict) sam.eval() @@ -139,20 +139,22 @@ def _build_sam(encoder_embed_dim, sam_model_map = { - 'sam_h.pt': build_sam_vit_h, - 'sam_l.pt': build_sam_vit_l, - 'sam_b.pt': build_sam_vit_b, - 'mobile_sam.pt': build_mobile_sam, } + "sam_h.pt": build_sam_vit_h, + "sam_l.pt": build_sam_vit_l, + "sam_b.pt": build_sam_vit_b, + "mobile_sam.pt": build_mobile_sam, +} -def build_sam(ckpt='sam_b.pt'): +def build_sam(ckpt="sam_b.pt"): """Build a SAM model specified by ckpt.""" model_builder = None + ckpt = str(ckpt) # to allow Path ckpt types for k in sam_model_map.keys(): if ckpt.endswith(k): model_builder = sam_model_map.get(k) if not model_builder: - raise FileNotFoundError(f'{ckpt} is not a supported sam 
diff --git a/ultralytics/models/sam/model.py b/ultralytics/models/sam/model.py
index 2ca3501..cb12bc7 100644
--- a/ultralytics/models/sam/model.py
+++ b/ultralytics/models/sam/model.py
@@ -1,51 +1,114 @@
 # Ultralytics YOLO 🚀, AGPL-3.0 license
 """
-SAM model interface
+SAM model interface.
+
+This module provides an interface to the Segment Anything Model (SAM) from Ultralytics, designed for real-time image
+segmentation tasks. The SAM model allows for promptable segmentation with unparalleled versatility in image analysis,
+and has been trained on the SA-1B dataset. It features zero-shot performance capabilities, enabling it to adapt to new
+image distributions and tasks without prior knowledge.
+
+Key Features:
+    - Promptable segmentation
+    - Real-time performance
+    - Zero-shot transfer capabilities
+    - Trained on SA-1B dataset
 """
 
 from pathlib import Path
 
 from ultralytics.engine.model import Model
 from ultralytics.utils.torch_utils import model_info
-
 from .build import build_sam
 from .predict import Predictor
 
 
 class SAM(Model):
     """
-    SAM model interface.
+    SAM (Segment Anything Model) interface class.
+
+    SAM is designed for promptable real-time image segmentation. It can be used with a variety of prompts such as
+    bounding boxes, points, or labels. The model has capabilities for zero-shot performance and is trained on the SA-1B
+    dataset.
     """
 
-    def __init__(self, model='sam_b.pt') -> None:
-        if model and Path(model).suffix not in ('.pt', '.pth'):
-            raise NotImplementedError('SAM prediction requires pre-trained *.pt or *.pth model.')
-        super().__init__(model=model, task='segment')
+    def __init__(self, model="sam_b.pt") -> None:
+        """
+        Initializes the SAM model with a pre-trained model file.
+
+        Args:
+            model (str): Path to the pre-trained SAM model file. File should have a .pt or .pth extension.
+
+        Raises:
+            NotImplementedError: If the model file extension is not .pt or .pth.
+        """
+        if model and Path(model).suffix not in (".pt", ".pth"):
+            raise NotImplementedError("SAM prediction requires pre-trained *.pt or *.pth model.")
+        super().__init__(model=model, task="segment")
 
     def _load(self, weights: str, task=None):
+        """
+        Loads the specified weights into the SAM model.
+
+        Args:
+            weights (str): Path to the weights file.
+            task (str, optional): Task name. Defaults to None.
+        """
         self.model = build_sam(weights)
 
     def predict(self, source, stream=False, bboxes=None, points=None, labels=None, **kwargs):
-        """Predicts and returns segmentation masks for given image or video source."""
-        overrides = dict(conf=0.25, task='segment', mode='predict', imgsz=1024)
+        """
+        Performs segmentation prediction on the given image or video source.
+
+        Args:
+            source (str): Path to the image or video file, or a PIL.Image object, or a numpy.ndarray object.
+            stream (bool, optional): If True, enables real-time streaming. Defaults to False.
+            bboxes (list, optional): List of bounding box coordinates for prompted segmentation. Defaults to None.
+            points (list, optional): List of points for prompted segmentation. Defaults to None.
+            labels (list, optional): List of labels for prompted segmentation. Defaults to None.
+
+        Returns:
+            (list): The model predictions.
+ """ + overrides = dict(conf=0.25, task="segment", mode="predict", imgsz=1024) kwargs.update(overrides) prompts = dict(bboxes=bboxes, points=points, labels=labels) return super().predict(source, stream, prompts=prompts, **kwargs) def __call__(self, source=None, stream=False, bboxes=None, points=None, labels=None, **kwargs): - """Calls the 'predict' function with given arguments to perform object detection.""" + """ + Alias for the 'predict' method. + + Args: + source (str): Path to the image or video file, or a PIL.Image object, or a numpy.ndarray object. + stream (bool, optional): If True, enables real-time streaming. Defaults to False. + bboxes (list, optional): List of bounding box coordinates for prompted segmentation. Defaults to None. + points (list, optional): List of points for prompted segmentation. Defaults to None. + labels (list, optional): List of labels for prompted segmentation. Defaults to None. + + Returns: + (list): The model predictions. + """ return self.predict(source, stream, bboxes, points, labels, **kwargs) def info(self, detailed=False, verbose=True): """ - Logs model info. + Logs information about the SAM model. Args: - detailed (bool): Show detailed information about model. - verbose (bool): Controls verbosity. + detailed (bool, optional): If True, displays detailed information about the model. Defaults to False. + verbose (bool, optional): If True, displays information on the console. Defaults to True. + + Returns: + (tuple): A tuple containing the model's information. """ return model_info(self.model, detailed=detailed, verbose=verbose) @property def task_map(self): - return {'segment': {'predictor': Predictor}} + """ + Provides a mapping from the 'segment' task to its corresponding 'Predictor'. + + Returns: + (dict): A dictionary mapping the 'segment' task to its corresponding 'Predictor'. 
+        """
+        return {"segment": {"predictor": Predictor}}
diff --git a/ultralytics/models/sam/modules/__pycache__/__init__.cpython-312.pyc b/ultralytics/models/sam/modules/__pycache__/__init__.cpython-312.pyc
GIT binary patch
[binary deltas for this and the following modules/__pycache__ *.pyc files omitted]
diff --git a/ultralytics/models/sam/modules/__pycache__/decoders.cpython-39.pyc b/ultralytics/models/sam/modules/__pycache__/decoders.cpython-39.pyc
GIT binary patch
[binary delta omitted]
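[Editor's illustration, not part of the patch: an end-to-end sketch of the SAM interface documented in model.py above, assuming SAM is re-exported at the package root as in upstream Ultralytics; the image path and prompt coordinates are placeholders.]

from ultralytics import SAM

model = SAM("sam_b.pt")
model.info(detailed=False, verbose=True)  # logs and returns the model info tuple

# Promptable segmentation per the predict() docstring: box, point, or label prompts.
results = model.predict("image.jpg", bboxes=[100, 100, 400, 400])
results = model("image.jpg", points=[250, 250], labels=[1])  # __call__ aliases predict()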
z1(zKVi>PNg(6V%u`YOFy0VeUo&Vo^)eg~r!YPba$D9XyzxK3GGU=Q%KsqFBeP!9=& zu;W5Eup!WMUF?}Pdjd2I9!FqG3Er3PMKV8=9zY#@EF#c$}9(M39o@ zf2F%op6~9c9eNPFN-+b2lU~_1hbN2`V2vrtinQV~+sX;r1uiUaeb(_BIHHLt@51wK#kQEO9ZGcfpUmJBE<6vf6D zjIu-9v9gTBf60uY)BIrPDfKJSDSe!F4jjFdi!dyfL6^NPw6G945Wc)>R(!RQAA1LO zavB@v4?Fh^iUpe#BIvsI6bhIPc%0zhkqrN{^VcDP>Z1$3!f4qGa2oBbH51Dz!3g>2 zCOgAZ*`Bay>IXcZJvPFoL1mYPcvFZV%An`p63gJ;K(w2G4;*~T?`M0_9{zQ<4~_9h b*->ch zXe~=LQHrV}p`n#p1<5{f1?9w*1AhXC<$%P6BSKuD7kIO7Xc4=b-@fL0D5_$DsuICeUQq}p5|Q^5s)D8g>f$?wKvIc9)IEI{ zH89bLzU@>EYObXIvn*;K=nZKM5o1pyCP{58)%1b=->*%lo`xr`)}Bgik_K027b&Qk zc^{RX*jn&{)p=U?31#~Q^`0{N&isYNc}hDK>aljSy|lRLxBQJ+{;fG@oq>J=2|&a9 zzngDkwFI)m&sr~Q4mSQy$@@30FJ*TctZYWWDFLr@EmPJeg*eS;GNZhbnZ@V%rRMg2?eJy5zjL^zQ?z}?I)+Qkit=)~AI!pvQ~V%@>> z=nn3>AzE=G7ow&6Qa2lAp3fM8{_KwhG>~1?PoIiRB0o)$gB01wffV^D7v&ogFl}y4 zUuBdu>tRq@1ImLbzLqWVhuIc>mCLyo3kf}k1xH5&90l0q=W-Vg=VCSVS$#Q{>t4uS z0(-38qRpk{kWKKaXl~Is%V>vsVXfH?DQi)0gT`vG>~$z}#s7|rI5T8kJLvcU zjg^fiy~SP;TV=7O2BBA9Wv_u;F0kRO#O8BT1hty>*Tc?wSPT7C+74n3zWiwZ#^tlZ zcTPak!ka=$`Vc4P2CU!&05Zq%IL^r<*uW#$k#m40xrkHPhSQ@-{QLX_cKNUQi5^qI zJyza(w{ks}p{H0|@X30MG6n3cB;YOKpq`s5FC8^H<$%Yk(07I9sf+Z~d$sZK5_$2?j(A6KLO7K+nue@*xYY0ZET;XfQ zOZW}`P4T?;5W@aI{I}vnuWYbmpbd*#6q4wHiN<4jtJ#UQmbXb+z&-&>Ss#-8k#MOB z_*g&^CidBafTYf(;-uIT*zUm!l556DVGTR@1m3b2Dp#jCANy%i#K-t@tT5VIPZAAj z(u3yGn%^X4B}p6O+w3BLJU*@pvL5m8#!vTRy;dWB9eBD1Huiyll7NKI4~3LelE5

    @8EI%TLx#Yc+u0`HU z-+QT7-B3>5CLR`8>gUxm(AM_SF5QBzN!b=I*1;aO0!a%LQVT4q&=lIxVc4)0bZsx* zfF(QUyP_mJ#4gT#yiQo_|WTvB%T<8vLa|lksA(RTPWSQU=?ScnBFMMV2mBUv7pHD0i zD&f~JI)p0n7v1aG0K&E)v_tS!!?zB`!=ek|`lKlYTM%l5+T?~em?SH3m)vLELY?S2 zS3~akeAXI@&HH>gi?HF6)R};gME++J>DsnJltivUIky1{Q zb%o=(gp$ti37t#vx+v?M1`V-^>mL>=<03zu9}%^c#Zj1+l5~xebdHkA@Oolc)S@s$ z;4-R`5H$_v>(D21%Y)ETmF&ax1%d{3kV-_7 z@(KrG4NX~0jBp^)kSL2P$bvJuq$KmH6*{VzQDH-}&g+trlbf8&oFt7)q<9VG6F?-5 z)3TY2qUxxxWL8eVY+2*tVoDj~Qqs_{o|@ozfzRloz$H~BO<957_lY_e=My7wMHV@J zl$TO`JO!9e`3&oNM%&fgJS6GE*?1HA)j9u*f`~>^6}hz;gpW6;c}Z?w)0qZONWnIm zi)5QeCA}##vAp$eU00=eRu{EhPV!7iFt<-ii!${IP^~0qvS1(d8?=n3Wz(=>P*Ggu zwDVcerlD$dFskC1n2y7Y#$MF-e#9fYxWkH&1tHLh=p>F#5@1tnRx27^yB-$Ha#Js_ z@@bGvjdDr`mB*)mP&fISGn`u`BraPT*&CLpiw%A~lBUBN{CT>U7<3FzLpwdOw>7k> zW8k*>#Piusu)ajMa|Hbb^ozoQPLf#_90p`GNK%t3SsgTJh?&1?+3Tp@*F3DG#pbc| zLtrH_Ba3?TgpyK5x3!Rh`w&}`{{zo1Q|siHdsmKtVT7XVKe0=&N(K&+sF7o$znhbi zz@9qj@<9Td(Nkoc8$(Idmr!TutSH;68OUgvR;EMz(Rct;OreBvmy?gnUSTW9sq&5X zMSCnJCuL~({!NW< zr*5Sd8aBUu^47_PFt-q{TiCE^VO@BkdE34Ee%9(-s_+-Ah!kv$KUlC+w1n|h6&w_G zGL?ZsDMejOSlm|JwtB5(=qGx5E&s`#TV5S$67_P*bMH)W0MJU5`1BvZ3W1B5P z1&oyg2S|B14$kq!A6+%?t1QF(Msu7OCL^hGf52Y8ikE}P$r8Z_1buXCjVMAxCA9tj zFAUb(|F=Do|MJ_L{J^k^Cf5KY*Q!M~oXK=XU(T-lqhe|`I>^_bsi~LbL@JAsjms#I z)*xucmGNR$OY_=@7A2n#)rBxS7BewDju142EaW`$x6jm(PnsOdp%=P=BZMM4>9Jt_ z*CnC#j|FSYRx6PemXN{f>h}I@CMCWETZv+tJqrP`Pnr~IK!oXQ;yXlluJMH^cQOJi z|0Pt@_mq_odm!?0+FA-mO@Wb~ioJ#kMi~6dh_MtufrSORvm>zY@q_@mTY&VJQ8Wp| z{Hg$}F-gv*V^o_)azQra;(1?=_N^|^Xgn9BWKvYI4ACbtMtxvdVwK(#v_Nc#>`%=Z zPjXnLAuD<=#;@HLr9v|vZV-fA!Hqma-2rj}uhSv~2tXB+;Z>TPczFV=pHeLtw~{76)Xx_XiQN@3=OWCk1q8%Nm+@} zor&oKG|_|VRs{Ww&SF^!R~+N-vgjrEJQhuAy$2CmE8cOKVS zppMcO%?VytamJz#ijXm06=GvhLd6n_q6(V9D~`jqF%gS16_a1Yu09Jt>Ii}qf;72Z zuXsKHR0~1cn_`yA1Nj{rr%W~lhjsE-n>N=m`LGD#z+I)gtAg4C} zr(3{&lhJ>P#2R<9e^0JAeuwQLy^)@cDkKL+-Gp;kHM@oc-O;N!c!_)v>307kpxO%n zAYXe(V9R&TOm-a?p>T(i6D=r+qf%n!%qJxkDxxP`do!4ciq1g^uPJI1Wgan8P0;DxMqPGM}F9K#5iYH%f8L<5a zw6!+!`<92f_qYAB_V9-T);0OXM0vm#32%-RT zRCx6`w$CBxML*-fHb!g6hdie%l&vaa(I|?ti=IR!@Ne^mfCY=#QcleZQan%niKs;Xr(T zAbuam3pL~mZ5?Kh)laIohxfxT6lG<>S?dkgb=SPNpVw}m^ zF?-_1;Pt`#44{P)64-u@9mv17-NM?=!nP;d$xM4Y`C0o}hiRl5gfah@$x;q|uGMOz}9hRc`w7MXcA;QA(xvBQh(n0lUk|JhUQ zj=cN1h^g}}>^JF(vA?6xDn^}m0axEca1nrpzJ{sU@TGAg?`~oDksG^bbB0S(N2U&m zM5qt1abaX*xHG(QX=F2pY^In8jU?DtYnjz&UMA1>b(QffH^lQbsN`C znqZ2mSHR8jVFVtCR_fpt{YIe#P}6uk>69pUxTn{+q=F&DaV$^JE|OfdOiD__OG=aK zLM5%76nc(!8rB(vk$?W=(@#G&4yl)BN%f~Ve+oeW!F~XXwyYcnHgM>FKagOPWZ>jp zS02UQ0RW7c|JKO?wvAem`aZST|H)2Gr-SU98>N+I30-4jxxM8mQ(b3)@u!~nmN2`=HHyQnX9SX)PYbf5gV0z zk$oO2lurh;>`(Ig+0!QWYvkv#gZZjg23b}o^MgJQE>nLE;0T0;EV;@H?vGFhccBYP zhnl=KzQfF(AkT@%syHYTRd}O<7lILSf|mSnrO3Y{vg|%&x@d!fQsh+#d*8&S1sqE} zqJ93q)&no|0&(=4nmQnEkQ2`&_vqhIlIYa zo7xA-#-;p}4Uu0fAyT7Olgi9%TgO1yr=5=8mB*^(*BrP3HTSN|{~+@{*0UEGYyk$3 z;BagtS(Wrp9^VVMG8%TCc#YxWpfWfZ6X?weMIKdB*K`PF4tSc zp&fIbMqYJPm?e_bUXDJ7&WavbpTp)S`+z(-pbw}^ZF|rX-VXSEgA(3O%OY<`C|mf5gZ{W zvb))@lRwVZpSERKwq-hgO`4G&Fo-`$n$Mcpw&^|B4$d5Wz##sh%Nk*K&c<&@*I^Q% z4^FY4W4FGs=gPs$Fa^*Ddra+Yc-nN$G2?i^09we9^3l#00<3$%U2$db@?gP?c;i;% zw|&>`w!7xro$tkr_8}+@`{quZn(ICFK{M@IvN~TMyf}E@3d;)H$sdhw$u+QL3r_Eq z=;dg^4EU1W_4B?1%33e##2k3^Vs6kZ8YGQqgvSI=9z14v^5L;a zTCo7$R!Jw?q#USvt;b5^dj+BJ{*=!S`9<(J;3L|f zYmTF}b|^9;)sByysuiV}6o|!Sf8b0ge2TuEzn7R(#PR@7I_WR0QBx3lj`jm2xU|7c z_t*wVK7G|zNq=jbjGHjC4m|exMNZLiuCjEgC+&2lU7oa~B<*si-Q~!^UR`sg=JlF% z)rKqf*X?OfW!keQy|y~-_N0AVZ&n;8f_}Nsp$JG64d-wwT1Iu8y;#X%RL>QaD7lOp zIICST9wu=pY(jG$Gn=_0my*w@1+4{)S~+`(Vq>(BbGVX+=6mOQ=kgRgGZk?~&ScAc z`&|2MTyZc{F;`rwlrZW<>SDB%^HeHsMm=1yODO|t(kta$`MQPZYpn|tFS)7TZnbNK 
zx}6^*h8$5N>FHloRzr{%b3g+IQ4fZ)4K`J7_N_HY#??YP(Flc1AXGgi zy@mX}%4#tW1q8yq-);`7#eA@vp;3@?w(#9L=InTxKuwT~qi_i3dn_O36x4|psBOXS zPp_qQ4y#El0E6}aZ%)vBt$W&6oKtFr#%xUVg&=xc#@HoCMn+?IRi8?fzAb2uf>h-m zy1nwlq8_9yeGFMIK!)_o1Jt#qfsCbk)|hym-jn@Nah;dGxb8X4!`b=`>-UluQ%|gK zQgx4FJ#{23MKh{^KPF>|OhGUrkNah@e>@Z$>JLOBvKY!`C+JV>8p$u|?YcMhC+|(rLoaXMxADuA3=-w!wOEVDvHKcj zqdbq9PtXEyv++Zq(T4zFlosmuzUk;>b*ofta%?~{x=0?PL%w>$AF#Zel~1RB;QO80 z9f3qA^fHWvut#|eAqr4slBei=gYCg-v`iwLLwFqFNdyrACoRVTG8$=O)E^dQT-vJK z`}}%$#Q$t4$xUEs=H~%g*4U%@6QrZt>G{Un@pN;`Efw+XCV$?&_h);qd#}rj-A6y^ z9$xAmUK|N8bw|=22R`W#mpa5nDY(>e3JSN85K&Z|(FJkOT((&5UCi??>b!C@j=(OX zXXJK-y$E#(`w;dc2nZU4E`*U9L@&ZYgb>031UB{6S*y`Dg}^+^yz?L?4xg<_&X(rMxwY7ER!f`%F1#f>w;Ta*2jhHn7t%DvOPN=Hjp#LFd<&#&-0V?rGA@Q zR88{q+s&oCsHT70*=Q8rz=^|;aH2pr?b@N?sx+OlOyAn|iso6c%arx5@_FDHK0KP?M?=|VIY-A@Y$QRaT3U%U^>K@jKZGlg z5yaRdqf&O$F!i!+%EZhX+zo}Qa#(}eC1Z@)_luE0e}7hlOib92{5APuS6PWNZLvu6Uj>HiB_jyv)#reKz;5!qpu4Hl z3BHAZ!oln6#0SE)xWXBMFCOZ-jW)$fC)>wJU+PBt212@NaYuJ5+~E?)e)@FRIkGjS zI_Sh3$aJ@p#ON#C340i5u58zRZLa@& z_iV#f5xm&*uYthiey@I5F>p?I+CfK;{LpR^HrzYxf!;gpO(|bbiHd)h&};i9Qa5_v zCxp@;o%p`#EVi_SEhV^>O6u=(3k%jI+ZuYYugPL9U9?s&8pakkj{OGETK!jgr*CcQ zUwsPS*(n=vG+#<;HA>4TAmg)t-??)qE8aX7wB?VmI7UX(VPtrh9E{#`n`bw zk*Ln!YIq(;v<(2fnmX$5AwK3-7NnR9zlr1M@?a5>=|2X`>fORIvTQUf0ul%ga8J`R zs^bdmN)DrXuCQ3iWfWZLQH*Zd6|h$^gPGHtZs#-FLQH~l)w|AB@6H4?s$w>8wxZ~j zDh-==hU|eu^lI=dc{R26R4-4yOB12?)JLHcq?|dA?M&v&-RKu=ME(?@6E;(fK6pkn zzKe9w*wibN2MyaWqm1>HrU%cT%x-74o=kk-c$;p7He`fS zZ1Gv_>TV2zjasuMy>?T2-Nx)U$xWk07$(b((qwd@H90Zo#-nc-Woj!vk*r8Ylj~;> zC~yFRO~)0MBz+62WW#Jfo`hi1VlWN zNZ-9MP}zl})gv51z}U%RjQk!lcKGk7JL9`Zl3s{c^y*F$Qa3YrDKr~e;gGMisJeOL zn`yb!KHI*+Azv9*SChu%;CyHf3ISa?PIizDPqjSLJ`F{Ht~Bvp;+)|x<;>=+Ksa+M zr|B2*hE^LfE(ot0UNR^g(06?7bzQg~SnTe(KJ;N8OQ%ab3k3^-g`&A#iVBLQYqwmP ze0}m3a;1(wny8Lf5OW%C2{qF-iVFC$)-W~s*rXx=$NPcpnaSx%MT;r88e~&Bn9_4b x%S^>gbY}hZ0VNkLhWjl>w3s+e?o{7neYf%;wX%;CgFh^n9J7ww98h-K`VS7|9ew}+ diff --git a/ultralytics/models/sam/modules/__pycache__/encoders.cpython-39.pyc b/ultralytics/models/sam/modules/__pycache__/encoders.cpython-39.pyc index 67b258032a171779543623eec5e880eddd0d9efd..1a0e848d3e49f3592144042183c73dad1d05a9a6 100644 GIT binary patch delta 5197 zcma)AYiu0V72Z4E?Al)2>#W~S;*1}FH#qCafhL5I#MlYs<(R|}#U*jZJ9q7gcV{+t zX6@KXwFFS1q6pecC0gZ2{G&)zEoroUNu|&qttv$S^iNy1stT2=sulf#Dg-Hr^gDNU zy-P@?dNgP6+{ZcRp2v6Y^(!y4=f2OHjK;?L0RFl!{&ntqXRkN?iRE9yCl5?BGcc{0 zY`VtO%o;Ojt}{b;!e*^m$AjnA;SHH}WM+v4 z!)9Yj>k61nX7tMXX96i^Hk<38Zk%r9P3Q7akf<5EqM0pb>(jyM=4S%I!1VePfp}Xn zdN5n11mTNh-J|zs>O*taNg+7RN zaorHfdCTKTFDJN;5q}+)A6zPv}$gTnR4A^Na=V zuB{!yw6x{9y5;GLP1f*|^V}W63{%evC&^tG^F8!&y)=j~;pB63^APHCVPV`!>0XuY zvuP)};3kw{C{Wrg82a3?D^4u!H3_ET>9e$XazV$A=D5uT_ChoDlw}*~H9mr8^8y;# zp5ei6iTaWx%hi);mZOo?8@|2u;&&#^<%-H6`CMVYj#~vBRmS=}EhGh>| zb!OoIG=vYAnGG*mULw0(Qh3z!gf*MR^CtT_%8jZW9Fbbg^rq}t4Su5s#@*1YV9U0 zw(Z2EA&d-mCWdq;OC~eYU^FUw!W&Ol81a&SNxbiGwb++;#Bx2Xp`Y8whR?^3FY=Dk zO>PUf?N{z6HBRvJIe6Go(kCb=U{2Z#OpwY6Lk_S0}|_xP^I z;Yzp&iIrS{9C@aG8|#rT)L&yv?v31?FM-BtaFro*Ln~jMT#I?%{uJvJm$w)X4#^^O z5c*liwR|I0OsiWdsHOt6lu2-(N+X|{Z!ZK>Cs|Kd;Y$0ixI`{TPDeaIoZCRS7nJWs z23Smn8V16}@XU#B2Z-<{Ql>0>ib= zC^`u>gVXDI45CYHND-(KqwlKU5CAn;0@N0awJH_b%yz7A`#;u)N2>)WW*ufH7Pl*O z?tqWGijDAbChL{p+4#7O&3=dcU}I0eWhc`)N)osixQP6lbuN@)F=Mz3?vT75?d>9X zEkn7&1_Y#;18B<5O}+Bdp1Om66rmdukuhpgb>8~{p!=~iV_F$~5CV}qpThzKJ0-_M zirb9Bgc;Xb;rifIE}Q0G#*!fd@Y7g$g8B)Np;{G&073M@hlli&@kG7Un|n^&W+j3g z>82~rWe$F|A~aFl8D3PDUOmEwZRowuCz>(E}`V-@yaWDRh+EljDIWaqr?cr!+| zQVQD?rjmNt;#%8BG8H8-x#6kehV>|pSwkrJGwfwlWt5$YQWQfozBg~<$Nc|rB(1uI zhv_WYZqlGCqjbClHFE;~w7IwBhy~wbRr4uAKF`v2W~k#(xs 
z>$amf>8QYn^+n6%rmtvWPg7ekat#pmZ@ht(E%M!#cIm~&LnrVlj>|V(t$ea|q*i4VLQ(#x^`+1m8b2;4+D6!$h0ASM z*!l(Z6oaVaZm}k5c;bvqbacg)l(RGxTH|_U?wAlj^ML55g-kf~Q{r1aKf0km@;8jRO(21=d|<<= z>6LxQInInY?U0jAzGx-aKp|xbl;vCU=`MIuSwhF@(RD;ZiH_I!9-u7=GNmx(wGEpi zAEN1MAOWrL&W3DF{v4VU&~YBWN?(ztmJJeb5*MjG3{qUjfrHDUg*q+~p&BR76S+WS zg~%)sC2|q1Vu)YJ0!0MHM?6yHBHvy#rB9o6r`O}iD_mZMB4UX+DNoMGe{Q}nbOYaR zkBsXZf~~Cerkv2b^POTp%{oBjAtDbG*-C^et2j&~NMs$62_h7NYD3}}wK$QZMAUXF zJ3(m)_`6reQ#&4~zDXicBC>O5bo3Flp1|vlfS{Il1plLH8mq^nv0A3xXx`df-yLnO zYYoNljh;^$nwv}QrccBpa%b<=@M$bwxpz{o-q9jI>g|=A`X=%$7jU~TX{NToM2}hX z741^ss%8c+XZYdW@n39*78k#UgihdFq1Op)qw_N-y^iPl&{}LTYQIz3Q6U8MHHeDfQ-xV z;J#x|pfwSX-ZmsvAgYKW&bP)O$@ark1(?F5Ng3VQ*+pTYlfCH*jo{KYMq2B-8^vk) z_TYEflsq3l0bBkm{%VjNmUd#A9g}Y*-pQ}b@4vv4bUT5^aer7c1l{0pyDFcF30l5z z?bDYmZyp3?e+f4xx{K1)mTqLWV{dg9xk%&eO!dm2BUr@G*@1;bw z7g2^)k#_VprrXG#mqR=6V*Q1wou{;JB}v&?0ewhGDzXEpo4?t0j{RDW-2W-NB8PV$ zEG+GQob{>vR!OO}A0?HDhj<-i0=eSI4{0o(y^pl(xDN_k z*bzw!d|91RaiT7Kw3o4Q;#>^j6vPdI8mm$@2+#2e@f3#Q^$Ny|H5ucAZ;oHkf(QkmkDgMd-UtD_bjW1#Iw+l!$Sslho_h5?1Lg<v&a|;3_?aACVzhL_|(erFjSi?_A;Er; zyrn8u67KFTn6r--Q%VDZf&o@~Q0J)1QJuqcqq$wO!*{diWWn&Dya|4>xU=qs)@O=e4sZr6dR50ytAvD1wU)(?(7RP{~`9l8x4d11*{FKH2?qr delta 2933 zcma)8YfK~86`ngDKk(RtF^s9GX5O{7$5>PVHUL{X*oN7b}ae^hN}t5wt=t)i;^lRr(i3O(nV zmsYA2kLH_m&zXDAJ?DPsjKBRhdE+&r>0Yl}f?v}4IqiA+UF|;EdKrd>6eCoM$&^r; z+NeV9a5<=xx(wT*0=J#IXbIeIswRDg36Yu}44?3juZfiLsIQ4X>)TK@v|IpiDV&rUTNUImCzydGG3*eH-LPXUMBm6U-xo=E1SJy~5(!Rv@EpuxR@hszM?>)O%+OGI5~P zK#3_+Vm_k`T<pW?6Ag~<;Kf2`Xk(~F?7lx)T@ zEn7m*Gkn7;PiEP=&S-o+m0yS_vROt=Cbkksvt!*z%`fB`IuAKn1Rl&lKtiy2{;QLd z>YE^%RREHcpA7!Xv4EyD_t!Voi%qQHSS%?*U@#0R#ijE6T75rxpTAQ7ymuD$a#4Si zPc@cnp16^oGiW@mn@i@e`9MQR6~6Ph;7y)ySaRRkk2}aeZWwDj7)2S-P-2a^CSe(T z?72L;!c=3U`WvvBNMfmkZ#7=8F3t?uM92&`%P?^)fm^CnLF$Uzj7fv+^kb7ri}%<0TDikYKOkIzd=9;Zab{5|~hj1RD6=4Kn6hT3-Ba9;;`$R-6iV{T_LlE&ChGIjV2w!AU zgglJ8353!E;IU5k`AHaN;WqmKTrTBvS(b?#E}6K9{F$%S=TV3PBjuqd5GV;a{GbQR zLyzBQFUs0OHp$-$-BV{^iCbS2q~NiZZhKLBsU>0eC+%JkXWFWuM z>Lr1~a%%&*BnsM2^BXy%NOwUuQGQ5#K|D4HQm8~YDH6FIO_*WJnOx55`L3|#NUWyK z)ik@yKW?k^yo5HgV3BpU$ZhRiq^{80K0>~F7xi*JTioMC812{8Yap}lBM7dE1PBmW z!1{0DA9M^%Z-EpIl|I5U%NdW;Y$6^nl1#7#SAUdW=)XuoK_1aoCiqxqRW+Wo9rNr4 z!9^6M)*UFR@v<5IT<4F-G;a(~7uLgXDP)w__r^#!-|YPhiSiHoYDtFwwJ)dYI5_~5 z;q!Ja(xq;LR3!fe{&J)$bjW$JJ5dLZu)j!X+>ZS;%nlbmjr@bizaTvBswZ|C<4^{|jd~l#m1T~6NM3H`(NOtgT{^!BRN&U};uFExI zLn0YS;E+RX$Rg+9OJEK!k{|O2!~Z2e;P=mt@UEx)h2C=`grxcH^W_@OWbXq+fixNZ z`uUjpA@0uyD#=@LUT7m^hWCt2)gJWll~sdn1fl5#myC^|nYv||yz{95|LMrThta*| zSWac&zYNrepsbRHzdKR(bufk8f=iYyt=}vr?E{>krZRbh<+95P#6d8bIaMe_RdIeUr$e0r_t5bL6ebWS5G)> zbf4}B&z;s!cSiJZPnRA}^mXu#@oEh|$Bn^Ec0~^kMY=l*7srQWM+Py#&_M#SUWW<@vgH3`Fg3N*z|CiBa zEimN-PCfww6qtj5bh(-Y_}?#|BH!ZvE3-}GU|=o;Y~XAL0qcMX3W@K-D1Y@z^l`1iS00s%pTVju&=%Y7E7i^0V0(uRne=IZoBNJ8)Za zGBZ_Hls{UbIV{pVTC{-Uw1_1%r&OmkipFWlEv*yRpyjX1?eaRgY_uzjq&D#&v*?6Q zH$#X8FaVGlR2R_;G2(D0TH{?m^58;DF{pvhfFkp~2r0O60`c;vxWa*@?GE?j2)M&O zV>X=U&}YcN4;>EyqZNseFyJwG7-8aYOiOYq0EWv*sUN-Kv1xE33cVx_d;F=-QV6k` zl`KE#B9ow7UwI6i1c7%kQZp$muc-t=$wb3b)YH}QUV_w@l((3x7G_(z>kQ|Kzuf1@ zj_X_zQ}qt-Cdj&RK%wdevBL+o2OVix=y0eqo=AXm zX%@Shsvq_GJT+umTI@RgK&A|izW?^wqt60~Uo+=Q+v06Ybh8)5_u0dPovOf<9;V=W z1v_mW8Mh7H^Dkr23DJq5`mWFTM_TG7$@GGHr^}$VGA%aUYot^J{uRA?dugONMPpoA z_B7Zcmpq(=N|CF6dSLzsRAYYY8~2Qvsq~bp#Q>a-eD15+dh`0+t^^L#M{H}$;&euc zUoxL$l}zDgKNk11?yHcBd0y)@#65L{oj{y47bv`y?m;1rgr&0qs*TyEeQd5Z4UNVz zdv;zvXPS1aS=)A0wf?&gI|gl<^Kbn3pw_(IT#$2Gb9Q4^Kbmu`n)uPYwf9uL8;4l` z;&ZP~5qlBx`dS>stBo14o?ECKmVW_kAa8$vN&d9OPr19sgt(SJlz))#bZERH?&Omf zuBiIgP C^M(Te delta 525 zcmZ1}wOfeqG%qg~0}#}?ElT^xHj(c|eFl)%&M=)Jl_82Tg&~S5lp%#NiaC`fl{JOQ 
z1}K-p9L1K(4&?yul2be7Bf`6LZsnioxI(U+(5-ESngG)-o|La4A4Ru%oX+!Q@}eQj>49 zHB63Wk5N@9$w(|wNX*H}FIPy*FM_KCX-G{=%S_Hp%`2IFib-nnQTC9@(i|z`1(_A8 zIq|uviFpbdIhn;J8fiKCi6xqo7jalBB58*xD=tYvRb|g*8_k%!^k%|i(6&#bnb%sVvu+% z!wnhv2EPfems#X*NXRz$Oi;eeBKv`lLD}YpqQysEY1T-_2jXfUSV4>rYz&fWAK1Z6 z4hC_h51e2o7lWwe4AF_+AGpC>9tIJK8LShnKY%p@jpE|_&JU9L$N(aX|IqdG6dGmem``*lc&wM$bVd-?zppkv{ ztMfDYI`d;#rwENkj2n%Kdj^j+Og_=DFpddsUNgAWn8Ydij?;Gvr!k2$n8NtH5k2`7 zpMIY>G}_4y*y*7sq3RvTuH$uJxaD(n z^WX^JD4dRm9NK;W{?0D8B#0=VNyqms-Z4m_Qv%v8O1a#0RbiWOb*B{8^9mHGjq9DEzAg};I zcF|MiaM(Fe0raRy&CF33rt0bk$Q@F1EJ~#aJE^m-SEUS`oq%t^pUM0>Rd6!3&)fkjqrwknwyK?IQq*G-azX}_dozkihN>!(qVo!5bej!asXSl2 zd+Wxcoeby9dTOkUCO(_@!;G~sJY6!h$-D?oTYuWmkt_B{Mc;XXZnKBXgWMfQ$cI|S zBdTpn&FN(_orDP8@~EGls@rv*>XzFN&Y%fNcKiuelT-%_ILX>~qJ#1uP>uPi8Y{+@ z=1)$ivVGHYq@%cXTlIl$xlHJzb=<=6Fdth=X86(E4pUb9B1B^T)>;6+uZ}Kr z!xz@tE^I{3E5wm7#jHkdxmdDSD&+z!FR$3eReCR#>~bkTc2uQBuAcQ_Ohoq9BoAL% z#jF3OE3ew6HDW7Q?B$|dNJJ=3JC!VK`@d=NU0zMLC%OQ*>8369wB!`?S&LpvIrIy zVrOBa_(!aQKf+opf*X9`6vNCg)67fs8ip(g2+|PtC!bnZ;bebDJ3uk!D6Z@xj$;E% znB$c;;zn$Nt+lDO2M)MEzyq^}zM0$HxwXbfVD55q5ybu&nJ5mrsNugEU6wvSnX9<= zSV4%;A9Odj>0k(4_RB)eTS`4xy%sHxSxH%v_rrPdo^f~~8Enve; qW=FmAJjq9E>fUX7k-nwyXIDjP;x6M*P2DBcB&oHiGlJC}x&8t!99%R2 diff --git a/ultralytics/models/sam/modules/__pycache__/tiny_encoder.cpython-312.pyc b/ultralytics/models/sam/modules/__pycache__/tiny_encoder.cpython-312.pyc index ac2f1bc4f25a1252d0f81ee0d7a80f715f63fd15..b5ca8ae79de53a0b77ea60291c531a66b5c276b9 100644 GIT binary patch delta 14418 zcmc&*dvp`mnV-?ik}Uaw-|~a;18jjUY-4P25}tnG0ODa2zy!=lGqNYhl9-V(cHM|{ z(}tu;W9Obz$eunpY?~z|-DK;uhwQc+l5Mx`$)2p;Bx~1c+GP7m{^-VlA=_@Z-S4|I zl19dq>G2xpN=i{l4G#eZPDCt>3U$pEYE>=5*Q__)PWvNB_T_`1_0t2J*S| zteC*|8U^DSCcp}Yhi$#4Gt4Qr*WAsxO>?$w;n2xOp>L~qv}X&ah$n``P*mcBoIf0j z@={2Yx#$2NP@Hx)>I^81_9V!S+F)D2(o-UkWD;^8FwzDHILkH2eeb1Lns?6}oe~?!c zbv2R=(iKT8M50Yt8(CE1+JFN~E<{4!fuRtr4rl}3Sxy&5Hh^e8FhL=g;}!jBgex!k zZ|eh=dVJ78j@u4pyWvv#CKQP!c*FMoeQtKtyKg9jf=q$y*bv39pZwXX5P)rdGgewr_QmT|J0P#C3A4| z*)+KVzT`?Qs<2prg#(Mbuvm#jH53X9#b$Xq{>Dkr0-w^xskYp|V9GSxZ&n!0#k6s( zLc-|ME`M)8d{B7(ai<<5mBxMjgq4% z2eifZKtS?KVE$e)JIe90e?W?Yh7HN0$8RB1O}Ue=r@zd`(%jZjo%(<_>Hz_egEqZb zee?>WE?H7-YB@oSL@dIz5?Qcb1%vKj~7`leeAWa;ipc zaC6Q<0Ym7ct1Yj?dOM*YV>u<{k&LYDt?)y65sJ8$O+J(HWA-jkvzD&GNKgb5Qf&0_?IF-M z&|rR0jEb_7#CHlQ6k*&(X)~CibOnpe05Wnd3cL;q(4G?b@{(ma_p|FrIA>G#0ItEO zphzge_i{KxY&5VX5(!Em!<4lL--BkOYBP94@M;u2b>1-*4^_(m#KhaO2qFjk!X`ix zL&2zr4(o$I!-tMtokcA}Zsa}Y7{;MJ0|hG5=kx#W7Dd_%&txn_teH@xg9|37x%FmQ z1_=~Y#G2sQVt-^~;{OQ{7{o(_MZU?b1`2dtN8hH)fa|Q`J}iT};;JH{6b4~J64(u= zB9d2(-nX_crbI{JmJ~-PUlN`C$<`Duj#3^w?P)hIHmr4UgyXe^5W>SDg-65GK{y94 z62++o3#ypGO`enlSUP1W5~&IU+M&tz%e;U{2N4E9V#)^A`Lzv0vNVa9Fb9|zBs(9Js-1ddP*a*~)xH9`@i zI113?L{nSjex)_>3y}}r;cBE%Q`Ftc^*~FBHXx{qj>Mr{)k#z{RTk>ZNL{-t)-wb` zI}8&>GmIY5nn~JXY7;d2NE#L$0b(_dAwGoA5X2fXzNbZV)B&JprmNu#hW*DAO~fdC zCu0PzW*aOl?+c1Bk}V1I=i(uQw&D<01i;2Qk%tIVrz?qeX|o6!eSmE+y!|0inZto# zn1_k(l$5A?eI$l6Zqs7I0Sq2R57!N12Th{2IZzLt3&I;XBYI1PdD^u)!$rL&b+IO{ zs)ysV!tOe>@CQ|I1D8-;G?S#ddN^-5B%%r{$E8S0EwwvR-|4Z&6_*_8EQ_Tm1}t_OS^L8WLMQw5UX`n% z(t-)Mrv~QzMz8s#1^Gqt}4V*tokdke_F7u}ojB-~W~0SW}=CgO3e zjV)tb%g+scspNXL&6#mEi*ct(?Stsx=pH%j7Zn8%1_h=?qgn{cL#!#I-vCY0DQ!GP zQUiItBhc&u0yk}K$V&~CxdWCVlPBXunynFdU7PFXr@z^4nif}M0RUD^M zLIoJ9n%td&qziMO1eAc{1t9T`Qd9Pjr@4)ecd$2%2T*D3WbTp)wdPEn z?K}71DUBI{nlcuz!T+hc=U~=txDuT@aA@Yh(dh$6r^JC9jLp2_W`3HvM9pnR81jws z!dd}dU95&`1R_8#9FIQ?jA(wqLH7DwF~LG_8y~h^f>-g=n0h2xi8Zj1@yA%OyZ19=Y~f_q4q6ixL;q!_?-@^zoYqpka(1S1VW#!X+N z{Ux2KhtnqwGVmzq;cjgPV-f0ChC&iBFMtyPhynn8qr)PwAhhuq2DxDcc#|+qcPY*! 
zyP`e!%aWjl4Il?u@(o2rUEsj`xSAfCAwvSM1ycai+rXK^EN+<`j!FszM394DtSy7= zEC>O%zzIV%d5GeClHtpj0LY3*0*L`qkAY%4$p=9#PC}h9Bsltxno>JTDg-QbYKWJd zMGO|bThnvh3tIlK3RIU1ru0|S&{WGcG`V#Pw~v>=7Ek>?v-gUZ43ie zk@}i?(>Y<{$fQv72bMLE&5nXJ@MWJhj4|NN$i%9gg9f*8)Y~m;X}bpV7?f)IOIb$> zus|~kLG(j$+u^EtUv<*RaV0Rr_ski1pO-wh>Tq@?j6}hl9fxB9VsJ8l^|uX1t6(Ls z*ZneP8});0L?5^~X#`?MA1NexDkMC#ir+5 zpKYDaTsOP5Qr!>HUu!dFaSr^(`1I<}UWSk+I~>)61J#dE^HTkNZY< z_ei0U`=lOjD<&NvXJD@50fZt319eH$rKvdw)c{^~u~|Haq{iU+APr}NxwpIpi@M3z znx0`}Hn*Ms_Fqc*v|-Ru=nUqz>D5-3F;TDvYCO}}2NAMQ}=|4m0( z%KgVXOS)2q(F3M;T$EuKKpl#PRky5L%Ba?Az5J=OB$m?JQVAw{I|yA<8VH3v7!K){ z?&D(JFcn)Lij88O55cZq27-;+Jw&>i2+&92XDPr0Kac=QK{_cZ5=IXI5XT_e?4wRc zmyMC3=3W5Fsb-g*l3SZWQ@!MR^F>IazSMF$qZ2LvJ-CTbbvGnX#gpypzYesJVE(K@ zFpmLf3TE=r-A|Y;g7K1p95xk`uiX=bU5@OH-LbUM2jdZHLIvv7@CSfYjq@pkj)`-G z7igdiS*+?!kdXI@AYu`oDGFj6q?mDv9`bCozeLcWvqNoohJ9ljXn2E}SShmwj&E)A{Eso-RHw%&cslUfFy(cV%5npHyUkrbz3C;7MO?LWK1pOM;P+Twt_g~|9o5F<~AHo z3p}H{Acr6)&&!`(mN)Cn!m_YvHYe|F%S6kKyme-ty%u6uo7c?uvT0`fxosaY@I_kN z9yg3`BvWmbZqPJ{CgQ-a&o@0uzq%zje@&g8G}O(Sw2CIWW}+xXJhyNxc6P)DN^T z&y7osa&*ba7u`W4x!#`p2{CQ<`px1y^lX3dcYx97!@B1ovsQqJT)jP!Lu zbArpHo?sCp$w7h#Kng618aoSNQ3+K?VFL$u;E&PmG|fX;Z~|@{`EY9yiM80%<$nSF zrW;^h(thvb>2xcM5^oXm^OhQ7+L4=i8O`&DP!B!Ey)5P2Tyf5=$O)WVp~|@xFxjQE zg~hWg*3Fhx&vL7=EGqq(XZ_`(@3+0_={hG}WdAz&qmkLFwX@6JH*@U3zvUzUhK*b| zW@`Lf&7JwTOdSDN2``g(cD%hG=##y-hS@;w^8K1i(r?jdd&K(97uej=PAdR{tIs{; zdpqmdKa&64`I$qb2a-s?7~i!H)U8wT6!^kE#Qr34ogkDb8TYvU=qv$m3Of`>;yhAO-v(^b-5s#IJy{2&#?3|Hz4suG4f6 z+z-S7iA`bP1ht99V*nM&HT|NAS&y5YOFIq+PQVk;ElDqMSSIA)=Zul?u#6ehoJlz$ zM`4ir_H{b4agd*ZyQnN*-#5POR)0&jc2~#Lv*5gox?;M4zveT{kYb(7`)bM4C3wG!y}07}+GlG&qSvY(VS|47KkD1Ccy{Vg=O$KUC4pbK{e6|E0jBzra3tNwYb+toQ``veXKxp9E`Mq z(~I)T@izt2MxBW)L~i=IKDdu22hO9o#+W#b_xg`93&6~o}2o~;3@ zL3`7iAeHC?pm+VMk!bajEztu#NIAp1bU#bm-oa~RQRwqjmRh%Og%}irpd{|nsE4>s zOWHHcizqiq@hkZ%&Vq)YOOGRIm5FpgjaIpg)E3Fh?Rky28GJqyrjv{9SH#qZD3J3{Jf-_dr zorLx-vCJxktJj-aZg@Zf?*rS*;bDb4I1(Ph-uyHd=lDcp6-t!%AFn_giUR|3eL>2> zp5#^54HhCG^$$_jHlzj0AG`}F2ZxWo+*4jc6K^a&u;5L+0#Aeas{H%Oxt?ztiY;R~ zF6I(~GJK+VD2#r_%z}knIM5E+_WK7mv1z2{;PTQ;!8+!U*sFC^{ zR>mc>`cz+we4)2AX3;X9Y{+=B_(`aikXF(us~esvBD-DELs17&GXa{n#LlXVR3Wq# zt0njt=qJoIHZJ135bk9b$ia%U8h_ zPl~9nD|K=$AUxd3fUkHD8hhEh4}CAT_DTFpO6!ZXlzI;I4Y2#;uAut3NM%xe3sfs? 
zzLQ5u>9F3@psE2J4@u3f%e_?6OZ%9VHgx%S+)S2mJ-C6@vee0r6t9!gaFZv__d;5~ zKXu*yqBlxuWswEbHBC4ux~+Bx-;~l?Lwrm)RrQRZNv7vS5o2XMA*UV>rI1b$yd3U% z-B$IC3X*7YKNdT&2th&q)SC`mtNn1h;m-`&d$^cwCWD7J2N#y9{Ka(?)8oMI-b*`k@=hn59zuK)a;}gZ_`1s;4$XxDzG4~$OiJ*{Q(Qj+CAjv z{l3h#7}LK91t#m;SrRz1$u*l-deQa5mS;<^te-mQTQC|*V4LPmYvH_=TsabHe>ZpK z`4iK*?u-1DmLKu2-u*)7W&T?`reIIUb7VUA=v+`ltLqW zU%6|lb@v<1d!}4_$+JhRN?+V|**>}Z3P0V{`9|gT>B6qr;uQ9YD+uJ+=AX%0w)im!&A4$T%-0AX-79cs)wn1a%)PPiI3&u78$oF{iY zx?{#sHti@oH}sZc#e54>=mApuP~R);-pS%)&$Hk{U*k`(R&tMTH@L|seARp6?#u-4 z%s#p$u_d@f9J2^IFe%lMxO5XR=pnEAdV4WN?YT`;3Xt0Bu1PY0jyFYPi`1<}s?k6Y zr~#vwRQRKgZ{Vt;OUHcgg1^_vfFi&-DLkAmmlK=N%|1_#2nXTVx>u&pOxiQ3Cr7YW z6V z1iliZd#&5>FeRKJQ#p}T>8c`uh)zA6fTWm4l%rtO&Dm6rL@t9-dEG|26o-hZ-khDX z=Y1hKmF70c%ke&kLpP9n`u8~g2}OoGkSMaJ`|I~wumT*@WMm{eWD{OFv9Mw>i1*NZ z<#hb%fMPBSn-<^*Vf;+7yc%x1ovKyl6W4%ir3)*XDPyV*;WD;?j;|VoeH(pEw`p{rK2jUTv)@1$qDKhA zANBC?g>k+bHPg!kX`$wyn&PX6q15H^c-3Rce(-<673>sH6r))=11FoO#e<$c z3I5WjPwV#wIQcQC1Oy9t=lF%*(Y;AxO_1o{hCrZ=o;M+S4wnM{3I$Gm!{IDCelQSB zNtA)`P*8yJ;z1d0dspztU&cW&-*Z-1~E$3%Sy z%?PZj4pQ}_DtQ>6KhFShBHxdfN3h5u&xgub7kNFj$MP1=4Lvs3Q;UB*=lY+>qZNI_}b6^;t zrkOc2nN`!7Rp9tVyb^rM!ams;`V{7gt8_5kwXt1 znla~1oAa(34CX348<0PbSJ$jgbIHvzgSkc}t!|LJPMpo5z85pKIm<43{7-h1U!SOG zd=E#2ycQi6#Z~n+)bKg$-KrsMl()jf;2)!0BOB!En1ihtH=ngmSZA^-r?V;-7`U-e zmd_TCFFRW_QMACod%QWkUho-r svuqz59|QfdAQ&>)E_S@)ta}0)#OsBsT}HO%jPFV5QRxPQ*L2GN3nY9!T>t<8 delta 6170 zcmb7I3vg4{nZ8HT)ysO>k}bb2$xqqF4=@HBf{n3xTVR_SZNu(#DuXba?Cj(JuWW~q z(oC=A@1Dnh9{>5j^Pm6RnMuhzzazR|YPD(xp5tpTb+V4@x=WIB3uIqq#u%PyBOKEv z;UvRMC*jDDs!ck~942kD7RD|OtJd^-zOk5d);atuq`xLI#x8fzaW;{@%ic^`SGupa zhcE3v*j38$KEA`}6I>k!+@3CaPQG5B0wZ|e0Qj#H%w46Hek#8~So)SChyJ5tsa}f} zL(F5DoVCp`avHC+RpvZ3r@0v^+oYUih-s6F^J2w$skgd(9s3%3+Ih}@Tgj6QoR}bi z4(`hU1S_V*tQ1o8XX^d(QY+V49LReaU~X#PiThMBNgQ?}ET(rgm4l11-T-id z3F(dJ4xc`Jft-0UpiC7kP&g?UZ157Y5wI&kgRlZ27aK9|jPa5dYEQY+X4$b7R)O|z}7`wtDst?*ELmxshealdy zvm1ms%!yMirC~#Frnn&WP#D(mhx=R}&MCNj?p~n+v8Jis)eK84ZsOz7K%PCZ+xGE6O%Ooq(_tB^Q)cv(>Z7S{hBW ztuBL#C+(_Y=Kna@c=KEEDZH49-dkp~AB$`aOZb+l| z_9hSNnBQw?uC7U~I zlC$YCr-KdVoDQVuc(Eq$1xP%(05NuQG0nx}LBjB|(Ge4k)G13jR$LLukP6)?K5cSX zAEr$qu8a~^MjlqJ_W5`ZxE7E9^^#JVoUtn%u%YZrtNq+kB_T^_Pi4RE1~m2tVcR0{ z4D>-|nQUA(>h$L=`FPBB<8(=xBC?ZBLt4O=~`eoYw`@LE}`^ z{F5U-VWeqn*XXYCgz@G(i3P# z>w<-Kf&98(#jgpp?VWCO1>0ORQWD<*%nqtx>Cb((3f*wN`oT6rq2p zO|C)5>qTda+|@AVT6S?s?f6m9Xpv@=(gi6C+}BR%fIe zR-Y|(aiFBy0?#(L=g1ECHcMT1Z^wZWwOF=V=<-&JDU{&cJ+yRxA#G_*$nhQO>*ja4 zJ-%WKz#eVt0BSjE}8d9X|Ir@JRDmIuCgu0~7v2 z*5fI`jU{S+UjZvDpz9M>BNY3W)qNkM9hHN=xvni|2^898!b@29GQuf@lL+GRQVS`X zvVH=5==TgoBFWSdfk01P2>DBzn z{A<>0?bq|B7GE!#@=e!l3D#^0H1C*hZV$q*rai#%cM3ZrQbw!0XE6ESZ1xl4p#Qz? 
zqNW9hYK9pj+q2qJD$@fy+NO7K!5v&+J0D?4C&>(D+NLv0f|(_OjM9mLsrs9a_Z)#0 zn<7$TXe1z_Y9utWJxLW-IGtQ?hto;pc4WV*k>s+a5eJd5jpV|b#|*yd@Q$-;*B$8`p=r|2?BuT_ za@GQopTXgY)+}W^ZV7LU&@`>BguZ(q{j03~U0d@!f!FM6hoqy|!_$Fo3$U*p$&tNsj_xbkqyS;y!->!LQ`AmK<`iuBY|8=)n)7yvacXeY+s@P*s ziOpOj{};S>tOe$`?<^!~Ukxu*VtAn)(9ph2vjGALF_?gG!7g>!RrKuoY_;%nu&o$A z(tq1?7WP8l-nF`JWY*uI(ZQ`-7UGGd`FO${2k`{tFrG+cv?(EzIbN<(S6vqm+<7AA}>u#<;O++1vt{`Ij1`yd0F&{v$l zo{K2neW8rJL4W_kORyo{cQt7);`2s@;OKt#^V>Bl=wP!UUHb;>x!kgQE99>FR&f>q%-F?4kF3O$QC|JjbDg&^f2 zSDtVm#scq_2ruDNg!{CTFW0_}imih@SnjUh$JY|ooD%~F_V=CH>lbpJ(i9Lo3jY`h z#jYRQ>?Eu>!1H}@qKHn?t_aI|c$X(E?d$CqbRZm-c@GLc7|q$WPV?6|8ah~9@OQdK zwCXP%i`(|)55|r~5P9-)^?S$*2ybBr^WH@!W6k3&5e8lphs4({*(1zFG%`3OFC@O5 zelZ=(H;{?ktHqPWkHvJ%0@&LZQ4wTtvJ3;E!^3aG`%7jaoP?8hF`VoY0c1s#i>SCn zz-TH?&Bei6!f85nqS3T?#pZN)q2=^|Y|LuZ+F`2T3`5L9Af@8sp<2x)0LF>7)WGr@ z!-~06{Qf)t-c0eLB;dgtyO4?+ ziV9hwj1C>m0|#^YXhS9gb{FoToC;u)vFAN;(_Kn+FIE}x0v?X*!%qfI9={m~i)h=6 z4dfF#{$k5=)K|dUzDT36%)@sau$yDsVJQ}i(}5La!fLU#6DaK!;Jz@>DHkuxi|E)u zyY7cbz?*Pfqo=b`<*}NyiNm3?>Z$VU+E95_sC-3KAx(d7SoWbZIijYnV|{}mTgA1e zpl#XI>Oj-;f!%w4)wJhU{s*hyFA8+*53W2Av~_>I1f0J%4~ZxlbH;_8qrIWjjBj_0 z=S?I}HE>Juf%SG=VEN`d6^(&ZN2qwoHSWiB!|}|)yZMC^)!!YQ ziVx;jLr9Uca)u!(&)rMQjizXmbi->S>5RdA!TQ?3-DGPpxj9g=E|A>(K_w(d+21DH zA`F|dXrlF6e{fMvsC32Dx?pL2Xi@pqy_0)G1?8b7bu%guh^U#o;+Z%|jj#~LYU9ss zI=yLHlNr=#GcO$aNRt~WXOc^zmGqCtZ<9^_>w{Mb@Y8kjAW_geC%3T5A?BF+ywOZo zy`)( zr`nT6nH-dXum$^iva1aGtyjKNh*udQ3qm79fAs>2g^JN8q8_|!UpV>bBZIt$2-9*V7O`5 z#&~Z=5jDqmxQ^ggA^6^ct;I!2!zxw`mts?zVwe(`7!(VO5NrtI(%}aR3p_6%SS+=3 z-@^=aW;jc$L>*Aq&rykt)77j*l^s#i-KT$QlBiY!p;g**>eK3xnvay(pFLPYm%MgU zPcB0%d2uEU{ZT^SWoL#yc7tM=pESA`+>!ILi;+zXD zak@QjpR=ELckT0e3vfmQAPeUmy>j*vTo>^+Ef6~K`WDk-tc??E?-^_XeRe>ZOaVzir-?=voQQOr#{Mp zPbBIGK1n8<^PrX}*+mfH$>UyaJ=r?VCI;EW83|!?@RFMp!PEn}mQA0{By2(K?4G5c zogXubE*($8u)MLCJJih!1@ybrw)hjAo)|HR6D6pO1y zO!Eho#5Tf?DMyvlhU}mrJIX*oG%22>jUC8B>j_q70^^TS)@6v7NJ@wnrIAMhoi*NskRRrXSTu8O%pp zHcIvq(n#hz6Eo2|DO5Vi{1n7YbfZK=Hjok9n0*ww#B?;XO+pHW+t0aAyC1=aeiB=# F{|7ZOVgCRC diff --git a/ultralytics/models/sam/modules/__pycache__/tiny_encoder.cpython-39.pyc b/ultralytics/models/sam/modules/__pycache__/tiny_encoder.cpython-39.pyc index 293a2c3ee27725bd2c14704ee21c1774196f3ed5..4fb47c1413bb1b602b9ffb516101fced7168e7ba 100644 GIT binary patch literal 25663 zcmdUXdvGMjdEdqiYw)l72B~(jubnRoH(gexhhGy%9YBvQc+oU==&on z%d#bzdN9A=*YjK~_;8Ynv%vPw^z?N1bbtMQzwgmg9~r3{_#FDFKU(~c7Y*Z^JV}4@ zI5~;0ceiO6j^UUsV~)R8%UreQEK}#@=5lf^zgn0pgn7l)f@PS-YH6+<=2TXzk~1_n zBssa(@ao9ih-rM#aPm&!Wy2}Bx#hz0=(XJ381jlv33(;Ausn{OJ;*6L735UHoC)Mq zogw55g*khXGwh5YXC%y-M9!!)hMci5XCHFLoju6eBdxLS{m7ee_9Aa@Sn2?BCY^oA z*%#(aA!omH067Q3oP)@jat2rF`qG}58dJj&Q6;_;llb_%f&&(@6=BCJZbP+ z`cn)~ygPB=8gr&&%vp|!5iMRf7EBqL%3S`skvHZFsJ#?a&UD(BPB^vGv)v1)Y|p*8 z?za7Ay=6B#ZNJ`ZyUO;L>b||^s)deP_3RU8dFfK8weB}NZF`~9YIQEVjy-?Xp0E3j zCA*Dc^;WZ6=k=Lem})~YX;i|_@rQ&mf^QCAuZ_evt{GeAHYT}eY@y43ZaKeP=$SpM zm+R$wgg~4M@{ZY^bw8Wo zURkTRJr0KAGas|-4ZnFw9v{bTr)q~EM|IZhwYtAF6K4fx<8e%Be!f}vWRwDHzCAlV z6coI540%wl)tcyMtrk@4j#ESD)?02+@H?up6j<$cP}K7m44vlS&tmLPI6t=4GOJ+D@~X}o^&>_ab{b$z$tUr_bNO0&KA!g|YB_10Ct z+3;SFQT1N%>Z?4&Gk7of&Gyxr+iqaI)y&#eMUps#uQ!au$XCt7=CC<|zwU&7M$qU? zG#t}KRX{172#F=9Jen?Y=-TyFmqS+z2M))>wc5q?dMmtAs@0rMqgIo~gOTuo8g8qF zygS%{B)9YET};Tr4&dwYpls$X$=R_EN!I*$N0Fr0p{zoX@yq48yyjDL1zC+G%3@G` znzQ)W>b&c8&&!WzH&^S6Akk*q?~r&JOE>{l*=wq^jNumIUDcQCwzuXsnhQ;=N0*g2 zmGYt++^dT*9%w-8~;viXqJ{H|IbED8JIOaj{=R*dLa{6e&>KV0M&sfg! 
zn*8F*B#vr%ELWpez>+m;SezS0AB)hhAh+n`4}l}&iF4Nqz5F!;xsr!8%-u$vwQ9IH zKb0&_S(`E}=UlbO=MkdQD>QjrS*7vfj2{{6Rk>cotT=4R;*1yJ!g?Dk7R)Nnk)?Qx zb1W2g%fitu4iuKAA|DfiQOpS@!bzzm4FyB(_0?LKAC#Nz8rg>zRAf?W=$#_-3#!k= z{8s2F$bkV*m(*Ui@DogqNwHMT)ujKZcy+Pnfq?~PTM3N{wF;qfW~cG>rjT$omCZxu zezS-u&blAuYxyRO`sMg#!ZH+cs9vajB!kt7RT7OLgv@SB@7u5sDs zW!V<|BCjpy9n-OHa7mW9T=84R739k`EPY3p%HKe4trC{0hGihXv8;$AnC-Z0{*p-Cx4Fp44MB7J@uSG7 zZ|3ze8NA}UQItoo;VYkMOJBYXXS37eDL%lZ8sxyC=Z2S>j^oDb9aupxeZ9O3kjR&g z2YIaF%dy%Ei zFd4uNP9xvrvyvMW%~7*xO_*itc6=wza=tsJDQ++yAPw=$%DwYQdgcb$yygWPpoJ}y zTtSYk_z3hB=0P-sN5FJ)1Z4MsA>~1FAwPNGiB+IlNGmPX04`;@gZMpZ-nGc7RpLfbihK2EkLa{r`10aJof!=&a zQBZl&yjhA;r3K>SC(A9oMBCI0^piVZT*##L1Py`u; zV?pMVk>s4*B}1Kd%(ms^H&_DqID#_pTNGA8HpATHzDme0{$YX%G+-y6e2UyL!zLQZ zQydFL(;z01g6J_U*Sw0I+9U}*E}Oin^H zsUy5Zt^y82HZnV1O*lp{F6=7}8FFB5L5U252RX0^;Tf8+g{DukvXBFO5Xb66OrBuE zg+Eu;D8AMhwECA&Leq457P;yd^<1eJ>mccG`NT>l;Wc#uN-lTtqSfn zx1h`6*CJ5vE}U9hYc)ad85RK*jPg;72I!|y?gTKV^wlf`)M5r8k=e?{@&KM;6(AX? zFEcWMx8A@lDQnfeGW>q>^N17-tDNV2WF%>&A8Sx^+FbASmqR zvR|e;b{05on@EkSQ*PZ*=bc=86m}5{b(J?^_7c2Rb#hoCh5Dz_Xw1dlf^~8fw6P;W zSh6HM*XN^x<;7Kt#|fT6Vr57$1b7b=)UIk^@OdOl10-oVgecibN>;$pXn{N*JrtXJ zJXC^(;^XzgrJ1ae8GE+VcDZi6mFAkAE|q=6bn=e^qOMlYq&w4ByrOUteJZQ zvk85Mz#!PO<0&+BID?B?)KJO<)rWa^qqDm9=s(A~H;$xOw*Ja8qrb`Tcke!f5fSDc z1BID~v2^M!?@=5?70lku^}&;yX~T&i7ZIHWS*O8hmSRHl$T=|;K7~qy0tDuw8x&=k zdOFHACv?Me zt%8xIjW3aqys*^tLi2@RU%}MWBOih|6<`bu6*0oj5TEGgGM*EWzy9S8WK64d^&U(v zj6<{(N!d^dnU!JPVfEpVkPy8&Bnjr|<<`cbx957++7$J7kfM+oFhw56;%(U$Mo~|#VKN2s!MdDz{aojG7_x@7l{f5DsHsav)1Wj_KJIz z%Fi)75!Fj>-N87yeq$zUdKLR=MHV8R9zf$vi%-F0RaZf7p>dqa3H2mKASh9mg5HBX zWpAiM1w*N+2*yIgO*A*bFhxy#R>8Wd$;c}zz=Jfb-{*jnXL1#k@iz<`%P4-k!=SF1 zTV}Wg` zB<2j3?kYN_I!rj4dH~jVbRUz%jvDJK{}uV#jT7o8)L6Ph9EOM?RFut|-_7JsCU-HB zSmsPS!fC*)aNgAuG5_ zxRu;J52qmb$2?N7>o%M_a+frhAmJH1Z8cNy*%GJjtOg^mf$4g?2`?`s#sc6yFpz)Q zg_|?#5|mQ-Md3M&fN|22G`t8;VWxA3aw{OGsfF&oieCPZmtv zJ!?{%%B!g}ltBp?xseV-iVs0KGbiCA+$hoDO%n#LjNurpw^tH|x<}4f7AOE$C;Yws z7(U3bTK;H~O&A8`_%U(LrO|G>waYbTRM`E{|*- z@(*s^*30RV+inTDfPxw>VQ~;rCD5IZL5H3QBdI7u+q1D^nUJ>>ow3il3w7WL z8;Ib+Y<(!&)>ivaqsUa53{n(Qu&@B>A8BLp0|@Tps9vhKu%s`cOa}(abi4Ow+f7#U z4jntwgJ(hxl`0R2nE=pGGSZlwY3}Ip`;Xbj@1IUh>-l;UY%FUtI0Zbb$Yvf>s-qsV zpIES8N}76!14ao{w$kQ#N4$CY@x5r_fZ#tlj#9R-E|=m92B~)>UhC;_TgMF7*p;dpGwu{*Fh#Ks6N$cFKQ;NW>L%ZSV}|sJIGL< zV)6=;pJehgNCI;yD2i>%3(VQMJxMcKPs&d`gXWY=&6hc;7gF>kzp+JI} z;JkMbvO@8t1)hDRK7S+;%v=_@-<=7p`ur^LB|`*2P^>5@E!92VsQxiB)z2~MA%QoM znpKeJjs2te^SE*skcCH)XXH3yMXNiOjEz*C85gRs)WOc+>wOOiE%YzsXqSgYey4v9 zZWo|`2ss%abDPmt#xfOAeoKoOAw*kQsG+z~+9>ymTV>kc!Rlet-#|20=DgG^XU@y0 zqq0>+IE0>&09Z$*I;&f9~j6|olXH>{aGellClDE z2~G@O$&$Rr+>bCJodgybIcX&*I_{-r!&M*UrJrU_IU-P@qoCw<7W~!vm7oL$Dowr2 zqSui`rV(n!>H?F0gd{L)(viTt7?>-83El+yY!3qR$4G->6S0PKa5h*o);N4d z#&^7t2W3*{5X0lYfKL%?k8ng!_1d&izcNY^zJ zz?YyEd;qD0YDshk!?kkz!9f9!pG8S{9#9f_hX6itM;<_O$gd)&3jbcdH{=vA_ARjQ?joiFbmM;)mJ^C!rwi4k9!6U2e9ar%TY+P9XZ6T(lVxCYGw0*fI$1fdoOZ zC#$Ws-Eru7MrILxp_HvHB;xLY0Y4L8m}#I7anj^62;+t}nb5nDLdy4MJIMPb`ejfh z&-fjL)_I6A)!{#HS(_}l8cn6U4B`dI@)cJh)>jW?hrXP{vL zvht-E9;e(T0>Ha>rW%G*7Y-^oS+Wgx??lD%wM@lvz2a#HM2a!$bY6MnfIgE;OvDa% zghvk|nVsH~0`ww}KgerDy1`JYQ$Z!Bcn#K1v)CCX6dUT7m=ITrDJjUIv<}{?CZv-A z)NwQFi@fWfF_FHp&R{ILO9)Q<548h1nvjB&a#5b%KjFRN@$0(v9t6Gz>B8BQ8SRc8PtMGJ8(9|J0Z_GIWl z4?s&3zz{J!0D_G7g%t$i;>RH_;Lo7A*r5HyShf%#^0YuqeH8jNHeiEj--|F2h!X*E zMI9QlEDrHyYd! zZ41y4M+Xel0YGWNdIIq@#9ASWMj)W{5d@L+W<=T>m9)4GXFRb1z-Ade9RXm~pjDm? 
zok@z<36Xf>y4Im=r&qfoj)IA!lJJf$$K&Q+5{WtPx}y`_yy0_a`(!1t*y#{>T-xS7 zJ{@1h$}wbRba4Ytjb4~sCBn$wMGuQj*-knup3J7`R`F6cdEhxX0@}HZe;h}ml&-3T z8sCxLYXlhmfux%1B&D&p*m>=o)+CrWZT0|8WzUbC8)*{Eo4eTv?K{I3w6od`0;zo~ G{NO*?88?yu diff --git a/ultralytics/models/sam/modules/__pycache__/transformer.cpython-39.pyc b/ultralytics/models/sam/modules/__pycache__/transformer.cpython-39.pyc index a2a7a17b17d6b444e0c30a2a51f8db74d5cac49f..9a69d56f67acd5506bea9de53d4e24defea54e25 100644 GIT binary patch delta 3172 zcmbVP&2JM&6yMo(?8F}lG2tU2K?6lVln}&652WQIK>4T=pg={5XwAekj+d-=-I+D1 zA(4@a#0jZot{k8}rj=Ud+JB)J)Jt<}dv9-TRfp!tpLb_w-+MFf_j~U} zzy17dH_PSH7F+|jf1CYk5p5j!4|-;C9It$@r6kkhQbfs zz^7qAXM(Uy-3pr(l=+-~S`)HPtAXo>YK&ekyNVV)rj#+NguDp~8T@6bs*0k^JolF1 z`i6Z-FWGN3%qn<(Qew6O{<3Hcoiy%&UqOuG^%u@av_I5U@u~&xR+_VL z#GqG5pLw)cX1*^x3iHBOc+6&_uw-iT0-f=K;#|v+$Ld0Y72hA5GO?RVQ9m`8({mVO`LSumOkfG*$cXA-w|4kltlsCd7mxD;Vy;@PwltBstA zZAI$!b@Gh#>5uJG%S-lCJAd@#__dP;BLMv3oa@i(A36?XhJcT1Cvvn;Kke8?GRuR> z=MFio2h(Rrw-)Igsg+crz=NW`m%iS)9Ts+y#76yl`fy_g9@@FgZNp~ZZ7H#q&7uog z9P>HFJAuz>k%>Q9{56d3df36!JOhl*8Af*lqqDLf46!?jn8Oo(bnHWhiv}2Qw!qw~ zkjQss$f;2wnsZ!vt8RfVfKNU$R3<<|BX~FJRKhF8h*^tEoN6-^!O}3P0aSXiyB1^N!1ld_JA=VBkShhA7t8~043JB% z1eP`Ty_TOv87O5uEQGwSjg zM0Z1|Or8MpHPcRnu4aOWN(1^=r=l_=IEO=7O{&7O zD_^zMY0ScKwZ+GzB#e&<)apd{hWM=IOIqLV-f`?oSwP){`V>$#=SG>e8Yma33OQc* z#Q;wW5Z;C&gVAWu%R2LqL+&l#@A;1CAKE;_vv(6P)`cTxGzV=*H}8QYl{QH#14*h^4uIlYca}G<-$$-GFp^+@ zlQ?7HiJ$f15KP^V@YCciHXl&-=3S_=*mKOfH-+D-f+j;zCBhIhE(~9C$^4yJ1VK8lzi0nD-T{R6S9UEEJ|PkJ6wT1?)wwW5p1f zj+r{0FT2Ia&1T(h)@$%of>TX}(8in;B?b+YDS!LoeAYm-7n!Ttn+zPAP|zr$-S?yn z!J{_b#TTM01>Y0!RihvltQ*!fXVG4C7848BBeGyGI16MU zu|Ni_$B9Ll*J{IUr$b)Qe{DFB`2*ieg0LKYaO3u=3vefUKokKc%?qvIqSaOsi=<_T z6K+4YQU4^WdJp}*dEHGdJ_lOKQ>2Ag<)Nw42@BJ(HQ#)g;KrZQPKiE~pZ&Y}2F z|Gar`@*})GueznwhO=lc(z$i9+khF^W~#!6OUhgqxu?E@~MM1wvQM%^L2 zL+))pjqLalz6x)Ef1TDd+qQ}M{JU<@CVfep=;kF?$V$;=`GAI zOpmWD&M!V}mFs4U?Lb+}jaX`x{Axz2RoFET6xt?t1N}50Tb#JO=S=VjY%mP@#p!xF z0pXDlL47GubVfZc#WheuQ4AM4h%sHOIBb0Sa*VttOkshX5?@XLUWf0ly8HzydPIC$xp${^rdt{BaM#NfvHY}^9UM37s3Su z1)*EM2-T@Q5XudwsuUSr@Z~Uo!R2w%J21$4er&!W^Y7}MOy67 zco=UYOumUU42|GYDZmuNWYKT~`u|jFj{PGORIp8kBpVD!RUp?SkB(K3!a46MK;noP z6W8SZXs`VZ!XkU&V|0d=UBR_Mgdv0*@?7jRzlGtjoR8Jgn{qwYK)2;qtgGEV*2f(^ zofeBz>GWY6EAf=---6&@(vDG&JQWXjZ$Y?x62PFUN%k3I)E|G!$}4yo_dAmacz0mO f4u7z+1b>EyujKIaxGP`9n`pQE5Kk&&75#q!)j-9t diff --git a/ultralytics/models/sam/modules/decoders.py b/ultralytics/models/sam/modules/decoders.py index 0c64a7e..073b1ad 100644 --- a/ultralytics/models/sam/modules/decoders.py +++ b/ultralytics/models/sam/modules/decoders.py @@ -10,6 +10,21 @@ from ultralytics.nn.modules import LayerNorm2d class MaskDecoder(nn.Module): + """ + Decoder module for generating masks and their associated quality scores, using a transformer architecture to predict + masks given image and prompt embeddings. + + Attributes: + transformer_dim (int): Channel dimension for the transformer module. + transformer (nn.Module): The transformer module used for mask prediction. + num_multimask_outputs (int): Number of masks to predict for disambiguating masks. + iou_token (nn.Embedding): Embedding for the IoU token. + num_mask_tokens (int): Number of mask tokens. + mask_tokens (nn.Embedding): Embedding for the mask tokens. + output_upscaling (nn.Sequential): Neural network sequence for upscaling the output. + output_hypernetworks_mlps (nn.ModuleList): Hypernetwork MLPs for generating masks. + iou_prediction_head (nn.Module): MLP for predicting mask quality. 
+ """ def __init__( self, @@ -49,8 +64,9 @@ class MaskDecoder(nn.Module): nn.ConvTranspose2d(transformer_dim // 4, transformer_dim // 8, kernel_size=2, stride=2), activation(), ) - self.output_hypernetworks_mlps = nn.ModuleList([ - MLP(transformer_dim, transformer_dim, transformer_dim // 8, 3) for _ in range(self.num_mask_tokens)]) + self.output_hypernetworks_mlps = nn.ModuleList( + [MLP(transformer_dim, transformer_dim, transformer_dim // 8, 3) for _ in range(self.num_mask_tokens)] + ) self.iou_prediction_head = MLP(transformer_dim, iou_head_hidden_dim, self.num_mask_tokens, iou_head_depth) @@ -98,10 +114,14 @@ class MaskDecoder(nn.Module): sparse_prompt_embeddings: torch.Tensor, dense_prompt_embeddings: torch.Tensor, ) -> Tuple[torch.Tensor, torch.Tensor]: - """Predicts masks. See 'forward' for more details.""" + """ + Predicts masks. + + See 'forward' for more details. + """ # Concatenate output tokens output_tokens = torch.cat([self.iou_token.weight, self.mask_tokens.weight], dim=0) - output_tokens = output_tokens.unsqueeze(0).expand(sparse_prompt_embeddings.size(0), -1, -1) + output_tokens = output_tokens.unsqueeze(0).expand(sparse_prompt_embeddings.shape[0], -1, -1) tokens = torch.cat((output_tokens, sparse_prompt_embeddings), dim=1) # Expand per-image data in batch direction to be per-mask @@ -113,13 +133,14 @@ class MaskDecoder(nn.Module): # Run the transformer hs, src = self.transformer(src, pos_src, tokens) iou_token_out = hs[:, 0, :] - mask_tokens_out = hs[:, 1:(1 + self.num_mask_tokens), :] + mask_tokens_out = hs[:, 1 : (1 + self.num_mask_tokens), :] # Upscale mask embeddings and predict masks using the mask tokens src = src.transpose(1, 2).view(b, c, h, w) upscaled_embedding = self.output_upscaling(src) hyper_in_list: List[torch.Tensor] = [ - self.output_hypernetworks_mlps[i](mask_tokens_out[:, i, :]) for i in range(self.num_mask_tokens)] + self.output_hypernetworks_mlps[i](mask_tokens_out[:, i, :]) for i in range(self.num_mask_tokens) + ] hyper_in = torch.stack(hyper_in_list, dim=1) b, c, h, w = upscaled_embedding.shape masks = (hyper_in @ upscaled_embedding.view(b, c, h * w)).view(b, -1, h, w) @@ -132,7 +153,7 @@ class MaskDecoder(nn.Module): class MLP(nn.Module): """ - Lightly adapted from + MLP (Multi-Layer Perceptron) model lightly adapted from https://github.com/facebookresearch/MaskFormer/blob/main/mask_former/modeling/transformer/transformer_predictor.py """ @@ -144,6 +165,16 @@ class MLP(nn.Module): num_layers: int, sigmoid_output: bool = False, ) -> None: + """ + Initializes the MLP (Multi-Layer Perceptron) model. + + Args: + input_dim (int): The dimensionality of the input features. + hidden_dim (int): The dimensionality of the hidden layers. + output_dim (int): The dimensionality of the output layer. + num_layers (int): The number of hidden layers. + sigmoid_output (bool, optional): Apply a sigmoid activation to the output layer. Defaults to False. 
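+
+        Example:
+            A minimal sketch (hypothetical dimensions):
+            >>> mlp = MLP(input_dim=256, hidden_dim=256, output_dim=32, num_layers=3)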
+        """
         super().__init__()
         self.num_layers = num_layers
         h = [hidden_dim] * (num_layers - 1)
diff --git a/ultralytics/models/sam/modules/encoders.py b/ultralytics/models/sam/modules/encoders.py
index eb9352f..a51c347 100644
--- a/ultralytics/models/sam/modules/encoders.py
+++ b/ultralytics/models/sam/modules/encoders.py
@@ -10,27 +10,41 @@ import torch.nn.functional as F
 from ultralytics.nn.modules import LayerNorm2d, MLPBlock
 
 
-# This class and its supporting functions below lightly adapted from the ViTDet backbone available at: https://github.com/facebookresearch/detectron2/blob/main/detectron2/modeling/backbone/vit.py # noqa
 class ImageEncoderViT(nn.Module):
+    """
+    An image encoder using Vision Transformer (ViT) architecture for encoding an image into a compact latent space. The
+    encoder takes an image, splits it into patches, and processes these patches through a series of transformer blocks.
+    The encoded patches are then processed through a neck to generate the final encoded representation.
+
+    This class and its supporting functions below are lightly adapted from the ViTDet backbone available at
+    https://github.com/facebookresearch/detectron2/blob/main/detectron2/modeling/backbone/vit.py.
+
+    Attributes:
+        img_size (int): Dimension of input images, assumed to be square.
+        patch_embed (PatchEmbed): Module for patch embedding.
+        pos_embed (nn.Parameter, optional): Absolute positional embedding for patches.
+        blocks (nn.ModuleList): List of transformer blocks for processing patch embeddings.
+        neck (nn.Sequential): Neck module to further process the output.
+    """
 
     def __init__(
-            self,
-            img_size: int = 1024,
-            patch_size: int = 16,
-            in_chans: int = 3,
-            embed_dim: int = 768,
-            depth: int = 12,
-            num_heads: int = 12,
-            mlp_ratio: float = 4.0,
-            out_chans: int = 256,
-            qkv_bias: bool = True,
-            norm_layer: Type[nn.Module] = nn.LayerNorm,
-            act_layer: Type[nn.Module] = nn.GELU,
-            use_abs_pos: bool = True,
-            use_rel_pos: bool = False,
-            rel_pos_zero_init: bool = True,
-            window_size: int = 0,
-            global_attn_indexes: Tuple[int, ...] = (),
+        self,
+        img_size: int = 1024,
+        patch_size: int = 16,
+        in_chans: int = 3,
+        embed_dim: int = 768,
+        depth: int = 12,
+        num_heads: int = 12,
+        mlp_ratio: float = 4.0,
+        out_chans: int = 256,
+        qkv_bias: bool = True,
+        norm_layer: Type[nn.Module] = nn.LayerNorm,
+        act_layer: Type[nn.Module] = nn.GELU,
+        use_abs_pos: bool = True,
+        use_rel_pos: bool = False,
+        rel_pos_zero_init: bool = True,
+        window_size: int = 0,
+        global_attn_indexes: Tuple[int, ...] = (),
     ) -> None:
         """
         Args:
@@ -100,6 +114,9 @@ class ImageEncoderViT(nn.Module):
         )
 
     def forward(self, x: torch.Tensor) -> torch.Tensor:
+        """Processes input through patch embedding, applies positional embedding if present, and passes through blocks
+        and neck.
+        """
         x = self.patch_embed(x)
         if self.pos_embed is not None:
             x = x + self.pos_embed
@@ -109,6 +126,22 @@ class ImageEncoderViT(nn.Module):
 
 
 class PromptEncoder(nn.Module):
+    """
+    Encodes different types of prompts, including points, boxes, and masks, for input to SAM's mask decoder. The encoder
+    produces both sparse and dense embeddings for the input prompts.
+
+    Attributes:
+        embed_dim (int): Dimension of the embeddings.
+        input_image_size (Tuple[int, int]): Size of the input image as (H, W).
+        image_embedding_size (Tuple[int, int]): Spatial size of the image embedding as (H, W).
+        pe_layer (PositionEmbeddingRandom): Module for random position embedding.
+        num_point_embeddings (int): Number of point embeddings for different types of points.
+ point_embeddings (nn.ModuleList): List of point embeddings. + not_a_point_embed (nn.Embedding): Embedding for points that are not a part of any label. + mask_input_size (Tuple[int, int]): Size of the input mask. + mask_downscaling (nn.Sequential): Neural network for downscaling the mask. + no_mask_embed (nn.Embedding): Embedding for cases where no mask is provided. + """ def __init__( self, @@ -157,20 +190,15 @@ class PromptEncoder(nn.Module): def get_dense_pe(self) -> torch.Tensor: """ - Returns the positional encoding used to encode point prompts, - applied to a dense set of points the shape of the image encoding. + Returns the positional encoding used to encode point prompts, applied to a dense set of points the shape of the + image encoding. Returns: torch.Tensor: Positional encoding with shape 1x(embed_dim)x(embedding_h)x(embedding_w) """ return self.pe_layer(self.image_embedding_size).unsqueeze(0) - def _embed_points( - self, - points: torch.Tensor, - labels: torch.Tensor, - pad: bool, - ) -> torch.Tensor: + def _embed_points(self, points: torch.Tensor, labels: torch.Tensor, pad: bool) -> torch.Tensor: """Embeds point prompts.""" points = points + 0.5 # Shift to center of pixel if pad: @@ -204,9 +232,7 @@ class PromptEncoder(nn.Module): boxes: Optional[torch.Tensor], masks: Optional[torch.Tensor], ) -> int: - """ - Gets the batch size of the output given the batch size of the input prompts. - """ + """Gets the batch size of the output given the batch size of the input prompts.""" if points is not None: return points[0].shape[0] elif boxes is not None: @@ -217,6 +243,7 @@ class PromptEncoder(nn.Module): return 1 def _get_device(self) -> torch.device: + """Returns the device of the first point embedding's weight tensor.""" return self.point_embeddings[0].weight.device def forward( @@ -251,23 +278,22 @@ class PromptEncoder(nn.Module): if masks is not None: dense_embeddings = self._embed_masks(masks) else: - dense_embeddings = self.no_mask_embed.weight.reshape(1, -1, 1, - 1).expand(bs, -1, self.image_embedding_size[0], - self.image_embedding_size[1]) + dense_embeddings = self.no_mask_embed.weight.reshape(1, -1, 1, 1).expand( + bs, -1, self.image_embedding_size[0], self.image_embedding_size[1] + ) return sparse_embeddings, dense_embeddings class PositionEmbeddingRandom(nn.Module): - """ - Positional encoding using random spatial frequencies. - """ + """Positional encoding using random spatial frequencies.""" def __init__(self, num_pos_feats: int = 64, scale: Optional[float] = None) -> None: + """Initializes a position embedding using random spatial frequencies.""" super().__init__() if scale is None or scale <= 0.0: scale = 1.0 - self.register_buffer('positional_encoding_gaussian_matrix', scale * torch.randn((2, num_pos_feats))) + self.register_buffer("positional_encoding_gaussian_matrix", scale * torch.randn((2, num_pos_feats))) # Set non-deterministic for forward() error 'cumsum_cuda_kernel does not have a deterministic implementation' torch.use_deterministic_algorithms(False) @@ -275,11 +301,11 @@ class PositionEmbeddingRandom(nn.Module): def _pe_encoding(self, coords: torch.Tensor) -> torch.Tensor: """Positionally encode points that are normalized to [0,1].""" - # assuming coords are in [0, 1]^2 square and have d_1 x ... x d_n x 2 shape + # Assuming coords are in [0, 1]^2 square and have d_1 x ... x d_n x 2 shape coords = 2 * coords - 1 coords = coords @ self.positional_encoding_gaussian_matrix coords = 2 * np.pi * coords - # outputs d_1 x ... x d_n x C shape + # Outputs d_1 x ... 
x d_n x C shape return torch.cat([torch.sin(coords), torch.cos(coords)], dim=-1) def forward(self, size: Tuple[int, int]) -> torch.Tensor: @@ -304,7 +330,7 @@ class PositionEmbeddingRandom(nn.Module): class Block(nn.Module): - """Transformer blocks with support of window attention and residual propagation blocks""" + """Transformer blocks with support of window attention and residual propagation blocks.""" def __init__( self, @@ -351,6 +377,7 @@ class Block(nn.Module): self.window_size = window_size def forward(self, x: torch.Tensor) -> torch.Tensor: + """Executes a forward pass through the transformer block with window attention and non-overlapping windows.""" shortcut = x x = self.norm1(x) # Window partition @@ -380,6 +407,8 @@ class Attention(nn.Module): input_size: Optional[Tuple[int, int]] = None, ) -> None: """ + Initialize Attention module. + Args: dim (int): Number of input channels. num_heads (int): Number of attention heads. @@ -391,19 +420,20 @@ class Attention(nn.Module): super().__init__() self.num_heads = num_heads head_dim = dim // num_heads - self.scale = head_dim ** -0.5 + self.scale = head_dim**-0.5 self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias) self.proj = nn.Linear(dim, dim) self.use_rel_pos = use_rel_pos if self.use_rel_pos: - assert (input_size is not None), 'Input size must be provided if using relative positional encoding.' - # initialize relative positional embeddings + assert input_size is not None, "Input size must be provided if using relative positional encoding." + # Initialize relative positional embeddings self.rel_pos_h = nn.Parameter(torch.zeros(2 * input_size[0] - 1, head_dim)) self.rel_pos_w = nn.Parameter(torch.zeros(2 * input_size[1] - 1, head_dim)) def forward(self, x: torch.Tensor) -> torch.Tensor: + """Applies the forward operation including attention, normalization, MLP, and indexing within window limits.""" B, H, W, _ = x.shape # qkv with shape (3, B, nHead, H * W, C) qkv = self.qkv(x).reshape(B, H * W, 3, self.num_heads, -1).permute(2, 0, 3, 1, 4) @@ -444,10 +474,12 @@ def window_partition(x: torch.Tensor, window_size: int) -> Tuple[torch.Tensor, T return windows, (Hp, Wp) -def window_unpartition(windows: torch.Tensor, window_size: int, pad_hw: Tuple[int, int], - hw: Tuple[int, int]) -> torch.Tensor: +def window_unpartition( + windows: torch.Tensor, window_size: int, pad_hw: Tuple[int, int], hw: Tuple[int, int] +) -> torch.Tensor: """ Window unpartition into original sequences and removing padding. + Args: windows (tensor): input tokens with [B * num_windows, window_size, window_size, C]. window_size (int): window size. @@ -470,8 +502,8 @@ def window_unpartition(windows: torch.Tensor, window_size: int, pad_hw: Tuple[in def get_rel_pos(q_size: int, k_size: int, rel_pos: torch.Tensor) -> torch.Tensor: """ - Get relative positional embeddings according to the relative positions of - query and key sizes. + Get relative positional embeddings according to the relative positions of query and key sizes. + Args: q_size (int): size of query q. k_size (int): size of key k. 
@@ -487,7 +519,7 @@ def get_rel_pos(q_size: int, k_size: int, rel_pos: torch.Tensor) -> torch.Tensor rel_pos_resized = F.interpolate( rel_pos.reshape(1, rel_pos.shape[0], -1).permute(0, 2, 1), size=max_rel_dist, - mode='linear', + mode="linear", ) rel_pos_resized = rel_pos_resized.reshape(-1, max_rel_dist).permute(1, 0) else: @@ -510,8 +542,9 @@ def add_decomposed_rel_pos( k_size: Tuple[int, int], ) -> torch.Tensor: """ - Calculate decomposed Relative Positional Embeddings from :paper:`mvitv2`. - https://github.com/facebookresearch/mvit/blob/19786631e330df9f3622e5402b4a419a263a2c80/mvit/models/attention.py # noqa B950 + Calculate decomposed Relative Positional Embeddings from mvitv2 paper at + https://github.com/facebookresearch/mvit/blob/main/mvit/models/attention.py. + Args: attn (Tensor): attention map. q (Tensor): query q in the attention layer with shape (B, q_h * q_w, C). @@ -530,29 +563,30 @@ def add_decomposed_rel_pos( B, _, dim = q.shape r_q = q.reshape(B, q_h, q_w, dim) - rel_h = torch.einsum('bhwc,hkc->bhwk', r_q, Rh) - rel_w = torch.einsum('bhwc,wkc->bhwk', r_q, Rw) + rel_h = torch.einsum("bhwc,hkc->bhwk", r_q, Rh) + rel_w = torch.einsum("bhwc,wkc->bhwk", r_q, Rw) attn = (attn.view(B, q_h, q_w, k_h, k_w) + rel_h[:, :, :, :, None] + rel_w[:, :, :, None, :]).view( - B, q_h * q_w, k_h * k_w) + B, q_h * q_w, k_h * k_w + ) return attn class PatchEmbed(nn.Module): - """ - Image to Patch Embedding. - """ + """Image to Patch Embedding.""" def __init__( - self, - kernel_size: Tuple[int, int] = (16, 16), - stride: Tuple[int, int] = (16, 16), - padding: Tuple[int, int] = (0, 0), - in_chans: int = 3, - embed_dim: int = 768, + self, + kernel_size: Tuple[int, int] = (16, 16), + stride: Tuple[int, int] = (16, 16), + padding: Tuple[int, int] = (0, 0), + in_chans: int = 3, + embed_dim: int = 768, ) -> None: """ + Initialize PatchEmbed module. + Args: kernel_size (Tuple): kernel size of the projection layer. stride (Tuple): stride of the projection layer. @@ -565,4 +599,5 @@ class PatchEmbed(nn.Module): self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=kernel_size, stride=stride, padding=padding) def forward(self, x: torch.Tensor) -> torch.Tensor: + """Computes patch embedding by applying convolution and transposing resulting tensor.""" return self.proj(x).permute(0, 2, 3, 1) # B C H W -> B H W C diff --git a/ultralytics/models/sam/modules/sam.py b/ultralytics/models/sam/modules/sam.py index 5649920..95d9bbe 100644 --- a/ultralytics/models/sam/modules/sam.py +++ b/ultralytics/models/sam/modules/sam.py @@ -16,8 +16,23 @@ from .encoders import ImageEncoderViT, PromptEncoder class Sam(nn.Module): + """ + Sam (Segment Anything Model) is designed for object segmentation tasks. It uses image encoders to generate image + embeddings, and prompt encoders to encode various types of input prompts. These embeddings are then used by the mask + decoder to predict object masks. + + Attributes: + mask_threshold (float): Threshold value for mask prediction. + image_format (str): Format of the input image, default is 'RGB'. + image_encoder (ImageEncoderViT): The backbone used to encode the image into embeddings. + prompt_encoder (PromptEncoder): Encodes various types of input prompts. + mask_decoder (MaskDecoder): Predicts object masks from the image and prompt embeddings. + pixel_mean (List[float]): Mean pixel values for image normalization. + pixel_std (List[float]): Standard deviation values for image normalization. 
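+
+    Example:
+        A construction-only sketch (assumes image encoder, prompt encoder, and mask decoder instances built
+        elsewhere, e.g. by build_sam):
+        >>> sam = Sam(image_encoder=image_encoder, prompt_encoder=prompt_encoder, mask_decoder=mask_decoder)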
+    """
+
     mask_threshold: float = 0.0
-    image_format: str = 'RGB'
+    image_format: str = "RGB"
 
     def __init__(
         self,
@@ -25,25 +40,26 @@ class Sam(nn.Module):
         prompt_encoder: PromptEncoder,
         mask_decoder: MaskDecoder,
         pixel_mean: List[float] = (123.675, 116.28, 103.53),
-        pixel_std: List[float] = (58.395, 57.12, 57.375)
+        pixel_std: List[float] = (58.395, 57.12, 57.375),
     ) -> None:
         """
-        SAM predicts object masks from an image and input prompts.
+        Initialize the Sam class to predict object masks from an image and input prompts.
 
         Note:
             All forward() operations moved to SAMPredictor.
 
         Args:
-            image_encoder (ImageEncoderViT): The backbone used to encode the image into image embeddings that allow for
-                efficient mask prediction.
-            prompt_encoder (PromptEncoder): Encodes various types of input prompts.
-            mask_decoder (MaskDecoder): Predicts masks from the image embeddings and encoded prompts.
-            pixel_mean (list(float)): Mean values for normalizing pixels in the input image.
-            pixel_std (list(float)): Std values for normalizing pixels in the input image.
+            image_encoder (ImageEncoderViT): The backbone used to encode the image into image embeddings.
+            prompt_encoder (PromptEncoder): Encodes various types of input prompts.
+            mask_decoder (MaskDecoder): Predicts masks from the image embeddings and encoded prompts.
+            pixel_mean (List[float], optional): Mean values for normalizing pixels in the input image. Defaults to
+                (123.675, 116.28, 103.53).
+            pixel_std (List[float], optional): Std values for normalizing pixels in the input image. Defaults to
+                (58.395, 57.12, 57.375).
         """
         super().__init__()
         self.image_encoder = image_encoder
         self.prompt_encoder = prompt_encoder
         self.mask_decoder = mask_decoder
-        self.register_buffer('pixel_mean', torch.Tensor(pixel_mean).view(-1, 1, 1), False)
-        self.register_buffer('pixel_std', torch.Tensor(pixel_std).view(-1, 1, 1), False)
+        self.register_buffer("pixel_mean", torch.Tensor(pixel_mean).view(-1, 1, 1), False)
+        self.register_buffer("pixel_std", torch.Tensor(pixel_std).view(-1, 1, 1), False)
diff --git a/ultralytics/models/sam/modules/tiny_encoder.py b/ultralytics/models/sam/modules/tiny_encoder.py
index ca8de50..98f5ac0 100644
--- a/ultralytics/models/sam/modules/tiny_encoder.py
+++ b/ultralytics/models/sam/modules/tiny_encoder.py
@@ -21,19 +21,27 @@ from ultralytics.utils.instance import to_2tuple
 
 
 class Conv2d_BN(torch.nn.Sequential):
+    """A sequential container that performs 2D convolution followed by batch normalization."""
 
     def __init__(self, a, b, ks=1, stride=1, pad=0, dilation=1, groups=1, bn_weight_init=1):
+        """Initializes the Conv2d_BN container with the given convolution parameters (input channels `a`, output
+        channels `b`, kernel size, stride, padding, dilation, groups) and batch-norm weight initialization.
+        """
         super().__init__()
-        self.add_module('c', torch.nn.Conv2d(a, b, ks, stride, pad, dilation, groups, bias=False))
+        self.add_module("c", torch.nn.Conv2d(a, b, ks, stride, pad, dilation, groups, bias=False))
         bn = torch.nn.BatchNorm2d(b)
         torch.nn.init.constant_(bn.weight, bn_weight_init)
         torch.nn.init.constant_(bn.bias, 0)
-        self.add_module('bn', bn)
+        self.add_module("bn", bn)
 
 
 class PatchEmbed(nn.Module):
+    """Embeds images into patches and projects them into a specified embedding dimension."""
 
     def __init__(self, in_chans, embed_dim, resolution, activation):
+        """Initialize the PatchEmbed module with the specified input channels, embedding dimension, resolution, and
+        activation function.
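+
+        Example:
+            A minimal sketch (hypothetical values):
+            >>> embed = PatchEmbed(in_chans=3, embed_dim=96, resolution=224, activation=nn.GELU)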
+        """
         super().__init__()
         img_size: Tuple[int, int] = to_2tuple(resolution)
         self.patches_resolution = (img_size[0] // 4, img_size[1] // 4)
@@ -48,12 +56,17 @@ class PatchEmbed(nn.Module):
         )
 
     def forward(self, x):
+        """Runs input tensor 'x' through the PatchEmbed model's sequence of operations."""
         return self.seq(x)
 
 
 class MBConv(nn.Module):
+    """Mobile Inverted Bottleneck Conv (MBConv) layer, part of the EfficientNet architecture."""
 
     def __init__(self, in_chans, out_chans, expand_ratio, activation, drop_path):
+        """Initializes the MBConv layer with the given input/output channels, expansion ratio, activation, and drop
+        path.
+        """
         super().__init__()
         self.in_chans = in_chans
         self.hidden_chans = int(in_chans * expand_ratio)
@@ -73,6 +86,7 @@ class MBConv(nn.Module):
         self.drop_path = nn.Identity()
 
     def forward(self, x):
+        """Implements the MBConv forward pass: expansion, depthwise convolution, projection, and residual addition."""
         shortcut = x
         x = self.conv1(x)
         x = self.act1(x)
@@ -85,8 +99,12 @@ class MBConv(nn.Module):
 
 
 class PatchMerging(nn.Module):
+    """Merges neighboring patches in the feature map and projects to a new dimension."""
 
     def __init__(self, input_resolution, dim, out_dim, activation):
+        """Initializes the PatchMerging module with the given input resolution, input/output dimensions, and
+        activation function.
+        """
         super().__init__()
 
         self.input_resolution = input_resolution
@@ -99,6 +117,7 @@ class PatchMerging(nn.Module):
         self.conv3 = Conv2d_BN(out_dim, out_dim, 1, 1, 0)
 
     def forward(self, x):
+        """Applies forward pass on the input utilizing convolution and activation layers, and returns the result."""
         if x.ndim == 3:
             H, W = self.input_resolution
             B = len(x)
@@ -115,6 +134,11 @@ class PatchMerging(nn.Module):
 
 
 class ConvLayer(nn.Module):
+    """
+    Convolutional Layer featuring multiple MobileNetV3-style inverted bottleneck convolutions (MBConv).
+
+    Optionally applies downsample operations to the output, and provides support for gradient checkpointing.
+    """
 
     def __init__(
         self,
@@ -122,41 +146,69 @@
         input_resolution,
         depth,
         activation,
-        drop_path=0.,
+        drop_path=0.0,
         downsample=None,
         use_checkpoint=False,
         out_dim=None,
-        conv_expand_ratio=4.,
+        conv_expand_ratio=4.0,
     ):
+        """
+        Initializes the ConvLayer with the given dimensions and settings.
+
+        Args:
+            dim (int): The dimensionality of the input and output.
+            input_resolution (Tuple[int, int]): The resolution of the input image.
+            depth (int): The number of MBConv layers in the block.
+            activation (Callable): Activation function applied after each convolution.
+            drop_path (Union[float, List[float]]): Drop path rate. Single float or a list of floats for each MBConv.
+            downsample (Optional[Callable]): Function for downsampling the output. None to skip downsampling.
+            use_checkpoint (bool): Whether to use gradient checkpointing to save memory.
+            out_dim (Optional[int]): The dimensionality of the output. None means it will be the same as `dim`.
+            conv_expand_ratio (float): Expansion ratio for the MBConv layers.
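+
+        Example:
+            A minimal sketch (hypothetical values):
+            >>> layer = ConvLayer(dim=64, input_resolution=(56, 56), depth=2, activation=nn.GELU)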
+        """
         super().__init__()
         self.dim = dim
         self.input_resolution = input_resolution
         self.depth = depth
         self.use_checkpoint = use_checkpoint
 
-        # build blocks
-        self.blocks = nn.ModuleList([
-            MBConv(
-                dim,
-                dim,
-                conv_expand_ratio,
-                activation,
-                drop_path[i] if isinstance(drop_path, list) else drop_path,
-            ) for i in range(depth)])
+        # Build blocks
+        self.blocks = nn.ModuleList(
+            [
+                MBConv(
+                    dim,
+                    dim,
+                    conv_expand_ratio,
+                    activation,
+                    drop_path[i] if isinstance(drop_path, list) else drop_path,
+                )
+                for i in range(depth)
+            ]
+        )
 
-        # patch merging layer
-        self.downsample = None if downsample is None else downsample(
-            input_resolution, dim=dim, out_dim=out_dim, activation=activation)
+        # Patch merging layer
+        self.downsample = (
+            None
+            if downsample is None
+            else downsample(input_resolution, dim=dim, out_dim=out_dim, activation=activation)
+        )
 
     def forward(self, x):
+        """Processes the input through a series of convolutional layers and returns the activated output."""
        for blk in self.blocks:
            x = checkpoint.checkpoint(blk, x) if self.use_checkpoint else blk(x)
        return x if self.downsample is None else self.downsample(x)
 
 
 class Mlp(nn.Module):
+    """
+    Multi-layer Perceptron (MLP) for transformer architectures.
 
-    def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.):
+    This layer takes an input with in_features, applies layer normalization and two fully-connected layers.
+    """
+
+    def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.0):
+        """Initializes the Mlp module with the given input/hidden/output features, activation layer, and dropout."""
         super().__init__()
         out_features = out_features or in_features
         hidden_features = hidden_features or in_features
@@ -167,6 +219,7 @@ class Mlp(nn.Module):
         self.drop = nn.Dropout(drop)
 
     def forward(self, x):
+        """Applies layer normalization, two fully-connected layers with activation, and dropout to the input x."""
         x = self.norm(x)
         x = self.fc1(x)
         x = self.act(x)
@@ -176,20 +229,41 @@ class Mlp(nn.Module):
 
 
 class Attention(torch.nn.Module):
+    """
+    Multi-head attention module with support for spatial awareness, applying attention biases based on spatial
+    resolution. Implements trainable attention biases for each unique offset between spatial positions in the resolution
+    grid.
+
+    Attributes:
+        ab (Tensor, optional): Cached attention biases for inference, deleted during training.
+    """
 
     def __init__(
-            self,
-            dim,
-            key_dim,
-            num_heads=8,
-            attn_ratio=4,
-            resolution=(14, 14),
+        self,
+        dim,
+        key_dim,
+        num_heads=8,
+        attn_ratio=4,
+        resolution=(14, 14),
     ):
+        """
+        Initializes the Attention module.
+
+        Args:
+            dim (int): The dimensionality of the input and output.
+            key_dim (int): The dimensionality of the keys and queries.
+            num_heads (int, optional): Number of attention heads. Default is 8.
+            attn_ratio (float, optional): Attention ratio, affecting the dimensions of the value vectors. Default is 4.
+            resolution (Tuple[int, int], optional): Spatial resolution of the input feature map. Default is (14, 14).
+
+        Raises:
+            AssertionError: If `resolution` is not a tuple of length 2.
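+
+        Example:
+            A minimal sketch (hypothetical values; the input to forward() is expected to have shape (B, N, dim)
+            with N = resolution[0] * resolution[1]):
+            >>> attn = Attention(dim=256, key_dim=32, num_heads=8, resolution=(7, 7))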
+ """ super().__init__() - # (h, w) + assert isinstance(resolution, tuple) and len(resolution) == 2 self.num_heads = num_heads - self.scale = key_dim ** -0.5 + self.scale = key_dim**-0.5 self.key_dim = key_dim self.nh_kd = nh_kd = key_dim * num_heads self.d = int(attn_ratio * key_dim) @@ -212,18 +286,20 @@ class Attention(torch.nn.Module): attention_offsets[offset] = len(attention_offsets) idxs.append(attention_offsets[offset]) self.attention_biases = torch.nn.Parameter(torch.zeros(num_heads, len(attention_offsets))) - self.register_buffer('attention_bias_idxs', torch.LongTensor(idxs).view(N, N), persistent=False) + self.register_buffer("attention_bias_idxs", torch.LongTensor(idxs).view(N, N), persistent=False) @torch.no_grad() def train(self, mode=True): + """Sets the module in training mode and handles attribute 'ab' based on the mode.""" super().train(mode) - if mode and hasattr(self, 'ab'): + if mode and hasattr(self, "ab"): del self.ab else: self.ab = self.attention_biases[:, self.attention_bias_idxs] - def forward(self, x): # x (B,N,C) - B, N, _ = x.shape + def forward(self, x): # x + """Performs forward pass over the input tensor 'x' by applying normalization and querying keys/values.""" + B, N, _ = x.shape # B, N, C # Normalization x = self.norm(x) @@ -237,28 +313,16 @@ class Attention(torch.nn.Module): v = v.permute(0, 2, 1, 3) self.ab = self.ab.to(self.attention_biases.device) - attn = ((q @ k.transpose(-2, -1)) * self.scale + - (self.attention_biases[:, self.attention_bias_idxs] if self.training else self.ab)) + attn = (q @ k.transpose(-2, -1)) * self.scale + ( + self.attention_biases[:, self.attention_bias_idxs] if self.training else self.ab + ) attn = attn.softmax(dim=-1) x = (attn @ v).transpose(1, 2).reshape(B, N, self.dh) return self.proj(x) class TinyViTBlock(nn.Module): - """ - TinyViT Block. - - Args: - dim (int): Number of input channels. - input_resolution (tuple[int, int]): Input resolution. - num_heads (int): Number of attention heads. - window_size (int): Window size. - mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. - drop (float, optional): Dropout rate. Default: 0.0 - drop_path (float, optional): Stochastic depth rate. Default: 0.0 - local_conv_size (int): the kernel size of the convolution between Attention and MLP. Default: 3 - activation (torch.nn): the activation function. Default: nn.GELU - """ + """TinyViT Block that applies self-attention and a local convolution to the input.""" def __init__( self, @@ -266,17 +330,35 @@ class TinyViTBlock(nn.Module): input_resolution, num_heads, window_size=7, - mlp_ratio=4., - drop=0., - drop_path=0., + mlp_ratio=4.0, + drop=0.0, + drop_path=0.0, local_conv_size=3, activation=nn.GELU, ): + """ + Initializes the TinyViTBlock. + + Args: + dim (int): The dimensionality of the input and output. + input_resolution (Tuple[int, int]): Spatial resolution of the input feature map. + num_heads (int): Number of attention heads. + window_size (int, optional): Window size for attention. Default is 7. + mlp_ratio (float, optional): Ratio of mlp hidden dim to embedding dim. Default is 4. + drop (float, optional): Dropout rate. Default is 0. + drop_path (float, optional): Stochastic depth rate. Default is 0. + local_conv_size (int, optional): The kernel size of the local convolution. Default is 3. + activation (torch.nn, optional): Activation function for MLP. Default is nn.GELU. + + Raises: + AssertionError: If `window_size` is not greater than 0. + AssertionError: If `dim` is not divisible by `num_heads`. 
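+
+        Example:
+            A minimal sketch (hypothetical values):
+            >>> block = TinyViTBlock(dim=96, input_resolution=(56, 56), num_heads=3, window_size=7)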
+        """
         super().__init__()
         self.dim = dim
         self.input_resolution = input_resolution
         self.num_heads = num_heads
-        assert window_size > 0, 'window_size must be greater than 0'
+        assert window_size > 0, "window_size must be greater than 0"
         self.window_size = window_size
         self.mlp_ratio = mlp_ratio
 
@@ -284,7 +366,7 @@
         # self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
         self.drop_path = nn.Identity()
 
-        assert dim % num_heads == 0, 'dim must be divisible by num_heads'
+        assert dim % num_heads == 0, "dim must be divisible by num_heads"
         head_dim = dim // num_heads
 
         window_resolution = (window_size, window_size)
@@ -298,9 +380,12 @@
         self.local_conv = Conv2d_BN(dim, dim, ks=local_conv_size, stride=1, pad=pad, groups=dim)
 
     def forward(self, x):
+        """Applies attention-based transformation or padding to input 'x' before passing it through a local
+        convolution.
+        """
         H, W = self.input_resolution
         B, L, C = x.shape
-        assert L == H * W, 'input feature has wrong size'
+        assert L == H * W, "input feature has wrong size"
         res_x = x
         if H == self.window_size and W == self.window_size:
             x = self.attn(x)
@@ -316,11 +401,14 @@
             pH, pW = H + pad_b, W + pad_r
             nH = pH // self.window_size
             nW = pW // self.window_size
-            # window partition
-            x = x.view(B, nH, self.window_size, nW, self.window_size,
-                       C).transpose(2, 3).reshape(B * nH * nW, self.window_size * self.window_size, C)
+            # Window partition
+            x = (
+                x.view(B, nH, self.window_size, nW, self.window_size, C)
+                .transpose(2, 3)
+                .reshape(B * nH * nW, self.window_size * self.window_size, C)
+            )
             x = self.attn(x)
-            # window reverse
+            # Window reverse
             x = x.view(B, nH, nW, self.window_size, self.window_size, C).transpose(2, 3).reshape(B, pH, pW, C)
 
             if padding:
@@ -337,29 +425,17 @@
         return x + self.drop_path(self.mlp(x))
 
     def extra_repr(self) -> str:
-        return f'dim={self.dim}, input_resolution={self.input_resolution}, num_heads={self.num_heads}, ' \
-               f'window_size={self.window_size}, mlp_ratio={self.mlp_ratio}'
+        """Returns a formatted string representing the TinyViTBlock's parameters: dimension, input resolution, number
+        of attention heads, window size, and MLP ratio.
+        """
+        return (
+            f"dim={self.dim}, input_resolution={self.input_resolution}, num_heads={self.num_heads}, "
+            f"window_size={self.window_size}, mlp_ratio={self.mlp_ratio}"
+        )
 
 
 class BasicLayer(nn.Module):
-    """
-    A basic TinyViT layer for one stage.
-
-    Args:
-        dim (int): Number of input channels.
-        input_resolution (tuple[int]): Input resolution.
-        depth (int): Number of blocks.
-        num_heads (int): Number of attention heads.
-        window_size (int): Local window size.
-        mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. Default: 0.0
-        drop (float, optional): Dropout rate. Default: 0.0
-        drop_path (float | tuple[float], optional): Stochastic depth rate. Default: 0.0
-        downsample (nn.Module | None, optional): Downsample layer at the end of the layer. Default: None
-        use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False.
-        local_conv_size (int): the kernel size of the depthwise convolution between attention and MLP. Default: 3
-        activation (torch.nn): the activation function. Default: nn.GELU
-        out_dim (int | optional): the output dimension of the layer. Default: None
-    """
+    """A basic TinyViT layer for one stage in a TinyViT architecture."""
 
     def __init__(
         self,
@@ -368,57 +444,90 @@
         depth,
         num_heads,
         window_size,
-        mlp_ratio=4.,
-        drop=0.,
-        drop_path=0.,
+        mlp_ratio=4.0,
+        drop=0.0,
+        drop_path=0.0,
         downsample=None,
         use_checkpoint=False,
         local_conv_size=3,
         activation=nn.GELU,
         out_dim=None,
     ):
+        """
+        Initializes the BasicLayer.
+
+        Args:
+            dim (int): The dimensionality of the input and output.
+            input_resolution (Tuple[int, int]): Spatial resolution of the input feature map.
+            depth (int): Number of TinyViT blocks.
+            num_heads (int): Number of attention heads.
+            window_size (int): Local window size.
+            mlp_ratio (float, optional): Ratio of mlp hidden dim to embedding dim. Default is 4.
+            drop (float, optional): Dropout rate. Default is 0.
+            drop_path (float | tuple[float], optional): Stochastic depth rate. Default is 0.
+            downsample (nn.Module | None, optional): Downsample layer at the end of the layer. Default is None.
+            use_checkpoint (bool, optional): Whether to use checkpointing to save memory. Default is False.
+            local_conv_size (int, optional): Kernel size of the local convolution. Default is 3.
+            activation (torch.nn, optional): Activation function for MLP. Default is nn.GELU.
+            out_dim (int | None, optional): The output dimension of the layer. Default is None.
+
+        Raises:
+            ValueError: If `drop_path` is a list of float but its length doesn't match `depth`.
+        """
         super().__init__()
         self.dim = dim
         self.input_resolution = input_resolution
         self.depth = depth
         self.use_checkpoint = use_checkpoint
 
-        # build blocks
-        self.blocks = nn.ModuleList([
-            TinyViTBlock(
-                dim=dim,
-                input_resolution=input_resolution,
-                num_heads=num_heads,
-                window_size=window_size,
-                mlp_ratio=mlp_ratio,
-                drop=drop,
-                drop_path=drop_path[i] if isinstance(drop_path, list) else drop_path,
-                local_conv_size=local_conv_size,
-                activation=activation,
-            ) for i in range(depth)])
+        # Build blocks
+        self.blocks = nn.ModuleList(
+            [
+                TinyViTBlock(
+                    dim=dim,
+                    input_resolution=input_resolution,
+                    num_heads=num_heads,
+                    window_size=window_size,
+                    mlp_ratio=mlp_ratio,
+                    drop=drop,
+                    drop_path=drop_path[i] if isinstance(drop_path, list) else drop_path,
+                    local_conv_size=local_conv_size,
+                    activation=activation,
+                )
+                for i in range(depth)
+            ]
+        )
 
-        # patch merging layer
-        self.downsample = None if downsample is None else downsample(
-            input_resolution, dim=dim, out_dim=out_dim, activation=activation)
+        # Patch merging layer
+        self.downsample = (
+            None
+            if downsample is None
+            else downsample(input_resolution, dim=dim, out_dim=out_dim, activation=activation)
+        )
 
     def forward(self, x):
+        """Runs the input through each TinyViT block and applies the downsample layer if present."""
        for blk in self.blocks:
            x = checkpoint.checkpoint(blk, x) if self.use_checkpoint else blk(x)
        return x if self.downsample is None else self.downsample(x)
 
    def extra_repr(self) -> str:
-        return f'dim={self.dim}, input_resolution={self.input_resolution}, depth={self.depth}'
+        """Returns a string with the layer's parameters: dimension, input resolution, and depth."""
+        return f"dim={self.dim}, input_resolution={self.input_resolution}, depth={self.depth}"
 
 
 class LayerNorm2d(nn.Module):
+    """A PyTorch implementation of Layer Normalization in 2D."""
 
     def __init__(self, num_channels: int, eps: float = 1e-6) -> None:
+        """Initialize LayerNorm2d with the number of channels and an optional epsilon."""
        super().__init__()
        self.weight = nn.Parameter(torch.ones(num_channels))
        self.bias = 
nn.Parameter(torch.zeros(num_channels)) self.eps = eps def forward(self, x: torch.Tensor) -> torch.Tensor: + """Perform a forward pass, normalizing the input tensor.""" u = x.mean(1, keepdim=True) s = (x - u).pow(2).mean(1, keepdim=True) x = (x - u) / torch.sqrt(s + self.eps) @@ -426,6 +535,30 @@ class LayerNorm2d(nn.Module): class TinyViT(nn.Module): + """ + The TinyViT architecture for vision tasks. + + Attributes: + img_size (int): Input image size. + in_chans (int): Number of input channels. + num_classes (int): Number of classification classes. + embed_dims (List[int]): List of embedding dimensions for each layer. + depths (List[int]): List of depths for each layer. + num_heads (List[int]): List of number of attention heads for each layer. + window_sizes (List[int]): List of window sizes for each layer. + mlp_ratio (float): Ratio of MLP hidden dimension to embedding dimension. + drop_rate (float): Dropout rate for drop layers. + drop_path_rate (float): Drop path rate for stochastic depth. + use_checkpoint (bool): Use checkpointing for efficient memory usage. + mbconv_expand_ratio (float): Expansion ratio for MBConv layer. + local_conv_size (int): Local convolution kernel size. + layer_lr_decay (float): Layer-wise learning rate decay. + + Note: + This implementation is generalized to accept a list of depths, attention heads, + embedding dimensions and window sizes, which allows you to create a + "stack" of TinyViT models of varying configurations. + """ def __init__( self, @@ -436,14 +569,33 @@ class TinyViT(nn.Module): depths=[2, 2, 6, 2], num_heads=[3, 6, 12, 24], window_sizes=[7, 7, 14, 7], - mlp_ratio=4., - drop_rate=0., + mlp_ratio=4.0, + drop_rate=0.0, drop_path_rate=0.1, use_checkpoint=False, mbconv_expand_ratio=4.0, local_conv_size=3, layer_lr_decay=1.0, ): + """ + Initializes the TinyViT model. + + Args: + img_size (int, optional): The input image size. Defaults to 224. + in_chans (int, optional): Number of input channels. Defaults to 3. + num_classes (int, optional): Number of classification classes. Defaults to 1000. + embed_dims (List[int], optional): List of embedding dimensions for each layer. Defaults to [96, 192, 384, 768]. + depths (List[int], optional): List of depths for each layer. Defaults to [2, 2, 6, 2]. + num_heads (List[int], optional): List of number of attention heads for each layer. Defaults to [3, 6, 12, 24]. + window_sizes (List[int], optional): List of window sizes for each layer. Defaults to [7, 7, 14, 7]. + mlp_ratio (float, optional): Ratio of MLP hidden dimension to embedding dimension. Defaults to 4. + drop_rate (float, optional): Dropout rate. Defaults to 0. + drop_path_rate (float, optional): Drop path rate for stochastic depth. Defaults to 0.1. + use_checkpoint (bool, optional): Whether to use checkpointing for efficient memory usage. Defaults to False. + mbconv_expand_ratio (float, optional): Expansion ratio for MBConv layer. Defaults to 4.0. + local_conv_size (int, optional): Local convolution kernel size. Defaults to 3. + layer_lr_decay (float, optional): Layer-wise learning rate decay. Defaults to 1.0. 
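+
+        Example:
+            A construction-only sketch (the configuration below is an assumption matching a MobileSAM-style image
+            encoder; forward_features expects a 64x64 final feature map, which corresponds to img_size=1024):
+            >>> model = TinyViT(img_size=1024, embed_dims=[64, 128, 160, 320], depths=[2, 2, 6, 2],
+            ...                 num_heads=[2, 4, 5, 10], window_sizes=[7, 7, 14, 7])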
+ """ super().__init__() self.img_size = img_size self.num_classes = num_classes @@ -453,50 +605,52 @@ class TinyViT(nn.Module): activation = nn.GELU - self.patch_embed = PatchEmbed(in_chans=in_chans, - embed_dim=embed_dims[0], - resolution=img_size, - activation=activation) + self.patch_embed = PatchEmbed( + in_chans=in_chans, embed_dim=embed_dims[0], resolution=img_size, activation=activation + ) patches_resolution = self.patch_embed.patches_resolution self.patches_resolution = patches_resolution - # stochastic depth + # Stochastic depth dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))] # stochastic depth decay rule - # build layers + # Build layers self.layers = nn.ModuleList() for i_layer in range(self.num_layers): kwargs = dict( dim=embed_dims[i_layer], - input_resolution=(patches_resolution[0] // (2 ** (i_layer - 1 if i_layer == 3 else i_layer)), - patches_resolution[1] // (2 ** (i_layer - 1 if i_layer == 3 else i_layer))), + input_resolution=( + patches_resolution[0] // (2 ** (i_layer - 1 if i_layer == 3 else i_layer)), + patches_resolution[1] // (2 ** (i_layer - 1 if i_layer == 3 else i_layer)), + ), # input_resolution=(patches_resolution[0] // (2 ** i_layer), # patches_resolution[1] // (2 ** i_layer)), depth=depths[i_layer], - drop_path=dpr[sum(depths[:i_layer]):sum(depths[:i_layer + 1])], + drop_path=dpr[sum(depths[:i_layer]) : sum(depths[: i_layer + 1])], downsample=PatchMerging if (i_layer < self.num_layers - 1) else None, use_checkpoint=use_checkpoint, - out_dim=embed_dims[min(i_layer + 1, - len(embed_dims) - 1)], + out_dim=embed_dims[min(i_layer + 1, len(embed_dims) - 1)], activation=activation, ) if i_layer == 0: layer = ConvLayer(conv_expand_ratio=mbconv_expand_ratio, **kwargs) else: - layer = BasicLayer(num_heads=num_heads[i_layer], - window_size=window_sizes[i_layer], - mlp_ratio=self.mlp_ratio, - drop=drop_rate, - local_conv_size=local_conv_size, - **kwargs) + layer = BasicLayer( + num_heads=num_heads[i_layer], + window_size=window_sizes[i_layer], + mlp_ratio=self.mlp_ratio, + drop=drop_rate, + local_conv_size=local_conv_size, + **kwargs, + ) self.layers.append(layer) # Classifier head self.norm_head = nn.LayerNorm(embed_dims[-1]) self.head = nn.Linear(embed_dims[-1], num_classes) if num_classes > 0 else torch.nn.Identity() - # init weights + # Init weights self.apply(self._init_weights) self.set_layer_lr_decay(layer_lr_decay) self.neck = nn.Sequential( @@ -518,13 +672,15 @@ class TinyViT(nn.Module): ) def set_layer_lr_decay(self, layer_lr_decay): + """Sets the learning rate decay for each layer in the TinyViT model.""" decay_rate = layer_lr_decay - # layers -> blocks (depth) + # Layers -> blocks (depth) depth = sum(self.depths) lr_scales = [decay_rate ** (depth - i - 1) for i in range(depth)] def _set_lr_scale(m, scale): + """Sets the learning rate scale for each layer in the model based on the layer's depth.""" for p in m.parameters(): p.lr_scale = scale @@ -544,12 +700,14 @@ class TinyViT(nn.Module): p.param_name = k def _check_lr_scale(m): + """Checks if the learning rate scale attribute is present in module's parameters.""" for p in m.parameters(): - assert hasattr(p, 'lr_scale'), p.param_name + assert hasattr(p, "lr_scale"), p.param_name self.apply(_check_lr_scale) def _init_weights(self, m): + """Initializes weights for linear layers and layer normalization in the given module.""" if isinstance(m, nn.Linear): # NOTE: This initialization is needed only for training. 
            # trunc_normal_(m.weight, std=.02)
@@ -561,11 +719,12 @@ class TinyViT(nn.Module):
     @torch.jit.ignore
     def no_weight_decay_keywords(self):
-        return {'attention_biases'}
+        """Returns a set of parameter names (keywords) for which weight decay should not be applied."""
+        return {"attention_biases"}
 
     def forward_features(self, x):
-        # x: (N, C, H, W)
-        x = self.patch_embed(x)
+        """Runs the input through the model layers and returns the transformed output."""
+        x = self.patch_embed(x)  # x input is (N, C, H, W)
         x = self.layers[0](x)
         start_i = 1
@@ -573,10 +732,11 @@
         for i in range(start_i, len(self.layers)):
             layer = self.layers[i]
             x = layer(x)
-        B, _, C = x.size()
+        B, _, C = x.shape
         x = x.view(B, 64, 64, C)
         x = x.permute(0, 3, 1, 2)
         return self.neck(x)
 
     def forward(self, x):
+        """Executes a forward pass on the input tensor through the constructed model layers."""
         return self.forward_features(x)
diff --git a/ultralytics/models/sam/modules/transformer.py b/ultralytics/models/sam/modules/transformer.py
index f925538..1ad0741 100644
--- a/ultralytics/models/sam/modules/transformer.py
+++ b/ultralytics/models/sam/modules/transformer.py
@@ -10,6 +10,21 @@ from ultralytics.nn.modules import MLPBlock
 
 
 class TwoWayTransformer(nn.Module):
+    """
+    A Two-Way Transformer module that enables the simultaneous attention to both image and query points. This class
+    serves as a specialized transformer decoder that attends to an input image using queries whose positional embedding
+    is supplied. This is particularly useful for tasks like object detection, image segmentation, and point cloud
+    processing.
+
+    Attributes:
+        depth (int): The number of layers in the transformer.
+        embedding_dim (int): The channel dimension for the input embeddings.
+        num_heads (int): The number of heads for multihead attention.
+        mlp_dim (int): The internal channel dimension for the MLP block.
+        layers (nn.ModuleList): The list of TwoWayAttentionBlock layers that make up the transformer.
+        final_attn_token_to_image (Attention): The final attention layer applied from the queries to the image.
+        norm_final_attn (nn.LayerNorm): The layer normalization applied to the final queries.
+    """
 
     def __init__(
         self,
@@ -21,8 +36,7 @@
         attention_downsample_rate: int = 2,
     ) -> None:
         """
-        A transformer decoder that attends to an input image using
-        queries whose positional embedding is supplied.
+        A transformer decoder that attends to an input image using queries whose positional embedding is supplied.
 
         Args:
             depth (int): number of layers in the transformer
@@ -48,7 +62,8 @@
                     activation=activation,
                     attention_downsample_rate=attention_downsample_rate,
                     skip_first_layer_pe=(i == 0),
-                ))
+                )
+            )
         self.final_attn_token_to_image = Attention(embedding_dim, num_heads, downsample_rate=attention_downsample_rate)
         self.norm_final_attn = nn.LayerNorm(embedding_dim)
 
@@ -99,6 +114,23 @@
 
 
 class TwoWayAttentionBlock(nn.Module):
+    """
+    An attention block that performs both self-attention and cross-attention in two directions: queries to keys and
+    keys to queries. This block consists of four main layers: (1) self-attention on sparse inputs, (2) cross-attention
+    of sparse inputs to dense inputs, (3) an MLP block on sparse inputs, and (4) cross-attention of dense inputs to
+    sparse inputs.
+
+    Attributes:
+        self_attn (Attention): The self-attention layer for the queries.
+        norm1 (nn.LayerNorm): Layer normalization following the first attention block.
+ cross_attn_token_to_image (Attention): Cross-attention layer from queries to keys. + norm2 (nn.LayerNorm): Layer normalization following the second attention block. + mlp (MLPBlock): MLP block that transforms the query embeddings. + norm3 (nn.LayerNorm): Layer normalization following the MLP block. + norm4 (nn.LayerNorm): Layer normalization following the third attention block. + cross_attn_image_to_token (Attention): Cross-attention layer from keys to queries. + skip_first_layer_pe (bool): Whether to skip the positional encoding in the first layer. + """ def __init__( self, @@ -171,8 +203,7 @@ class TwoWayAttentionBlock(nn.Module): class Attention(nn.Module): - """ - An attention layer that allows for downscaling the size of the embedding after projection to queries, keys, and + """An attention layer that allows for downscaling the size of the embedding after projection to queries, keys, and values. """ @@ -182,24 +213,37 @@ class Attention(nn.Module): num_heads: int, downsample_rate: int = 1, ) -> None: + """ + Initializes the Attention model with the given dimensions and settings. + + Args: + embedding_dim (int): The dimensionality of the input embeddings. + num_heads (int): The number of attention heads. + downsample_rate (int, optional): The factor by which the internal dimensions are downsampled. Defaults to 1. + + Raises: + AssertionError: If 'num_heads' does not evenly divide the internal dimension (embedding_dim / downsample_rate). + """ super().__init__() self.embedding_dim = embedding_dim self.internal_dim = embedding_dim // downsample_rate self.num_heads = num_heads - assert self.internal_dim % num_heads == 0, 'num_heads must divide embedding_dim.' + assert self.internal_dim % num_heads == 0, "num_heads must divide embedding_dim." self.q_proj = nn.Linear(embedding_dim, self.internal_dim) self.k_proj = nn.Linear(embedding_dim, self.internal_dim) self.v_proj = nn.Linear(embedding_dim, self.internal_dim) self.out_proj = nn.Linear(self.internal_dim, embedding_dim) - def _separate_heads(self, x: Tensor, num_heads: int) -> Tensor: + @staticmethod + def _separate_heads(x: Tensor, num_heads: int) -> Tensor: """Separate the input tensor into the specified number of attention heads.""" b, n, c = x.shape x = x.reshape(b, n, num_heads, c // num_heads) return x.transpose(1, 2) # B x N_heads x N_tokens x C_per_head - def _recombine_heads(self, x: Tensor) -> Tensor: + @staticmethod + def _recombine_heads(x: Tensor) -> Tensor: """Recombine the separated attention heads into a single tensor.""" b, n_heads, n_tokens, c_per_head = x.shape x = x.transpose(1, 2) diff --git a/ultralytics/models/sam/predict.py b/ultralytics/models/sam/predict.py index e8a8197..63ca632 100644 --- a/ultralytics/models/sam/predict.py +++ b/ultralytics/models/sam/predict.py @@ -1,4 +1,12 @@ # Ultralytics YOLO 🚀, AGPL-3.0 license +""" +Generate predictions using the Segment Anything Model (SAM). + +SAM is an advanced image segmentation model offering features like promptable segmentation and zero-shot performance. +This module contains the implementation of the prediction logic and auxiliary utilities required to perform segmentation +using SAM. It forms an integral part of the Ultralytics framework and is designed for high-performance, real-time image +segmentation tasks. 
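+
+Example:
+    A minimal usage sketch (assumes a local 'sam_b.pt' checkpoint and an 'image.jpg' on disk):
+    >>> from ultralytics import SAM
+    >>> model = SAM("sam_b.pt")
+    >>> results = model("image.jpg", bboxes=[100, 100, 200, 200])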
+""" import numpy as np import torch @@ -10,129 +18,155 @@ from ultralytics.engine.predictor import BasePredictor from ultralytics.engine.results import Results from ultralytics.utils import DEFAULT_CFG, ops from ultralytics.utils.torch_utils import select_device - -from .amg import (batch_iterator, batched_mask_to_box, build_all_layer_point_grids, calculate_stability_score, - generate_crop_boxes, is_box_near_crop_edge, remove_small_regions, uncrop_boxes_xyxy, uncrop_masks) +from .amg import ( + batch_iterator, + batched_mask_to_box, + build_all_layer_point_grids, + calculate_stability_score, + generate_crop_boxes, + is_box_near_crop_edge, + remove_small_regions, + uncrop_boxes_xyxy, + uncrop_masks, +) from .build import build_sam class Predictor(BasePredictor): + """ + Predictor class for the Segment Anything Model (SAM), extending BasePredictor. + + The class provides an interface for model inference tailored to image segmentation tasks. + With advanced architecture and promptable segmentation capabilities, it facilitates flexible and real-time + mask generation. The class is capable of working with various types of prompts such as bounding boxes, + points, and low-resolution masks. + + Attributes: + cfg (dict): Configuration dictionary specifying model and task-related parameters. + overrides (dict): Dictionary containing values that override the default configuration. + _callbacks (dict): Dictionary of user-defined callback functions to augment behavior. + args (namespace): Namespace to hold command-line arguments or other operational variables. + im (torch.Tensor): Preprocessed input image tensor. + features (torch.Tensor): Extracted image features used for inference. + prompts (dict): Collection of various prompt types, such as bounding boxes and points. + segment_all (bool): Flag to control whether to segment all objects in the image or only specified ones. + """ def __init__(self, cfg=DEFAULT_CFG, overrides=None, _callbacks=None): + """ + Initialize the Predictor with configuration, overrides, and callbacks. + + The method sets up the Predictor object and applies any configuration overrides or callbacks provided. It + initializes task-specific settings for SAM, such as retina_masks being set to True for optimal results. + + Args: + cfg (dict): Configuration dictionary. + overrides (dict, optional): Dictionary of values to override default configuration. + _callbacks (dict, optional): Dictionary of callback functions to customize behavior. + """ if overrides is None: overrides = {} - overrides.update(dict(task='segment', mode='predict', imgsz=1024)) + overrides.update(dict(task="segment", mode="predict", imgsz=1024)) super().__init__(cfg, overrides, _callbacks) - # SAM needs retina_masks=True, or the results would be a mess. self.args.retina_masks = True - # Args for set_image self.im = None self.features = None - # Args for set_prompts self.prompts = {} - # Args for segment everything self.segment_all = False def preprocess(self, im): - """Prepares input image before inference. + """ + Preprocess the input image for model inference. + + The method prepares the input image by applying transformations and normalization. + It supports both torch.Tensor and list of np.ndarray as input formats. Args: - im (torch.Tensor | List(np.ndarray)): BCHW for tensor, [(HWC) x B] for list. + im (torch.Tensor | List[np.ndarray]): BCHW tensor format or list of HWC numpy arrays. + + Returns: + (torch.Tensor): The preprocessed image tensor. 
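+
+        Example:
+            A sketch (assumes an initialized predictor with a loaded model; 'predictor' is hypothetical):
+            >>> im = predictor.preprocess([np.zeros((1024, 1024, 3), dtype=np.uint8)])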
""" if self.im is not None: return self.im not_tensor = not isinstance(im, torch.Tensor) if not_tensor: im = np.stack(self.pre_transform(im)) - im = im[..., ::-1].transpose((0, 3, 1, 2)) # BGR to RGB, BHWC to BCHW, (n, 3, h, w) - im = np.ascontiguousarray(im) # contiguous + im = im[..., ::-1].transpose((0, 3, 1, 2)) + im = np.ascontiguousarray(im) im = torch.from_numpy(im) im = im.to(self.device) - im = im.half() if self.model.fp16 else im.float() # uint8 to fp16/32 + im = im.half() if self.model.fp16 else im.float() if not_tensor: im = (im - self.mean) / self.std return im def pre_transform(self, im): """ - Pre-transform input image before inference. + Perform initial transformations on the input image for preprocessing. + + The method applies transformations such as resizing to prepare the image for further preprocessing. + Currently, batched inference is not supported; hence the list length should be 1. Args: - im (List(np.ndarray)): (N, 3, h, w) for tensor, [(h, w, 3) x N] for list. + im (List[np.ndarray]): List containing images in HWC numpy array format. Returns: - (list): A list of transformed images. + (List[np.ndarray]): List of transformed images. """ - assert len(im) == 1, 'SAM model does not currently support batched inference' + assert len(im) == 1, "SAM model does not currently support batched inference" letterbox = LetterBox(self.args.imgsz, auto=False, center=False) return [letterbox(image=x) for x in im] def inference(self, im, bboxes=None, points=None, labels=None, masks=None, multimask_output=False, *args, **kwargs): """ - Predict masks for the given input prompts, using the currently set image. + Perform image segmentation inference based on the given input cues, using the currently loaded image. This + method leverages SAM's (Segment Anything Model) architecture consisting of image encoder, prompt encoder, and + mask decoder for real-time and promptable segmentation tasks. Args: - im (torch.Tensor): The preprocessed image, (N, C, H, W). - bboxes (np.ndarray | List, None): (N, 4), in XYXY format. - points (np.ndarray | List, None): (N, 2), Each point is in (X,Y) in pixels. - labels (np.ndarray | List, None): (N, ), labels for the point prompts. - 1 indicates a foreground point and 0 indicates a background point. - masks (np.ndarray, None): A low resolution mask input to the model, typically - coming from a previous prediction iteration. Has form (N, H, W), where - for SAM, H=W=256. - multimask_output (bool): If true, the model will return three masks. - For ambiguous input prompts (such as a single click), this will often - produce better masks than a single prediction. If only a single - mask is needed, the model's predicted quality score can be used - to select the best mask. For non-ambiguous prompts, such as multiple - input prompts, multimask_output=False can give better results. + im (torch.Tensor): The preprocessed input image in tensor format, with shape (N, C, H, W). + bboxes (np.ndarray | List, optional): Bounding boxes with shape (N, 4), in XYXY format. + points (np.ndarray | List, optional): Points indicating object locations with shape (N, 2), in pixel coordinates. + labels (np.ndarray | List, optional): Labels for point prompts, shape (N, ). 1 for foreground and 0 for background. + masks (np.ndarray, optional): Low-resolution masks from previous predictions. Shape should be (N, H, W). For SAM, H=W=256. + multimask_output (bool, optional): Flag to return multiple masks. Helpful for ambiguous prompts. Defaults to False. 
Returns: - (np.ndarray): The output masks in CxHxW format, where C is the - number of masks, and (H, W) is the original image size. - (np.ndarray): An array of length C containing the model's - predictions for the quality of each mask. - (np.ndarray): An array of shape CxHxW, where C is the number - of masks and H=W=256. These low resolution logits can be passed to - a subsequent iteration as mask input. + (tuple): Contains the following three elements. + - np.ndarray: The output masks in shape CxHxW, where C is the number of generated masks. + - np.ndarray: An array of length C containing quality scores predicted by the model for each mask. + - np.ndarray: Low-resolution logits of shape CxHxW for subsequent inference, where H=W=256. """ - # Get prompts from self.prompts first - bboxes = self.prompts.pop('bboxes', bboxes) - points = self.prompts.pop('points', points) - masks = self.prompts.pop('masks', masks) + # Override prompts if any stored in self.prompts + bboxes = self.prompts.pop("bboxes", bboxes) + points = self.prompts.pop("points", points) + masks = self.prompts.pop("masks", masks) + if all(i is None for i in [bboxes, points, masks]): return self.generate(im, *args, **kwargs) + return self.prompt_inference(im, bboxes, points, labels, masks, multimask_output) def prompt_inference(self, im, bboxes=None, points=None, labels=None, masks=None, multimask_output=False): """ - Predict masks for the given input prompts, using the currently set image. + Internal function for image segmentation inference based on cues like bounding boxes, points, and masks. + Leverages SAM's specialized architecture for prompt-based, real-time segmentation. Args: - im (torch.Tensor): The preprocessed image, (N, C, H, W). - bboxes (np.ndarray | List, None): (N, 4), in XYXY format. - points (np.ndarray | List, None): (N, 2), Each point is in (X,Y) in pixels. - labels (np.ndarray | List, None): (N, ), labels for the point prompts. - 1 indicates a foreground point and 0 indicates a background point. - masks (np.ndarray, None): A low resolution mask input to the model, typically - coming from a previous prediction iteration. Has form (N, H, W), where - for SAM, H=W=256. - multimask_output (bool): If true, the model will return three masks. - For ambiguous input prompts (such as a single click), this will often - produce better masks than a single prediction. If only a single - mask is needed, the model's predicted quality score can be used - to select the best mask. For non-ambiguous prompts, such as multiple - input prompts, multimask_output=False can give better results. + im (torch.Tensor): The preprocessed input image in tensor format, with shape (N, C, H, W). + bboxes (np.ndarray | List, optional): Bounding boxes with shape (N, 4), in XYXY format. + points (np.ndarray | List, optional): Points indicating object locations with shape (N, 2), in pixel coordinates. + labels (np.ndarray | List, optional): Labels for point prompts, shape (N, ). 1 for foreground and 0 for background. + masks (np.ndarray, optional): Low-resolution masks from previous predictions. Shape should be (N, H, W). For SAM, H=W=256. + multimask_output (bool, optional): Flag to return multiple masks. Helpful for ambiguous prompts. Defaults to False. Returns: - (np.ndarray): The output masks in CxHxW format, where C is the - number of masks, and (H, W) is the original image size. - (np.ndarray): An array of length C containing the model's - predictions for the quality of each mask. 
- (np.ndarray): An array of shape CxHxW, where C is the number - of masks and H=W=256. These low resolution logits can be passed to - a subsequent iteration as mask input. + (tuple): Contains the following three elements. + - np.ndarray: The output masks in shape CxHxW, where C is the number of generated masks. + - np.ndarray: An array of length C containing quality scores predicted by the model for each mask. + - np.ndarray: Low-resolution logits of shape CxHxW for subsequent inference, where H=W=256. """ features = self.model.image_encoder(im) if self.features is None else self.features @@ -158,11 +192,7 @@ class Predictor(BasePredictor): points = (points, labels) if points is not None else None # Embed prompts - sparse_embeddings, dense_embeddings = self.model.prompt_encoder( - points=points, - boxes=bboxes, - masks=masks, - ) + sparse_embeddings, dense_embeddings = self.model.prompt_encoder(points=points, boxes=bboxes, masks=masks) # Predict masks pred_masks, pred_scores = self.model.mask_decoder( @@ -177,58 +207,50 @@ class Predictor(BasePredictor): # `d` could be 1 or 3 depends on `multimask_output`. return pred_masks.flatten(0, 1), pred_scores.flatten(0, 1) - def generate(self, - im, - crop_n_layers=0, - crop_overlap_ratio=512 / 1500, - crop_downscale_factor=1, - point_grids=None, - points_stride=32, - points_batch_size=64, - conf_thres=0.88, - stability_score_thresh=0.95, - stability_score_offset=0.95, - crop_nms_thresh=0.7): - """Segment the whole image. + def generate( + self, + im, + crop_n_layers=0, + crop_overlap_ratio=512 / 1500, + crop_downscale_factor=1, + point_grids=None, + points_stride=32, + points_batch_size=64, + conf_thres=0.88, + stability_score_thresh=0.95, + stability_score_offset=0.95, + crop_nms_thresh=0.7, + ): + """ + Perform image segmentation using the Segment Anything Model (SAM). + + This function segments an entire image into constituent parts by leveraging SAM's advanced architecture + and real-time performance capabilities. It can optionally work on image crops for finer segmentation. Args: - im (torch.Tensor): The preprocessed image, (N, C, H, W). - crop_n_layers (int): If >0, mask prediction will be run again on - crops of the image. Sets the number of layers to run, where each - layer has 2**i_layer number of image crops. - crop_overlap_ratio (float): Sets the degree to which crops overlap. - In the first crop layer, crops will overlap by this fraction of - the image length. Later layers with more crops scale down this overlap. - crop_downscale_factor (int): The number of points-per-side - sampled in layer n is scaled down by crop_n_points_downscale_factor**n. - point_grids (list(np.ndarray), None): A list over explicit grids - of points used for sampling, normalized to [0,1]. The nth grid in the - list is used in the nth crop layer. Exclusive with points_per_side. - points_stride (int, None): The number of points to be sampled - along one side of the image. The total number of points is - points_per_side**2. If None, 'point_grids' must provide explicit - point sampling. - points_batch_size (int): Sets the number of points run simultaneously - by the model. Higher numbers may be faster but use more GPU memory. - conf_thres (float): A filtering threshold in [0,1], using the - model's predicted mask quality. - stability_score_thresh (float): A filtering threshold in [0,1], using - the stability of the mask under changes to the cutoff used to binarize - the model's mask predictions. 
- stability_score_offset (float): The amount to shift the cutoff when - calculated the stability score. - crop_nms_thresh (float): The box IoU cutoff used by non-maximal - suppression to filter duplicate masks between different crops. + im (torch.Tensor): Input tensor representing the preprocessed image with dimensions (N, C, H, W). + crop_n_layers (int): Specifies the number of layers for additional mask predictions on image crops. + Each layer produces 2**i_layer number of image crops. + crop_overlap_ratio (float): Determines the extent of overlap between crops. Scaled down in subsequent layers. + crop_downscale_factor (int): Scaling factor for the number of sampled points-per-side in each layer. + point_grids (list[np.ndarray], optional): Custom grids for point sampling normalized to [0,1]. + Used in the nth crop layer. + points_stride (int, optional): Number of points to sample along each side of the image. + Exclusive with 'point_grids'. + points_batch_size (int): Batch size for the number of points processed simultaneously. + conf_thres (float): Confidence threshold [0,1] for filtering based on the model's mask quality prediction. + stability_score_thresh (float): Stability threshold [0,1] for mask filtering based on mask stability. + stability_score_offset (float): Offset value for calculating stability score. + crop_nms_thresh (float): IoU cutoff for Non-Maximum Suppression (NMS) to remove duplicate masks between crops. + + Returns: + (tuple): A tuple containing segmented masks, confidence scores, and bounding boxes. """ self.segment_all = True ih, iw = im.shape[2:] crop_regions, layer_idxs = generate_crop_boxes((ih, iw), crop_n_layers, crop_overlap_ratio) if point_grids is None: - point_grids = build_all_layer_point_grids( - points_stride, - crop_n_layers, - crop_downscale_factor, - ) + point_grids = build_all_layer_point_grids(points_stride, crop_n_layers, crop_downscale_factor) pred_masks, pred_scores, pred_bboxes, region_areas = [], [], [], [] for crop_region, layer_idx in zip(crop_regions, layer_idxs): x1, y1, x2, y2 = crop_region @@ -236,19 +258,20 @@ class Predictor(BasePredictor): area = torch.tensor(w * h, device=im.device) points_scale = np.array([[w, h]]) # w, h # Crop image and interpolate to input size - crop_im = F.interpolate(im[..., y1:y2, x1:x2], (ih, iw), mode='bilinear', align_corners=False) + crop_im = F.interpolate(im[..., y1:y2, x1:x2], (ih, iw), mode="bilinear", align_corners=False) # (num_points, 2) points_for_image = point_grids[layer_idx] * points_scale crop_masks, crop_scores, crop_bboxes = [], [], [] - for (points, ) in batch_iterator(points_batch_size, points_for_image): + for (points,) in batch_iterator(points_batch_size, points_for_image): pred_mask, pred_score = self.prompt_inference(crop_im, points=points, multimask_output=True) # Interpolate predicted masks to input size - pred_mask = F.interpolate(pred_mask[None], (h, w), mode='bilinear', align_corners=False)[0] + pred_mask = F.interpolate(pred_mask[None], (h, w), mode="bilinear", align_corners=False)[0] idx = pred_score > conf_thres pred_mask, pred_score = pred_mask[idx], pred_score[idx] - stability_score = calculate_stability_score(pred_mask, self.model.mask_threshold, - stability_score_offset) + stability_score = calculate_stability_score( + pred_mask, self.model.mask_threshold, stability_score_offset + ) idx = stability_score > stability_score_thresh pred_mask, pred_score = pred_mask[idx], pred_score[idx] # Bool type is much more memory-efficient. 
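Note: the stability_score_thresh filter above keeps only masks whose binarization barely changes when the logit cutoff is shifted by ±stability_score_offset. A minimal standalone sketch of that quantity, assuming SAM-style mask logits (it mirrors what the imported calculate_stability_score computes, but is not the library function itself):

import torch

def stability_score(mask_logits: torch.Tensor, threshold: float, offset: float) -> torch.Tensor:
    # IoU between the masks obtained at the raised and lowered cutoffs
    intersections = (mask_logits > (threshold + offset)).sum(dim=(-1, -2)).float()
    unions = (mask_logits > (threshold - offset)).sum(dim=(-1, -2)).float()
    return intersections / unions

logits = torch.randn(4, 256, 256)  # hypothetical logits for 4 candidate masks
print(stability_score(logits, threshold=0.0, offset=1.0))  # values near 1 are "stable"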
@@ -291,7 +314,22 @@ class Predictor(BasePredictor): return pred_masks, pred_scores, pred_bboxes def setup_model(self, model, verbose=True): - """Set up YOLO model with specified thresholds and device.""" + """ + Initializes the Segment Anything Model (SAM) for inference. + + This method sets up the SAM model by allocating it to the appropriate device and initializing the necessary + parameters for image normalization and other Ultralytics compatibility settings. + + Args: + model (torch.nn.Module): A pre-trained SAM model. If None, a model will be built based on configuration. + verbose (bool): If True, prints selected device information. + + Attributes: + model (torch.nn.Module): The SAM model allocated to the chosen device for inference. + device (torch.device): The device to which the model and tensors are allocated. + mean (torch.Tensor): The mean values for image normalization. + std (torch.Tensor): The standard deviation values for image normalization. + """ device = select_device(self.args.device, verbose=verbose) if model is None: model = build_sam(self.args.model) @@ -300,7 +338,8 @@ class Predictor(BasePredictor): self.device = device self.mean = torch.tensor([123.675, 116.28, 103.53]).view(-1, 1, 1).to(device) self.std = torch.tensor([58.395, 57.12, 57.375]).view(-1, 1, 1).to(device) - # TODO: Temporary settings for compatibility + + # Ultralytics compatibility settings self.model.pt = False self.model.triton = False self.model.stride = 32 @@ -308,7 +347,20 @@ class Predictor(BasePredictor): self.done_warmup = True def postprocess(self, preds, img, orig_imgs): - """Post-processes inference output predictions to create detection masks for objects.""" + """ + Post-processes SAM's inference outputs to generate object detection masks and bounding boxes. + + The method scales masks and boxes to the original image size and applies a threshold to the mask predictions. The + SAM model uses advanced architecture and promptable segmentation tasks to achieve real-time performance. + + Args: + preds (tuple): The output from SAM model inference, containing masks, scores, and optional bounding boxes. + img (torch.Tensor): The processed input image tensor. + orig_imgs (list | torch.Tensor): The original, unprocessed images. + + Returns: + (list): List of Results objects containing detection masks, bounding boxes, and other metadata. + """ # (N, 1, H, W), (N, 1) pred_masks, pred_scores = preds[:2] pred_bboxes = preds[2] if self.segment_all else None @@ -334,21 +386,36 @@ class Predictor(BasePredictor): return results def setup_source(self, source): - """Sets up source and inference mode.""" + """ + Sets up the data source for inference. + + This method configures the data source from which images will be fetched for inference. The source could be a + directory, a video file, or other types of image data sources. + + Args: + source (str | Path): The path to the image data source for inference. + """ if source is not None: super().setup_source(source) def set_image(self, image): - """Set image in advance. - Args: + """ + Preprocesses and sets a single image for inference. - image (str | np.ndarray): image file path or np.ndarray image by cv2. + This function sets up the model if not already initialized, configures the data source to the specified image, + and preprocesses the image for feature extraction. Only one image can be set at a time. + + Args: + image (str | np.ndarray): Image file path as a string, or a np.ndarray image read by cv2. 
+ + Raises: + AssertionError: If more than one image is set. """ if self.model is None: model = build_sam(self.args.model) self.setup_model(model) self.setup_source(image) - assert len(self.dataset) == 1, '`set_image` only supports setting one image!' + assert len(self.dataset) == 1, "`set_image` only supports setting one image!" for batch in self.dataset: im = self.preprocess(batch[1]) self.features = self.model.image_encoder(im) @@ -360,23 +427,27 @@ class Predictor(BasePredictor): self.prompts = prompts def reset_image(self): + """Resets the image and its features to None.""" self.im = None self.features = None @staticmethod def remove_small_regions(masks, min_area=0, nms_thresh=0.7): """ - Removes small disconnected regions and holes in masks, then reruns - box NMS to remove any new duplicates. Requires open-cv as a dependency. + Perform post-processing on segmentation masks generated by the Segment Anything Model (SAM). Specifically, this + function removes small disconnected regions and holes from the input masks, and then performs Non-Maximum + Suppression (NMS) to eliminate any newly created duplicate boxes. Args: - masks (torch.Tensor): Masks, (N, H, W). - min_area (int): Minimum area threshold. - nms_thresh (float): NMS threshold. + masks (torch.Tensor): A tensor containing the masks to be processed. Shape should be (N, H, W), where N is + the number of masks, H is height, and W is width. + min_area (int): The minimum area below which disconnected regions and holes will be removed. Defaults to 0. + nms_thresh (float): The IoU threshold for the NMS algorithm. Defaults to 0.7. + Returns: - new_masks (torch.Tensor): New Masks, (N, H, W). - keep (List[int]): The indices of the new masks, which can be used to filter - the corresponding boxes. + (tuple([torch.Tensor, List[int]])): + - new_masks (torch.Tensor): The processed masks with small regions removed. Shape is (N, H, W). + - keep (List[int]): The indices of the remaining masks post-NMS, which can be used to filter the boxes. 
""" if len(masks) == 0: return masks @@ -386,23 +457,18 @@ class Predictor(BasePredictor): scores = [] for mask in masks: mask = mask.cpu().numpy().astype(np.uint8) - mask, changed = remove_small_regions(mask, min_area, mode='holes') + mask, changed = remove_small_regions(mask, min_area, mode="holes") unchanged = not changed - mask, changed = remove_small_regions(mask, min_area, mode='islands') + mask, changed = remove_small_regions(mask, min_area, mode="islands") unchanged = unchanged and not changed new_masks.append(torch.as_tensor(mask).unsqueeze(0)) - # Give score=0 to changed masks and score=1 to unchanged masks - # so NMS will prefer ones that didn't need postprocessing + # Give score=0 to changed masks and 1 to unchanged masks so NMS prefers masks not needing postprocessing scores.append(float(unchanged)) # Recalculate boxes and remove any new duplicates new_masks = torch.cat(new_masks, dim=0) boxes = batched_mask_to_box(new_masks) - keep = torchvision.ops.nms( - boxes.float(), - torch.as_tensor(scores), - nms_thresh, - ) + keep = torchvision.ops.nms(boxes.float(), torch.as_tensor(scores), nms_thresh) return new_masks[keep].to(device=masks.device, dtype=masks.dtype), keep diff --git a/ultralytics/models/utils/loss.py b/ultralytics/models/utils/loss.py index 95406e1..ac48775 100644 --- a/ultralytics/models/utils/loss.py +++ b/ultralytics/models/utils/loss.py @@ -6,20 +6,32 @@ import torch.nn.functional as F from ultralytics.utils.loss import FocalLoss, VarifocalLoss from ultralytics.utils.metrics import bbox_iou - from .ops import HungarianMatcher class DETRLoss(nn.Module): + """ + DETR (DEtection TRansformer) Loss class. This class calculates and returns the different loss components for the + DETR object detection model. It computes classification loss, bounding box loss, GIoU loss, and optionally auxiliary + losses. - def __init__(self, - nc=80, - loss_gain=None, - aux_loss=True, - use_fl=True, - use_vfl=False, - use_uni_match=False, - uni_match_ind=0): + Attributes: + nc (int): The number of classes. + loss_gain (dict): Coefficients for different loss components. + aux_loss (bool): Whether to compute auxiliary losses. + use_fl (bool): Use FocalLoss or not. + use_vfl (bool): Use VarifocalLoss or not. + use_uni_match (bool): Whether to use a fixed layer to assign labels for the auxiliary branch. + uni_match_ind (int): The fixed indices of a layer to use if `use_uni_match` is True. + matcher (HungarianMatcher): Object to compute matching cost and indices. + fl (FocalLoss or None): Focal Loss object if `use_fl` is True, otherwise None. + vfl (VarifocalLoss or None): Varifocal Loss object if `use_vfl` is True, otherwise None. + device (torch.device): Device on which tensors are stored. + """ + + def __init__( + self, nc=80, loss_gain=None, aux_loss=True, use_fl=True, use_vfl=False, use_uni_match=False, uni_match_ind=0 + ): """ DETR loss function. 
@@ -34,9 +46,9 @@ class DETRLoss(nn.Module): super().__init__() if loss_gain is None: - loss_gain = {'class': 1, 'bbox': 5, 'giou': 2, 'no_object': 0.1, 'mask': 1, 'dice': 1} + loss_gain = {"class": 1, "bbox": 5, "giou": 2, "no_object": 0.1, "mask": 1, "dice": 1} self.nc = nc - self.matcher = HungarianMatcher(cost_gain={'class': 2, 'bbox': 5, 'giou': 2}) + self.matcher = HungarianMatcher(cost_gain={"class": 2, "bbox": 5, "giou": 2}) self.loss_gain = loss_gain self.aux_loss = aux_loss self.fl = FocalLoss() if use_fl else None @@ -46,9 +58,10 @@ class DETRLoss(nn.Module): self.uni_match_ind = uni_match_ind self.device = None - def _get_loss_class(self, pred_scores, targets, gt_scores, num_gts, postfix=''): - # logits: [b, query, num_classes], gt_class: list[[n, 1]] - name_class = f'loss_class{postfix}' + def _get_loss_class(self, pred_scores, targets, gt_scores, num_gts, postfix=""): + """Computes the classification loss based on predictions, target values, and ground truth scores.""" + # Logits: [b, query, num_classes], gt_class: list[[n, 1]] + name_class = f"loss_class{postfix}" bs, nq = pred_scores.shape[:2] # one_hot = F.one_hot(targets, self.nc + 1)[..., :-1] # (bs, num_queries, num_classes) one_hot = torch.zeros((bs, nq, self.nc + 1), dtype=torch.int64, device=targets.device) @@ -63,25 +76,28 @@ class DETRLoss(nn.Module): loss_cls = self.fl(pred_scores, one_hot.float()) loss_cls /= max(num_gts, 1) / nq else: - loss_cls = nn.BCEWithLogitsLoss(reduction='none')(pred_scores, gt_scores).mean(1).sum() # YOLO CLS loss + loss_cls = nn.BCEWithLogitsLoss(reduction="none")(pred_scores, gt_scores).mean(1).sum() # YOLO CLS loss - return {name_class: loss_cls.squeeze() * self.loss_gain['class']} + return {name_class: loss_cls.squeeze() * self.loss_gain["class"]} - def _get_loss_bbox(self, pred_bboxes, gt_bboxes, postfix=''): - # boxes: [b, query, 4], gt_bbox: list[[n, 4]] - name_bbox = f'loss_bbox{postfix}' - name_giou = f'loss_giou{postfix}' + def _get_loss_bbox(self, pred_bboxes, gt_bboxes, postfix=""): + """Calculates and returns the bounding box loss and GIoU loss for the predicted and ground truth bounding + boxes. 
+ """ + # Boxes: [b, query, 4], gt_bbox: list[[n, 4]] + name_bbox = f"loss_bbox{postfix}" + name_giou = f"loss_giou{postfix}" loss = {} if len(gt_bboxes) == 0: - loss[name_bbox] = torch.tensor(0., device=self.device) - loss[name_giou] = torch.tensor(0., device=self.device) + loss[name_bbox] = torch.tensor(0.0, device=self.device) + loss[name_giou] = torch.tensor(0.0, device=self.device) return loss - loss[name_bbox] = self.loss_gain['bbox'] * F.l1_loss(pred_bboxes, gt_bboxes, reduction='sum') / len(gt_bboxes) + loss[name_bbox] = self.loss_gain["bbox"] * F.l1_loss(pred_bboxes, gt_bboxes, reduction="sum") / len(gt_bboxes) loss[name_giou] = 1.0 - bbox_iou(pred_bboxes, gt_bboxes, xywh=True, GIoU=True) loss[name_giou] = loss[name_giou].sum() / len(gt_bboxes) - loss[name_giou] = self.loss_gain['giou'] * loss[name_giou] + loss[name_giou] = self.loss_gain["giou"] * loss[name_giou] return {k: v.squeeze() for k, v in loss.items()} # This function is for future RT-DETR Segment models @@ -115,50 +131,57 @@ class DETRLoss(nn.Module): # loss = 1 - (numerator + 1) / (denominator + 1) # return loss.sum() / num_gts - def _get_loss_aux(self, - pred_bboxes, - pred_scores, - gt_bboxes, - gt_cls, - gt_groups, - match_indices=None, - postfix='', - masks=None, - gt_mask=None): - """Get auxiliary losses""" + def _get_loss_aux( + self, + pred_bboxes, + pred_scores, + gt_bboxes, + gt_cls, + gt_groups, + match_indices=None, + postfix="", + masks=None, + gt_mask=None, + ): + """Get auxiliary losses.""" # NOTE: loss class, bbox, giou, mask, dice loss = torch.zeros(5 if masks is not None else 3, device=pred_bboxes.device) if match_indices is None and self.use_uni_match: - match_indices = self.matcher(pred_bboxes[self.uni_match_ind], - pred_scores[self.uni_match_ind], - gt_bboxes, - gt_cls, - gt_groups, - masks=masks[self.uni_match_ind] if masks is not None else None, - gt_mask=gt_mask) + match_indices = self.matcher( + pred_bboxes[self.uni_match_ind], + pred_scores[self.uni_match_ind], + gt_bboxes, + gt_cls, + gt_groups, + masks=masks[self.uni_match_ind] if masks is not None else None, + gt_mask=gt_mask, + ) for i, (aux_bboxes, aux_scores) in enumerate(zip(pred_bboxes, pred_scores)): aux_masks = masks[i] if masks is not None else None - loss_ = self._get_loss(aux_bboxes, - aux_scores, - gt_bboxes, - gt_cls, - gt_groups, - masks=aux_masks, - gt_mask=gt_mask, - postfix=postfix, - match_indices=match_indices) - loss[0] += loss_[f'loss_class{postfix}'] - loss[1] += loss_[f'loss_bbox{postfix}'] - loss[2] += loss_[f'loss_giou{postfix}'] + loss_ = self._get_loss( + aux_bboxes, + aux_scores, + gt_bboxes, + gt_cls, + gt_groups, + masks=aux_masks, + gt_mask=gt_mask, + postfix=postfix, + match_indices=match_indices, + ) + loss[0] += loss_[f"loss_class{postfix}"] + loss[1] += loss_[f"loss_bbox{postfix}"] + loss[2] += loss_[f"loss_giou{postfix}"] # if masks is not None and gt_mask is not None: # loss_ = self._get_loss_mask(aux_masks, gt_mask, match_indices, postfix) # loss[3] += loss_[f'loss_mask{postfix}'] # loss[4] += loss_[f'loss_dice{postfix}'] loss = { - f'loss_class_aux{postfix}': loss[0], - f'loss_bbox_aux{postfix}': loss[1], - f'loss_giou_aux{postfix}': loss[2]} + f"loss_class_aux{postfix}": loss[0], + f"loss_bbox_aux{postfix}": loss[1], + f"loss_giou_aux{postfix}": loss[2], + } # if masks is not None and gt_mask is not None: # loss[f'loss_mask_aux{postfix}'] = loss[3] # loss[f'loss_dice_aux{postfix}'] = loss[4] @@ -166,39 +189,45 @@ class DETRLoss(nn.Module): @staticmethod def _get_index(match_indices): + """Returns batch 
indices, source indices, and destination indices from provided match indices."""
        batch_idx = torch.cat([torch.full_like(src, i) for i, (src, _) in enumerate(match_indices)])
        src_idx = torch.cat([src for (src, _) in match_indices])
        dst_idx = torch.cat([dst for (_, dst) in match_indices])
        return (batch_idx, src_idx), dst_idx
 
     def _get_assigned_bboxes(self, pred_bboxes, gt_bboxes, match_indices):
-        pred_assigned = torch.cat([
-            t[I] if len(I) > 0 else torch.zeros(0, t.shape[-1], device=self.device)
-            for t, (I, _) in zip(pred_bboxes, match_indices)])
-        gt_assigned = torch.cat([
-            t[J] if len(J) > 0 else torch.zeros(0, t.shape[-1], device=self.device)
-            for t, (_, J) in zip(gt_bboxes, match_indices)])
+        """Assigns predicted bounding boxes to ground truth bounding boxes based on the match indices."""
+        pred_assigned = torch.cat(
+            [
+                t[i] if len(i) > 0 else torch.zeros(0, t.shape[-1], device=self.device)
+                for t, (i, _) in zip(pred_bboxes, match_indices)
+            ]
+        )
+        gt_assigned = torch.cat(
+            [
+                t[j] if len(j) > 0 else torch.zeros(0, t.shape[-1], device=self.device)
+                for t, (_, j) in zip(gt_bboxes, match_indices)
+            ]
+        )
         return pred_assigned, gt_assigned
 
-    def _get_loss(self,
-                  pred_bboxes,
-                  pred_scores,
-                  gt_bboxes,
-                  gt_cls,
-                  gt_groups,
-                  masks=None,
-                  gt_mask=None,
-                  postfix='',
-                  match_indices=None):
-        """Get losses"""
+    def _get_loss(
+        self,
+        pred_bboxes,
+        pred_scores,
+        gt_bboxes,
+        gt_cls,
+        gt_groups,
+        masks=None,
+        gt_mask=None,
+        postfix="",
+        match_indices=None,
+    ):
+        """Get losses."""
         if match_indices is None:
-            match_indices = self.matcher(pred_bboxes,
-                                         pred_scores,
-                                         gt_bboxes,
-                                         gt_cls,
-                                         gt_groups,
-                                         masks=masks,
-                                         gt_mask=gt_mask)
+            match_indices = self.matcher(
+                pred_bboxes, pred_scores, gt_bboxes, gt_cls, gt_groups, masks=masks, gt_mask=gt_mask
+            )
 
         idx, gt_idx = self._get_index(match_indices)
         pred_bboxes, gt_bboxes = pred_bboxes[idx], gt_bboxes[gt_idx]
@@ -218,7 +247,7 @@ class DETRLoss(nn.Module):
             # loss.update(self._get_loss_mask(masks, gt_mask, match_indices, postfix))
         return loss
 
-    def forward(self, pred_bboxes, pred_scores, batch, postfix='', **kwargs):
+    def forward(self, pred_bboxes, pred_scores, batch, postfix="", **kwargs):
         """
         Args:
             pred_bboxes (torch.Tensor): [l, b, query, 4]
@@ -230,43 +259,62 @@ class DETRLoss(nn.Module):
             postfix (str): postfix of loss name.
         """
         self.device = pred_bboxes.device
-        match_indices = kwargs.get('match_indices', None)
-        gt_cls, gt_bboxes, gt_groups = batch['cls'], batch['bboxes'], batch['gt_groups']
+        match_indices = kwargs.get("match_indices", None)
+        gt_cls, gt_bboxes, gt_groups = batch["cls"], batch["bboxes"], batch["gt_groups"]
 
-        total_loss = self._get_loss(pred_bboxes[-1],
-                                    pred_scores[-1],
-                                    gt_bboxes,
-                                    gt_cls,
-                                    gt_groups,
-                                    postfix=postfix,
-                                    match_indices=match_indices)
+        total_loss = self._get_loss(
+            pred_bboxes[-1], pred_scores[-1], gt_bboxes, gt_cls, gt_groups, postfix=postfix, match_indices=match_indices
+        )
 
         if self.aux_loss:
             total_loss.update(
-                self._get_loss_aux(pred_bboxes[:-1], pred_scores[:-1], gt_bboxes, gt_cls, gt_groups, match_indices,
-                                   postfix))
+                self._get_loss_aux(
+                    pred_bboxes[:-1], pred_scores[:-1], gt_bboxes, gt_cls, gt_groups, match_indices, postfix
+                )
+            )
 
         return total_loss
 
 
 class RTDETRDetectionLoss(DETRLoss):
+    """
+    Real-Time DEtection TRansformer (RT-DETR) Detection Loss class that extends the DETRLoss.
+
+    This class computes the detection loss for the RT-DETR model, which includes the standard detection loss as well as
+    an additional denoising training loss when provided with denoising metadata.
+    """
 
     def forward(self, preds, batch, dn_bboxes=None, dn_scores=None, dn_meta=None):
+        """
+        Forward pass to compute the detection loss.
+
+        Args:
+            preds (tuple): Predicted bounding boxes and scores.
+            batch (dict): Batch data containing ground truth information.
+            dn_bboxes (torch.Tensor, optional): Denoising bounding boxes. Default is None.
+            dn_scores (torch.Tensor, optional): Denoising scores. Default is None.
+            dn_meta (dict, optional): Metadata for denoising. Default is None.
+
+        Returns:
+            (dict): Dictionary containing the total loss and, if applicable, the denoising loss.
+        """
         pred_bboxes, pred_scores = preds
         total_loss = super().forward(pred_bboxes, pred_scores, batch)
 
+        # Check for denoising metadata to compute denoising training loss
         if dn_meta is not None:
-            dn_pos_idx, dn_num_group = dn_meta['dn_pos_idx'], dn_meta['dn_num_group']
-            assert len(batch['gt_groups']) == len(dn_pos_idx)
+            dn_pos_idx, dn_num_group = dn_meta["dn_pos_idx"], dn_meta["dn_num_group"]
+            assert len(batch["gt_groups"]) == len(dn_pos_idx)
 
-            # Denoising match indices
-            match_indices = self.get_dn_match_indices(dn_pos_idx, dn_num_group, batch['gt_groups'])
+            # Get the match indices for denoising
+            match_indices = self.get_dn_match_indices(dn_pos_idx, dn_num_group, batch["gt_groups"])
 
-            # Compute denoising training loss
-            dn_loss = super().forward(dn_bboxes, dn_scores, batch, postfix='_dn', match_indices=match_indices)
+            # Compute the denoising training loss
+            dn_loss = super().forward(dn_bboxes, dn_scores, batch, postfix="_dn", match_indices=match_indices)
             total_loss.update(dn_loss)
         else:
-            total_loss.update({f'{k}_dn': torch.tensor(0., device=self.device) for k in total_loss.keys()})
+            # If no denoising metadata is provided, set denoising loss to zero
+            total_loss.update({f"{k}_dn": torch.tensor(0.0, device=self.device) for k in total_loss.keys()})
 
         return total_loss
 
@@ -276,12 +324,12 @@ class RTDETRDetectionLoss(DETRLoss):
         Get the match indices for denoising.
 
         Args:
-            dn_pos_idx (List[torch.Tensor]): A list includes positive indices of denoising.
-            dn_num_group (int): The number of groups of denoising.
-            gt_groups (List(int)): a list of batch size length includes the number of gts of each image.
+            dn_pos_idx (List[torch.Tensor]): List of tensors containing positive indices for denoising.
+            dn_num_group (int): Number of denoising groups.
+            gt_groups (List[int]): List of integers representing the number of ground truths for each image.
 
         Returns:
-            dn_match_indices (List(tuple)): Matched indices.
+            (List[tuple]): List of tuples containing matched indices for denoising.
         """
         dn_match_indices = []
         idx_groups = torch.as_tensor([0, *gt_groups[:-1]]).cumsum_(0)
@@ -289,8 +337,9 @@ class RTDETRDetectionLoss(DETRLoss):
             if num_gt > 0:
                 gt_idx = torch.arange(end=num_gt, dtype=torch.long) + idx_groups[i]
                 gt_idx = gt_idx.repeat(dn_num_group)
-                assert len(dn_pos_idx[i]) == len(gt_idx), 'Expected the same length, '
-                f'but got {len(dn_pos_idx[i])} and {len(gt_idx)} respectively.'
+                assert len(dn_pos_idx[i]) == len(gt_idx), (
+                    f"Expected the same length, but got {len(dn_pos_idx[i])} and {len(gt_idx)} respectively."
+                )
dn_match_indices.append((dn_pos_idx[i], gt_idx)) else: dn_match_indices.append((torch.zeros([0], dtype=torch.long), torch.zeros([0], dtype=torch.long))) diff --git a/ultralytics/models/utils/ops.py b/ultralytics/models/utils/ops.py index eb1ebfb..4f66fee 100644 --- a/ultralytics/models/utils/ops.py +++ b/ultralytics/models/utils/ops.py @@ -11,8 +11,8 @@ from ultralytics.utils.ops import xywh2xyxy, xyxy2xywh class HungarianMatcher(nn.Module): """ - A module implementing the HungarianMatcher, which is a differentiable module to solve the assignment problem in - an end-to-end fashion. + A module implementing the HungarianMatcher, which is a differentiable module to solve the assignment problem in an + end-to-end fashion. HungarianMatcher performs optimal assignment over the predicted and ground truth bounding boxes using a cost function that considers classification scores, bounding box coordinates, and optionally, mask predictions. @@ -32,9 +32,12 @@ class HungarianMatcher(nn.Module): """ def __init__(self, cost_gain=None, use_fl=True, with_mask=False, num_sample_points=12544, alpha=0.25, gamma=2.0): + """Initializes HungarianMatcher with cost coefficients, Focal Loss, mask prediction, sample points, and alpha + gamma factors. + """ super().__init__() if cost_gain is None: - cost_gain = {'class': 1, 'bbox': 5, 'giou': 2, 'mask': 1, 'dice': 1} + cost_gain = {"class": 1, "bbox": 5, "giou": 2, "mask": 1, "dice": 1} self.cost_gain = cost_gain self.use_fl = use_fl self.with_mask = with_mask @@ -45,8 +48,8 @@ class HungarianMatcher(nn.Module): def forward(self, pred_bboxes, pred_scores, gt_bboxes, gt_cls, gt_groups, masks=None, gt_mask=None): """ Forward pass for HungarianMatcher. This function computes costs based on prediction and ground truth - (classification cost, L1 cost between boxes and GIoU cost between boxes) and finds the optimal matching - between predictions and ground truth based on these costs. + (classification cost, L1 cost between boxes and GIoU cost between boxes) and finds the optimal matching between + predictions and ground truth based on these costs. Args: pred_bboxes (Tensor): Predicted bounding boxes with shape [batch_size, num_queries, 4]. 
@@ -83,7 +86,7 @@ class HungarianMatcher(nn.Module): # Compute the classification cost pred_scores = pred_scores[:, gt_cls] if self.use_fl: - neg_cost_class = (1 - self.alpha) * (pred_scores ** self.gamma) * (-(1 - pred_scores + 1e-8).log()) + neg_cost_class = (1 - self.alpha) * (pred_scores**self.gamma) * (-(1 - pred_scores + 1e-8).log()) pos_cost_class = self.alpha * ((1 - pred_scores) ** self.gamma) * (-(pred_scores + 1e-8).log()) cost_class = pos_cost_class - neg_cost_class else: @@ -96,19 +99,25 @@ class HungarianMatcher(nn.Module): cost_giou = 1.0 - bbox_iou(pred_bboxes.unsqueeze(1), gt_bboxes.unsqueeze(0), xywh=True, GIoU=True).squeeze(-1) # Final cost matrix - C = self.cost_gain['class'] * cost_class + \ - self.cost_gain['bbox'] * cost_bbox + \ - self.cost_gain['giou'] * cost_giou + C = ( + self.cost_gain["class"] * cost_class + + self.cost_gain["bbox"] * cost_bbox + + self.cost_gain["giou"] * cost_giou + ) # Compute the mask cost and dice cost if self.with_mask: C += self._cost_mask(bs, gt_groups, masks, gt_mask) + # Set invalid values (NaNs and infinities) to 0 (fixes ValueError: matrix contains invalid numeric entries) + C[C.isnan() | C.isinf()] = 0.0 + C = C.view(bs, nq, -1).cpu() indices = [linear_sum_assignment(c[i]) for i, c in enumerate(C.split(gt_groups, -1))] - gt_groups = torch.as_tensor([0, *gt_groups[:-1]]).cumsum_(0) - # (idx for queries, idx for gt) - return [(torch.tensor(i, dtype=torch.long), torch.tensor(j, dtype=torch.long) + gt_groups[k]) - for k, (i, j) in enumerate(indices)] + gt_groups = torch.as_tensor([0, *gt_groups[:-1]]).cumsum_(0) # (idx for queries, idx for gt) + return [ + (torch.tensor(i, dtype=torch.long), torch.tensor(j, dtype=torch.long) + gt_groups[k]) + for k, (i, j) in enumerate(indices) + ] # This function is for future RT-DETR Segment models # def _cost_mask(self, bs, num_gts, masks=None, gt_mask=None): @@ -141,18 +150,13 @@ class HungarianMatcher(nn.Module): # return C -def get_cdn_group(batch, - num_classes, - num_queries, - class_embed, - num_dn=100, - cls_noise_ratio=0.5, - box_noise_scale=1.0, - training=False): +def get_cdn_group( + batch, num_classes, num_queries, class_embed, num_dn=100, cls_noise_ratio=0.5, box_noise_scale=1.0, training=False +): """ - Get contrastive denoising training group. This function creates a contrastive denoising training group with - positive and negative samples from the ground truths (gt). It applies noise to the class labels and bounding - box coordinates, and returns the modified labels, bounding boxes, attention mask and meta information. + Get contrastive denoising training group. This function creates a contrastive denoising training group with positive + and negative samples from the ground truths (gt). It applies noise to the class labels and bounding box coordinates, + and returns the modified labels, bounding boxes, attention mask and meta information. 
Args: batch (dict): A dict that includes 'gt_cls' (torch.Tensor with shape [num_gts, ]), 'gt_bboxes' @@ -174,7 +178,7 @@ def get_cdn_group(batch, if (not training) or num_dn <= 0: return None, None, None, None - gt_groups = batch['gt_groups'] + gt_groups = batch["gt_groups"] total_num = sum(gt_groups) max_nums = max(gt_groups) if max_nums == 0: @@ -182,26 +186,26 @@ def get_cdn_group(batch, num_group = num_dn // max_nums num_group = 1 if num_group == 0 else num_group - # pad gt to max_num of a batch + # Pad gt to max_num of a batch bs = len(gt_groups) - gt_cls = batch['cls'] # (bs*num, ) - gt_bbox = batch['bboxes'] # bs*num, 4 - b_idx = batch['batch_idx'] + gt_cls = batch["cls"] # (bs*num, ) + gt_bbox = batch["bboxes"] # bs*num, 4 + b_idx = batch["batch_idx"] - # each group has positive and negative queries. + # Each group has positive and negative queries. dn_cls = gt_cls.repeat(2 * num_group) # (2*num_group*bs*num, ) dn_bbox = gt_bbox.repeat(2 * num_group, 1) # 2*num_group*bs*num, 4 dn_b_idx = b_idx.repeat(2 * num_group).view(-1) # (2*num_group*bs*num, ) - # positive and negative mask + # Positive and negative mask # (bs*num*num_group, ), the second total_num*num_group part as negative samples neg_idx = torch.arange(total_num * num_group, dtype=torch.long, device=gt_bbox.device) + num_group * total_num if cls_noise_ratio > 0: - # half of bbox prob + # Half of bbox prob mask = torch.rand(dn_cls.shape) < (cls_noise_ratio * 0.5) idx = torch.nonzero(mask).squeeze(-1) - # randomly put a new one here + # Randomly put a new one here new_label = torch.randint_like(idx, 0, num_classes, dtype=dn_cls.dtype, device=dn_cls.device) dn_cls[idx] = new_label @@ -217,10 +221,9 @@ def get_cdn_group(batch, known_bbox += rand_part * diff known_bbox.clip_(min=0.0, max=1.0) dn_bbox = xyxy2xywh(known_bbox) - dn_bbox = inverse_sigmoid(dn_bbox) + dn_bbox = torch.logit(dn_bbox, eps=1e-6) # inverse sigmoid - # total denoising queries - num_dn = int(max_nums * 2 * num_group) + num_dn = int(max_nums * 2 * num_group) # total denoising queries # class_embed = torch.cat([class_embed, torch.zeros([1, class_embed.shape[-1]], device=class_embed.device)]) dn_cls_embed = class_embed[dn_cls] # bs*num * 2 * num_group, 256 padding_cls = torch.zeros(bs, num_dn, dn_cls_embed.shape[-1], device=gt_cls.device) @@ -235,27 +238,26 @@ def get_cdn_group(batch, tgt_size = num_dn + num_queries attn_mask = torch.zeros([tgt_size, tgt_size], dtype=torch.bool) - # match query cannot see the reconstruct + # Match query cannot see the reconstruct attn_mask[num_dn:, :num_dn] = True - # reconstruct cannot see each other + # Reconstruct cannot see each other for i in range(num_group): if i == 0: - attn_mask[max_nums * 2 * i:max_nums * 2 * (i + 1), max_nums * 2 * (i + 1):num_dn] = True + attn_mask[max_nums * 2 * i : max_nums * 2 * (i + 1), max_nums * 2 * (i + 1) : num_dn] = True if i == num_group - 1: - attn_mask[max_nums * 2 * i:max_nums * 2 * (i + 1), :max_nums * i * 2] = True + attn_mask[max_nums * 2 * i : max_nums * 2 * (i + 1), : max_nums * i * 2] = True else: - attn_mask[max_nums * 2 * i:max_nums * 2 * (i + 1), max_nums * 2 * (i + 1):num_dn] = True - attn_mask[max_nums * 2 * i:max_nums * 2 * (i + 1), :max_nums * 2 * i] = True + attn_mask[max_nums * 2 * i : max_nums * 2 * (i + 1), max_nums * 2 * (i + 1) : num_dn] = True + attn_mask[max_nums * 2 * i : max_nums * 2 * (i + 1), : max_nums * 2 * i] = True dn_meta = { - 'dn_pos_idx': [p.reshape(-1) for p in pos_idx.cpu().split(list(gt_groups), dim=1)], - 'dn_num_group': num_group, - 'dn_num_split': 
[num_dn, num_queries]}
+        "dn_pos_idx": [p.reshape(-1) for p in pos_idx.cpu().split(list(gt_groups), dim=1)],
+        "dn_num_group": num_group,
+        "dn_num_split": [num_dn, num_queries],
+    }
 
-    return padding_cls.to(class_embed.device), padding_bbox.to(class_embed.device), attn_mask.to(
-        class_embed.device), dn_meta
-
-
-def inverse_sigmoid(x, eps=1e-6):
-    """Inverse sigmoid function."""
-    x = x.clip(min=0., max=1.)
-    return torch.log(x / (1 - x + eps) + eps)
+    return (
+        padding_cls.to(class_embed.device),
+        padding_bbox.to(class_embed.device),
+        attn_mask.to(class_embed.device),
+        dn_meta,
+    )
diff --git a/ultralytics/models/yolo/__init__.py b/ultralytics/models/yolo/__init__.py
index c66e376..7b1a597 100644
--- a/ultralytics/models/yolo/__init__.py
+++ b/ultralytics/models/yolo/__init__.py
@@ -1,7 +1,7 @@
 # Ultralytics YOLO 🚀, AGPL-3.0 license
 
-from ultralytics.models.yolo import classify, detect, pose, segment
+from ultralytics.models.yolo import classify, detect, obb, pose, segment
 
-from .model import YOLO
+from .model import YOLO, YOLOWorld
 
-__all__ = 'classify', 'segment', 'detect', 'pose', 'YOLO'
+__all__ = "classify", "segment", "detect", "pose", "obb", "YOLO", "YOLOWorld"
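Note on the get_cdn_group change above: torch.logit(x, eps) clamps its input to [eps, 1 - eps] before computing log(x / (1 - x)), while the deleted inverse_sigmoid helper added eps inside the ratio instead, so the two agree away from the boundaries but can differ slightly near 0 and 1. A quick equivalence check, assuming inputs safely inside (0, 1):

import torch

x = torch.rand(4).clamp(0.01, 0.99)     # keep inputs away from 0 and 1
manual = torch.log(x / (1 - x))         # textbook inverse sigmoid
builtin = torch.logit(x, eps=1e-6)      # the replacement used in get_cdn_group
print(torch.allclose(manual, builtin))  # True on this range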
diff --git a/ultralytics/models/yolo/classify/__init__.py b/ultralytics/models/yolo/classify/__init__.py
index 33d72e6..ca92f89 100644
--- a/ultralytics/models/yolo/classify/__init__.py
+++ b/ultralytics/models/yolo/classify/__init__.py
@@ -4,4 +4,4 @@
 from ultralytics.models.yolo.classify.predict import ClassificationPredictor
 from ultralytics.models.yolo.classify.train import ClassificationTrainer
 from ultralytics.models.yolo.classify.val import ClassificationValidator
 
-__all__ = 'ClassificationPredictor', 'ClassificationTrainer', 'ClassificationValidator'
+__all__ = "ClassificationPredictor", "ClassificationTrainer", "ClassificationValidator"
diff --git a/ultralytics/models/yolo/classify/predict.py b/ultralytics/models/yolo/classify/predict.py
index a22616e..853ef04 100644
--- a/ultralytics/models/yolo/classify/predict.py
+++ b/ultralytics/models/yolo/classify/predict.py
@@ -1,6 +1,8 @@
 # Ultralytics YOLO 🚀, AGPL-3.0 license
 
+import cv2
 import torch
+from PIL import Image
 
 from ultralytics.engine.predictor import BasePredictor
 from ultralytics.engine.results import Results
@@ -26,13 +28,23 @@ class ClassificationPredictor(BasePredictor):
     """
 
     def __init__(self, cfg=DEFAULT_CFG, overrides=None, _callbacks=None):
+        """Initializes ClassificationPredictor setting the task to 'classify'."""
         super().__init__(cfg, overrides, _callbacks)
-        self.args.task = 'classify'
+        self.args.task = "classify"
+        self._legacy_transform_name = "ultralytics.yolo.data.augment.ToTensor"
 
     def preprocess(self, img):
         """Converts input image to model-compatible data type."""
         if not isinstance(img, torch.Tensor):
-            img = torch.stack([self.transforms(im) for im in img], dim=0)
+            is_legacy_transform = any(
+                self._legacy_transform_name in str(transform) for transform in self.transforms.transforms
+            )
+            if is_legacy_transform:  # to handle legacy transforms
+                img = torch.stack([self.transforms(im) for im in img], dim=0)
+            else:
+                img = torch.stack(
+                    [self.transforms(Image.fromarray(cv2.cvtColor(im, cv2.COLOR_BGR2RGB))) for im in img], dim=0
+                )
         img = (img if isinstance(img, torch.Tensor) else torch.from_numpy(img)).to(self.model.device)
         return img.half() if
self.model.fp16 else img.float() # uint8 to fp16/32 diff --git a/ultralytics/models/yolo/classify/train.py b/ultralytics/models/yolo/classify/train.py index 0829f05..42c6554 100644 --- a/ultralytics/models/yolo/classify/train.py +++ b/ultralytics/models/yolo/classify/train.py @@ -33,23 +33,23 @@ class ClassificationTrainer(BaseTrainer): """Initialize a ClassificationTrainer object with optional configuration overrides and callbacks.""" if overrides is None: overrides = {} - overrides['task'] = 'classify' - if overrides.get('imgsz') is None: - overrides['imgsz'] = 224 + overrides["task"] = "classify" + if overrides.get("imgsz") is None: + overrides["imgsz"] = 224 super().__init__(cfg, overrides, _callbacks) def set_model_attributes(self): """Set the YOLO model's class names from the loaded dataset.""" - self.model.names = self.data['names'] + self.model.names = self.data["names"] def get_model(self, cfg=None, weights=None, verbose=True): """Returns a modified PyTorch model configured for training YOLO.""" - model = ClassificationModel(cfg, nc=self.data['nc'], verbose=verbose and RANK == -1) + model = ClassificationModel(cfg, nc=self.data["nc"], verbose=verbose and RANK == -1) if weights: model.load(weights) for m in model.modules(): - if not self.args.pretrained and hasattr(m, 'reset_parameters'): + if not self.args.pretrained and hasattr(m, "reset_parameters"): m.reset_parameters() if isinstance(m, torch.nn.Dropout) and self.args.dropout: m.p = self.args.dropout # set dropout @@ -64,31 +64,32 @@ class ClassificationTrainer(BaseTrainer): model, ckpt = str(self.model), None # Load a YOLO model locally, from torchvision, or from Ultralytics assets - if model.endswith('.pt'): - self.model, ckpt = attempt_load_one_weight(model, device='cpu') + if model.endswith(".pt"): + self.model, ckpt = attempt_load_one_weight(model, device="cpu") for p in self.model.parameters(): p.requires_grad = True # for training - elif model.split('.')[-1] in ('yaml', 'yml'): + elif model.split(".")[-1] in ("yaml", "yml"): self.model = self.get_model(cfg=model) elif model in torchvision.models.__dict__: - self.model = torchvision.models.__dict__[model](weights='IMAGENET1K_V1' if self.args.pretrained else None) + self.model = torchvision.models.__dict__[model](weights="IMAGENET1K_V1" if self.args.pretrained else None) else: - FileNotFoundError(f'ERROR: model={model} not found locally or online. Please check model name.') - ClassificationModel.reshape_outputs(self.model, self.data['nc']) + raise FileNotFoundError(f"ERROR: model={model} not found locally or online. 
Please check model name.") + ClassificationModel.reshape_outputs(self.model, self.data["nc"]) return ckpt - def build_dataset(self, img_path, mode='train', batch=None): - return ClassificationDataset(root=img_path, args=self.args, augment=mode == 'train', prefix=mode) + def build_dataset(self, img_path, mode="train", batch=None): + """Creates a ClassificationDataset instance given an image path, and mode (train/test etc.).""" + return ClassificationDataset(root=img_path, args=self.args, augment=mode == "train", prefix=mode) - def get_dataloader(self, dataset_path, batch_size=16, rank=0, mode='train'): + def get_dataloader(self, dataset_path, batch_size=16, rank=0, mode="train"): """Returns PyTorch DataLoader with transforms to preprocess images for inference.""" with torch_distributed_zero_first(rank): # init dataset *.cache only once if DDP dataset = self.build_dataset(dataset_path, mode) loader = build_dataloader(dataset, batch_size, self.args.workers, rank=rank) # Attach inference transforms - if mode != 'train': + if mode != "train": if is_parallel(self.model): self.model.module.transforms = loader.dataset.torch_transforms else: @@ -97,26 +98,32 @@ class ClassificationTrainer(BaseTrainer): def preprocess_batch(self, batch): """Preprocesses a batch of images and classes.""" - batch['img'] = batch['img'].to(self.device) - batch['cls'] = batch['cls'].to(self.device) + batch["img"] = batch["img"].to(self.device) + batch["cls"] = batch["cls"].to(self.device) return batch def progress_string(self): """Returns a formatted string showing training progress.""" - return ('\n' + '%11s' * (4 + len(self.loss_names))) % \ - ('Epoch', 'GPU_mem', *self.loss_names, 'Instances', 'Size') + return ("\n" + "%11s" * (4 + len(self.loss_names))) % ( + "Epoch", + "GPU_mem", + *self.loss_names, + "Instances", + "Size", + ) def get_validator(self): """Returns an instance of ClassificationValidator for validation.""" - self.loss_names = ['loss'] - return yolo.classify.ClassificationValidator(self.test_loader, self.save_dir) + self.loss_names = ["loss"] + return yolo.classify.ClassificationValidator(self.test_loader, self.save_dir, _callbacks=self.callbacks) - def label_loss_items(self, loss_items=None, prefix='train'): + def label_loss_items(self, loss_items=None, prefix="train"): """ - Returns a loss dict with labelled training loss items tensor. Not needed for classification but necessary for - segmentation & detection + Returns a loss dict with labelled training loss items tensor. 
+ + Not needed for classification but necessary for segmentation & detection """ - keys = [f'{prefix}/{x}' for x in self.loss_names] + keys = [f"{prefix}/{x}" for x in self.loss_names] if loss_items is None: return keys loss_items = [round(float(loss_items), 5)] @@ -132,19 +139,20 @@ class ClassificationTrainer(BaseTrainer): if f.exists(): strip_optimizer(f) # strip optimizers if f is self.best: - LOGGER.info(f'\nValidating {f}...') + LOGGER.info(f"\nValidating {f}...") self.validator.args.data = self.args.data self.validator.args.plots = self.args.plots self.metrics = self.validator(model=f) - self.metrics.pop('fitness', None) - self.run_callbacks('on_fit_epoch_end') + self.metrics.pop("fitness", None) + self.run_callbacks("on_fit_epoch_end") LOGGER.info(f"Results saved to {colorstr('bold', self.save_dir)}") def plot_training_samples(self, batch, ni): """Plots training samples with their annotations.""" plot_images( - images=batch['img'], - batch_idx=torch.arange(len(batch['img'])), - cls=batch['cls'].view(-1), # warning: use .view(), not .squeeze() for Classify models - fname=self.save_dir / f'train_batch{ni}.jpg', - on_plot=self.on_plot) + images=batch["img"], + batch_idx=torch.arange(len(batch["img"])), + cls=batch["cls"].view(-1), # warning: use .view(), not .squeeze() for Classify models + fname=self.save_dir / f"train_batch{ni}.jpg", + on_plot=self.on_plot, + ) diff --git a/ultralytics/models/yolo/classify/val.py b/ultralytics/models/yolo/classify/val.py index 0748e27..de3cff2 100644 --- a/ultralytics/models/yolo/classify/val.py +++ b/ultralytics/models/yolo/classify/val.py @@ -31,43 +31,42 @@ class ClassificationValidator(BaseValidator): super().__init__(dataloader, save_dir, pbar, args, _callbacks) self.targets = None self.pred = None - self.args.task = 'classify' + self.args.task = "classify" self.metrics = ClassifyMetrics() def get_desc(self): """Returns a formatted string summarizing classification metrics.""" - return ('%22s' + '%11s' * 2) % ('classes', 'top1_acc', 'top5_acc') + return ("%22s" + "%11s" * 2) % ("classes", "top1_acc", "top5_acc") def init_metrics(self, model): """Initialize confusion matrix, class names, and top-1 and top-5 accuracy.""" self.names = model.names self.nc = len(model.names) - self.confusion_matrix = ConfusionMatrix(nc=self.nc, conf=self.args.conf, task='classify') + self.confusion_matrix = ConfusionMatrix(nc=self.nc, conf=self.args.conf, task="classify") self.pred = [] self.targets = [] def preprocess(self, batch): """Preprocesses input batch and returns it.""" - batch['img'] = batch['img'].to(self.device, non_blocking=True) - batch['img'] = batch['img'].half() if self.args.half else batch['img'].float() - batch['cls'] = batch['cls'].to(self.device) + batch["img"] = batch["img"].to(self.device, non_blocking=True) + batch["img"] = batch["img"].half() if self.args.half else batch["img"].float() + batch["cls"] = batch["cls"].to(self.device) return batch def update_metrics(self, preds, batch): """Updates running metrics with model predictions and batch targets.""" n5 = min(len(self.names), 5) self.pred.append(preds.argsort(1, descending=True)[:, :n5]) - self.targets.append(batch['cls']) + self.targets.append(batch["cls"]) def finalize_metrics(self, *args, **kwargs): """Finalizes metrics of the model such as confusion_matrix and speed.""" self.confusion_matrix.process_cls_preds(self.pred, self.targets) if self.args.plots: for normalize in True, False: - self.confusion_matrix.plot(save_dir=self.save_dir, - names=self.names.values(), - 
normalize=normalize,
-                                           on_plot=self.on_plot)
+                self.confusion_matrix.plot(
+                    save_dir=self.save_dir, names=self.names.values(), normalize=normalize, on_plot=self.on_plot
+                )
         self.metrics.speed = self.speed
         self.metrics.confusion_matrix = self.confusion_matrix
         self.metrics.save_dir = self.save_dir
@@ -78,6 +77,7 @@ class ClassificationValidator(BaseValidator):
         return self.metrics.results_dict
 
     def build_dataset(self, img_path):
+        """Creates and returns a ClassificationDataset instance using given image path and preprocessing parameters."""
         return ClassificationDataset(root=img_path, args=self.args, augment=False, prefix=self.args.split)
 
     def get_dataloader(self, dataset_path, batch_size):
@@ -87,24 +87,27 @@
 
     def print_results(self):
         """Prints evaluation metrics for YOLO object detection model."""
-        pf = '%22s' + '%11.3g' * len(self.metrics.keys)  # print format
-        LOGGER.info(pf % ('all', self.metrics.top1, self.metrics.top5))
+        pf = "%22s" + "%11.3g" * len(self.metrics.keys)  # print format
+        LOGGER.info(pf % ("all", self.metrics.top1, self.metrics.top5))
 
     def plot_val_samples(self, batch, ni):
         """Plot validation image samples."""
         plot_images(
-            images=batch['img'],
-            batch_idx=torch.arange(len(batch['img'])),
-            cls=batch['cls'].view(-1),  # warning: use .view(), not .squeeze() for Classify models
-            fname=self.save_dir / f'val_batch{ni}_labels.jpg',
+            images=batch["img"],
+            batch_idx=torch.arange(len(batch["img"])),
+            cls=batch["cls"].view(-1),  # warning: use .view(), not .squeeze() for Classify models
+            fname=self.save_dir / f"val_batch{ni}_labels.jpg",
             names=self.names,
-            on_plot=self.on_plot)
+            on_plot=self.on_plot,
+        )
 
     def plot_predictions(self, batch, preds, ni):
         """Plots predicted bounding boxes on input images and saves the result."""
-        plot_images(batch['img'],
-                    batch_idx=torch.arange(len(batch['img'])),
-                    cls=torch.argmax(preds, dim=1),
-                    fname=self.save_dir / f'val_batch{ni}_pred.jpg',
-                    names=self.names,
-                    on_plot=self.on_plot)  # pred
+        plot_images(
+            batch["img"],
+            batch_idx=torch.arange(len(batch["img"])),
+            cls=torch.argmax(preds, dim=1),
+            fname=self.save_dir / f"val_batch{ni}_pred.jpg",
+            names=self.names,
+            on_plot=self.on_plot,
+        )  # pred
diff --git a/ultralytics/models/yolo/detect/__init__.py b/ultralytics/models/yolo/detect/__init__.py
index 20fc0c4..5f3e62c 100644
--- a/ultralytics/models/yolo/detect/__init__.py
+++ b/ultralytics/models/yolo/detect/__init__.py
@@ -4,4 +4,4 @@ from .predict import DetectionPredictor
 from .train import DetectionTrainer
 from .val import DetectionValidator
 
-__all__ = 'DetectionPredictor', 'DetectionTrainer', 'DetectionValidator'
+__all__ = "DetectionPredictor", "DetectionTrainer", "DetectionValidator"
diff --git a/ultralytics/models/yolo/detect/__pycache__/__init__.cpython-312.pyc b/ultralytics/models/yolo/detect/__pycache__/__init__.cpython-312.pyc
index 87cc14be2566083d19be2219961e4ee3c0fcc5fd..6374723f52c16a8d441cbca8b8657acd53979fc3 100644
Binary files a/ultralytics/models/yolo/detect/__pycache__/__init__.cpython-312.pyc and b/ultralytics/models/yolo/detect/__pycache__/__init__.cpython-312.pyc differ
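The `ClassificationPredictor.preprocess` change earlier in this patch routes non-legacy inputs through PIL. A minimal sketch of the per-frame conversion it relies on follows; the zero-filled array is a stand-in for a real frame, and the shape is illustrative:

    import cv2
    import numpy as np
    from PIL import Image

    frame = np.zeros((224, 224, 3), dtype=np.uint8)  # stand-in for a BGR frame from the loader
    rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)  # OpenCV stores channels as BGR; PIL expects RGB
    pil_image = Image.fromarray(rgb)  # torchvision classification transforms consume PIL images

Only transform pipelines that still contain the old `ultralytics.yolo.data.augment.ToTensor` keep the raw-array path, per the legacy branch above.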
diff --git a/ultralytics/models/yolo/detect/__pycache__/predict.cpython-39.pyc b/ultralytics/models/yolo/detect/__pycache__/predict.cpython-39.pyc
index fe70b67fb14a4d69641a3edfbe2ccf3e9cccf252..25c32471e9c33cf5e51d86bf27197dd5bd0930de 100644
Binary files a/ultralytics/models/yolo/detect/__pycache__/predict.cpython-39.pyc and b/ultralytics/models/yolo/detect/__pycache__/predict.cpython-39.pyc differ
diff --git a/ultralytics/models/yolo/detect/__pycache__/train.cpython-312.pyc b/ultralytics/models/yolo/detect/__pycache__/train.cpython-312.pyc
index 7b5b461006fab0688a5bff61dff970b522dffa6a..786811e747a3d963b0ffc8eec8d97dc7584bb384 100644
Binary files a/ultralytics/models/yolo/detect/__pycache__/train.cpython-312.pyc and b/ultralytics/models/yolo/detect/__pycache__/train.cpython-312.pyc differ
diff --git a/ultralytics/models/yolo/detect/__pycache__/train.cpython-39.pyc b/ultralytics/models/yolo/detect/__pycache__/train.cpython-39.pyc
index 226df4e76f835475f7644f2ce8ab775ee413353a..c3a0b6f603dca393bf5550e98f8bef802ac0ffa7 100644
Binary files a/ultralytics/models/yolo/detect/__pycache__/train.cpython-39.pyc and b/ultralytics/models/yolo/detect/__pycache__/train.cpython-39.pyc differ
diff --git a/ultralytics/models/yolo/detect/__pycache__/val.cpython-312.pyc b/ultralytics/models/yolo/detect/__pycache__/val.cpython-312.pyc
index 53190ee95f3b3eae0bfe2a967f37e528eade0cd4..a6aa07b29223f665fe88f2da0b394660c4dd0577 100644
Binary files a/ultralytics/models/yolo/detect/__pycache__/val.cpython-312.pyc and b/ultralytics/models/yolo/detect/__pycache__/val.cpython-312.pyc differ
diff --git a/ultralytics/models/yolo/detect/predict.py b/ultralytics/models/yolo/detect/predict.py
index 28cbd7c..3a0c628 100644
--- a/ultralytics/models/yolo/detect/predict.py
+++ b/ultralytics/models/yolo/detect/predict.py
@@ -22,12 +22,14 @@ class DetectionPredictor(BasePredictor):
 
     def postprocess(self, preds, img, orig_imgs):
         """Post-processes predictions and returns a list of Results objects."""
-        preds = ops.non_max_suppression(preds,
-                                        self.args.conf,
-                                        self.args.iou,
-                                        agnostic=self.args.agnostic_nms,
-                                        max_det=self.args.max_det,
-                                        classes=self.args.classes)
+        preds = ops.non_max_suppression(
+            preds,
+            self.args.conf,
+            self.args.iou,
+
agnostic=self.args.agnostic_nms, + max_det=self.args.max_det, + classes=self.args.classes, + ) if not isinstance(orig_imgs, list): # input images are a torch.Tensor, not a list orig_imgs = ops.convert_torch2numpy_batch(orig_imgs) diff --git a/ultralytics/models/yolo/detect/train.py b/ultralytics/models/yolo/detect/train.py index 56d9243..3326512 100644 --- a/ultralytics/models/yolo/detect/train.py +++ b/ultralytics/models/yolo/detect/train.py @@ -1,8 +1,11 @@ # Ultralytics YOLO 🚀, AGPL-3.0 license +import math +import random from copy import copy import numpy as np +import torch.nn as nn from ultralytics.data import build_dataloader, build_yolo_dataset from ultralytics.engine.trainer import BaseTrainer @@ -27,7 +30,7 @@ class DetectionTrainer(BaseTrainer): ``` """ - def build_dataset(self, img_path, mode='train', batch=None): + def build_dataset(self, img_path, mode="train", batch=None): """ Build YOLO Dataset. @@ -37,53 +40,70 @@ class DetectionTrainer(BaseTrainer): batch (int, optional): Size of batches, this is for `rect`. Defaults to None. """ gs = max(int(de_parallel(self.model).stride.max() if self.model else 0), 32) - return build_yolo_dataset(self.args, img_path, batch, self.data, mode=mode, rect=mode == 'val', stride=gs) + return build_yolo_dataset(self.args, img_path, batch, self.data, mode=mode, rect=mode == "val", stride=gs) - def get_dataloader(self, dataset_path, batch_size=16, rank=0, mode='train'): + def get_dataloader(self, dataset_path, batch_size=16, rank=0, mode="train"): """Construct and return dataloader.""" - assert mode in ['train', 'val'] + assert mode in ["train", "val"] with torch_distributed_zero_first(rank): # init dataset *.cache only once if DDP dataset = self.build_dataset(dataset_path, mode, batch_size) - shuffle = mode == 'train' - if getattr(dataset, 'rect', False) and shuffle: + shuffle = mode == "train" + if getattr(dataset, "rect", False) and shuffle: LOGGER.warning("WARNING ⚠️ 'rect=True' is incompatible with DataLoader shuffle, setting shuffle=False") shuffle = False - workers = self.args.workers if mode == 'train' else self.args.workers * 2 + workers = self.args.workers if mode == "train" else self.args.workers * 2 return build_dataloader(dataset, batch_size, workers, shuffle, rank) # return dataloader def preprocess_batch(self, batch): """Preprocesses a batch of images by scaling and converting to float.""" - batch['img'] = batch['img'].to(self.device, non_blocking=True).float() / 255 + batch["img"] = batch["img"].to(self.device, non_blocking=True).float() / 255 + if self.args.multi_scale: + imgs = batch["img"] + sz = ( + random.randrange(self.args.imgsz * 0.5, self.args.imgsz * 1.5 + self.stride) + // self.stride + * self.stride + ) # size + sf = sz / max(imgs.shape[2:]) # scale factor + if sf != 1: + ns = [ + math.ceil(x * sf / self.stride) * self.stride for x in imgs.shape[2:] + ] # new shape (stretched to gs-multiple) + imgs = nn.functional.interpolate(imgs, size=ns, mode="bilinear", align_corners=False) + batch["img"] = imgs return batch def set_model_attributes(self): - """nl = de_parallel(self.model).model[-1].nl # number of detection layers (to scale hyps).""" + """Nl = de_parallel(self.model).model[-1].nl # number of detection layers (to scale hyps).""" # self.args.box *= 3 / nl # scale to layers # self.args.cls *= self.data["nc"] / 80 * 3 / nl # scale to classes and layers # self.args.cls *= (self.args.imgsz / 640) ** 2 * 3 / nl # scale to image size and layers - self.model.nc = self.data['nc'] # attach number of classes to model - 
self.model.names = self.data['names'] # attach class names to model + self.model.nc = self.data["nc"] # attach number of classes to model + self.model.names = self.data["names"] # attach class names to model self.model.args = self.args # attach hyperparameters to model # TODO: self.model.class_weights = labels_to_class_weights(dataset.labels, nc).to(device) * nc def get_model(self, cfg=None, weights=None, verbose=True): """Return a YOLO detection model.""" - model = DetectionModel(cfg, nc=self.data['nc'], verbose=verbose and RANK == -1) + model = DetectionModel(cfg, nc=self.data["nc"], verbose=verbose and RANK == -1) if weights: model.load(weights) return model def get_validator(self): """Returns a DetectionValidator for YOLO model validation.""" - self.loss_names = 'box_loss', 'cls_loss', 'dfl_loss' - return yolo.detect.DetectionValidator(self.test_loader, save_dir=self.save_dir, args=copy(self.args)) + self.loss_names = "box_loss", "cls_loss", "dfl_loss" + return yolo.detect.DetectionValidator( + self.test_loader, save_dir=self.save_dir, args=copy(self.args), _callbacks=self.callbacks + ) - def label_loss_items(self, loss_items=None, prefix='train'): + def label_loss_items(self, loss_items=None, prefix="train"): """ - Returns a loss dict with labelled training loss items tensor. Not needed for classification but necessary for - segmentation & detection + Returns a loss dict with labelled training loss items tensor. + + Not needed for classification but necessary for segmentation & detection """ - keys = [f'{prefix}/{x}' for x in self.loss_names] + keys = [f"{prefix}/{x}" for x in self.loss_names] if loss_items is not None: loss_items = [round(float(x), 5) for x in loss_items] # convert tensors to 5 decimal place floats return dict(zip(keys, loss_items)) @@ -92,18 +112,25 @@ class DetectionTrainer(BaseTrainer): def progress_string(self): """Returns a formatted string of training progress with epoch, GPU memory, loss, instances and size.""" - return ('\n' + '%11s' * - (4 + len(self.loss_names))) % ('Epoch', 'GPU_mem', *self.loss_names, 'Instances', 'Size') + return ("\n" + "%11s" * (4 + len(self.loss_names))) % ( + "Epoch", + "GPU_mem", + *self.loss_names, + "Instances", + "Size", + ) def plot_training_samples(self, batch, ni): """Plots training samples with their annotations.""" - plot_images(images=batch['img'], - batch_idx=batch['batch_idx'], - cls=batch['cls'].squeeze(-1), - bboxes=batch['bboxes'], - paths=batch['im_file'], - fname=self.save_dir / f'train_batch{ni}.jpg', - on_plot=self.on_plot) + plot_images( + images=batch["img"], + batch_idx=batch["batch_idx"], + cls=batch["cls"].squeeze(-1), + bboxes=batch["bboxes"], + paths=batch["im_file"], + fname=self.save_dir / f"train_batch{ni}.jpg", + on_plot=self.on_plot, + ) def plot_metrics(self): """Plots metrics from a CSV file.""" @@ -111,6 +138,6 @@ class DetectionTrainer(BaseTrainer): def plot_training_labels(self): """Create a labeled training plot of the YOLO model.""" - boxes = np.concatenate([lb['bboxes'] for lb in self.train_loader.dataset.labels], 0) - cls = np.concatenate([lb['cls'] for lb in self.train_loader.dataset.labels], 0) - plot_labels(boxes, cls.squeeze(), names=self.data['names'], save_dir=self.save_dir, on_plot=self.on_plot) + boxes = np.concatenate([lb["bboxes"] for lb in self.train_loader.dataset.labels], 0) + cls = np.concatenate([lb["cls"] for lb in self.train_loader.dataset.labels], 0) + plot_labels(boxes, cls.squeeze(), names=self.data["names"], save_dir=self.save_dir, on_plot=self.on_plot) diff --git 
a/ultralytics/models/yolo/detect/val.py b/ultralytics/models/yolo/detect/val.py index 6fca481..5550ec3 100644 --- a/ultralytics/models/yolo/detect/val.py +++ b/ultralytics/models/yolo/detect/val.py @@ -12,7 +12,6 @@ from ultralytics.utils import LOGGER, ops from ultralytics.utils.checks import check_requirements from ultralytics.utils.metrics import ConfusionMatrix, DetMetrics, box_iou from ultralytics.utils.plotting import output_to_target, plot_images -from ultralytics.utils.torch_utils import de_parallel class DetectionValidator(BaseValidator): @@ -35,35 +34,40 @@ class DetectionValidator(BaseValidator): self.nt_per_class = None self.is_coco = False self.class_map = None - self.args.task = 'detect' + self.args.task = "detect" self.metrics = DetMetrics(save_dir=self.save_dir, on_plot=self.on_plot) - self.iouv = torch.linspace(0.5, 0.95, 10) # iou vector for mAP@0.5:0.95 + self.iouv = torch.linspace(0.5, 0.95, 10) # IoU vector for mAP@0.5:0.95 self.niou = self.iouv.numel() self.lb = [] # for autolabelling def preprocess(self, batch): """Preprocesses batch of images for YOLO training.""" - batch['img'] = batch['img'].to(self.device, non_blocking=True) - batch['img'] = (batch['img'].half() if self.args.half else batch['img'].float()) / 255 - for k in ['batch_idx', 'cls', 'bboxes']: + batch["img"] = batch["img"].to(self.device, non_blocking=True) + batch["img"] = (batch["img"].half() if self.args.half else batch["img"].float()) / 255 + for k in ["batch_idx", "cls", "bboxes"]: batch[k] = batch[k].to(self.device) if self.args.save_hybrid: - height, width = batch['img'].shape[2:] - nb = len(batch['img']) - bboxes = batch['bboxes'] * torch.tensor((width, height, width, height), device=self.device) - self.lb = [ - torch.cat([batch['cls'][batch['batch_idx'] == i], bboxes[batch['batch_idx'] == i]], dim=-1) - for i in range(nb)] if self.args.save_hybrid else [] # for autolabelling + height, width = batch["img"].shape[2:] + nb = len(batch["img"]) + bboxes = batch["bboxes"] * torch.tensor((width, height, width, height), device=self.device) + self.lb = ( + [ + torch.cat([batch["cls"][batch["batch_idx"] == i], bboxes[batch["batch_idx"] == i]], dim=-1) + for i in range(nb) + ] + if self.args.save_hybrid + else [] + ) # for autolabelling return batch def init_metrics(self, model): """Initialize evaluation metrics for YOLO.""" - val = self.data.get(self.args.split, '') # validation path - self.is_coco = isinstance(val, str) and 'coco' in val and val.endswith(f'{os.sep}val2017.txt') # is COCO + val = self.data.get(self.args.split, "") # validation path + self.is_coco = isinstance(val, str) and "coco" in val and val.endswith(f"{os.sep}val2017.txt") # is COCO self.class_map = converter.coco80_to_coco91_class() if self.is_coco else list(range(1000)) - self.args.save_json |= self.is_coco and not self.training # run on final val if training COCO + self.args.save_json |= self.is_coco # run on final val if training COCO self.names = model.names self.nc = len(model.names) self.metrics.names = self.names @@ -71,67 +75,88 @@ class DetectionValidator(BaseValidator): self.confusion_matrix = ConfusionMatrix(nc=self.nc, conf=self.args.conf) self.seen = 0 self.jdict = [] - self.stats = [] + self.stats = dict(tp=[], conf=[], pred_cls=[], target_cls=[]) def get_desc(self): """Return a formatted string summarizing class metrics of YOLO model.""" - return ('%22s' + '%11s' * 6) % ('Class', 'Images', 'Instances', 'Box(P', 'R', 'mAP50', 'mAP50-95)') + return ("%22s" + "%11s" * 6) % ("Class", "Images", "Instances", "Box(P", "R", 
"mAP50", "mAP50-95)") def postprocess(self, preds): """Apply Non-maximum suppression to prediction outputs.""" - return ops.non_max_suppression(preds, - self.args.conf, - self.args.iou, - labels=self.lb, - multi_label=True, - agnostic=self.args.single_cls, - max_det=self.args.max_det) + return ops.non_max_suppression( + preds, + self.args.conf, + self.args.iou, + labels=self.lb, + multi_label=True, + agnostic=self.args.single_cls, + max_det=self.args.max_det, + ) + + def _prepare_batch(self, si, batch): + """Prepares a batch of images and annotations for validation.""" + idx = batch["batch_idx"] == si + cls = batch["cls"][idx].squeeze(-1) + bbox = batch["bboxes"][idx] + ori_shape = batch["ori_shape"][si] + imgsz = batch["img"].shape[2:] + ratio_pad = batch["ratio_pad"][si] + if len(cls): + bbox = ops.xywh2xyxy(bbox) * torch.tensor(imgsz, device=self.device)[[1, 0, 1, 0]] # target boxes + ops.scale_boxes(imgsz, bbox, ori_shape, ratio_pad=ratio_pad) # native-space labels + return dict(cls=cls, bbox=bbox, ori_shape=ori_shape, imgsz=imgsz, ratio_pad=ratio_pad) + + def _prepare_pred(self, pred, pbatch): + """Prepares a batch of images and annotations for validation.""" + predn = pred.clone() + ops.scale_boxes( + pbatch["imgsz"], predn[:, :4], pbatch["ori_shape"], ratio_pad=pbatch["ratio_pad"] + ) # native-space pred + return predn def update_metrics(self, preds, batch): """Metrics.""" for si, pred in enumerate(preds): - idx = batch['batch_idx'] == si - cls = batch['cls'][idx] - bbox = batch['bboxes'][idx] - nl, npr = cls.shape[0], pred.shape[0] # number of labels, predictions - shape = batch['ori_shape'][si] - correct_bboxes = torch.zeros(npr, self.niou, dtype=torch.bool, device=self.device) # init self.seen += 1 - + npr = len(pred) + stat = dict( + conf=torch.zeros(0, device=self.device), + pred_cls=torch.zeros(0, device=self.device), + tp=torch.zeros(npr, self.niou, dtype=torch.bool, device=self.device), + ) + pbatch = self._prepare_batch(si, batch) + cls, bbox = pbatch.pop("cls"), pbatch.pop("bbox") + nl = len(cls) + stat["target_cls"] = cls if npr == 0: if nl: - self.stats.append((correct_bboxes, *torch.zeros((2, 0), device=self.device), cls.squeeze(-1))) + for k in self.stats.keys(): + self.stats[k].append(stat[k]) if self.args.plots: - self.confusion_matrix.process_batch(detections=None, labels=cls.squeeze(-1)) + self.confusion_matrix.process_batch(detections=None, gt_bboxes=bbox, gt_cls=cls) continue # Predictions if self.args.single_cls: pred[:, 5] = 0 - predn = pred.clone() - ops.scale_boxes(batch['img'][si].shape[1:], predn[:, :4], shape, - ratio_pad=batch['ratio_pad'][si]) # native-space pred + predn = self._prepare_pred(pred, pbatch) + stat["conf"] = predn[:, 4] + stat["pred_cls"] = predn[:, 5] # Evaluate if nl: - height, width = batch['img'].shape[2:] - tbox = ops.xywh2xyxy(bbox) * torch.tensor( - (width, height, width, height), device=self.device) # target boxes - ops.scale_boxes(batch['img'][si].shape[1:], tbox, shape, - ratio_pad=batch['ratio_pad'][si]) # native-space labels - labelsn = torch.cat((cls, tbox), 1) # native-space labels - correct_bboxes = self._process_batch(predn, labelsn) - # TODO: maybe remove these `self.` arguments as they already are member variable + stat["tp"] = self._process_batch(predn, bbox, cls) if self.args.plots: - self.confusion_matrix.process_batch(predn, labelsn) - self.stats.append((correct_bboxes, pred[:, 4], pred[:, 5], cls.squeeze(-1))) # (conf, pcls, tcls) + self.confusion_matrix.process_batch(predn, bbox, cls) + for k in self.stats.keys(): + 
self.stats[k].append(stat[k]) # Save if self.args.save_json: - self.pred_to_json(predn, batch['im_file'][si]) + self.pred_to_json(predn, batch["im_file"][si]) if self.args.save_txt: - file = self.save_dir / 'labels' / f'{Path(batch["im_file"][si]).stem}.txt' - self.save_one_txt(predn, self.args.save_conf, shape, file) + file = self.save_dir / "labels" / f'{Path(batch["im_file"][si]).stem}.txt' + self.save_one_txt(predn, self.args.save_conf, pbatch["ori_shape"], file) def finalize_metrics(self, *args, **kwargs): """Set final values for metrics speed and confusion matrix.""" @@ -140,19 +165,20 @@ class DetectionValidator(BaseValidator): def get_stats(self): """Returns metrics statistics and results dictionary.""" - stats = [torch.cat(x, 0).cpu().numpy() for x in zip(*self.stats)] # to numpy - if len(stats) and stats[0].any(): - self.metrics.process(*stats) - self.nt_per_class = np.bincount(stats[-1].astype(int), minlength=self.nc) # number of targets per class + stats = {k: torch.cat(v, 0).cpu().numpy() for k, v in self.stats.items()} # to numpy + if len(stats) and stats["tp"].any(): + self.metrics.process(**stats) + self.nt_per_class = np.bincount( + stats["target_cls"].astype(int), minlength=self.nc + ) # number of targets per class return self.metrics.results_dict def print_results(self): """Prints training/validation set metrics per class.""" - pf = '%22s' + '%11i' * 2 + '%11.3g' * len(self.metrics.keys) # print format - LOGGER.info(pf % ('all', self.seen, self.nt_per_class.sum(), *self.metrics.mean_results())) + pf = "%22s" + "%11i" * 2 + "%11.3g" * len(self.metrics.keys) # print format + LOGGER.info(pf % ("all", self.seen, self.nt_per_class.sum(), *self.metrics.mean_results())) if self.nt_per_class.sum() == 0: - LOGGER.warning( - f'WARNING ⚠️ no labels found in {self.args.task} set, can not compute metrics without labels') + LOGGER.warning(f"WARNING ⚠️ no labels found in {self.args.task} set, can not compute metrics without labels") # Print results per class if self.args.verbose and not self.training and self.nc > 1 and len(self.stats): @@ -161,12 +187,11 @@ class DetectionValidator(BaseValidator): if self.args.plots: for normalize in True, False: - self.confusion_matrix.plot(save_dir=self.save_dir, - names=self.names.values(), - normalize=normalize, - on_plot=self.on_plot) + self.confusion_matrix.plot( + save_dir=self.save_dir, names=self.names.values(), normalize=normalize, on_plot=self.on_plot + ) - def _process_batch(self, detections, labels): + def _process_batch(self, detections, gt_bboxes, gt_cls): """ Return correct prediction matrix. @@ -179,10 +204,10 @@ class DetectionValidator(BaseValidator): Returns: (torch.Tensor): Correct prediction matrix of shape [N, 10] for 10 IoU levels. """ - iou = box_iou(labels[:, 1:], detections[:, :4]) - return self.match_predictions(detections[:, 5], labels[:, 0], iou) + iou = box_iou(gt_bboxes, detections[:, :4]) + return self.match_predictions(detections[:, 5], gt_cls, iou) - def build_dataset(self, img_path, mode='val', batch=None): + def build_dataset(self, img_path, mode="val", batch=None): """ Build YOLO Dataset. @@ -191,33 +216,36 @@ class DetectionValidator(BaseValidator): mode (str): `train` mode or `val` mode, users are able to customize different augmentations for each mode. batch (int, optional): Size of batches, this is for `rect`. Defaults to None. 
""" - gs = max(int(de_parallel(self.model).stride if self.model else 0), 32) - return build_yolo_dataset(self.args, img_path, batch, self.data, mode=mode, stride=gs) + return build_yolo_dataset(self.args, img_path, batch, self.data, mode=mode, stride=self.stride) def get_dataloader(self, dataset_path, batch_size): """Construct and return dataloader.""" - dataset = self.build_dataset(dataset_path, batch=batch_size, mode='val') + dataset = self.build_dataset(dataset_path, batch=batch_size, mode="val") return build_dataloader(dataset, batch_size, self.args.workers, shuffle=False, rank=-1) # return dataloader def plot_val_samples(self, batch, ni): """Plot validation image samples.""" - plot_images(batch['img'], - batch['batch_idx'], - batch['cls'].squeeze(-1), - batch['bboxes'], - paths=batch['im_file'], - fname=self.save_dir / f'val_batch{ni}_labels.jpg', - names=self.names, - on_plot=self.on_plot) + plot_images( + batch["img"], + batch["batch_idx"], + batch["cls"].squeeze(-1), + batch["bboxes"], + paths=batch["im_file"], + fname=self.save_dir / f"val_batch{ni}_labels.jpg", + names=self.names, + on_plot=self.on_plot, + ) def plot_predictions(self, batch, preds, ni): """Plots predicted bounding boxes on input images and saves the result.""" - plot_images(batch['img'], - *output_to_target(preds, max_det=self.args.max_det), - paths=batch['im_file'], - fname=self.save_dir / f'val_batch{ni}_pred.jpg', - names=self.names, - on_plot=self.on_plot) # pred + plot_images( + batch["img"], + *output_to_target(preds, max_det=self.args.max_det), + paths=batch["im_file"], + fname=self.save_dir / f"val_batch{ni}_pred.jpg", + names=self.names, + on_plot=self.on_plot, + ) # pred def save_one_txt(self, predn, save_conf, shape, file): """Save YOLO detections to a txt file in normalized coordinates in a specific format.""" @@ -225,8 +253,8 @@ class DetectionValidator(BaseValidator): for *xyxy, conf, cls in predn.tolist(): xywh = (ops.xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist() # normalized xywh line = (cls, *xywh, conf) if save_conf else (cls, *xywh) # label format - with open(file, 'a') as f: - f.write(('%g ' * len(line)).rstrip() % line + '\n') + with open(file, "a") as f: + f.write(("%g " * len(line)).rstrip() % line + "\n") def pred_to_json(self, predn, filename): """Serialize YOLO predictions to COCO json format.""" @@ -235,28 +263,31 @@ class DetectionValidator(BaseValidator): box = ops.xyxy2xywh(predn[:, :4]) # xywh box[:, :2] -= box[:, 2:] / 2 # xy center to top-left corner for p, b in zip(predn.tolist(), box.tolist()): - self.jdict.append({ - 'image_id': image_id, - 'category_id': self.class_map[int(p[5])], - 'bbox': [round(x, 3) for x in b], - 'score': round(p[4], 5)}) + self.jdict.append( + { + "image_id": image_id, + "category_id": self.class_map[int(p[5])], + "bbox": [round(x, 3) for x in b], + "score": round(p[4], 5), + } + ) def eval_json(self, stats): """Evaluates YOLO output in JSON format and returns performance statistics.""" if self.args.save_json and self.is_coco and len(self.jdict): - anno_json = self.data['path'] / 'annotations/instances_val2017.json' # annotations - pred_json = self.save_dir / 'predictions.json' # predictions - LOGGER.info(f'\nEvaluating pycocotools mAP using {pred_json} and {anno_json}...') + anno_json = self.data["path"] / "annotations/instances_val2017.json" # annotations + pred_json = self.save_dir / "predictions.json" # predictions + LOGGER.info(f"\nEvaluating pycocotools mAP using {pred_json} and {anno_json}...") try: # 
https://github.com/cocodataset/cocoapi/blob/master/PythonAPI/pycocoEvalDemo.ipynb - check_requirements('pycocotools>=2.0.6') + check_requirements("pycocotools>=2.0.6") from pycocotools.coco import COCO # noqa from pycocotools.cocoeval import COCOeval # noqa for x in anno_json, pred_json: - assert x.is_file(), f'{x} file not found' + assert x.is_file(), f"{x} file not found" anno = COCO(str(anno_json)) # init annotations api pred = anno.loadRes(str(pred_json)) # init predictions api (must pass string, not Path) - eval = COCOeval(anno, pred, 'bbox') + eval = COCOeval(anno, pred, "bbox") if self.is_coco: eval.params.imgIds = [int(Path(x).stem) for x in self.dataloader.dataset.im_files] # images to eval eval.evaluate() @@ -264,5 +295,5 @@ class DetectionValidator(BaseValidator): eval.summarize() stats[self.metrics.keys[-1]], stats[self.metrics.keys[-2]] = eval.stats[:2] # update mAP50-95 and mAP50 except Exception as e: - LOGGER.warning(f'pycocotools unable to run: {e}') + LOGGER.warning(f"pycocotools unable to run: {e}") return stats diff --git a/ultralytics/models/yolo/model.py b/ultralytics/models/yolo/model.py index b85d46b..f10dc97 100644 --- a/ultralytics/models/yolo/model.py +++ b/ultralytics/models/yolo/model.py @@ -1,36 +1,111 @@ # Ultralytics YOLO 🚀, AGPL-3.0 license +from pathlib import Path + from ultralytics.engine.model import Model -from ultralytics.models import yolo # noqa -from ultralytics.nn.tasks import ClassificationModel, DetectionModel, PoseModel, SegmentationModel +from ultralytics.models import yolo +from ultralytics.nn.tasks import ClassificationModel, DetectionModel, OBBModel, PoseModel, SegmentationModel, WorldModel +from ultralytics.utils import yaml_load, ROOT class YOLO(Model): - """ - YOLO (You Only Look Once) object detection model. 
- """ + """YOLO (You Only Look Once) object detection model.""" + + def __init__(self, model="yolov8n.pt", task=None, verbose=False): + """Initialize YOLO model, switching to YOLOWorld if model filename contains '-world'.""" + path = Path(model) + if "-world" in path.stem and path.suffix in {".pt", ".yaml", ".yml"}: # if YOLOWorld PyTorch model + new_instance = YOLOWorld(path) + self.__class__ = type(new_instance) + self.__dict__ = new_instance.__dict__ + elif "yolov10" in path.stem: + from ultralytics import YOLOv10 + new_instance = YOLOv10(path) + self.__class__ = type(new_instance) + self.__dict__ = new_instance.__dict__ + else: + # Continue with default YOLO initialization + super().__init__(model=model, task=task, verbose=verbose) @property def task_map(self): - """Map head to model, trainer, validator, and predictor classes""" + """Map head to model, trainer, validator, and predictor classes.""" return { - 'classify': { - 'model': ClassificationModel, - 'trainer': yolo.classify.ClassificationTrainer, - 'validator': yolo.classify.ClassificationValidator, - 'predictor': yolo.classify.ClassificationPredictor, }, - 'detect': { - 'model': DetectionModel, - 'trainer': yolo.detect.DetectionTrainer, - 'validator': yolo.detect.DetectionValidator, - 'predictor': yolo.detect.DetectionPredictor, }, - 'segment': { - 'model': SegmentationModel, - 'trainer': yolo.segment.SegmentationTrainer, - 'validator': yolo.segment.SegmentationValidator, - 'predictor': yolo.segment.SegmentationPredictor, }, - 'pose': { - 'model': PoseModel, - 'trainer': yolo.pose.PoseTrainer, - 'validator': yolo.pose.PoseValidator, - 'predictor': yolo.pose.PosePredictor, }, } + "classify": { + "model": ClassificationModel, + "trainer": yolo.classify.ClassificationTrainer, + "validator": yolo.classify.ClassificationValidator, + "predictor": yolo.classify.ClassificationPredictor, + }, + "detect": { + "model": DetectionModel, + "trainer": yolo.detect.DetectionTrainer, + "validator": yolo.detect.DetectionValidator, + "predictor": yolo.detect.DetectionPredictor, + }, + "segment": { + "model": SegmentationModel, + "trainer": yolo.segment.SegmentationTrainer, + "validator": yolo.segment.SegmentationValidator, + "predictor": yolo.segment.SegmentationPredictor, + }, + "pose": { + "model": PoseModel, + "trainer": yolo.pose.PoseTrainer, + "validator": yolo.pose.PoseValidator, + "predictor": yolo.pose.PosePredictor, + }, + "obb": { + "model": OBBModel, + "trainer": yolo.obb.OBBTrainer, + "validator": yolo.obb.OBBValidator, + "predictor": yolo.obb.OBBPredictor, + }, + } + + +class YOLOWorld(Model): + """YOLO-World object detection model.""" + + def __init__(self, model="yolov8s-world.pt") -> None: + """ + Initializes the YOLOv8-World model with the given pre-trained model file. Supports *.pt and *.yaml formats. + + Args: + model (str | Path): Path to the pre-trained model. Defaults to 'yolov8s-world.pt'. + """ + super().__init__(model=model, task="detect") + + # Assign default COCO class names when there are no custom names + if not hasattr(self.model, "names"): + self.model.names = yaml_load(ROOT / "cfg/datasets/coco8.yaml").get("names") + + @property + def task_map(self): + """Map head to model, validator, and predictor classes.""" + return { + "detect": { + "model": WorldModel, + "validator": yolo.detect.DetectionValidator, + "predictor": yolo.detect.DetectionPredictor, + } + } + + def set_classes(self, classes): + """ + Set classes. + + Args: + classes (List(str)): A list of categories i.e ["person"]. 
+ """ + self.model.set_classes(classes) + # Remove background if it's given + background = " " + if background in classes: + classes.remove(background) + self.model.names = classes + + # Reset method class names + # self.predictor = None # reset predictor otherwise old names remain + if self.predictor: + self.predictor.model.names = classes diff --git a/ultralytics/models/yolo/obb/__init__.py b/ultralytics/models/yolo/obb/__init__.py new file mode 100644 index 0000000..f60349a --- /dev/null +++ b/ultralytics/models/yolo/obb/__init__.py @@ -0,0 +1,7 @@ +# Ultralytics YOLO 🚀, AGPL-3.0 license + +from .predict import OBBPredictor +from .train import OBBTrainer +from .val import OBBValidator + +__all__ = "OBBPredictor", "OBBTrainer", "OBBValidator" diff --git a/ultralytics/models/yolo/obb/__pycache__/__init__.cpython-312.pyc b/ultralytics/models/yolo/obb/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a4c4cf2513ade88ec259c3781f80b2bbb7201b15 GIT binary patch literal 328 zcmXw!ze)r#5XO_u{*l!!Ep6-;4x^2g2;Kvzg;;D8_AcQJ%x>1~-pQ6eh0ow)Sld_! z1kuV)IBcwL5_JeO-^Xvhfjk#QPOx6Y`{D`bhd2Kf{fYGi#RHK$K3)g%JqF5H`~;=1GL#4ljxYNKm+4Db literal 0 HcmV?d00001 diff --git a/ultralytics/models/yolo/obb/__pycache__/predict.cpython-312.pyc b/ultralytics/models/yolo/obb/__pycache__/predict.cpython-312.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fb46f2fdb8a2efe3a79bcf6022ad3a30aa04c256 GIT binary patch literal 3250 zcma(TT}&LudG_aiIb!~#-O8Qc#eJIGZpb=H8HhtjDmF+6(OZ(04Eq6|g z+L3nVoBh7|{^$GV_x1Hr1Y=G9TlQH9p}(_@KLQot$#(&mLmJXJ6Q$Y8n_Qkx^BjW( zQ^<>H(S=1bkPoJV`A|BVVmJ3M?+V2QZyX`VABQN z(N)K=tt(j94Arr5lBabS1}>ia{_CUi`HPpRU>At$@$u#gQ}pCj*#8Jr)KQw#P@31c zw4jR`ukm9-8I5t8a66I?XyR>@4wlg*mky1fWPrvl_xJBgQ{Lwgib^ofNvf$3BI%Qk zZfS;`l zin7GV63NIHZ0tzqMn(ok zNA@Omg^|>hZQ3b2lktz$+V5$?gey2pq&|rSA>khEJIW5e{hD6nJ4VGb&L&7Cw+|j3l5|zM`hJC5>-4n!eu_JiZzjEnaP6hX68@1 zIppjjXT=hF5AY?f#Fqp=?;tz563W%`sGN}fBtlbsQkWhdv<$~kOk-Ln&siYdGMsVA zF0c$$OfPy6VolN%N0C(98Z)v*tT42+YitT~Hj^1niZnoq1s&6{EE^y|mZ`}6Kt)F( zHz5;6qMKt>P{*<~vU{3FWmPfFjH2EoG$Mm%AS-2=FzZ>I-ud_0o^d;`_uQJu_Gr3Z zuym)#jnwg1dumdp$BVv;>H&RvDy~iyrf>{+O~-3PmGV{G4=8dFrqAC&pM~pZL+jzT z`Q~|Jp?xEKa%=y~W${jAHuBy}Pk;nH7amqp-0=JmW(W>kEN|p^AF8eps22t%7p&6w z&>oawA75Sfpmd8}m5M=oP%_7SVL1EzA8@9@hpTI@sS5khV#W47q_p^S)m7t`_{EAv zGn~`vV@iV4=EI8>KazNjZ`*xrb%hI1kv}73(TwPP)yIL?1<>hbe_vfSfd$^bI2X`c z6rt-7$CVcjWW3t-7Kh$Go!Ql%?Pb|wccnWRRcLWPyA1$+#ZhVsg!tmzKrE!en4NhdUf zZAXDthgoV4?=Tnc{09cKc0n`pJM1^W;^fq=@f~)xlL8eBiZf0FmXg;AW>+)WiW!PF zTec-LF;F!@T|%I$;(CS=S*4|dVHau4*GRVV1hczA1yi?Zz%6%}r6Oi`f<_HuSj15* zRi`4ek?sdB(2yJ%UK#5639FbdOvxF=QOBt;Z4_w0gB!7)Et(33W+3B?Jqcz8+!Ii# z6^GUlD9^fFL6C8!pwpmID8Qou!-YY~C}tId2AIbQD4l239UEC0Am%ae@dIuEJPrF8 z8V0oNx+dsJb$vsagIL-324_tI+whcoPXoP z(O@V$kJg^`6LA6;MTII+TSG<~alek<0@y!5kge)!sQVzu+PU7vJq_FrA^zqk^5|6}Z}$8{(<#66CohSu^+I1Ua^hFP~~xeeVuD$9_SMaUMJ#$c-$wuBV}bDF4%BgU zxpVd8Gb98K{wvxVXmvGmsgn`RUdyz-=7EsXt*l|`sY*h+$xDya5_m5N@BIi@AF6`? 
diff --git a/ultralytics/models/yolo/obb/__pycache__/train.cpython-312.pyc b/ultralytics/models/yolo/obb/__pycache__/train.cpython-312.pyc
new file mode 100644
index 0000000..4e86ac5
Binary files /dev/null and b/ultralytics/models/yolo/obb/__pycache__/train.cpython-312.pyc differ
diff --git a/ultralytics/models/yolo/obb/__pycache__/train.cpython-39.pyc b/ultralytics/models/yolo/obb/__pycache__/train.cpython-39.pyc
new file mode 100644
index 0000000..9c2cb08
Binary files /dev/null and b/ultralytics/models/yolo/obb/__pycache__/train.cpython-39.pyc differ
diff --git a/ultralytics/models/yolo/obb/__pycache__/val.cpython-312.pyc b/ultralytics/models/yolo/obb/__pycache__/val.cpython-312.pyc
new file mode 100644
index 0000000..ba785e6
Binary files /dev/null and b/ultralytics/models/yolo/obb/__pycache__/val.cpython-312.pyc differ
diff --git a/ultralytics/models/yolo/obb/__pycache__/val.cpython-39.pyc b/ultralytics/models/yolo/obb/__pycache__/val.cpython-39.pyc
new file mode 100644
index 0000000..c9ca611
Binary files /dev/null and b/ultralytics/models/yolo/obb/__pycache__/val.cpython-39.pyc differ
diff --git a/ultralytics/models/yolo/pose/__pycache__/__init__.cpython-39.pyc b/ultralytics/models/yolo/pose/__pycache__/__init__.cpython-39.pyc
index 35cd12f..1db6261 100644
Binary files a/ultralytics/models/yolo/pose/__pycache__/__init__.cpython-39.pyc and b/ultralytics/models/yolo/pose/__pycache__/__init__.cpython-39.pyc differ
diff --git a/ultralytics/models/yolo/pose/__pycache__/predict.cpython-312.pyc b/ultralytics/models/yolo/pose/__pycache__/predict.cpython-312.pyc
index 5142060..d4f16cc 100644
Binary files a/ultralytics/models/yolo/pose/__pycache__/predict.cpython-312.pyc and b/ultralytics/models/yolo/pose/__pycache__/predict.cpython-312.pyc differ
diff --git a/ultralytics/models/yolo/pose/__pycache__/train.cpython-312.pyc b/ultralytics/models/yolo/pose/__pycache__/train.cpython-312.pyc
index f008c3d..38c4493 100644
Binary files a/ultralytics/models/yolo/pose/__pycache__/train.cpython-312.pyc and b/ultralytics/models/yolo/pose/__pycache__/train.cpython-312.pyc differ
diff --git a/ultralytics/models/yolo/pose/__pycache__/val.cpython-312.pyc b/ultralytics/models/yolo/pose/__pycache__/val.cpython-312.pyc
index 748a7a5..5276eff 100644
Binary files a/ultralytics/models/yolo/pose/__pycache__/val.cpython-312.pyc and b/ultralytics/models/yolo/pose/__pycache__/val.cpython-312.pyc differ
diff --git a/ultralytics/models/yolo/pose/__pycache__/val.cpython-39.pyc b/ultralytics/models/yolo/pose/__pycache__/val.cpython-39.pyc
index 50fdd65..a331001 100644
Binary files a/ultralytics/models/yolo/pose/__pycache__/val.cpython-39.pyc and b/ultralytics/models/yolo/pose/__pycache__/val.cpython-39.pyc differ
diff --git a/ultralytics/models/yolo/pose/predict.py b/ultralytics/models/yolo/pose/predict.py
index 14ae40b..7c55709 100644
--- a/ultralytics/models/yolo/pose/predict.py
+++ b/ultralytics/models/yolo/pose/predict.py
@@ -21,21 +21,26 @@ class PosePredictor(DetectionPredictor):
     """

     def __init__(self, cfg=DEFAULT_CFG, overrides=None, _callbacks=None):
+        """Initializes PosePredictor, sets the task to 'pose', and logs a warning when 'mps' is used as the device."""
         super().__init__(cfg, overrides, _callbacks)
-        self.args.task = 'pose'
-        if isinstance(self.args.device, str) and self.args.device.lower() == 'mps':
-            LOGGER.warning("WARNING ⚠️ Apple MPS known Pose bug. Recommend 'device=cpu' for Pose models. "
-                           'See https://github.com/ultralytics/ultralytics/issues/4031.')
+        self.args.task = "pose"
+        if isinstance(self.args.device, str) and self.args.device.lower() == "mps":
+            LOGGER.warning(
+                "WARNING ⚠️ Apple MPS known Pose bug. Recommend 'device=cpu' for Pose models. "
+                "See https://github.com/ultralytics/ultralytics/issues/4031."
+            )

     def postprocess(self, preds, img, orig_imgs):
         """Return detection results for a given input image or list of images."""
-        preds = ops.non_max_suppression(preds,
-                                        self.args.conf,
-                                        self.args.iou,
-                                        agnostic=self.args.agnostic_nms,
-                                        max_det=self.args.max_det,
-                                        classes=self.args.classes,
-                                        nc=len(self.model.names))
+        preds = ops.non_max_suppression(
+            preds,
+            self.args.conf,
+            self.args.iou,
+            agnostic=self.args.agnostic_nms,
+            max_det=self.args.max_det,
+            classes=self.args.classes,
+            nc=len(self.model.names),
+        )

         if not isinstance(orig_imgs, list):  # input images are a torch.Tensor, not a list
             orig_imgs = ops.convert_torch2numpy_batch(orig_imgs)
@@ -48,5 +53,6 @@ class PosePredictor(DetectionPredictor):
             pred_kpts = ops.scale_coords(img.shape[2:], pred_kpts, orig_img.shape)
             img_path = self.batch[0][i]
             results.append(
-                Results(orig_img, path=img_path, names=self.model.names, boxes=pred[:, :6], keypoints=pred_kpts))
+                Results(orig_img, path=img_path, names=self.model.names, boxes=pred[:, :6], keypoints=pred_kpts)
+            )
         return results
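For reference, a small sketch of what the refactored `PosePredictor.postprocess` ultimately hands back, assuming the stock `yolov8n-pose.pt` weights and any local test image; `device="cpu"` sidesteps the MPS issue warned about above:

```python
from ultralytics import YOLO

pose = YOLO("yolov8n-pose.pt")
results = pose.predict("person.jpg", device="cpu")  # avoid the Apple MPS pose bug
kpts = results[0].keypoints  # keypoints already rescaled to the original image
print(kpts.xy.shape)         # e.g. torch.Size([num_people, 17, 2]) for COCO keypoints
```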
diff --git a/ultralytics/models/yolo/pose/train.py b/ultralytics/models/yolo/pose/train.py
index 2d4f4e0..f5229e5 100644
--- a/ultralytics/models/yolo/pose/train.py
+++ b/ultralytics/models/yolo/pose/train.py
@@ -26,16 +26,18 @@ class PoseTrainer(yolo.detect.DetectionTrainer):
         """Initialize a PoseTrainer object with specified configurations and overrides."""
         if overrides is None:
             overrides = {}
-        overrides['task'] = 'pose'
+        overrides["task"] = "pose"
         super().__init__(cfg, overrides, _callbacks)

-        if isinstance(self.args.device, str) and self.args.device.lower() == 'mps':
-            LOGGER.warning("WARNING ⚠️ Apple MPS known Pose bug. Recommend 'device=cpu' for Pose models. "
-                           'See https://github.com/ultralytics/ultralytics/issues/4031.')
+        if isinstance(self.args.device, str) and self.args.device.lower() == "mps":
+            LOGGER.warning(
+                "WARNING ⚠️ Apple MPS known Pose bug. Recommend 'device=cpu' for Pose models. "
+                "See https://github.com/ultralytics/ultralytics/issues/4031."
+            )

     def get_model(self, cfg=None, weights=None, verbose=True):
         """Get pose estimation model with specified configuration and weights."""
-        model = PoseModel(cfg, ch=3, nc=self.data['nc'], data_kpt_shape=self.data['kpt_shape'], verbose=verbose)
+        model = PoseModel(cfg, ch=3, nc=self.data["nc"], data_kpt_shape=self.data["kpt_shape"], verbose=verbose)
         if weights:
             model.load(weights)

@@ -44,29 +46,33 @@ class PoseTrainer(yolo.detect.DetectionTrainer):
     def set_model_attributes(self):
         """Sets keypoints shape attribute of PoseModel."""
         super().set_model_attributes()
-        self.model.kpt_shape = self.data['kpt_shape']
+        self.model.kpt_shape = self.data["kpt_shape"]

     def get_validator(self):
         """Returns an instance of the PoseValidator class for validation."""
-        self.loss_names = 'box_loss', 'pose_loss', 'kobj_loss', 'cls_loss', 'dfl_loss'
-        return yolo.pose.PoseValidator(self.test_loader, save_dir=self.save_dir, args=copy(self.args))
+        self.loss_names = "box_loss", "pose_loss", "kobj_loss", "cls_loss", "dfl_loss"
+        return yolo.pose.PoseValidator(
+            self.test_loader, save_dir=self.save_dir, args=copy(self.args), _callbacks=self.callbacks
+        )

     def plot_training_samples(self, batch, ni):
         """Plot a batch of training samples with annotated class labels, bounding boxes, and keypoints."""
-        images = batch['img']
-        kpts = batch['keypoints']
-        cls = batch['cls'].squeeze(-1)
-        bboxes = batch['bboxes']
-        paths = batch['im_file']
-        batch_idx = batch['batch_idx']
-        plot_images(images,
-                    batch_idx,
-                    cls,
-                    bboxes,
-                    kpts=kpts,
-                    paths=paths,
-                    fname=self.save_dir / f'train_batch{ni}.jpg',
-                    on_plot=self.on_plot)
+        images = batch["img"]
+        kpts = batch["keypoints"]
+        cls = batch["cls"].squeeze(-1)
+        bboxes = batch["bboxes"]
+        paths = batch["im_file"]
+        batch_idx = batch["batch_idx"]
+        plot_images(
+            images,
+            batch_idx,
+            cls,
+            bboxes,
+            kpts=kpts,
+            paths=paths,
+            fname=self.save_dir / f"train_batch{ni}.jpg",
+            on_plot=self.on_plot,
+        )

     def plot_metrics(self):
         """Plots training/val metrics."""
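The `_callbacks=self.callbacks` addition in `get_validator` means validators spawned during training now fire the trainer's registered callbacks. A minimal training sketch, using the small `coco8-pose.yaml` dataset that ships with the package:

```python
from ultralytics.models.yolo.pose import PoseTrainer

args = dict(model="yolov8n-pose.pt", data="coco8-pose.yaml", epochs=1, imgsz=640)
trainer = PoseTrainer(overrides=args)  # overrides["task"] is forced to "pose" in __init__
trainer.train()
```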
diff --git a/ultralytics/models/yolo/pose/val.py b/ultralytics/models/yolo/pose/val.py
index b8ebf57..8405686 100644
--- a/ultralytics/models/yolo/pose/val.py
+++ b/ultralytics/models/yolo/pose/val.py
@@ -31,100 +31,125 @@ class PoseValidator(DetectionValidator):
         super().__init__(dataloader, save_dir, pbar, args, _callbacks)
         self.sigma = None
         self.kpt_shape = None
-        self.args.task = 'pose'
+        self.args.task = "pose"
         self.metrics = PoseMetrics(save_dir=self.save_dir, on_plot=self.on_plot)
-        if isinstance(self.args.device, str) and self.args.device.lower() == 'mps':
-            LOGGER.warning("WARNING ⚠️ Apple MPS known Pose bug. Recommend 'device=cpu' for Pose models. "
-                           'See https://github.com/ultralytics/ultralytics/issues/4031.')
+        if isinstance(self.args.device, str) and self.args.device.lower() == "mps":
+            LOGGER.warning(
+                "WARNING ⚠️ Apple MPS known Pose bug. Recommend 'device=cpu' for Pose models. "
+                "See https://github.com/ultralytics/ultralytics/issues/4031."
+            )

     def preprocess(self, batch):
         """Preprocesses the batch by converting the 'keypoints' data into a float and moving it to the device."""
         batch = super().preprocess(batch)
-        batch['keypoints'] = batch['keypoints'].to(self.device).float()
+        batch["keypoints"] = batch["keypoints"].to(self.device).float()
         return batch

     def get_desc(self):
         """Returns description of evaluation metrics in string format."""
-        return ('%22s' + '%11s' * 10) % ('Class', 'Images', 'Instances', 'Box(P', 'R', 'mAP50', 'mAP50-95)', 'Pose(P',
-                                         'R', 'mAP50', 'mAP50-95)')
+        return ("%22s" + "%11s" * 10) % (
+            "Class",
+            "Images",
+            "Instances",
+            "Box(P",
+            "R",
+            "mAP50",
+            "mAP50-95)",
+            "Pose(P",
+            "R",
+            "mAP50",
+            "mAP50-95)",
+        )

     def postprocess(self, preds):
         """Apply non-maximum suppression and return detections with high confidence scores."""
-        return ops.non_max_suppression(preds,
-                                       self.args.conf,
-                                       self.args.iou,
-                                       labels=self.lb,
-                                       multi_label=True,
-                                       agnostic=self.args.single_cls,
-                                       max_det=self.args.max_det,
-                                       nc=self.nc)
+        return ops.non_max_suppression(
+            preds,
+            self.args.conf,
+            self.args.iou,
+            labels=self.lb,
+            multi_label=True,
+            agnostic=self.args.single_cls,
+            max_det=self.args.max_det,
+            nc=self.nc,
+        )

     def init_metrics(self, model):
         """Initiate pose estimation metrics for YOLO model."""
         super().init_metrics(model)
-        self.kpt_shape = self.data['kpt_shape']
+        self.kpt_shape = self.data["kpt_shape"]
         is_pose = self.kpt_shape == [17, 3]
         nkpt = self.kpt_shape[0]
         self.sigma = OKS_SIGMA if is_pose else np.ones(nkpt) / nkpt
+        self.stats = dict(tp_p=[], tp=[], conf=[], pred_cls=[], target_cls=[])
+
+    def _prepare_batch(self, si, batch):
+        """Prepares a batch by denormalizing keypoints and scaling them to the original image size."""
+        pbatch = super()._prepare_batch(si, batch)
+        kpts = batch["keypoints"][batch["batch_idx"] == si]
+        h, w = pbatch["imgsz"]
+        kpts = kpts.clone()
+        kpts[..., 0] *= w
+        kpts[..., 1] *= h
+        kpts = ops.scale_coords(pbatch["imgsz"], kpts, pbatch["ori_shape"], ratio_pad=pbatch["ratio_pad"])
+        pbatch["kpts"] = kpts
+        return pbatch
+
+    def _prepare_pred(self, pred, pbatch):
+        """Prepares and scales keypoints in a batch for pose processing."""
+        predn = super()._prepare_pred(pred, pbatch)
+        nk = pbatch["kpts"].shape[1]
+        pred_kpts = predn[:, 6:].view(len(predn), nk, -1)
+        ops.scale_coords(pbatch["imgsz"], pred_kpts, pbatch["ori_shape"], ratio_pad=pbatch["ratio_pad"])
+        return predn, pred_kpts

     def update_metrics(self, preds, batch):
         """Metrics."""
         for si, pred in enumerate(preds):
-            idx = batch['batch_idx'] == si
-            cls = batch['cls'][idx]
-            bbox = batch['bboxes'][idx]
-            kpts = batch['keypoints'][idx]
-            nl, npr = cls.shape[0], pred.shape[0]  # number of labels, predictions
-            nk = kpts.shape[1]  # number of keypoints
-            shape = batch['ori_shape'][si]
-            correct_kpts = torch.zeros(npr, self.niou, dtype=torch.bool, device=self.device)  # init
-            correct_bboxes = torch.zeros(npr, self.niou, dtype=torch.bool, device=self.device)  # init
             self.seen += 1
-
+            npr = len(pred)
+            stat = dict(
+                conf=torch.zeros(0, device=self.device),
+                pred_cls=torch.zeros(0, device=self.device),
+                tp=torch.zeros(npr, self.niou, dtype=torch.bool, device=self.device),
+                tp_p=torch.zeros(npr, self.niou, dtype=torch.bool, device=self.device),
+            )
+            pbatch = self._prepare_batch(si, batch)
+            cls, bbox = pbatch.pop("cls"), pbatch.pop("bbox")
+            nl = len(cls)
+            stat["target_cls"] = cls
             if npr == 0:
                 if nl:
-                    self.stats.append((correct_bboxes, correct_kpts, *torch.zeros(
-                        (2, 0), device=self.device), cls.squeeze(-1)))
+                    for k in self.stats.keys():
+                        self.stats[k].append(stat[k])
                     if self.args.plots:
-                        self.confusion_matrix.process_batch(detections=None, labels=cls.squeeze(-1))
+                        self.confusion_matrix.process_batch(detections=None, gt_bboxes=bbox, gt_cls=cls)
                 continue

             # Predictions
             if self.args.single_cls:
                 pred[:, 5] = 0
-            predn = pred.clone()
-            ops.scale_boxes(batch['img'][si].shape[1:], predn[:, :4], shape,
-                            ratio_pad=batch['ratio_pad'][si])  # native-space pred
-            pred_kpts = predn[:, 6:].view(npr, nk, -1)
-            ops.scale_coords(batch['img'][si].shape[1:], pred_kpts, shape, ratio_pad=batch['ratio_pad'][si])
+            predn, pred_kpts = self._prepare_pred(pred, pbatch)
+            stat["conf"] = predn[:, 4]
+            stat["pred_cls"] = predn[:, 5]

             # Evaluate
             if nl:
-                height, width = batch['img'].shape[2:]
-                tbox = ops.xywh2xyxy(bbox) * torch.tensor(
-                    (width, height, width, height), device=self.device)  # target boxes
-                ops.scale_boxes(batch['img'][si].shape[1:], tbox, shape,
-                                ratio_pad=batch['ratio_pad'][si])  # native-space labels
-                tkpts = kpts.clone()
-                tkpts[..., 0] *= width
-                tkpts[..., 1] *= height
-                tkpts = ops.scale_coords(batch['img'][si].shape[1:], tkpts, shape, ratio_pad=batch['ratio_pad'][si])
-                labelsn = torch.cat((cls, tbox), 1)  # native-space labels
-                correct_bboxes = self._process_batch(predn[:, :6], labelsn)
-                correct_kpts = self._process_batch(predn[:, :6], labelsn, pred_kpts, tkpts)
+                stat["tp"] = self._process_batch(predn, bbox, cls)
+                stat["tp_p"] = self._process_batch(predn, bbox, cls, pred_kpts, pbatch["kpts"])
                 if self.args.plots:
-                    self.confusion_matrix.process_batch(predn, labelsn)
+                    self.confusion_matrix.process_batch(predn, bbox, cls)

-            # Append correct_masks, correct_boxes, pconf, pcls, tcls
-            self.stats.append((correct_bboxes, correct_kpts, pred[:, 4], pred[:, 5], cls.squeeze(-1)))
+            for k in self.stats.keys():
+                self.stats[k].append(stat[k])

             # Save
             if self.args.save_json:
-                self.pred_to_json(predn, batch['im_file'][si])
+                self.pred_to_json(predn, batch["im_file"][si])
             # if self.args.save_txt:
             #    save_one_txt(predn, save_conf, shape, file=save_dir / 'labels' / f'{path.stem}.txt')

-    def _process_batch(self, detections, labels, pred_kpts=None, gt_kpts=None):
+    def _process_batch(self, detections, gt_bboxes, gt_cls, pred_kpts=None, gt_kpts=None):
         """
         Return correct prediction matrix.
@@ -142,35 +167,39 @@ class PoseValidator(DetectionValidator):
         """
         if pred_kpts is not None and gt_kpts is not None:
             # `0.53` is from https://github.com/jin-s13/xtcocoapi/blob/master/xtcocotools/cocoeval.py#L384
-            area = ops.xyxy2xywh(labels[:, 1:])[:, 2:].prod(1) * 0.53
+            area = ops.xyxy2xywh(gt_bboxes)[:, 2:].prod(1) * 0.53
             iou = kpt_iou(gt_kpts, pred_kpts, sigma=self.sigma, area=area)
         else:  # boxes
-            iou = box_iou(labels[:, 1:], detections[:, :4])
+            iou = box_iou(gt_bboxes, detections[:, :4])

-        return self.match_predictions(detections[:, 5], labels[:, 0], iou)
+        return self.match_predictions(detections[:, 5], gt_cls, iou)

     def plot_val_samples(self, batch, ni):
         """Plots and saves validation set samples with predicted bounding boxes and keypoints."""
-        plot_images(batch['img'],
-                    batch['batch_idx'],
-                    batch['cls'].squeeze(-1),
-                    batch['bboxes'],
-                    kpts=batch['keypoints'],
-                    paths=batch['im_file'],
-                    fname=self.save_dir / f'val_batch{ni}_labels.jpg',
-                    names=self.names,
-                    on_plot=self.on_plot)
+        plot_images(
+            batch["img"],
+            batch["batch_idx"],
+            batch["cls"].squeeze(-1),
+            batch["bboxes"],
+            kpts=batch["keypoints"],
+            paths=batch["im_file"],
+            fname=self.save_dir / f"val_batch{ni}_labels.jpg",
+            names=self.names,
+            on_plot=self.on_plot,
+        )

     def plot_predictions(self, batch, preds, ni):
         """Plots predictions for YOLO model."""
         pred_kpts = torch.cat([p[:, 6:].view(-1, *self.kpt_shape) for p in preds], 0)
-        plot_images(batch['img'],
-                    *output_to_target(preds, max_det=self.args.max_det),
-                    kpts=pred_kpts,
-                    paths=batch['im_file'],
-                    fname=self.save_dir / f'val_batch{ni}_pred.jpg',
-                    names=self.names,
-                    on_plot=self.on_plot)  # pred
+        plot_images(
+            batch["img"],
+            *output_to_target(preds, max_det=self.args.max_det),
+            kpts=pred_kpts,
+            paths=batch["im_file"],
+            fname=self.save_dir / f"val_batch{ni}_pred.jpg",
+            names=self.names,
+            on_plot=self.on_plot,
+        )  # pred

     def pred_to_json(self, predn, filename):
         """Converts YOLO predictions to COCO JSON format."""
@@ -179,37 +208,41 @@ class PoseValidator(DetectionValidator):
         box = ops.xyxy2xywh(predn[:, :4])  # xywh
         box[:, :2] -= box[:, 2:] / 2  # xy center to top-left corner
         for p, b in zip(predn.tolist(), box.tolist()):
-            self.jdict.append({
-                'image_id': image_id,
-                'category_id': self.class_map[int(p[5])],
-                'bbox': [round(x, 3) for x in b],
-                'keypoints': p[6:],
-                'score': round(p[4], 5)})
+            self.jdict.append(
+                {
+                    "image_id": image_id,
+                    "category_id": self.class_map[int(p[5])],
+                    "bbox": [round(x, 3) for x in b],
+                    "keypoints": p[6:],
+                    "score": round(p[4], 5),
+                }
+            )

     def eval_json(self, stats):
         """Evaluates object detection model using COCO JSON format."""
         if self.args.save_json and self.is_coco and len(self.jdict):
-            anno_json = self.data['path'] / 'annotations/person_keypoints_val2017.json'  # annotations
-            pred_json = self.save_dir / 'predictions.json'  # predictions
-            LOGGER.info(f'\nEvaluating pycocotools mAP using {pred_json} and {anno_json}...')
+            anno_json = self.data["path"] / "annotations/person_keypoints_val2017.json"  # annotations
+            pred_json = self.save_dir / "predictions.json"  # predictions
+            LOGGER.info(f"\nEvaluating pycocotools mAP using {pred_json} and {anno_json}...")
             try:  # https://github.com/cocodataset/cocoapi/blob/master/PythonAPI/pycocoEvalDemo.ipynb
-                check_requirements('pycocotools>=2.0.6')
+                check_requirements("pycocotools>=2.0.6")
                 from pycocotools.coco import COCO  # noqa
                 from pycocotools.cocoeval import COCOeval  # noqa

                 for x in anno_json, pred_json:
-                    assert x.is_file(), f'{x} file not found'
f"{x} file not found" anno = COCO(str(anno_json)) # init annotations api pred = anno.loadRes(str(pred_json)) # init predictions api (must pass string, not Path) - for i, eval in enumerate([COCOeval(anno, pred, 'bbox'), COCOeval(anno, pred, 'keypoints')]): + for i, eval in enumerate([COCOeval(anno, pred, "bbox"), COCOeval(anno, pred, "keypoints")]): if self.is_coco: eval.params.imgIds = [int(Path(x).stem) for x in self.dataloader.dataset.im_files] # im to eval eval.evaluate() eval.accumulate() eval.summarize() idx = i * 4 + 2 - stats[self.metrics.keys[idx + 1]], stats[ - self.metrics.keys[idx]] = eval.stats[:2] # update mAP50-95 and mAP50 + stats[self.metrics.keys[idx + 1]], stats[self.metrics.keys[idx]] = eval.stats[ + :2 + ] # update mAP50-95 and mAP50 except Exception as e: - LOGGER.warning(f'pycocotools unable to run: {e}') + LOGGER.warning(f"pycocotools unable to run: {e}") return stats diff --git a/ultralytics/models/yolo/segment/__init__.py b/ultralytics/models/yolo/segment/__init__.py index c84a570..ec1ac79 100644 --- a/ultralytics/models/yolo/segment/__init__.py +++ b/ultralytics/models/yolo/segment/__init__.py @@ -4,4 +4,4 @@ from .predict import SegmentationPredictor from .train import SegmentationTrainer from .val import SegmentationValidator -__all__ = 'SegmentationPredictor', 'SegmentationTrainer', 'SegmentationValidator' +__all__ = "SegmentationPredictor", "SegmentationTrainer", "SegmentationValidator" diff --git a/ultralytics/models/yolo/segment/__pycache__/__init__.cpython-312.pyc b/ultralytics/models/yolo/segment/__pycache__/__init__.cpython-312.pyc index 60035cc038f03c28913604d653d201436efa4fd6..a9bf2744fececc20c816f4263daf0ce79d967663 100644 GIT binary patch delta 51 zcmaFH^qh(3G%qg~0}xz@f0RCv=dqNlenx(7s(yK4x_(M(YC&FViGF2%PJWr8!DJ4` FH~_vw5m5jD delta 50 zcmaFP^o)t;G%qg~0}#}?ElQin^H|bFKO;XkRlmG2T|XtYBsICDq$n{tJ2NkRGACmk E0GC@3!Tk|yoSm7MKAE4<6#x$J4Ltw= delta 48 zcmX@lbcKm0k(ZZ?0SMCAf2B<1c_Z%RVin_(np)tKnpl*VnU@|@nV*wiW;&UR(G>t# CG7lR7 diff --git a/ultralytics/models/yolo/segment/__pycache__/predict.cpython-312.pyc b/ultralytics/models/yolo/segment/__pycache__/predict.cpython-312.pyc index b860998774a2b7bb0da9b7125afd16667d25c093..64e90a983417902bd80299b02113ef77a996a6e4 100644 GIT binary patch delta 887 zcmb7CO-vI}5Z<5ec3ZkB&~7ND)rSR=@>fZeX!Sy(i8Y~#i5f{vaOpnU#ddeueFY^Y zdLh~qEzhLf@gN~F{+_(x*$a&%Z44*FSuUVQeM?&n2NRvc%zXRJoBbv;-$E}>iJwH# zO%dCf{+j;qTI5-YiLy)g5}JxsIjb%~3!n@HxayX!f<+Z&j-g@d3#tMYkkYkTHJvwQ zr0Uu*&=;U-62ck=vPM`ro1KwU_pQ+{P79`UP(w)!*s0)^SVqsm*y4OTra)+D5XBzq zS$$#bEM8;p();*;9jEu+4mh6C6J54E+X}S8c9V!g$B?)!rjeOKi&GqSh307CWgeZz zKe!kTaIfIQalVh{@H8LBZh}^M5x?Pmjsi0`KxnCl3sMC^;S_F#A>O&d%rbVnf{XAs zgmA|R<}zXM6K6Eq_)6aWuK!yPA|cEve3065G=wj=X$3?#a*CQ8#9?RnANw^mlW`85 zJ^wXE^9RiV?J)}6+(=8rpNL|(B3yJfhsHHw_|DSp3B$;$S%{BZZ|Od~=}tVdw{L~X<{wl09&gPPsd^%HPe-Y@c&+YfUriJf ztI1-rl};8X>r!`RsytPbhN{v~-5;nV%88o4zv}Pb^LCcbRpRA%%@?WqBA>l|C+I`3 zAiAH@hkmL(xE_6ebJsh#7r;iF6dHVS{#D23g?G1h!KD&clFPz+=bC3*+MfRy*w|=e bc$SNNcZcz&_`ZFN?%D~B9#JHk64Cw(f$s37 delta 738 zcmZ1{_fCfQG%qg~0}#}?ElNw+$jipe%b>~RcZquViu)bEKrRenx(7s(yK4 zx_(M(NosOQNl{{Qc4l7s3-3~ z@JkJAmNs$-LD(g_lmBs8vO`r*w&T=c^qYK$Q`QF(F| zm)Yd)T$>nGCuebc)=LA`6w5I&Ff=gS;o-l|qkNG^d4cN+#f2U>R5X@*F7#aPx6lv9 z^j+wCL)UP9#bH=UDvk0sBL{&-3CSa z3oje5eFx(g0R{o78JbH%uJCK!k&vEj%p)QPwQw=-2Ej`TMiW>kBu?a*AvB3+2kVZ+ log9mkd6?LkSe3tVNKMY<+bW>TD1Jdk?<)h4E)oU09stFYxLg1L diff --git a/ultralytics/models/yolo/segment/__pycache__/predict.cpython-39.pyc b/ultralytics/models/yolo/segment/__pycache__/predict.cpython-39.pyc index 
diff --git a/ultralytics/models/yolo/segment/__init__.py b/ultralytics/models/yolo/segment/__init__.py
index c84a570..ec1ac79 100644
--- a/ultralytics/models/yolo/segment/__init__.py
+++ b/ultralytics/models/yolo/segment/__init__.py
@@ -4,4 +4,4 @@ from .predict import SegmentationPredictor
 from .train import SegmentationTrainer
 from .val import SegmentationValidator

-__all__ = 'SegmentationPredictor', 'SegmentationTrainer', 'SegmentationValidator'
+__all__ = "SegmentationPredictor", "SegmentationTrainer", "SegmentationValidator"
diff --git a/ultralytics/models/yolo/segment/__pycache__/__init__.cpython-312.pyc b/ultralytics/models/yolo/segment/__pycache__/__init__.cpython-312.pyc
index 60035cc..a9bf274 100644
Binary files a/ultralytics/models/yolo/segment/__pycache__/__init__.cpython-312.pyc and b/ultralytics/models/yolo/segment/__pycache__/__init__.cpython-312.pyc differ
diff --git a/ultralytics/models/yolo/segment/__pycache__/predict.cpython-312.pyc b/ultralytics/models/yolo/segment/__pycache__/predict.cpython-312.pyc
index b860998..64e90a9 100644
Binary files a/ultralytics/models/yolo/segment/__pycache__/predict.cpython-312.pyc and b/ultralytics/models/yolo/segment/__pycache__/predict.cpython-312.pyc differ
diff --git a/ultralytics/models/yolo/segment/__pycache__/predict.cpython-39.pyc b/ultralytics/models/yolo/segment/__pycache__/predict.cpython-39.pyc
index ebc040d..135cf37 100644
Binary files a/ultralytics/models/yolo/segment/__pycache__/predict.cpython-39.pyc and b/ultralytics/models/yolo/segment/__pycache__/predict.cpython-39.pyc differ
diff --git a/ultralytics/models/yolo/segment/__pycache__/train.cpython-312.pyc b/ultralytics/models/yolo/segment/__pycache__/train.cpython-312.pyc
index f43270b..acedd40 100644
Binary files a/ultralytics/models/yolo/segment/__pycache__/train.cpython-312.pyc and b/ultralytics/models/yolo/segment/__pycache__/train.cpython-312.pyc differ
diff --git a/ultralytics/models/yolo/segment/val.py b/ultralytics/models/yolo/segment/val.py
--- a/ultralytics/models/yolo/segment/val.py
+++ b/ultralytics/models/yolo/segment/val.py
@@ ... @@ class SegmentationValidator(DetectionValidator):
-            check_requirements('pycocotools>=2.0.6')
+            check_requirements("pycocotools>=2.0.6")
             self.process = ops.process_mask_upsample  # more accurate
         else:
             self.process = ops.process_mask  # faster
+        self.stats = dict(tp_m=[], tp=[], conf=[], pred_cls=[], target_cls=[])

     def get_desc(self):
         """Return a formatted description of evaluation metrics."""
-        return ('%22s' + '%11s' * 10) % ('Class', 'Images', 'Instances', 'Box(P', 'R', 'mAP50', 'mAP50-95)', 'Mask(P',
-                                         'R', 'mAP50', 'mAP50-95)')
+        return ("%22s" + "%11s" * 10) % (
+            "Class",
+            "Images",
+            "Instances",
+            "Box(P",
+            "R",
+            "mAP50",
+            "mAP50-95)",
+            "Mask(P",
+            "R",
+            "mAP50",
+            "mAP50-95)",
+        )

     def postprocess(self, preds):
         """Post-processes YOLO predictions and returns output detections with proto."""
-        p = ops.non_max_suppression(preds[0],
-                                    self.args.conf,
-                                    self.args.iou,
-                                    labels=self.lb,
-                                    multi_label=True,
-                                    agnostic=self.args.single_cls,
-                                    max_det=self.args.max_det,
-                                    nc=self.nc)
+        p = ops.non_max_suppression(
+            preds[0],
+            self.args.conf,
+            self.args.iou,
+            labels=self.lb,
+            multi_label=True,
+            agnostic=self.args.single_cls,
+            max_det=self.args.max_det,
+            nc=self.nc,
+        )
         proto = preds[1][-1] if len(preds[1]) == 3 else preds[1]  # second output is len 3 if pt, but only 1 if exported
         return p, proto

+    def _prepare_batch(self, si, batch):
+        """Prepares a batch for validation by selecting the ground-truth masks that belong to image si."""
+        prepared_batch = super()._prepare_batch(si, batch)
+        midx = [si] if self.args.overlap_mask else batch["batch_idx"] == si
+        prepared_batch["masks"] = batch["masks"][midx]
+        return prepared_batch
+
+    def _prepare_pred(self, pred, pbatch, proto):
+        """Prepares predictions by scaling boxes to native space and decoding the predicted masks."""
+        predn = super()._prepare_pred(pred, pbatch)
+        pred_masks = self.process(proto, pred[:, 6:], pred[:, :4], shape=pbatch["imgsz"])
+        return predn, pred_masks
+
     def update_metrics(self, preds, batch):
         """Metrics."""
         for si, (pred, proto) in enumerate(zip(preds[0], preds[1])):
-            idx = batch['batch_idx'] == si
-            cls = batch['cls'][idx]
-            bbox = batch['bboxes'][idx]
-            nl, npr = cls.shape[0], pred.shape[0]  # number of labels, predictions
-            shape = batch['ori_shape'][si]
-            correct_masks = torch.zeros(npr, self.niou, dtype=torch.bool, device=self.device)  # init
-                                         device=self.device)  # init
             self.seen += 1
-
+            npr = len(pred)
+            stat = dict(
+                conf=torch.zeros(0, device=self.device),
+                pred_cls=torch.zeros(0, device=self.device),
+                tp=torch.zeros(npr, self.niou, dtype=torch.bool, device=self.device),
+                tp_m=torch.zeros(npr, self.niou, dtype=torch.bool, device=self.device),
+            )
+            pbatch = self._prepare_batch(si, batch)
+            cls, bbox = pbatch.pop("cls"), pbatch.pop("bbox")
+            nl = len(cls)
+            stat["target_cls"] = cls
             if npr == 0:
                 if nl:
-                    self.stats.append((correct_bboxes, correct_masks, *torch.zeros(
-                        (2, 0), device=self.device), cls.squeeze(-1)))
+                    for k in self.stats.keys():
+                        self.stats[k].append(stat[k])
                     if self.args.plots:
-                        self.confusion_matrix.process_batch(detections=None, labels=cls.squeeze(-1))
+                        self.confusion_matrix.process_batch(detections=None, gt_bboxes=bbox, gt_cls=cls)
                 continue

             # Masks
-            midx = [si] if self.args.overlap_mask else idx
-            gt_masks = batch['masks'][midx]
-            pred_masks = self.process(proto, pred[:, 6:], pred[:, :4], shape=batch['img'][si].shape[1:])
-
+            gt_masks = pbatch.pop("masks")
             # Predictions
             if self.args.single_cls:
                 pred[:, 5] = 0
-            predn = pred.clone()
-            ops.scale_boxes(batch['img'][si].shape[1:], predn[:, :4], shape,
-                            ratio_pad=batch['ratio_pad'][si])  # native-space pred
+            predn, pred_masks = self._prepare_pred(pred, pbatch, proto)
+            stat["conf"] = predn[:, 4]
+            stat["pred_cls"] = predn[:, 5]

             # Evaluate
             if nl:
-                height, width = batch['img'].shape[2:]
-                tbox = ops.xywh2xyxy(bbox) * torch.tensor(
-                    (width, height, width, height), device=self.device)  # target boxes
-                ops.scale_boxes(batch['img'][si].shape[1:], tbox, shape,
-                                ratio_pad=batch['ratio_pad'][si])  # native-space labels
-                labelsn = torch.cat((cls, tbox), 1)  # native-space labels
-                correct_bboxes = self._process_batch(predn, labelsn)
-                # TODO: maybe remove these `self.` arguments as they already are member variable
-                correct_masks = self._process_batch(predn,
-                                                    labelsn,
-                                                    pred_masks,
-                                                    gt_masks,
-                                                    overlap=self.args.overlap_mask,
-                                                    masks=True)
+                stat["tp"] = self._process_batch(predn, bbox, cls)
+                stat["tp_m"] = self._process_batch(
+                    predn, bbox, cls, pred_masks, gt_masks, self.args.overlap_mask, masks=True
+                )
                 if self.args.plots:
-                    self.confusion_matrix.process_batch(predn, labelsn)
+                    self.confusion_matrix.process_batch(predn, bbox, cls)

-            # Append correct_masks, correct_boxes, pconf, pcls, tcls
-            self.stats.append((correct_bboxes, correct_masks, pred[:, 4], pred[:, 5], cls.squeeze(-1)))
+            for k in self.stats.keys():
+                self.stats[k].append(stat[k])

             pred_masks = torch.as_tensor(pred_masks, dtype=torch.uint8)
             if self.args.plots and self.batch_i < 3:
@@ -130,10 +147,12 @@ class SegmentationValidator(DetectionValidator):

             # Save
             if self.args.save_json:
-                pred_masks = ops.scale_image(pred_masks.permute(1, 2, 0).contiguous().cpu().numpy(),
-                                             shape,
-                                             ratio_pad=batch['ratio_pad'][si])
-                self.pred_to_json(predn, batch['im_file'][si], pred_masks)
+                pred_masks = ops.scale_image(
+                    pred_masks.permute(1, 2, 0).contiguous().cpu().numpy(),
+                    pbatch["ori_shape"],
+                    ratio_pad=batch["ratio_pad"][si],
+                )
+                self.pred_to_json(predn, batch["im_file"][si], pred_masks)
             # if self.args.save_txt:
             #    save_one_txt(predn, save_conf, shape, file=save_dir / 'labels' / f'{path.stem}.txt')

@@ -142,9 +161,9 @@ class SegmentationValidator(DetectionValidator):
         self.metrics.speed = self.speed
         self.metrics.confusion_matrix = self.confusion_matrix

-    def _process_batch(self, detections, labels, pred_masks=None, gt_masks=None, overlap=False, masks=False):
+    def _process_batch(self, detections, gt_bboxes, gt_cls, pred_masks=None, gt_masks=None, overlap=False, masks=False):
         """
-        Return correct prediction matrix
+        Return correct prediction matrix.

         Args:
             detections (array[N, 6]), x1, y1, x2, y2, conf, class
         """
         if masks:
             if overlap:
-                nl = len(labels)
+                nl = len(gt_cls)
                 index = torch.arange(nl, device=gt_masks.device).view(nl, 1, 1) + 1
                 gt_masks = gt_masks.repeat(nl, 1, 1)  # shape(1,640,640) -> (n,640,640)
                 gt_masks = torch.where(gt_masks == index, 1.0, 0.0)
             if gt_masks.shape[1:] != pred_masks.shape[1:]:
-                gt_masks = F.interpolate(gt_masks[None], pred_masks.shape[1:], mode='bilinear', align_corners=False)[0]
+                gt_masks = F.interpolate(gt_masks[None], pred_masks.shape[1:], mode="bilinear", align_corners=False)[0]
                 gt_masks = gt_masks.gt_(0.5)
             iou = mask_iou(gt_masks.view(gt_masks.shape[0], -1), pred_masks.view(pred_masks.shape[0], -1))
         else:  # boxes
-            iou = box_iou(labels[:, 1:], detections[:, :4])
+            iou = box_iou(gt_bboxes, detections[:, :4])

-        return self.match_predictions(detections[:, 5], labels[:, 0], iou)
+        return self.match_predictions(detections[:, 5], gt_cls, iou)

     def plot_val_samples(self, batch, ni):
         """Plots validation samples with bounding box labels."""
-        plot_images(batch['img'],
-                    batch['batch_idx'],
-                    batch['cls'].squeeze(-1),
-                    batch['bboxes'],
-                    batch['masks'],
-                    paths=batch['im_file'],
-                    fname=self.save_dir / f'val_batch{ni}_labels.jpg',
-                    names=self.names,
-                    on_plot=self.on_plot)
+        plot_images(
+            batch["img"],
+            batch["batch_idx"],
+            batch["cls"].squeeze(-1),
+            batch["bboxes"],
+            masks=batch["masks"],
+            paths=batch["im_file"],
+            fname=self.save_dir / f"val_batch{ni}_labels.jpg",
+            names=self.names,
+            on_plot=self.on_plot,
+        )

     def plot_predictions(self, batch, preds, ni):
         """Plots batch predictions with masks and bounding boxes."""
         plot_images(
-            batch['img'],
+            batch["img"],
             *output_to_target(preds[0], max_det=15),  # not set to self.args.max_det due to slow plotting speed
             torch.cat(self.plot_masks, dim=0) if len(self.plot_masks) else self.plot_masks,
-            paths=batch['im_file'],
-            fname=self.save_dir / f'val_batch{ni}_pred.jpg',
+            paths=batch["im_file"],
+            fname=self.save_dir / f"val_batch{ni}_pred.jpg",
             names=self.names,
-            on_plot=self.on_plot)  # pred
+            on_plot=self.on_plot,
+        )  # pred
         self.plot_masks.clear()

     def pred_to_json(self, predn, filename, pred_masks):
-        """Save one JSON result."""
-        # Example result = {"image_id": 42, "category_id": 18, "bbox": [258.15, 41.29, 348.26, 243.78], "score": 0.236}
+        """
+        Save one JSON result.
+
+        Examples:
+            >>> result = {"image_id": 42, "category_id": 18, "bbox": [258.15, 41.29, 348.26, 243.78], "score": 0.236}
+        """
         from pycocotools.mask import encode  # noqa

         def single_encode(x):
             """Encode predicted masks as RLE and append results to jdict."""
-            rle = encode(np.asarray(x[:, :, None], order='F', dtype='uint8'))[0]
-            rle['counts'] = rle['counts'].decode('utf-8')
+            rle = encode(np.asarray(x[:, :, None], order="F", dtype="uint8"))[0]
+            rle["counts"] = rle["counts"].decode("utf-8")
             return rle

         stem = Path(filename).stem
@@ -211,37 +237,41 @@ class SegmentationValidator(DetectionValidator):
         with ThreadPool(NUM_THREADS) as pool:
             rles = pool.map(single_encode, pred_masks)
         for i, (p, b) in enumerate(zip(predn.tolist(), box.tolist())):
-            self.jdict.append({
-                'image_id': image_id,
-                'category_id': self.class_map[int(p[5])],
-                'bbox': [round(x, 3) for x in b],
-                'score': round(p[4], 5),
-                'segmentation': rles[i]})
+            self.jdict.append(
+                {
+                    "image_id": image_id,
+                    "category_id": self.class_map[int(p[5])],
+                    "bbox": [round(x, 3) for x in b],
+                    "score": round(p[4], 5),
+                    "segmentation": rles[i],
+                }
+            )

     def eval_json(self, stats):
         """Return COCO-style object detection evaluation metrics."""
         if self.args.save_json and self.is_coco and len(self.jdict):
-            anno_json = self.data['path'] / 'annotations/instances_val2017.json'  # annotations
-            pred_json = self.save_dir / 'predictions.json'  # predictions
-            LOGGER.info(f'\nEvaluating pycocotools mAP using {pred_json} and {anno_json}...')
+            anno_json = self.data["path"] / "annotations/instances_val2017.json"  # annotations
+            pred_json = self.save_dir / "predictions.json"  # predictions
+            LOGGER.info(f"\nEvaluating pycocotools mAP using {pred_json} and {anno_json}...")
             try:  # https://github.com/cocodataset/cocoapi/blob/master/PythonAPI/pycocoEvalDemo.ipynb
-                check_requirements('pycocotools>=2.0.6')
+                check_requirements("pycocotools>=2.0.6")
                 from pycocotools.coco import COCO  # noqa
                 from pycocotools.cocoeval import COCOeval  # noqa

                 for x in anno_json, pred_json:
-                    assert x.is_file(), f'{x} file not found'
+                    assert x.is_file(), f"{x} file not found"
                 anno = COCO(str(anno_json))  # init annotations api
                 pred = anno.loadRes(str(pred_json))  # init predictions api (must pass string, not Path)
-                for i, eval in enumerate([COCOeval(anno, pred, 'bbox'), COCOeval(anno, pred, 'segm')]):
+                for i, eval in enumerate([COCOeval(anno, pred, "bbox"), COCOeval(anno, pred, "segm")]):
                     if self.is_coco:
                         eval.params.imgIds = [int(Path(x).stem) for x in self.dataloader.dataset.im_files]  # im to eval
                     eval.evaluate()
                     eval.accumulate()
                     eval.summarize()
                     idx = i * 4 + 2
-                    stats[self.metrics.keys[idx + 1]], stats[
-                        self.metrics.keys[idx]] = eval.stats[:2]  # update mAP50-95 and mAP50
+                    stats[self.metrics.keys[idx + 1]], stats[self.metrics.keys[idx]] = eval.stats[
+                        :2
+                    ]  # update mAP50-95 and mAP50
             except Exception as e:
-                LOGGER.warning(f'pycocotools unable to run: {e}')
+                LOGGER.warning(f"pycocotools unable to run: {e}")
         return stats
diff --git a/ultralytics/models/yolov10/__init__.py b/ultralytics/models/yolov10/__init__.py
new file mode 100644
index 0000000..97f137f
--- /dev/null
+++ b/ultralytics/models/yolov10/__init__.py
@@ -0,0 +1,5 @@
+from .model import YOLOv10
+from .predict import YOLOv10DetectionPredictor
+from .val import YOLOv10DetectionValidator
+
+__all__ = "YOLOv10DetectionPredictor", "YOLOv10DetectionValidator", "YOLOv10"
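The SegmentationValidator hunks above replace the old positional stats tuple with a dict of per-image tensor lists, so adding the mask column (tp_m) no longer shifts tuple indices. A minimal self-contained sketch of the pattern (not part of the patch; shapes and niou are made up for illustration):

    import torch

    niou, device = 10, "cpu"  # number of IoU thresholds, as in self.niou
    stats = dict(tp_m=[], tp=[], conf=[], pred_cls=[], target_cls=[])

    for npr, nl in [(3, 2), (0, 1)]:  # two fake images: (num preds, num labels)
        stat = dict(
            conf=torch.zeros(0, device=device),
            pred_cls=torch.zeros(0, device=device),
            tp=torch.zeros(npr, niou, dtype=torch.bool, device=device),
            tp_m=torch.zeros(npr, niou, dtype=torch.bool, device=device),
            target_cls=torch.zeros(nl, device=device),
        )
        for k in stats.keys():
            stats[k].append(stat[k])

    # the get_stats() side then concatenates once per key over all images
    final = {k: torch.cat(v, 0) for k, v in stats.items()}
    print({k: tuple(t.shape) for k, t in final.items()})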
diff --git a/ultralytics/models/yolov10/__pycache__/__init__.cpython-312.pyc b/ultralytics/models/yolov10/__pycache__/__init__.cpython-312.pyc
new file mode 100644
[GIT binary patch data for the new ultralytics/models/yolov10/__pycache__/*.pyc files omitted]
zz2msh_b=8Oe{~VzV?N*)_uG(wv~Ej zBQ;!44c{u>w!d?}b~a8;)K5*+Cg0tdJXfDQx0X6zPxe>Ft`Ap-w+{ELTwK1m`qJ8A zy>fc1t7qlJ@`>uy>eR--aD8C-7F`=SdHeLn=y-i}{QI}pM&GI(ovgioe!VMGOJ?Xn zc!3@hH7alB2-K*3h5^N+=n9-Pj4MUcZOnw#gFYwR)!8ge7I9s}&@mO|mWl9uHS{MH z{tX*ZiZIY?Gb@tSP zC;bd9c!7$5&9Jti^5>>8q~f3 zBay@!jc|f-m=jG#G@8fNHQ6nAsH!rN|Q*^9n_1j;3n!}zxq}=C0thIA5)#l z)D;H2v>4kh5soYhlv0a*2!lcimM9@#(H@pqlCx$)C6(+9CG0C=D2X6V?QayE}@S&%2Ud;EK)1{eWov zjqCuipw*2n-zajm0crwxSARjSLe*D)U>0~Ivr%uww=$qq!XyR+#|5??N+S<;JEE%GxIr{;}$912*z z=AckobG}es2X;-U(FATqpf$;wxWj5sFHY_)5VSuhXg?6N550r`Z355yoy9|?%e1XK dZ|>iOUdUNDr|wJh<-GD&0dpJR?%)nI self.args.conf + if self.args.classes is not None: + mask = mask & (preds[..., 5:6] == torch.tensor(self.args.classes, device=preds.device).unsqueeze(0)).any(2) + + preds = [p[mask[idx]] for idx, p in enumerate(preds)] + + if not isinstance(orig_imgs, list): # input images are a torch.Tensor, not a list + orig_imgs = ops.convert_torch2numpy_batch(orig_imgs) + + results = [] + for i, pred in enumerate(preds): + orig_img = orig_imgs[i] + pred[:, :4] = ops.scale_boxes(img.shape[2:], pred[:, :4], orig_img.shape) + img_path = self.batch[0][i] + results.append(Results(orig_img, path=img_path, names=self.model.names, boxes=pred)) + return results diff --git a/ultralytics/models/yolov10/train.py b/ultralytics/models/yolov10/train.py new file mode 100644 index 0000000..7305bca --- /dev/null +++ b/ultralytics/models/yolov10/train.py @@ -0,0 +1,20 @@ +from ultralytics.models.yolo.detect import DetectionTrainer +from .val import YOLOv10DetectionValidator +from .model import YOLOv10DetectionModel +from copy import copy +from ultralytics.utils import RANK + +class YOLOv10DetectionTrainer(DetectionTrainer): + def get_validator(self): + """Returns a DetectionValidator for YOLO model validation.""" + self.loss_names = "box_om", "cls_om", "dfl_om", "box_oo", "cls_oo", "dfl_oo", + return YOLOv10DetectionValidator( + self.test_loader, save_dir=self.save_dir, args=copy(self.args), _callbacks=self.callbacks + ) + + def get_model(self, cfg=None, weights=None, verbose=True): + """Return a YOLO detection model.""" + model = YOLOv10DetectionModel(cfg, nc=self.data["nc"], verbose=verbose and RANK == -1) + if weights: + model.load(weights) + return model diff --git a/ultralytics/models/yolov10/val.py b/ultralytics/models/yolov10/val.py new file mode 100644 index 0000000..19a019c --- /dev/null +++ b/ultralytics/models/yolov10/val.py @@ -0,0 +1,24 @@ +from ultralytics.models.yolo.detect import DetectionValidator +from ultralytics.utils import ops +import torch + +class YOLOv10DetectionValidator(DetectionValidator): + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + self.args.save_json |= self.is_coco + + def postprocess(self, preds): + if isinstance(preds, dict): + preds = preds["one2one"] + + if isinstance(preds, (list, tuple)): + preds = preds[0] + + # Acknowledgement: Thanks to sanha9999 in #190 and #181! 
diff --git a/ultralytics/models/yolov10/val.py b/ultralytics/models/yolov10/val.py
new file mode 100644
index 0000000..19a019c
--- /dev/null
+++ b/ultralytics/models/yolov10/val.py
@@ -0,0 +1,24 @@
+from ultralytics.models.yolo.detect import DetectionValidator
+from ultralytics.utils import ops
+import torch
+
+class YOLOv10DetectionValidator(DetectionValidator):
+    def __init__(self, *args, **kwargs):
+        super().__init__(*args, **kwargs)
+        self.args.save_json |= self.is_coco
+
+    def postprocess(self, preds):
+        if isinstance(preds, dict):
+            preds = preds["one2one"]
+
+        if isinstance(preds, (list, tuple)):
+            preds = preds[0]
+
+        # Acknowledgement: Thanks to sanha9999 in #190 and #181!
+        if preds.shape[-1] == 6:
+            return preds
+        else:
+            preds = preds.transpose(-1, -2)
+            boxes, scores, labels = ops.v10postprocess(preds, self.args.max_det, self.nc)
+            bboxes = ops.xywh2xyxy(boxes)
+            return torch.cat([bboxes, scores.unsqueeze(-1), labels.unsqueeze(-1)], dim=-1)
\ No newline at end of file
diff --git a/ultralytics/nn/__init__.py b/ultralytics/nn/__init__.py
index 9889b7e..6905d34 100644
--- a/ultralytics/nn/__init__.py
+++ b/ultralytics/nn/__init__.py
@@ -1,9 +1,29 @@
 # Ultralytics YOLO 🚀, AGPL-3.0 license

-from .tasks import (BaseModel, ClassificationModel, DetectionModel, SegmentationModel, attempt_load_one_weight,
-                    attempt_load_weights, guess_model_scale, guess_model_task, parse_model, torch_safe_load,
-                    yaml_model_load)
+from .tasks import (
+    BaseModel,
+    ClassificationModel,
+    DetectionModel,
+    SegmentationModel,
+    attempt_load_one_weight,
+    attempt_load_weights,
+    guess_model_scale,
+    guess_model_task,
+    parse_model,
+    torch_safe_load,
+    yaml_model_load,
+)

-__all__ = ('attempt_load_one_weight', 'attempt_load_weights', 'parse_model', 'yaml_model_load', 'guess_model_task',
-           'guess_model_scale', 'torch_safe_load', 'DetectionModel', 'SegmentationModel', 'ClassificationModel',
-           'BaseModel')
+__all__ = (
+    "attempt_load_one_weight",
+    "attempt_load_weights",
+    "parse_model",
+    "yaml_model_load",
+    "guess_model_task",
+    "guess_model_scale",
+    "torch_safe_load",
+    "DetectionModel",
+    "SegmentationModel",
+    "ClassificationModel",
+    "BaseModel",
+)
diff --git a/ultralytics/nn/__pycache__/__init__.cpython-312.pyc b/ultralytics/nn/__pycache__/__init__.cpython-312.pyc
index c6e62c93277806793269968fb0b526ea3010bfb1..2e2cef51cb2017148494e864a0a0dd032011b998 100644
GIT binary patch
[binary delta data omitted]
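ops.v10postprocess, which the new validator calls, is not itself included in this patch. As a rough, assumed re-implementation of the idea only (top-k selection over the flattened class scores of the one2one head, not the library's actual code), with preds shaped (batch, anchors, 4 + nc) after the transpose:

    import torch

    def v10_style_topk(preds, max_det, nc):
        boxes, scores = preds.split([4, nc], dim=-1)   # (b, a, 4), (b, a, nc)
        flat = scores.flatten(1)                       # (b, a * nc)
        top_scores, idx = flat.topk(min(max_det, flat.shape[1]), dim=-1)
        anchor, labels = idx // nc, idx % nc           # recover anchor and class ids
        top_boxes = boxes.gather(1, anchor.unsqueeze(-1).expand(-1, -1, 4))
        return top_boxes, top_scores, labels           # xywh, conf, cls

    b, s, l = v10_style_topk(torch.rand(1, 8400, 84), 300, 80)
    print(b.shape, s.shape, l.shape)  # torch.Size([1, 300, 4]) etc.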
diff --git a/ultralytics/nn/__pycache__/tasks.cpython-312.pyc b/ultralytics/nn/__pycache__/tasks.cpython-312.pyc
index 2640d759f2784c23fe60c1098db131cdf78226ec..3d17fabd1767a6a6a271c59a8394ac599b98aa6e 100644
GIT binary patch
[binary delta data omitted]
zoy&#F#^sXKzOXkCqAggl1HkBsV;4{AY!QVE^#Xq)0w{Kco3Qm$-`wc5b7ceK;||+>A-mgN z!Ocm&!`@We2zI4#OwuTt3x{4(hgzDwRMNQI&CUKc&Ym(?CPF@)#5$(gCzV4|Qq%=G z_fR){MQ1*`a>v=lX$hK#F~iNIX%2nvsl2|Ff~hQTL3M(CSa#v6(qBy^m~JRunyULN!nJguf8E1 zl2?aInbH#yzhuuqX+V>FkyxO}NG`CpqGKT6*5dPtr^aZrv?d_{6r^WxK+6C`8B$&A4tp9THQLG0ErLc-T~1^VZ2e_{y;-sj z22i}kVG+ly(gEU7RI$5D%Vs)=6kpX8UKwcidjsBJ*efohw27V}oF7AI#YVXgU7+@4 zWO#OYqs{pye1^bC0^?mbXZKCoHelY~r`ygA^ciJkWu-$=S$T$31B2H?DTlJM+P5FL zj^2e|R$9w8&CKh>sc|2GqEVnL5Nt+p5CO^W&bh4gQUK7V6e$N(VlMrHVZ@uQ=hC z&@kt5N$U$X``sRIHLYS_%u6t+AtxSwH``g}{5kYYeDTHmc1THrBaQZ8 z0@OVhFPA*7g>3CYd*>9$kwxCv?4}{Fi)%QFCQMdV=!#H^_0>Ul0Bq_dM9A6;Q#dn8 z9d4o^mYT3P7;2)Dy4_2gLZJH^+@Sovk0tcAK6i-sFKMcIDOw8nPKS|GK~j3^>M1Aw z1#H!eU@KN6A=wg-Q_T(NkP*Ocv<>)_kLt;*fNW2abuy@rwcMqS&iWm!_6!i`a}Zfw z%pq;RE_slNi$?6AP0!BkODP&O77xY7cR7cW(;qM$H+4A=ue_e<>srqCFTOi{vXJN` zpWTQPrnpJS1Z_Q6Vq8(EdLcPwMJX#RPtyJfn!cADW5IJt%u_y#+`*2QKWq{;=rVWc zBSK?5>v!7N)6RI30I4k1esp9$YEEGKB{9shBf}O^i`u9JiHbS}SRNf)zGR2~d1#^v zRNxi%$da5+ipEQ}FJ{^gX-V1cYo@a?rK~go%%LwFv-xuZ+=R>7$Azm+$W;ArZX5>4ZB7Gb1l2Y^qWbdt zH8ZZ9p7S2GSVg-4o>9oI7INh+{`+BOUsdKv176^6$$z38_gM-~>U)ZhlA{~$)tuCS zc2fnl4p^)N7Bu@@CNy1;LH`Cg-ZUNl+BT5AVBHo^mb^t zu*%C$>LudkGRlkc4G&-JQSJzo6^aGZH0aF3{=jyCj z=LPk7_XXWRo^v3_*>7^P>1#x8tY>A)c9Kxs1k-Z>Zn4EIW$mQS$w-hG$O3t`WcGxk zdmg|zRPCm=CV$i3Il;mrU(w}4MNkJ%BghU@@j8?ah z>T}FNkcT~BFo2UP=VO&Z?Ii!LBkFvaLZ3$|RNQ;-rtgx@e?ZYHhkn$2VJp7^ASvC! zHrj-rQxJp@jM2@Zm09*yx=Ciy9d^6+G>2WXjq+k8H-HcZR5%^&6OBsT`le9qXpAOm zP-d~mHh)v5XI_+DapAv8Pe8UOLkrw>>|8XFuqi!2*sMNX7N4?1-EJM_+v&K=oWQ(i z6jfE#RDin3e76u30wtg#0hD0NV+2kASfxdR#u2q$CMcHiu<8Q|z2eqVGHr}VlEGC+ zn*uH_D*wdRzsgO|ZObOh*n8U?pfN4m70TFgW_=nb{S3?Axwi8nCb^7RzqCyBCCqvm z!PqiMhAnP?itMX>GtXR7(_1PI6W8Yz(cKcpY`DYktMh?z2LFa{pzO8YaI4oFw2y`k0S_C~plA=% z7T99BYrSB{K=Dn(@qbm3?yJiqP3%OStrPd`pf_Wq{O5ss6LqKIU(31)rb<7>4ze)y zuSMU0*$9@vFEkZuaM92GSfym8^^^HdmKxal11XEjAoDvA(ew&6L=d%7S*-%x(=9=I6pE#r2Ga@DOo}3xLyap9~{NVAI!{ zNgL}AW`WRr6nsF{StW02aVrllG45quK5-xY5Zfb?T11xMuAQW*t!>)p4FOgodiVM~ zUWk!kF**$gfO=ih*N0v8w5g>TO3|~JhmuEim}e4$u3$^RB|A&#L_wLquwcvjO`OO1 z8u9J*DXhayJQ{mRg`prx0S^6i>^#Mq00GRq;7p+F=vxYkjV>r~UN2zRa?r%qc_p#rQvW`mkPz&Ach7 zVrO)zModC%{Lz`m=6BC$3kNLOr)E90_~hdLs=WiHtpKlD&sChaKEC|Ps>iF|m|4+R zwYR^tb!gtgiv>>=j3`MCkGjTksIP=QL9vjq99~7j%zg6KpyaYDe?^(9k{X(e4X+{4FBzioHH;=RD2TldPHSXm{*rj4^QJrOeu01g9ps?kX+?bRCR| zZU8WGFESp?m)UI%7OFw81Hn!Ny8uY42CrM>Tg3?sCG*qbyZ2{L{t-9P@m$tTP@%{h!^472@*wp73%;d_}V6io#Tpp#xnS zL&I?1M5p^FDpJ{1P*qk|RYHjTOO7b_LAh*lu~>{#i8`FO*nD(;X(7TeP67=dsM3#B z1T6jyjY_@nrYT!pNj|A0DQc^1{U@`9_q|+(YA~w0+>n-4*F;~(x-1y-25SA@!lGg?)@VUQS3;zCx=owHkn= zk>m4x%_cZpqo#TpYwk%nU*s7faPo>h#QdjI>TruG>8rs8$u3zQ7UJI8 zZcI4`^kT`1c0HYoTx{6Yps&SZBcsOMde z$s8(JIFwgD1lw_4ja`mEd<)pTSkx95>M;;ik4k+zgTy9H!})&Q@N|S)yB7qI_-|fsf}( z)BtZe@-;wX+=kSu!9_A2;~D^P@Sx_v=T2kq*@x>1%c2P^|Lng3a}|Zs?O<5$1!b=b z<3jQ$qKigG!35};=Fv=MdD@_8EM)yL@htXfJ$rU891tU_WDIrUu7ufT(H6pUqNzIV zUhq94iipmY5yc&Dis-}0YW!(jzu>wQFi;WTC-v!b117)-hTtJ8c5~KqO4B_R` z(ua%@LwM!*muSZJz&^h7Xi|(ZoEFV#T*V2H{gX_fX8BL@$Xbj>vjaa#RWz>S$x}Z` zS!;Azq7}kuw)oR!1jp5KPQD>-Njabvq)DB2vEsPGCc=zEFdtU*mP} zY1`;svsqGcs76>OsW$o6Y@QH;zJ&F3o0~&!JjS6O90d1j$D`Xc5s;F$-V1>pIO)~6 zX+1_pe~aB#V|At5@0Z{Au+-S|KaxUANm5d(`^f<#4yaXu2ovSbBH* z{q%vz@yOYYk8M4(^@-Xi8y;`CqIgkzS^G-j;D&8|vD^EN+lP{~x}4uzITT~QXVS5$ z-BVwW$v9Pi+TB;Y@{Qb8LwQrXoNt(NhK+*RJ`&5$_iie0;Y}`kY|WW9{Tpir=I#J^ zb;J4Wi}_FGU!3;Tw5JREHrDjd-7zq2=Rnr3Ygz98EcZZ`r$14=mbj-saSvPZ#ER5; zJIQC#ju4;Y)tu~AdFGe&@vElkJSubv6#JCSxiPtS=!q;v7OEM27%c~XBKj!1cNjq0 za@p{lbOpTdPzYYSN7GH=+-PB0`H)=pi@E7@!X;63cxE&;rfBr8G3f+F1MNmjG|oXq zQ^^%gQWP1{k4M+JqR~b46Eqbl8gPPD71e|^9v1S7TAA+xw65ehj=5|1}PJXR|3kcnVNI!xAf*^t> 
z1c})GBm|cbG$RgW0bJ)@n7);xX$#`Uxta6ucd_CaUypmvI+>?|nSXdb!>|u(!qqV$ zw3R)7{$b6g6UzpY3Rufib36ZEnK@%PRxnwQ=5)>PQzd?ObFvV(O5Sc=E{?4%(>x1a z9lShf8lw*`J~x$j7<#jQk)}{8cm2J0;e}-i_V6BU?&>(W9dK9kArdnVpgr{60aCA8>JN~0TX4ExO`n>zJSTDX?|?NU~T4pQP+ zb@Yh&Q>TE&%h=S*v#evd7J`_LNT+S#C(#XyHf%H=L z-jxkxKXyU#+3M$u$syM9e3I1xLqLNim$QWLWDi1K3wzug&;Ig!`e-Wk1(Q{I_@Cf> zjJYC>`>>4Kbe9nR9$WW9o>F=E_pI%OG*ZCsec^=TE@bg!Jv`a-qya=gw#Ui*b ziTmhw<)V@zi3I5$L@O5+7g1ypx*CeMD;Lb1CF#NLwcs&d2);{;Q?btj<(;)uk^?%)Q+NGCrU^mwLaz62op6`_xR6(V$o|DPZSyUnR=y?Di z4%{GOov)PcSq_dvC@i}?+ygoSMJ3f9Z(C@>#)<3kc+4@1-a0p2yK{j%M)^qv8ZAlh z#j#Sjo`OHkw?Y5AHO8bfMA zfYAVIKww0GT9BewOJfjd5NHvY5%2}pV+vhY$+8sgxm8E6xs`}C8MAQrOi>i*EU-r4 zE(Lr+N#lb+0S!~wh6pqRK{^1*xCG;xRZTPiwc`-of?(V{p=Ois4rXI231^JDNYYX- zzj7fN;4T6L%Yhsr8pbjh)0dB&!Bd3(HQs%2*FcngHx1l5fSWsxbb1Fi^-C=4MQ{W@ zB@;M$5XN^0gAfVlE*`8IP=7yWszq;e8156b`b4Gi z<@Y4tKl4<^6Z#ADu5=D=+cCJyJ?QZb?rFYm%{-+zwdAbw?8YZ-Pqbc9Tr1z$U%qj$ z+B;a^IOxB9SV^oQ@{vGHA@WW_5E z`kur?k)c@IF=Mx}$JulHU~D$rIP50xu#$mN{p?c}eQ}Qd7zb+|2>eIxAklk?khPSI z)Dbr0jg(>+5fW$hC(QiEjX6Tf)X#3{g!IA?a>WazusUD+>Ks$`EW@vjrs{d7!F+Xf z2}ClaMDkK^_Zt?%9xqQU5e=d-j7KN@i3m|=FnVdYLm5WlkaLxy>1+(sUE2V>UUMzY zXjuUGh#JEBv26df@8);$W#fkw5-F1!~| zhM4`$GzI9U9eLI0HQ1|aEMec=WdKcCrxYkUs^SZ0Cdvv0qdRrz!5R0k0sxGe|K zaZ^hW4q@PQsM!r%^D``rocEPOr(a*`64maICDmtS`0r-0TZWFR6aH zav;8P{~C5!a>5zGYtn9FHK4Oq4W~VIEkO^S^RUI&UCA8V+Timv;LStP8*BoWx7W7y z{``6t>6A|mVUjpUe_-D(hrHE80|8OSdvS>O;UvsX*#G!y;rg8%;^8yXgy1~O1N@KU zeG-eQ`MH8aAsP66AbFx#_Lh^5UtqHc)<2IETn*LX9%iy&uwB!q_v_Q|zwHhEwBPEi zLl*0iwnJ@wDbvq}&#fAmvH04IivAfD1D0jiEUWu1tFJl-ELESV1;g}f`f2_8X~P=! z;9CWXx0I~^tzPZxq7t_4Gui*VJ%>H__N!zS>wf31L~!m`!F4W(R$~BEejtoq{CnHP zt68^)Gg$ZU6VwK{f0N0`A8fjiS=7RZ9X?Xj5F;2z>qgN@TAgT$X0X5?v;+$$Y4sw+ z>3PK@r6xR?eNHoCVxqhZLxHR8e~2fHeej1AauA_@+vwjpn0xd7cn2XO`|-Ogt0-xM z3-Y3t2QWyR7Sg)Ww60C>Ftn#FZ}Emga%6nFbL|=)Mh7;4uwo%M(Q;_~d6=_9#a!>Dck<1g~Z2>{b@n|Rb232A;6yKO90wc)1e=La9-H1=BL&z zk21}lAMCU@QF|VComc3HpKuyH&W!qH`#hk3VQ#Xq)4+`_47+lB*@0smHyWrv9h@g&XMxb?Qhe+xwqC zwr&};Z1eI2^|M9R<&)Ge>eS2Qv@a&8 zmuKl;oTOepO)nn}D66U~=}%#_jxDm_-i~uXA4Jlk!twiRWl2RZLDKO+Yhe&b$d3Hw zadz|r8*_e;!7hK0Hrs&}3lN|#lM=!4pl*K~Tx|&zwuJG56P~5XN5j*xSRsNLEaO8v zDPoI0oXqda^ZW1owjRGWj2-~&XAsOrFb4r{86+cIqXkPWD;E5F6n^`Z--_Y)J@_33 ze*8=E;6gIU%e@Bv&Va|2_#OK&SoVNB=!QQppc_HL1$kG!7gMgCARRt(!?ob%1x!9NE=o_ z>c)C9h0ME&&;~G>B;71qO-Rv=oeB$yxshWb=9@WALTsN7I}-(6{0$gt?2yrNpaPr* a68kwnXMHwZRWV(-GF@9STY0670Q_H1^LkzY diff --git a/ultralytics/nn/__pycache__/tasks.cpython-39.pyc b/ultralytics/nn/__pycache__/tasks.cpython-39.pyc index 519c646bdef7a7916832e32e79399e59c2c0a61f..60201b2c4335f2239d7d915dc726c0d98311a24f 100644 GIT binary patch literal 40317 zcmeIb3z!_oeIMA}^W53@1B(R+f=%$T7!p`60f>Si5F}m;LIjr(uzZa`4QHokcV{t= zMRhM8qgh$jLMG@OMcU5Uj%4~4l$8q{`H|S?&&lT`K7F=xNi5x+oQv(Y;wVg_9Fx26 zobx58DgU?j8FI7-S>HcuBooBuBxv3*T4R+`d6X1H)G&$`)A&n|Dm5Z zjDNwK=wBK)PvUZa+A<8saLkI4Gbu2el-Im){-JaWCy(@QDbw_ST_3qr=)t$MW)q8UHRPCHyy*GDnbysdz^}gJF z)%$bzS0BhdP<=4>V0AnM@#=}(iR#JR$?8;Ys(LDSs`^y!DS4i* zoUVQ%_X&x2Ri3V%$(^Y_lY6E*otv&cn|ro;Hg~rA$=oNa=W^$&=X2*xqtPS1I+L4u z!KjVJjnZ>(n%CA$gZ~^eH}jHFvnGvGhLdr+uNzKxDYf|gYAp8xQhJTJdRR(bw1?#G;M zxZfuCpT_-m=PumeCHHyU?{MzM{oQEME2yzhG8Si@NoS{X&vkRv%oQEmx%aw}bDT$= z`<(}_o4Jy+&w0=pM{Lg7@9cK6h|N0>IS(V(g0siji{G*{;XH!hMaMjEWDmScn4Qj= ze%dLOmWuVIYlz3s7QBV*gdab8dg`=q9e)(pK3x0#`0@QS=ls~S$By~2^JmZcaRgI- z=2+eHDy3Sfc+u}|$B&;s>&K4ooAXl$j(T3rA2?kt%$LsA>y>hCUQ$xW_n%s*yB;#` zzv`!tR|>9Mp1bBJkJoF(f`FF$xogrd8ML<-xnm;p#E)T z*ys3(sd{nQ_4}occFX)ONszZvPn|kesiWn|r%s*sN>zM43s0OYE#ZSx{9zNi&z0Qi zl6R(Xt)%?kb2BGS&YU|@D%Ks`rp}k^9JY-$mpvQ?8W?ETxP-EaJP+UB^$OhtHlr>L<@1mlxA0?fFxuPQ2iE 
zU4C>@SCLKn11C;S9ewW1O#b-Psr-r4$7ecIo<8}~c|UpP*;A)Zp7YbidZn&hPx;AG zwZ2$({jO_;Y9(K(7o2R&A1W?j0Q0JJX}PRQ{2tfu(P{4T++6vppV4=G3003=J{SyF z86#D$*Ur>kboJ$fx^ZDb*X4r%7kQSse2{%B@uABHgI;UD+uQmwW&t2>nJX2%WmU>w zF1yQxO1U9Hm-{lqyl~TT3o%g%Ea~*Gk_;u z&MrYDW-jC0=WKCCk=FgHF=skkow4ibTn{GuHfK9hdz}ZIyPO?}^*MJtJ5f{rt47?& z4PatF=yx3}xTRxXYIK{wOqM2QN#k*`eR@4dKQ;^2!UrbP0w7l)UKwnXf@4w zvuV9;o-@iuGxl@l7lDM39@{KEYZe|f4I^U*e^YgJrJx*piLI~CwOeS{Yxo8iD5X-e zWyB`z7gS+ssibUv&MSF@d^Hc8QhLR%mb`_!Gm&YPd{oW5hgx^6?y9}ptE=L|#7wE? zqKKwVV96e`*^}kk(z0iliOOtGKWC>&7CbCgmL0UEGXtR1p7-oZVYXCpBUPU(0lsVN zYVW8!sIlKJX;%u%TuW&}DmQ+bvANS*KsWRuA zSN(VapX?_uqF3hKTZTH1k$NwC;?M;>cY#8S7x4`jmfK@~p;o)#72J#N#L_iC74+yi z4s#NLk;#~2W@E4gWfMV;sp+CTXCC}9&g(cXcPD~oT=g}LH9*NtfrT}*2?%M%nu(in zz_D342=Hos=Gl@$xm9#_P`ri)Z$Z_U=NAMdT3yq6=>vziV_bs~_3CW^Yj-nYV@cKL zfT``>vslOQs(n@gr%3K%y^28+BxMs{aJWi^;sVAU%Z6Q^vuD(DX^-uc<_hRq*G4T< zg^FA1tRcj9?K;jEE(6v8djUeUtA(X?li4GvLoNC4k0Ie#g8J zjMeVUtmO~ngKxo zE6~< zjH$Dp#kdy(COve+WPF=Z8+p}u)ts|rok=t8gt2PAW;87|i#Zi_^+(Eq)WR?>*G4c8YT)Iu6|4b{0SFoad?o?y zfb}10d^rXO?4)muY;LJ*H{&Z(!7?#Hko z_(P~dunEYHQeIX7KS`>j=J;kAYca}JN0~kDU0W*oR;}h|=9X(kjY9q5f>QuGMFR?# z=krTgY{1Z@+)K-)QlsR@s)eh5LWmSUxm=qqqtqDa@NBw`&(w=-?n?|l#d<8q^D}Mi z>zh^GVFl(8Z1w_UxMXQIdIx~bGUgsLZuXgJb31-V5VLXZFdJKM)0rLgAV-+EPu6=I z5zklyMk6h_82d8BA#cS5ZxCAv1(1z3rdoJFFHN$ZVzsMvP+nJU(o}l&umc>8g$Q(e zNs*13`1k#&?uDc0rcY0wvfugA+W+$FZ`h3&Y^M&Yw^sLTcX?^4uDs3OwXc=D3Hw|L z1LScz#4@eP%e_Kgwktu*^-2Y2BAZYLP@SIwMODKqepfy(bap=Pr}Oz*0n9^Qv3s*t zWPnI_wSiVWkJ~qJxm+R)3*G*HJl@#8Nk<1CTnu>+=I2XYxTLQ~0HC0xu#z~KJr}L| zs0X^rvsSH@xEE7XUThIP4(vYBwAKoF(3&2SQT0mF~T`mmDllB>o_ z(n~q1Rje_Xp(%NHvHk8U){sjtHe+jP=4~d&3@1He$k^&upiIo@U-P=wGESG1`EuOp z2JfSPd*!$9tt9g^M>ED&JN?YnOa#B?Vz)DZl)S^o~}lARH(Ia>;^qH9{j;n{$iBXPBXd?SQSoq6`GkXnV1#n8muMh;{ib2Wg` z3Hzu_bWk`bSC!m=+G|zIWT1!3Wd|@zoD(5{HqxtAdv~oi0cLKwQVNJw-3S2YtQ16w z6f6zM)7IZQ9wvBCGcQiTD-f#L3@NGeRmal=U7L*K3;?pxMCgWo3i=`_snv>2nVV@~ z!&VN#=2IZklF}|PE0}V#kP#@15mp2ImA>(fqwf-pyi0-WyAFbeFyr>_s+(=Zk32f* zzGvEg@|28}pIj=aLe*`gZSt?^utwt1$%%b)@9}4H?mf%4-!u0%Qno!)_X?HiY@fP_ zGBx6kb0~n17wSubJpDLDV(K(9_z4J}=1Z#1%re*GA|tSBONuDdkGad$5c{q$6G=+- z89p+LeqydtUvhoZ^Ub`T@iO-X>(-Fxr*OHW2#nq_bDxe$v_uIe*=9+brwB>KCSY8bHAnsvq&ylKhyi^lX3yJYb0$&v-pLFX%&Su@@NEk?^*mG!aNar3_W;G)bpvoG}thq zDZg^G$i*(Z!NL}u)4nxZ%bIEx1DYN6(`7d=uL$(|DGlqcfU(+wJc_`iwjuCS3kA2} zc`66;A`qLo>KLB@yla=$PfI39iX>A{z*N%FOl>wKPT%X9-APCYjShb5!CK32? 
zmaTpa(QHhwaBQiPnXqn|`&m5rWnAuV1V%aydSW~1jI>F5BMqq6zl^6PC1dtjjlT8_ zkgC3bcRS#oNP_EO443;D0uO7SV_i2qbJ1Fh5$;zZIDgIX;v!FjeC~Q0a|5fNe$q5A z5}M+L>F5e^oMKV(?nIVC=h^5_yHbJ}%3CN<_EZF&P!KU0Q2j!^;*jla!JEdgQK`>H z3=atg-n4gvaO~isXHchp#!_p`V7Aq!h1(~zOo6ZiMAsH>Z4P~b?1JKm@Y8NzIHy4S z<55BT+nL;I-ySW@uonm{toW6ECw zLc)Mpew_1mTJYwrn9Acl-@NFXm)BuZzw7Wk)DTyf)R8|x9wm##18uko=S+m&5A%04 zFodN2Y~UCIa3upPCS{r6qr;{FR&JTloyeR6S;uCnw-JC#;gpIHc3;8mA2Nw8g6uru zEiY9dhQ=MqLyG4>C??9#Ke*~AnDI-@s7XaXse4$L_f9dUWAzQ))(L`rP?N4Q)9tkD%kk^ImO>=;funRrhjyJxI| z7bDlT0svUVRNH2RQ~}oA69kFH#45skLgGn@CnXL(0O=`NeXT+|3NE00D`jdH?2wt| z${Feuw0O0pRM7YoXcT-Hs7vx?Lcc+?rvM`g>)<>{GO%=xJqs-qQtt?Mb2WQ+SZy$D z;jOd+ZMzl{zEbxGA6&4 zZ5mS%pZH0r;XwOCx>zeNKqv~vUJ~43@${{^nyj6`C7;6!5nS?%xcx3Jw-14l>A?z# z^-=yc23qZ!(DfEM+~FS?aWaX=ak&))E#=G*$SDjzm_TzS1||+fA{R`sit%O~OC;iC z904YYW?Yy=STI1$xZow%Qcm1UQ{)GBK7p9A*d_USpL{DpCe29!tE3x$+JgQwP&vvx z3e<9#mw=JhG`-^xhp+4=)`Vt8D{9vBM-aU34d-F_2CBg9q`EUoS8YHd9SiUa3(yv6 z;;&MNB!KH3sUJWh8Mt{Y+ivG9=b9P;2Qn8^&}GVq7rjQ$Oz=8E`aScJ#!g$%zCEYv zRSMqMwR$?6(L~)D6eMf})Z!WFc|1y{ik>>j^nQL-K9WifMW``92CcGyA7m61bwfs> zb)g!suzipmxz0M|LhVt(`5vCSJqV2SC_tvU8QFdxS@&|3J^I1twK9!FF+dRLU!s8PuIQE?F=WCsw*HfLg4!E{Ch~-!ta!2 z%xa9a>+kHpYGe{BRs^4J15m9U|v`AXMS1eU3D5||M z{|d7Ref9~&__^+O1ZgvFrTa2NnY+{J^tQ~-OmC*UXDHL3=~K*C#MSX9G#yf0PWuj{ zf5Gx*Xx?e+^7yEA*d6-tbYC+XTo3JzlqRcf2GU@wBHDMX1Q-{`u zp^$?R5~dvKX2Kg<8-cjex?$chZ(@aH?Kie9x zCIn2O%fmLKrMWa@uKTKm8jSli()%BfP5l-F4wFV#{{*ppVPiUK;E*P8Nex71qNW>K z0VG2dZ#c#_r7y}=zAt6F8^^1X<0I`_4`Ee%rv3XEgZ3?LyvjB22$)@qz9%3Ahzb__2l^m zi&JQ?K5qaEnlRLf zuUZf&HzB`CuB2$DK-LjtT}ntgvq{_!zF{p19>OGkUP*f?u%78=8mwm*M9aY7z@%53 zT}~1;>6C7-XAa6L^+lwnm5XNhg7Kp`2Vv)<%$;L}~@8U_hD)q+(qMi=sHpX_}X8Sp4L(`RSwb=`Z)Km=shG8*L#q7P^0j zhgwFhMAj>V&>o4e;Bg)!p~;v>*Y|zrjEzt;p@dpP4FTrQ(?0dT<3&A(Z`Zg&*tm)5 z4K)yK_v31b>)8Xa0W-(Y4`ZMVf}SACjY&^Pb0ggWme8tIB3Oj|!FsezJfYXz|AVdk z2?lR6AbJeM78=aWdEYFlB|ej${T^ci=>Hb63n5Zu{Q3ZVd~C1cX!A4 zvWrOt9LFV=7!;7N5{S&kN7fPCw_%{y)dsTbFba!MTy3x*H#yICvr5Bi+`aByp=nwy zIs^;Saiwc)cON>J=B;bhf+!Zz9LeRmpW}-Z+n7|vkgm9!u zbZfVt#ufq1i)I5B=2-9~Nfc+Y|XLN_TDpvzpZdNIlN4_$>x)s1Sf4sJf9h zZEdw_1CV~7uWKN^mAXB2vc%g1;Gf|sg$iO@D*zZ*O#1->-e7bSiiW`L!O**UQ;)3N z8$b>Ze;^gQy?`%&X&|n@{%a9bvEaaj z6Bitq047d0?v9LV@Dcp1;3NF5i%VW!IQxdZxtOSx;qk_R2HbXv2GeX;gpv7Ue4ZbJ zXhO3z2XL!}t}Ub)TIhe5k3{D54aBhL2{vr`R@s`3mhqP17lcJ5`YH##2N813Y2lW^ z5f8P->OURuKco=7T>}9A20^UXh2ugxX#=j+LL&Eouv|t z1lqe4(>6eRYsqEouThdVa}>bbBZ03$JnEyF%) zbBqe!B{C^G#mpUx_)fI6EoK&dQLvcb>{jpYS@pp zCt_G5tt=rav@4ja6y_($<+lJ5ss-xm@)RZA7Tf+bdtZ7NVAt5ONlV&*7or?|m~dGf zwBxwke~ciU``9K$Il9b$sL*2Iz+!8lUqU%L?*lpdA}AZ)X*v3$;Dc7mdO5nzdDy6J zZ^qo~L8+FLxM{i9Vekv3|L09F4Ji#u3}oyn%Gl9{3@*rNu>B)Vxi#+kG+Olf!}>Ls zhbIFVs3axb4)IKPfw_0O5gwnrqJA`|*oOry|J^IPvQCIo~a z(KaUSMSI^ONRtCLtWf&c1ECLbDh>D=JMa7@9RdY{cuUB000nyxyHn5ukss|lEog(a z*#U$;+YAU7t(6#-xtQ3cPB&wV$wkGnP-*-jST?#V(ZA#50|< z;dM75h6wUY?p4qWwO(&XN!f830(_41BlM($69U_+Pt zEnT9d1Z`+Sbb;-X=|${?x@OQQeQL1tJIWb7F3$SL!+;|>)K%u@9B(@IKiVj?M{uaSedG>g1oR)vVc5i1wL&m1! 
zj}Cwr*+0}d#7nED-j*W}j#Z9!(3{FWxbFNqMp=XWM>hi$+CM^Jkk=df+z8ya#QsqF z`f$9@vN2DrVx2L-W~EsLPrvT;$0}F!rzMp z8&vdDd-u6gp|V#t6rVU5s_3~s!jwq%Y1_-(p2S-iCc(ZDV&^Q(Cc~^!szwTd<;=_i ztayup4aC+Q4mE1x{yZ5gHj-1`KHLC}uq>MQ$#F+j*6@`P{+1?9w+%@vAe z2(v-zA;T;Tg2h&bWr+@jsmul|MZH^#wp2x!Qk7s#71V}?!MIwR*F{Vd_!`R4->oSJ zF&`{lg(eQBDPl&2ilo9;ALuTaZTUF}`T;&jRC#vUD@AtTLpx{h#`nY6X9n%CX9m$UYqwgHIr+w4|PZ zZ@V)8r#;>F8WR%90m@=7u2HemT(Uz`RR47{L3i zuI+CE7TD`O310GLwO zIy=n|Awl~iE)%j~Wg2&HhPm4;9dfq*_y$R+20q*!iCvcv!cYjE zXncI#@M?$;%?o7`aBgf`y*5dK?cKVkXD~NrV2C1ulN9#=U7o9CW4XH{{yioHm%0F73^gySK4Xn^m4mv~Rpz$On>FIUpy|j>^wZmE; zt-YG5c?)a<@**u0q@^W|*4D7NfUeqt&bp`Bi;_lOPrPP&{jgOUpv47h7!;edE$ky_ zl>NhA!MAG*iLHf~F?_9=8(`2j(j?j?#A*4nwywo42#VkCh-NzNa8E$z&uNO41S=|c zOOXc9+bFoCi)WmmP)z&QA2IVf)7*7f04|^Cbp;EArUoJ!`pu9_>q9r+7$qBXusk2N zB^GTgJHg|gQN0SW)zxsAizT$L6*hc zfk4=-5p801(c=D1+D=E=Elc#Iew^0hB4+@-y^Axp#Y3nf3VNSO4kC@co6bwl^v#$p z^&jvKl(q?nJc{VW{p9Ry{c4Gxxsp!lav9!J;*w20$Vynb-!<>$=do5Ux!f7`nJZrp(0PTB*@y`C*ilLDH0)Fgb=j7+h_2pbfqC#K=$!uwMNbI~d zi}80@*xzD6-L-GQbbOrsLO~0y>ERV1ez{z_BDOq!I;29>|H@Zroa1*b*8=|zdTf)I zDZ+uDHkgxXJ2UK77EJt`eCNMrAW&t^x{4-}e#(OgtK_+G9#$#_ovGuxH**vAYiuqniflX^X}`qcBV#fX3bWkIB?_$b)Eo zHfD}lBj$)jF9ykYpE>e=W7};ZaH_}!ApGk_q_rU2j|w|HNl1%!Hq#qC)nOF0oZNfx0c!4?I~G7n0G|tcXA@2 zA+n9-h_k`QwVh*iAnMa|f%B^{i%|H2x`mS04zsw}v^{2Z;_nakl(l(Psgq?9%5am4B8yo^C8vPugdtr9OUNZr2(%LC?Is0pByR&tB)xmFqvUpf1wPND z^dP_Pu@?6gx;)vqRkCw*{@?KqB=q07?D(V$yY*l*y}8X5$w^raZA1jS-@z}CdKsBC zBL6gFh1RX$Ev|!mLcHFX+!PB1WW%OQVTr}fF_>ongrpa)+b>BY$P!9Q^^z1>j@k{y z5;V)gI+q~2ypttJE#e`pdj#d5WyTj7yu{!)8RQszios_XG#RWQ@Z;Qvql=ZkkZzd9 zlkF&5ixK+=AiTewcb^Oe`aAB_QCRwIbVMkQ!^WEgw&h@BPS@=Pb0Ue8<%CVSz2V0_ zDERN=ax=&VncQ$+Pdb^|tC-HQ?D!MD11aud1RW~O%_VZ8z=S+*V1qnv(9ehiR{_|@ zaG}2DK2rC#gMyd@6-4C=#5@>ywa8ss1+q9_PA&M4)gzik78vL4G)naiM?pt5zb2%d!b078VCOt`)GGcz2)n%yI=v_Osw@gX}B(db^}iboisKl z#@2>pckHECJRwzGm{LJaI6G&EvF+m#X=gTvDk3F(~ zpCaj`gv`Ex{+@vn`%m#Jro(>fA{;)$dl`I*#5wSOH+Jj6AC^`a9z;Gr1xk{t5WV+} zWkYv?#`&4{wjm*LLCn@KmTI&l--}$iZV;B(Sq7RFUn0&l!b@W>g!0nt*+yFHbZ8E z)z}uPu2U*9)nNwIK?Q#nb&wKVq`(53Hw9a}CS^Z*U%#Ywe-`h841G|C+ijde2y z5k``V_mxx!4GlpBN=u!9Zt~?_q%0vKiQ)Y>q3B0kHe@tFJtH2Wo<+>Uv=;UJHmK+B zW(G@Kn$$Dj_Mk^qzb(x$ZolG=;ZZ%S%PQg=#fA5#Bim={L*X~qNkyB{^dY+!Lv zG^qGy_^K2SVNGgHcX>moHM2M@xndCT0C6@GYDVr8puuM#K#Ce@(Ln3S=*GIT4eoa!p`T)Ap`-6iLVpzb4zsBN32pTy(}U@_ zV)_OWnsD_bF0p88lhE{?1uyn75TkK_i%_89(E||W3Ps!jo+x_rjT7qU@C3XL>$vR_ z`31I?Eiuf-{hL;{PKPj!zWZU8_}dJAhk28ZMhQ>xLML;NJ$* zU(qpuCdDrMIH<*hlY~aUZTg$`(TPA47c_+uh&lm_X6HDd&G3MNFe2K`nw+BL2eH*Eid$j}GMYCK_(+eS_n{ zj;7%p_T3GvubLh$rm}a~Cn4K|$usHC5xD*p-U?rT2DRM?>!1lsPh2sr1MU*Y&aIW4 z{4PJMiOo%AVwdPD;Uqgwf}^D+Cyb{X>PeWdJ+1KjJO7j--`ryJVK7dxD)3&1T#8VJu;jBi`9+ z^($e%b;Y%sghpy8fdiXJ+h@?<8Z=-Wf)TCP0F%)6!R06Hkr8v@&r6_&`pqaHQ&Gq_ zASmWo^VES&^Odfay@*qxVWI6^VTz#*GhhI0HI8{u@X)T$N&gI_Qg6Vf(2GN27qEycu@`V^SR9L}7!waXYPio& ziWJK^Uh>*^Ozd>Vd1Q0V_#!q>faUyg+SNO~P43d^dkbYl;uQb99X{Xn4q>FS)$kw;yCA4KF{@Hj9QcnI(8k6vU1G8x`+Haa}M#>(R zvPZP*b!Q8#0s710_pPZd0!9nJkB$+r=){x|24Sk|b4%#>3z+QeH8DKq^e+KPg^p%` z261{0^T20R2s6N$b9m($!N!zZ+^eoymIR+;@!>Eh{#ab%`CVItPzloY&C! 
zYh$xLGHt`Ju?zFm{zog8J|SY(y`il#qg@do#!dvR-LXY&4kK2E+TCPFF9(HeuwMvW zo)J^AC0J?Jy+|LI?0KB4RPq|Ji6xI7SJ?(Qt&@|#8iP?oA+Zo@ zY>g(#_hSDJ+(k78q7SIFg5R;xXc}{AGo!}vhV;t^8(TIcvJ`kXSXxHAd$16}my2F6 zr-k^#7M|r^%;%Hm&OLk1e&?t0E$ED-#+N(-Yv}GEEZt9-zNER3>l4{ z9_Rzore%VDVEAY8_minfc(uv!&dj`Ty$?yw_pGis1U-26emt2m@3(p^ob`m`IWm@d z1y#>X70E`2e`d==g?9Z36|i8Zp|=F;9p&q%p-`%E&Km zN6um42KT`%v8S1K(#ZV>*gBlfk6ijXz9{W&SsX>$*X1epT)AJz_TzMZE1r%~n(t&T z(w!d9jOyM7OBG1jw>v#Tg!BePNFVxP2V6hh1(|-Aw*xu*;Q|I_k*IppytE5$+3v;{ z?3AyNgI8~WyIjTB-vrqNgHCbM=iMWs8Xm-ov#j`h)dbn|lb?{oSb1ElPQ`|3iyIq7 zu0Ps%;S3-GD0MSoiye4Nef(2}J+pg?FLQDg!lOVm!f)r0r{Qg3!p{hL&V$T$;TW_8 zLwbU)Y`FW^8dRQqQhS^HJ>2@Em~q;e5oTFDc~Uc!ZE0|0G!z}cI374JT%>$61LfX3 zEc+V_{uu*6ozWoz0LY81hqFR9@`5`R&NDHbF?g2H?PFR&<2z% z2M-&&L!v?mil8Gl6Rd1#7bnVoToDH|00PK;7Gco0h@)fLu67`f;itjLsQ%bF-0`rc zQXnYPO%%ziaxhvEmpLgxWq)#8>}aX#HpTdYq<8nhPTY1|I33TXKbSR}P!|xA;JC zG53|KElhio0Y&#X(n`(qEUFktCv3|4+sq<*3@J}4F|VjR^RvNv0th7C5IU+f{s)VQ^O)J(_Vc`Qbe{C4=cnl#lV6x@NA zskx;j>^Ls>j}d6!KES;36e1UYuQt$8? z&s5ikdO1Tl+G|+iI5!<}oR;p4;TMM;e>wgZc5ax^2F#nUnKWd$VLEqh!>3yY_gAd+ zc|A8QXU8^(!JWIe8C%g}m^Q>;{;LV9`GLY#=YHM)_cf% z*xTdng*owr_lP&?J?icA_In2wcA$+h#iJBKR$*PmXrkvvR{ENK%>*`~TllVzc#q)@ z=ETje5yQC`{{H(==R?Z|p8j1v#d|i0vbeKrJ5B=#b1@YjWj>1EUD(^g@37P_^E>1X zr0F-D`}h`+B1`9&CG{Kd$BZ}OrEjF!HxDBZczuu&d7DPq@z(c+U&e1bFxQRmiK|}) zX?oDv)f~79d7Wdg47_R9ei83{3~i37UHWV1Q9jGUvmeK^L*B<1Ea{EUMSFww2Jkep zvIYGz(%gdfKmxTEd)ILZ!dzQ3d911SBl)ciHPbFcj7Z1$q4#T-QSMWc&yilWrL5hNve3u1v&eN? za{Xm|5phDFZvD#0X5&}0k^d8t|7*<0Im>tO{Q`;TXASvr^=bY{X75aM zKzk4QA`bzDNz)gNl~EY|fKkF>=?~E>?b&=9sUzE&^7%>hO(|(i+4Ac}1bR)j_th1Lms3|yzrbc*9 zJOr<{nD@PF=b<4roe62_V6)%*D6m69{b;j)o(CDocuCHlwNuSe83!0_felKiug&9_ zUu-Wp)r2z$VQz3p?&8zUo-Z2SOtW_t2l_(c{z&8CS$ZY53)*F+a2ncS375q{DI>>b ztP{dcXzG}Ukj{0i=OOvOp9=<84}Gc;h8raIO*0^{vD}Ng9@O=BkVo7(U>lGbSQF1M z?JEdQV%^az`Ob*@V$=-CHUBKp%G?V6SM+D6iAJc4} z19B|N0niQ`e}D4CwDPzf{~>PFGGnsrwpuwAwi3H}`kc~|w~((wYEgz-D%W?V>_qNW z7hnJ1P|z}^59{PFwR#{Yl$Oh}x`~8!83s#z{y;<7P zJSG`&?uporXj@;Bx7y(Ij~M%R2sVHKdPMi}C?e=%@PAmTFqhW>fbL0lEI{FR@w2XL ze=6+S4K~>o>co38) zu+y$o+0-4&S5U&OR-HeJyG<*SnMe{7P$Cu?IFvbtv)S;uqhW>G0`>wjMSQ)23lPJt z&bjytVU5}6z%9aSf^f0l!K>Pnb^Kxp%EAM@7ond}<6Cf>>xLt!0;X*q6H9xC`v{W# zt`a|yHYZB>Q6cHsJFr}?)Ms&^vykvmLeiZ}pq8apGEF5qM4_(g1LVO)jGX@L_drLE z;wpJziYfWPp`X-IsbR$RDTsG7Mg^kU!GLl;)y-fVgS!~?G9V^XwE0kd48|GkW{_pj z&mhG>X6zthem@K#F3-?~#}XXw?Q;}yprS-U-OFHt!6OXzGMGYuQ=rSWfDMPD3a1b% z&OLPx0}hB_}(kE;FlOlU3&W9By+^>0E7j(2>O= z7vo6NUnBOP5^MVjLDp>?jX|R^kt@c5IJ#n=l3E)q{uNej7T|9bQzS>h{ePO4K~#Kc^o;K%OAyL(WY&6DX8j6aFqaUWm~TF&(ISYTJPOke+XdnXhmke!E4cmJs9cj& zyVjjsu?bm~OVr$I#Qlw~3y)sdw|D8W3;UN( zq3Ctg%ya9sp!V6vG@}rZHx@V>G zpzb*8-p@17;tQ}r5|YaAk}sl`Dcj>)s-)P_ejG>pRe5sqY#Z#uIv98rT$lF zjc20xC=r9m#OBt3`=-@vghGJ#?g1Y@rf z9M=(G7n9hAkaFTT%@@Fvv?T-Q4cLu5IcezqPqZ5mKZ(gg5310X7=??}`hxQ0Sw1|b zxUG_dAzBNlZJbT3Jql5y3;fHiMB0o(4zPp`6H_484vw-E&H^P!S5DZXXbfyRrD6aB zk0YLk%@OwI;%rr{n_xd+hy()-0tFfa^GS|TzT5K?}D!2Tl${Jx{1k1MTNeXoH!Jn6BO2aiA9LhAuIcY69-N{f<-4R$A&!=4lxI(4(E)W zWm%X}Qr?e7%e(YEC8O)hfM{DvS=Dp2)t*)v-|Q&kOIX^{BB-Ckk_Gnw9t8Do8tQwP z!#MX+QXrg1i!(@p-#1CYLF!0>+S`HaTuIUHC$R(4!6|K$0vV;(&vU3y+B-<^K5x8f zTzUu3a6*|k$n$Yf#ygNsa%nWEQe7N^G{|%tvLp{Pu0G3F(8?@$_LH(e>t|nLdz`-3 zv%iT&+gQ}kevK`o9a`|-HzD~ApM8sMgt8&Le}`@JM%K1C1C(j-E<0EtKnT#DqH0QJ zznl>W^&7%4MOa(+BCJw8V(w}=n4^Vi@E z=c-IiI6%`_GwdWHoUqBuJpLONGcfmHg$m1Yc!+7oyyJ?*ixbu*TSiYxkUTN#8f)8JV-UwFIPN5VQ?N-q;NV0C#M6jBN$5bl3-L3A2E?JtS1;<(#~AlC?$To` zq?PnzTRJr%A_@^(2w!_dc$5EtHsNrgqMr`?h#QY#d&Z?&slNu8p?4YfX|4-IhK|xL zr*tk>dUPiYMQ3N}4m5!ScuESa^>}AK`20gRV%yxUaI!)~0s@R+ zxr0#2iwGOAmFOZL{u2B4EMsjkjR2IaeN-X-Q-Tp&X!ND~$%s8jr89mfbqJcV2h3LV 
z0z(%Yz*91i(F-TG#lZZ*h)9%^JiIhEJu{^P9AAe?7>JBPhVen(jx!+lrNt#8cMu80 z9;S)>qb&}ZWEuzA-)N?)4JY<9#{mYE8>z<_2rn_V(Sl8^f?aKK_AzE8=h4}(5f^zM zXO6=R9%pcb!4nLgM3CFQvG}D*-J?@f+0W>AIN;O8YCAQFt79zbID-=mPBNg}M~;Y> z6XVor-hPq669nDA!Qd0T{Ra&Gj6s}fNe0~v=tM)d0m>#gKci<84?7mi6TdGIlm+{6 zwE5mEe2LZ+qK6jgf%ZI3!cVK#Wty}DP8ghWsR8f^0oLV52qy(={T}N#Cv9+66m9J8 w=GJ@|+{K1|$T%}_ZlDMM1_$;HEDt<25FZ+jn?2d|$1>xY?U~EzE$QL^3l06c0{{R3 delta 12914 zcmaia3v^W1dG6WgoOx4Hhg~PlT74wxBeyxX?RDI=jeF~+kG0xc-`n_p z|2}#EhE-{ufA-_wuYdphf9?IBUswP7mn@tpE)FXA8}z+z?0Mpua6OAWv+lQcFvU;| zPkL>pDb|#U#-f?#SaW7wY+Yu3Y<;FB)*|b@>DJ7K*oI77tS!?XYnNrdv?yWK1%G-= zW@~J#EDEHzWwyt*XLiJPWOl}OW_HDPWjbS>nccD7nLV*RnXXtDQzk+fkfpn0-N%(| zjjotI*V*_9rqI8^V%;Z|tlFmRSB#)hbXhTqOy9`fOImCnNW$oC>2HcAR8 zC^36cupb4bMj7&DGJikvfwJ<4_BI^$7+y@og9-TzKT=*JU<5I) z&nPlN`1Knri9)CeV5((CIZ{ERK@x$*imobyOcKB!3tzjx zMZQz4RSfyI`eh!KvoHDDSbZ0k^?vK#?Wgx*S$$@LXH(gs)8n~x?))b4xBis_5g=Gz z0Hr9%YS=_scfv9s$Qfq3buh;-B)HL=^icKOAEoK*!DrP2NCh@tV#)60MLPOpO;P^&)ZZMTLoxu5&HI_)HCQQed z7#qr%S)0@BUC>Rbod6r~x$`a7S@5>`j&jpTCGDgRl=(kenGc_a&w3tUNEuqEsLGRU zQqLO1%*fjOxMW4R&3como^I_;S3>X?Owf(^Nj07ZxW_YgYOgUFNrGP@-#HWZt z-SZTBWa9zMQZD+Y>Zq-44jZ|ty0l{Bi^On(JJTu4PUbSBoezpHlx(eW{8r+;88=e= zX3+6uXbT-S$hV25rL8B4Y>sY^kD5{MBK3JMjkTZZRKs>y#_hkKN+rejA+-meRSlqo zf}nVit!Fw5Ge293-|AUbR?b+Xs4QRAPS{cPmhSJ^zrCnGfzOgVq*kl5XUl(!EpV)e zR4$p_T^V5PfH+Wfxo`}qd>eqH(^4`0;oPX4vZK=pj}a2F9X~@=Ea`X#)45U0VOV_JUA&w#E#Av%t{)Res*l%v4=G~u z8O3@NTg;}_8DgE6#T(U6FDcKf)BYI<#=JUGoLA%FbLGIP5RWbSkD~?}Mk^hE%8Cz; zS*F2hQH~G0GnO_jxjJ5sB3?m&sOle0yv5t72CL4V=i}m!YU+;C z`Qw!Y+5uu=s*2m7g*BS9Oum9@I6zc$mrfJma{ae&ilfsg7f@@h68mdc)&*Heoe19| z1yIcgQJ+-BPdh4RpRaXTJN0o><9+yX*jb02zk_y8VCRsTH7}0xPC9h_uz00zut0kW z{-9R;62MKy$0@x)a()@v7pUkkK-BAa?6FbE1~r>?{Bf{$(vHU+Ps%nk7PkrS5yEp> z&hgsA+_Z+>?w+7p$-@&!J&Vs0*Xob3Gve*~M)C3D`q`DsTG%ck3p&gUSrbvlHz60* zU4mUhnoFuZMXB4VCQ18}_~i0+K^?VHy7Y&U)_&k6%5!4Qsec=qYY4aSa% zb*p;h_VDeflBXhJJ3-4%*d$SsHs46)CFxwkh~HA$h*C)lM@ySoIjg8E-5uWrGc`19 zTP_VH+iOY7xPy>5+UW9k(2XVDTNPpbqPXEy^=~XJzX$Sd%r+zGoMq9ji5m@959Dbs z;Z!zd$CDh)z*D&_e~G3-r(%xd9nV0*P7XUBQVZ4{!}1+K{C!27ZS3{Rjp-LZZ`{nH zBC@)+K`N&^_In2SKf!0I016YQSJ$o9dV71LDn51;yhBGMs`Vb_zd{Kr>W`N30rBGM zwFR1%)LxuKB;QMbgbObaAO@90Cd( zs>5e31@JRn_m>3Af;E1xzcyGGEDi>WLgm4dV3=sg4Z${`a=Ug)y+k@u$A~AUYUbpo?VI)+<)kbP+Kh^R z@h45~fiKYd=m3fz)m1&aXZhxZErC9iG9*SA3#F14P>a$AAhr1SRDD?VuUji#h*lQp zAiax8DM@?_f0U*pO!C)S17Istip^}SAIG>JdR@yaGq4V7F}3*|9p!#bO(88vs_~Ts zmJyIF$fTBmV3HD7GE{9B)zW@B{+;PWX23{v{tFcy7KfU5i1(U18|0!$J;C;5b1W-2 z_z=qe)y3^!*H(C!Mt(N};3(0h{Ao%+Jjw$RcjqIIamjchla5PK6Q^V5=v3fnL#8cN zrF(LVMiU%*Mxt?IZ&$dSqSQI++LKLWpsT(@1te_>i>rlz&T$+vuWe?n9CthuCeK;? 
z8DPZ%1F3{1C*^pL%4*($@;Up%(UOBhT<-Df)G=+eq#Y-|#u=8B`#P0M@<}>;3)#1o zM(osjq!b9S8m)$vvwEcJVULt*LF9DUCSmd7`ffHYe!9MmoeVmd!~c=0I}Y=^{6Y#duA9oJ~UmN_ZYqz)c_x0xBU|*#F?^v;-6b817F3+ z{g~i4M7XuK@S8Hn9CgT&4B^V_F#j6L9c>~t$~RL=A50^897#zWN6REG%8ip^0f*c$ ztz;r?#z|U9W5y}LxlfJ8=WJn=z9qX%ChTa?r3qN&qu6<9ueb%fnM-PYl*V|FP*p1h zZGi70zVVbK>@DhGFK|A<=h}8ES#*#cpx+u+DpqcYvTulwZ1~s0H>f|80C63s^+t85 zqtSHAmaCNW;y7lyI@=?@9zKLlvWdIp7AP?pDGcoqayleDCl<=_urAcLn=tS zq7FUduWV&J!3--Gx?%m`%A)-y#Gf?)evz?74gEx6I=Kd z6hQCLN#LI)(6s<$gBfHSr~#d6g-tAzHikI2t*-ES!jzm#nu`_^E#d=I_&EZv5O|dU z9XP43oDgaKR5D}22sb$G6sH+U9mQ`_YH@fv!S1BV+@K}M9R+u(;T55<>SLjxzlgsE zZPYKmzom=)rf5I7OYGlz|3f!*{(WkFyI|;t#3v~y=W}z;eY^|aoh(k%Zx&zK+SfKu ztyb#gUesz`q?Q=kwmSCLgzS>(rtjYLAwu7q1n;1BEp_ZgLXtq|?_lMX!r7L_%J*;o zr^3%r)gKKkif;aGlsH-ZDe4{8V!&CmvqCq|3wKr6!V2oOJF{{&!R$ z$CF@@AAXLC4(elp7y<4@Plm(6vwiJfaMhb}cyQ?-(LmcbVRChP2M%ILhp4=5} zkt;gVvtf_qG_tMb6rC`seIuS6PNfYF<6hc1rKoni=~Nb>Bqx*`v*Uwi!XAU)#izs@ zJ>~;)CI1X8@TZ*J>(oN0zNAWr#eg7$b&H6T>m*V;CFH(=Y2)Y@R=*V!*50wg`+Mh| z0DIVsNLx5Hn1ULF)i3>m$be~IFwJaaE&zmbxKvpqV)HS04v7IXZMA|R>oCSWIXg$B zrTtOViB&yhaL)}Q&g`o_`H$3ePGAP_EeU9k_)iIto%vtL#`l2NEq<`CioHiCpqorK zQ;9J{iK#|d2L32`7>H>ybg0C1qeKP|DV$g;jdO$(uflqb`Cy`yIhEqEzRI%qi@9yU zz-vjj%0ol4b?beulf!>TbNd?t_c}}G7wEvA?K|n=u#6P(t9=pny7+2eYcm2@3a2fd zbGo@K=%OAapna6<9v3AK{6#?-Q}{H3A;G+s_Zp#VYF5pwlPXGz`0IH!3oFbhu2oKZ zCVi9s$-rcAvM66hsd)?2s&w3spzoy;-QVh|5GA4 z9)pg6JhqN^U?6wVv|#Xi4fA{|X~Ll(H14Nn)SVL;vg1QMH#Q0^{xa2(@RE`iG2v`H zJC=#N)|e^#=RXk#4?iruCAV8UksQWs`>569F3Ju{-#*Wm+n!RJv-#+=Tqhd*NXKc{sbo^JH&rXO@2T~fw8Q0Zp<`co=S3g&y!5F zoB(_S*J+4`U29e@?bp$!h)+{Xb-?05@u?%b(qt3zA_5@-U8u!+B(3E_B-2rP3 zHUAet!iP`8XOYT99N7&cQ^*%rB8GZJt#!l6zPu0NWEJ7$BM2uWEyKw>5l%+BR>@Yv zy=HK)HA9Cxjk1N{vLS=ZJ{{3wulRhZ?#Rbw92wrTj3d{`II<6MWFI2U^Ks-FIROkl z>CgKm6MJRHez)Vien~}qngoC^@eUK|z+>Vc1K$#p%Zd?93E$ThA%{7V$|loe2KFv8 zWJe@VN)hTx&j{(@2w4{7Mw(o#tXWCXe661!5dFuyTMmLEES8CBC+`6#}GHLUr)!3M61H_UIZkg{1?Uw5%{-eBUT z!4Y@w#DU*wB&!i&T+BB23pRr~UXnOaNl(TSLwJNo@3^ z4{46jp`AA^WwSfFTHF}vPf1$#pv`xg*f6}d?+&`WiW-+VgmZ6_)M7T@!na@-hIC## z`||K7^};96&?!!3MswVzAO+&)Zq(N!e|SADgq$=-%cTuR%R(A;(O?qL{G2jKXf#jo z@wpDH9|7O0N1d`nhgU*kLRCvyx$4&-9$4s?1o=u}iAqscM0PSQ!1@mwd0GhFUngNhqek3%{bMLM+eC9u$O9KUwOI^71lfc{V7!^P@l zU9r8_mGOUI$|SHRMxb+iM(C`{pR@gTz$nfuH*kUa0>&VH^tfsVr=bRq%HmQ&Ms?ZT zZ}|&8k`s1_@)TwJ@RYn5*eIIR9>v|9``f0pAqCk`G5c59ojQxp)>+-G5G{7<#?lby z>2WxHR_mCZf(>QlE@VjnSbP;I!=s>xq>;-xFCyzyJRoONydjiP(AAZRlSvv@j*SP^7UwSv^S}v@A6 zCMxp?;UiKMu&YL@@h!nugRd6fQoGKsAEX<4*uwXYp6$I@@poVLa zx}*{}U1#Sy5u_}hUX2eyS)=qa1UzYx8T?PLnNeyLH;`+um3AauE8_~1FfOs{%=+>{ z+_&S3d7<46klZY-_-8n{!sl#|ZECedZ*j_Ngd8 zEGowBX0kasxsEqk%#UcInHhi}kZp)9WQ3>JMgP+H)vkFYh2?jtkVxZRKT;!NNe|G$c*>}v*CG-&7?C1}r`i{x?svBPY3bpee8?_P{T!Ha6nuN5(3i9NT zgUSD$aC`{o(tV-hP4FQKD^ipZv4eCPL7nFbO`2@fpSyAF0&Wr!A>g~INN%s(fD4p; zoIn?W`)EX>ty78aAwg|<-r@su7dxv!ZL0(Iisr@3fe=iqdZfHKaJX2~nl(C%5qy=* zuOYgql_R|rF}*g%_oDCH4j&yn`lOV7UwrS8&zwb1G);KPrJ zKh5uH?!w@(I~wrAA0t2oE+86k3IiO*iouSZlkEk$=^LmyZMxi@{^+uMa}H@Sm);b3 z;hPC;A&{qvJD5p!iN`wb^``5{6C3euRJI);$?+rzS4sqS5vZc-l?3EIQ=CRl3TDze zO5e@`yobu}w*6hyg#_KA_0f9oLE7YqIP_@ky2Z`p`_McIIl1sxgVYjyRA&89pMCbx z3Z`jiUK6hteo-hHX|~td>+P1os`JWql~2JEhARh|25VSVCMzey`EcHIRzuxhyVc%+ z9E{d{pi(hbUQ$t8nGX-Cz>m0A_RI9(Lbn_2Mtilr#$IbT*-_bl0R3zHYq)-DGgjqG zXJBa=k+#Xw>nz)b2JK@s{BO9!4?);bN2serU5CAKP?gOU@z!IT3LAkt-&MnWSCggL zuK~AYWtZ+1gt=!}_y+caorDsS!D= zV^MEo0XyM?;5gtB6eel!%9py=cuy%=?-bmRrwYm>&e#-g_wYU`O0LaxCmvmu%&3z~ z?9F&;<+-fntEYFDQ+j;Ql$I~aFVV22`RaHVzHWRydG)%^vo`KorZl7UhHjVV zOVDfi4Lx5$`6+#5kFoxSZdB)&7&TN+I&Qe-^KJ=7g_vVW)!4HGR zYn|A7h%Uh*33+Hb$ zpDZ7tVy&~iji(89sW|mSZTkTv9sfWgd3GR|H96f4Ola9`tBg}gE&Q&Zu*tmVe+GaH 
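The autobackend.py diff below reworks how AutoBackend resolves an inference engine from the weights format and threads a new batch argument through to OpenVINO. A minimal usage sketch of the patched class, assuming an Ultralytics install that includes this change (the weights file and input shape here are illustrative, not part of the patch):

import torch

from ultralytics.nn.autobackend import AutoBackend

# The backend is picked from the file suffix: *.pt -> PyTorch, *.onnx -> ONNX Runtime,
# *.engine -> TensorRT, *_openvino_model/ -> OpenVINO, and so on.
model = AutoBackend(weights="yolov8n.pt", device=torch.device("cpu"), fp16=False, batch=1)
model.warmup(imgsz=(1, 3, 640, 640))  # dummy forward pass; a no-op on CPU devices
im = torch.zeros(1, 3, 640, 640)  # BCHW float32 input tensor
y = model(im)  # raw predictions from AutoBackend.forward()
print(model.names)  # class-index -> class-name mapping resolved by check_class_names()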
diff --git a/ultralytics/nn/autobackend.py b/ultralytics/nn/autobackend.py
index 9010815..abd255c 100644
--- a/ultralytics/nn/autobackend.py
+++ b/ultralytics/nn/autobackend.py
@@ -7,7 +7,6 @@ import platform
 import zipfile
 from collections import OrderedDict, namedtuple
 from pathlib import Path
-from urllib.parse import urlparse
 
 import cv2
 import numpy as np
@@ -21,7 +20,11 @@ from ultralytics.utils.downloads import attempt_download_asset, is_url
 
 
 def check_class_names(names):
-    """Check class names. Map imagenet class codes to human-readable names if required. Convert lists to dicts."""
+    """
+    Check class names.
+
+    Map imagenet class codes to human-readable names if required. Convert lists to dicts.
+    """
     if isinstance(names, list):  # names is a list
         names = dict(enumerate(names))  # convert to dict
     if isinstance(names, dict):
@@ -29,44 +32,39 @@ def check_class_names(names):
         names = {int(k): str(v) for k, v in names.items()}
         n = len(names)
         if max(names.keys()) >= n:
-            raise KeyError(f'{n}-class dataset requires class indices 0-{n - 1}, but you have invalid class indices '
-                           f'{min(names.keys())}-{max(names.keys())} defined in your dataset YAML.')
-        if isinstance(names[0], str) and names[0].startswith('n0'):  # imagenet class codes, i.e. 'n01440764'
-            map = yaml_load(ROOT / 'cfg/datasets/ImageNet.yaml')['map']  # human-readable names
-            names = {k: map[v] for k, v in names.items()}
+            raise KeyError(
+                f"{n}-class dataset requires class indices 0-{n - 1}, but you have invalid class indices "
+                f"{min(names.keys())}-{max(names.keys())} defined in your dataset YAML."
+            )
+        if isinstance(names[0], str) and names[0].startswith("n0"):  # imagenet class codes, i.e. 'n01440764'
+            names_map = yaml_load(ROOT / "cfg/datasets/ImageNet.yaml")["map"]  # human-readable names
+            names = {k: names_map[v] for k, v in names.items()}
     return names
 
 
+def default_class_names(data=None):
+    """Applies default class names to an input YAML file or returns numerical class names."""
+    if data:
+        with contextlib.suppress(Exception):
+            return yaml_load(check_yaml(data))["names"]
+    return {i: f"class{i}" for i in range(999)}  # return default if above errors
+
+
 class AutoBackend(nn.Module):
+    """
+    Handles dynamic backend selection for running inference using Ultralytics YOLO models.
 
-    def __init__(self,
-                 weights='yolov8n.pt',
-                 device=torch.device('cpu'),
-                 dnn=False,
-                 data=None,
-                 fp16=False,
-                 fuse=True,
-                 verbose=True):
-        """
-        MultiBackend class for python inference on various platforms using Ultralytics YOLO.
+    The AutoBackend class is designed to provide an abstraction layer for various inference engines. It supports a wide
+    range of formats, each with specific naming conventions as outlined below:
 
-        Args:
-            weights (str): The path to the weights file. Default: 'yolov8n.pt'
-            device (torch.device): The device to run the model on.
-            dnn (bool): Use OpenCV DNN module for inference if True, defaults to False.
-            data (str | Path | optional): Additional data.yaml file for class names.
-            fp16 (bool): If True, use half precision. Default: False
-            fuse (bool): Whether to fuse the model or not. Default: True
-            verbose (bool): Whether to run in verbose mode or not. Default: True
-
-        Supported formats and their naming conventions:
-            | Format                | Suffix           |
+        Supported Formats and Naming Conventions:
+            | Format                | File Suffix      |
             |-----------------------|------------------|
             | PyTorch               | *.pt             |
             | TorchScript           | *.torchscript    |
             | ONNX Runtime          | *.onnx           |
-            | ONNX OpenCV DNN       | *.onnx dnn=True  |
-            | OpenVINO              | *.xml            |
+            | ONNX OpenCV DNN       | *.onnx (dnn=True)|
+            | OpenVINO              | *openvino_model/ |
             | CoreML                | *.mlpackage      |
             | TensorRT              | *.engine         |
             | TensorFlow SavedModel | *_saved_model    |
@@ -74,103 +72,166 @@ class AutoBackend(nn.Module):
             | TensorFlow GraphDef   | *.pb             |
             | TensorFlow Lite       | *.tflite         |
             | TensorFlow Edge TPU   | *_edgetpu.tflite |
             | PaddlePaddle          | *_paddle_model   |
-            | ncnn                  | *_ncnn_model     |
+            | NCNN                  | *_ncnn_model     |
+
+    This class offers dynamic backend switching capabilities based on the input model format, making it easier to deploy
+    models across various platforms.
+    """
+
+    @torch.no_grad()
+    def __init__(
+        self,
+        weights="yolov8n.pt",
+        device=torch.device("cpu"),
+        dnn=False,
+        data=None,
+        fp16=False,
+        batch=1,
+        fuse=True,
+        verbose=True,
+    ):
+        """
+        Initialize the AutoBackend for inference.
+
+        Args:
+            weights (str): Path to the model weights file. Defaults to 'yolov8n.pt'.
+            device (torch.device): Device to run the model on. Defaults to CPU.
+            dnn (bool): Use OpenCV DNN module for ONNX inference. Defaults to False.
+            data (str | Path | optional): Path to the additional data.yaml file containing class names. Optional.
+            fp16 (bool): Enable half-precision inference. Supported only on specific backends. Defaults to False.
+            batch (int): Batch-size to assume for inference.
+            fuse (bool): Fuse Conv2D + BatchNorm layers for optimization. Defaults to True.
+            verbose (bool): Enable verbose logging. Defaults to True.
""" super().__init__() w = str(weights[0] if isinstance(weights, list) else weights) nn_module = isinstance(weights, torch.nn.Module) - pt, jit, onnx, xml, engine, coreml, saved_model, pb, tflite, edgetpu, tfjs, paddle, ncnn, triton = \ - self._model_type(w) + ( + pt, + jit, + onnx, + xml, + engine, + coreml, + saved_model, + pb, + tflite, + edgetpu, + tfjs, + paddle, + ncnn, + triton, + ) = self._model_type(w) fp16 &= pt or jit or onnx or xml or engine or nn_module or triton # FP16 nhwc = coreml or saved_model or pb or tflite or edgetpu # BHWC formats (vs torch BCWH) stride = 32 # default stride model, metadata = None, None # Set device - cuda = torch.cuda.is_available() and device.type != 'cpu' # use CUDA - if cuda and not any([nn_module, pt, jit, engine]): # GPU dataloader formats - device = torch.device('cpu') + cuda = torch.cuda.is_available() and device.type != "cpu" # use CUDA + if cuda and not any([nn_module, pt, jit, engine, onnx]): # GPU dataloader formats + device = torch.device("cpu") cuda = False # Download if not local if not (pt or triton or nn_module): w = attempt_download_asset(w) - # Load model - if nn_module: # in-memory PyTorch model + # In-memory PyTorch model + if nn_module: model = weights.to(device) model = model.fuse(verbose=verbose) if fuse else model - if hasattr(model, 'kpt_shape'): + if hasattr(model, "kpt_shape"): kpt_shape = model.kpt_shape # pose-only stride = max(int(model.stride.max()), 32) # model stride - names = model.module.names if hasattr(model, 'module') else model.names # get class names + names = model.module.names if hasattr(model, "module") else model.names # get class names model.half() if fp16 else model.float() self.model = model # explicitly assign for to(), cpu(), cuda(), half() pt = True - elif pt: # PyTorch + + # PyTorch + elif pt: from ultralytics.nn.tasks import attempt_load_weights - model = attempt_load_weights(weights if isinstance(weights, list) else w, - device=device, - inplace=True, - fuse=fuse) - if hasattr(model, 'kpt_shape'): + + model = attempt_load_weights( + weights if isinstance(weights, list) else w, device=device, inplace=True, fuse=fuse + ) + if hasattr(model, "kpt_shape"): kpt_shape = model.kpt_shape # pose-only stride = max(int(model.stride.max()), 32) # model stride - names = model.module.names if hasattr(model, 'module') else model.names # get class names + names = model.module.names if hasattr(model, "module") else model.names # get class names model.half() if fp16 else model.float() self.model = model # explicitly assign for to(), cpu(), cuda(), half() - elif jit: # TorchScript - LOGGER.info(f'Loading {w} for TorchScript inference...') - extra_files = {'config.txt': ''} # model metadata + + # TorchScript + elif jit: + LOGGER.info(f"Loading {w} for TorchScript inference...") + extra_files = {"config.txt": ""} # model metadata model = torch.jit.load(w, _extra_files=extra_files, map_location=device) model.half() if fp16 else model.float() - if extra_files['config.txt']: # load metadata dict - metadata = json.loads(extra_files['config.txt'], object_hook=lambda x: dict(x.items())) - elif dnn: # ONNX OpenCV DNN - LOGGER.info(f'Loading {w} for ONNX OpenCV DNN inference...') - check_requirements('opencv-python>=4.5.4') + if extra_files["config.txt"]: # load metadata dict + metadata = json.loads(extra_files["config.txt"], object_hook=lambda x: dict(x.items())) + + # ONNX OpenCV DNN + elif dnn: + LOGGER.info(f"Loading {w} for ONNX OpenCV DNN inference...") + check_requirements("opencv-python>=4.5.4") net = 
cv2.dnn.readNetFromONNX(w) - elif onnx: # ONNX Runtime - LOGGER.info(f'Loading {w} for ONNX Runtime inference...') - check_requirements(('onnx', 'onnxruntime-gpu' if cuda else 'onnxruntime')) + + # ONNX Runtime + elif onnx: + LOGGER.info(f"Loading {w} for ONNX Runtime inference...") + check_requirements(("onnx", "onnxruntime-gpu" if cuda else "onnxruntime")) import onnxruntime - providers = ['CUDAExecutionProvider', 'CPUExecutionProvider'] if cuda else ['CPUExecutionProvider'] + + providers = ["CUDAExecutionProvider", "CPUExecutionProvider"] if cuda else ["CPUExecutionProvider"] session = onnxruntime.InferenceSession(w, providers=providers) output_names = [x.name for x in session.get_outputs()] - metadata = session.get_modelmeta().custom_metadata_map # metadata - elif xml: # OpenVINO - LOGGER.info(f'Loading {w} for OpenVINO inference...') - check_requirements('openvino>=2023.0') # requires openvino-dev: https://pypi.org/project/openvino-dev/ - from openvino.runtime import Core, Layout, get_batch # noqa - core = Core() + metadata = session.get_modelmeta().custom_metadata_map + + # OpenVINO + elif xml: + LOGGER.info(f"Loading {w} for OpenVINO inference...") + check_requirements("openvino>=2024.0.0") + import openvino as ov + + core = ov.Core() w = Path(w) if not w.is_file(): # if not *.xml - w = next(w.glob('*.xml')) # get *.xml file from *_openvino_model dir - ov_model = core.read_model(model=str(w), weights=w.with_suffix('.bin')) + w = next(w.glob("*.xml")) # get *.xml file from *_openvino_model dir + ov_model = core.read_model(model=str(w), weights=w.with_suffix(".bin")) if ov_model.get_parameters()[0].get_layout().empty: - ov_model.get_parameters()[0].set_layout(Layout('NCHW')) - batch_dim = get_batch(ov_model) - if batch_dim.is_static: - batch_size = batch_dim.get_length() - ov_compiled_model = core.compile_model(ov_model, device_name='AUTO') # AUTO selects best available device - metadata = w.parent / 'metadata.yaml' - elif engine: # TensorRT - LOGGER.info(f'Loading {w} for TensorRT inference...') + ov_model.get_parameters()[0].set_layout(ov.Layout("NCHW")) + + # OpenVINO inference modes are 'LATENCY', 'THROUGHPUT' (not recommended), or 'CUMULATIVE_THROUGHPUT' + inference_mode = "CUMULATIVE_THROUGHPUT" if batch > 1 else "LATENCY" + LOGGER.info(f"Using OpenVINO {inference_mode} mode for batch={batch} inference...") + ov_compiled_model = core.compile_model( + ov_model, + device_name="AUTO", # AUTO selects best available device, do not modify + config={"PERFORMANCE_HINT": inference_mode}, + ) + input_name = ov_compiled_model.input().get_any_name() + metadata = w.parent / "metadata.yaml" + + # TensorRT + elif engine: + LOGGER.info(f"Loading {w} for TensorRT inference...") try: import tensorrt as trt # noqa https://developer.nvidia.com/nvidia-tensorrt-download except ImportError: if LINUX: - check_requirements('nvidia-tensorrt', cmds='-U --index-url https://pypi.ngc.nvidia.com') + check_requirements("nvidia-tensorrt", cmds="-U --index-url https://pypi.ngc.nvidia.com") import tensorrt as trt # noqa - check_version(trt.__version__, '7.0.0', hard=True) # require tensorrt>=7.0.0 - if device.type == 'cpu': - device = torch.device('cuda:0') - Binding = namedtuple('Binding', ('name', 'dtype', 'shape', 'data', 'ptr')) + check_version(trt.__version__, "7.0.0", hard=True) # require tensorrt>=7.0.0 + if device.type == "cpu": + device = torch.device("cuda:0") + Binding = namedtuple("Binding", ("name", "dtype", "shape", "data", "ptr")) logger = trt.Logger(trt.Logger.INFO) # Read file - with open(w, 
'rb') as f, trt.Runtime(logger) as runtime: - meta_len = int.from_bytes(f.read(4), byteorder='little') # read metadata length - metadata = json.loads(f.read(meta_len).decode('utf-8')) # read metadata + with open(w, "rb") as f, trt.Runtime(logger) as runtime: + meta_len = int.from_bytes(f.read(4), byteorder="little") # read metadata length + metadata = json.loads(f.read(meta_len).decode("utf-8")) # read metadata model = runtime.deserialize_cuda_engine(f.read()) # read engine context = model.create_execution_context() bindings = OrderedDict() @@ -192,126 +253,152 @@ class AutoBackend(nn.Module): im = torch.from_numpy(np.empty(shape, dtype=dtype)).to(device) bindings[name] = Binding(name, dtype, shape, im, int(im.data_ptr())) binding_addrs = OrderedDict((n, d.ptr) for n, d in bindings.items()) - batch_size = bindings['images'].shape[0] # if dynamic, this is instead max batch size - elif coreml: # CoreML - LOGGER.info(f'Loading {w} for CoreML inference...') + batch_size = bindings["images"].shape[0] # if dynamic, this is instead max batch size + + # CoreML + elif coreml: + LOGGER.info(f"Loading {w} for CoreML inference...") import coremltools as ct + model = ct.models.MLModel(w) metadata = dict(model.user_defined_metadata) - elif saved_model: # TF SavedModel - LOGGER.info(f'Loading {w} for TensorFlow SavedModel inference...') + + # TF SavedModel + elif saved_model: + LOGGER.info(f"Loading {w} for TensorFlow SavedModel inference...") import tensorflow as tf + keras = False # assume TF1 saved_model model = tf.keras.models.load_model(w) if keras else tf.saved_model.load(w) - metadata = Path(w) / 'metadata.yaml' - elif pb: # GraphDef https://www.tensorflow.org/guide/migrate#a_graphpb_or_graphpbtxt - LOGGER.info(f'Loading {w} for TensorFlow GraphDef inference...') + metadata = Path(w) / "metadata.yaml" + + # TF GraphDef + elif pb: # https://www.tensorflow.org/guide/migrate#a_graphpb_or_graphpbtxt + LOGGER.info(f"Loading {w} for TensorFlow GraphDef inference...") import tensorflow as tf from ultralytics.engine.exporter import gd_outputs def wrap_frozen_graph(gd, inputs, outputs): """Wrap frozen graphs for deployment.""" - x = tf.compat.v1.wrap_function(lambda: tf.compat.v1.import_graph_def(gd, name=''), []) # wrapped + x = tf.compat.v1.wrap_function(lambda: tf.compat.v1.import_graph_def(gd, name=""), []) # wrapped ge = x.graph.as_graph_element return x.prune(tf.nest.map_structure(ge, inputs), tf.nest.map_structure(ge, outputs)) gd = tf.Graph().as_graph_def() # TF GraphDef - with open(w, 'rb') as f: + with open(w, "rb") as f: gd.ParseFromString(f.read()) - frozen_func = wrap_frozen_graph(gd, inputs='x:0', outputs=gd_outputs(gd)) + frozen_func = wrap_frozen_graph(gd, inputs="x:0", outputs=gd_outputs(gd)) + + # TFLite or TFLite Edge TPU elif tflite or edgetpu: # https://www.tensorflow.org/lite/guide/python#install_tensorflow_lite_for_python try: # https://coral.ai/docs/edgetpu/tflite-python/#update-existing-tf-lite-code-for-the-edge-tpu from tflite_runtime.interpreter import Interpreter, load_delegate except ImportError: import tensorflow as tf + Interpreter, load_delegate = tf.lite.Interpreter, tf.lite.experimental.load_delegate if edgetpu: # TF Edge TPU https://coral.ai/software/#edgetpu-runtime - LOGGER.info(f'Loading {w} for TensorFlow Lite Edge TPU inference...') - delegate = { - 'Linux': 'libedgetpu.so.1', - 'Darwin': 'libedgetpu.1.dylib', - 'Windows': 'edgetpu.dll'}[platform.system()] + LOGGER.info(f"Loading {w} for TensorFlow Lite Edge TPU inference...") + delegate = {"Linux": 
"libedgetpu.so.1", "Darwin": "libedgetpu.1.dylib", "Windows": "edgetpu.dll"}[ + platform.system() + ] interpreter = Interpreter(model_path=w, experimental_delegates=[load_delegate(delegate)]) else: # TFLite - LOGGER.info(f'Loading {w} for TensorFlow Lite inference...') + LOGGER.info(f"Loading {w} for TensorFlow Lite inference...") interpreter = Interpreter(model_path=w) # load TFLite model interpreter.allocate_tensors() # allocate input_details = interpreter.get_input_details() # inputs output_details = interpreter.get_output_details() # outputs # Load metadata with contextlib.suppress(zipfile.BadZipFile): - with zipfile.ZipFile(w, 'r') as model: + with zipfile.ZipFile(w, "r") as model: meta_file = model.namelist()[0] - metadata = ast.literal_eval(model.read(meta_file).decode('utf-8')) - elif tfjs: # TF.js - raise NotImplementedError('YOLOv8 TF.js inference is not currently supported.') - elif paddle: # PaddlePaddle - LOGGER.info(f'Loading {w} for PaddlePaddle inference...') - check_requirements('paddlepaddle-gpu' if cuda else 'paddlepaddle') + metadata = ast.literal_eval(model.read(meta_file).decode("utf-8")) + + # TF.js + elif tfjs: + raise NotImplementedError("YOLOv8 TF.js inference is not currently supported.") + + # PaddlePaddle + elif paddle: + LOGGER.info(f"Loading {w} for PaddlePaddle inference...") + check_requirements("paddlepaddle-gpu" if cuda else "paddlepaddle") import paddle.inference as pdi # noqa + w = Path(w) if not w.is_file(): # if not *.pdmodel - w = next(w.rglob('*.pdmodel')) # get *.pdmodel file from *_paddle_model dir - config = pdi.Config(str(w), str(w.with_suffix('.pdiparams'))) + w = next(w.rglob("*.pdmodel")) # get *.pdmodel file from *_paddle_model dir + config = pdi.Config(str(w), str(w.with_suffix(".pdiparams"))) if cuda: config.enable_use_gpu(memory_pool_init_size_mb=2048, device_id=0) predictor = pdi.create_predictor(config) input_handle = predictor.get_input_handle(predictor.get_input_names()[0]) output_names = predictor.get_output_names() - metadata = w.parents[1] / 'metadata.yaml' - elif ncnn: # ncnn - LOGGER.info(f'Loading {w} for ncnn inference...') - check_requirements('git+https://github.com/Tencent/ncnn.git' if ARM64 else 'ncnn') # requires ncnn + metadata = w.parents[1] / "metadata.yaml" + + # NCNN + elif ncnn: + LOGGER.info(f"Loading {w} for NCNN inference...") + check_requirements("git+https://github.com/Tencent/ncnn.git" if ARM64 else "ncnn") # requires NCNN import ncnn as pyncnn + net = pyncnn.Net() net.opt.use_vulkan_compute = cuda w = Path(w) if not w.is_file(): # if not *.param - w = next(w.glob('*.param')) # get *.param file from *_ncnn_model dir + w = next(w.glob("*.param")) # get *.param file from *_ncnn_model dir net.load_param(str(w)) - net.load_model(str(w.with_suffix('.bin'))) - metadata = w.parent / 'metadata.yaml' - elif triton: # NVIDIA Triton Inference Server - """TODO - check_requirements('tritonclient[all]') - from utils.triton import TritonRemoteModel - model = TritonRemoteModel(url=w) - nhwc = model.runtime.startswith("tensorflow") - """ - raise NotImplementedError('Triton Inference Server is not currently supported.') + net.load_model(str(w.with_suffix(".bin"))) + metadata = w.parent / "metadata.yaml" + + # NVIDIA Triton Inference Server + elif triton: + check_requirements("tritonclient[all]") + from ultralytics.utils.triton import TritonRemoteModel + + model = TritonRemoteModel(w) + + # Any other format (unsupported) else: from ultralytics.engine.exporter import export_formats - raise TypeError(f"model='{w}' is not a 
supported model format. " - 'See https://docs.ultralytics.com/modes/predict for help.' - f'\n\n{export_formats()}') + + raise TypeError( + f"model='{w}' is not a supported model format. " + f"See https://docs.ultralytics.com/modes/predict for help.\n\n{export_formats()}" + ) # Load external metadata YAML if isinstance(metadata, (str, Path)) and Path(metadata).exists(): metadata = yaml_load(metadata) if metadata: for k, v in metadata.items(): - if k in ('stride', 'batch'): + if k in ("stride", "batch"): metadata[k] = int(v) - elif k in ('imgsz', 'names', 'kpt_shape') and isinstance(v, str): + elif k in ("imgsz", "names", "kpt_shape") and isinstance(v, str): metadata[k] = eval(v) - stride = metadata['stride'] - task = metadata['task'] - batch = metadata['batch'] - imgsz = metadata['imgsz'] - names = metadata['names'] - kpt_shape = metadata.get('kpt_shape') + stride = metadata["stride"] + task = metadata["task"] + batch = metadata["batch"] + imgsz = metadata["imgsz"] + names = metadata["names"] + kpt_shape = metadata.get("kpt_shape") elif not (pt or triton or nn_module): LOGGER.warning(f"WARNING ⚠️ Metadata not found for 'model={weights}'") # Check names - if 'names' not in locals(): # names missing - names = self._apply_default_class_names(data) + if "names" not in locals(): # names missing + names = default_class_names(data) names = check_class_names(names) + # Disable gradients + if pt: + for p in model.parameters(): + p.requires_grad = False + self.__dict__.update(locals()) # assign all variables to self - def forward(self, im, augment=False, visualize=False): + def forward(self, im, augment=False, visualize=False, embed=None): """ Runs inference on the YOLOv8 MultiBackend model. @@ -319,6 +406,7 @@ class AutoBackend(nn.Module): im (torch.Tensor): The image tensor to perform inference on. augment (bool): whether to perform data augmentation during inference, defaults to False visualize (bool): whether to visualize the output predictions, defaults to False + embed (list, optional): A list of feature vectors/embeddings to return. 
Returns: (tuple): Tuple containing the raw output tensor, and processed output for visualization (if visualize=True) @@ -329,41 +417,75 @@ class AutoBackend(nn.Module): if self.nhwc: im = im.permute(0, 2, 3, 1) # torch BCHW to numpy BHWC shape(1,320,192,3) - if self.pt or self.nn_module: # PyTorch - y = self.model(im, augment=augment, visualize=visualize) if augment or visualize else self.model(im) - elif self.jit: # TorchScript + # PyTorch + if self.pt or self.nn_module: + y = self.model(im, augment=augment, visualize=visualize, embed=embed) + + # TorchScript + elif self.jit: y = self.model(im) - elif self.dnn: # ONNX OpenCV DNN + + # ONNX OpenCV DNN + elif self.dnn: im = im.cpu().numpy() # torch to numpy self.net.setInput(im) y = self.net.forward() - elif self.onnx: # ONNX Runtime + + # ONNX Runtime + elif self.onnx: im = im.cpu().numpy() # torch to numpy y = self.session.run(self.output_names, {self.session.get_inputs()[0].name: im}) - elif self.xml: # OpenVINO + + # OpenVINO + elif self.xml: im = im.cpu().numpy() # FP32 - y = list(self.ov_compiled_model(im).values()) - elif self.engine: # TensorRT - if self.dynamic and im.shape != self.bindings['images'].shape: - i = self.model.get_binding_index('images') + + if self.inference_mode in {"THROUGHPUT", "CUMULATIVE_THROUGHPUT"}: # optimized for larger batch-sizes + n = im.shape[0] # number of images in batch + results = [None] * n # preallocate list with None to match the number of images + + def callback(request, userdata): + """Places result in preallocated list using userdata index.""" + results[userdata] = request.results + + # Create AsyncInferQueue, set the callback and start asynchronous inference for each input image + async_queue = self.ov.runtime.AsyncInferQueue(self.ov_compiled_model) + async_queue.set_callback(callback) + for i in range(n): + # Start async inference with userdata=i to specify the position in results list + async_queue.start_async(inputs={self.input_name: im[i : i + 1]}, userdata=i) # keep image as BCHW + async_queue.wait_all() # wait for all inference requests to complete + y = np.concatenate([list(r.values())[0] for r in results]) + + else: # inference_mode = "LATENCY", optimized for fastest first result at batch-size 1 + y = list(self.ov_compiled_model(im).values()) + + # TensorRT + elif self.engine: + if self.dynamic and im.shape != self.bindings["images"].shape: + i = self.model.get_binding_index("images") self.context.set_binding_shape(i, im.shape) # reshape if dynamic - self.bindings['images'] = self.bindings['images']._replace(shape=im.shape) + self.bindings["images"] = self.bindings["images"]._replace(shape=im.shape) for name in self.output_names: i = self.model.get_binding_index(name) self.bindings[name].data.resize_(tuple(self.context.get_binding_shape(i))) - s = self.bindings['images'].shape + s = self.bindings["images"].shape assert im.shape == s, f"input size {im.shape} {'>' if self.dynamic else 'not equal to'} max model size {s}" - self.binding_addrs['images'] = int(im.data_ptr()) + self.binding_addrs["images"] = int(im.data_ptr()) self.context.execute_v2(list(self.binding_addrs.values())) y = [self.bindings[x].data for x in sorted(self.output_names)] - elif self.coreml: # CoreML + + # CoreML + elif self.coreml: im = im[0].cpu().numpy() - im_pil = Image.fromarray((im * 255).astype('uint8')) + im_pil = Image.fromarray((im * 255).astype("uint8")) # im = im.resize((192, 320), Image.BILINEAR) - y = self.model.predict({'image': im_pil}) # coordinates are xywh normalized - if 'confidence' in y: - raise 
TypeError('Ultralytics only supports inference of non-pipelined CoreML models exported with ' - f"'nms=False', but 'model={w}' has an NMS pipeline created by an 'nms=True' export.") + y = self.model.predict({"image": im_pil}) # coordinates are xywh normalized + if "confidence" in y: + raise TypeError( + "Ultralytics only supports inference of non-pipelined CoreML models exported with " + f"'nms=False', but 'model={w}' has an NMS pipeline created by an 'nms=True' export." + ) # TODO: CoreML NMS inference handling # from ultralytics.utils.ops import xywh2xyxy # box = xywh2xyxy(y['coordinates'] * [[w, h, w, h]]) # xyxy pixels @@ -373,24 +495,28 @@ class AutoBackend(nn.Module): y = list(y.values()) elif len(y) == 2: # segmentation model y = list(reversed(y.values())) # reversed for segmentation models (pred, proto) - elif self.paddle: # PaddlePaddle + + # PaddlePaddle + elif self.paddle: im = im.cpu().numpy().astype(np.float32) self.input_handle.copy_from_cpu(im) self.predictor.run() y = [self.predictor.get_output_handle(x).copy_to_cpu() for x in self.output_names] - elif self.ncnn: # ncnn + + # NCNN + elif self.ncnn: mat_in = self.pyncnn.Mat(im[0].cpu().numpy()) - ex = self.net.create_extractor() - input_names, output_names = self.net.input_names(), self.net.output_names() - ex.input(input_names[0], mat_in) - y = [] - for output_name in output_names: - mat_out = self.pyncnn.Mat() - ex.extract(output_name, mat_out) - y.append(np.array(mat_out)[None]) - elif self.triton: # NVIDIA Triton Inference Server + with self.net.create_extractor() as ex: + ex.input(self.net.input_names()[0], mat_in) + y = [np.array(ex.extract(x)[1])[None] for x in self.net.output_names()] + + # NVIDIA Triton Inference Server + elif self.triton: + im = im.cpu().numpy() # torch to numpy y = self.model(im) - else: # TensorFlow (SavedModel, GraphDef, Lite, Edge TPU) + + # TensorFlow (SavedModel, GraphDef, Lite, Edge TPU) + else: im = im.cpu().numpy() if self.saved_model: # SavedModel y = self.model(im, training=False) if self.keras else self.model(im) @@ -401,20 +527,20 @@ class AutoBackend(nn.Module): if len(y) == 2 and len(self.names) == 999: # segments and names not defined ip, ib = (0, 1) if len(y[0].shape) == 4 else (1, 0) # index of protos, boxes nc = y[ib].shape[1] - y[ip].shape[3] - 4 # y = (1, 160, 160, 32), (1, 116, 8400) - self.names = {i: f'class{i}' for i in range(nc)} + self.names = {i: f"class{i}" for i in range(nc)} else: # Lite or Edge TPU details = self.input_details[0] - integer = details['dtype'] in (np.int8, np.int16) # is TFLite quantized int8 or int16 model + integer = details["dtype"] in (np.int8, np.int16) # is TFLite quantized int8 or int16 model if integer: - scale, zero_point = details['quantization'] - im = (im / scale + zero_point).astype(details['dtype']) # de-scale - self.interpreter.set_tensor(details['index'], im) + scale, zero_point = details["quantization"] + im = (im / scale + zero_point).astype(details["dtype"]) # de-scale + self.interpreter.set_tensor(details["index"], im) self.interpreter.invoke() y = [] for output in self.output_details: - x = self.interpreter.get_tensor(output['index']) + x = self.interpreter.get_tensor(output["index"]) if integer: - scale, zero_point = output['quantization'] + scale, zero_point = output["quantization"] x = (x.astype(np.float32) - zero_point) * scale # re-scale if x.ndim > 2: # if task is not classification # Denormalize xywh by image size. 
See https://github.com/ultralytics/ultralytics/pull/1695 @@ -438,14 +564,14 @@ class AutoBackend(nn.Module): def from_numpy(self, x): """ - Convert a numpy array to a tensor. + Convert a numpy array to a tensor. - Args: - x (np.ndarray): The array to be converted. + Args: + x (np.ndarray): The array to be converted. - Returns: - (torch.Tensor): The converted tensor - """ + Returns: + (torch.Tensor): The converted tensor + """ return torch.tensor(x).to(self.device) if isinstance(x, np.ndarray) else x def warmup(self, imgsz=(1, 3, 640, 640)): @@ -454,44 +580,41 @@ class AutoBackend(nn.Module): Args: imgsz (tuple): The shape of the dummy input tensor in the format (batch_size, channels, height, width) - - Returns: - (None): This method runs the forward pass and don't return any value """ warmup_types = self.pt, self.jit, self.onnx, self.engine, self.saved_model, self.pb, self.triton, self.nn_module - if any(warmup_types) and (self.device.type != 'cpu' or self.triton): + if any(warmup_types) and (self.device.type != "cpu" or self.triton): im = torch.empty(*imgsz, dtype=torch.half if self.fp16 else torch.float, device=self.device) # input - for _ in range(2 if self.jit else 1): # + for _ in range(2 if self.jit else 1): self.forward(im) # warmup @staticmethod - def _apply_default_class_names(data): - """Applies default class names to an input YAML file or returns numerical class names.""" - with contextlib.suppress(Exception): - return yaml_load(check_yaml(data))['names'] - return {i: f'class{i}' for i in range(999)} # return default if above errors - - @staticmethod - def _model_type(p='path/to/model.pt'): + def _model_type(p="path/to/model.pt"): """ - This function takes a path to a model file and returns the model type + This function takes a path to a model file and returns the model type. Possibles types are pt, jit, onnx, xml, + engine, coreml, saved_model, pb, tflite, edgetpu, tfjs, ncnn or paddle. Args: p: path to the model file. Defaults to path/to/model.pt + + Examples: + >>> model = AutoBackend(weights="path/to/model.onnx") + >>> model_type = model._model_type() # returns "onnx" """ - # Return model type from model path, i.e. path='path/to/model.onnx' -> type=onnx - # types = [pt, jit, onnx, xml, engine, coreml, saved_model, pb, tflite, edgetpu, tfjs, paddle] from ultralytics.engine.exporter import export_formats + sf = list(export_formats().Suffix) # export suffixes - if not is_url(p, check=False) and not isinstance(p, str): + if not is_url(p) and not isinstance(p, str): check_suffix(p, sf) # checks name = Path(p).name types = [s in name for s in sf] - types[5] |= name.endswith('.mlmodel') # retain support for older Apple CoreML *.mlmodel formats + types[5] |= name.endswith(".mlmodel") # retain support for older Apple CoreML *.mlmodel formats types[8] &= not types[9] # tflite &= not edgetpu if any(types): triton = False else: - url = urlparse(p) # if url may be Triton inference server - triton = all([any(s in url.scheme for s in ['http', 'grpc']), url.netloc]) + from urllib.parse import urlsplit + + url = urlsplit(p) + triton = bool(url.netloc) and bool(url.path) and url.scheme in {"http", "grpc"} + return types + [triton] diff --git a/ultralytics/nn/modules/__init__.py b/ultralytics/nn/modules/__init__.py index b6dc6c4..7f4c4fe 100644 --- a/ultralytics/nn/modules/__init__.py +++ b/ultralytics/nn/modules/__init__.py @@ -1,29 +1,147 @@ # Ultralytics YOLO 🚀, AGPL-3.0 license """ -Ultralytics modules. Visualize with: +Ultralytics modules. 
-from ultralytics.nn.modules import * -import torch -import os +Example: + Visualize a module with Netron. + ```python + from ultralytics.nn.modules import * + import torch + import os -x = torch.ones(1, 128, 40, 40) -m = Conv(128, 128) -f = f'{m._get_name()}.onnx' -torch.onnx.export(m, x, f) -os.system(f'onnxsim {f} {f} && open {f}') + x = torch.ones(1, 128, 40, 40) + m = Conv(128, 128) + f = f'{m._get_name()}.onnx' + torch.onnx.export(m, x, f) + os.system(f'onnxslim {f} {f} && open {f}') # pip install onnxslim + ``` """ -from .block import (C1, C2, C3, C3TR, DFL, SPP, SPPF, Bottleneck, BottleneckCSP, C2f, C3Ghost, C3x, GhostBottleneck, - HGBlock, HGStem, Proto, RepC3) -from .conv import (CBAM, ChannelAttention, Concat, Conv, Conv2, ConvTranspose, DWConv, DWConvTranspose2d, Focus, - GhostConv, LightConv, RepConv, SpatialAttention) -from .head import Classify, Detect, Pose, RTDETRDecoder, Segment -from .transformer import (AIFI, MLP, DeformableTransformerDecoder, DeformableTransformerDecoderLayer, LayerNorm2d, - MLPBlock, MSDeformAttn, TransformerBlock, TransformerEncoderLayer, TransformerLayer) +from .block import ( + C1, + C2, + C3, + C3TR, + DFL, + SPP, + SPPF, + Bottleneck, + BottleneckCSP, + C2f, + C2fAttn, + ImagePoolingAttn, + C3Ghost, + C3x, + GhostBottleneck, + HGBlock, + HGStem, + Proto, + RepC3, + ResNetLayer, + ContrastiveHead, + BNContrastiveHead, + RepNCSPELAN4, + ADown, + SPPELAN, + CBFuse, + CBLinear, + Silence, + PSA, + C2fCIB, + SCDown, + RepVGGDW +) +from .conv import ( + CBAM, + ChannelAttention, + Concat, + Conv, + Conv2, + ConvTranspose, + DWConv, + DWConvTranspose2d, + Focus, + GhostConv, + LightConv, + RepConv, + SpatialAttention, +) +from .head import OBB, Classify, Detect, Pose, RTDETRDecoder, Segment, WorldDetect, v10Detect +from .transformer import ( + AIFI, + MLP, + DeformableTransformerDecoder, + DeformableTransformerDecoderLayer, + LayerNorm2d, + MLPBlock, + MSDeformAttn, + TransformerBlock, + TransformerEncoderLayer, + TransformerLayer, +) -__all__ = ('Conv', 'Conv2', 'LightConv', 'RepConv', 'DWConv', 'DWConvTranspose2d', 'ConvTranspose', 'Focus', - 'GhostConv', 'ChannelAttention', 'SpatialAttention', 'CBAM', 'Concat', 'TransformerLayer', - 'TransformerBlock', 'MLPBlock', 'LayerNorm2d', 'DFL', 'HGBlock', 'HGStem', 'SPP', 'SPPF', 'C1', 'C2', 'C3', - 'C2f', 'C3x', 'C3TR', 'C3Ghost', 'GhostBottleneck', 'Bottleneck', 'BottleneckCSP', 'Proto', 'Detect', - 'Segment', 'Pose', 'Classify', 'TransformerEncoderLayer', 'RepC3', 'RTDETRDecoder', 'AIFI', - 'DeformableTransformerDecoder', 'DeformableTransformerDecoderLayer', 'MSDeformAttn', 'MLP') +__all__ = ( + "Conv", + "Conv2", + "LightConv", + "RepConv", + "DWConv", + "DWConvTranspose2d", + "ConvTranspose", + "Focus", + "GhostConv", + "ChannelAttention", + "SpatialAttention", + "CBAM", + "Concat", + "TransformerLayer", + "TransformerBlock", + "MLPBlock", + "LayerNorm2d", + "DFL", + "HGBlock", + "HGStem", + "SPP", + "SPPF", + "C1", + "C2", + "C3", + "C2f", + "C2fAttn", + "C3x", + "C3TR", + "C3Ghost", + "GhostBottleneck", + "Bottleneck", + "BottleneckCSP", + "Proto", + "Detect", + "Segment", + "Pose", + "Classify", + "TransformerEncoderLayer", + "RepC3", + "RTDETRDecoder", + "AIFI", + "DeformableTransformerDecoder", + "DeformableTransformerDecoderLayer", + "MSDeformAttn", + "MLP", + "ResNetLayer", + "OBB", + "WorldDetect", + "ImagePoolingAttn", + "ContrastiveHead", + "BNContrastiveHead", + "RepNCSPELAN4", + "ADown", + "SPPELAN", + "CBFuse", + "CBLinear", + "Silence", + "PSA", + "C2fCIB", + "SCDown", + "RepVGGDW", + 
"v10Detect" +) diff --git a/ultralytics/nn/modules/__pycache__/__init__.cpython-312.pyc b/ultralytics/nn/modules/__pycache__/__init__.cpython-312.pyc index 5bd4b3d969a2c208005a7feaf27f1625e3b2cdfc..1b1330536249672c6283d638bd4ca9c802f4c1fd 100644 GIT binary patch delta 1279 zcmaje&1)M+6aesDeOrnhJ8ly-g?bgsmXx~svSd5{koD!brbZPuO<*9Ex6(*j*tetZ zu0NCzMDD$)`&abRL+Q29doP6?O7~dkA&^5t5)3_c-Y6l^OBeKGcix-Po42z+Prsg< z`E7bSEb;62)?d9Jr^Yi;Pf{KykGwEX#!re15F;^J>c}0hehU`KBE(4?mdFw;lVwPd z@t?7g2>bLbkNG^~ zA~8w)RsQ%oCNe8+4K15OvYWuLr902%XhK<32HHT;49iw^U718Q5OaC?;b*)bQc7{I zc=n?CZpF3@clH3(9&IwFYeo-8oQ9_~awA97;+sEIhtzbh)|kP2XxZ8s{hX>Dcdpv_ z@6eSaI%w36n|p_qM#1%0>g?QbgU3xV;09{dyQda)L$&ImW>D4SAx)=7o4VfSafQe8 z-P%Fb4II~SV%QDwQqu>;ZEGM_1W?FL&depFknAEZ}&O!{X#6YVpg zne%UZnGU4`gW8!P(;0u!&PRdt=nI~Nr`Teg7hYMGU;1Wc-(@@|doHE_<&{XO%QKk$ ljyK}&6G{GB{zH23RC;jfiD3L4FK(9Qi04VrJdiw+^e_8NJdOYW delta 742 zcmY+>J#W)M7zc1);@Fi#0^v2#LND(&q)yTZ@e(1mYU#*;1cMnYlbjO^pIvO1CXG_W zM+o-~*q9I!;v;lnVqhQ>6Ns${P!JRM7&>6dKRtKPJvS4(jzx&>M^inL^uXxS*!icz6eqZ;>8?(}`3Lz_mEwu~0v zl8n`ntS#)#m3wX!Sneotx*?{1Z^tE}j_*6+j^#Rg1n)Z0;HILmSE!PA=(Zn3IP!z; zK>4@uL*D#_mY-653vc+|6ScK^y^cAWV)HG#Hm17SCz0t{l&IQ7_dRb|Q{s@g*2xfJ zRa(cxI<_^%5A|>qMue(%4TeL9;;}u6-)a^6+r)#qrYT<$KNl@GOKh~*L1LX1lO6Us z`5dL#zSv|3;_HVi!8tw>5+}!@aSHrnmgbl0g|;7160ot{)@0MPT-P*T3g3BO_}ys?d`cR-kNb@tiAXo% zA9No%UZ34w(CH060%sdO;2b6qLf^z=NSrSr)Se6f@iShE2yG(h90}W-=!>xSL0Fq7 U5c1>hk%v+$-AB*l;2O{W1E*oMVE_OC diff --git a/ultralytics/nn/modules/__pycache__/__init__.cpython-39.pyc b/ultralytics/nn/modules/__pycache__/__init__.cpython-39.pyc index be57fd6e79c3f64847ffc173cdf967bfe8edf4a5..8afb5ebac8ae5d61e15ff851d0e60c604cbf31f7 100644 GIT binary patch delta 1115 zcmZA0%TE(Q7y$4teNf69REUx=v_M;|wJj}=^6Cqbgw&=*NG2w*v{Slazujhcc~lP^ zOpHfy{|UW#@IUb1STDvKH_x7&Z%RVq9)8)G@0;oO&1`=Sd>9>)2L}T#{ycv7`}AAi z&Cpa&M7oJp+%QgW-e;y^hR#T?hSYGYK?uS6S(=44x(4fX9X99&Y|>5GqFa!oImpvI6lej8 zw8-;)wAARWxuSmgL~^+TuT{&C)sCgqO#*u3TxDiFkPB>I$cN4l9=q6rB(@!uF z$Ob9u+C?OpT8R-(v>X2-O=U1Hc8Kemb`@t=xMu`n z(qx!~NLy(WMKdi~RY^CZfY_Q>9Ju92;EED^q-T3F)y!L#=8Wuue99`it}5Co&e+wS z&1%1qwxp4?%EGt4XH$7Wi}1$)WlhBI0y|35WQW$kjYR@vUmm(z|nU(zo%r?*xS z6OOM~cyVqr$6qY$DH@Xv9#T}M)tTe2)bc!@Y8H13j;~h4g#pLUw>#L`DIMCMdOrL7 zj{8-iV29j^38(*1H`GRnS*&h3eHY1uc(tqU?R?zSc^ zCE&q(oKN9Vy!jD&^%f9%)h{3v5AJ_;V17I^^Y9;-`B=GFDchxzl1YXuUpoEbyu6mZ z$jtRuvXpToC+E*+gGO5B{Ti#Ab=EK&tZ6n`%WN^#RMWG9KmT6aVqLS#dS)-#Dw-RP z{5qp8o%~kb_;G0a)7TqEgmLG{r;$P4deM>Xdnc5<_u|o}qKL0jYt>_89>yf*;c%q< zU+^e7{3BIojBJq|9vpXi8&7qTpeb&$<+e|mVI9)g3T#F@+L^(FVA597khC`F1hE~a zlZj4TP2rIdO{18yj@w3aZh|Vf0d4^U=-?61KoML8*Tj!P-7cXl zfE7>%%K(2DRzM9jK^+jV4z7a+Xn{89fX6^h*w^~vTagwQ`C;{2@3qjng0!QsJ_`}}wy RUM@XZF7IVuWY?r_?k`5Dh(sXFygNdh~Q-EYh0sJiN3r_MR`KmYgnpTEq=NVDOH)%`z>Uya&qe@j2q!=;=| zoOau6S8O3$tt}*l>@8BQ#Qxe_9kmXLUvsuPf_BN)>Z*0)iX-G~Non=edRkL!Q(Mz& z(^}JO(_6i@-qwuT40hkulG&P7o5h}Xw`8~aYJE6Qsm-z5cG*In7j2=`u;X;G+7#{Y zO$R@(&0~;hfJ~nSat?#^0y1M3$b1Hw3COHjAm=j3Y(V;Efh=H>#$b3M~odt3MgDe2#yjdU@GRXOWTrdmdA_ln-kc+~L#P?Xtt}e#aC9^!gghBcN zS*Uy7&#o5X>Qdd+LUwf-t}dUY)*=R349FF;KrUsFC4elQ1#%gKTnWfkvp_CqkgEZ? 
zW){d|23ZEkwX;C3V36woSw0J734^QvCgeLGAAvR1CVrZ7DBQuWr35;o?Hz$osg%qjG_|y85=#g-OTG>V3(S-Mbh}(!G1v;b^!u={S7gK+=h#I%(fgj(-*S zw?66EQPGf0K~Wivwk5Oov<4c(2in_Pn%f%Ll`OO(DhDFb=5yiQ;Xo+qtEw?ya_m@t z0liv((9U-pK9F=BklUl}Nf-5beKP$}I8qai z?hAB<<)jzaY5?=(zRH>nNmu30_VaDY6!d~FB;7lzs?q2YF)jy3sSB7u&vZ3d+|Vb zf0`f1p0;SXQ4X{yAJyO9;NRICiOS9Oozdp@Hh*<{5I6U=MD>g_$%F*ZOvAqf*=%(^pvPD&q$YwPydq^z8P`v55 z>Ew@`PF3sD{)X%~36XL1*y==m(F}ggW;>m!o@sx3q$mv~J5Fb-XWHL4oi~*}U9{`k zn4>|cXFc}QbJTO~Z%E>8SKF<%@H7(-HAp%;op1cjt`$tZ7IKAL3v4|S`YC$X}zjfSO5%YA-wgUZ;m$MRwHIjo4>8IwI0`iG0klqol$?VDbUsy zZi$r1^x>*+NJ(dXb0C80>I%jnMqfqvjh0=sjF+oyXxwa_O>?QJ6=^0N@mL8 zvz^UyI8q0U4Amu_)P$rf+Aas1lI{S=L}NJVYG`Q>M3bop@Vhk}MP<%&&EfM&_xUi9 znWQrmhz64OV0o!C>5PP18sx2LO;Wm$Os%U6wge)Py1EGA0g(TNUvF94)ZQ9id;TnN zG92z`3rE*>wYRjND_^&^vjqgAr7PMTjI3>ITdNSgwe>^-%R0K`GSt!S#hfTpJF^~F zB0VU6^@8o^o{YYfxTj#uGyh&*{*^rgdj{)=3x^Mm939JBKb}_=&#StE6=i*SBBWJ}SNIe%n2I_EyoYz-?b*-Hyc4oxRS> zseP&Ao&r=q?pYM~EE)>LJpQ{mdA-h8Qzr0!oAp$>hdWpB;ZGBY={SXNcF}&(anadh zJ5Arubl@p5DYRQXpf!NE;DtR-6=|&}TgahKjnnB`ed-zRDw9$DJMG(@Du)DRb9O&j z+0oI`9F71<@%2=;qF^t`8OPLUxGmBy`&V38;Rk#1%i(CJ+{T_^H_ARH{P>urE<)70K~1JOOH^&kSdkEg9u?VCDPsyZ4F!ib=^kyx1&f(^+~BNDFq@lbig;iUw(|P zr-0j?56GeJbV48H2R#;f7R3v;k8*OatQc6)m(nZsmiGqkX60V)>gyWxUCkfLAC`v8 zheOvJuQiUGP0TAFpSLYOZ`-ZPTli5qK5tKa-kv*!iFx}HS^LMc4#l$$#SR~d9XlR7 zQJcs*8S|W!AIBR?rH_eP-AJK{*oJ9@iI`Sb$59;T-nzQ8oq-nRPD))J=yhEk!+3cQ z9!zE^Qz;m3X{oEbA<2*8YJ@)76Rd}f!?0pC-N;4ps~2s*usdCzPrYu}ocq}h*C~m$ zg+T=YAPyD#9-gl)K^@)YC!XH-^tsLc)rMAfWiH`)F{?Dz z!C;MVNm0km03~J@>Y*O7qbsG<)BR=|LqrrBwU79tO=17EIE$yVBNAxsXbBTg1r_VniOMFj zJqS(|tl(&@PSzJVDls(VC?<&5&z##nb*8!e_Y3T<$2bKekRbkvgPKgoL z(@+(_8zHoH)|Um_Th|^4ghDN0UMfF!LWT+wH6+QV9#YV`a7%l~+K!G8WFbZ%@d4M? z2ZCqn@sh~erbY-!b>}J|0%kzh}8*V{dsE@z0;Fw)QUb3^Y~o-7vrR_vxk_8^MU)V zjiz8!x@Ys$@z?(H5eT{8Oi_hAdmamVIA*aNIC|`jpd_GW3bSDS@M(3aXS#k+Pf3(E9R>)A)SzFls^$s? 
zXUX6o0UZxfX*r*d#Db{_+Wg^XG-NcjLd~tEX)>vdNvS&Nz!Z|t5O@m}9aNCo3y~@i zm0Rg&8w%8^X+ic2u*qF0SCwwA194WKsUW|?%Ny}%qz?snxi|arQ+-eMpS`pn%CI-% z@{@f}_80!asf4$vSGt?wyL__mpwPV%Cgflf+_BaVEVU*HP(y zUb@T21q1>O;-5HF(SG6(E|~=cP*+C3?Y6MfN?}m>U;|8gF!8xy;&X@GF!7}@6Q2hd zkRp?1r~3xu(7SilO(#iIOqC>rvG%trMKE&yF%3&~P(Sr(ViP7#tYZSjOVUgBmz*zo zUP?6>1k=oVS&v(GM+Na{b4Aqwx4oP?vo_Z2=}m>!ZSQq9xI)s49&H-;xKx37r!YOL zSHS)R3DQ_+pVZ^(W9+>fT>w+&LJ4%eJ`YTT8r#*FxTr;i#^0&%VuX59E=aOVvf)jR zGY~e>4q+H%CcRpX4U?n}le1qlQgdmtT#o~aysj}}F07M=XJXc6L6Vf!gW64WU}7hV zue|9utB)bhJTUe&4g|fZR*(UY5@SnTS{-&e~5Z{olNRsW9c}`oG{f`{y zrBVl@O3(z3qDDD{qUHt=J83Ulmz0{>g`|ra*Nb+8Pip{kdd~ZIa2n)-kx-E z>!#G2v`2zTdpHQ8CKSTcjE<6$%6rI!3F>B2YD=cJkjax-Nj;IKb~zgCjIxoDzd{c% znSD}9)L3Y<^M2<9zR=D{Pol~clNsO`Esu;8dx;GxNhv& z$=Io9$BsRhSX`Iz)j?2j-^k>t zRV@rpsmIirE0H3t%tRH6{^F{bi`w;>X-6Lzy2{Q~%Ao#znM4NREYNC{K_Pl1q7R2{ zeRhy;haABV!{nmovB|XoCtsJuC%7?Y z53Y1WWM&LawVZyHz=#WDwtke6*}DU5-IIFR)8|QKuN>ZRebcu#;bhFSlKA_szFqx0 zuk0Gw_4<=n_YCbJk@uTVC-PP&yle1S!NRMaKlJpT?Jw`^jCocJ!?3=BTMJU#38WmHh}}cT#R#f{h>AGq)6wzpmQtXOt$XL;4%k8A+&Rz%UrKYx zPihRAYf7N4l`86*YDEah7n;v8=Ut#avbKD~x=m#p9;w)3G@cT%VNt`Vo3mHtoGy^_Uqnj zUKm)*?*!oW`16LJ`j5uyo{ujHBz*N?E$+{LvLp~8N$`V3MO9vp!MZ9i_kG-d5G@tt zTwo?1Q4S50EuCUuTzHNcn&|-E*%iEl0C&-D2M%j`9WctvFp6P#Na(&Qy*~_mVmM_7 zPT9Mw%-SDW_k{swwI7ZwQ1(`u=@Eqjz@Nz|fPz)rg$qjtQB{r|#DF-NW{9CD({w>0 z(|#S9PA3FVSjZrOJcWX5cDws__aD5p7X+7^Y+rX>O&v;wV{7SX^xbpcJ(nnX?Co=- zg*SX7<$shndM;k_SiI=5+ft&SDv`A#=GpPtPf8wRPPO;5D(7XELR$QorufG+%RXkL zb}!DR5MiQvG8M1lUxeK7%%EHIsnhKWO5~KB96^}GfUOLx<3^mFjwkekcfiQH0^Y%UyF2yp`*pRw8dFceVWYLeROSSl6BAfLiMI)iN8g4X<9=drtRaAHxi>U7cFHUof%`a&))*ddC7kGr108>0j8Wa@948G5BasxQFm7f8 zB@{-CI?U@3&H2#>9Xe!F#a2aW%vF`fQadxs)s#xiWC6wWck!6Ki;7byEELDZJIV#k zvQQdHw2J7IM*oiYN=6U9kvB}}8@RsYy%NQaGIzc-9fe(sI+AIJLA^n_p}WYxPwinM zi4u9Ul`c>NJxu&#?)QyqoKsLnXyzI+0V#wE*w(JP*xQ&& zv17f$_R8={nLx(~*A=|LLTXFHuqZ+si4~DT6lS^v>T|B|Tz}wF*I;?y#h7OyqYof5 z)O>3qLla)6;$P#RMr2lh>U6rElSu0@>45KIvN#Gww!!xI@2@Vb{;O+2IByDXR@ z-e$82sfz2cFWF#lb{Qy5O5gNhK=vLt$x5nZ-lx}J1*M^=&OXbqIeJ#F|KfSVU3O?X z?@UjV?yF8|JWajfPm@T?4y#DjUicDV@`kAyv@TbL)O2e-twXIO9I&Iy2_5#;2cdPw zAy);l2O)5){i_!=^y&fjkU_L zNOY{VGvPlM^PRUQ>qIMvCQZ;V%ud#=NIJ!OsD|QGWC26q?dKAGt-a7fpzd)rSeT^A z$o%W=mSQg%3W#3Tiv2c`G~fE-Zc#?a3BRqI`E671UEFdW9>d3hO#q?wdnja43s;37|8q05GDv+8ng5q$ zW<8E>^m=-R_|MJKbj;eo*nZuoF*ftG(5;7vo^RK?P$PWGXrZr`QoD|+`4wg+nA`v2 zOidoe*OEz&lix+51h*Uc(h@Aanu#&}EgqcEF{Z`Q_?bwoq4D+Zk!5cc-zXkk_HOap z#kWdsSH||7h&}uK_>w>jcHT3Q&*b;;sDUZTKcJ>e%ap!KcLh_D{{%lH)9@RwzSaMM z03#^A5X14o1T$xN+4bUU#qoj?(sL5I>r_h&oT(o!j*gw_h@F+k&O{Q6qX}Qsn(MgH z5XN=>9w$um6>8ixYJ-SHvNEaYIgpC}%%w`)&2%sw7;+)rL*r&TG|5n{hq+XRMNk9R z`BE~qtLmAR^Ar@(`RgT?oC;q`iC@y}a|U^jq$MWrZNSOrahl5gcu;=0?*1J;`ZX%Pj-u3)bhgmSIwmubY9)VzikGNh;m_1sZeiD{W_rJ8I-#fXzW*1% zk%K6H1&!64-Jf@*V4z^I@U;cwzM{CV2&Qy+JxUVZ(rJC61``riCVew2kyR4&a9^Xr zHzIH^A2|2LaSwPzHJT3VtB_x&csm%L3SCO8Mzz(TCPBuvyVKxlh6bUYMQ7BMfTvSh z#3ZUR1D^7@_XrjvMN3MdkAt5F5JS^|MGz2_N=hxPHopaoU8cjJI@UM~y#4<$5IVd~ zmH(115ZWT-l{&;}?So@)GDAUH{>pCOlvex~R33Q_#WdKw^Lq8QYOb=xmaZQ?nD9N4 z@NWKmsEyb}BBa&aSn&Z)p{Q8-_L-w}5AMsPMp?V{NnB$x8(GOC`6!s6SpBKfgEw03!DbnIhOL%kffVFL7y+Ih6#|8HbCX4UE*Z2 zZM>YK8RlVOO#x5QTUecFAv1Fq4n)kz(hJwK=RWCE8NS-?$7BD z4CG%rIc>lKmVOyS7^J}}?t)9?7VE`6f7@92bnHyaShzLeZ;Scbt;IfSE))B{gOllu z4YP!|TSl^hNmAXWL-W?D!W`f%L^%!4ia~=N+U0+Psu}X#aPuWzns~!R9^xll?jgEA zb(L2dK8V6L{t*BZWStk*03}|4>-)>zD!EZIT7Gj?V(F$^&L4W-^W66RNqVek=j}kE zV0R*GPt3E2)8E^u!&>1daw=2tOZ?L!1EBDyxH_c!Id<1mN@M^5S<~=kxHE-@JG3+m zOv`tXOe<5c9^7B5VV-M@Q%(6s;!VOg!v)g6X42)kM-*hj)g#uE>XRhEU-|$>YAYd1 zn2C*%(UvYTn2Cilf$SlU?hQKPN?A3SR1`o~3%>3r9QB974FN2>$EpQoZ9mrL3N;6v 
zCX6LCnrNaC_$}FR^~CfD-V0ML{n1g^@8M?+P@ZTir=v6I#p^H3L_^ci-DwTBSoJe{ zgx2=lYPuuE_CFg7=nS@sUaXHxV0tkleHMgs7DZUsOk@t@#yA~09LyO!1Vh~Jk-(^L z%(MPp&Yb@G!SaEI*H(eZr6_^IGgwsR+ZZcQhH2%l12mST-{@DB)3jl8$3aaZ+Z&U% zR{sN>NEf7E*r?<)ogl4e=|f9eepJ;>iEI3zNI@a5;Hn_bO=ylO{w8HcIm)T`5h2P@ zOccJ1e0DjCtryfwTcTYvYuQmB+RYBVZ|AaGi9Lf}e$lP-|G2Q|K?$Qq3|cmeL31;2 zw~(89Ts`(PkKnuH>3V@{CTY;CzPHP+Yat2DrsJma8?u@18702WjrcZ4cY~ggz*=pV z&;nR9*w)yB1?QSuR9V=-mosR>n`u$QYDS9q0(UN{)s$xFB7NA5GD#+;xkXf(DfLOu zo)Gz{qg{;uA!2A7-6B4wb5FlRKiM4JMQ2P#p(;p5!J&@{%4CUz1oa>hK0=lW(wTJG zMjl#9;qDLN&t^XPxg$9vhyLY~TP1PD!TpN+Zt1EK`8x$)+rSpMt5VLZo_FMTzx4K( zzIPD&8VD7OYt6UTRR=p`af+vS#O#;7*j@Nn0XUi|4qlW;VekgMTJPc9}ZYlgdm5&!uMi7y>qiPlVC7gV zv;xL3<&UTZW*nfeB2%%LUX_g#jVrDrrnruy0D*jA6j*>tx(iK`>(n8%#(;l;hatFH zS{xo02lUnlE4y@W3SS7M#ir^xp2}qyFB6sphA&U+@MQ(U^i~+K(F(p z)29oiHb}5U!kIIO#JU9)FYeche6fO zHaBS%y}uIAeWr1;nWqR_hnDc~{n7F}(vJ{?wKwM5_aIY+uvey{gkC8G$Wqz zeno&JAnVEU&;lfVs_rd3qEsiUs0(X!={cG@1IS{wrma2Dw6>A*CnD8`QRF0DoVeq} zAD{y!Vmft!4TLbZz{aJ8LRy*LwZI}wLM&;~Ex$q3?3ogbS+;H(K`2DLEwTz~YgpTI zb#D-28``NH>U7h+i9|&DPgjf1*e7^6-osn4S@#ApVb%JP{)0w_9q0qtfw6r$v6u2R zncu63xL_T^Ceo!Jvl)71dxPwd+PJzjN-D>i!C>7A?osqaS)#y5X(Rgz-(R@z1#Q34X#YF?f=GAVTOe>iR zm+CY!s<_{cT9|aXRgKe(oN4YLeFRb^OFc#_s__$96T)pg8JP&%yG8EKhLRL<4-p`a^I5wejq1G-`i^fQRbV!>eHH79)Cxh2fH zZoE2DKE^};W?y7ZUxs1SFisc&z+dwrFU7a~Lh30e3}38{!^q3hy>^8%*Go!~#8OFm zsb=x>S0AX+!oVpZY8lpp(ud;~clI%@NlK3Hizsh6f)Apa-&e1DYxj3{V}107E1L&4 zBbajc&~Ahd?8W%UDSF`PTg!e}@?Od94L{!e!R9-a|884i>(jW(=)?^tkG+%MriOe# zg(cY`8o+F+y9g6}_{H>@#pF{hy`N`y9Z+O60{jip9U_+9yG+q_782U=_O`~o&21-| zJJ;@@$a_#KB$f`yv=jju+RnGj$bx84egvr+LvNt`{B~TfLK?DMkf%6nS^6QxssiF9 zWG+pXvU<8Ng}Zu9Pm`L@4vkEPoSGxoVA2#)mhMe?>hs!P^?B7^gEoqO>7Fdmo>0$j zDkpd%L%pVZf*6+R5N=}^0^paZH>Xt@i5aq^%UznHH%MA8_TeO46ZkN()MS{(wLBU@ zS17q5#GTRUYL@hmVQaK3f#PEl8gj_G_Nlb z-KV=gcThNxhtH~OICJu@lns=@{c`A9ejqqJJ2Jl+MxIQ zut06_WQEqDH9E%~DM}{wVX76yiFJ2)8g4pK2Z{9L;4<+*nnmo;K~i>SCoO*mtV@^O znP}KlSSTQ-w3^yGQ3pBNA}+k$#awQsI#-%SNUg3FWY>XJ1a*t#sVbg>M~eB@rOS6_ zwM##0TcIsRqB_kKJL?L_BwyPJ{O zpVN0tiwi15imT9A?s~meLf0xsH;ml(YQ|8;8zGiF`rxg?+jGW>cOj+)bUb4$)G!`ukB8b5p|kXx%~%6nr!PbpIYImm zL4IgCKde@`P2*qtLa{>{hRqy9L`sng%d95F)%x_6>Y9AgZuFDB49jKC;nAd14!3l2 zqRXTJrryxUpT=yGtN*YHUHuY$*DZJkpEWC+EpH87X&Pu!{B+K@JU2X}zMJWZB^wg? z8xdxd@m>bvi#G3$=j%g^V6>qj=b0ibDJMl!$W*k`8wKUuOddPg5NSj}K~yFVs+o?5 zUkO`?r@Bj(!P6~WX+^Y6Gu9Xx#?_?25Zh&FG>GD5Qe$f{ia9tAT~-D z;ZwBAv>KhG_5tM7-3`G>MO)Kx17i0Fl$w|im-4kCe6EZi{mNaKXVK0mW}=c`Y__%ydj z@x`7U%P)`3spze~>&+qS^N*gqQ+Oxv=gWVzFERh%2m5ZFeSI55FL-T`?OZIWf z;5E#lp??lFPmQ-!h$0_bg(wct(2}al?a1AI-Ghaf#Cm^YAblmktYyVQxq)l>sohzpo+932Iy3@iwON#A?NCV)+9OD;q3w2TB1Nrtt2P8c ziZ>(sZn;h}ow*$rwiqU<>J}5|T9c)AsLL6`m#EYI#@;4d^d!&%1r#+w>|`P=GCbBx z5i@zrjR&8enL$dkHS+_ut7=Nxn}Xc2)`!M0YM6BIuLcbVMq%W)xMZJ5DklKvtf;0}{ z>Bv<)Xd;cYQxr0<=Hw092blTbZpDUM_V1Q_Z5idp9`=p6KFle*?IfUkFgBD9lp?O( zp2%6suJF9?Z_K&A;M#(bL*HJK$ScS7)O+(5AwPW)7Tg~keR6CbWslGN*<(AtcIe8f zfm3fRzrOO?O5CxJR*s%c6m3aRdiht=?&MO*Wmhp@&|qRuYzDp$90wV+-m?lbhjy&G z=(-54%`$3CEQN>V!EPBr2CWLu3daoIs~os0W|evk3}2b8%#uNj%5?m$56_XvWI8kp zM;rXpOu!JDod!y@r>E~I*JrK;5kjfAEqOeqK*q|$Fy%z>Qk@;?AgjCH(* z+P>OLr{ME?YUrtWMscs5_9&?7s~Id$co$(UA~q?g?xVnl=#}$onrprQ*PGFAzv3P6 z4jz0hEACxFX)n2osRl-5Esc4WGAgJDRUf~AG5^>G;#X}gRMApgnTl`FK+{xaLRB$k zsMwDT6*;bD+=fRmLEILHN@gd>Y(8bZ%Ip~`XySISr1jU>Kn452D(Mc|{It0WNmNKE z4C$&(xm_02Q%OPBNfu;lSW;f{7#1L=&6r#%RUbHy6((wDY#MV7O?r?76H9@BB-#sE$HL$ABjA|) zW9s31C`ui&pDvJW#b~HSX#e-=9?@>y%0s5^ewD6iE+d*$Nhe{sAw&Nug1SV7cnN>! zt8|S;N07?P1v(NEp66s0PA9!gmu>Fb`*>aCGT`BDGKNGvOzxx4lZpcQl=4=9zPfL? 
zN1e14DKpAz>mdO|hsL?Bq%+bnR4qrH z6)!uGjf?1r>F9MJ;P;`@RG=RE2&Mz=6q1y73rlX9`~n$D(Yn<3uyhwf57C~`OhBhC zBqP)}r6x!VF8R+`28N!$ zq;qC?{Zl&o1{E)%z`ng59hCLIoE|$tm#GUZ&jn2VLJEHcfDrTW62&`BCure^&Z zz>yXJlVbC^vS0B10gyIh-oQM>As!mqIJ_JQGsZln#*|W15@YCbSYZ2eXwMhNc3_^h z9%6CT%7e*fMk+?dbg-F$B&Mie&x;i(tHcH*_1Vt&XQ=oyR2A+z{e z;zi&Q%3O%ySN9yt*8JZzBf4;Zni=ts88klc^nge~6GUE3y<%r;aHh+(A;;YRLd#|XWud+#3Fi&WYCf`p2# z{wLe8nRzhG@~1|zZ;792L$rCIlHQALU%DoMU7W*M#|&!O%A$CUc0`(O)V7JgKxwig zPT8|c=dIPO7P^H_R@Mle!@y!@YxX=HoRI&-=wW9naAB3JDSu&G6FgS;w1q`Pch{;7 z!c?t;vuXo0YKu0A_vxM%*R&q!?*8W6ASkNtE!I5{oB3h%L_1&&U8Q@HFwS&DiF~r- z^nCS9`y0Zh5vr`g;tiB9q}Ypv-(U9*in%ZyJ$9Xo3U7gnN_^kbeiJZ$!s>&qbEDd7FQ^cgY$B-06uFG`3o z4JYZV3eS`eI+q{G;anG@Qm+inGmoy&_QTo?XQF81 z?IRz~c`A0|6np=IMOPmkdNkhHoG3kwGWJaH&7!wTZm8KW(Qi2s~=^n}G3CHM5~jp5JJFo}Sd-Xnne9kkds| z`+>L(^g#W-7t?wmx>@x%lp-7=X&TErZN<*g^k4mAnhr;--tWO1Q^orU+e@dm5^j&_ zzuC&E@M?VmbIA2!JH2`;B$-^*tp^wrRB#)NFtz)MCohDt>m+vQhBZv_sJDiR!Z$}+ z{q1eaRs@{)_%E>h8qD9|=a$0 z0Z=-Z`$rgqBo@QM2a}HO<_@`%e(s=xIZlM(LEcHP+fFwgrQ$IZ$rNznrU3TF()=s5 z0{{y#lb@sq9;ctg;TS(tS>ASBNIFSLkXKMW=|~?^hi$d6UsHrX(m@c?S>7<|oV*HP z5hKU*>x>-*Q#hVtjQKD0-0LVP_BXTlY~PW~a7heGeb0i~W##mjUphCKcXjU2+@ECm zV;+Aj%YQe2J_`;G3^ezq-`n>z#km|lK7KeDKOBsOo5#a)JS>mxi+t#f#`i_uTXL)L z){)yA`i=}N>p%P2ic8OqXDo?lEE%>VTYCD385?5zA_;Gl_G9K7DF+4*jd}g!-j#9h zO4@lm;ax|55WaJBG~r#ye`&jY={H;c#BIaQ&9omgc|k(t=oyY)KY#6f!n>AxQ$Ei< z;rv-eyN@O^Dy$qmv}H8y3M~)HV;M!g_Mf9a`}+39=B-4gIw|3;;O(aO2d*>^G_wI< zPNqcux`ekJx3jY0JlZ<2weNR^o$;(?G0(EkCYRbWI+fffg)4VG;z4|39bwB0RFEG0 zF{v)h$UdD{hwhWK`cwMU**IZ(@X=45*{L@A5 zJhfP}jdbI1@m!E<-LN(~Wnw1XFYahZR)3@%hs@{SO?gUzaBK&%HOJfK7Fwv<+JQOF zv|%~i2p`WHrW8kl?U*$FKoEaPH>Ndb6r~wyZf$NsqJ(IdzaERC!eNjBQnA_Wrzg`$ z%4-M*qMdl2Z{1)p>X5;UXut%ev@j{opix*HAwaarltzPJ{6$qey%#f{@!$fCy=1G4{d+b^;X)Aw9$&2neWf} z;llS8-fl{4d3t=y;rN!ri7iLtB}Wqrj`cb}^c1iuWDsb<;5fSd94C68g=M6)8uw2K zibL-QvxGBy~6{7flEmDKCWg;A=!^(G|M;9=23-OoYFEI1nzb>s-VyqiF z+xU`nN=~dHkG~}xklQe+bs04cZ(alhNc&IUW9*Nzvx zwJy{45H6YQ6y5JI^2rvs`N;&JpYxXVnZztrEjB1wre1 zPGUna0Jg+>7+SCCFrbl63>7)V)eGHR(UJM%*8Or1S26@iW|q)QIf5sQ#u}pOH3K!nTN8O@FixaSN8^9aY5a)u&Ca*FZ*<=(y7@ct zl1CE@9-ED#{}ivBJ&YPOT%DJeMhUjs%dTzle9^A6R|<6Cqf8|td8g8F3*@u%vYBYO zg=nGBaO}=Y-(mpo;|}BF^z#||Rvx*I&WRSwC+N&bh2>hhMpRfni8BkzJ8$ZjRA}!C z`jE%*D3lP0^Grs*wC^`ecF}=-)Vlpt)KKv>6+fVY=;jO?trLZ0WO6Pwlq|W7HoIJ& zPcq%E$A{N{YQvxV*@8mT1J)FpwB{)kdWNLzNlN{vt=jC)qNJhPl2M~?h^+5~b8H(a zlZXow&2(hXoV)iSQ<$XVjqIRk&}mbc2m#n4g^BPQu!QMf(TEfD04yWY_1_R57VL~f z+gqEv1NDd)Yi?VM#G6q|*w*tCM&Tl*|~6V_f__pM^A*4=Q*)LhoMkQf-!(r8(rOCvvLg_o~ZoWOcIr=`BorLr*Zh4AdykrP;dH+JMy z!vAc{_Z)enxR#E$i-R0KjAMQWC)3Gvv&eB@XgxDDYQaB6t8@{JRxw%7we8(Ss;+G& zhLmtfp@P&iIZ1^zDP-B7kzYljf%`Es@_!(3MgwotnI#QuAR0(TC4EU!Ll_M#r}f5l zhV{k-WK9Iu;i)Nt(Cm@oMR=?`6@6UuaGPy>Gh!lhNui0%zeN++qkLG{<+O+xsOKIofJ*F(O8 z#)=)f7SXaLmPMYF0jMX&{tH!u32DdmR8##Ndb+8Z2Nl}M)7q}~8we-~V=xejjiz;( zDVa^%s9b&@?b3x(i_iszi@KZ*Hhll7NgC2|6)^-GbbELcm-4eiFQk!#P=h`ZhHVh! 
z@+jf@*YSW++q~<`?LRv(7m}E=O{{UZ79I$YKVlyVU^z(+wwWq?4*pE8f|8D+d7szT1_F4b_QU3M^1$y})qMP{UBhu*^(PwDsSs zo~fiw@O?Y_{l#}!vkaRCZ?X)Vw$Btc4U#8t3B#!z4G1Q>shq2J5f%n5Y$+-v>?>L7 zxm7}u>303=E}4#GijS=l&MMT;*tUUVlib2M_6D5%^TbM%geO{~(q#)q9Ux>}h-N5^ zI(ut;`Icd6ps>HPzkcA!D<$~(8Q&vNx7V8dr_`@+P;r8aS}JI=a9#ca{k(!gQQn`> zIS{c-erm&>qgGHQG$kG|WP0mx=z7z&CPe;w-u9rCiR`Uoo~^S4NaH=?5O_`7&wg>d z1~%$>i1Hfit5(eT^X(jn`YjCU)6M~b?_v~eI{&_f9dNoG1^?#-Jg>1rplPF?8_oE= zIllCf!N7p8zr6p@z{V>D_^IHxex1NyV|;YM&<}y+F+ABlfBH6ts6mYYng-QpjfLJk zyz~0*YrFZj3}}5Kd-IrQ^UQcJze)XxQ(;y^i|8U1hp1r``f0<+4Baq+> zQW&0L$HTgIm*S~zcjeCZ^KE58k}lPQ7%Bap_yG!&=!I3XM#~dfK4%iW)Vo%pTo6hS z-lVQQGl*q6m2<`#5Ly*->S_?`XqOAQwomB=8`e{!%}EnhIJ1BuqTL_DfJ6ecv$hhO z(jd3D3R{#vSWZB}3PrjhlQmZg={wZGrN4%*Fe9W%#32lZOPyKCw-V4PDwyw+%uWzp z^nyz!|1zq5yp2$U5QlJ93uc@mu>^)vhV!oHU(2V=*CR*?NNev^S;OLcd*e5flZ8501|j5^2uZVr(J3;;qV|mrqWi;}`8L?VpgKRXDb&1Z|`P zCU<^<@`2lFA&)@PceD4?H^IT)Y(cZN;CVzZDs8}ARZKJ$5#=VDYRDL)Q|d^O)#W`g zP)sp7tb`HDC4+xo5m*Xe1y zI_Uark2+P9nes3b#I7uChDBHy({$UiXqG#mqAXAjrZ1jtnMpQ2FdxXqi1?UtmFul&YePBDX3I?Ej20q9i%SCcP zuAx5Mb)9uiNi=K4IRu*j9ok6pJY-`*8*xcFG(UW!#AI%D(y0LW%XaI=Hj=18p~SkI zRIjz9FA!3LZg5PajfyFw!RkEn7LL_vv{8p?lQ7krp?p0>5&dnepG7M2DAe0R^Sn&x zv=F^lHP5Q}(n_ObUdbhMHXGn|C8lDjbaoLsGtM-V=9y(`(<{_~1JnQtS>@!O^3m=G z_uxwm6WLXIVVm#YidN$Kn5SwMk^5g!Yc1IW(Q28BFH;Kydw|I8ajm>h+j4XEKp+P8 z0C09ZthBuXPj!0^H@AebY|yxCDIq`oV;slG8=~s#fbg#ZU2n2?+o0xmNw6C)(bGT* ztW@e(Q)|>9D1(Wk^mZXnJ8YTWy$1Xuctag^8Q5H1i39HuG_%W5>g?WXMk=*i3RbPg_PY_< zK$ELS5kKMs;izv6hsutW+LP`^xNH{(rbJ;QSBa1*D(Z1I$U{trV>mL|HPqwhli!Y%TYsj%8sh)&IpLGS_@T6 zZKZz`n^8RQEJdujmSTvJ2E+ZW_w#% z4Mvg89cr^OcbY>N)bH3!vxCh9YPE3^WGNj~iTT?!ymJAjOo4{}P~5X@%u~#!fR!9a zGm?!)5ne+yeTEf$(CO!s_D%5YpfzhDX}*Y@$4WZKWoOXA*3qF$Oru9yK3#OATYXmh zOAB+YmbCYMlHH_6lVZ6>dN&AIJGChh6 zmSF_jQ=s+l(c9Vdci_!Q2hjlH0SXn>`3Eu;{83o%_X&Oa00|PPXB_9qG|Zb@UA zTYDXMvvPXVKAUjavf3pU67fbwRl3_+0$@{yia(~N3+c``>2$c}-cNJ6ayeZgkRbkv zL&f2VT3j-Cw7EB1LeMq*yf50+r80I~*liUTtO{x&=ZivUt3`q@EUZZdJ$ENPFi#)b zwQJ{b0~HeUj+{G2D%3YatTQ~^iggWtX-;UoUG0%gn{5o5dBmqc6Hj9YVD!n)GSurc z_0=FrppP{&MQn5WPiE3;EDvA|JcJ*rWWWri^jgNRk}lHP%lRq-rcgVm3ibexFHYLS zq3P(JPaRXi{C`N_{}5^i<-4a@P4DFajoAkEmB`9mg>ZhJAEYTg zGl)tZ$}RIG?55iuI>i3SwY3IwxcxdPF1;c?TUDa?AJbl4{UJpjQH zXn;7wAEfcJ7OAtv@F)}e-_z(2Z+*e0C{oz}BXuZsxbT}^EVb*t);(JI?Thh(4GC?* zg8^NsrL4zQIQhIiR!OYd8@sMI=8h!2O4IAWCBxn1+-H+Wl~>s1Kc{XHgRtn9&TcqG zA7?$O#>4s*KKLHEVm_>{O#|sPKMSU!)Tj>)!_yW_aGLj6LC*9ZI~M;6Azy0`Oxu!` zLU9p0npnbPPgYBKcT_k)tq6pbQQT@VTaT;J7LvXt^1NK6ePmsP@mM16>~XU*zmrlQmn?C7dfle01bpB_!RiE7rJSN_%~Qy3M+1?8k3^QIYj zH(7pZT-N`pH^et{?f9l(* z?|R?%CbAywb>7SN!vQt_+WhNFt}Pi|_U_8JSH8RE?KO$)Cwg5U`Q}_%FtDK4eHSN4 ztcH`ExmQ*XtnNkfiMdx+4y^1=`8iHX21>?rmdA6JkLQ%eb4o{6e0NJC=h5DjkLE5K zPKnQ5(d)e{&OY*ZFQ@mVk9!K@o`P}D;<#t=V9%Im^<7*~?MuC!*_S!)Ss3>$96UAV zS@}_30TFtvLGF8s>nhM##$ zbZfSItC^{Z+gwcPA>)HWt@QVhB*RU1fB18ip7yvR1sa67ufjn{=TB($wPk1qT=lNn z&Z#w#0-?T3y9?6`{L(#ktyZ0XSrM$Dd`#WERZv*+PsqQ-=rWI;JV9rS4p6^@O@txW z59l5fbiPApG%O5rxS{oB{504^PSO2WsdxuP(%y1Lw?8wzUlHc@wmmOCz7U9hP8j(u z074*|X>kGzb-RZ!pd9Rn;Txc!InMr9rzb93nXlz zQFl5Gu_BpF(g?4>1JIU=im#V^t3>RETKU?xzAwi-+jzXxE#LRj@q<$##}WAhJTuL* z{~MT~G8O-cDi`_;c0A2Q;yb_HwVjg;0tuoTaj5;9ScXdmeMW*F<4_jQMISgexUC|qVDlOw z*IG$By-=`voydL+myfm}UURZfb>Zl$6YU8gr$(H71(zUAz%ELlaac+Earc>;sa&5D8p=mu~D*?{c>wO=;^mfBrGV{Izi6 z&^L*!Mg@u!XEBfSe?2n1;rga)n?^RgwduwtY~dHVEsrmGDz=2h`!i?%SIWw~$dA{F*|kfG;M=*bYJXw_4q|3;`x zQVkXkXp~4%%XW_hMtx(R_4kyWDi`Zk04#ns;d@rB0Kk9}PFCwvNPdJ1Q<(A}Q(LL{ z6}>nIAk5fU{SOETcux9-jY>X?2q=jA;-DAMF@^D_B1~PGMbe9@s%1bomr{`}k|(_h z67fW9(p@^&1MMw{upgKulrhj73Jh>$I5ff$HnGD5=LUBt zQ-`20s8jR=vbU$Ekw5p*4>i_XEz@V^O!*2%9vOPp&;{beNJ)USJWWV$5=naK53Ocr 
z3rdqINSeofvCUSf4@t+_Gv`?Do}|4atVXA^)CcdNUc}-cNoXBgF_d(*HPxL7$tAeX z?G`Dh0P_QfetP5xOc8V4i4PnZ1~hoJKICg&5g2CX-GT*!UGW0io+abn-1&nWC`;VP z*7$;riMh0CtoQDmd9<_2@R9hOQv6Q4Td?q|cgQ=8ht|bB>v~=NuDjl>{^f&(1EqaW zV}UOAMt2Mz9F~SVm0i)vOp-eo94;SOK9+~bbQnl-^7|u$JHY8HM{-A(kL6&e!dFw+ zv@sY4gdOVu5SQ%7r_l}baSB68kzKSw3$1w|bL1xl_)Wy zL2?_}ZA^zIkx{%LWE58opVLtQe*BOB^iTi9lZblG5&^bUZ25$gJA^1j&4oq%*O!%q$fm zpU=G18Pa{)q*kOuAI<6Icow#)Ir&$P4;=4H>y>)TdjtJGY!K2f^*xEuhs>PIO?^%M zXD^+B-8*Lv>BGoD9=h5#)P|VjmK!a%%5SvYivIB8dl%zn`|bo1IS0mbj>U71#hy7e z{!A$TOeofHdb~l7H^{N*g@3TwI;AT6FZds*fzL=g?D(_C{**nIa}4T9?p}Le3U3DV z(#)*OC;LtkCr)Jehs)y`%aGt7sdM7qiccK2)C_=R=UwjU>lwE9_9U{f=@ad4ns;UK zz+y=6RSDmUc*crx@0z%G4Y6^yp_6yZt(`yI_1>=Al|SC|!Jaz-B(97dIf}if6FW~N zw%5kJwdl4!T>uhjDNcTfPfycee}Ip2!#%x`rZ-p8dlMfH@@Cw$cIoA|i2@CV4RA{rCyo0ye)#I7zgL%OfB0qFrybHA0s@iPeOqpuneG;^1(4+ z;-FHQ*_5mMe=jTbGs9T?soE)^wG0P&kPa zl|c+t4eDdukzpRgGTK3d2eOg|&9>u^e~2@yDDiZ{DTTq65lWo}pyqs$E`4b{%OB73 zBVwfZTJgxTw~B8R#|q0w&mse5B5O;ZlUNUNKj)Ryfz-jm*Sv9_8j`Mf20Vkl*V0Ks zHAefD4+n;eM+y@Ks}oskVxBeJkRq|ECjVP%zL^=Y{eGxeLXFHoBgro7On0T-pYL>) zaqW-p2XRXrs_HexhQh2TFjq{``y5I4;T^QD#;~cpQ2J=pO?+TV2)Kdr(=r)Jr8Wl2 zFK9R!Etyn?b-oDjR%jMr1(mU)d=ZQWS09~Q|(8)QQOItx&tcs=HqfXmI6^d!6`YcvHZCY?= zs-0u~F;qE?A2n0ymxA$IsaqtrY8dal>KXD3`@Wf;V3|KTVp*c(Ul5*Bv4Lt)P;%1l zaBaJvCaKvx>3-=}p5lmRW6~3CmxE1ZZEehSoXk~!mTBujxa;T~^@90{*axKnlLE%?MCNz#9^<^8SA`!}{Z|IxN2W?S-~ZPnwp z>c6p-{*7(xC(cFb_7_qnpOA8-Q&Mly<=NgJI{uJ%cf^;S_ztVtVACrcJezTQKW zX?D_v)5*u?NF}}X6bLhE!|CL@Olha6fzBt3@}$RmD=u&8+cIgx>13H#TGX3!d0yYV zNgGZl7yG15>TjG*mduq()h6I{azm!{l%xXC`D9UwbOG;}w9((myi94E+CrSt-F3Yc zbaUNgUW&9|>aC|A`vI9Ft?jR;%EI$1nN+B9gVCdAO=8>M0dJj+9=7&^Vo3 RnIbLculN+-gD-^o{{bCJ;+y~g delta 7326 zcma)B3s76vwY^8u72Xk=SYz zaP2lQO-kr_?|EJ`oi>i!gtTews!h{r(@s16WL{pTZu&{1I@6|;Y2r?&&v}t)+D<=v z@2ekJPRfiH=k9ZK?m2s}z0ST@|9Q>)()XGDB}K`S;P1iqi$g7SbM|7>+orkI9TiL( zG!M!n%1L{#9op93T$8j*8q7E^4cY=`aploM@hr0`4zgx~H7muMPu6U(=A>8)$f|(V zo?>;9H5aUTDb_-=I>4Hrw2EA2%_snyGig(bXk;N+m!(*X$yx-~;uNcktS+#YB&_!G zQn)S)lzhpIevqWzQW{kb){4X^TT>ZXE5WL!j4dZ?6RN;4WcC_!Q>+2gH9S-*O6@|Z%{RV4W0Udk|!a9IR zCVXpDA0N{LkqmuoD0tF$T9bJl_OHi&HV|R{2=nPOT&eiS8^o^_7poLEl_BO9N9>yq z;?4M09Io{B`A2-Z?hlNN^!3d%UJq6sx7^Gda1`!^z=0hF0Pmfb-Z7bF+s!PiT)2>D zmX9#*rcvVABfFU^d}nSY+bC%n-s>1+@EJS7H{bvhKo2iRVXFN_tJlm7%Aw6a5lWjEN!6_xt!@y{Ij?V=rz}GhLU!JJ(elxeZ|l zK&r!BXhc|v9eDuYu-7bGZdznnU9g+vZHBj8qUygovT~q5^wj=f;H2m(D;5>b>Trwm z0aLc5*}Qxgbn*>CD{3y#a+Wv@eDy$4t0&c0C;49d0!1*LvvNCj;f)gB+6<%h=K$WD zlCB9`ahSQo-!6WMO+v`bgTp7aEPfPvBdk4Q9`_CM58>>62z~_IAB4>p;71q-8?Zl|G4n~)81mskbktW#| zwPncnu!a0=*SUm$kRj2R|(rtHTBReb}th{R_ z(H^sa9{|v@Q-X+J;piKP$J=O0G+2ms8IP#=CC46%8dQ?dz|M_O4I3;2h` z`kKCe%BM`e9cOArXhCR2s75$~fN4&o;mDF(vJIWV5xU`{*8-3{0U2)C#Gh(Pa;>tq zkSEKl4MJp-L@D8c<*%~IL~vjWhj5<52)zJapuUP1bax68N=C#y5*RgPgcf)o4jDi| z4`~^OBT%`FOe7k4@e-xkh@Qa|2%TCP4;}P{a`tz1B@%NOEc%pqy0=t3v1?h_w(2bN z;QftTC*4nXO?5vSV(wQ{+@Fn;$6S9(ys~<{GHqfhcngFf;nPa3lz|F4%pQJOt6`Iv zB8HL^rSHJ;p9Iis>AaQVsHBgqxAzf|8`Bb<6Q>~tAf&+h+w1zAv4atrsAE!)Ze8hh zf|rVfr&a^DA02XqAFeHAUh)_EV~Nw|7fIBahKT8mJDo&fK-%L@J1hQg-O;;=uZ({R zCr#@>JN6*l4*kyUUK|?!w)tUMU}9ve+~r5>=o2bq`Y?>t@f7(B$c;^z$@r=G@!Q4XGYusWiLW<&VO=r^(>T-+ zm{ewF!E7;N&WVTIYe@EC>%_0LK1Qc;gc+PM;p5ty7PGvQEo7PHa_$xnXg#b(eD{v( z@Mz;!W`%>6k`q_ZTq4gGg;P1wE=)uH|uz(kuMbPB9$9dUM z%g<2XF@whs!bPmoBI(D$695nuH%7(uc}e{3?otuhxNssv=Pq3i4ZQ}WQho`yJ@k?rx_NU8;vZ=gDi51*{3(j`7kmR{%6zqC855r^7GKc zzku*XfLlT(Uf8-^zKlKM_ggnkJ_W7RF!6c(CPqJPm`MKPe68hN`TH(a^t5W;Z{Z4(OU&f7 zW=x5QtdEZj#}eh=T&Q@dbp?A~ywiGYk_6jAV$yC%%Cj&jNMyQ_0~5t4G`@+gHiR7j zskJ@wj3XdX^ko1f@a-t@6YT%(7AQ7y01SSzd^$WG;hG)Y3@UoiAkwuB;&(eoJ&)sD z7ZIKS@Xj-mnpBnHR60?Gh=_Dre-;o$v59v%M 
z%!4ToI@27Z9r&*l+0mGw`S)RvJ`R917Y$uo#gBJ2073AftYZFhH~%uO23-PDLn~Wc z+889=gM+#OZhc70f}6!okN?2|qQJ9#HwK-51tG1vpq_Aq@4`pNSW*Z{UB*FOO!Ds7 zdUcq%mAi!Aak zIk#+<=bEpw-CJmjR-%1Ptr@Zf3{dagWr)5 z4U!iS$4kPGbnazL60dt3Cndj$!RG<%{qVSKa9&eVDn#1k5@IgdE~+SrQpDyN#F8a= zh8P0Qs>7}&)ygHbk=x*KD@i#Ek)!Yzr>Jmr6)4wmOP>aK4`>-~=<>16L_X3$R2?H# z(o;^^7Z^{n?oCPjZ}%pnFpSj`7jc5L%qzhjgxBDcJl7m}u0iDj%-aIW-`(S3PVt7v zDZae#XI?C^z3_fVh5yFG2stkkFKNBRV!Wh%rfd~VA*HRcXJ3L8f0V?_`&&8^q~Jfq zS(Zwi6O9Ph;iKa*O!+~PugAdG8*z6gvZFBkSkGCuMq^mBq9(>_Rt~W+qO#- zYmwib#P3evw{gclK3 zAQ1Uc)6+jQMwo7}nY1w(aNb6WI z2#?DF|B1LpXNl$`t2`8$AKAIvUid=YI`!neYhWiRkPsd8sh7WibAJcm*2?^;!8*1^ygFFBfgUK%m6S8haryWIkC(7oQF`1cSl_31DaN@F;P;{w)q?j6neY;R zVVOm4i-iIQY1PKNG`xAl798XL<8`B>RQ*K?;+J(p6Qlm%@YtwtB$DMwUhw01uN9*1 z4`q{)Z2HXsSPQ>-@JyU8qp#10KNv);o%Cag@iT!7zmtPNujLeM*%9!0ogV3^c&74; zstY;}>9ip+ro1i-}p@wMk5Q zT`K&IRC!gZd`r6P7t&p?OWF+!%ihbTGNLvIJHn=`XSEqEDq%aiCWmdCZk_eac%l+C zqZKaZnBF&gc;;|af@bv2d^WjydSG^VW;iP4v(?cxcD5ri1-7HrxorEid$wt&DJnrT zx;&eePv_6NW?WGTn$a>l+ZexwX0+PLwD=mJ8Ld;;ZWbSa?Wmf;PQyK-68?`4I2E=f zzD8(bcm1>*d+Vdl4A#k}2k@ekMV+~9{i6dHhUZ|h(2Oq6VO{Le{0pu*SPQnJ75S`< QJ=%J~Gv~Pp`-$=8U-4^5k^lez diff --git a/ultralytics/nn/modules/__pycache__/block.cpython-39.pyc b/ultralytics/nn/modules/__pycache__/block.cpython-39.pyc index 912cb7c3cb3a961fb49221b52e23ceec767ba00a..56ce4222fd15a607c1ae197b84ee7b258d6e2000 100644 GIT binary patch literal 37545 zcmchA3v?XUdEU;xckv(yLVSrLSClLf5($bRC7U!YQv?Ny76sEHEyXCE^&8u!CMRi5nxt)-OwVbP+G)zBP2HsJ z$x&Ln-}m1;uYK@wYG=v0xbx444lZq)-Xu?_7l)QMPj5y(Gd+AdW@qi(YcV@lNi1wxO-zpflDGQ+ z=`)b~0O_{}02wflt$+;Ln*iBlAlm>LvNr>=*+8}fGHh=FWQ&390A$3z50Lu|WG5h7 z?QMW;Gm!fM*>3LuWQT$50%WIsKOpxT$OC}vvL68Cfyx7@@ot>&w)fzCkGW$HAO-tD zKpvDk3OL_uKZNs#;>cz`%yq1HIRn@*=LUeGHM_X1M-;tI3SN3 z$U6YpZyx~UfPp*$$e4W)kb?&DC?HSRhX6TbAo~Crw+{ny*g!@BIbuHv$dd;07$8sC z?*!zX2J$!{@3M~qa@0Wf19HrMHz4mekOP1mx8DQEdkka@kf-hU0`gu1IS9xJ`x!u< zF_0$!IcZM-GGQQx06Aqp3&^tuG7iY3eHxI{267mX=j=0poT*p~8h4HW_CEW0z@9gE zJqgHJ`vpK=sO-X6J%#fr`~5h7zq#X`fSj{G0LTXn@$CML#b-3*dv8jYGebY`hES2L4M-=IX87q z)f;s;#Wo*y`(CU#Qpx2DVV2Eu5-44ON|4X;kaA!eqUSi)ZVOQ`m<= z7#+*YV`$!T-Kp4xYOQb@y@3*{O`L{qpKmmlonr?NRx6dt)#b&ys*Eu^R#$Te?Rt4- zsZwhkJo?U~+wc>E!T>#h<{XSc^{% z*eUc*x_SCct=gzcg9@br5wX4~oq>*62L>Gk~GvstRT?NPdAVA-;-;@uNO5k` zE;UMSynJXh;U=BR;;ecXYU5g0QJ-9~SY9kSPO-QVd;8F-V;4`MfN}%0SiS@zxwx_j z@>smqsFt0Jwc15P@)u{w0LGTDsr@MFFb+o~YCLc4w1%vQt!6(EWXx+)WFS+xn*%w) zr;a0OSqrg?iRLjY`d_tHPsKA1#2B6jY_LqH6MtTo}7W3cAHnlA!`0OF8o; z49pV*J;_8mEhq0FzPlNxKHFF-T^&uSqr83$iEGWcR?)Re>OBNW@-*QYFt#ftWxpFI zj!OFfE<8yTWP4Fd^O$QO&c$C2+Z7GGH@Et)HtViAN>EXn|=(|pB% z8Om~HaS<1S3hZWSSmS}3c~(8o`S{}oXQ*tz9dWq?IVHQ0v*S074UG`dp9e@yifHn;v`J< zpj4oV9lx0X#U>ibg;a}(lOVSvr@NU3Jly~+bYDi!6RUAt%i0<8z~&c)=V%t_k-Jl9 z%vTCM+aYA{ELY0a*(!KlK={jK_CPEk8C&ldCaDW2PbjH5;di)4Rlp%Y_WL`Y4?|2b zEP=mU33LM}*Se%mAg3sHFQ|P;r$%$?ae_F+inqCm^5sM70CSXdmqFGwhsX=ol3iJL zN=wU&72Y(?gU>L@IONnuD-ePwQ56Sq@XaloR`)djC9 zltJv{D&_nbn)5PVre0$5K_=&sY{Ua{4H1dZ2kGboI2_hE7H)tGY9R;<;r4x`QY-hU1=N?62?dzoPp4o!AqQj^K-RIqj-57B0?(Ko2Y*b zt?5S8jl^1V9VXSy6bLnCh&aA81`&@g@vr(MZsQuryz{CMyd59mRWJ+xNtT<{%m5`O zBk)`!v5?ng*vjK`C%l(1~N}9C}1Z8$_Ncl^Y_EG?|Yk-9G(rkq^6RYG*b?5maa!54*W?rMgI& z0o9_vjTvY@kbJRn_Ja^^z??&TzOpo4GJj_@_^|$S#Lc}>x=Ljk%p%F!wXugK^&!@@ z3xD`9;LaeD7!-o-)^7aUg}=>BT3PSTB*^Ni8wit7$Ow}FOSo<|DD$m3nA|^Xt;esE zZ3xSNLMPgQe}&8>*@E(Sow*d*%JqaOfr(apj@Xfa8p!9fa*es1$&vLW-+p{j%W2JT zKvks>U3vJe$D90x%r_Vxog?qy(Am|c(Y%_$nQKkD2@H`U4^TFfc_xQ=Em1BtRE4Lr zNW4B4Wg>nRR?O8yAvWW4sX5&1j38kb4_W%iTf@lZ!Hi8u8^+a}S&^5JJ$lS4-Jci! 
z!iq+IR2cYkC>z4eihWA@sU+7@>#MYSlUwTLpT8v1A&F8}U}^Mq(tkE!Kke0w#wJy%Doh!u+z0 zgoYvw!*i`n{bP7Caf!N2-ljd5X-)_1iQ*H+Wg##}wPZLH?p-591LmY9DD{^%CN@?k zwtGKfIeS2qe_EZb*oXICw{Fm~id~AUksIiP8wvblORSs8m!RRj0un?m6GAr-#MJ22 zMI=*# zGGoVKtdtg=r^o!ewdTYosK0__YP2uJD>>StJ!yuSe?il$5R&6@BNGnVXx2!W?SB>&YLz<^q=Vd-6bWOFbg~K6{kgof! zN2~%F=TLi_A{G_fk;Ng%_&#L7E3G$UH=tw*H44(fnldP7k+X{0MfAkrz_z05^(Np8 zX*GER&u@#J$MfWfY5@gyj69jOjQmc%F81MJFjWgh#dZLz%}9fxZp3eW6F)t=7;iXR zV~S0r{(4RGHEH@)z;5v3H6+cwUULoq>NbLG^k~LSSFXaev!zh=F_v(h2|25(Gx<1^ zX(oa;uQTU!QJy*Mx>!67%TP} zCRGTlSz`gocOWy_7%c;*fMky7yd63R=!L9|jv{KEHxu6*e(ra+jN?%-6_BuIBWo5sZHXJP zIV-||k{si0hT)L8G*7mxTM>M(iBzer5q?s!fP&^Wzu6U z=p*egd9pBbt#BGxSu4D+UY@V0G4&=Z=kYh>g^T!{_(wcCpGW8 z^qzfB?k^nOUpRDl6z+2I-yjKik$Nv$u0Dli3cmQL^CpB)Lf2#}TF!kpb6xDNb-;bU z3@LcTY7VfeK3j>9Foz?LV}g$n@kzqIT?FoD>?pX|Xh%_Npp{IwTZ?B_3;x*Iqz#)) z95?aife1N!-Wr)JDbW99oaY5m>$FoG-c#~k-7xXg_|&su+)?D9Zu}Z%b*_zvs6j~e zG_L&w4kN{B{eYIpwkAEA1u+r(dK*E+^v|;lYJTvF(qlu|w~&~IP+W8G9hGt2B@sH| zAo+B1947&tP|{Mjwk;!R;Qnfz9CgE=^CI_8=;3G5S6E= zG$MBY44?K1D6I%9EFrPj`ha+SSps5B}orX~YttJ3VH!=&D z5ut@YEAtMp?sEa%;f}nKMK|w;?xJn+M9bX}+x~)QTJHD7%p9EM%g+7+3}m?d;k@l8 zfjYEdD*i&Bk01kGcsF%nG@DJgZp2BabYt918(K}k%u9~OL7=2poqPs4GX!6MFRo}) zxH5(8E?jc0vid&!-AK6BoW{&g@YyIq|0Y4A8SkDLzdb>R%y&0Y#f(1Lxwhej21Kri z5EXUXC+v+($Z{7A`BgqBm04r%b4-NJe~3As&VL1GTGA%-)}Xc38i|9}gIY#tomfxQ z6I%CFWlGmIAQ`5r;}$cBwW3B z%UvcFThyDgEaK|DbCyedUBU_aw@}J-hMOSF74_;-Lu_yAl;{o!gv7y+dyu{^=2P9kYg?2DqkCX-X^@m1}(O9)2OGm?cr)y|Ek36s~#8-2d>hkzR`Yb)G#@` zDFc%!!G}JJDSj~8eWN|p;cwj#{0VzYhT!2)e-x!g-3B73HuwYuBxd|rZ%q#OG92!J z(Gm4){upJMf!WQ0+1ybPsM4f@$v`q}WV?D;sZDFY>KB=Nfr+4kBtnXU59H~;i$e@R zG2Zgov~c>~0^~8=9}ysT(msdrKzHr)aD<^6?eh-up@!1B$%i*%nCS^ybeNQmoMYPC zh5{xM!dhk2%N_JjnHSQLY%?JV;VBL66GPey;svdFc3c+$AoJNi^Dn^Zv4S{IJ*Fqn zx(-I;E0ue36^v%5D3vt(;y2)AZC5Bqp-{qpA*Sf$Gt7OK$qyo#f(1xD(kPMIqq#;v z7d7(#5NlgdH^t0H^p%%WM-O0Oe^Ujjlxb|5_}#=4Vfw6IqN`d7w= z`2#=pkq~t;QdszZ36Z)w@}try*p=B5f+-PKV1hKAPS3SZC6AzzcV~KqDTVeIs7B1N zfOLdl02YGz4&2gHlgeATD3r1XAWz&Zf&tn$!Wnu;JMKZbO-8d9Krp)J$)(CE zB;JpC9)fvSk!j=oNqQYL?}z5^dGmz#ht4IIIZ#d-*}%&HE}sMb#xEt*0%8etpc8z8 zwKV3}^6S7P1TSgt-)tC9yMzXm5R_D#TSQ!9;AAop0y-`r5Dsn80s95hbsS_5RfnBG zaqVnHA`>%%o)}n-ho)xWZ=$)Zxx1ygd%&u0;?hLSeDFg|US;w*CZdb{1Ll0bKSZ4C2T0H2lke|FR8F+69ju>? zp1|Q0kdXD`AhU;?*RXwwdfJ9bHZUEl`A|gvgr@o8g#)La1@qyGgdj#qq*e<>hFaW9 z2JB$PjSLvj)%C=HVJsqqdm8hk2ds382mr!*liXbkfE_5NLjXuXD{APi8S&KY3!N$$ znZGXpp2CSHLo=I^Y^B--z;y)Xv@)#>BGD7#V8^63`aRJ+6ec3QASB|dYg-$_I=vU~ zn*tIAGdzNaWcb>{oV^=Il<-(Vw}2eT03Y;vJQm-IppoVPMUYPuoxD{2eZE)9kVJ|U zE->^8%aX)U;vRRl( zFvOG@2QesGHa}$6Ao=6l#kQLo&9tLh7akW5WqN zEQ8r^&7*pfF=Wwrfi9uI=J}Npmc$&W)l2gS=eRBnLCQvI6YnBYn;L8%jsyF1o(z17#qC8h*6zZp$Ji&zBB~}>x%tx6IB<71abuFi7{K=C<5?Tusno!oj$qLrc z|4pZ#`et)`Tf2PZ?+~J_&jb#M{tzLmEhh1@nF+OH-h`TSsk%&62Le+TqcG|br-;a{ z)b#Qce|}9bAmXf=wDS{qMJTW%AlB2kaFQ)CQgmB;&D-QDZ|m(GCvK0kG~b{a5I%7a zn6Iv!=Di`t~Qo*j)-$FM`^)iQfp^ z7lryVM|rVnLK)ON_^83n zeWO?act!Jc+iXx5aq@D#HurqB_Mz&^!3oaHgNCrWxTF|7 zfQ2|$>I%!h#3$23qw(qINASjO^wY&7$ap8?BcvP=%E>SaN?JYDh zBDqEit6v2wvFCj_?-vVk0A&pNz77WPXB(R$I@6F@Ef=yOyGD>`S`35@2H9KY77Y*y zDASN+WUg8|Zc-MusT1g*4xb`9&|w@-0SUY%612%)@O+9lQ%1#t+74X(EN+A2kT%uu)vNakYEAYW5x~?kM=8%wFXYm0~5Sckfn9vF>GA_6~;`W^^ zHOli-FkrCACO%Vhljo~vKd7U_sdNM}C?RDn>SMe}V(`RfKP=6yoQ}p{1aWNbUj2yB zq`)Mf5uR}p84;QheqrLk^qL#(h@=ZLwkC5Tbf9^zNZP5ekq`;yiP%#6e56D(baR^{ zjJ}(x`W1G=MJB(%ByekfFHdFQg3+V&oo9!TJJ1?JC)^k91fS;^$`k=i@z$H?kRg1? 
zE7b&u5q1we=MIh}Z(9I~J#UWbJpy!?oc44&o_r2FW@KvD^Sjf;?9gCoU=j_T*N_Ap zriDdPBcj2Iei`N(jMwIRSww8pD~~lOTw=$lZc1#+Uk(TT7J6&K`A(FOg6HWnW4Un` z&S)^>KJcb>OMFmS;V?O~fbKvb(IajqK@812LQYaWN+4emnO{OPG&0c3xseQ@uC*)| zCY6ILQwSLfZ0&#$ee%ycRDIeM}nyRCHa zkLdB!3kkX|f+x~(Y=r0@L8jpmT*T8K`1*aP&m)uJ1!`W1C8cy2X~xi&CDuEsDvK+j zWw0Wq+1Q@S>qprnBdfQB8{!d;nU^8H^(L8L#ML@W^2Kyc5mv?#`H}tbckdx#1w7Rp zHLVUsIV{1=UPWw2q(cmqOo}Ha61qV#LjIS)(Y}<63m6cOw-8q+8TkssPK#c><_)9) zCj$^2WWrUVSVu#Hu6BZE8{Du65VNy4lXKQ}c#+nib-ZDLPckee+sK1V(%a$q^=6pq zu4rXUsAeB{Xn%8mKqy+!>b9PheJ9$a?AR?T6CxCN-ggtHp9ThKLAozg_ikb| zQ$$?UHNZgc+x~|C^gnkqWrY73dZlU6T!RxOw5qeHb2(qJT z`qv1SN|UBniMF0VO=DqD(kgs2n;f?G`jjn0;5Q>8PTAwE;RFuR-$k5iv1m2#NWIdc zF?8>*YyU5j3e^vO{sSo=c0WJ%Ht% z-(~I`6hQ|H;WjcTGlNn#iP@TM9u3oOm*yL(?r$LBtIN9SMvCbfrC&l5MSC3`3WOTA ziJ5of^JT4NevG$}MBMaQy;V?rZc!Z}lnRrZYF1#Os8Qx~*rH-VHdS(yT&$1a4=j(Y zz-8D*H^0TlS+X0SE34n-*}IuUMc`+MJ41leZIgz=wHvc}!(4j65pEy$fM`f0-jAY? zqbfx6UyzYKVUpYt=6dQn6c8N|jaA)o%!WYmFtIz>9fm8t7vM@*vD69NCu_cIP(y_0 z;$H7gjB|QD1&4$zIfF|gzMiwo z9ayBx`}D-xLLby9JhRp>t0mxMV*U6|)KR~Y4!!K!fSp;laId+;T;-%(wmIaBbX6}` z9Ehq_biq_}^UP9t{>uKD+EV$z(=$u+S8D1KF6c$)qOqR9>FA((hR}b_RJy9sAmeC?)C2oFxZTaI)Eq>=SDF=XN$W9!Q*$faWFS7{sAqX@ z&AZkWH&wx2M=)ASSGpAn=0ji>>iNSS5pq3(^qG`QG0RL!vxP{Y#Xg zeiO-LZ*Ai@aQUqW59?Eeb?`7E8Gp_=l1SXafVj!HV4ZF<9Xpz``|SQm4C$a6!Gq1m z7+r^57O?%9iJ`0&xB~+@7}j5co7K!^P0ry)aNx>RIJ_V8tQQHQO-~oCJ2wK6gjj># zQR!G8euvmm&R~Zk({=1$q!adA- zQ-{5~h=scd>HH-Or8n83w{G*9f^#d#8G+$6^j21FY> zd^PeC{mxGiw#G!XA7PI_#8Y3J_%hK_ECdrNzMXo+5D&~I0nqwoEgr`{|WK?h+t zY>~pq4_E9FCyJ%tVxA*h5o`^t4&mey#F%Mh*AOe5j+(h5hG&mDv%T-LvfbD z7(kSSMh4WmmJfxOrqqw&HKM?O8wcgL{6X2**8HAvqk3riLPPW8tmx;N_~WDgkf&V+ zNBt*WXd93(0`?ohfG~6v19D&28bM7Fb|hj!a#}V>R)peD_-~Nc2u;eklXd@`_Zo5I z^kp-`(95_I`H}kg>$FPyMNV)Ug>pi6XbnxL45<6?RP*`vjV6y3o~>aoK*WR5Qg2|> zf`W)oZrWqQ@MX`c10`${S3&d`NMlK6pvLr`Q_>W+^cIm;^unl8OmGH=@Qgo2CQyng zMOL8@i3ytkXMlo>Jk|$dy*79Q)K>o*S8($h{ix;|{KznU@m!Z{*eHqjqC}RGhjRRB z7_xnhekeTHgkoJQ+}H_36z)OXy-E5{{L8JlHh%HskX*44Yn8F-dEVU0c&0I~4&x26 z;ugrLSaAfZTda5=AW2d2w_?YUSaF;2Ej<-hyW0n$x6VC>{*vE?QOg~O?71k4cLxQs zr?=n`6-Nx`5l-xL%K*?Y;#vLe0<_T#gYLt)9kYjG$I=yyd;@>nzG|)LMOERpj{z3a zuy5k@t3Ht)Xm64)KSDgzH>EpjO{|t$lbK7c!OrP<;%Lu8MudZ~bXDEW&)6kQn_aFz zRpWjV;!@?Bo03Rk z>^21pC1ky%_2jG8RGWOLk9X)vTSu&ZcnX$^+!DYE^Z=Mhvr6>f7x9ee7hng&od@Fy zTWe8W&^|;lZ~ly@WEdD683;ZY$nxJK^Uz9dKAyF9$-hC&QtyP(la_z*H}B{&j%MS)bS|ASh$GfGMmE0Cic;AdXG2tPqCK0fU;@ z0ZU>0)}ShcvZg^k5o>)sw9mKK7R*josOAtCF8O zQH5-Nn{b!O5|arg(tM#D(v#;>peE)fDfL|I6(Kj*x~#s+2Sl#@Z|0t6avI4WlgjoY z1JeL0Q7?fm&Bkl{pxd2SjCX z%Gfy>R8YoWY7U%_r#|6Sm#T}1Rcl-;%wXQRf}Mz~L@1#IJ}J=+vy~Dyy~R2WIg3yN znj9D$CvXy09vI}bmYRcNAYL&BGZB&@1b^W_!AjQLIy}a59s4Fqlw@f89*3JhVB=hW zo9%W<9BE|Gn9wxP+{0)78vgG;5#6gx#L5v=SYz(LVUA8N^>3Mc9SL^Fsn5B6NU*uG zIJ!10Z69>;W#vFnX6lf8={2{v)!P@h5l&8INa2{kVV3pD-a%UV0XGrFfICxi!X7&v zvj@Rxxyqkhko-<|&jZf`7Z}7jjH+iW&LC&Fbhfp3nKANuJ8wu6Nw4EUbjkj-BCO0}!$0zC{+ux+VdqT+J^MMm z)evQ3|1`Gph8h`O{F_nR@KE!o1@~pry+9QtjF??XQLpNoA^$G0&%z7)7RM1vsJl_C z>Uux)XVjc?Ox}a7UbLED#9oltG#x9`M_~!jZ&m;V!U?49aE+jlH>1xq-9||jI%!_4 z1u3Ruwt>8gC}>b`75bds6 z%tZ^xICCOkeQHw)!h~TZnG@oQQrM&6Mhb=P=xn5+F}&^!-~T=)&oep8WSz-=CKRu| zmr{~KwuJ~WGn^Hw=}m}_;fXLI+IN}>;+ldW9HY4jr=gI(Z(qb=iVlBxIHM7Vuz*7m zhu#i3s{!_N2hB*0BTnD)iqohsRhy+5xPhxRZu*NrC%xl;5Ze@uIz)+}a)0Th&n9H6 zozVUqzy>pW`*X-ex2%!zq~fYQW2F5c>QMb7o_3LK;~d#ME`|aMw?-Nl&n&6NGG5Sg#`8F9FV%`S?~6R{smi1G5JZ?k_a0=4 zzMhv?gxv$C-yw+5O{05h=oSLiBTkUY1xMMbAqlHyz-d{l*d-1tfQh z@l*c<&SF>hGb*o}qWAh(i6iBUt7IrIAc694y!&uyc4o zsEIZwCC}Y0@onz)G{SaTVyLPP9ME0)Qz*A5CB48(vv0A?S5~B<3q1L858Jk6(aF!S zD4&FKim=`!G>%tBgoP#{YHXoT)0n-Vf^A#!>Onl%%%PD7_*opS?w0vl?^k6vQ5AKxbl5fLQ@6n}ryuh0SwF=(SI*5}dw 
z_R!O}2;=Ak@u%u5fOST^{R*J)j2%(0@{ulLBE}Hj#Bt-Ty`OogH-lZg&69-c^B^!p$-D6HEfNUkFZylT7bLmPZjNuBaULH6gTv z!*cBSyH{t_UOdxG%SXq?%3dKXh3@HtJgvc;i|@1p6C)j-eYhc#wZeaY9g3*|%fgq6n)V)~TMDeF>HC3o2}o!UINAFRJ|g`DH@8j89rVXz;+T_n_@)KFV1Y;@P$r&5$7Y%j zcIAP(hmCS@0yPObX7Yka5AYr_O6f{-^PaP*wCLtbm*-?k1yer4lgORiJOYS3s1QgU zP`JQ@Dfd_SmSrXZUi%zsh5e{iCVn=EkwKU;8PI)c)R5k;B4X?%mLL+}O2+#$im)<{ z$d3cSU+4U7M&=*eln#HR$*g(^Pc?H`3T%{(H*sF30`C2o=RqQInn=RR1_;1YG%(U; zxf_@4V3cw2`!(8Z4{_)=n1$k zME6M!^&sxhp@n*B3%veAXsD=yU5O751>EZ-PCVz1`6xnkPjZG2QphD|%?|XUVtBi( z$)^!OUdgu27QDE7QxT(Gd>eI&??$VP+2S?cP)iqeLIw@aAW}fO6`y*UyIPu*s{#9oZ_|_SeJ1)Ss00mI06a zdb8C#tZ!29Ksm7C%!f?14L4QJgi2?gma&W%eX`NtScU| zt2iWrN7`&z6E`-dpAa}8U4pG;TY5t7;jRjT_XZEWh`=(r48(tec~Oel2v1Ks1}GxO zR~lwMJBoon$|eY&zyv{@qt^g~gl6Xm;ZRdrxKP0isS469g3#>kAM$!cxJO#>r~@t8 z5B%LL_jnl3G&A}uyJH<@5liJETX(b}Bl@=p%O8><-h3{MXu7b7N-I(pNM={A%0w@| zz{<##%awwe<|*JS$8eX8O`&sy*RWm&&8qfQZyIEnh|~pXek$m3%)f0i&FP9T92e4= zG>h`tk2!%(`Z}EBer$;j7>0fdUDe_h9gfAh%LObE7CELSS zWqQY`qpNJ@2_#by!O6<8l3Q?y{w84=d$CC}!ZP$!>J4Cb3t=)l5R?&(wsJH4P;$j- zfP|Csjl3kkh6mi0c~1meVejAN`XY9CQ0!1#kl>9Kyi6iEFojB0cDk>DB^mgJA`qba2Vv>hlS_CE%Fi?SnFoh)t^#?m2pIVL>B&J zUNLaowN15RBHQLvYtlIGQWY$)z=srgK1wJ~s<{_@&dQPrZ!oZrrr~ z25$%1`7(g<8-Z>uOG}OQ1k3||3b*HSRgI;vghFu6*qIeRviLM>b1(h$l=@Yas`<`2 z9wHhXISNgUJWapxOcOd05iOV-vgu#KYk`kBDJ0Uy$VHJSnW> z!R9+H#9Lqm51w7%+AplqfdPOr?b-bB`N z0USP?2}x9P4L8YQ_;n5g(TZ$i7)u-FUwj0?j9G?N0n+3Ytr;FX0sq(;+#Z@z7V0=n z*k0lrTLq?Dmk*SyM+ka>7X#Zr5ad&(Z?5M0YoXIA#iL|8%@6CQ5!_5Kv!6)tS zH1(F4ICE0{Lp-IY(rUoFe4wA>hET9`?SW*^dMc5^Kg)caa!=Wt4^u;c+Ncm(3*yfEm z!dDrU9@-HFqT{o;6@5=!P^0LNDV(rhv`>e`v5}Aq+Pn7&-WO>5S8>WVd8&|fgpU+m z+!9ewtYB@EMShe?3yBw^Eq0UO>Z`mY?`8e8sFvyqQ+b>)(TPY5f58zJr6mc=gUfls zNDA$X-8326dA#>Fh>xM*Klh3w!YD#I(8bq~Xh8eu6813q+EEB)a#;AK7dz{i)bGc` zWv?jY#d+o9%clVdSd>`7)Oh%{Yq5txUYLX=)1tnEGlX?*;Wb8KgV~Hg{lY3*z5W?z zkamdIMhUM&3-~UR$m-JR~nq%!j zXN5nhif}^&LC+)7)p4`@!$39(OL8*^`8(*$?Gn9+Gd;sOb zEsb?^MTs1kPSxg%mu&U>yeJqEU?(vm3GCE73{HD4O&&s+*dRm27~DDRkAsi25k06M zB~THWz~Ouu3DknvMJzbh%aS=Kj0L@VaWZ(BS#a$86KzD*aR@LooL*1D1q>gfIZ|^l zs=)u_2sO`M&t1=BfiQNGwmIvZ8{$)0l-$?qTgaj7<2Nnj^Q}Jbo*`}t%Vox>!8$|u z^DU|_K4cx=Sd`#;e^D~nMrulr6)lll-@r#%bqM=_jtYMQJ*yWFvyYYRHaW)JkK=Y7 z3oZl1zEFpF;m?_4xHt438cbq5xzT};F~m? 
z{lUTzWieRC5+mP?C;F{%&Jb)42Fv67@qm0bIyQ&q@ZTZaZ9kfk;7f%+Tkr{fb8tWr zTE@eXpGTdt%+)7!#3*3F5-exP1fd+7K|Y~p+{C%_&*+Wwyysac^F%AWIp&a;g{WJU z0B0*#dx%}*(Y0Cn3&a4#4`GlGQfU}f6z_uogn(+9Vu=*!nez>@yb$t)+=5qtoP0t< z461tjnCRe#K11sPiA!55i>k+03Kr0pRaY@@Eq^JBG|7z5K=ywkB@K37|vOL(<5QBk1Ty_IcK>RLP3w=Qt3H;xo5=tZ zid6Ydt$bfoz7Z(j`jYRp$d@8ygMPJ#_w8V^lga%|b}@N?$uBY~F!>E8G*V=hj;tDx zh-DoDC;?9o^6FkD4>6&pCQfAWd1+foR9-50nm>_Dq=!RfHv1j_rha zut?{ZbHDTae&6F9pXF!GuYSoDDk^fC=cTa&a;w4F&)=rZ4l%z>BPnuLUS?nB{iA^2%&c?L)nQ+{B1zZQND1@p_-eKvk zP!+vozX(O+M`L0(EV#Q`Dt~72%O2|<**j1phi@pAPxjkoVQq~)pGGDhm1VYpmu|4V zqWC)LTUQpA6+J%~oSF&6Emn0w%$G#4$n=5Ln0c9-g*0CY$kTHUv}m-*&1m` zrQDbI?%rmqiFN=_lb^so=3?=%o)?cDP0WTCRGa8RdpCiu9MP32T?vUz5Ha*Zo=021 zDk7Y0qlWnSQlg<6R&<+$)uO@PGxq87q@(EtfAie9>9>SV9*$`75Ln|S}MpLr^% zo2U8&FPz>!k5Xw+oC86T69wPdyc^T)0_+5QfneF3p(0BQRsf1_l8uFRgFDcD1aK`N z!&FAwDr#t@vZAfol_#o5TaUavH)t=QmgMvDt-@{cQt2@nFM6X0>N^5r6chU8v##MX zwQ!BXr0c(2BfZ(W&Wal_U>#+#+qjFsq{x}#e;y%DEm~w&b&CBY)0J`EG8fXs&GaD- z01g6L&}NwmCBzr;c?*GR%}}D77y795W~rQ}W>mgW((TJh2vqK;vO$$)`&jh?nEasO)xs-wtN%0eQ8uT<$5`8c@w*jCzXzL4Y@+nsu{p zldeRTZ7kJoM-Y$f@8UM}BA_%hTi=6Epi`a@teZjQtpfIEotc0$&BW@j?PJ#B-kemD}Z|Gah-aqs_l;zH*iR z3?<0(H0mQ}2q@5%MT29WW}0LnG)CkT17d&)0P8xBTG~E`WI;vC zfg#AQ18f6q2Y3OKfOJ5MVSKLV*s#<-Drq~`IfRq!=u2lW=$agpUA0v&eOO^r6u=uUdJZjHRWnh=ClO~or-l!B$_bhSkGDS$EY;T%X!B1 zYiQ=x&ZFl!0JF~4Otbzt$=PX4K)qmfR}=msVk@e2y z%xMfa5W@hbgn{{GzlAG2PbKakFjAkxF2B365kD#r#3j<-)x&=BFyAxjJm90i&sPy6 zo^A5(?tyWHuy_c7^b?@`$B}CUt#Lk{N9zdyZ-aL*O`mT<#9q`OFZ(NUxX>E!*^NP7 z!0PzTa-nacl~(~DuTxpMP0jlbk4m(rqLU|Uf#5KJ4;=QCUHF^;Wa#4=aqPY&t9n{K zBNR)h-_NOJhhhnb;<^CNPwpX1vOGB3JxgBg6Q)YO{xRsk4-lh__62Ps5n^X>X zs_8SCcT;%IJL>lt>Hi@a?E6~QiMrr#>B5wBpyts(Bk3!K3cM7vj|@2?$%#f^~3naRP}J6J1yH*tvSwEL9FSNh>Tf9pSY7?-mEX|cxk4y=J~QKslZ zeT{-AAUh9WRoPY8kQuU}?|UZs+iN;{;SmA{-MVHkqlPO~GG;GJZ`C8`2fAsi&kVe; zstWE)<(P(@nW@dJH%$#6qcI|18|=DnjY3|utTts<$gj52KTNWDOZR8&+Cvk5L?yFp z51dsgmzTF}ryr6fTYvJYtMIt-bI+N@c4l8T>=2$Wz71#wus?!px(ci@!x%|*C7$yq zgh??qn&LIo3)eY{CZ$lJ#QcffX20rH%vT+XUHE%KF99vNyi zypD5APOpP$KB1EFI;SWnQhf6LA#WQ}QPMM+2t_q|)$tyQB@*;wDLi%Dx8FD%_$W9l zp9@dO)7P~!dDZ~G#~ArmVTV)mve>&oa~dRN(r%1opY~eTkR2Ap{co>z(;HCS?xcS7 z3;M-vVXLYndu8?Zy5W=k-toNwZzAT6MCaxcUM-Xe`RGcFHI4fj?;{tJ)iS=lZP3sY zBXTkcu8MgVR51=%Q2{1#xK_%h;Yo^#`Qcxd;hvml9IoU1Xhlysm(JA7%R5^9v@aa2 z6f=m?fcOTX5jlLqt&W@yT8$#7ZN#^vmgZ*Ii#eJAy8y^ucoo~M7mS1wQ_}%@ZS2!@ z^O2}-os3M}%&#N4!;bgCeuiYx1cluugn%O78M!pRUT%C6%LV{X0a^fu0jB}1Q${g( zZJCDvsrxKA^ zlzwgEzrNrtEV;f52Lfc823?WfFDecfgC({|AOz|+SgFEen!nE*5GfpbUg$|v* zcalFcl%2V^dvAB&zW4UM-@cE3OlHoLg14PcI|J?Ap1<{9Y`jo#kR@&^vL(cyinD(w z6|I4YIv@pxj`jz_k`z-UBQUH+)xpLFYFO*Z`qYco&n-kq&E!2pNCOSnmq-iUTQ!U8$&5qUx3pgB zy3loL|AqZ?EnTxMU2`qL*_PmYEeFQPiRu#vC&)x~(mHW;swinpiOFLVu9>2l#vi&* zlhf5_tf!Bj$(yNq*WNIj*GQi#IZfJV!2NvAkL)}&`nJ0z^dSobM;ln#}} zO725tsxz8iXRTb`LO%Vh?8a_Zuq0Z$71@x*sGN}a4k>0t>W5@q;(JwPP>se7SylKU z*@*D{@}Q*fF;No-Btz14zt#-`gLJW6c1BRX7v+yKAJHS8p>hd1#mGV5R4-XsuA5wE zSo)dg<4!!bZSAr5?}pP3X`-UXR9$Kc^Sx0~*T3XkfoN4AI7t6l@u)L~=d7H3c>SXr z25vcd3!dDCCj-@5U{1@WMfo1Ok9#l`to#K_5_Bf7EcKPDrx*$G8~qbejnejsqq*uDq&viHRM6j^!T6{jZ3=!6DunG5Rjl?)i&Ty4Q;9kJHLy{ zt(g5%&FBalX2X^zEkMn_&*Fh4$4QuktzSB`yeGTj|LnPCrq3F-o@GHSw6i4B-^U1C z;yF7%g24xEXDk+#C4H@}yrJT6uV_dLd|@7XUDZsJHOYu;iq0DmiC5!BEN=MiW*noe zZUz2%JtD>=Ue@`|Az!a=$QKdJotf1uv+HrX1?B@yzDS@+@UKYm2kA`hjzS9h>dycf zWft<8LN~ou``79d$SeY4MyYSnYT>rs%+KNC*-)|3H}>wKU#pwUWsh^mbIw`lAM0vH ztYI$99?!-9v$?Pt+Hs}_ugYx0tdNuV*4h9;>+X)Ix`APSOQ6W6iP;FzeOVtYhYT=| zNE*ZruS0hFc-_$CuoTF~jE;U-F>^I|R!!G10fV%9(-TEAFw!w$Aw$!7jIAK`vrQ@? 
zXK1bP$hsGBvJfXRSvwZ3xm?@LVmc*Elh|F?jUFGQdQ|VMrJgW=xN)ctYpvcr}J8 zAx78cM_*ilWKmhgPtW_-lOTP|H|Km6&eLB5f+&=Fx&Ag%QrM|#L!uVuW18A4=>S@H z(Ce-Wy5uhT9~$N}iOV-1 zYtGL4_{L+6YgfW0DTZteFY`>pAP8j56atAYaPzL7K7fgykkW(sK+iP(D*q2?z%YUfE!8GA91z2B&*lNyw1I3A&BHVQei~`sQT#OS#MI<@4HI7*r@w5j$^R3wHv<8I zsZW}{){#W$Zb^|e$U_ht1Fyt18Gf>wnjw|dk?(^Xg}hz4BrqK`2F&6^fYiMakf8RN znX5=~4ekeC&LLGh;ulyARWZrDO%9s|s+#ry!_YwMPsy`%zSUbkg-fP^%ya$Xz<@Y! z4M~QmJx43sc9JprNLyw8Utm^00Z4=DKh^dIAuOG+ucI^Wyj0)q?-Qrxob_xTEQ0pf zdU~~}l1A=)-;uKnPK2Sax0lfu+8?G*wwDtd{Xu&fvD1b2>Jj_unqd|)){zi*s{VWAW^EvZt zb)aYPkdQa;p!M4y+V(8`3U2Lr=(GtWwMaH1c^C;U*Iq_)9?2_6E+Dx`f4eT)lJcc0u3!HWo!34U484Tx@u9~2=o zLQ%Lz4e>ye!hQ{(3Nx$^X;@|i1zYw}Mn2vi{Z28Z-;$_1#Vnon;$?>pmLV%XGt=LJ%J8IOPrXEIL0m!}he;39pgYh&4OkdNv> z730UT8Os+b*;h&b(^Ww>zqGEKh1$AoZy&i#uOIL@@^KZCjU@HrUKi;g0-I=GeMR`) zyLRqdTOi)@lJc8-8#86Xn)ee5flK#vlULwWud@~HMPxK@r*+?@oOkfV-^1%ricX@g zpp&%I+k!swCMx80;xv-2xO1gu2rOEy-1eLKEVnaTGdQtkNX-Rpgmlsm_Wy(IrEebC zLv~a5!FKX}df&lskIaD>kizIoKo%^7kPn!O4;jaYO!1!?K)uohYk`9u&HdMg9O6tD z7n8cNrju>sZD|IEX>S=3#yZjrbmj#$Yhy3-e{3aj;2<*kALUmoqNtb*U@FT`$Ne7m0qtQbUkz6l>zO}mFzAq->hl#4}aDFbSOCJA1)s9 zdqYqA0vb8U{TT|s;C#wIk0?;$C;6x8BLH6@LJ(T%a<8rVhlT*b(06d z4Qm9jq3EubIg23eplW<-%SnAdGk)m)n=L0Ty`OQfV(uNxT|T_ERt19?RTs$fJH`Oc z%lTF#KoD&uCyYMk^{}8~67OmAak1oHHW5{xk9ew;PEbHy*^qvW**^T{<+6!!;w4`0 z8!C$FeL%(fGX`GpGVb#=4}AV@@i{)9(G2ij)isWWrRy4ex*s7T@2QqdXsu|)yg*m!Hyj@a*H*B*48571baJ)1h z&sG{lm6!LDvCOfI>)f?WYE0J{>0^aRuMr{&9*K1cZQ^r=DJle5zgZX6dihP!4={s{ zs^0v|Ol4#$*3R|~o)>oC*I~439&TWR!y~`ocmsf7TXm^QxSoN7)m3eoqx7Vsbdbg} zyp;Q|1PnA#deH>b9_a}xOYDR_BHYmphlctFeEq?YJK*cAlCKHQt0PxNDEyGS3WO zqHsqz(?2xSe}_Ni3-(J_F|?;eWMv#J==PmwFGh9WKq%nrJ2l)F^bH665<5p=nzlY2 zchAph(d1auCaAdUD!jUH8}Rs}eSZJ}ueMn6r4}tVw(21h4Xsvuv*k(bZndDRRjV5z zv~(JeR6)bxpIT4quTVkElUS{!2zIqu%*#xCjR8Bn^g06>pK0^J0^Vw?(c6t6CV1jV_GG#uK*YxUD()e=emq#gJ(hH}kTI z{fkT6ZwT*+O(_G((m$V-GUD6qpJ*d2^=SrE_|AUY{uf#1O%&*5T;o*iNg}yL;gVTf z#D?O;qZ_=2z832iPN-N{tAu^9uHl72Sl97FJHFs+pYbvy8om>w)|Ji-$ST;FT{8z)gJhXrWfeEExQg z$kxXUkU`Q%!C;pgR`9JZyWT|aLi)q8~ph1ymuZkN`CWK@mUT2MB)gA*gs~ zY+rl=Z0)-<@6683^Um`=Gw8jk)!Zu|Et}@IE%yQQi&p~a>TDHOoqRNyC9+T)jm|G2ssXc;Md3kyb$~WIaxa& zdYF(i@O;}_WEn1muajf&tFZ0+Zjj;+&P0yZs*!O*J_j$ie}`O#QyrP;IX;K4K8LTz z#}piY5I*eKJ}IA*;-2z-%a-YyTEQ$E^n_7#b5rZ4W6*R9;C0i@(X6>C(9|?jxndxbU||*`oAZL z6E-BK_&9bvRBV~zWanA3TD#fVC6h9|+WkxNG;H?OxwR5voFW#FoEnq?V{5|xmQJ;_ zYp!X66>D0dgrxYRg7dS-`MTo@8^Jd`-TO#ro3DyDVT3;DeV{7qvhKO&K`Y)FPoGpF zI?Z&xQnf*4*XH%K<{B10I>onR zGXaemZi!hAb#n%_OK!2`CWCu}En8TpPEIQt)O6^0Dv?gCCvtJ|Wxe;l)}z=wJ(tK$ z&&88`5IZNm>7H$WMY`m1 z<%+-J$B21#UTXR=5{TSa-fG=%4&qeKc3jTCefw?-B#jMs5)G(L2B71L!H^2n!Tk7S zf;uiUb)!K6hr`{A$z3i8hyo$&Lo6#WJ&+CUa8xQ9NrByg-;-D1`oTw|-{#&utN|7f z+5gXjHhBy3gDYMU^4hJzFsXVGbekKlkcwRKR1^^kpY9jIh}8WU2=4djYQNMgVMv9ok~wBev~9EXo)dc6-|kK2%^?~wOk;_!`X zoRssAH&|ypRl>w77?#`I0G@6sQ*&r)gVqy~&R?Bxv?l26)!EjbxJj04x*JWOz=SL? 
znHU5UHSWFBekp@#e<@{M;=G3+9{y(Nr+n>Vmt@EeojyfM5^Lj(a-}@U%R8P5$x6EC zC9;b?o#&WR9y+V(NCmNkMgn18X<~WxdJ=vOLpF_NclE=!u`^w>PGVIqOKzIm5hS*4&A#gZwA~b_ivFC(ieI+gSxL0 zs08t^_DDzr9}Es(@cVfQ%NxUkhFVUYGAx5(-s9qnxHm;+Vx8Dbgky3MX6;rZd)#DXu5O!Pj*;W9LLyjg8^RlB34 z3|C_g?2H_1=Lg8Ic^KxW@WBV6P>JEx#4MAomGj2$i)(do_hJkJ7d-Ll1QnwvNMx19!7Dp5-@TsxOqZXFpUChN7yTZ*F zH{;y!lUej+*Wk^uUie__v!n{c<3sU(V2kq!G?E-73TeYf-S?OO?hD9&^95mbe6vFg z%ZbEz7};a+@4msq5iGujk4QwkUik5P5mpt|tNQVOkVUwt)-J?*P*{Xs?vn`BxHvw4Cc8|!kyuMTw@ z-ql`;7qk|t?h#Yy8OnP@!wMVqFX1C!S9>zLE8EH#KtL8+%f*(yMmbk*mx}akjaDa@ytMdE!}PCzPH(x-%jxyZfioW zm1ku_mSA)4d*n16ng29dg!KG0DFMvCFoG9ZPS!SAi3S6)?vDOgtV4~dvF=!BEEdbg K+G8ED$bSID0yXmh delta 1777 zcmZuyU2GIp6rM9PyE8kpv$H$hZg*Sy3#BZjrKPO})L5V`HsC@4ZDNYBFuSF-?U1{> z7|1k$C{hv>I4UL@Y)LQ}F{JE+C_#e{#0O1?4+wpc2OsdsXkvmIf6g7c1fzR%&)oan zd(S!dJLlXz%5Lrnr42(9=$BP~xAm6U&^m~fYwMheea=`RKjKW(jXQQ>ER)lUS$G>N zx_Va+^=2}AH)hgoVq{`?XumV&?CZji)C^^~T6!7E@tV{F;o@IXPK0V?{%>Is_9^FJ z3EopaG7@Af$1SQLLvdQ&1<;0)_BwRoY3(YkDyH$y7r19fuD&t#nKsK%3466J`k3tNSxm}333jvq%h>(BGu zA_)OdwT562GuDmhgI!5r4Ya_XBau|lNhA=<7U*f3mX3T@wi#Y1>d~kOBUl&v4))^x z*cHg*Y{g)ChHorgOcg_lDl>(=iDFAMfUD!@!`zOP`cdXqwvfQV#I~r*k8wx<4Gx6* zo!x~TUNUz+;hnE467;DN1DBxZcD;kaH252 zBb!O*3oMhv!}Xu)?{UKx338{eVXI~KJM5)2v+MRUXLlx-V^3haz5p)ZR}EjlMLf1} zO8O8|cyQ4N`Yb;>f0MSx4GV>Wz=Ae0oEJu6RGgMv7^b+LgBfuvMT%zwe%ZJNUc~F& zW#%ZI{)B7b+>-Idi~E#$4agR68W4|&xzAl;G)Ud36eiVtXw>xPDzPkOWtxX=`Ghzb z$VWJS1bT(-@k!10xdB(Brp5%K!-rbavyGe>YBTa=kjM*0i7KMdM_;{)h|7|PR0Q{$ zZh(WAmMo1k@`>e0%=w1XT%W_kR1$trV%@N3j_D@c&*tC9=^yqMe&5N+v% zI&IkU>7`wRz8L@t*>suh=c7E0ZRLo_u6UU85K{^J+QZa`Sb(s*9v0x3$DWq*QWbw% zy3=sTa5BmY^kHK-)EqD88N^=W;use^&De3AYEHo+yxrW;ben)&CkbA7G6boqk2LW& zzd~Kbh07-OF@Bf_h>hSSqoF=T!?&n=i9!#VO!XdAW!0zpsa3HMe}#&AOAKQ3>CQ`w zND+i>Y=|VDY8`-+c+#xUf8~1Sv*yp%q>Xer!9XAc&=lLfMuOMDqYj_6)$%Vz%lP)7G1ZL z%r#v(Kd&tIE_c74yI;YVy|-9A5L~x*%`>gp2bD$-F*j0gnk5CqFugRFA6o%XiyhtP x=?~>j_fxO{6Dya)J{(y2P9sHtM=vPKvKFwS;YuqURxHJeT83p>+ewBk;~yCsiHQIJ diff --git a/ultralytics/nn/modules/__pycache__/head.cpython-312.pyc b/ultralytics/nn/modules/__pycache__/head.cpython-312.pyc index d5773fcb14acfe41d9aba34209e79aba26d45a60..97dab04ff7c82bf07d9458b34917cf584b647ce3 100644 GIT binary patch literal 38289 zcmeIb2~=F!nI`yRg8+gh*cX8Uv=I_Q0-=3HyOc^Kp#=#oD2Nw8APC?^XaNVdx}0tY zS?LmZ6I6LhA-m=zs?6#nyK5@Z={%Eo+C8P5&KbvZkPaLrpOU+>yE^C0nIor^E~jf| zYQF!zEh2a+rORD?&UDW`aO1uE?)L7x_uub-|6O)=CWjE|`1jpEy2NpRLpMsW$QMs1 z4IFoect0inOcX^v-}KHj?Ta7RN&!}@pz@AU^7J39w%%9jFN+WK8rJsmEe zyJtY~e~gL1p_pm^!DDgD31J{G5YOE=;0p*Yf53a)a~NHS+xNDlBsmRnGm@@5O>xu9 z$4* zVW2af)#vqjTmnl*o2G$5zcVeKw%-%*xC8O@Q=aaAk1r56pBV6a;?{kAF2CQ~H5@k` z-@7-S-FkZefzz#WC*0B$kz#qia8JLB`>$gqcsQ&8j%I(mi8r>J@o(Wx?P*5Nlir(` zSX8>^&2P}aA+%?FfjpUfR*35|@RnCH+OznySGaZ?pN=@2w<6BrGZ5$UnTYd3+)YEf z{S@c42}P)Wr1t#rW5=&=aY(N$O$m&ygJ(nF9q`cv@$~uY54H$K6mTo=MxC?bpYETI z;L&#sU+2DV`nvfJ7ocY>j9JFGZvZO1A>SLgpka*bpo&-sq$^iJqw>ahZdOux%B?^Z zR<@TxALY69%Ns|KX9PFMou)hsqEmy9z-;fD&z=%0u z4I1~WBd+uUBa@@#eL+2DR!`dJ)}xN==e&RV)%_eNR0OJ(_Vn{5^ekV=Ic!bnTaA*c z{VMYaZ2)#n}Y_#J^Bk3*i;+BEJXIN+eA@9+-q5 zpv&Q=wT-eaAMbGYxO_gE^B>dF{FuHFoBJR0xN^%2lXZ$d{|5X&eHYPpj4yEpEcPIG z=@iF}8Ukw0UUkB=QY;E2PHQ92Ddv{(IJ?m}^b+6++oXS%n7uFCqH%f8u_9i+jQut22Mr(;IQ~ukF44^4QDYTRu4yt_i#TsQ%66 z)51*S%+MR_-fetu>w~Rxtq<)FPyhJh`xnI{=OY&`Mz>#z*)EGcy`TAMn7`Uw*1W`^ z|IPmb&lA4+iU0E7T>p#n2IVU&zw*~^#n(2{*S>EOXmLs(`o2Y=l_o5sh(58f91%Xg z+i4b7(mjnXzU~IWi3|1P3ajba8bn{@3sL5$+82sAZis>Fk%7TK$$!1_UgevCndTW` zw&9I0zZ;m_`_TBX`Cor!{zPl+#Ch?=`AGZ4$mP!1Wsi8-6Y1`Wp76%1dr|tcr_vWH zZZ0#(bG9IE@n0JRNnlkbi?9w2$4z}6AKs`>SV4D2-$2|F7!ce&am$ED81To_`~kts zd*UWcXhdEg8WXf2($mM`QN 
zhRm;}Kgur}@BG#(x?CtJ!;cLQ#shbP6Tz@cELkPyvuBTrEACvKxEfym`l@@YqQ$F` zG3$}NaJ+M}X7c3Jig3f!>M;L$-@U$>lj6!vk)q8}`<8c?&34Y&A9#Lr>S5uJOWrSu zw4R9`I2+y87D@{lLwg?;6y4c5vGbX{w4R!<#GZ1-!c5d{eU`=*7SrQ-_Fbcn8_|Lr z^kAX3?k(GW+sx2UYPWoQgVfPz!Say#8`gI_-@E$Y>c8%%>!)~M&YcJ)X^OfuS$3*B{>f0nY9trB@Hwf zI$?Zmxfykhns}3fphwLpMSV^Q{#>M|q|t5Acy%*oNF#54g$raW8G;7nR?)PQ3ixeN zR+%R5pWCBDWHqADQ(6a%#)>$*#RFY)hUx0J*4$q+I2&*quPp@U=| z9>35`4>PX${2-A%BX}9Vx4VD9%R93ra)1_}aGo+5Z+hdFn;q0u6A4L7?ml#)8$?G( zkNkThp26<@;9=rsA0!-cGf#O9!+ydLj>Nz|T|7hmTqAj@A8;*-_>1wOZ*hFuv>N9pZT+as$liWL_Z@=N~E5UE%@dHU|fsf*Li zV&z)gMM~F-`SqcF>?yMEnA$O2Emo`%l}BUy9+xb+^QDO|h4;UH?B21N261`Q4F68| z+uf06yXFkhlD%VxL;J=HQd-G@cxODuIoI^7XZSER=qE$s~*StuyEvtwdMxasxn_qIr`|4or`Z@jBR{L-1ySN=IF+jXu)BZA zWf6Y`mqYtMve|DR89Oo_m^^j&!qkO7x)dp1Gkr2@TNg4c6qeq3>6*!jV#Bjr|iA~;mZE@Ei!aNXRfSz}`X_6TsK1O+AR=PqGA5Gl}0 zb*BVP(xb@;jzQtK6B^%#QAVq*c5KQmlhtv z+6(-^Hr+Mto~gdyHFI)i=7qSZbQFRifOoPoR zJ|1!>ety(IL8VGSNA?nvl@bJF5Eu^6REkr}pb7Hu5e&aM3UL9T zp2vrONxC9<0@nne-!Uk7c&|GEDNN_UH6QQwb>jk1RT9CFI0jh;I$fP!0A(*E)J}_V z9qkC}SBhJK-5}BRbqhmy;4~#bUHX828Wwy#>C>glUuh&K`KukLoLdnAP}=fhwo1`f zIq3=)zFvB-bh_!SE%&!XZA~Eq0hOJv?p!D+3>|))TX6f!V_%+hMROeyt3&!GiI|$k zjD3@XNTqL*C=CKu*ZUF%y)S{hI^(Z=A1pb%_(ild3T|Z7%mkE-BG+4Fdi)6w9;PCJ z1JR{6&FF;=g*XDrlgDyrziA^tw{)c}#UKRTw4lFuPxM=<;9i5sVc6tMAK9x3`DA%@ ztn$5>$=O^C(WYadYLA9vRgQazO{Tzi|6B8 z_#DC^Tsbr1rV%e>PPmrf03#pF)CQ2TE8vQo`(1$^$P)&-8J&PRNpy1D&>uIr;)c$+ z!OtKogBdcCRuvB_l2%iQy6}G)5u1A<1Ni7tpsBkXr#6PGUte)=#kBpc()*=9EomG( z6lx9)jaM&JFULja*n!(e$By1UK6ZSnIby4Nv~o2=JVP@L?=-#LbpPh;2C(v*=Y}5C zi|Yid+ z^o9&jPH`zuq4h|7>a>g$OUjcJ08oEWg51Wq`R4Eq3^woWKkYOM#L)`WF^I3&pr^@X z7}Su-YhB=xGEyk7^CX(hMR|$P$B0XW$)kcLlV`(iGaF}J^96e%wmpw>ODBuNCDVaf z`+V;9h;=&~p8Lj+PoUU+&Y4frC6Jh&j*fU{2hp^U5#id_(J^$*)h9nm@95wM+#Mal zMU;~IPL!ZTvsyg;gMndzIxbwMsEHy1ViJfOp{o!@^d|9aS%&2H^!0Ug+&2jOkm@IZ zvxm(wrpjRv=_>q0IRE+;?q}x4$2kSJ4lQKo-#WNp%e!@8A*U?lxqWr)>SXor`|u$7 z_EU4V<(T16W90U*(%EJJ>L=R@`Vn4a&56x*AD~lwG7u zz_fs3%FLUffU+`dS+WD5FNTmjRnHOnQJ7 zIaTpZpg_r~w5{rx{$)ro6YTVEm4D?-N zACjqI{Pj$q!nnO8UYN+w4fuch7l{79DO(qa@xtV;!OvoudjI`onR*k0WO_37W%7b} zp4uZoZ8T6sym<@QP8vK3nEVm0;wImqAOIZ|vMA2mR zL^&A9wyCyg_KJ{EmQN=OpueF*FRLuhZ)Dm`pI(*#fU;CZLGkKQZBnjLlcu%P0~iT_ zMOR9!v;sM`S(a081WjiK^TYM3OB7=$hoD(zmDsORx>r@qD*3=pnNWic?Up3X-pr)g zfX@n80TajdssJgWHn3-E9+Fk-Nx>|{!Uc_8CdMHUj86$d`SVAM%6A(zB{=E=xACGf zvg|&&q=A=4iuET!DrbgN&N9;7%2*K@!VO%57%CvK9|X(*wme;3Ubh!|9>2re!K@nsCGG_kh?WX2$W{vNEtEf|F615_Mw}?oxtE6EJR6@Kf3*itbT#fg-j7!*n%F z5s^WOTx;Ag7&mtMg=|mogVbE7JcT5i!b>h{^GxN&7?~YFh%q zL3YmVwz0PHlVj~6<44)~x6hBA|Dfn$;Kw)Kzwtp;wB+3TRddagS(E0w)+uY)K9v=2 z4!iE{kCr-PrS)QIJyfhs?`(d1^X%RaOLsr45=+iS+Al=2FNTaDLvFt`_R^#oypzp= zmQ=0^x~b~%EurHJYa8CW@TV8PZ6PVuWW!|Vgc zbB5)n;a9~OmL7vdHc?J}vS~e@EFzl-ZzqvWq$p}nW5iQBh^I7xAi&5@nRtRdQi8<} zv+KflPDo;Q4?xYy)Kns6LDxhHJ^lBH7&WD9$%DnIf{9dWDxlIzdQ4vuU8JeJOwz*r zLe!4=`8GWv$#R9L9g{A#qtbVb+S!*t?a%=jGKOrGFv7nUZB#}q^Ur{Di48ol7m7LBq+X# zpfE9lf(W3L5M)+uw6+PXD5ZPVRjOqt)v_KhqeD<6tH;f~cC8lW>J9mVH;b+b^aNj3BZc0n_U z7mGqBB}zf`^UH)wQYn}_NYZfhB!O%(fow?=ZsJuW{z)Y%x}}$m?3QSY+%G~_u!Yk_ zb^RV9DCNA2zKB~OE${P4i;vM7l8B300SSLX`H9jH-a?cj>XImpELvwWl|dqD|KA}3 zm64qj8X7x&yM3&E(lB-j1iPe&`rrW5XPg&X&wtPoEx+)7%iPc;X^XZ@Z3#C|ZI`6u z(TciQMT1z;FjFIGd}f6YEA~EY5z8+`E?ti1b%ZRB?ZtO0Cn~?SG~6WGSH0Qv*5>=0 z#k@v5fV6h>)lpd;^&ASs!|0DS>)zV_r`unkQj+1i^%O|s@Rja139GltX`3DN7>M1Z8|_fauqXT+?r zPbLujO%jYt!ZJI3wIoRy1Mnrt4Ze*@#0p<8zgIrpHd{TPyCY(i#6gg`6EVeFV|0|$ z`T{zNr|jRQ=nkSpVv0ZyBWq|r`l)3`Oda~voDO2@lYE=SYxva?qh+T=QBn5)tP~Zs zII>cvsL0}5lSN(i3=a-?i4R~JY=3={(#(v4>~lip|Bob{=mXOW;g?Cx3D|*PQYTG7 z1@d}QeB`7l#ezm6K?2X}!{|2Q+S3j4^ 
za?t#I9+}0Udjk!dBpS-n$gFtA)xkiAWI^#iP>$b{;`$3J^F=7G?4(Zq35xj%QR$Fe zWi=4((AdIA=^db9F4?^n~*PVrPe#B1c7fbH3*W@hskwpY>mk;^DmIY{}CcRax16cws*|? z!HUR<)3Fn6;)%8o>Z7IS->;uLIax7Tc(-(_G+aGZ5k46nx_3HSwmMd}PApqDW1QLl z&Y`yt&9;76w&x*aJFtC(LI;RO2*BLF4%qdMy_0q z?CwM6&pyfG%Gwp$YTw3W+KLb*L&ozGucd2#D=%*njv`lT5blvZiaxd8O%z&c0o;Ed z(wYgwpQZf)#J14jfcJs%tP-1HA}Q?~vJhfN4LYcpPa__PJ}RE8=w54z@jn(I_4KQ? zm`RNRlIydeDLiIsmQRxn1& zdn{M`N&QX#nn^MdeqJ9Jm)L=EP*bfu47`zonSx_>b(U~bQ{LPf<6+x}I#oagH^C%J*!r^f<0V7x_LM_kiqy#2RNrg_c$U2h8pi=_4qg4M8 z&@UjAVnie6Izna=1j9;jA+K!Q%p|`xk-Wyqz}+uTeHjZojv{D#RnI~JeWKz(t z?+;B^Pd8653m>^x7pvJM)@+K_Y>8BDoo$E~?i@G3yywoQiA~rh!Qbtk>JAIhg4Ge* z>c@G7WIg#Un_B5YWz`>|!tinc%;l3errKkb>%_`+(aMH#!#AtPPmZ6SG>o4fUp0}b z#dkC%+^ zA1?{@j=lW5S3ovPjA@eDHK*}oq5`B@LDKIRa&7e8^tm?TngojO;a}t0)_-cY64zE} zwe%Z)RhVljlDIa?sn4~o#*;;I@aLHil%l}MHkn^z(pqflgRL3a3y=uB&f%^1)H`n0 zcGkHY>KfPM&*=yZNC}P3dN(n}N>I_pCrJ5@A<2-y8cf;5${R-0HGLGoVLJ3|*aZ+w zdqW275JqXdn<{qJ=%-2xqLjw7 zrNO%D_cfl)uWX;x8P(J+lwKdSW=uXVDZ$j;lXZYj0=7{)$e_7Sw#KTyXsn^ih>}xg`-~B+VJI?~`|ltK8ZDie$eF(XVfD&#YWVj=HBS+YT{U zVJWG@!*=M<0GpzPbzplk9u*eH3Twr}+G%^VaE+XD=jg=IaKSV`U$}v#6qnxdPI$w{ z@ZM-Kwtamgi!HGv<521)qlQ*uk|j%;0xIRBum?i1HmUU}6xxKPPe2#layS_J?ojySBH7gY6E^Xj_i_|1jr?l z`2=hfPO*yttdUlLx8K$6agdFmgo~Y7@l3DZ%UmhAVbI@)O@x}+vGDIOGQ#^5{Ut^J z7EwICs}E*29-qV|r27ZD0<^)_C|P5yLYAbB8;t(XQ>UBE11wOsZbT5fi_QmE0@fWWi9O8jHwKmTR87IVQz8c{*f8i8^@wGTVplNVomd0b+iUL-2+kE zLCKc<2eyEi$r_`vIUC%7ySE)gk&PV z{iH9g{Z+BW(j;LC%Hqbq8fXqvc)Ai%5|*G{@0gb`FoVGw z)|{KXbKQ&v$UyArF+>U90IIT4cn=V=G)2UiN|pFBG=-PKzR9CiiP7TNesTjG#|Q-B z1sfuHgyMz{;b*vwr!$8uoxa~?g#IHc@~=Roj8Kp0>5g1+J!qS+Zk*XTU%mB%bwOOol1$&I^TN+5+Dj1`YG|f!4#$-% z$qME|y^sA16!H_F_4kX+-ySKJY~M3Y?!S9z>QML;ENG(Hs}qdbXdXlz*P;d2fbp%W zjT^mu)X@_y=%L-y653ZlB--K#!Z7&uktGG~>k2#+)QSBB;09l0&Bm0-D#c07NkLzU z{Ct%&drEUACFs|sQLlh(Dk%aOZK99*8vMq9(9qd0tDt^)M}m&%0^6K0Qc}<-eV|IW z$hLWn7$Xbz{OVcA!N{G23g}0nQlIvVR2``(^fPFiusK98@>j$j^Co7MXVFy2Y+o~B z#+1PJnCrY#&@AsUPs1K_RUMAe(p7Bp=}Oz0>5RO`JXdKDSVL`xDcsdrfI4fGT-q<& zW4>IwSFU&~+hd-_XYd&iP0`-x&m9+KcQN*5%lV&|o9dJIm|L;OJWY51^K)85-8$bb zakjBN=INv033cC?_L%FvaVFbiZkE*g*zIlRvuK`-W+u$LT4j8+-(Y4igZ{C{JiV7V zP*B1N7c|0bw;3UmhXKT)wiGBQptag*tzg3>!%C$>&rg$%Q2swX@&b(LhE_m@Yj z*3DGR6uwjXcIj;O+ZD4HAS*rh;M|<+!38)$YIwgey0Zn^p=iUINafkcx%1Hi$Rb0o zSNA_*+fP+f-Oyodr?%%W+EXsJor{*AM;tkMN@rVn;GN*x!8w<>`H;Bw(8K%>D~?6l z&WYvB>r&pO|9_kLe|_7`p`;@?_&=@O{L)(0?wZ4lJt)}mkpxY${roOn-Juz7 zKqPN-`za+-^ceqsBJ?0W_|#}2!=N08rA`taPzGAGYM`Y>;pv|sN)jF@?ie9YB`2fg zkp(yh?iAs4(W1wlE3lfk@!5P1pUWG&a+v@k4+4mEzKplOVraMW`JN0tI)X3Y3%ksG z5%Og5aP0{>iPq`dnN&cR*$Q1?P+hn2m4+DLYX5> z$7#(ICX$&eOh;{!Q%Wc4vavalgjkM&&R*>I(QLx⋙NAd%S)mO2y^u*s3LY(o1Z@ z1%Fxa*x|;82am%Atx&gW&7<@vVWSsx>H`j0ah|fjlQqly?6FPW*qaMqdkI~{W(yw# z$_^$`t9Kj;;6C8#7T6AQ+INo*Aiprs@92jak#`U;$m9@`8Ib^g9I8U!xh(oCOR!1A*X8xW2q{tX&&nTU=5A1<`~u(SW6sQRUJlqqc%5$R@VS_k z(#M2gKBlQJ{0&9_hN6QMkz7Q=Z9!bQRcabHS@<{M|7jI!g#o@PNe7Xis)HDV@;YH7 zBYYm|X&kiO_;^lup9SIpG+hIubOed|1eM3po6nFik?2j=;UsFIH)26^>WE&a z!DkZDw7NT?;G0P9$!MO&v!GNS`jw>^K%#Dy%hNVa>d|y?H>z5bpp-h^@Y2I|sFpCG z%n}?*g1X{5)b|d;gI9;j69jYN*iwC_+oCzCMOD}wQ0a3X(!DXp^Fb12defZnR-_%1 zMoepCy^^N=CiDQjXlfWk?FzM;kdt6*Nt_VtgysOG7CY04}McWI8QYr_by(jep z?`lbU!%C?=7<+4PsE)4k3{Q6|*BU1Q4%{wt;NXHs?Flu!pnu?|%NaLqSKb#rrlrV2 zLOs8IPcVnj6ZP9JerL2@u&8Z^+%(Av_9@reuYzR^+;E!gN6LOf@hF8WXSyp0Ai*FK zp$AQNEA7fnLIMg7VB1>=v~@!(=7auCF}{=$0aes2bo;ld)lv%Ny|nl_X18=LAJjq97XIFl>y;gwG92Tt_)Nk$FTOplj`Q$`sVB-PiraZ_>)z9FRs?X}>< zNf>rWcD_laH>^*2IUhf`xVVB&bSh{{X&hsdI;NzsW{8iWw6$yt7gGk2IyK)Jq;22Rj z4)%x!!8?#t)B5_2$(?`~9JwAp+}wJ!diD<7lxtxRl4@zFPZ`ZV_%(+QcP}|Smu7Zt 
zC-mPK*0vrGzO6t68q0)b$$Td}q-;!?L8|%W4}B0GcRgB1lLQJRb=ZLar+gXBU+a&*kplLws!;TBqJ zfv$qYEYeQ><+zGx?R5p*JuRSV8+j0VE9vPfdSL9~<&6`vzJCo;N1RE(_IXMk@&)84 zLA9*HSS7o_ebe|>_WkTX?|jGmws+1j*El!y@Y?*QGxKfdV{IO>%@gVBiMDxTwan)& zW2nB`oY|ahV5BCK?!a|@CN?g1NXwncN?M#Xn&Km#DS^fgm~6!@J_0TR38JK(#v~kx zXHj{Y03ep*`E(=05T#N!N`#f6)+Un=a-iQUFf$^es^E4{_Qs!afV5#Wf#OB#+L3+W zvbhz{ma3PRRouR4>BMtW)&TOPPG`6_><)CW3I$^D;yG;kb_@>qJJ<^@L!v->=6J?2 z^8HO;G@c3Hh8+@_AJ3s{zk5LNumbUHx+j-1Qh7hC6mON3hwjxpaeD_?5f8>fMW)UI zNh(BxDyfbBcdEze8-g`{EV`c`9mE*2=$~$eG-V5Ow zEBdI?5zY^{PTOaO<||p>%2C_W>bw0@{U2B!xa~I}YFGkl~5?e1mJo)B6m|O3iHO#qwT6Z|odQn_+ zDO!1%ij+UL+G5r!(OUJP)$!RU>D=1WhR@<_F8N7y_CIG=9=2HjGQaq6X0j=CCLS`5 zlJ@`LI2M>F_i_D(Q4=<9bNYt83VqD99Oh9A^m8V*<&&7%lz=*j&rJ#Xe4U;w1vynN zPOAi|Dwmd&ny=j{SIDbFvA-er>LL)zXxb=rHO#xN%3CR=$z+&p_P}AtsE3uv9L*ZF zje;ZWB?V1N0N!=_$U?XwGOZIL};grrE)aE(97Y9dim>d0*rtohq!ot2UP=< zfTwgo3{akAHSmAZ3qC~@PrKpqcJ~B?XOyA=YtlW)#T6VX75^pW`v;2tABsMsNY!A# zc`OhZyvQ=;g0wBfo}gi{bP`!2_02+z6M3yCFS!1=74Irx)^uDEb`rAb73dE8>s*o* zMyB9_Z(sIULW7d`H0fFz(0HbT-cT#MmeM5+1>yPx{32Z@=qQSkk&MhXcnHVrM|6w( z57u1(*JYKl(sg3#x*0>Xv~esO_D#%lnam6yoynX%6tV3~bZinjthi_$iIq6T5+{s2OX{Y3WA%H)`aN^@ zX#Kue{Yzr~OVRq5BgIg_v_@^GB!jy}&Rdy)yZaBPDBRx-FT#V@{`O0S|KiXuMqb(N zP6W{eSQzN9N6h@VrfHP3W?{LX8#i2nh)PmwvlzfqGx*V^$>CiiXns>Qp@IsVT4F&r ze@06Ftbp3nq}}u~FjVTIUjnzxYoOeMuPC9d&&jU)hX?y!Kf;|q&9n1A){6# zXwT&Ws>qMeR`MtH)lR+^oH-*G58>5AwH#>bvUvGl7rT8B706WoD2^WqsE@K>z+lC#M)Fel^ynZ*BbYd>6>Q~%&YJpehJ zr0Dk0qYQ}T{2hH>nC%Csex_F^^?BUn>gyA}N>^EgMX?{T%i9MQMw0O4OIWIiI?FWM zYPa#CK&O|evTIb@&=WV@h#URG_`fN;Js*~TZ^qM^`}3Q_aVs0Kj<&;bBiw&xN_Wh^ zAklz}P>_S+An;;zVi0{b(&y)(wy(;rsuoj-dac9s{< z@)6i5dap*$_KmfK4vrsGj%C?2y(v=DI0Li&&7ngd!KXPL-*R%=5I#7wVlHjI+Oml z<1EeOPkBQyXBOQSz^HVqX1VZm-VRuttu(01N|_T#b?izFT2x;$Cb)|>GADRlCxijF zM?L#Y@y;z1z7lEecXSRrKn@3pHYd3cw7?{DPgce#%+F zFAbYN%w92bF=P;JdmrcJ!+6DB5K8;2!XkDUz@2Rq+a_I;o#VU4_LJM@$MDgAZ0s0z zSudGdf>X$r#>&=h5}=wPj%di07Vn5jn}znP}cAbNBQoFT{~)vHo3 z+E{^J(|r@QMja7`RwE2Ttq^Bq$~&%g@ z@4rVr;s2tDgf%er92mUHwvxotNVkY{fJh`n`%-b91@4%|sgvqpWOW+ehH%yYO*M2; z#0YB==?lN6D37uZ-q!npjnnSk*Lhc5dTeZvWx- zNZZ9&n_FyiN2;2lRlI1e2&Ipg!>7od-4naR+h#1{vW?M#O(E;!%Ia9(@X7Arg@7M_Znmb4h%Vq15#uqRg7 zFBbMke1p-#p-={-N#!+R=k&Qq%|@{d%*1xFZ2RoqIdg2!331PfNGp7+oC)PTE?xS4 zLwHL#ODwI6l{Sc_%&}c;(?N05K`0&43YXyy8Gk0E#=m&R8mjGyth7%S6%mT`GrEiV{@*WjT~J7*i?P^yfH_A711QRoYN5!LkV zdHMCet5yd}>ZpK(rAc+`sDqwgYf@>4_6Ew8?r1X)qW=W-rV*j|{CY78HA>&KU(77f z#n@`6jkCNV>iM-1$3C16I-xhw&z1Xx61Fh?h?EM*Zgqj-aiNl z7ddfcrf_82>w%dsNCrudBbg6N=LyvdOM$8+-oxOUV5zl+1KKO#4HkTY*s(0Bn+SU!nv)qg&#Lm1Bjh?4%$%E6|Ln zBW|Wcx8oM6Zi0^t4NDl9BMh7>@k%vM_!%MK8;BT_TevD5fNQR4K`dyH{CtT8+e37k zZ$|$3a!Gy@%U><#ubw_UZ{7IFYNyoYVm{(EV*Z-xy<+~lSpFt4f77gK-n!%SGBk<# zO*8I!>$XSbm6Lp|szI!3m}!b_I3R8~@YC{xV@E^#$CrO(D;npSLyOXR+v-Q`j?OTy znHY!_*NMe-)9!g&V@mpJv3T|Lz7K8dpJdRfcF$~FM&bDWJI5!EhnvO1mGjnBkE|u| zQ=dDPJ8ck4S4+5d-r5Kx7R##sNmlhETLpICwoJ9emNbY<8mI{>5i4CImad7FHj1T< zIB2kRW2|(CSh{20y7L8(7mCX6bbPxbbbu;80GDcq?;f8z{-LezmnN=wg|tKAujmsS zTUwlXsT&}^O*5c_B3eIjGugK#IH6|*DBMKAux>-6!01#GZU{tu2{Zxpee__}fu=*0 zg)Dv|HGlmI_e*1%#rhq=})uq?h~s zBam`D#`E&Zh0=QN;h z!`S^Vi}^@ikwnl@V;k`QbPs(1qiQDj9huyKP*x-I^hmlFJSBk00z_9Z8w?tuq$CK7 zkVwXmRk|X+P(INYt(n<(b7LmGnkC!s=hUo2exi)|b0(2Ib(Q*LL&iG^hK$vCG1xKw zZ}^e0l2A`67RhFjG%k|BbUj^>{8aJReJjy`afP~Mo@c!HYl0jh)FBx!KEW~YU4~z@ z@$Uy74&fl!XBMtz`RkSUD&G{|8ofXI?&-Pexs&jeeR6*1@%fWy;3+$Do_Xp%*%4iL z<Rii^t3y=j(@bi@tusfGhjT%zt{bsdu}MY{>a~1 zj5RM&yRwsuy%l?_49*A)bgOgz+Pbj9|L#%>8QYH1+$MUeN?eDCGkUR>0*tG3KPj2 z6uCVE<%Z6t2GWq+Hp-hXK=`oZkP>05TtJZ~LCaFeHa*a30rC<|tP`m^i$R&{(8K6L 
zR)20l)!>hjQ#KHynk@r!ZkvHy-7F&+z<{*q|RGvNq=lsP5_VEqW~)ntha{O}QafW{fC&X-i6|A4CeBm?|4P7Ka3j^!^C^Orqnh%Vpxpkbyp-1_?Y zd*}cA3$r-vdgp8)w)?oa`*`H!nb=8}c+wTk?-Epdj} z5w&#aL2)@9oEC127OxE*BKg_&Z*E^$UN>7JmNZB5nje=`Fy*uGlM-hnZ*`=^`KWeH ztah7NyKQ#!d%GX(hEHBsxVvNSUeVnft-UJdtr)kAU!S~=*6$2Y z42M@t7ss4iMd#Mpny7Q<>~*=rFlyR;Dz^Kqxce*x$$mKa#ptMk00)7dr(H-*@ze-)#Wz3&C}0Q{%>ZcF zf}l|by65Ra(WMepYCEJfQ^Advsw_M(vqPX;(33@H@2W(VR!Wb-#l{8>XYZ<@NpJDP zv8+K)RYhViD1W50cP*cL_U;$%iGC~c*}FJ&q(ungWu!$pN>^W|h*qZJg^_3jfpA>H zqTEKewm&_*w5J`aOE|Lnt+n^p&f4EAeSkxC_x@8Js=H4<_>20r*u828 z=57LC#|H1a$hm0C`x(ke(J%4uC&3X|x5qj5TR0ZOYT59tFvqg%?;KW~(fWj=SmLj! zfU{OQ!iuCd!Kx;}MTpce;w2qa`Vgs%|WYOY#KvZz+xNaXbqr=lT2qcd$xgdo=`NCfMGQxP{d(2~(I{ zoE=-Yd)^3`#NlZGrfLI)Ma0v55%lr#6F9D3@WrN4u4M;N6H)HOUvte-uKDL&@n3V7 zMDEgGa~uAO%lbK2{W$Z;t@LMUCk%!ip;kI3?-@te&$^5yh8{zx>lsJC&zhDPc7|3y zF@h3nS5V4WrmetNkTCsoXS)m_R zmd=)4;=&b@+hv3=6}Yo22IG`*DV2jtU5tH~J74UJwSfeAg3DaliNjr7kqvhFa!D@l z_2|z&Uy?2H=Jo6D>DRB{`})0M?6Pq64L;*-hr`Oj@A2(FXiL?NW#sV>j#cew<~doC zjS+j)(c*|^v}CZhDdLQ}T3n1bM>3;XEm_g*mh7mz#m!`vh$ouUk`v8s$%VGHB~RdX zaI$TflkJMwxsulJ%ce(9n>Ms~S%(8UG8FG+1Fp|;Ed@;K1gUF*w2($qc=EOS&D)1aVW?Ut5K64ZO#Q0fpK5UVgfi(FfO^C%b>p`Wf zTZtvZ!3eR@|1#C&uUXy^k1ET1y4sh^N>T|WlOz~A9*(uso#rObVwkJ$0rKU)a$gQ{ z(;QzK;U``;7xTLc7rdJkVWSv{4ap)PuwKX~l2UA!v36(`sQ^Y5fs~XC4UtOhTTC;p zRs0gV&KjsfRejl-q;@A1qL~A_Ndm+Na?KD?V(`)kDWTV`OSa*X8rmC-yeF(CHACpc zN+hd@1-s0*%~^K;|CDyCeLFc-s6?wuctj|47J?=;oy*jsYP#<4hcvqFsw}=m(<7+Q1#% z#c>0Mq=^C`E7zMTozy<_@(%iVFNr*?-pTl{g6i@LW(~~kGXQ` za!0AtFw7;h*+@2+PWR;UBK@b``Cha$`=z*meU2BPMVIGP<)ruq?u-}`+PRQ$6wE&$ zro_$y`kFJF-nLumKkjv}k!FKnOgLR3!ngrZHqI(GAf>oYJkYQG$R^6~%gK~YCpc1* zl4P@NeoBzdDLUHj$(Jqt630Cyv2(kqZdd4>fY$(_)c7TxVM1l|i3K(VOL^!1Q3LtPhf(b>FK%sVptiUlVfk0FSlVmM6*J0BP8UxtH z+BK8h7YjziA%b=#wb*G2#zGx&qH6T7vn+$+iLlZ`YOuqo^d{mYsTteiBns~J z^oBL5Hvl?ogND|4FQ^|;HBm-Z3P;-m9r~k3Gh<5)!!mrI>VWAb3xCY^nhDhH2@<(K zqk-Y8f2O?@hO4z;?IG@tD|57TOgw8HvyMo&Gjm3hXM4tao+}=yr!VaHx%OY}y4HN7 zX3J#J*6aIkWbdH=zCSl>%8_~I$ajv6H=l1g*K%?7-xsZ*(F4o)kLa7(4=!E*o%Q3j z=hvTGKl#9sbit9Stla1M$%19$4d?fq+jFrdUAPQdlX=V1S=BV?cAIATlzYkGZD-!N z`+VNHybI;uE4nDYYQJQ^wWKb+r0#0hjV1MOiIWEo-gGukKJwVDM?z`%bB0F5J2vN; z1#50f7i^j~a~|(_>DbQkH4$+s5~wZ$8(YwwHY>ae1pq9WEx^eHb2>Ai$|u5{=?@E(FbPL=^_frgG(MpTttvT!ybujBw`DFi1J!uG{mwZ}XTYMB{=D)(2b0iuw*(0X$4gB)4u}B>=4uX9l_=i8b zr!IrOxMB^TJ#l-*^SlwWHNw1R!~g0v$H}*0vWTN(%>>)(hOlHt#N) z<#-|)V7N`ja7pIpEDo5Rzv?9fk7Cy1%uAlZIbU-SBgoJ{X5GWh{KJ9j%sEFFU0<8v z>n2{QZ4p|TKj4PJ75*Py!1SE+0?h_HPy@0Og6iP+T!NfJ1&`2wsftIa=e$fi%T5@r$*?e7-j&$_Cj0Nf{*E%bNa`S!~JPeC=!n;ntA3f z4a`~{_oIopWlfAh&aj3kI41$*_uTzi5ca#+UKiL1EvE+QwhceYcV#|PI=X+fd7K|@ z9<3U)-g1?vUFA1jl~mohg(f#{4>CM85DX50RpuOQOBfzL^ZyYZxIX#D7|F&SBr#f0 zS7O9fNH&8@_rz%c&3f>%rZ6Xg80|Z2RLqyi2naM&IF^V6Aun3s%d9-o#85nTf)vpg zH`g7jz@b8@h7}|lf1D6ph`oSshmL@bXcmy_FabcT!##7^%&n22s)pP8=G^vq7^wz; zd)L5 zrran*auz#WKp0Fld@dTLC11ECsaC(maL6o30n2etMmg@@1uUUG0z`(0-xg#O6f73m zDsYNO>&rdQ95R7ni%HH@%=F5jdr;1jv)e@34MMBz zf#TaH=gT?Jvdg(&DaF0S1B-9h-O&){Ah7OoA|xRkcnLWTec@#Y-k4h71rc`_v4~6{ zxqyTrjTgBsNIHOoTod~qDHWl*?|RH57@Mg1*k3i`-0eP|9EWEU6x#DQHmkmETY||@ zM-!yVD`lt{O7UG~Ks2F4Rt><+`O#wP2>OM$IJz;oZwIQSr%JqLjde!@kzk)fR6W%% zTL8+|j<5^`b^zjAlVbS6gDl6n>^k((o59M$`J3Acq0pTSz&+ndGH{fGdRm7Hux`~d zAWqC4K{dMbF7xOZPlG_}8B3$WyAG)EEiUp!dGN?h?*s93% zT6p6Q1v*pbE+d+G?nBGvO%!_0&$!kO9tb{XQ{ryD!-;SO(k!#thp@hUB;LdY3~Zg_|slLFz2h!Ro`=(9KKHtoW(z49<3T* zccFbchjV1!vX`gr5UZ^I!gN2HPd@rN$-LoKnjZ1GEl-!HtVO!?PN z`BzQ(Yp47xZ}~T;{hJw6pZ3?!Gi&Room`p!9%m}s%BRi6Bi7NzX&22q-VRVWbo|@A z!JM*8h|zkUmuN|E4m}bZ+1=zbXa*%FYm%JogYQqQVidxiP~n=xFLogB{ zG5XuM_W|bb|IB8zld#O{%lQRC#e}LC1xM(*#1X!dK9^Wyx4}`V-R!)k=zEDb%}0)= 
z4;@Wsev7`)bqNyNz2s$pk1xp~NsxKjKo6)#;hW?YbJ^x(fZwPI@+^}4ST0&J-x8nVur6SMwyd2n%BDq1@zHg z6>RfP?^f2z>pM)VqiptTPB%-o=(Bw<4H{-w7|2XzrZQ5_l&ejIZ?UE(@&zcW<2ddZ zFnTvW1o9cmnd=BB$kP!MzH`q=(c{S9B6%AL;(;b6FR)LIXukFpojcb{w& zg`4K}v}m|UY`tk-N9%^`fU(!mp<#E?mVdA>pSIxlhx>NU;u&o|8yE{**q`=P-83(u zuMM9WWcd!?g3aTB-D2k}s2%P4DEnRco#)uSc&%Cz}sX2156^OsV{{#ZIZ{ zURk+R_r)IGBUS4;8EWENM7({dQ4|T=k{Js9P=`MjgY@@jbju)IWs!L3IQwMJz6P+A zQ%{p^?87EpNKu6lJHEn!1QQ5N>Q06us_q|n)S4*}00<2PG^74s0(zC0g%dD|XK5s} z3@e*;9V@{+;L{uQ|EZ}X=&|s}sZRizHt;Qxc zT+#bn&IjDlGJU)9PsxSZ#MANqg&4I8r$_bhdk5re*kIQ-NXO@ diff --git a/ultralytics/nn/modules/__pycache__/head.cpython-39.pyc b/ultralytics/nn/modules/__pycache__/head.cpython-39.pyc index 01ba79e80abd86712780ed9e0488e838c5c806bb..35edea3f825c54311228e302ba0e12b811db0c26 100644 GIT binary patch literal 21596 zcmc(H3y>VgdEUJCeYdywz>^?31YcMhJOd!4Vkn9a5QIn>I-)>`l$Kx?dpmP?yV%E} zXAT6Gdx`8LO|UE|QLH3#qDuDYq)JewoW!ZhyIf9HqEu4O+j)5MDBDRaLsg0GD5+vp z6)E5M_v~Zt0Mc>A2x`uE>7jQ`BSLwwegNf? zH;eMDlnk_Mv8(a-u&mfp5JoU{F>*syWU324;F88iI>V&l&g0;L0IdA)oA8g?P|kU)s0SL zrK{T2vJ>SmymJ19m#W9B$B#uhuMvbNmY2KN;;dqJs$IM6S8JX6T2}?Pu|)>D*{9E( zjZ)`SH|$0;FLXPhss&-=s{azE5Y3)i+EGRi6bKSC-0 zf+iB*0CXCJ%!=(<6$e+!vny#oGst?*wpq!Ia(Qq5J!8f6GG6wbT&3XUy!<;xWy&jf zQ}~_sroAG50Xbd?zcb#fH;3QSdxm3FX0f{qY5@=I9r@xbXJ5Jcq^n;=Z#7oU_2ktW z-3~Y2Zv~5ImK68s_BkZCjJl1V;UDv9T>K4w2}x*d83S|MFpa)>!>U@L-M6-wb3CRg z);3d~wP5TsP)>(CeZcUngSf9-uGm{GzEHtQCt0`cAfQoi?^i8Lue!Ou3iYac@RO&t7)aFN7uLx6Do&?Ps?+uQ@M= zh06Cn|1zMVQEN4Ne&B{{z8kOEc#T>F9+&Xq2J3#kvC{B8x3ke+Ms0V+trH;7x7P98 z`dY2i;ey^SVWn@aw@VqohbjRe`N!}i}zF0sE^V_ zs1sh4QT}SRUAuP6imb(Bk$DZsVbxDWPT;pz)IDhW{bQ$}x~KsM*jB&X=&WAcXaQwg z*TY6VxY+4jjH$*&l90vq>rw9MRlnn3TUXCKiU9(?qGR?B>p>Q)8t{afX3aj)&1zM7vxq-xhy=ugiE0gBf%oo)kDZ|9qLc0 zBfP=4S7j!TA>s8sT*HN(k5a+LItWg7_(JM&+>Pv(-@$M#^&m@DryHfhuBxv^sh+R8 zL6iQ%-+n74XShF zhJOWQ2vLImUjqd9!C?;=O`@eauz;2(&=ODxv^0s9pk&r2;I?T8pG0chFfZHcq9Cpj zI)rSXd}hmL+ly#dP2zd5gP)sQ^qj4@L;Om7Om>CkLW z_tX6puT`gSH;dl%O=~dYVFnn<-prskuM}6JpII>OF*dU|j6n(4Y~S8C``HD~I6asH zOrkV=3(wKBTrjXkxsE&8XWV|9tA|lHb6nwmuI~)yw+;Okn|a*h8+^H+f7R&c`&fP8 ze^Jx#U3iqi&duWrTqLW+r2U&_-yC39JmUc8h~3<_ziM4^Fp8yV)erFpa^QgLzokBb zj<+oJFn%KIS_4^v7jR&$1|%4vN|b-P1rV-x+w0F9Lj%R6_fQO3jn0a%{7&7U!2MD4 z+XS~=Gf$y1;B^oQ<~^t9jg>UFt(fv@94A$caU3WC0@VrD!KnqU^#n(=6oe*H=;=N9 z5z`w|vhScZ{(kk@Td7JRs9p8Fs_@LnT3?RR;YzC!`ccOBR{d~&qvC`s&0tK1v0CTR zS-r}Hv_WY}jx(^opGHXAH1UO%Vpy6Hb)Y^@@;-)DQz9{EppsUnt(qH+qPtmj|Qc{&-njffVc{8`s3BVlu9_Al3R@>c% zS1xGc@d)pyu2nfLo8p{ZZ{gXiAYj$C`1h?SC*=T)GjckR#UjUJTl2bJ^_MV+kkKeN z+J6$Sfb^Ec=nzPdYo@J|dC)v$9kLeeteG_rn+xVi%e5BFUTNq4)E#z>|0tA@PQHLE zID=%>*mOd`O4Hl|C3^G3W(t@C@nqoit$qraYBJq2`}SlBcm;^sHo;4aeZhhyfhKU! z6kuu9S0^!cl&-C>`yEgAOTCVoa(cX0iVR$Rn#pB0HXAztFPg%By;W0Qb;YlR-1vY* z)i529;$voSK7P~5RtYjp@=KJEm*w{KjR9W>FN+J}z@~*?TY>n2EEs&{Z4)SMe*d%J z_}E9kgG&o3$`3bGCvex5?=|Wngqh{;M#lq#M+V@bB^gRJXwqG-EjIvN4QL1Dlv>B5 z$8Q^Dfo_ndJFBXN8{k4D4vO=Wh1j_YnfU!)K?A)2)|{0#9jjMNEZvsi;GE^8{E~k2XwGOlcmKM)wWCA<6cKt29h1Y0b0Hb*W^HN-iSgeWCF=_fD zZwSF2Q|(KrtKkX=1qJ}7WcC(ffJqQG3`}+*{mXNd&_GfBYBkDNNwpzoAwN~EUfHO% z;yamY)$7))RmGLk(;Cy|l;2(tuPd2Th4+Pb5Y#mykU^;*XL1QiG#$%hb-&fZh~u|7 z#U^DYRLl0@5{WDe$ag>j95Zj`t>R4fP&S>-WpmQ1j#i1kA?*t&ktTG>yy%K)F9TiC z@v>eHx?oCl!8AxfLGgjTqp@BStFvQeFwpvKEu7E?Wf^a~=yw}RhF;J*w`=&vJTDtp zzy;A-A)o}}A2dVjhFP`xHc}fxA21Ta2JjS@wZ+<0n1%>tRPOHSl-0%0ZtbK! 
z)=o}p;yOkP!a>MhB5OSuZ|q0uSxe<FsD~_YS%H(+_0_=Z`Ie~aSicB#mA~CEY^pCW;*USoo-#O<5 z&m+K(VC-us+%O@yf(jh~)ds(_D7+qkg&?#BHsJ+U0MK%(BB29FgT6mfNC0z#G?WZ$ z+j<|c+03Xf+yH#`(c@J@W>zVV!n##h8MZMx1#4Z$rwr@ye2Nm%Q$+7{Z}q$|)ENA_ z8xRCT%|r=lfGK}vrBQD{mkZp+iaTVPN8K(!PQBF#d@vBQWcMTcYDnA#gd-83;2B!?B6nl9f4(YWz~yJxRDT9A7;MGyd7D~f!=B1>0{XY zEN;Kec1hTa`TL#3eBpMQ8RXQTVAm0y6?savS$C4jPcu#YA#+N!-QLU)#^SkQIN=drX78I!_*SL;0hkI4-t?=ulR;ppn8 zn8^D}GN$Mxu4}l2Igz3lHNm|*{pO$v`VC?STOUnZq~Empfqus}zqHWt42a&8+K-OC zPsel|*2f7uo>+15h7(m{umX+SP1t1@)JDpk0#dQ0H3A)WUkV%e7^U#)dsZ+lsL|NP3hGsSQQhn{xA23CZ6-yy- z0ty0wL_UEi!H4_tY9CF9^AmLV8|XVGyBhkFG*@tRhzCM*rO(~S?YkH-AWhfgR!HsC zPxh8um*%Ty-lJkWg)t89cGjU(M2C2xfyi@(-g-1|BD=|KZy|& z#86iFPw_cIXU1Di(kI+*ywx8^_EymafB*bhP5-9uPWqP7O!bh!zR(aF7^R?uwtNj1 zZ{SWs%ldO{FLO&$Lz41f4hccvAiHa$rKL!7?-McRq z^o6wSQXUuO6~SG$&>!+RMRp~#e1|!i>Ub?Lp^ghJwqnET&2nl1wiIy>xL&VCG~ z=9qUU*QB98fh*vELauY!qEOb~2ZFs9&*(i86KuMV#!~lX|N45jL6JbrNsu$im2O$L zXYr%$)?B8$6KF!pod`75G~x=CT38`P0B^#!Nk>dfvTZnCs5Dd5&JJ=h$#%RHMC~!j z&U@4zGQF24NH$-^?fBtaU3FR0ZqHw-!7bpf*A!H(&{taWp_?HVq-!l#+;2lsT6U72 zie0!%lJx|x;P+3SzO2oHcc9*X2@lrNjLV(+X!2bclkX^Zc|ELZbLa;gV}Xgt3_s1> z$5Ha1Wuv=M^1pza!4wir$>+4#Je`?&Ld>iU<(8*%C zIJQ(lL5+cE7wG&RrQqFUU$*dWWdE;XK__A4qv-x=wk52Wb9X2A0wD^Gq5_~OsA|%; zOYE)KxLe6`8LG(LiQxYn1{sOJTKLU^;P<8kR_;XYNtrpn7ufS6w0ux{;rZ~cz^tgg z4h0M9!zwv|@c~RfUp3DgUxAGk3cDE9Mk&1-bm*oQIEL+|_rvvz96Xh zsR4Z|$-aBfoJRI~LrTQs&J^0it{hTmoT7%p+pQ}gu<~nZ=wZ<~r&>YCF*%5hT0%Q~bSC|501^f7K9EVy-u*`DWmbNWbQl+AJvQ z(RZ*F{CiHlf>d(>Yz*AgO31_;tQA#lY?AsU>u8WxQ;4ZpR;5sK1V?ibME< zogVf#ZJPZfY`udWlid7GT;g=m{=)s{B8>8nnI((H`J?7@_}425P)$HAWD-%T2E#!= zmP_+dcjwY%oZGm@Tsoi4E7snfOH%|-s8!HKpjMHsQLUnRY|<>2Q~S`N_f*WCMe~4b zAEt8J!KhCibsLNRqI>Pg^3nS7qbH8xf7uPYy5dAx@7_|Kq-Xd?d}7(g6^vCV21|J6 zW`>^qKDcn-p?7gB-8X0+8z5K$W@vaqL#LSmOk>J7Va|tf%o*g+9t0PqAwT}9F+UE5 z!3uYn{my`eMh9jI(W$hRVHB%y6ac9ZwBXW{b2K(#AI-b*-+IlXYe3t`RzaoLgT>L! 
z1~mtvazoHHXx;r8ynd9y6I->qzXS{9aa3q!dYZQ!+S!!L?CkSiZJj|t&tF|V*X_0t zo8YYDH_Dt=-E~YAMsQ@J^jQhgiPAb)2k>=--L3j@@q9<-W%VUg=trBYxKY)XsIjm~ zz5Acw(hSwf!|u8O5(#7S>CT58coswJ;(Iq>Kn*dF{ zk&Sk3)pzM1&~R8TMEOS0kf5zP9CCE8j-6HNuVB`SJVJ?_^DX3}%t{NkQop0sj7-p7 z2@#us?c>U7C(aJ~K&j^6;S#hYeVBr~u2&kZUd(2osS*(+S<=}*%kp@%Q!I+&OeFlr zxF*dfth+m&(-Jkpa|o{^(+QY?m`)rqnb1z0j|#7MRm;=90wGbi4eaI_nabS_+L!Q% zk!&pnZD6jLsS)f1zUIM9Ez&c1TEgHF8>)e=8S*%(ioG6h^$Sn zq(BoTF@OE4PA)Z`PeQ7?XcBn{wl99;x`TwACe!PgxYYLTV^ zF+YmIBX~=eeNXT5$SCq(aYu_xk4|jG2+t-gg+q_~u2eF9hDBGDlXH%v7bB79-X z>^ML~BMt~Ku;(Vi7ZpWjRX}np4qY&-sQo*rh%yrRx7^t=&);AVYV=Ex*T&>Zr1z(I zPs}bZbDv@&&fc#fmoSC%3F;S^j3j!nMy`%LT;GzlAu;+jTrnyYVG>6u&cdD6D50a1 zeiFI3D8Y9j9B>(8kN0z%h!|s>SQD}mgl@PtY5YYQQkX=0z-TwFN7x?3a?&oggwvC; zB^mXarcdG&%_7PtHm;+kcE*@uRELRNqG@u$m(eRLQ9eJfnJfbN_{@Trorv<8LVVA( zMEM|6rdd=UBAxMai1PUmn3ebPi1Oj{OI~3VYK|Kg}HRUsD3=a9Yj#0&|oB#gRYZJAQ+87n#V4zR8>{@E>OGH6~RimzXd@ zN7a}tBZ1rQ2=iopkrVpYLh;Y2Ix9UUK9dzDt4!9IG?+A*(Ep~_O14mTFF@UI;tGx; zp?TpM_)8r7cM+B|e}{D)dR6{5#?sss+Q(0#FyT=l$kVbpX?-eFZ2f zg7JC3)_VLxqwOPN1Yjot>+Xed#5$@aWZgZI2p%uf?tu^mihyo+xe3?n*qgJMm!U7L zH3C%X?s0Q?t99`5e+ z5TaTT-aIiyxHLP^DqN9A7u}abltX`2Nwg7zoiG8kSKYSThMlmnju#XGM%xJ$g3w0B zpPGxXJp>vds#?G8#F`CXI_``4#%c@&nyyk@P_e>F zF*Y7$Sk#cFq4+^G1k<5%mmBE74VYFS9>y{4xKjv0{?R{+(9b-?O&CPBGMl9k%&1>N z-@+D0DWd-j8LB9UB`^u2I@_N{I@d2Eo$t>eUFgrKZ?R?HiCgXo5!>XUMCK)`CP&+c zHv4fMV8Hpdh1@~>QZz8C_r&$ObSMNfLr<7_pr*3#y||Azkw9Qd;MwrLenz{8M6JDF z--n!zu-;vFU8qVAh=PoO)^efN$2Q6CQU4qZ$U2B4fiOdS5g#JKU=?a)XTlO4BMk?7 zUabaCjfQ0ngz6&z)$&vB(#}x+MIx?PwSjF<9viE>?1zYwBjEMa&quBZ#b#bpdX!piU5?rwT6dK z@EC6rEI^O#3?nBhO!vH6DJREDYb4_VU z5h5Yz|F}4+MeU@WB{7>PcMRsWc8@lkizWk!?mHF&p_VJ1;Q)!%6pG>d-s7=MFmz}n zcZz$IoDl60AUf`ml~%V#z=qS(06ovz7C6rHZ-klbWqr_ z5@>tgKl}OQ8@|(vC~xRN8XeESmYBig$CI65OW(CK0I4ob2CIpl$9KYn zko+o}`c=GXqdQ)Jv*ZS@+~OiMx`~mFEq)?72}GC1;{-SW;*ZBU)x8#vA~zDF9ADhA znk}5w!nw2t4{>S8KC%ph5tjA!H6MGc$jqb>&{B@}xVo9Br?h1yH~I|DqWI&9?h;LN zAn~GpE+PZ<^O0DCh9Ira=|E$!ZX)uSniomtKZyPK}vG*tCi@($VG)y zwXnXn1fG1t6Ok4X;?~+uC7KLI%e3VL1^g&ATFbH&wlQQLAr@7QGa}jUdP%asd0Y~g zD4Ih_zb4~`wk@qEhzJQi+C@Y4mzn%sBv7%x&AiC6V_BTq!KKl>AN$yOkIn<^nj877 z%zN{511t9R~yc62}0aR>5uZ7-d15N zk5U~{EcFCCpJc+<*6urHVVJ?!PH)}Wa~)9v<P{_JZH2mQs8Ss zcx*yQY-VxgaOJnM7oE-YW@aGY}Q0`?-~r*rW668?^U?C7fI;#^;r0^lyCXh(5Uz##vC`?w4SV zD8<%@RL?uFrFT(Tc)CIRa$~EtR!@$`K3o8JLbDK0#16NGK$li7$fE^uv~9C&M$}ZQs`wyKk%JV`!5PNjvbI_5sT`U(pSXy}^B^X*qhJw7dev`W3!=FyhXLsF%rJNe zd?K1~{Sxk~f59f{xBSNHTB!adYsNrD4L|Na#C6^MpRwJ4Wbz-F{97g?I{}W?0MT&9 zMtkRI0~o^mBgjXoWzv5=w-lvD9u(x!svyeZT{Q*BAV(}x-(eS)5NT1<=FGhEakffs zYjo9d^g6E>G|CUr5Ti<7*H4%Q#$++TBYYyplgJp>lwGtf(*g=OG!N*1A5Ina+YV~= zKV55mn9mbs9LqI(dy)te3tFpx=4beK>siDQuWx+k?S-HF%&+y{dA6SBqvF3g{F2}c z);xxZ;8ahV{uU|}Z+;t4oWTVwz$rZoDiVBYo_W*W%u6{pDi=aqokg8PjPQ6&!S0|9 zJTxfFX+U5M$|>N&6e7W@w#ovJ;ud`z zboO9FoQFaMJV_6DYA=ubB|Yk(fMX|6<coU%Ludj6kzY-XK(;l>)!h4 zSOVv$h{Lv^)g%pN2`^#Y<%urw0}6UVz-++7gl<5`_9D;W;|LUJF{9@72{WtF1S)1l z00oK*CPEw6KG*4*eII6aLKoy?}0+%f7!(CTrt>Sh*DCe6Tv1zCIS0MobY8 zV9^*9W!pH*#`)pwVF*1-iP@~iTREs)LABMOACnJxoym*5W7k?O^)_<_5(0SX1VG z$jzC(`;(ZpYn+7RGK1l`*kYx)ij3I3#NIGOsV%rAfnpXeI5L4^&oICPk@c_tnZ2ry&>apL<{^QcjL`2RI0IUJrnhoVb?g4< zoc!n_N1NM1pC3Fa#*z)35T5M9kZMhDy2K|nMiJMygwI`g<>X+?J2 zi#NjQ0ocJoR2Z4GWll=BrXs6`&FEqL=*@RH0Yo$&Va;d{6$RX%#ja_={N%2E*}-Ne zJY6GESAss4wDFz$RL&?EkYYo!fb!o!ZIl7FwQ-hk;u1UOnBTwL<3jDda`q@|&RGYol8sOy_v2n6;V>nkPT>+ly@FwJb`ByA4^l%| zz-cj*dErFHO&e3X#m?9 z>{QeHK-xj<@pT9geFP?LnCida)ZiRMt>TbDfH~ah2^5d!;cz;H--YHL%nza+Y;JJ; zg0a{gt3S0li+g*~<~clLAD%(c41I#<`m<8T!ErtlEh_RA%Kl2r}`B(h?r0ag14f(ep!MbqcjZ`IDJe}i_sxC ze2Nn3v;tSu?v5ypx51Ruzh^alADWs|q*woj$!#VgjPZO@t-e-{$@mxzev3_ho5|Oi 
z2pKQp2~l?Vwv{<*FIuw+w8j!tvYaYpLa&k&sZ6p(R8}v`v0YlSE7Q6hAIw(&gAe!~ zlmCeXQrA$M+j%BaeHZtC9+ME?9AYP|{bte1JBO`D?ETguyD0HhC3C;^5aO)#zdRVg z9kDVYfF@`jF_JV-uo4IHV3+nFI(gPTP&t~wJZR3MYCteT0E>wePN;{%!7s8P_y}KJ z*--}wmb$Sj0!#IqPXeP61&ZGIvdd^Bd)GL_ns}5euZ0CX52yI(n8uStXmL~(dDaIe->qq`B{{8oD%0tQ31#nMbSgAb1bkVk6@+LOWav^XQJKE&$lU^u~@ zZ?cGeh@)Wutkes&&S5bps6y+IfIQ-xsC@?-%->?QXceMgkmPFoB5M;hSL1}}9KyHs zxx7kNPWR%_e#JpjtvLMLOO%qaHCq_-vHygzeuaSPB4gNdP<$8gZ!ctyIdcJy!4jl; z=I3xsb(j3HOQUD}#nMu_wA1_dJ_gt4Yx$hWp@DPae3cLjL?pqo`osw(G==<1&_;2J zcMldkfV&00z#=jT>CZaOP&FCKKgqYOTz`1K4B@~ZW3|%8+b@Y zu}6sf6Lhr5SVK8?bGO(dME66K86CJ1_Y%+7;4?`$HcJ-_L$;+f?J-a$6$>5|IukU6 z)0o2b1TL8ljGh;@)FhuL(w`^-=6jHypnN%`0`LgoWG_5Na#!2t_`xFHmD2DojHH+d zLZ~1=CO%;#W;*e?WeKDvC=;{3|YDOgWkiV zwf`tHX}LHy{OMSIZo0-#m$^?d5vlCc$T4&gUpmTq&|H|4bNf*N^x*`qDnH;eQFuZ< z$KI!y$gD@Ur~kzX7g2A}wgft#{z2X;n!UN*linrKQ>qtG`Rm9)e!(ikg9!PHn@=i_ zYxG5kU@>&IC}}|=k-+YB@J;k#P$|#Q@-sb4P-USYe*=;_%J>x7Wjna|S>q>60u4W= z19=}{zD@HKxxm!6jV~?0Yz4)98YU(L&*0-Y_`VlZQ~8BNS!*JBi@< z&`p-R*RFN0N11pZwb+Bl=x;O#G-?58P9OY#fKxcklEuY^I z(H`PGKdF(r%jY*B)_VrO%!JQx;G7X6=*?yxAKrk1$&YX(KEEMtd0#%i;R=wXmsE%+ z>)qI3?kW@dG=?$7Kh6qaljAL+N;~hCT}y5_3kq#7&!(Ujxqzl(T67SEr$48 z8S~>c5{62D5@g_m9D==fN4Y(dUn4 z?t~D0X@wsXTkPo1hEWXF#YN6Z5q}i7s__6 ze1IdZ{h<1w)Ew;r#e}>v+Oa76v^Fn4!#5X^+Oe__p%Kmv!FoS_ cX+uF7TRHb?>0s&pQh9D~=||@NsImHg0r7skGynhq delta 6138 zcmai2du$xXdEeRHd+j}VcRZ04MN%Y9NnS+{>(yEh(=tuVwj|SXWVOaY?)iALB#*k6 zW|x%6EOTxp!Ey~ZHU>e9rb&)L(L_d^Hc{&)|0QXIwCN)?&<4oDEm{YyE5L06^p7HL zfb{pxQaa1E8xs5N%r~>&eDl4(d+ST=zvnaMblTGJHy3-4`;)I`hV-|!S9eXfnZ_eL zT1(Z_#dJMW%&2*+)>F?GvuYf#{g z>Z-<5j=8w`icuT{Ce1UzWSl|Fw_x7GvzTYqdd?ZDWS~EE^b2)yo&0Qn^uCXbSfh!!2z+Fs4tO z4kBkn(`!moU+78Istu-xC-l727yxO`K;&rZLf%`qP1@vDhqk|(%z zRV${#I~B2JYQ+p#$%yRDDPx>PHB1sS@|))J$N-KLF@PC666lRaV4Za?E;$XaTCNF8 zJ{{RP)Q8pg4ox39KkYcJ(_o@eZOorvY1W#{hvcUtJK1*m&BzPw!S_)TTWB&wfYc_2 z0YJSXwqk(W>6HWG9$FyUVuHXnfP3-$7oLzKyEllqOD#tP@lts2lGuU8z^FM5P^*a% zn(2*Z5P_7w8y#-bu>px2GCo~(y}+m}?y`LERN$7K|F=q6q_Hzl#X>ta2m5%R6SXiVFy_>%k-aHx~#?~+9XqhHogIk z`i!&9+M@O%^VoH5QNO~bwPz2m>7LOsF^~BAX6zw&h1R3*pU>%BU)O2%n0B#%)tGqN zXJ<8x;w}f&uCoh<_)Ttb^Qz`0e4=2rdE|;Q4RYu8>sl#^Cv_bQC5?`JL3@FLY?Q~Y z#(l7Ih4J{hzGmRVw0IXV!xPJzc-J?$#jR^Pw}9*6$%1xGp97O?=Ixc+c-o~_B8?&6 z=wz?AH$K3ITzG@?`kJ{G@$%QTPOopS6PL+OfwOhSQ(Wiimq`+-g0>dLJN*h1zlC>( zXO=ZJ28)~N&K5}$yJFWF@3{*T_l(6&e%z1I^Z2~!M;H5e_GP`(5AsO}(O66PQ9m(^ zBS;DuM>+%RTFBlS-I0#{if^6Oe9MP~;3m0Mc`|(}Cde&COulIrF9cAL7UEgio2Z(|VusSAB?iowGgCYL@`9ZmLbWom29G={RsjZ7qyaN4l z^@gtO4oulJd#x|l@5OZZN+22+U8gW~-s;696L}=^%v^BBRa1m&nRVRpgPQz^q zaZ-^Gj=C>MROjo>Dz}rN`;E~ms}>`dOW-IlW}BD6Tg@$+9K&LyTAwd1gs)3MoM!G* zRZwRuq39l>qsfPZ*j!UwDhp1&?Cu26bPN8wAHHm|EE{D}eF*PCHiY+tUNG}4&&F9? 
zFEIJz^uO(x#1}KS=qlCcA#X8_k(~&P)oQCND6voeEVE@l#TGjfDw5R$Ru0%~z}ycQ zNRXJVmR%L(PRhr7#xqdVCVZ*3**$VPyXU?cnm|U4>Ua{> zui|kt0GfXDhrKVc6Qo-B_GP@pg9L~M9C4J!&(PQi^jWuSkzKbG4`X?r_Sy2aP4;tS z6)LmZ39(G$J8BcBX?lu)a;b}ZBIW!DI&m=_-#_%gFFTXpqtDg?L zq2HoyDtIU&6;tlsIv~e~zY&emf-;H+{$HcG5XMNQ3dO=5b66n?w!A!Icg-P4TxfZv zFb4hxt=};Op#*og6)4r+79lyGxI*Bg41lEOrT{cO1NYA>CtppUY2F;&`WQP%{M=;@ zih)&Nh0;csds6-<32)2a8{OU}JG!m>U##~ai7UD#?o?1H{vRVK8dxpHTf*Tbj6Td5 zh?xyL&aerVk`ImTWhdpuvDXH_?VG3!3?y-S$F3QaraRWU#^j^p9aW|X70^@|dk2K* z@3?pXFw`<7Is{=Q$EOIZ9HSd^C`*JUP3|N}L_wrdYc`x9z7fMqb~H3oOl=Vty;y8=RKbUAfOl5R*rm7(mC|%zc9wl9|1{y)E z+Gy3vNUllI4R02$n3ZN@S!|L2x^43KJ%rFJ`&7XhunS@Vh>!rA$3iI#U0q`%U;bS@ zq2)4J+5SDSv}eb_xvnl=B^sY1@G^my0Fd}?#1sf#T=A2hAUk0AMXbLa2DUp z2hsW@`6%DQhk!}(VLk%P?Is8s8xsZaa6*}mZB#X#5}S!!u0ic8j8=D1o}=pN7FohA z3WtK^EnXy`e0wvEl#l${%~$sGu&1(Suo~W@~;oqX6e?U+jlU+C6a{r0)fvF zxckE&5sH|!$GZj|SPNB-KC1-(4k8U|d&Mr=sFK|d`I#fzh7^A)Yg=%UC9i}WzbMkQ zD<$7L^86M$Hprkr6e}f^)g?tm$?z3<>F5!5N`CF={aIzV>Ov~Oe_py%APJS%t3j`?RV=O|JAh9``-~Eq}9l>=0u!(wdxLlXCCmMEhGn2l>*x9j#my)6V5tZ-uvJKPpR0dG}cucdGtGY z-1`AEmNGJCSpQM+tNwqA9yXczxn)L~uKq*3{!=S1-+1slM%+)xPagMKLH^J2qo-zU zBQTr>4GF9C*?lZ2&b$rCjaT7akf{o zsqx+C2urm{kU$=BOSS3+^ajf?g;21QT}Y|`f0Sjn>F;Xku`MZ9o;uO z$g1nCW2(FwS;v@ZXnl1*;z#Fonq3_CA|34J5j5AjulvZoDrLxDo_^pkaRC3iK`*0Q zU zebdj(Bga4k89(7CKZHsa^m@_w58_=|+yri-#ElV$&ma=EtTxPoF5RURB&y&D#5|493l zJ4J=lf_;fM2%}=lX&OszRpp7MLG*PM>>U-=*}060BMF4(u07t>jrs z5Q1obbY)t=>T7shvItbBW*C@t)bOYKKX$~LDD=j2)^=6d*jvMfuR;{nnQ?T_m4!Wx z2I|eBvr~^Cd(@hh3+g9P)ncpaQ2kOHB-IaD_<^MEh%oo3h)x0w3=pIkFylZ>IFTEcL5{Q$WLYmvv$7s Fe*xSBFU$Y{ diff --git a/ultralytics/nn/modules/__pycache__/transformer.cpython-312.pyc b/ultralytics/nn/modules/__pycache__/transformer.cpython-312.pyc index 23ed098836ce89b5ba3709ed5c96758126d8ef6c..941b74a9d2dbfe86dd687e50aa75917bf5902d0b 100644 GIT binary patch delta 5823 zcmbVQdvH@%dcWsNS1(I`Nb*C*wytf6tzgSY1`IY17!2lNiR3)yV~bGi#O)8e$932xX8toD&tU^$~t7)ov#N`GVU-pecb z$hzoue&oJ}wdKw*tR*+4V{C;~He90X_SSd0NX*+N!om>+go}iEDby#4BJ4`xqmlG> zq_7f^`9MEEEJuz7L`if{GLvjl7gYz68u(UWVRd@axH^A1H0~Dzd4ZYHF{S14Um0H| zjV?{hy-kwQ8VV2exFF+Z6(y|z;1I13!ysx$Ksnr#bSS5uw9tz=2T3!1n6t~=0!)?H zRL*zfd(AGk9k|v%jjJrpV=6xfpUWt#%OuxgyM?}z+vr@2&2>oHku)ReK(Zdm26`{I z!;HqjuGd_z`p24I)ck{Ms@MO3DdapeZboPQ zlx4Z5`x>VNT=rq!m!D(UwR6*eKch~VA+Q|U4T0U z*wTZxorI;=Z2O6ZTI}T-t*D{(_O2Z64YU_LS9J76d!^bxX?GGmeaGIg%n+;FkPRom zmI~TGAi2x|BOm?JUIC!0C~P5}w5PCCw;y`t12k5++1Uxrm>EBJXK;EDdX!5*K+L=J z=d)va z=wGzjXuZSooM~3FG*^H{PRzA-BoGu8-rJ7RPOA4qs0d3>h!}1vIl5aWiF^PAO0t0? zE|N3^!lHBn!pQ*i!85aZl6CuhsvTU0Bo~dy{=uXU!d`elqW2v3vcoVdsqqWZqz2Z3 zRzv}84nnUyKnqHH$ssyiS|>`-p%_Re&g45Tc+QMZkoWT{rwpN~>d;M~d6gd(6rC?S zS9U@8+mYADUmL$wultvjo`P0b6cb8KgypIDzL zb|vzkrOgYz)0`u3MOn5X*^Xofl1d~ykyIc-6v^F44k6ivWH+^xmE`S)rm_l%>fZI# zQ&wbVxlWQY=(u%cCO^KjEWwiFbdi(SWung(XBFwEVP^{&jsKl9L1z2^q_WhissSB@ z=jEliN+?s=hkQG(c}CAx z_8?E>pRt(yGPZk>97ZD0#kJKolp;y`Kv z)QoeHoS|K=l|?eNz6o9>F+D_NdI0!68c9px_&-{k<33Yl67@% zTdU5Um!Rj;BQFMkw*slEj)%$FKZUT!3;t-}n3~D>{*f^JfRyHQ>FE-uohF|ZEPo9m zDj5IO*tc z_&oE$Z-X%z$@g()0T5N09Vsox4U*4twe&;Jo|A$H^l#$ff!N!rmYBZmA}LeW@{nXUQ7`Za1&gsG6ibg?CwD)I5s6O4t5RdK>UUVW<3u6vRs^+Ql^gR9FJ2*H7q z+K?nbRvL~RmsQ8nDo5q0{1X_Je?%k8^8pesEH7E4Lc*4{^;u|4!A#}rK+s2jy8Jud z`OwHkR!Dx1GZ7;IjZauTXLw-cxZ=-pIeMx|Fi)%Y>lz700}B13iC@k42?{^}N_eQ? 
z5G8#z4l> z2j+5E+LCm9ZAmj4P(F&}1`jbSl!ds#)P%;>dsq+mGtFxJNDhR6kTv!D!*9% zs&m>=mvGcw-tm@w%29XI@!VTS6OOiorR|QT;7s?~h3D(e)lHBI&xCN(QhUc@JG1+2 z+u7*(@pI!7;_IQ;LRYunv^34=8KX62Vywk_ZyB0DN{djNu+~mn>l4=cX{$G3 z^5$z>*$%r}GM1LFk`#xyvG`IaTjy2N$c>g|b>}57FtaO{AAr`l^VZ zw)>V8sm^2+ltbxLWo9mY$=66$)1UeNyhimXdp00vT`C7mplpPN;K*4OAKlu?o;-<5 z4SghzQvj;wj?8;j=sG&SJw?|%d6;nd;b%!Gd4aE$I(GKGBPlzo((rhl2|{R-hEQ^ zctjq;R1}@Pz)>@ToBnLy^X7fHF9ZdI>-hSfK6ZDkd3FI*R3_oW=Wv&iy-21~5HmJT zvwReVKzVC`s~U_T^R>`l9tbsKPttgFM3P4j!Ig-duK#h`?|~>eK+sV$Pp007&X{XX zAMPGQ9*X=s=mvK%Fa?El;$ZutDKT_9d^0zE+idwn!{(X(!(=<-g1~b0TMaWDV>H8E zO+gVg_ttoBoAam5&V<=H!A*?bG<&W#Pd9cX8ar-~n~j@qte@JsZ)*PlTqot3PHSer zs-&lSi;8`)rpx>l2cy?|?WO8j2yG|-FOn=bqj!2sb}vVnv0zetisKA3xeQ-@x$SEA z^~M_=H`cz_@lMCPo2Cv6Q^C*!#?CFCse?O@PaePh_|%}~oDa=5t_EBX7Ac@FH#unk zp;g;|rTWHP^_i~A;S})5r=IATa~XA>Ke|oyqa9<^dH5XOSR5`?U1-A-T&VIX?xSMC zN8dZVg_!69q1`fuI#`Q3z^XAQ1UWKp8DnYXGWA3s|D9kYdiAbBw$gk4s`z66F|rGs zcj^BFkZzy4zRQ%?dl`}p}VeSYBA2Y6rr{~6^2ATt_5$UUa?VDO!tg7Cx?AOpDJr2TgZjJOM#1l6oc(ll$4VonaZh3G1y40A#`%EDqtp_)AB2$ zXGT*DG*cD1q~LVbE3PxH6a&pvRT0@l&Q_gwor4Z+r@Z?KsXgmGzw+G56oc*57+f^h gTxQ=iP8w4TG*e$A%Sh{l_w|*pt$e^hQ{CbJ0nhK92LJ#7 delta 4243 zcmai1eNayiSZloRL@ zwsnf3gvfk$J|UezL_M5X$HV-zl7&K|pxR5CRDxDe<6Vt+4c;|)Pr!RZKeLlGC9Ywt zNfLcYkN#o7)sY!o70WSJr3=n0pN`B^X^3gz?9E*ParTbx_Su3M7QNxH^9A)&VszTU&!P$%tIMb*aZ>Wq)n_79VdH+0iSQo3#{NLn`;V@Ogr`9!COkJQJB z0{S%54D24Zm#krWNiW-jB2X*RV}W04jMM$ToOb_qF<3IdSQXN29E>T$VldPt#S~k+ zLLmima^Q|;(Uh4;-8LZXVMa}vk*b7agd0$}tf14*B9*XM+d`c1lJ;>e6%slrPaKJS ztF^LIqy_k7O^^!lCRIXba#{8gN-m|ajDnLw4TUNS)$qII<=P4)?+Z#z=^&O`I=CFc z%FDV+-xmWfrp_lzAeOqCXrWlQAXTcITq4Pf>!4p(;?z^ga}d7X%Y4Y0N6opXJ(to3 z3kGxFDR`^k_tv3S?;S?R+in-KyyLc7#b@8vsd(#M9o*C9X9&3PDdq?xGEGEanplAd zte{{SQ31Cq^l;Fmso;EkKjR|;zgN=~r<8luO-fNEsA<@oK?(F~Qp;hYL5FChWMsgv z($*^(sh*N9eRI-dNQ9;#P_M&9{TjkRqG1zZp`98ml8h{Y zoS|;RYfBn*8`6jp^3rFkR4$1}9z2z9Mo;}BeV(NbA7d(CrzeaqkEynbQeazHjIrS^ zX(J9vO~6ilESt{i(Jra2sV%{CvBM(?4af-{M?kGcpkwMA%2!zi4FVRMIXz5YB454C z84{<$3`8q3wP(p0oJX%d?)!7ecjvNa3A}GKKzV7};$E(o7ud6k$z3(0kUp-D?_~w9 zN69b(-+_@8Qa#29%1O&UiJ^|8bfywtl?@dpEyOC)sN9XFJg_IGp`%ih7!zvK!{Ib_ zFAugv>b}&(2V4#O(`3e9B9vql!e^WwGVs17Lr-akW>V>PPS8y5O^{ckOA}lJL~26$ zG1kAuN@8k%P!M<01AQZ7?Sgh{2ZkA2G{Ba`J!93xcr?9tb4=;MEYvQ>6#ihCI;hJN zF0;qf0Wq|>UGfW3E9{wOQ2KDWB*L+2t)vD1F|F9S78xNu+I$c5Nox9$%)^;K^PaFA z^PR0e*D%m2qbSF}3@h*{L^A@OPIxsdT}$|SGOpzLWn^qhWGE}j7Hp>r>-76sqBJPA(DPZ#nbG=174hN|Y1)d$}1pFHQ;3c+AO(D^LeTd09k)6?tv>-`m$} zD(?Q4CiI7^c}5~eKF!<0l6q*I;U!PP8#Atu-H|;7D%P@ruA%bfV`o)M2PoZ4p_PIg zPFwTRs1`BOCjEr6COrDF)kx~#t~HD714GdAuxaTI%RpzI#JpM z$7h$q(CoLct}0ycMQISd(qhXX)v(hxmz;!iHaq;;=H^5c4+j=yW@vFQC2qGr=nuQy zFVZ*Dz@+<+?6zkR;)081vnt~r^2hj2&n{64aX2LhuZft!r=CPAXCgPAqGD(zqY2wQ z$A!)d%rP2YpzKrxx#BDu=Qw^d`AVyR7w4=u_uNXL66>ja7q%XDVWRcO@kw0)IE-z_ z7v-t&Lb(&Rm*>N`g;o6l3>o9h#Or5@Yd@;1Ap%;|Ji%=LQhZGy+^b=n;chd8Gc zQs<^6RLNMVyX^N+$#735)FK-D#iswPmp#IJES07apo+H8**EWgdSzY zw1>7oY0THDQcoh#*ea-e7LEDaN~39jveOXIn73F3ufCJO@l(Ija_~i^C;gPX)0L9H z-5>OH<}_^U4EtxTflc#rNC|Y8n}FM3kMQ%;X#b^h!ODf59M$;L;6jr0HkJH$Dwzhs z9~T7_srUE2kL^{+)~H+dx(c}hbA|;{ZGf#+`bcTjbyCLUF*YVdcCdum$PoNHM4sVy zDmF4Z2o?j0ws4XkVIuWo;@bqPYV^gj!7?QU9el}o@4a4(yt4lHibiX)F5ZB-{3DqBj7>I>UqnT#mT(=ZtirxP%392xb8W&XB1=92+yp)k|C=@HSsf; zbmHMdwezYWYS%}8w4sAdL}R$DM4~Ms)NJ}!(o}SPXqdi+2Jp&eCCQf)xa2?{8m^^xL*-p>Ue^7u2$_My=kFpbB$vCV z9_~@ z#*Y*%B?x%3(jyg}b;KJ#wl0_SW0d$`onH!NQlN|CpF1?MvzXY`Jl{aZa+s;pMod|E z4f*^6JSY>htyz+M4nLrbAp9k8HPeCu@Sh2Nh%lxggxum-!rfLg_&xZCU`7#1Y$tnE z<7ykJBS+hg`=9rZGn5|ROfpC(8A{9@XDBgV2`OEjo!%{YVPcqb_( R3r^Tyah`PEVUUzJ^dA~CQKA3< diff --git a/ultralytics/nn/modules/__pycache__/transformer.cpython-39.pyc 
b/ultralytics/nn/modules/__pycache__/transformer.cpython-39.pyc index 9f832c04f024b54fd289aaf43097c18c337a0af5..565233cbad566f9c059030c76ad126898f7d6a9b 100644 GIT binary patch delta 6493 zcmbVReQX@X72nz0`}End562(zM>b!COYBQxARkUhNN~QBO9-^Uk#ekW#`Y%Xz02&L zLyn6Ml~jn2l!~d4S|PAoD6*(3P*7D+OG^u-rJ&LZsnu2@QmI;ns-hH?{s(<;_Rc;Z zXDaoqeLFj!Z{B;q_uh=(KYRHCX)&6cBMSVM-TUY9fA4*vWfxi6ek(UDdo0H%8P_Ls zw2)Y2*UApQJ)GMw@8#0!fAGRkWG5Kq*4o=@QxsrKSfI zO-V&>QQ}K^7?vpxjj%D(VzzGRwrP!LnVvPy!8*Ay-I-vzQ?oR)GC9h4;))Vdlq>Kk ze?v|cG2r`2M*yY_7Iz=COvf~`W)X(MaIrDe*`uE|oeAB}vy3@rGOFhdZcH+VaXTSC z^gXmR8E<#}b|KHW8%n25m?51`iHq9XfjG?0*NMCRU9l}A!zV{zP{sjiWKILC;!*$l z&gIa<)`2K$ghW-1s6_ot^NHX2H;b7eeJLp@JX%t2QA$dQbizBX7Kg-+K-V2k&f6ff z>>St4F+I<7Qzm6Jk>s0TzFE}`II_|8<#V>I^T-gix-Gb=G&H2ybRm(V zSb?Q$kgNd`5ApTl)!^`2WVeR3Xk|U*&e;xab_)mN4msf@CX1G_|)=B~dK`KOPpx+uOy_rZ72#NK8JUj1d~5;R~TukcDUqjnZa` z%CN*;1nh4KM{N{WRdI(f*?n-|!39v07)*#~oA#E8wAn3nz>*_O3pQY7*cSjS8CUr- z+y!np9`wXFfE9yC5=iFlOG;brTnYlW-yuid0VU78)nYTQEk2BOh`XX)L=|VFYs$VE zGVP}t^-(h8EBQ|aO5{`!o)Gn4RB7O%dY+VOdQ69Du%woJC2d@Tk&zh$W~ex@uNEkv zK#9;rgu^x9DR~7k$HhOQ7s+~YZ*x!oLhkuZ(EJr-9^Dl{;Kk-0zWwknBf-SmXzgpk+!u@q<{xsrVrzjlhW5`7%5< z21yAuf+_=w! z$!*vq5mIMV$niONq({KO6O(_fK#Seta_p6za>BWFcot`pz8n{sRlWIfA2pkl&IaxE ze(|TP_V+I;0^Ir3r5oG7Pkgf^DLz`-xlA%S2U$0~W1Ly+o;=?v#LA9jK+0MdQyr_e z+c3E5MWq|S3b{!)VihLS((%|_ZkS_}K2&=`Ma*8%QuA&&$fGbpar42+e3nfz%c;qLMzguh zY0Q8OeX@{s%t0?Df^B73-r*3eGn)9g>u|+4XX~H9ItJ_Eb9`1udUJ6pi(j=03^~_oBEYw%SMC;u6N+bp}HgE(g z?-Qq&>4`wBl5bd!TR1|NhRiJ3;*+>#%XAetj;2QnK+dr?G`e12V z`GPZ`PneW4&_}M|!1ugLz$`!|0F6;EDfR%85VJjdcISWrh_4>Ju|Krlt#|D;vH}6)fl^9+4wNrIeN~xn_U6k) zy`d_>yiLx~))Jmi_q$;Rdt%&xH2eXa9G7v)K3C%`TRzBK(-w*n#AHgFv zqo^@*^$n!hgo{)(&KHF-s_qBXp{UeokjG>$b)|1-+1Lc>W<-Y{gtQ~_IFhW*l`DrJ z9rB+%?2GE7=XCFE0N5Oo3cD$1O(ogwj&4@*EdgaGB68ki+#EM8BU|4LQ$}?0@rq87 z>ObfM&5Ms#bVIpSywTs%HEEdE1~`DRti2%v3xJPx68Uopv2?ky-^9s(>0%15)|=!qbX)Qt!9} zEZ5a0nPK4x=`1&pYt25x9goAukWJjmLUSs(r2aG|!~Z)q+(%dRU;8KyU(|w{q4lRY zqT&|9EkWKaE$GaOgV8@oEgwbaZHsqS=)3e$!v;fvYzXphw58E38ywkt>j`Ns#|=ue z%rha(m_BDTUv~ zhW?FDBbh;hE;`kerfG;c=?P}gM#{bnoovirAgg_L^oG*o0IHt*!{7<=&C*kW>{x~)O7!~O@_+cm|L`Jqu8qS22VI5 zmHzY_SdYfTpGM+C@(hyi0jU@i9MC72W7w(iI5(-hIp7zuPhDtw7V18M$G#efqPCKi zq)T1aL?WMolwU-JR@?0eaodVrGZF+ zPvdAQG%1&d#dCv$u_v(-f$vGD6;gl?1`h<}fTzXQ#7KYLdN)DeCRop22L+`~d^@qP z=_HQF{GFc^e^0C&MyYvu`J%D84Z8dh9_e(zjR#sfLrZy^gyEeV#>&QFSVKy59(8){ zZPo~rS4tHpU+d57A&F6Vei9G5pM$AfYQk~y_V9)c8 z!B;$7173Mcb!ND??8O{uGCz~X3S-W|>=EUiX@ zl5!&%p0jPQ(=}MT28oVM!7$`;%;1}NTzs%;-GOFV@NcXP51{z$D@oSNXI>Xuwb+_X3 z=pqzfaUw44c^D~wObBmX|e=wLOINiyebj-SSu=~S;J zWk^cI1N;}r!_Se(eD_r>y@up0qmtoxS9-OqS5hrnF>H@f@!oa)2QXT>VR@kna%VNSd_R*d z1=D#y+ymZLDOxMJ2Jb>u?NiOy zLLxqmbm7hDkFP&`J9=1GJHaX!Vcx+TRmX9`{{i||=&~7?!OR>$AJjWk6462d-YS7_ zZ{B_LTM~(LskwKUfdkb%f*ZPlxgcGK{zL=9&TPANaN+=yXrprrd5`e$r8Zf3l zF}U?QLd59SNw_zAXX|~(!5Llmf0z|qQNt2&A?pThb9^#qQvMJ!^MDxNrpJ*lH$WjW zW+wP^*e~X#^KW9GmyrAh$y?(2ZSla{SbRlX-Zl`BvGePqZMdsZytsh7S_i$AX0d-b zbz;%fYzcHJR7D`8LYqRAPdL`c`@HL)Stxr6V@O^uB`gb0Js%IRArFi0?R{iQY}r0~ zXdbMxgvTB?w8Q%s$WHVyHab^L4XGg#h1-`_HA>o5ttB?xs>iO1b;Op$+G5?YyJEZk E3tE3Ym;e9( delta 4984 zcmai2du&_P8NcWH`uZ8iah*pCNt`xKa@!Ovp1m=5FkTqiEhwK>;G zTH~&T7J(RDht6J1n@GlBWP*(;+aE({Fz7a<0Rm~7*!_oTo2DVqG{iOuG;RC7bDcP3 z>6YmJeC~H1-#Op=-1jb%Kg8-;*4D2Q~)!ySQVVJ2bo6=ciDodv` zy6IgUCnJQM;{E=u`)L*RQ14|DbI}m>Q9rcYbRDgr0cd$>m{!stw7d^XiWKu5leB8) z0Um|$%7ypwKF6wIoBb|@OE&h@6X73i5%eEra0xS^A z6OSh|3Dbb2B_<7@#OqGA%KGksOnXq$tp*3&L$W|#`$f~X9fTy!e4SMk&U;R=_)8HC|`z+?gprORLN?>h|^ zv_PVo%r*c;Y*KUCNA5sECj#nYg+_&h#NNeG!~yVhx4>hY3Mo{K9@>A1Uu>-5a}{+& z;yTe6W_^ 
z89d%Lfd`YMzwTa;!LLyon#XeHvn)AjsR@XY6T0O{8+rccU|~zy*o2yHgVD#~G0{II zIZW_BU`L-vszxTgY%kE+J_NKkh6c_P>L=Z%1 z!EjRwZ0sabg|&(ueAYZC`H}E%hQ-9?^_YP(M8v|y7G&G>??7t#IN~IDbjFj_2O6$> zSWH>^;_KDVkv@jbpb>^7O}6CDE{nt$ppopcH@Lhv%QG~g8~WKC8-9>KTN_n{$eZ{- zYd7u6i)%8or5H1k$42*c7&q{_amGI zuv|*IYvFvX{kqnNQu-ot z!;s*XLm)SfxdYtH7XnhDH*$y0&hfkJyZF_5UDP;Alpkx@GboHzE;}6f(|s_*B8Kp~ z)#sXqzIk}q41cSkc^xE-<>bMNNN?b3Vbl9hGeHB0=A<{w$KG49D z@Y#Qx?S_&Ep}4-WG!@gaU*%6mjyDuO@>iJ+6L9Eo-wy2 zv&IZWf_!K0+@`Va?m^zXF;TN@^WGIwZs2ch)b>ol7A=RKGnWcGKL&;5WWq3DIrbIk zUQ6gdz>}5IWB;b}TO=wy1bK!^DF@^&5|)b685Q5lR8eR;qr9@Y=9K2Llqo%7VEH)9 zgoP^ReJpSDqDeU>jEB5mF=7#O)dTS`Aq?ZclUBDTrQqYStpt1@u z@_PPZ^GMB8LODlRdLnHkGLgg6nS8qA7$4XiA)P$ew4T4PIY#2#*YcjQeOHukYR&O4 zw~j4^+&{IR+=~S=n?n#5{%vfb6~%mUC{A@2pVSj{Evdc=gG`LwP;@-d_Q$Ga`8@xrd@5lK2-5sGPu^X-3-hB}6A8kDcRWaddKGU4oT)G*?z7LPN1wab$ zcYF5x;?QDYggDo>Z`_GTwBzx0!*LZ1c?ll7@}LVcBDOOg)o|C-QlNwIZ<%p&3D3y^yEnO|mRpLd75r zyIK&Mu*(Gr67HkjB5zr4%Cfoabbh;_^S}4i9V}ZviZw~gqaBw5E5Tc57ti)@? zD!YbLUqe6@u~!j(j_?bBrC8|5Pvm0?K+h_JvX*auaJDXFgwX`{{JFuuktn~iv*F+? z@NRkX_`Lz{qq;3-A>p5io@v9pZ(7$2dii3@UWXniUr~aVQxqKRMgH;5T{r&{D66(0 zOs5xFN1#)Z(M8}3fOr&elK}BLHi)VmTLn!>;OeeTr_enu<)~h|PqGZW3xNst|HmSO zFt`et`k{f*Gg2+X@@^*!iWu5DG(d`Al<%S`F33?4fj*PmQBdW4%pQeT%dO8OGVwG$ zyHuzOY6aDkkkpu6qS{r#J2-m{yBWhhd7;z(9HJ6_WcUO`%BP2?2;u8?U%ZFC4V{|T za+&Fgscf3EFW^#-AS4i60G1o{1>a|k3AhF2a=JkoGPG2xLr9!t&%uipvg`X_;iMN4 z-avShw~uJ*TiAS=-!;;%il}y#Um2-eD>xj({i5YbKK|NB>}U=rmV<~mp{Q^wq2s{P z;XvR@ASe;(9}MYQJv)H&zK4K^fc@hYuG27}e9De)ZhQ=gv5F;PCw_XeQ-FxavwU%M z{FXA;<@jZ}hwP7I!{|RI1_woP%WnA46e2;{Pr|aIgmwh$L+e5{A%Cb6eout@{sW1i B5U>CM diff --git a/ultralytics/nn/modules/__pycache__/utils.cpython-312.pyc b/ultralytics/nn/modules/__pycache__/utils.cpython-312.pyc index 470ed3cf63925efa6fd14eefb5ca781ae0055f57..f5943b6faa7b433334a381f961fb5084b974d01a 100644 GIT binary patch delta 734 zcmXv}O-vI(7@gVDpWSUs(**v+r*{sAS zL?OeMB}ZJgyRf;`k0B3! zDrMpooL1&dPhex|muU>%s$0dk$DA9Q<~f;*q`4eAvspo0G9^oFgpadPE+QexSjt$B zM-lPAzv#xgB7xysoiTM&37coy>AypZ9F`P2akCXaDTW~rYNglai*PtMcrB8SWz$Ax zBNfLlBT+)E#3eHV7iSY$J|=Sl&n5&ByUK`{1Tmm|wj^P#G-&;8Qu^t((hS{AI4&9D z@1P;BH!FF^`ba~^7SfomD&yA}e{r_L1glI)V?yQ5GA0vMW=dnGc8T4&k52VYRE;Op z)KZ1Hr;Iws`u6M%YSV?{{D-7^H+pI!+9}o6qFVsjR<-#wn@>GI_|-O~w}ZB@-le>B zHbwVr)W%%FxdpevMe5DG);)1zB3dkJUE`?@Q1zRhf=6xaE=IqQ164Alk)g7$LS9j? z&T8cBA!#o#8@C^yJ0u;2hK=aco~zvTWO3s0*QKt#l0mOxl9c6iT&F4Z*OIXhRUilv>uhp_bU67=)O5 z(~HJ@7cM61sU#*!dZ7tP%dNzN`=#P;h$b97>uR}q@vAeLmp7T0H#57^Yp?y2-Of=Y zo=m;F_kP>%0D0Q-NhAuO##NY$Ca$ zW>RRIK4)_fm)tOcBJ~aZqThmwURhp1Pq~i?K`*sNa)_DDwq#kFz#(&2P2|v4+l8&@-GKA!cGp^H>5NuWp><*pM0Ku*ZCr@$zGZU1WlAHr65`KNV&why(GRcI;A zP9*V9gWab$3s~&eMIRP@r9?v<*2NGOLpvioq?ypgX)I3H9d&izqb(#gC8en=4Ka-l z`H;UfXq*ihfiWX^$r!zO+`*iRXspxZs1CQzN|=?jGv^Q33#ONHkDG&NQs_}?Qf+O| zuic1gb4x~4u0O`ng{B3Zw?SjNe+c5s_2=uF%l|R@MF{Fb1PhTG-4HHoQ}bAu*ZD=v zFKTz<4Sq@I70fFJ%RRfg$zhxHW`@`ZBgLsBYZs~rE(oFn;rq!VdDyf7z;A{IrpZ!Z zLwlX>TL4#}$4z!6vyxBbnfMR7mSIM5He^er7gw~NJ8jp4^5z<(2j*7-^pjSXB|kol)Bym z**X|~h>W3RPN7+{HFHd9NWVrQuH?27dOW26d%yqFx7OWiLX#x&?ArMBE1yi0ugl4c zGOG$kD$NVkna)o3#=Bj~sAhzcf-6nR0ZEHeu#AirQbh)`91%TaBO(0Jm#t4*Np zP2kuEFT=0AC&74UUvSN7!N+WY{lIuW)QV7%kv^x2DQD8z6^zRNeq_snB-k*+PIYL0 z`iE=vzoNI?w%o99Dzw4+>EU8`yxpZmS`}`1Hy#&9Oe#hc&qpQC$e&sZO4z7 zp$Y2{1BR905pE+4(Y)rzIEKa?<8?rDjG}uqM}hgibp80Dt-&aS&>dUWgcxBz45AO_0mV3;)E!UNd;=umwOvdiRY+BZOonhb?h65GE za{O`ub$w-I^#rO5o0Y9b#rOMFzZHclJZzlwg5Id4CHG)Xm-$gDLtZiea|C_OcR=Y6 zehE2!X0$yvglKnw3F}%3q*wyy2Cxp>V@0e-S&~FRc*VAA#d;g-c_&aIU23XO4VsHI ziv@w4Z!n;*YGxZ?dS~7Otm~$|q!arowLuF}ns8bkzpqUD!Uova$20e(A~j==z(g7{ zkOv14=FK(qxCQ(_zbw

    Q(q0m1|^pP3%Wqz7)!Tm@cB4abH+8uDZ z+R61!ub-MD3jR{cT()dUj1(?Z)f9At+KWb;d`Sol-S%*00(v|>gL)*iNpJxK8gf0IL0RJyppuo+%PXvomlgUkTDKEx7^bv)}vt4hl1I zh8$5sf?t$6kY^CJ97C0S2unrf_cYubDCg&lFIa6`(q^#D1fsod`faVy~RL)$_|1Vq*ZsS7g zP*I*hTOfQsiMxE+G#if=Z9!{{ZDT^%v`P$>ndh1|D%%`s99rfkbcRp&1j@T%?LRY@ zi0{Y^$f+QkB&;XN1TuCT8YMDkI`qfWttYZdHYl=WjRc{Qn-GO(DjRGB%|wOcWZ|XX z8l>ivpmP+}qeSUko}&ng2=!8^pcxOO3`aw_IL@Ug8t}B#Wf7DsSQ#wq1b3-QvRH&= zfE9MuFPtRn7G9diCWZ?SH31b|pUZXHu1Nut#66*paxU)?j7KCMlXybnLlRF(d_-cB zh}RsMsz;SiQF$=;rnxyQcE#MEV#{R@)>H30ws diff --git a/ultralytics/trackers/__pycache__/bot_sort.cpython-39.pyc b/ultralytics/trackers/__pycache__/bot_sort.cpython-39.pyc deleted file mode 100644 index 1275148a8bd4b37da28e16949eda418a6b8a83a7..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 6022 zcmai2TXWmS6~>()L{hZuIL@tu=_NH&S(BzsoJlo_YrDS1kyFcwV^$di0#+m_fS?zk zEOMw$r*bAw?M(X%q(}F;KcoEvcla$A;3Rzjt%`l_#mTBNsiw5h%x8d0TP ziOjYs?v1b-S#3+mm2fGlwQE8)!+O+cH)QFiy8hiv0S&Tp*;-1T-hQ#WNTC0n}s=)tc<3*(uwDI~mOMsDhLgMJ6H zdh?TO&*ZH6u^UEi|3(m|jDPXy;VqBGpS@_^bR}a{Olr%%)K-{!D77{J{5R5$OzVDH zZR@`78&9RS;a7YU{}umb@znB{@YED?&96T-+f`=y4S)HmbSSr%Sj}(xXM|k$SNyYh zvw^mA{!5~5`5Q@-+D)wdLRPtYPpo9)1h;e^AiN@Kw|mU&cqZap!hFNCP(v#>{?n z@9w?5_w9JQ$Gp^j5~N)_Wzit!ZfFl&?nVqsOjhk==yh#3v3Ho84mq>CL6XLNv}(KI zpzGRh-?zOX=d7QWU6Qp8*u;q3{gCxLfBzd@|NN+BW_mIlz=o#d1ek~8WJ^gD$7$E0 zv9l@-lZ(s4fe+)08+&`lD@c_#hm^7Rk4t}`~c%Us^aHrg7+VZ8V5)-X(=f>9cH$rdDU7h2fb zj#DRzdAc?j@ny{A44I~ZN>ZANAsezOTXI7lHw&w-l}q$!=H*2)8TJ}V5~7;OsnnB? zl!<&Szabrvi6sXH>yc3iBkSGL5NQx1=(w1KiJx;x@KKm@`mC-5_a8UvrdO0NpFh>b0lGnHsBDwcU}5&bxS!=4-l|{)_TtXHeDf1 znyApIk>it)E9=tZD`}-?9#zTn2%`?<1B{bebbqLb(QFzGML#emh!gatvotXZsrLAf zf^C`O&%YDT;vG9e01O5pE17}VtVw}%N!JB3*)IURjX8~#c)Uq^CWV8NNMAN+7v;G8 zC*?6PR+t91BJ&BmJySAe?d?oS7Oj?9>mi7T7e|8+@1pm=QKWIJG?|#RQ^4EWdKi0d zn0yGKeCN5@k#mdL5!i^?S^4Z{XD0I&p7M97TENt$L<{_3-aO z&MI!=a_)|@D(ep;1W(}po79sw6DOqROVqEPtJTSaKvthyO{R{6L9QX1Obav|+5s;~ z5DLgv4h4jEGnc3FB9jCDbNU1+7py4WTU^2~@Zcs&asid3)?`gShhGz6qXsgwBEJgP z7{ByF9Y4owNT0$#DE1(301u7{5#;gawYU%FfGGMP9$dN$XTS82y!z5D;nefM;KeZq zN{DEZ#`YJN9?f^ofnyE)vPN#|q_MLu?SzzqB0}N}ECMDASOhB>T}0G4BN^$SJx30BOJR_o*u2PiW$&)UdE|KSJAYQ1Wo0HwgO9m3o-h=?1T%V@tl# zIzyzrPqEg?EXRpre;ATnbDYORH_V@KGClu*D&ok3)p3fSoWd}tIF*?L9wP@xN1P^@ z)riLg9&(Lt>~q?X{0dbx$*f+`b&rLi1V9nr$$f|@B-CWehSFFvm&|Izs2PfE zSt!eN+{75aH?tjbHYv!ZkDMK3JnmE8PT4U2MS@OQFH7>& zWIj^UT8Q6>SV&<^#KKp%G~yY=QD{AM@G#L1wVBxXEO)Aw&VPoNGHrjfKg#4Jlf6vd znekQOrW7oHiz0&M_{~zY;uJtk|2soA`6{VdOq2XeO!NGfnoqNwV~IfutM(MR;6bOhMI(j+r-b#n@( znHof$cD+2UaCULZu<*gmny#IW;1U8_NudzDg3x)qAJddS&SVvcCVi9k>1JNP$5ez$|CI60uFJ^~i)Mes*DX)t#+uu!_ge_B1V zForJ1P_h5C@k3(yKFAeeMq+QX^a*4AJh34^_3i0aEU4q{_%rd~;xq+p*#fs%w9R}< z4RCNoxIx#ILipE5ttC$Tvt0|~p;La6-=-#OXP7WYXnw(1?UmC(r|;asI|VU|p#EqW z6*kO_oKR)fbk;bN#PYzg#Gezn$u7?NqHQr;vS`jKfrRM!dwe7zPD$3Zw3@O4Le)SX zj-!k-rA<$Je>;t$sFs_QlJ6o4lm`+*k-6Us+Bc=^NuFgEsV5==3jzO%2!JBKmQ1OQ z@fTJhp8px`B?J=n6A(B@NCBp|(5WSFw5t4WoMrqZaJnq`?yPVJx_OI{p?1*YYvfI6r&PT>dx+SP40L4pS|oS&!EwnsQM z6b+1Xf;LWmlIg=V2$OstRfW9Pi9Bx77-#eQxdWJ-$q=~^^O%{qSOQ^MX0GS)_CpnS SNfT8~F=6wZKG)Rd!~X#q4Vu#c diff --git a/ultralytics/trackers/__pycache__/byte_tracker.cpython-39.pyc b/ultralytics/trackers/__pycache__/byte_tracker.cpython-39.pyc deleted file mode 100644 index 9abb721862ea32bfc857fe928c979ecb743a0568..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 13250 zcmb_jTWlQHd7j(u&R)1&QWPahv^=tvn5#&X?KH8hD2{B=irqStBgt;Ok+K@@49QvU zjb~;hayyG$BqtJD8)|?8MT0hETND-00!7h>J{IUpAB&<-^HiWf5xur=K~VSm|1+~U 
zQK*r2i8*s_bIyPM|3Clj9NJS;IR(EznJTzfFDlBvQ(^RHpl}I~UqRw1j@nQvs-ro2 zLu=|4y=hboRYBQkq?+kUn)9hfrfF78(zEa74Q*lfudtY&~u6|FiE1vuOvlO|P}_SdB`*WjC63 z>x$P1T=AWc-h8K~(;)HB#5;xOG9Le@NCG8P@2TQbyfuESdzyo_+(-S2a!=h?`+7ii zI@f7&oe`*YJ=7h;NqwT#4f(Y!^;6ttV4X=DW{{maUj>#?9{KoQz}cYwN0$b#nK?C)iYZr+~f%=cH3~rthnjDW~Mj zpr+`|I*0I{c1zBjbC^e&L4Mvj!ueT@e$-ju{2}CzImbCahy0@R9On-^C!FUo#=K*J ziC>7)*KZIb)rfbKALY|{c>8?^nLw$lp|YoiDy`>ZZ9&-wU)Oa<|3nLwDprX!*v)*Xi!Ixi+QnEa?#qy=P&H^Sfny%f7a<%py zTX=S>=0*k2uO_oP*nzdSKqYfl!v&AHEs>}BbkEr-8|N3+y4I47;8@lLC~X^HOTr00R#j`4m0V}wdw*;DtGKJZ)D0=;f*>EZ;i z4J=-y*3@1)q_)hy#<(ZUW*i-qHF~eSFI>l~1-?a#x3I6Q;rgwv54mDVDQrnBEi%e$}qpUj(CYn9XW|8qg2+!GF9ei!m9jcAk?Hx_zK!j z;qj?n(Q?3zsrHI7R^q|NkV4?l#Eg``Lq;#5vAsfNMd|{T>%fpB zbwD3|x1!v#ZfbqKZ-fRkLaLqyN-q&gLwx~S@1nAODah2#y)0@pLTg{`%M%uxkN0`Ve}DC@Sm zO;-?iewiwXQ^apj@;o(9`I|Nvv?`@RG&Qz}Nbh+ai4o@BOq7Wc4Gt!PRat73WPZ4J zBGnVG(-ccIg^q?v$iI#Tk`inPD0)FPSc{av?L}yjV`@(A%^if-C!{5x5aMc-OMaw} z1_JqCQ5ft2L;N%IIxL?tUME|~7^~AEG@yDu)?~U4G2DlJv^_tR zzA&*%GT1M z6)VU-VWvNcT)8m0Ka4HWZbiDg>qaS7k>YFgF|8reopumqA%a>yaO{rkgq)x$BJ~|{ ziF&`ug=-b#+WDRHks64%sFt{2kW`-F{wOOq4Ddm#@JSic9)KOC&8hQhNhAKJ5_Ump z5`|-a0BwYUA3%l(WUx|&oF}D-9Lx&{Imq=r*yi=rKKy_Y6NLJjLAIX5J0EI}wxx-8 z95O=+Y?w@OT6AERms7pN*8^L?60utDUCDZ|10*A{?RsOiJRR%2mb(*FJts0*d?Qye zN$-1L+NfA-x9+$isMgwT;rQZRMumL~K&o6W>#R|E zUO}uoiEA^lVZMb%p9HM}QzNG?A~o?hW^^QduRLY#k&-1+`kP1?sbp0UdL?pIMy}4t zC7VuXiw|O&~KBHl#6=1wTmliu*@tDtpc(A@af<*puXMOSadFYVNZF(}E~rfHFjt-fYbFBOi@1 zK5@u^ChI6LZb2CXmbjz7rf5o)R4EJ@qLE~O@N{6-A!@5RrV=dh zdI6;Z-cxuFC=^qqo3r9yEQ@WDskKelk<=J8Um*LOHj6~g1xqezqCMg23l@CdVViJ+ zu4r8tR19|*GdWX?X-7t;_>dA7H>9TpQ}%#8bT9$MZ__t4Y{g_~$%HqljuZA&X#(PW zsjo9>wFr_GRTHGc7QzFPO~oXWj5em8Nw?^Dcn6WL1{ENx_9-}`_CC5SB?4G@t=(-o zWbUoCcV6ICe7}47p@Zg zlVG{5!eha5$P3!pwKrq?TXOxx5(#M`9-r+B4dRaN_~mhN^_0pSSRLQv@9T&nbrmrO zR)dAH4{eGZ>ZDUhHDuvAI$Hs%V=O4IOG|(pNBv_hu7~&_C(_Z6E9c{Omcbj}bOWo_ z6#}ap=zVa`7~&EH-ChFzEW)3)bB1s!^SfsTT*YWTV@bT0Z!ZrTR>c)8hn1&z8###C zC8}Uy{VH;>j@gsZ!KjnWLVWoHJbnR*lFDIE&qAG9YA+w-lxXs7Op>@Kti_I9>{7>= z(IjSK`-vDx?`@v?Wggx7BQ_8H4tc9(}p8KUhB&>95o zrX`5)M)AX#Sp_@s9wdZ_?@}_(Q{)b>iQhwmPi#V{SR6&dns_!62>$_45W*-Nunn`( zI3gVFxRM~l8L-cS^4ySZjNZfl6+tyHlq(1;aSmv*{SJ9!@EyGN5IqUAoUu6UWCfe< zSWnCIt=r64*14Fo#u#e_3uB&otPkCk#N%Rg-lQJmm=vTj{s50p^-8J;Zkkhjher4* z+2hYfDKYHlLFvej(|cE9)D4AE%oJytDH0X~vn9d`>KMW*UV7ZVEEz_SkO0@*!b=>q z%u{IiIUdO`dXe~LxX58R=l{OQglBjIy)s-RcIJV?gcb;9K43kfN~;;C*HG-zM}P6m z6-m)dea2Qq(a(W)KY;>SQ+4HM2*#6O#hbLchPUy@85WGUud%{}3W0}83IwrH;rife z;@+0kUS~#q>!TZQvseeO$H4|Rp4g&f_sK;Rh;=f|Eo9k%wccG>uIN{9RC`)=>6hx+ zU#evYL^g?T066lo9chj}R;4*QfepmH2%c*7uL%>ef*D4-WZP<4y;43Bp>K-}-D;Gp zR-0|7+n{`*THWs24cQ_HH{v2C9EPBnQB)wG(5p4wV6*KAa1>IWx{IhjsJFA9Ch6UqJq_Gtc=c_HK?v`4QH5LM%6C zZW3(tT*l)QMk!iTm8r^k=q1!3hJX}NGxDa6)4@i14ly;^jLJMqvf7oF{|11> z?~O~V<$N^FgCNY4gl`JuJq$|I!fm$i44b7LZ(ssnkhNBkVc+UncYWOz2ol*l)xZOf z(kM}B1L0s9Kaycm>2R2|{m2VUT(k~GigME4<#^PPr$lM-T-6=djlX=U&mwMN#Q|UxUTjyND-)^$Z?2tFBPOi z^ByeIy$l#n-PbliNQ&YkfTPwapaVkI;WBR-{al>SBcC7CEByk-DTMj3fKbsC!Z}k2 zBo*<_;XRFh^e*8|^)m?aP+hAXo^F)sJs!k5uu4 zaDHzwd=4XEY|Nu4u(TG@?>X5*TlW(ktwz%NNZr1~Yb}N|;UQXU|HMyJobu4-Hp2x9 zRQI0`=J!s7&xa=xs3|{$8ry#eu{8=Y`Z2`eiKige#H+tH#Q#Zf-CFLS#P}x(BQYkP z-_jpx;zNvU(Z&u_gH>wVAO1was&c%lW3;M=Op9=CNDIo3tSUcP)eA8lmL(m+Ia<}; zqNKwZ#<_*){7BpWr|<>ll#}QmL%Da7;YyFd^{Wuj+`8C*5u?2rujK{igBQ1yN4j|H zk-q%`R&)w=Ul^Nos@_az>W_W(y^D8fQtoyJ#b z`U<-oB^;7W0aim?pc0)WlGBC0ry&o{;7oA@Qq-JvW()ITTVp<+GOX&H zi9S9+SnOk8hw1|8fv`0N^bA=ZJyOsojsoD!(NPCSEiu(`$`93i2}J`InGETEaa0aV zSO1`(ZmfR(?HkXAc`kvukia|wqn!UPx&WZXrvX|_0NPmR-+vrD5k)n8`kaw7Cr?)r z#Dwvc13A|n^hyx1I+$=A4PQ-W^?m^*<1PH^v(KJBY$D@~=Ou;4^DfDGg+VHZoH@K< 
zDL~~QY=|elVH_Df^C@Qp`M~TZfCt)i2jRd7Tzo{O?#LyK1N&wItT>6m%FD4o3;_`-f%fc9%YF4Y*>eM&MQcLS7w-ltDEqLtCBY>C0W&I zBhJO#8s!`94G-5Rs<2C<93S{{AJL?7Xl0nE(}B?(P1V~7dnJa5Aj^+U*CZQ} z8wG-b+?APz*TRL5s_pyUMvH8pN`Yf<@yru!EzMjqcyXBB?(%Sav~9dcc1yNti!Euw zU=(CyD~O|HmC_(^mkiG4^GgI_jgoB|H}7}X66m8eqTX1PXj98kIa<}}b{aqiZlJ`r z7az-sFQ8BPl#HPeo^hJy<0!qkQJG2Ny#s5lQWWmG2Nxq5fziaXr1@ALH#^o4FmrvT zR0WaNrtP(G)b98ccz1lP@@^%|IU-#p&spCE=^b3BaB=ZP79xWRmAo9bx`it?m8rN8 zcQgarmkiQv5o39xiGy#VbCi-aKSueCjF9@3Ojia&D|wJ3xtIbNxv%R~3=g$9 zcX5Nt%%FWR$qcSDNC}#tqB1)&PO=8g3o29H>Ig&_7v$tk0`Hr}2@5&4JINXP2R#1Y zBT*2LHsl}Kp4urq1)NG2kW!R0{?kf2^~%9IMT=3?a#`wgluzqTL`I7!&8o#zNu9;X zP66jS^7}yx$4n7HRg3y8jln(U5h#;06uG^GIZBAN(rFNSV#E^u&>s)YMI{4s8UNjz zeOfM$iCzc;SSGOtLsoL2+=G}x`1Nbjb*AHmvD*%bmZDR3M!Ss54g<8LvS=jXP%>f7 z3)HP(!o=HM9EESvW2n*gPJWc9=Q1Au1tfGq40qaat;4~6wt#LT&{?Za2E=tpSPo%) zYs8C+J^KOB?BWmEiTx$X)bRZS|TDAA> z%XXvIZ9wS3p08dS^U*7@-+G1;&V#mZFtnH9rru-jLGC?m2oEp9}0@;{{ z4wc5v{}?t2_k*2H2Cb*`H8{5j=E03P+NW!BxM5LOa5F+j=-VJXZ{r{itxJ3#39fsn zf!|UN_{~4f{ZnJTPcskdy>pk_%{54U3;4FkfP%5Up4=0%V05%0@92tG!?oHi1WnlZ zCbT7?y&oYT>EMfU8b%lmVaM+^QuQ=sgyRoita8WTsUuHu4-Gd7L-WWetT9aJ4AKO2 z=gq{^8>8o#H8h4gf=-Vg-$F(TLwULPp~5~U-Jry2vlNTvlzj?Que|;KwYM+7dE@OB z>+;o=x3O?*GYC5Vg>&aNAnCemINxfX8@V<+mhW^MjdQP@fBofigOj1y2C=?w{Z6pV zyAC0q3l^WsA;tI-RsF#itUm?~Yr`{53qN2DLk&k7d_|4kfMClY!xH+Qv0g5Z4?z7K zw~6Z_&r9 zw8(eRWn#}p*^6L)zF*u02@a6#BUa;xXiq+R3LcWZhw%6$*;x6^N{`}!az`bvVnp$X zpA$d?)nnY0O$g*y*Cdc^@sQ5BGl1+%qY~v{$cXuQJv<1+)gRF$Kcj@j)js8T=!eK{ zVQ8OVR&qtPcjOtvkFhsJ8R-jRasO|Su$+Zrg;vo54L4Xc+-IcwQ3&B}K(akl9;$&6 z>YtM2SVefmfMh^;365{_PUCJmUH(jQoqSVW2KNwgS6y@-Ajl_!2-{~K(6s<5Z_e;$ zqNuYOU^~%$MuehF4o=aHF{*_-#@O`N1XuhiV2(kYd9WVjTrRQ4_IA$;*`RF%22g z;)j&nr-To7SSNc_yhjObIzg~WB2QQlTWI)OJRAoxwOk?heI;cYIm0wfgAZtMV3R9o zs-T|bLX^Ud(M}v4$gbgkRalA$A~E00ic;OcYxqo*=n&_ZW&A(BO)k%;$WoRz4vl*d dXpwnQ9&o)uWBX)ZnHt>zE$C*x^nK;T{{WPf1>67t diff --git a/ultralytics/trackers/__pycache__/track.cpython-39.pyc b/ultralytics/trackers/__pycache__/track.cpython-39.pyc deleted file mode 100644 index a5b8d035b128c6a3e66bacb426f3310663b55c95..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 2336 zcmbtV-D(?06rP#g)#}f-V%#J)wB16%Q3;}j6k0cwIB^1LQztlvQsE*-J0oe8)$V3y zltf{DQzy`y^aYxN1-;2T^hxHnSAK=uq-SO&Tj}3LJ1{e6e$F|2&YbUCy0lb9(0cxB zwpKytce7Y*4lJ%gml+HUF&v{2Zewg@5|cK8*@>Nz+jd7@+q3&_TpIapA7qb};_|4{ zu2|WRtD{=Gh7qsv3M;=P?K*3)Dyu!i?Il)cOV3bynJu#wm{%CNkDBLBz$yR)X1segnGv5e9{FNX8Knt2q{{ z=3^Dk(U1(CBYa4d+d~mjUQUkiulOf?=;Y3@#GFS+-uVhK*Q|bq^!x(1f53fo=qewy z%freM-a+37hn}jMJL@QSSc&;B$RWwy<62Ipkot!QxtpU--KMBayAWPAfOb2HOpc&zH@OC(irshg(pZf?G#CK(SR86>F+ zHcyLSGk~Z!XN9m?M4`*`obCz$3VQna-6Wm_a-3yQEpI8)gq>;I7f8~_mxJzD1%2b^ zHQqdV4@^KUyA*P+t33fl+!0jq=92aYR8pmcFa;-8V9@@|w=i%NuGy;3wrmL66Ex{_ z?K7&Vq ziFaY|^_88igB{MZJDiFHD(7I5#_6L^4Im~%Z znA;<8v!?1(d{~;>&J8H7pR2PO{8iP`@$#vj!`$b@=uR>7USI(126KSTN>9JImr9k1 zG~`kSj4Oa8XF)f8!ewBpW}LwT^5Ai#27g9vOBfCkCOf+)Z=eB-c~-NzBHl4a=V5>? 
zs=|R-GqNwZG#C*dn3WS|W9=p55xk8P^Q3s?QsENnLmDh-CrXs|R4T%Ou29)gJOMUr z`eOamg&U1NmKsOefe3WTzPtoQ_QaM;F_`++8w(|nokiY(Lc9d$MKO>vu0kQKK_~e9 zY+S?Q6S(mf2OB;%1!0TfLlAOw3>>VmA~`(p3o<3R*mIO)WadKBJqiy{@Z%ou19up% zF-&hLFr;zZ1=-Swp`B66c>F&|v(SH=FpIr2vK0UF89>+nK$`!VDhqD6*lJ?20eQ2j z-EF$MP8sXWb0AE9?KOK2EWB)E9zI| rp%NR1{g#Eow1L>_EMJBA#xeiW_?CwK>ZD8>xK4nUNY$xcEI0lD82^Fl diff --git a/ultralytics/trackers/basetrack.py b/ultralytics/trackers/basetrack.py index 3c7b0f7..c900cac 100644 --- a/ultralytics/trackers/basetrack.py +++ b/ultralytics/trackers/basetrack.py @@ -1,4 +1,5 @@ # Ultralytics YOLO 🚀, AGPL-3.0 license +"""This module defines the base classes and structures for object tracking in YOLO.""" from collections import OrderedDict @@ -6,7 +7,15 @@ import numpy as np class TrackState: - """Enumeration of possible object tracking states.""" + """ + Enumeration class representing the possible states of an object being tracked. + + Attributes: + New (int): State when the object is newly detected. + Tracked (int): State when the object is successfully tracked in subsequent frames. + Lost (int): State when the object is no longer tracked. + Removed (int): State when the object is removed from tracking. + """ New = 0 Tracked = 1 @@ -15,24 +24,49 @@ class TrackState: class BaseTrack: - """Base class for object tracking, handling basic track attributes and operations.""" + """ + Base class for object tracking, providing foundational attributes and methods. + + Attributes: + _count (int): Class-level counter for unique track IDs. + track_id (int): Unique identifier for the track. + is_activated (bool): Flag indicating whether the track is currently active. + state (TrackState): Current state of the track. + history (OrderedDict): Ordered history of the track's states. + features (list): List of features extracted from the object for tracking. + curr_feature (any): The current feature of the object being tracked. + score (float): The confidence score of the tracking. + start_frame (int): The frame number where tracking started. + frame_id (int): The most recent frame ID processed by the track. + time_since_update (int): Frames passed since the last update. + location (tuple): The location of the object in the context of multi-camera tracking. + + Methods: + end_frame: Returns the ID of the last frame where the object was tracked. + next_id: Increments and returns the next global track ID. + activate: Abstract method to activate the track. + predict: Abstract method to predict the next state of the track. + update: Abstract method to update the track with new data. + mark_lost: Marks the track as lost. + mark_removed: Marks the track as removed. + reset_id: Resets the global track ID counter. 
+ """ _count = 0 - track_id = 0 - is_activated = False - state = TrackState.New - - history = OrderedDict() - features = [] - curr_feature = None - score = 0 - start_frame = 0 - frame_id = 0 - time_since_update = 0 - - # Multi-camera - location = (np.inf, np.inf) + def __init__(self): + """Initializes a new track with unique ID and foundational tracking attributes.""" + self.track_id = 0 + self.is_activated = False + self.state = TrackState.New + self.history = OrderedDict() + self.features = [] + self.curr_feature = None + self.score = 0 + self.start_frame = 0 + self.frame_id = 0 + self.time_since_update = 0 + self.location = (np.inf, np.inf) @property def end_frame(self): @@ -46,15 +80,15 @@ class BaseTrack: return BaseTrack._count def activate(self, *args): - """Activate the track with the provided arguments.""" + """Abstract method to activate the track with provided arguments.""" raise NotImplementedError def predict(self): - """Predict the next state of the track.""" + """Abstract method to predict the next state of the track.""" raise NotImplementedError def update(self, *args, **kwargs): - """Update the track with new observations.""" + """Abstract method to update the track with new observations.""" raise NotImplementedError def mark_lost(self): diff --git a/ultralytics/trackers/bot_sort.py b/ultralytics/trackers/bot_sort.py index 7bd63e5..31d5e1b 100644 --- a/ultralytics/trackers/bot_sort.py +++ b/ultralytics/trackers/bot_sort.py @@ -12,6 +12,34 @@ from .utils.kalman_filter import KalmanFilterXYWH class BOTrack(STrack): + """ + An extended version of the STrack class for YOLOv8, adding object tracking features. + + Attributes: + shared_kalman (KalmanFilterXYWH): A shared Kalman filter for all instances of BOTrack. + smooth_feat (np.ndarray): Smoothed feature vector. + curr_feat (np.ndarray): Current feature vector. + features (deque): A deque to store feature vectors with a maximum length defined by `feat_history`. + alpha (float): Smoothing factor for the exponential moving average of features. + mean (np.ndarray): The mean state of the Kalman filter. + covariance (np.ndarray): The covariance matrix of the Kalman filter. + + Methods: + update_features(feat): Update features vector and smooth it using exponential moving average. + predict(): Predicts the mean and covariance using Kalman filter. + re_activate(new_track, frame_id, new_id): Reactivates a track with updated features and optionally new ID. + update(new_track, frame_id): Update the YOLOv8 instance with new track and frame ID. + tlwh: Property that gets the current position in tlwh format `(top left x, top left y, width, height)`. + multi_predict(stracks): Predicts the mean and covariance of multiple object tracks using shared Kalman filter. + convert_coords(tlwh): Converts tlwh bounding box coordinates to xywh format. + tlwh_to_xywh(tlwh): Convert bounding box to xywh format `(center x, center y, width, height)`. + + Usage: + bo_track = BOTrack(tlwh, score, cls, feat) + bo_track.predict() + bo_track.update(new_track, frame_id) + """ + shared_kalman = KalmanFilterXYWH() def __init__(self, tlwh, score, cls, feat=None, feat_history=50): @@ -59,9 +87,7 @@ class BOTrack(STrack): @property def tlwh(self): - """Get current position in bounding box format `(top left x, top left y, - width, height)`. 
- """ + """Get current position in bounding box format `(top left x, top left y, width, height)`.""" if self.mean is None: return self._tlwh.copy() ret = self.mean[:4].copy() @@ -90,15 +116,37 @@ class BOTrack(STrack): @staticmethod def tlwh_to_xywh(tlwh): - """Convert bounding box to format `(center x, center y, width, - height)`. - """ + """Convert bounding box to format `(center x, center y, width, height)`.""" ret = np.asarray(tlwh).copy() ret[:2] += ret[2:] / 2 return ret class BOTSORT(BYTETracker): + """ + An extended version of the BYTETracker class for YOLOv8, designed for object tracking with ReID and GMC algorithm. + + Attributes: + proximity_thresh (float): Threshold for spatial proximity (IoU) between tracks and detections. + appearance_thresh (float): Threshold for appearance similarity (ReID embeddings) between tracks and detections. + encoder (object): Object to handle ReID embeddings, set to None if ReID is not enabled. + gmc (GMC): An instance of the GMC algorithm for data association. + args (object): Parsed command-line arguments containing tracking parameters. + + Methods: + get_kalmanfilter(): Returns an instance of KalmanFilterXYWH for object tracking. + init_track(dets, scores, cls, img): Initialize track with detections, scores, and classes. + get_dists(tracks, detections): Get distances between tracks and detections using IoU and (optionally) ReID. + multi_predict(tracks): Predict and track multiple objects with YOLOv8 model. + + Usage: + bot_sort = BOTSORT(args, frame_rate) + bot_sort.init_track(dets, scores, cls, img) + bot_sort.multi_predict(tracks) + + Note: + The class is designed to work with the YOLOv8 object detection model and supports ReID only if enabled via args. + """ def __init__(self, args, frame_rate=30): """Initialize YOLOv8 object with ReID module and GMC algorithm.""" @@ -110,8 +158,7 @@ class BOTSORT(BYTETracker): if args.with_reid: # Haven't supported BoT-SORT(reid) yet self.encoder = None - - # self.gmc = GMC(method=args.gmc_method) # commented by WQG + self.gmc = GMC(method=args.gmc_method) def get_kalmanfilter(self): """Returns an instance of KalmanFilterXYWH for object tracking.""" @@ -130,7 +177,7 @@ class BOTSORT(BYTETracker): def get_dists(self, tracks, detections): """Get distances between tracks and detections using IoU and (optionally) ReID embeddings.""" dists = matching.iou_distance(tracks, detections) - dists_mask = (dists > self.proximity_thresh) + dists_mask = dists > self.proximity_thresh # TODO: mot20 # if not self.args.mot20: @@ -146,3 +193,8 @@ class BOTSORT(BYTETracker): def multi_predict(self, tracks): """Predict and track multiple objects with YOLOv8 model.""" BOTrack.multi_predict(tracks) + + def reset(self): + """Reset tracker.""" + super().reset() + self.gmc.reset_params() diff --git a/ultralytics/trackers/byte_tracker.py b/ultralytics/trackers/byte_tracker.py index 91559df..01cbca9 100644 --- a/ultralytics/trackers/byte_tracker.py +++ b/ultralytics/trackers/byte_tracker.py @@ -1,29 +1,54 @@ -# Ultralytics YOLO 🚀, AGPL-3.0 license +# Ultralytics YOLO 🚀, AGPL-3.0 license import numpy as np from .basetrack import BaseTrack, TrackState from .utils import matching from .utils.kalman_filter import KalmanFilterXYAH - - -def dists_update(dists, strack_pool, detections): - if len(strack_pool) and len(detections): - alabel = np.array([int(stack.cls) for stack in strack_pool]) - blabel = np.array([int(stack.cls) for stack in detections]) - amlabel = np.expand_dims(alabel, axis=1).repeat(len(detections),axis=1) - bmlabel = 
np.expand_dims(blabel, axis=0).repeat(len(strack_pool),axis=0) - dist_label = 1 - (bmlabel == amlabel) - dists = np.where(dists > dist_label, dists, dist_label) - return dists +from ..utils.ops import xywh2ltwh +from ..utils import LOGGER class STrack(BaseTrack): + """ + Single object tracking representation that uses Kalman filtering for state estimation. + + This class is responsible for storing all the information regarding individual tracklets and performs state updates + and predictions based on Kalman filter. + + Attributes: + shared_kalman (KalmanFilterXYAH): Shared Kalman filter that is used across all STrack instances for prediction. + _tlwh (np.ndarray): Private attribute to store top-left corner coordinates and width and height of bounding box. + kalman_filter (KalmanFilterXYAH): Instance of Kalman filter used for this particular object track. + mean (np.ndarray): Mean state estimate vector. + covariance (np.ndarray): Covariance of state estimate. + is_activated (bool): Boolean flag indicating if the track has been activated. + score (float): Confidence score of the track. + tracklet_len (int): Length of the tracklet. + cls (any): Class label for the object. + idx (int): Index or identifier for the object. + frame_id (int): Current frame ID. + start_frame (int): Frame where the object was first detected. + + Methods: + predict(): Predict the next state of the object using Kalman filter. + multi_predict(stracks): Predict the next states for multiple tracks. + multi_gmc(stracks, H): Update multiple track states using a homography matrix. + activate(kalman_filter, frame_id): Activate a new tracklet. + re_activate(new_track, frame_id, new_id): Reactivate a previously lost tracklet. + update(new_track, frame_id): Update the state of a matched track. + convert_coords(tlwh): Convert bounding box to x-y-aspect-height format. + tlwh_to_xyah(tlwh): Convert tlwh bounding box to xyah format. 
+ """ + shared_kalman = KalmanFilterXYAH() - def __init__(self, tlwh, score, cls): - """wait activate.""" - self._tlwh = np.asarray(self.tlbr_to_tlwh(tlwh[:-1]), dtype=np.float32) + def __init__(self, xywh, score, cls): + """Initialize new STrack instance.""" + super().__init__() + # xywh+idx or xywha+idx + assert len(xywh) in [5, 6], f"expected 5 or 6 values but got {len(xywh)}" + self._tlwh = np.asarray(xywh2ltwh(xywh[:4]), dtype=np.float32) self.kalman_filter = None self.mean, self.covariance = None, None self.is_activated = False @@ -31,7 +56,8 @@ class STrack(BaseTrack): self.score = score self.tracklet_len = 0 self.cls = cls - self.idx = tlwh[-1] + self.idx = xywh[-1] + self.angle = xywh[4] if len(xywh) == 6 else None def predict(self): """Predicts mean and covariance using Kalman filter.""" @@ -89,8 +115,9 @@ class STrack(BaseTrack): def re_activate(self, new_track, frame_id, new_id=False): """Reactivates a previously lost track with a new detection.""" - self.mean, self.covariance = self.kalman_filter.update(self.mean, self.covariance, - self.convert_coords(new_track.tlwh)) + self.mean, self.covariance = self.kalman_filter.update( + self.mean, self.covariance, self.convert_coords(new_track.tlwh) + ) self.tracklet_len = 0 self.state = TrackState.Tracked self.is_activated = True @@ -99,37 +126,39 @@ class STrack(BaseTrack): self.track_id = self.next_id() self.score = new_track.score self.cls = new_track.cls + self.angle = new_track.angle self.idx = new_track.idx def update(self, new_track, frame_id): """ - Update a matched track - :type new_track: STrack - :type frame_id: int - :return: + Update the state of a matched track. + + Args: + new_track (STrack): The new track containing updated information. + frame_id (int): The ID of the current frame. """ self.frame_id = frame_id self.tracklet_len += 1 new_tlwh = new_track.tlwh - self.mean, self.covariance = self.kalman_filter.update(self.mean, self.covariance, - self.convert_coords(new_tlwh)) + self.mean, self.covariance = self.kalman_filter.update( + self.mean, self.covariance, self.convert_coords(new_tlwh) + ) self.state = TrackState.Tracked self.is_activated = True self.score = new_track.score self.cls = new_track.cls + self.angle = new_track.angle self.idx = new_track.idx def convert_coords(self, tlwh): - """Convert a bounding box's top-left-width-height format to its x-y-angle-height equivalent.""" + """Convert a bounding box's top-left-width-height format to its x-y-aspect-height equivalent.""" return self.tlwh_to_xyah(tlwh) @property def tlwh(self): - """Get current position in bounding box format `(top left x, top left y, - width, height)`. - """ + """Get current position in bounding box format (top left x, top left y, width, height).""" if self.mean is None: return self._tlwh.copy() ret = self.mean[:4].copy() @@ -138,44 +167,76 @@ class STrack(BaseTrack): return ret @property - def tlbr(self): - """Convert bounding box to format `(min x, min y, max x, max y)`, i.e., - `(top left, bottom right)`. - """ + def xyxy(self): + """Convert bounding box to format (min x, min y, max x, max y), i.e., (top left, bottom right).""" ret = self.tlwh.copy() ret[2:] += ret[:2] return ret @staticmethod def tlwh_to_xyah(tlwh): - """Convert bounding box to format `(center x, center y, aspect ratio, - height)`, where the aspect ratio is `width / height`. + """Convert bounding box to format (center x, center y, aspect ratio, height), where the aspect ratio is width / + height. 
""" ret = np.asarray(tlwh).copy() ret[:2] += ret[2:] / 2 ret[2] /= ret[3] return ret - @staticmethod - def tlbr_to_tlwh(tlbr): - """Converts top-left bottom-right format to top-left width height format.""" - ret = np.asarray(tlbr).copy() - ret[2:] -= ret[:2] + @property + def xywh(self): + """Get current position in bounding box format (center x, center y, width, height).""" + ret = np.asarray(self.tlwh).copy() + ret[:2] += ret[2:] / 2 return ret - @staticmethod - def tlwh_to_tlbr(tlwh): - """Converts tlwh bounding box format to tlbr format.""" - ret = np.asarray(tlwh).copy() - ret[2:] += ret[:2] - return ret + @property + def xywha(self): + """Get current position in bounding box format (center x, center y, width, height, angle).""" + if self.angle is None: + LOGGER.warning("WARNING ⚠️ `angle` attr not found, returning `xywh` instead.") + return self.xywh + return np.concatenate([self.xywh, self.angle[None]]) + + @property + def result(self): + """Get current tracking results.""" + coords = self.xyxy if self.angle is None else self.xywha + return coords.tolist() + [self.track_id, self.score, self.cls, self.idx] def __repr__(self): """Return a string representation of the BYTETracker object with start and end frames and track ID.""" - return f'OT_{self.track_id}_({self.start_frame}-{self.end_frame})' + return f"OT_{self.track_id}_({self.start_frame}-{self.end_frame})" class BYTETracker: + """ + BYTETracker: A tracking algorithm built on top of YOLOv8 for object detection and tracking. + + The class is responsible for initializing, updating, and managing the tracks for detected objects in a video + sequence. It maintains the state of tracked, lost, and removed tracks over frames, utilizes Kalman filtering for + predicting the new object locations, and performs data association. + + Attributes: + tracked_stracks (list[STrack]): List of successfully activated tracks. + lost_stracks (list[STrack]): List of lost tracks. + removed_stracks (list[STrack]): List of removed tracks. + frame_id (int): The current frame ID. + args (namespace): Command-line arguments. + max_time_lost (int): The maximum frames for a track to be considered as 'lost'. + kalman_filter (object): Kalman Filter object. + + Methods: + update(results, img=None): Updates object tracker with new detections. + get_kalmanfilter(): Returns a Kalman filter object for tracking bounding boxes. + init_track(dets, scores, cls, img=None): Initialize object tracking with detections. + get_dists(tracks, detections): Calculates the distance between tracks and detections. + multi_predict(tracks): Predicts the location of tracks. + reset_id(): Resets the ID counter of STrack. + joint_stracks(tlista, tlistb): Combines two lists of stracks. + sub_stracks(tlista, tlistb): Filters out the stracks present in the second list from the first list. + remove_duplicate_stracks(stracksa, stracksb): Removes duplicate stracks based on IoU. 
+ """ def __init__(self, args, frame_rate=30): """Initialize a YOLOv8 object to track objects with given arguments and frame rate.""" @@ -198,7 +259,7 @@ class BYTETracker: removed_stracks = [] scores = results.conf - bboxes = results.xyxy + bboxes = results.xywhr if hasattr(results, "xywhr") else results.xywh # Add index bboxes = np.concatenate([bboxes, np.arange(len(bboxes)).reshape(-1, 1)], axis=-1) cls = results.cls @@ -216,7 +277,6 @@ class BYTETracker: cls_second = cls[inds_second] detections = self.init_track(dets, scores_keep, cls_keep, img) - # Add newly detected tracklets to tracked_stracks unconfirmed = [] tracked_stracks = [] # type: list[STrack] @@ -225,24 +285,18 @@ class BYTETracker: unconfirmed.append(track) else: tracked_stracks.append(track) - - # Step 2: First association, with high score detection boxes strack_pool = self.joint_stracks(tracked_stracks, self.lost_stracks) # Predict the current location with KF self.multi_predict(strack_pool) - -# ============================================================= 没必要gmc,WQG -# if hasattr(self, 'gmc') and img is not None: -# warp = self.gmc.apply(img, dets) -# STrack.multi_gmc(strack_pool, warp) -# STrack.multi_gmc(unconfirmed, warp) -# ============================================================================= + if hasattr(self, "gmc") and img is not None: + warp = self.gmc.apply(img, dets) + STrack.multi_gmc(strack_pool, warp) + STrack.multi_gmc(unconfirmed, warp) dists = self.get_dists(strack_pool, detections) - dists = dists_update(dists, strack_pool, detections) - matches, u_track, u_detection = matching.linear_assignment(dists, thresh=self.args.match_thresh) + for itracked, idet in matches: track = strack_pool[itracked] det = detections[idet] @@ -252,17 +306,11 @@ class BYTETracker: else: track.re_activate(det, self.frame_id, new_id=False) refind_stracks.append(track) - - - # Step 3: Second association, with low score detection boxes - # association the untrack to the low score detections + # Step 3: Second association, with low score detection boxes association the untrack to the low score detections detections_second = self.init_track(dets_second, scores_second, cls_second, img) r_tracked_stracks = [strack_pool[i] for i in u_track if strack_pool[i].state == TrackState.Tracked] - # TODO dists = matching.iou_distance(r_tracked_stracks, detections_second) - dists = dists_update(dists, r_tracked_stracks, detections_second) - matches, u_track, u_detection_second = matching.linear_assignment(dists, thresh=0.5) for itracked, idet in matches: track = r_tracked_stracks[itracked] @@ -279,13 +327,9 @@ class BYTETracker: if track.state != TrackState.Lost: track.mark_lost() lost_stracks.append(track) - # Deal with unconfirmed tracks, usually tracks with only one beginning frame detections = [detections[i] for i in u_detection] dists = self.get_dists(unconfirmed, detections) - - dists = dists_update(dists, unconfirmed, detections) - matches, u_unconfirmed, u_detection = matching.linear_assignment(dists, thresh=0.7) for itracked, idet in matches: unconfirmed[itracked].update(detections[idet], self.frame_id) @@ -317,9 +361,8 @@ class BYTETracker: self.removed_stracks.extend(removed_stracks) if len(self.removed_stracks) > 1000: self.removed_stracks = self.removed_stracks[-999:] # clip remove stracks to 1000 maximum - return np.asarray( - [x.tlbr.tolist() + [x.track_id, x.score, x.cls, x.idx] for x in self.tracked_stracks if x.is_activated], - dtype=np.float32) + + return np.asarray([x.result for x in self.tracked_stracks if 
x.is_activated], dtype=np.float32) def get_kalmanfilter(self): """Returns a Kalman filter object for tracking bounding boxes.""" @@ -330,7 +373,7 @@ class BYTETracker: return [STrack(xyxy, s, c) for (xyxy, s, c) in zip(dets, scores, cls)] if len(dets) else [] # detections def get_dists(self, tracks, detections): - """Calculates the distance between tracks and detections using IOU and fuses scores.""" + """Calculates the distance between tracks and detections using IoU and fuses scores.""" dists = matching.iou_distance(tracks, detections) # TODO: mot20 # if not self.args.mot20: @@ -341,10 +384,20 @@ class BYTETracker: """Returns the predicted tracks using the YOLOv8 network.""" STrack.multi_predict(tracks) - def reset_id(self): + @staticmethod + def reset_id(): """Resets the ID counter of STrack.""" STrack.reset_id() + def reset(self): + """Reset tracker.""" + self.tracked_stracks = [] # type: list[STrack] + self.lost_stracks = [] # type: list[STrack] + self.removed_stracks = [] # type: list[STrack] + self.frame_id = 0 + self.kalman_filter = self.get_kalmanfilter() + self.reset_id() + @staticmethod def joint_stracks(tlista, tlistb): """Combine two lists of stracks into a single one.""" @@ -375,7 +428,7 @@ class BYTETracker: @staticmethod def remove_duplicate_stracks(stracksa, stracksb): - """Remove duplicate stracks with non-maximum IOU distance.""" + """Remove duplicate stracks with non-maximum IoU distance.""" pdist = matching.iou_distance(stracksa, stracksb) pairs = np.where(pdist < 0.15) dupa, dupb = [], [] diff --git a/ultralytics/trackers/track.py b/ultralytics/trackers/track.py index cfb4b08..7146a40 100644 --- a/ultralytics/trackers/track.py +++ b/ultralytics/trackers/track.py @@ -1,19 +1,20 @@ # Ultralytics YOLO 🚀, AGPL-3.0 license from functools import partial +from pathlib import Path import torch from ultralytics.utils import IterableSimpleNamespace, yaml_load from ultralytics.utils.checks import check_yaml - from .bot_sort import BOTSORT from .byte_tracker import BYTETracker -TRACKER_MAP = {'bytetrack': BYTETracker, 'botsort': BOTSORT} +# A mapping of tracker types to corresponding tracker classes +TRACKER_MAP = {"bytetrack": BYTETracker, "botsort": BOTSORT} -def on_predict_start(predictor, persist=False): +def on_predict_start(predictor: object, persist: bool = False) -> None: """ Initialize trackers for object tracking during prediction. @@ -24,43 +25,65 @@ def on_predict_start(predictor, persist=False): Raises: AssertionError: If the tracker_type is not 'bytetrack' or 'botsort'. """ - if hasattr(predictor, 'trackers') and persist: + if hasattr(predictor, "trackers") and persist: return + tracker = check_yaml(predictor.args.tracker) cfg = IterableSimpleNamespace(**yaml_load(tracker)) - assert cfg.tracker_type in ['bytetrack', 'botsort'], \ - f"Only support 'bytetrack' and 'botsort' for now, but got '{cfg.tracker_type}'" + + if cfg.tracker_type not in ["bytetrack", "botsort"]: + raise AssertionError(f"Only 'bytetrack' and 'botsort' are supported for now, but got '{cfg.tracker_type}'") + trackers = [] for _ in range(predictor.dataset.bs): tracker = TRACKER_MAP[cfg.tracker_type](args=cfg, frame_rate=30) trackers.append(tracker) + if predictor.dataset.mode != "stream": # only need one tracker for other modes. 
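+            # Non-stream sources are read one at a time, so every batch slot can share this single tracker.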
+ break predictor.trackers = trackers + predictor.vid_path = [None] * predictor.dataset.bs # for determining when to reset tracker on new video -def on_predict_postprocess_end(predictor): - """Postprocess detected boxes and update with object tracking.""" - bs = predictor.dataset.bs - im0s = predictor.batch[1] - for i in range(bs): - det = predictor.results[i].boxes.cpu().numpy() +def on_predict_postprocess_end(predictor: object, persist: bool = False) -> None: + """ + Postprocess detected boxes and update with object tracking. + + Args: + predictor (object): The predictor object containing the predictions. + persist (bool, optional): Whether to persist the trackers if they already exist. Defaults to False. + """ + path, im0s = predictor.batch[:2] + + is_obb = predictor.args.task == "obb" + is_stream = predictor.dataset.mode == "stream" + for i in range(len(im0s)): + tracker = predictor.trackers[i if is_stream else 0] + vid_path = predictor.save_dir / Path(path[i]).name + if not persist and predictor.vid_path[i if is_stream else 0] != vid_path: + tracker.reset() + predictor.vid_path[i if is_stream else 0] = vid_path + + det = (predictor.results[i].obb if is_obb else predictor.results[i].boxes).cpu().numpy() if len(det) == 0: continue - tracks = predictor.trackers[i].update(det, im0s[i]) + tracks = tracker.update(det, im0s[i]) if len(tracks) == 0: continue idx = tracks[:, -1].astype(int) predictor.results[i] = predictor.results[i][idx] - predictor.results[i].update(boxes=torch.as_tensor(tracks[:, :-1])) + + update_args = dict() + update_args["obb" if is_obb else "boxes"] = torch.as_tensor(tracks[:, :-1]) + predictor.results[i].update(**update_args) -def register_tracker(model, persist): +def register_tracker(model: object, persist: bool) -> None: """ Register tracking callbacks to the model for object tracking during prediction. Args: model (object): The model object to register tracking callbacks for. persist (bool): Whether to persist the trackers if they already exist. - """ - model.add_callback('on_predict_start', partial(on_predict_start, persist=persist)) - model.add_callback('on_predict_postprocess_end', on_predict_postprocess_end) + model.add_callback("on_predict_start", partial(on_predict_start, persist=persist)) + model.add_callback("on_predict_postprocess_end", partial(on_predict_postprocess_end, persist=persist)) diff --git a/ultralytics/trackers/utils/__pycache__/__init__.cpython-39.pyc b/ultralytics/trackers/utils/__pycache__/__init__.cpython-39.pyc deleted file mode 100644 index 6cfb15e0f66daf3c496099bed8bcfb250f1c5e00..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 152 zcmYe~<>g`kf;9GDDOy1KF^Gc<7=auIATDMB5-AM944RC7D;bJF!U*D*t&3HROKNI? 
[remaining binary patch data omitted]
diff --git a/ultralytics/trackers/utils/__pycache__/gmc.cpython-39.pyc b/ultralytics/trackers/utils/__pycache__/gmc.cpython-39.pyc
deleted file mode 100644
index 82e0b1bdc602aea415be31bba594028830f7d387..0000000000000000000000000000000000000000
[GIT binary patch data omitted]
diff --git a/ultralytics/trackers/utils/__pycache__/kalman_filter.cpython-39.pyc b/ultralytics/trackers/utils/__pycache__/kalman_filter.cpython-39.pyc
deleted file mode 100644
index 4c34a52d6bfc5abd58591b5e0a50e41bef9e2ad8..0000000000000000000000000000000000000000
[GIT binary patch data omitted]
diff --git a/ultralytics/trackers/utils/__pycache__/matching.cpython-39.pyc b/ultralytics/trackers/utils/__pycache__/matching.cpython-39.pyc
deleted file mode 100644
index 4fe80d6b2b27cbf6dd683d97ead8beeef29bc417..0000000000000000000000000000000000000000
[GIT binary patch data omitted]
diff --git a/ultralytics/trackers/utils/gmc.py b/ultralytics/trackers/utils/gmc.py
--- a/ultralytics/trackers/utils/gmc.py
+++ b/ultralytics/trackers/utils/gmc.py
[hunk header and the opening lines of this hunk were lost to extraction damage; the recoverable text resumes at the updated __init__ signature]
+    def __init__(self, method: str = "sparseOptFlow", downscale: int = 2) -> None:
+        """
+        Initialize a video tracker with specified parameters.
+
+        Args:
+            method (str): The method used for tracking. Options include 'orb', 'sift', 'ecc', 'sparseOptFlow', 'none'.
+            downscale (int): Downscale factor for processing frames.
+        """
         super().__init__()

         self.method = method
         self.downscale = max(1, int(downscale))

-        if self.method == 'orb':
+        if self.method == "orb":
             self.detector = cv2.FastFeatureDetector_create(20)
             self.extractor = cv2.ORB_create()
             self.matcher = cv2.BFMatcher(cv2.NORM_HAMMING)
-        elif self.method == 'sift':
+        elif self.method == "sift":
             self.detector = cv2.SIFT_create(nOctaveLayers=3, contrastThreshold=0.02, edgeThreshold=20)
             self.extractor = cv2.SIFT_create(nOctaveLayers=3, contrastThreshold=0.02, edgeThreshold=20)
             self.matcher = cv2.BFMatcher(cv2.NORM_L2)
-        elif self.method == 'ecc':
+        elif self.method == "ecc":
             number_of_iterations = 5000
             termination_eps = 1e-6
             self.warp_mode = cv2.MOTION_EUCLIDEAN
             self.criteria = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, number_of_iterations, termination_eps)
-        elif self.method == 'sparseOptFlow':
-            self.feature_params = dict(maxCorners=1000,
-                                       qualityLevel=0.01,
-                                       minDistance=1,
-                                       blockSize=3,
-                                       useHarrisDetector=False,
-                                       k=0.04)
+        elif self.method == "sparseOptFlow":
+            self.feature_params = dict(
+                maxCorners=1000, qualityLevel=0.01, minDistance=1, blockSize=3, useHarrisDetector=False, k=0.04
+            )
-        elif self.method in ['none', 'None', None]:
+        elif self.method in {"none", "None", None}:
             self.method = None
         else:
-            raise ValueError(f'Error: Unknown GMC method:{method}')
+            raise ValueError(f"Error: Unknown GMC method:{method}")

         self.prevFrame = None
         self.prevKeyPoints = None
         self.prevDescriptors = None
-
         self.initializedFirstFrame = False

-    def apply(self, raw_frame, detections=None):
-        """Apply object detection on a raw frame using specified method."""
-        if self.method in ['orb', 'sift']:
+    def apply(self, raw_frame: np.array, detections: list = None) -> np.array:
+        """
+        Apply object detection on a raw frame using specified method.
+
+        Args:
+            raw_frame (np.ndarray): The raw frame to be processed.
+            detections (list): List of detections to be used in the processing.
+
+        Returns:
+            (np.ndarray): Processed frame.
+ + Examples: + >>> gmc = GMC() + >>> gmc.apply(np.array([[1, 2, 3], [4, 5, 6]])) + array([[1, 2, 3], + [4, 5, 6]]) + """ + if self.method in ["orb", "sift"]: return self.applyFeatures(raw_frame, detections) - elif self.method == 'ecc': - return self.applyEcc(raw_frame, detections) - elif self.method == 'sparseOptFlow': - return self.applySparseOptFlow(raw_frame, detections) + elif self.method == "ecc": + return self.applyEcc(raw_frame) + elif self.method == "sparseOptFlow": + return self.applySparseOptFlow(raw_frame) else: return np.eye(2, 3) - def applyEcc(self, raw_frame, detections=None): - """Initialize.""" + def applyEcc(self, raw_frame: np.array) -> np.array: + """ + Apply ECC algorithm to a raw frame. + + Args: + raw_frame (np.ndarray): The raw frame to be processed. + + Returns: + (np.ndarray): Processed frame. + + Examples: + >>> gmc = GMC() + >>> gmc.applyEcc(np.array([[1, 2, 3], [4, 5, 6]])) + array([[1, 2, 3], + [4, 5, 6]]) + """ height, width, _ = raw_frame.shape frame = cv2.cvtColor(raw_frame, cv2.COLOR_BGR2GRAY) H = np.eye(2, 3, dtype=np.float32) - # Downscale image (TODO: consider using pyramids) + # Downscale image if self.downscale > 1.0: frame = cv2.GaussianBlur(frame, (3, 3), 1.5) frame = cv2.resize(frame, (width // self.downscale, height // self.downscale)) @@ -89,33 +143,46 @@ class GMC: # Run the ECC algorithm. The results are stored in warp_matrix. # (cc, H) = cv2.findTransformECC(self.prevFrame, frame, H, self.warp_mode, self.criteria) try: - (cc, H) = cv2.findTransformECC(self.prevFrame, frame, H, self.warp_mode, self.criteria, None, 1) + (_, H) = cv2.findTransformECC(self.prevFrame, frame, H, self.warp_mode, self.criteria, None, 1) except Exception as e: - LOGGER.warning(f'WARNING: find transform failed. Set warp as identity {e}') + LOGGER.warning(f"WARNING: find transform failed. Set warp as identity {e}") return H - def applyFeatures(self, raw_frame, detections=None): - """Initialize.""" + def applyFeatures(self, raw_frame: np.array, detections: list = None) -> np.array: + """ + Apply feature-based methods like ORB or SIFT to a raw frame. + + Args: + raw_frame (np.ndarray): The raw frame to be processed. + detections (list): List of detections to be used in the processing. + + Returns: + (np.ndarray): Processed frame. + + Examples: + >>> gmc = GMC() + >>> gmc.applyFeatures(np.array([[1, 2, 3], [4, 5, 6]])) + array([[1, 2, 3], + [4, 5, 6]]) + """ height, width, _ = raw_frame.shape frame = cv2.cvtColor(raw_frame, cv2.COLOR_BGR2GRAY) H = np.eye(2, 3) - # Downscale image (TODO: consider using pyramids) + # Downscale image if self.downscale > 1.0: - # frame = cv2.GaussianBlur(frame, (3, 3), 1.5) frame = cv2.resize(frame, (width // self.downscale, height // self.downscale)) width = width // self.downscale height = height // self.downscale # Find the keypoints mask = np.zeros_like(frame) - # mask[int(0.05 * height): int(0.95 * height), int(0.05 * width): int(0.95 * width)] = 255 - mask[int(0.02 * height):int(0.98 * height), int(0.02 * width):int(0.98 * width)] = 255 + mask[int(0.02 * height) : int(0.98 * height), int(0.02 * width) : int(0.98 * width)] = 255 if detections is not None: for det in detections: tlbr = (det[:4] / self.downscale).astype(np.int_) - mask[tlbr[1]:tlbr[3], tlbr[0]:tlbr[2]] = 0 + mask[tlbr[1] : tlbr[3], tlbr[0] : tlbr[2]] = 0 keypoints = self.detector.detect(frame, mask) @@ -134,10 +201,10 @@ class GMC: return H - # Match descriptors. 
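+        # Two-nearest-neighbour descriptor matching (k=2); candidate matches are gated by spatial distance below.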
+ # Match descriptors knnMatches = self.matcher.knnMatch(self.prevDescriptors, descriptors, 2) - # Filtered matches based on smallest spatial distance + # Filter matches based on smallest spatial distance matches = [] spatialDistances = [] @@ -157,11 +224,14 @@ class GMC: prevKeyPointLocation = self.prevKeyPoints[m.queryIdx].pt currKeyPointLocation = keypoints[m.trainIdx].pt - spatialDistance = (prevKeyPointLocation[0] - currKeyPointLocation[0], - prevKeyPointLocation[1] - currKeyPointLocation[1]) + spatialDistance = ( + prevKeyPointLocation[0] - currKeyPointLocation[0], + prevKeyPointLocation[1] - currKeyPointLocation[1], + ) - if (np.abs(spatialDistance[0]) < maxSpatialDistance[0]) and \ - (np.abs(spatialDistance[1]) < maxSpatialDistance[1]): + if (np.abs(spatialDistance[0]) < maxSpatialDistance[0]) and ( + np.abs(spatialDistance[1]) < maxSpatialDistance[1] + ): spatialDistances.append(spatialDistance) matches.append(m) @@ -187,7 +257,7 @@ class GMC: # import matplotlib.pyplot as plt # matches_img = np.hstack((self.prevFrame, frame)) # matches_img = cv2.cvtColor(matches_img, cv2.COLOR_GRAY2BGR) - # W = np.size(self.prevFrame, 1) + # W = self.prevFrame.shape[1] # for m in goodMatches: # prev_pt = np.array(self.prevKeyPoints[m.queryIdx].pt, dtype=np.int_) # curr_pt = np.array(keypoints[m.trainIdx].pt, dtype=np.int_) @@ -204,7 +274,7 @@ class GMC: # plt.show() # Find rigid matrix - if (np.size(prevPoints, 0) > 4) and (np.size(prevPoints, 0) == np.size(prevPoints, 0)): + if prevPoints.shape[0] > 4: H, inliers = cv2.estimateAffinePartial2D(prevPoints, currPoints, cv2.RANSAC) # Handle downscale @@ -212,7 +282,7 @@ class GMC: H[0, 2] *= self.downscale H[1, 2] *= self.downscale else: - LOGGER.warning('WARNING: not enough matching points') + LOGGER.warning("WARNING: not enough matching points") # Store to next iteration self.prevFrame = frame.copy() @@ -221,15 +291,28 @@ class GMC: return H - def applySparseOptFlow(self, raw_frame, detections=None): - """Initialize.""" + def applySparseOptFlow(self, raw_frame: np.array) -> np.array: + """ + Apply Sparse Optical Flow method to a raw frame. + + Args: + raw_frame (np.ndarray): The raw frame to be processed. + + Returns: + (np.ndarray): Processed frame. 
+ + Examples: + >>> gmc = GMC() + >>> gmc.applySparseOptFlow(np.array([[1, 2, 3], [4, 5, 6]])) + array([[1, 2, 3], + [4, 5, 6]]) + """ height, width, _ = raw_frame.shape frame = cv2.cvtColor(raw_frame, cv2.COLOR_BGR2GRAY) H = np.eye(2, 3) # Downscale image if self.downscale > 1.0: - # frame = cv2.GaussianBlur(frame, (3, 3), 1.5) frame = cv2.resize(frame, (width // self.downscale, height // self.downscale)) # Find the keypoints @@ -237,17 +320,13 @@ class GMC: # Handle first frame if not self.initializedFirstFrame: - # Initialize data self.prevFrame = frame.copy() self.prevKeyPoints = copy.copy(keypoints) - - # Initialization done self.initializedFirstFrame = True - return H # Find correspondences - matchedKeypoints, status, err = cv2.calcOpticalFlowPyrLK(self.prevFrame, frame, self.prevKeyPoints, None) + matchedKeypoints, status, _ = cv2.calcOpticalFlowPyrLK(self.prevFrame, frame, self.prevKeyPoints, None) # Leave good correspondences only prevPoints = [] @@ -262,18 +341,23 @@ class GMC: currPoints = np.array(currPoints) # Find rigid matrix - if (np.size(prevPoints, 0) > 4) and (np.size(prevPoints, 0) == np.size(prevPoints, 0)): - H, inliers = cv2.estimateAffinePartial2D(prevPoints, currPoints, cv2.RANSAC) + if (prevPoints.shape[0] > 4) and (prevPoints.shape[0] == prevPoints.shape[0]): + H, _ = cv2.estimateAffinePartial2D(prevPoints, currPoints, cv2.RANSAC) - # Handle downscale if self.downscale > 1.0: H[0, 2] *= self.downscale H[1, 2] *= self.downscale else: - LOGGER.warning('WARNING: not enough matching points') + LOGGER.warning("WARNING: not enough matching points") - # Store to next iteration self.prevFrame = frame.copy() self.prevKeyPoints = copy.copy(keypoints) return H + + def reset_params(self) -> None: + """Reset parameters.""" + self.prevFrame = None + self.prevKeyPoints = None + self.prevDescriptors = None + self.initializedFirstFrame = False diff --git a/ultralytics/trackers/utils/kalman_filter.py b/ultralytics/trackers/utils/kalman_filter.py index 9527ede..4ae68be 100644 --- a/ultralytics/trackers/utils/kalman_filter.py +++ b/ultralytics/trackers/utils/kalman_filter.py @@ -8,8 +8,8 @@ class KalmanFilterXYAH: """ For bytetrack. A simple Kalman filter for tracking bounding boxes in image space. - The 8-dimensional state space (x, y, a, h, vx, vy, va, vh) contains the bounding box center position (x, y), - aspect ratio a, height h, and their respective velocities. + The 8-dimensional state space (x, y, a, h, vx, vy, va, vh) contains the bounding box center position (x, y), aspect + ratio a, height h, and their respective velocities. Object motion follows a constant velocity model. The bounding box location (x, y, a, h) is taken as direct observation of the state space (linear observation model). @@ -17,126 +17,126 @@ class KalmanFilterXYAH: def __init__(self): """Initialize Kalman filter model matrices with motion and observation uncertainty weights.""" - ndim, dt = 4, 1. + ndim, dt = 4, 1.0 - # Create Kalman filter model matrices. + # Create Kalman filter model matrices self._motion_mat = np.eye(2 * ndim, 2 * ndim) for i in range(ndim): self._motion_mat[i, ndim + i] = dt self._update_mat = np.eye(ndim, 2 * ndim) # Motion and observation uncertainty are chosen relative to the current state estimate. These weights control - # the amount of uncertainty in the model. This is a bit hacky. - self._std_weight_position = 1. / 20 - self._std_weight_velocity = 1. / 160 + # the amount of uncertainty in the model. 
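+        # Downstream these weights are multiplied by the box height h, giving position std h / 20 and
+        # velocity std h / 160, so the tolerated deviation grows with object size.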
+ self._std_weight_position = 1.0 / 20 + self._std_weight_velocity = 1.0 / 160 - def initiate(self, measurement): + def initiate(self, measurement: np.ndarray) -> tuple: """ Create track from unassociated measurement. - Parameters - ---------- - measurement : ndarray - Bounding box coordinates (x, y, a, h) with center position (x, y), - aspect ratio a, and height h. + Args: + measurement (ndarray): Bounding box coordinates (x, y, a, h) with center position (x, y), aspect ratio a, + and height h. - Returns - ------- - (ndarray, ndarray) - Returns the mean vector (8 dimensional) and covariance matrix (8x8 - dimensional) of the new track. Unobserved velocities are initialized - to 0 mean. + Returns: + (tuple[ndarray, ndarray]): Returns the mean vector (8 dimensional) and covariance matrix (8x8 dimensional) of + the new track. Unobserved velocities are initialized to 0 mean. """ mean_pos = measurement mean_vel = np.zeros_like(mean_pos) mean = np.r_[mean_pos, mean_vel] std = [ - 2 * self._std_weight_position * measurement[3], 2 * self._std_weight_position * measurement[3], 1e-2, - 2 * self._std_weight_position * measurement[3], 10 * self._std_weight_velocity * measurement[3], - 10 * self._std_weight_velocity * measurement[3], 1e-5, 10 * self._std_weight_velocity * measurement[3]] + 2 * self._std_weight_position * measurement[3], + 2 * self._std_weight_position * measurement[3], + 1e-2, + 2 * self._std_weight_position * measurement[3], + 10 * self._std_weight_velocity * measurement[3], + 10 * self._std_weight_velocity * measurement[3], + 1e-5, + 10 * self._std_weight_velocity * measurement[3], + ] covariance = np.diag(np.square(std)) return mean, covariance - def predict(self, mean, covariance): + def predict(self, mean: np.ndarray, covariance: np.ndarray) -> tuple: """ Run Kalman filter prediction step. - Parameters - ---------- - mean : ndarray - The 8 dimensional mean vector of the object state at the previous time step. - covariance : ndarray - The 8x8 dimensional covariance matrix of the object state at the previous time step. + Args: + mean (ndarray): The 8 dimensional mean vector of the object state at the previous time step. + covariance (ndarray): The 8x8 dimensional covariance matrix of the object state at the previous time step. - Returns - ------- - (ndarray, ndarray) - Returns the mean vector and covariance matrix of the predicted state. Unobserved velocities are - initialized to 0 mean. + Returns: + (tuple[ndarray, ndarray]): Returns the mean vector and covariance matrix of the predicted state. Unobserved + velocities are initialized to 0 mean. """ std_pos = [ - self._std_weight_position * mean[3], self._std_weight_position * mean[3], 1e-2, - self._std_weight_position * mean[3]] + self._std_weight_position * mean[3], + self._std_weight_position * mean[3], + 1e-2, + self._std_weight_position * mean[3], + ] std_vel = [ - self._std_weight_velocity * mean[3], self._std_weight_velocity * mean[3], 1e-5, - self._std_weight_velocity * mean[3]] + self._std_weight_velocity * mean[3], + self._std_weight_velocity * mean[3], + 1e-5, + self._std_weight_velocity * mean[3], + ] motion_cov = np.diag(np.square(np.r_[std_pos, std_vel])) - # mean = np.dot(self._motion_mat, mean) mean = np.dot(mean, self._motion_mat.T) covariance = np.linalg.multi_dot((self._motion_mat, covariance, self._motion_mat.T)) + motion_cov return mean, covariance - def project(self, mean, covariance): + def project(self, mean: np.ndarray, covariance: np.ndarray) -> tuple: """ Project state distribution to measurement space. 
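+
+        The 8-dimensional state is mapped into the 4-dimensional measurement space through the update matrix,
+        and the measurement noise `innovation_cov` computed below is added to the projected covariance.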
- Parameters - ---------- - mean : ndarray - The state's mean vector (8 dimensional array). - covariance : ndarray - The state's covariance matrix (8x8 dimensional). + Args: + mean (ndarray): The state's mean vector (8 dimensional array). + covariance (ndarray): The state's covariance matrix (8x8 dimensional). - Returns - ------- - (ndarray, ndarray) - Returns the projected mean and covariance matrix of the given state estimate. + Returns: + (tuple[ndarray, ndarray]): Returns the projected mean and covariance matrix of the given state estimate. """ std = [ - self._std_weight_position * mean[3], self._std_weight_position * mean[3], 1e-1, - self._std_weight_position * mean[3]] + self._std_weight_position * mean[3], + self._std_weight_position * mean[3], + 1e-1, + self._std_weight_position * mean[3], + ] innovation_cov = np.diag(np.square(std)) mean = np.dot(self._update_mat, mean) covariance = np.linalg.multi_dot((self._update_mat, covariance, self._update_mat.T)) return mean, covariance + innovation_cov - def multi_predict(self, mean, covariance): + def multi_predict(self, mean: np.ndarray, covariance: np.ndarray) -> tuple: """ Run Kalman filter prediction step (Vectorized version). - Parameters - ---------- - mean : ndarray - The Nx8 dimensional mean matrix of the object states at the previous time step. - covariance : ndarray - The Nx8x8 dimensional covariance matrix of the object states at the previous time step. + Args: + mean (ndarray): The Nx8 dimensional mean matrix of the object states at the previous time step. + covariance (ndarray): The Nx8x8 covariance matrix of the object states at the previous time step. - Returns - ------- - (ndarray, ndarray) - Returns the mean vector and covariance matrix of the predicted state. Unobserved velocities are - initialized to 0 mean. + Returns: + (tuple[ndarray, ndarray]): Returns the mean vector and covariance matrix of the predicted state. Unobserved + velocities are initialized to 0 mean. """ std_pos = [ - self._std_weight_position * mean[:, 3], self._std_weight_position * mean[:, 3], - 1e-2 * np.ones_like(mean[:, 3]), self._std_weight_position * mean[:, 3]] + self._std_weight_position * mean[:, 3], + self._std_weight_position * mean[:, 3], + 1e-2 * np.ones_like(mean[:, 3]), + self._std_weight_position * mean[:, 3], + ] std_vel = [ - self._std_weight_velocity * mean[:, 3], self._std_weight_velocity * mean[:, 3], - 1e-5 * np.ones_like(mean[:, 3]), self._std_weight_velocity * mean[:, 3]] + self._std_weight_velocity * mean[:, 3], + self._std_weight_velocity * mean[:, 3], + 1e-5 * np.ones_like(mean[:, 3]), + self._std_weight_velocity * mean[:, 3], + ] sqr = np.square(np.r_[std_pos, std_vel]).T motion_cov = [np.diag(sqr[i]) for i in range(len(mean))] @@ -148,60 +148,57 @@ class KalmanFilterXYAH: return mean, covariance - def update(self, mean, covariance, measurement): + def update(self, mean: np.ndarray, covariance: np.ndarray, measurement: np.ndarray) -> tuple: """ Run Kalman filter correction step. - Parameters - ---------- - mean : ndarray - The predicted state's mean vector (8 dimensional). - covariance : ndarray - The state's covariance matrix (8x8 dimensional). - measurement : ndarray - The 4 dimensional measurement vector (x, y, a, h), where (x, y) is the center position, a the aspect - ratio, and h the height of the bounding box. + Args: + mean (ndarray): The predicted state's mean vector (8 dimensional). + covariance (ndarray): The state's covariance matrix (8x8 dimensional). 
+ measurement (ndarray): The 4 dimensional measurement vector (x, y, a, h), where (x, y) is the center + position, a the aspect ratio, and h the height of the bounding box. - Returns - ------- - (ndarray, ndarray) - Returns the measurement-corrected state distribution. + Returns: + (tuple[ndarray, ndarray]): Returns the measurement-corrected state distribution. """ projected_mean, projected_cov = self.project(mean, covariance) chol_factor, lower = scipy.linalg.cho_factor(projected_cov, lower=True, check_finite=False) - kalman_gain = scipy.linalg.cho_solve((chol_factor, lower), - np.dot(covariance, self._update_mat.T).T, - check_finite=False).T + kalman_gain = scipy.linalg.cho_solve( + (chol_factor, lower), np.dot(covariance, self._update_mat.T).T, check_finite=False + ).T innovation = measurement - projected_mean new_mean = mean + np.dot(innovation, kalman_gain.T) new_covariance = covariance - np.linalg.multi_dot((kalman_gain, projected_cov, kalman_gain.T)) return new_mean, new_covariance - def gating_distance(self, mean, covariance, measurements, only_position=False, metric='maha'): + def gating_distance( + self, + mean: np.ndarray, + covariance: np.ndarray, + measurements: np.ndarray, + only_position: bool = False, + metric: str = "maha", + ) -> np.ndarray: """ Compute gating distance between state distribution and measurements. A suitable distance threshold can be - obtained from `chi2inv95`. If `only_position` is False, the chi-square distribution has 4 degrees of - freedom, otherwise 2. + obtained from `chi2inv95`. If `only_position` is False, the chi-square distribution has 4 degrees of freedom, + otherwise 2. - Parameters - ---------- - mean : ndarray - Mean vector over the state distribution (8 dimensional). - covariance : ndarray - Covariance of the state distribution (8x8 dimensional). - measurements : ndarray - An Nx4 dimensional matrix of N measurements, each in format (x, y, a, h) where (x, y) is the bounding box - center position, a the aspect ratio, and h the height. - only_position : Optional[bool] - If True, distance computation is done with respect to the bounding box center position only. + Args: + mean (ndarray): Mean vector over the state distribution (8 dimensional). + covariance (ndarray): Covariance of the state distribution (8x8 dimensional). + measurements (ndarray): An Nx4 matrix of N measurements, each in format (x, y, a, h) where (x, y) + is the bounding box center position, a the aspect ratio, and h the height. + only_position (bool, optional): If True, distance computation is done with respect to the bounding box + center position only. Defaults to False. + metric (str, optional): The metric to use for calculating the distance. Options are 'gaussian' for the + squared Euclidean distance and 'maha' for the squared Mahalanobis distance. Defaults to 'maha'. - Returns - ------- - ndarray - Returns an array of length N, where the i-th element contains the squared Mahalanobis distance between - (mean, covariance) and `measurements[i]`. + Returns: + (np.ndarray): Returns an array of length N, where the i-th element contains the squared distance between + (mean, covariance) and `measurements[i]`. 
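+
+        Raises:
+            ValueError: If `metric` is neither 'gaussian' nor 'maha'.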
""" mean, covariance = self.project(mean, covariance) if only_position: @@ -209,77 +206,79 @@ class KalmanFilterXYAH: measurements = measurements[:, :2] d = measurements - mean - if metric == 'gaussian': + if metric == "gaussian": return np.sum(d * d, axis=1) - elif metric == 'maha': + elif metric == "maha": cholesky_factor = np.linalg.cholesky(covariance) z = scipy.linalg.solve_triangular(cholesky_factor, d.T, lower=True, check_finite=False, overwrite_b=True) return np.sum(z * z, axis=0) # square maha else: - raise ValueError('invalid distance metric') + raise ValueError("Invalid distance metric") class KalmanFilterXYWH(KalmanFilterXYAH): """ For BoT-SORT. A simple Kalman filter for tracking bounding boxes in image space. - The 8-dimensional state space (x, y, w, h, vx, vy, vw, vh) contains the bounding box center position (x, y), - width w, height h, and their respective velocities. + The 8-dimensional state space (x, y, w, h, vx, vy, vw, vh) contains the bounding box center position (x, y), width + w, height h, and their respective velocities. Object motion follows a constant velocity model. The bounding box location (x, y, w, h) is taken as direct observation of the state space (linear observation model). """ - def initiate(self, measurement): + def initiate(self, measurement: np.ndarray) -> tuple: """ Create track from unassociated measurement. - Parameters - ---------- - measurement : ndarray - Bounding box coordinates (x, y, w, h) with center position (x, y), width w, and height h. + Args: + measurement (ndarray): Bounding box coordinates (x, y, w, h) with center position (x, y), width, and height. - Returns - ------- - (ndarray, ndarray) - Returns the mean vector (8 dimensional) and covariance matrix (8x8 dimensional) of the new track. - Unobserved velocities are initialized to 0 mean. + Returns: + (tuple[ndarray, ndarray]): Returns the mean vector (8 dimensional) and covariance matrix (8x8 dimensional) of + the new track. Unobserved velocities are initialized to 0 mean. """ mean_pos = measurement mean_vel = np.zeros_like(mean_pos) mean = np.r_[mean_pos, mean_vel] std = [ - 2 * self._std_weight_position * measurement[2], 2 * self._std_weight_position * measurement[3], - 2 * self._std_weight_position * measurement[2], 2 * self._std_weight_position * measurement[3], - 10 * self._std_weight_velocity * measurement[2], 10 * self._std_weight_velocity * measurement[3], - 10 * self._std_weight_velocity * measurement[2], 10 * self._std_weight_velocity * measurement[3]] + 2 * self._std_weight_position * measurement[2], + 2 * self._std_weight_position * measurement[3], + 2 * self._std_weight_position * measurement[2], + 2 * self._std_weight_position * measurement[3], + 10 * self._std_weight_velocity * measurement[2], + 10 * self._std_weight_velocity * measurement[3], + 10 * self._std_weight_velocity * measurement[2], + 10 * self._std_weight_velocity * measurement[3], + ] covariance = np.diag(np.square(std)) return mean, covariance - def predict(self, mean, covariance): + def predict(self, mean, covariance) -> tuple: """ Run Kalman filter prediction step. - Parameters - ---------- - mean : ndarray - The 8 dimensional mean vector of the object state at the previous time step. - covariance : ndarray - The 8x8 dimensional covariance matrix of the object state at the previous time step. + Args: + mean (ndarray): The 8 dimensional mean vector of the object state at the previous time step. + covariance (ndarray): The 8x8 dimensional covariance matrix of the object state at the previous time step. 
- Returns - ------- - (ndarray, ndarray) - Returns the mean vector and covariance matrix of the predicted state. Unobserved velocities are - initialized to 0 mean. + Returns: + (tuple[ndarray, ndarray]): Returns the mean vector and covariance matrix of the predicted state. Unobserved + velocities are initialized to 0 mean. """ std_pos = [ - self._std_weight_position * mean[2], self._std_weight_position * mean[3], - self._std_weight_position * mean[2], self._std_weight_position * mean[3]] + self._std_weight_position * mean[2], + self._std_weight_position * mean[3], + self._std_weight_position * mean[2], + self._std_weight_position * mean[3], + ] std_vel = [ - self._std_weight_velocity * mean[2], self._std_weight_velocity * mean[3], - self._std_weight_velocity * mean[2], self._std_weight_velocity * mean[3]] + self._std_weight_velocity * mean[2], + self._std_weight_velocity * mean[3], + self._std_weight_velocity * mean[2], + self._std_weight_velocity * mean[3], + ] motion_cov = np.diag(np.square(np.r_[std_pos, std_vel])) mean = np.dot(mean, self._motion_mat.T) @@ -287,54 +286,53 @@ class KalmanFilterXYWH(KalmanFilterXYAH): return mean, covariance - def project(self, mean, covariance): + def project(self, mean, covariance) -> tuple: """ Project state distribution to measurement space. - Parameters - ---------- - mean : ndarray - The state's mean vector (8 dimensional array). - covariance : ndarray - The state's covariance matrix (8x8 dimensional). + Args: + mean (ndarray): The state's mean vector (8 dimensional array). + covariance (ndarray): The state's covariance matrix (8x8 dimensional). - Returns - ------- - (ndarray, ndarray) - Returns the projected mean and covariance matrix of the given state estimate. + Returns: + (tuple[ndarray, ndarray]): Returns the projected mean and covariance matrix of the given state estimate. """ std = [ - self._std_weight_position * mean[2], self._std_weight_position * mean[3], - self._std_weight_position * mean[2], self._std_weight_position * mean[3]] + self._std_weight_position * mean[2], + self._std_weight_position * mean[3], + self._std_weight_position * mean[2], + self._std_weight_position * mean[3], + ] innovation_cov = np.diag(np.square(std)) mean = np.dot(self._update_mat, mean) covariance = np.linalg.multi_dot((self._update_mat, covariance, self._update_mat.T)) return mean, covariance + innovation_cov - def multi_predict(self, mean, covariance): + def multi_predict(self, mean, covariance) -> tuple: """ Run Kalman filter prediction step (Vectorized version). - Parameters - ---------- - mean : ndarray - The Nx8 dimensional mean matrix of the object states at the previous time step. - covariance : ndarray - The Nx8x8 dimensional covariance matrix of the object states at the previous time step. + Args: + mean (ndarray): The Nx8 dimensional mean matrix of the object states at the previous time step. + covariance (ndarray): The Nx8x8 covariance matrix of the object states at the previous time step. - Returns - ------- - (ndarray, ndarray) - Returns the mean vector and covariance matrix of the predicted state. Unobserved velocities are - initialized to 0 mean. + Returns: + (tuple[ndarray, ndarray]): Returns the mean vector and covariance matrix of the predicted state. Unobserved + velocities are initialized to 0 mean. 
""" std_pos = [ - self._std_weight_position * mean[:, 2], self._std_weight_position * mean[:, 3], - self._std_weight_position * mean[:, 2], self._std_weight_position * mean[:, 3]] + self._std_weight_position * mean[:, 2], + self._std_weight_position * mean[:, 3], + self._std_weight_position * mean[:, 2], + self._std_weight_position * mean[:, 3], + ] std_vel = [ - self._std_weight_velocity * mean[:, 2], self._std_weight_velocity * mean[:, 3], - self._std_weight_velocity * mean[:, 2], self._std_weight_velocity * mean[:, 3]] + self._std_weight_velocity * mean[:, 2], + self._std_weight_velocity * mean[:, 3], + self._std_weight_velocity * mean[:, 2], + self._std_weight_velocity * mean[:, 3], + ] sqr = np.square(np.r_[std_pos, std_vel]).T motion_cov = [np.diag(sqr[i]) for i in range(len(mean))] @@ -346,23 +344,17 @@ class KalmanFilterXYWH(KalmanFilterXYAH): return mean, covariance - def update(self, mean, covariance, measurement): + def update(self, mean, covariance, measurement) -> tuple: """ Run Kalman filter correction step. - Parameters - ---------- - mean : ndarray - The predicted state's mean vector (8 dimensional). - covariance : ndarray - The state's covariance matrix (8x8 dimensional). - measurement : ndarray - The 4 dimensional measurement vector (x, y, w, h), where (x, y) is the center position, w the width, - and h the height of the bounding box. + Args: + mean (ndarray): The predicted state's mean vector (8 dimensional). + covariance (ndarray): The state's covariance matrix (8x8 dimensional). + measurement (ndarray): The 4 dimensional measurement vector (x, y, w, h), where (x, y) is the center + position, w the width, and h the height of the bounding box. - Returns - ------- - (ndarray, ndarray) - Returns the measurement-corrected state distribution. + Returns: + (tuple[ndarray, ndarray]): Returns the measurement-corrected state distribution. """ return super().update(mean, covariance, measurement) diff --git a/ultralytics/trackers/utils/matching.py b/ultralytics/trackers/utils/matching.py index f2ee75e..fa72b8b 100644 --- a/ultralytics/trackers/utils/matching.py +++ b/ultralytics/trackers/utils/matching.py @@ -4,7 +4,7 @@ import numpy as np import scipy from scipy.spatial.distance import cdist -from ultralytics.utils.metrics import bbox_ioa +from ultralytics.utils.metrics import bbox_ioa, batch_probiou try: import lap # for linear_assignment @@ -13,11 +13,11 @@ try: except (ImportError, AssertionError, AttributeError): from ultralytics.utils.checks import check_requirements - check_requirements('lapx>=0.5.2') # update to lap package from https://github.com/rathaROG/lapx + check_requirements("lapx>=0.5.2") # update to lap package from https://github.com/rathaROG/lapx import lap -def linear_assignment(cost_matrix, thresh, use_lap=True): +def linear_assignment(cost_matrix: np.ndarray, thresh: float, use_lap: bool = True) -> tuple: """ Perform linear assignment using scipy or lap.lapjv. @@ -27,19 +27,24 @@ def linear_assignment(cost_matrix, thresh, use_lap=True): use_lap (bool, optional): Whether to use lap.lapjv. Defaults to True. Returns: - (tuple): Tuple containing matched indices, unmatched indices from 'a', and unmatched indices from 'b'. 
+ Tuple with: + - matched indices + - unmatched indices from 'a' + - unmatched indices from 'b' """ if cost_matrix.size == 0: return np.empty((0, 2), dtype=int), tuple(range(cost_matrix.shape[0])), tuple(range(cost_matrix.shape[1])) if use_lap: + # Use lap.lapjv # https://github.com/gatagat/lap _, x, y = lap.lapjv(cost_matrix, extend_cost=True, cost_limit=thresh) matches = [[ix, mx] for ix, mx in enumerate(x) if mx >= 0] unmatched_a = np.where(x < 0)[0] unmatched_b = np.where(y < 0)[0] else: + # Use scipy.optimize.linear_sum_assignment # https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.linear_sum_assignment.html x, y = scipy.optimize.linear_sum_assignment(cost_matrix) # row x, col y matches = np.asarray([[x[i], y[i]] for i in range(len(x)) if cost_matrix[x[i], y[i]] <= thresh]) @@ -53,7 +58,7 @@ def linear_assignment(cost_matrix, thresh, use_lap=True): return matches, unmatched_a, unmatched_b -def iou_distance(atracks, btracks): +def iou_distance(atracks: list, btracks: list) -> np.ndarray: """ Compute cost based on Intersection over Union (IoU) between tracks. @@ -65,23 +70,30 @@ def iou_distance(atracks, btracks): (np.ndarray): Cost matrix computed based on IoU. """ - if (len(atracks) > 0 and isinstance(atracks[0], np.ndarray)) \ - or (len(btracks) > 0 and isinstance(btracks[0], np.ndarray)): + if atracks and isinstance(atracks[0], np.ndarray) or btracks and isinstance(btracks[0], np.ndarray): atlbrs = atracks btlbrs = btracks else: - atlbrs = [track.tlbr for track in atracks] - btlbrs = [track.tlbr for track in btracks] + atlbrs = [track.xywha if track.angle is not None else track.xyxy for track in atracks] + btlbrs = [track.xywha if track.angle is not None else track.xyxy for track in btracks] ious = np.zeros((len(atlbrs), len(btlbrs)), dtype=np.float32) if len(atlbrs) and len(btlbrs): - ious = bbox_ioa(np.ascontiguousarray(atlbrs, dtype=np.float32), - np.ascontiguousarray(btlbrs, dtype=np.float32), - iou=True) + if len(atlbrs[0]) == 5 and len(btlbrs[0]) == 5: + ious = batch_probiou( + np.ascontiguousarray(atlbrs, dtype=np.float32), + np.ascontiguousarray(btlbrs, dtype=np.float32), + ).numpy() + else: + ious = bbox_ioa( + np.ascontiguousarray(atlbrs, dtype=np.float32), + np.ascontiguousarray(btlbrs, dtype=np.float32), + iou=True, + ) return 1 - ious # cost matrix -def embedding_distance(tracks, detections, metric='cosine'): +def embedding_distance(tracks: list, detections: list, metric: str = "cosine") -> np.ndarray: """ Compute distance between tracks and detections based on embeddings. @@ -105,7 +117,7 @@ def embedding_distance(tracks, detections, metric='cosine'): return cost_matrix -def fuse_score(cost_matrix, detections): +def fuse_score(cost_matrix: np.ndarray, detections: list) -> np.ndarray: """ Fuses cost matrix with detection scores to produce a single similarity matrix. 
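The matching utilities above form one pipeline: iou_distance (or batch_probiou for rotated boxes) builds a cost matrix, fuse_score folds in detection confidences, and linear_assignment solves and gates it. Below is a minimal self-contained sketch of that thresholded assignment, following the scipy fallback path shown above; toy_linear_assignment and the 2x2 cost values are illustrative stand-ins, not the patched function itself.

import numpy as np
import scipy.optimize

def toy_linear_assignment(cost_matrix: np.ndarray, thresh: float):
    # Solve the assignment globally, then reject any pair costlier than the gate threshold.
    rows, cols = scipy.optimize.linear_sum_assignment(cost_matrix)
    matches = np.asarray([[r, c] for r, c in zip(rows, cols) if cost_matrix[r, c] <= thresh])
    if len(matches) == 0:
        unmatched_a = list(np.arange(cost_matrix.shape[0]))
        unmatched_b = list(np.arange(cost_matrix.shape[1]))
    else:
        unmatched_a = list(set(np.arange(cost_matrix.shape[0])) - set(matches[:, 0]))
        unmatched_b = list(set(np.arange(cost_matrix.shape[1])) - set(matches[:, 1]))
    return matches, unmatched_a, unmatched_b

# 1 - IoU costs: track 0 overlaps detection 1 strongly; track 1 overlaps nothing well.
cost = np.array([[0.9, 0.1], [0.8, 0.7]], dtype=np.float32)
print(toy_linear_assignment(cost, thresh=0.5))  # (array([[0, 1]]), [1], [0])

The globally optimal pairing here is (0, 1) plus (1, 0), but the (1, 0) pair costs 0.8 > 0.5 and is gated out, so track 1 and detection 0 come back unmatched; this is how a low-overlap pairing is discarded before the second association stage.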
diff --git a/ultralytics/utils/__init__.py b/ultralytics/utils/__init__.py index 872ce5f..93347f5 100644 --- a/ultralytics/utils/__init__.py +++ b/ultralytics/utils/__init__.py @@ -9,6 +9,7 @@ import re import subprocess import sys import threading +import time import urllib import uuid from pathlib import Path @@ -25,23 +26,22 @@ from tqdm import tqdm as tqdm_original from ultralytics import __version__ # PyTorch Multi-GPU DDP Constants -RANK = int(os.getenv('RANK', -1)) -LOCAL_RANK = int(os.getenv('LOCAL_RANK', -1)) # https://pytorch.org/docs/stable/elastic/run.html +RANK = int(os.getenv("RANK", -1)) +LOCAL_RANK = int(os.getenv("LOCAL_RANK", -1)) # https://pytorch.org/docs/stable/elastic/run.html # Other Constants FILE = Path(__file__).resolve() ROOT = FILE.parents[1] # YOLO -ASSETS = ROOT / 'assets' # default images -DEFAULT_CFG_PATH = ROOT / 'cfg/default.yaml' +ASSETS = ROOT / "assets" # default images +DEFAULT_CFG_PATH = ROOT / "cfg/default.yaml" NUM_THREADS = min(8, max(1, os.cpu_count() - 1)) # number of YOLOv5 multiprocessing threads -AUTOINSTALL = str(os.getenv('YOLO_AUTOINSTALL', True)).lower() == 'true' # global auto-install mode -VERBOSE = str(os.getenv('YOLO_VERBOSE', True)).lower() == 'true' # global verbose mode -TQDM_BAR_FORMAT = '{l_bar}{bar:10}{r_bar}' if VERBOSE else None # tqdm bar format -LOGGING_NAME = 'ultralytics' -MACOS, LINUX, WINDOWS = (platform.system() == x for x in ['Darwin', 'Linux', 'Windows']) # environment booleans -ARM64 = platform.machine() in ('arm64', 'aarch64') # ARM64 booleans -HELP_MSG = \ - """ +AUTOINSTALL = str(os.getenv("YOLO_AUTOINSTALL", True)).lower() == "true" # global auto-install mode +VERBOSE = str(os.getenv("YOLO_VERBOSE", True)).lower() == "true" # global verbose mode +TQDM_BAR_FORMAT = "{l_bar}{bar:10}{r_bar}" if VERBOSE else None # tqdm bar format +LOGGING_NAME = "ultralytics" +MACOS, LINUX, WINDOWS = (platform.system() == x for x in ["Darwin", "Linux", "Windows"]) # environment booleans +ARM64 = platform.machine() in ("arm64", "aarch64") # ARM64 booleans +HELP_MSG = """ Usage examples for running YOLOv8: 1. 
Install the ultralytics package: @@ -77,7 +77,7 @@ HELP_MSG = \ yolo detect train data=coco128.yaml model=yolov8n.pt epochs=10 lr0=0.01 - Predict a YouTube video using a pretrained segmentation model at image size 320: - yolo segment predict model=yolov8n-seg.pt source='https://youtu.be/Zgi9g1ksQHc' imgsz=320 + yolo segment predict model=yolov8n-seg.pt source='https://youtu.be/LNwODJXcvt4' imgsz=320 - Val a pretrained detection model at batch-size 1 and image size 640: yolo detect val model=yolov8n.pt data=coco128.yaml batch=1 imgsz=640 @@ -99,12 +99,12 @@ HELP_MSG = \ """ # Settings -torch.set_printoptions(linewidth=320, precision=4, profile='default') -np.set_printoptions(linewidth=320, formatter={'float_kind': '{:11.5g}'.format}) # format short g, %precision=5 +torch.set_printoptions(linewidth=320, precision=4, profile="default") +np.set_printoptions(linewidth=320, formatter={"float_kind": "{:11.5g}".format}) # format short g, %precision=5 cv2.setNumThreads(0) # prevent OpenCV from multithreading (incompatible with PyTorch DataLoader) -os.environ['NUMEXPR_MAX_THREADS'] = str(NUM_THREADS) # NumExpr max threads -os.environ['CUBLAS_WORKSPACE_CONFIG'] = ':4096:8' # for deterministic training -os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' # suppress verbose TF compiler warnings in Colab +os.environ["NUMEXPR_MAX_THREADS"] = str(NUM_THREADS) # NumExpr max threads +os.environ["CUBLAS_WORKSPACE_CONFIG"] = ":4096:8" # for deterministic training +os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2" # suppress verbose TF compiler warnings in Colab class TQDM(tqdm_original): @@ -113,19 +113,22 @@ class TQDM(tqdm_original): Args: *args (list): Positional arguments passed to original tqdm. - **kwargs (dict): Keyword arguments, with custom defaults applied. + **kwargs (any): Keyword arguments, with custom defaults applied. """ def __init__(self, *args, **kwargs): - # Set new default values (these can still be overridden when calling TQDM) - kwargs['disable'] = not VERBOSE or kwargs.get('disable', False) # logical 'and' with default value if passed - kwargs.setdefault('bar_format', TQDM_BAR_FORMAT) # override default value if passed + """ + Initialize custom Ultralytics tqdm class with different default arguments. + + Note these can still be overridden when calling TQDM. + """ + kwargs["disable"] = not VERBOSE or kwargs.get("disable", False) # logical 'and' with default value if passed + kwargs.setdefault("bar_format", TQDM_BAR_FORMAT) # override default value if passed super().__init__(*args, **kwargs) class SimpleClass: - """ - Ultralytics SimpleClass is a base class providing helpful string representation, error reporting, and attribute + """Ultralytics SimpleClass is a base class providing helpful string representation, error reporting, and attribute access methods for easier debugging and usage. 
""" @@ -134,14 +137,14 @@ class SimpleClass: attr = [] for a in dir(self): v = getattr(self, a) - if not callable(v) and not a.startswith('_'): + if not callable(v) and not a.startswith("_"): if isinstance(v, SimpleClass): # Display only the module and class name for subclasses - s = f'{a}: {v.__module__}.{v.__class__.__name__} object' + s = f"{a}: {v.__module__}.{v.__class__.__name__} object" else: - s = f'{a}: {repr(v)}' + s = f"{a}: {repr(v)}" attr.append(s) - return f'{self.__module__}.{self.__class__.__name__} object with attributes:\n\n' + '\n'.join(attr) + return f"{self.__module__}.{self.__class__.__name__} object with attributes:\n\n" + "\n".join(attr) def __repr__(self): """Return a machine-readable string representation of the object.""" @@ -154,8 +157,7 @@ class SimpleClass: class IterableSimpleNamespace(SimpleNamespace): - """ - Ultralytics IterableSimpleNamespace is an extension class of SimpleNamespace that adds iterable functionality and + """Ultralytics IterableSimpleNamespace is an extension class of SimpleNamespace that adds iterable functionality and enables usage with dict() and for loops. """ @@ -165,24 +167,26 @@ class IterableSimpleNamespace(SimpleNamespace): def __str__(self): """Return a human-readable string representation of the object.""" - return '\n'.join(f'{k}={v}' for k, v in vars(self).items()) + return "\n".join(f"{k}={v}" for k, v in vars(self).items()) def __getattr__(self, attr): """Custom attribute access error message with helpful information.""" name = self.__class__.__name__ - raise AttributeError(f""" + raise AttributeError( + f""" '{name}' object has no attribute '{attr}'. This may be caused by a modified or out of date ultralytics 'default.yaml' file.\nPlease update your code with 'pip install -U ultralytics' and if necessary replace {DEFAULT_CFG_PATH} with the latest version from https://github.com/ultralytics/ultralytics/blob/main/ultralytics/cfg/default.yaml - """) + """ + ) def get(self, key, default=None): """Return the value of the specified key if it exists; otherwise, return the default value.""" return getattr(self, key, default) -def plt_settings(rcparams=None, backend='Agg'): +def plt_settings(rcparams=None, backend="Agg"): """ Decorator to temporarily set rc parameters and the backend for a plotting function. 
@@ -200,7 +204,7 @@ def plt_settings(rcparams=None, backend='Agg'): """ if rcparams is None: - rcparams = {'font.size': 11} + rcparams = {"font.size": 11} def decorator(func): """Decorator to apply temporary rc parameters and backend to a function.""" @@ -208,12 +212,16 @@ def plt_settings(rcparams=None, backend='Agg'): def wrapper(*args, **kwargs): """Sets rc parameters and backend, calls the original function, and restores the settings.""" original_backend = plt.get_backend() - plt.switch_backend(backend) + if backend.lower() != original_backend.lower(): + plt.close("all") # auto-close()ing of figures upon backend switching is deprecated since 3.8 + plt.switch_backend(backend) with plt.rc_context(rcparams): result = func(*args, **kwargs) - plt.switch_backend(original_backend) + if backend != original_backend: + plt.close("all") + plt.switch_backend(original_backend) return result return wrapper @@ -222,58 +230,59 @@ def plt_settings(rcparams=None, backend='Agg'): def set_logging(name=LOGGING_NAME, verbose=True): - """Sets up logging for the given name.""" - rank = int(os.getenv('RANK', -1)) # rank in world for Multi-GPU trainings - level = logging.INFO if verbose and rank in {-1, 0} else logging.ERROR - logging.config.dictConfig({ - 'version': 1, - 'disable_existing_loggers': False, - 'formatters': { - name: { - 'format': '%(message)s'}}, - 'handlers': { - name: { - 'class': 'logging.StreamHandler', - 'formatter': name, - 'level': level}}, - 'loggers': { - name: { - 'level': level, - 'handlers': [name], - 'propagate': False}}}) + """Sets up logging for the given name with UTF-8 encoding support.""" + level = logging.INFO if verbose and RANK in {-1, 0} else logging.ERROR # rank in world for Multi-GPU trainings + # Configure the console (stdout) encoding to UTF-8 + formatter = logging.Formatter("%(message)s") # Default formatter + if WINDOWS and sys.stdout.encoding != "utf-8": + try: + if hasattr(sys.stdout, "reconfigure"): + sys.stdout.reconfigure(encoding="utf-8") + elif hasattr(sys.stdout, "buffer"): + import io -def emojis(string=''): - """Return platform-dependent emoji-safe version of string.""" - return string.encode().decode('ascii', 'ignore') if WINDOWS else string + sys.stdout = io.TextIOWrapper(sys.stdout.buffer, encoding="utf-8") + else: + sys.stdout.encoding = "utf-8" + except Exception as e: + print(f"Creating custom formatter for non UTF-8 environments due to {e}") + class CustomFormatter(logging.Formatter): + def format(self, record): + """Sets up logging with UTF-8 encoding and configurable verbosity.""" + return emojis(super().format(record)) -class EmojiFilter(logging.Filter): - """ - A custom logging filter class for removing emojis in log messages. + formatter = CustomFormatter("%(message)s") # Use CustomFormatter to eliminate UTF-8 output as last recourse - This filter is particularly useful for ensuring compatibility with Windows terminals - that may not support the display of emojis in log messages. 
- """ + # Create and configure the StreamHandler + stream_handler = logging.StreamHandler(sys.stdout) + stream_handler.setFormatter(formatter) + stream_handler.setLevel(level) - def filter(self, record): - """Filter logs by emoji unicode characters on windows.""" - record.msg = emojis(record.msg) - return super().filter(record) + logger = logging.getLogger(name) + logger.setLevel(level) + logger.addHandler(stream_handler) + logger.propagate = False + return logger # Set logger -set_logging(LOGGING_NAME, verbose=VERBOSE) # run before defining LOGGER -LOGGER = logging.getLogger(LOGGING_NAME) # define globally (used in train.py, val.py, detect.py, etc.) -if WINDOWS: # emoji-safe logging - LOGGER.addFilter(EmojiFilter()) +LOGGER = set_logging(LOGGING_NAME, verbose=VERBOSE) # define globally (used in train.py, val.py, predict.py, etc.) +for logger in "sentry_sdk", "urllib3.connectionpool": + logging.getLogger(logger).setLevel(logging.CRITICAL + 1) + + +def emojis(string=""): + """Return platform-dependent emoji-safe version of string.""" + return string.encode().decode("ascii", "ignore") if WINDOWS else string class ThreadingLocked: """ - A decorator class for ensuring thread-safe execution of a function or method. - This class can be used as a decorator to make sure that if the decorated function - is called from multiple threads, only one thread at a time will be able to execute the function. + A decorator class for ensuring thread-safe execution of a function or method. This class can be used as a decorator + to make sure that if the decorated function is called from multiple threads, only one thread at a time will be able + to execute the function. Attributes: lock (threading.Lock): A lock object used to manage access to the decorated function. @@ -290,20 +299,23 @@ class ThreadingLocked: """ def __init__(self): + """Initializes the decorator class for thread-safe execution of a function or method.""" self.lock = threading.Lock() def __call__(self, f): + """Run thread-safe execution of function or method.""" from functools import wraps @wraps(f) def decorated(*args, **kwargs): + """Applies thread-safety to the decorated function or method.""" with self.lock: return f(*args, **kwargs) return decorated -def yaml_save(file='data.yaml', data=None, header=''): +def yaml_save(file="data.yaml", data=None, header=""): """ Save YAML data to a file. @@ -323,18 +335,19 @@ def yaml_save(file='data.yaml', data=None, header=''): file.parent.mkdir(parents=True, exist_ok=True) # Convert Path objects to strings + valid_types = int, float, str, bool, list, tuple, dict, type(None) for k, v in data.items(): - if isinstance(v, Path): + if not isinstance(v, valid_types): data[k] = str(v) # Dump data to file in YAML format - with open(file, 'w', errors='ignore', encoding='utf-8') as f: + with open(file, "w", errors="ignore", encoding="utf-8") as f: if header: f.write(header) yaml.safe_dump(data, f, sort_keys=False, allow_unicode=True) -def yaml_load(file='data.yaml', append_filename=False): +def yaml_load(file="data.yaml", append_filename=False): """ Load YAML data from a file. @@ -345,18 +358,18 @@ def yaml_load(file='data.yaml', append_filename=False): Returns: (dict): YAML data and file name. 
""" - assert Path(file).suffix in ('.yaml', '.yml'), f'Attempting to load non-YAML file {file} with yaml_load()' - with open(file, errors='ignore', encoding='utf-8') as f: + assert Path(file).suffix in (".yaml", ".yml"), f"Attempting to load non-YAML file {file} with yaml_load()" + with open(file, errors="ignore", encoding="utf-8") as f: s = f.read() # string # Remove special characters if not s.isprintable(): - s = re.sub(r'[^\x09\x0A\x0D\x20-\x7E\x85\xA0-\uD7FF\uE000-\uFFFD\U00010000-\U0010ffff]+', '', s) + s = re.sub(r"[^\x09\x0A\x0D\x20-\x7E\x85\xA0-\uD7FF\uE000-\uFFFD\U00010000-\U0010ffff]+", "", s) # Add YAML filename to dict and return data = yaml.safe_load(s) or {} # always return a dict (yaml.safe_load() may return None for empty files) if append_filename: - data['yaml_file'] = str(file) + data["yaml_file"] = str(file) return data @@ -368,7 +381,7 @@ def yaml_print(yaml_file: Union[str, Path, dict]) -> None: yaml_file: The file path of the YAML file or a YAML-formatted dictionary. Returns: - None + (None) """ yaml_dict = yaml_load(yaml_file) if isinstance(yaml_file, (str, Path)) else yaml_file dump = yaml.dump(yaml_dict, sort_keys=False, allow_unicode=True) @@ -378,7 +391,7 @@ def yaml_print(yaml_file: Union[str, Path, dict]) -> None: # Default configuration DEFAULT_CFG_DICT = yaml_load(DEFAULT_CFG_PATH) for k, v in DEFAULT_CFG_DICT.items(): - if isinstance(v, str) and v.lower() == 'none': + if isinstance(v, str) and v.lower() == "none": DEFAULT_CFG_DICT[k] = None DEFAULT_CFG_KEYS = DEFAULT_CFG_DICT.keys() DEFAULT_CFG = IterableSimpleNamespace(**DEFAULT_CFG_DICT) @@ -392,8 +405,8 @@ def is_ubuntu() -> bool: (bool): True if OS is Ubuntu, False otherwise. """ with contextlib.suppress(FileNotFoundError): - with open('/etc/os-release') as f: - return 'ID=ubuntu' in f.read() + with open("/etc/os-release") as f: + return "ID=ubuntu" in f.read() return False @@ -404,7 +417,7 @@ def is_colab(): Returns: (bool): True if running inside a Colab notebook, False otherwise. """ - return 'COLAB_RELEASE_TAG' in os.environ or 'COLAB_BACKEND_VERSION' in os.environ + return "COLAB_RELEASE_TAG" in os.environ or "COLAB_BACKEND_VERSION" in os.environ def is_kaggle(): @@ -414,19 +427,19 @@ def is_kaggle(): Returns: (bool): True if running inside a Kaggle kernel, False otherwise. """ - return os.environ.get('PWD') == '/kaggle/working' and os.environ.get('KAGGLE_URL_BASE') == 'https://www.kaggle.com' + return os.environ.get("PWD") == "/kaggle/working" and os.environ.get("KAGGLE_URL_BASE") == "https://www.kaggle.com" def is_jupyter(): """ - Check if the current script is running inside a Jupyter Notebook. - Verified on Colab, Jupyterlab, Kaggle, Paperspace. + Check if the current script is running inside a Jupyter Notebook. Verified on Colab, Jupyterlab, Kaggle, Paperspace. Returns: (bool): True if running inside a Jupyter Notebook, False otherwise. """ with contextlib.suppress(Exception): from IPython import get_ipython + return get_ipython() is not None return False @@ -438,10 +451,10 @@ def is_docker() -> bool: Returns: (bool): True if the script is running inside a Docker container, False otherwise. 
""" - file = Path('/proc/self/cgroup') + file = Path("/proc/self/cgroup") if file.exists(): with open(file) as f: - return 'docker' in f.read() + return "docker" in f.read() else: return False @@ -455,7 +468,7 @@ def is_online() -> bool: """ import socket - for host in '1.1.1.1', '8.8.8.8', '223.5.5.5': # Cloudflare, Google, AliDNS: + for host in "1.1.1.1", "8.8.8.8", "223.5.5.5": # Cloudflare, Google, AliDNS: try: test_connection = socket.create_connection(address=(host, 53), timeout=2) except (socket.timeout, socket.gaierror, OSError): @@ -509,23 +522,23 @@ def is_pytest_running(): Returns: (bool): True if pytest is running, False otherwise. """ - return ('PYTEST_CURRENT_TEST' in os.environ) or ('pytest' in sys.modules) or ('pytest' in Path(sys.argv[0]).stem) + return ("PYTEST_CURRENT_TEST" in os.environ) or ("pytest" in sys.modules) or ("pytest" in Path(sys.argv[0]).stem) -def is_github_actions_ci() -> bool: +def is_github_action_running() -> bool: """ - Determine if the current environment is a GitHub Actions CI Python runner. + Determine if the current environment is a GitHub Actions runner. Returns: - (bool): True if the current environment is a GitHub Actions CI Python runner, False otherwise. + (bool): True if the current environment is a GitHub Actions runner, False otherwise. """ - return 'GITHUB_ACTIONS' in os.environ and 'RUNNER_OS' in os.environ and 'RUNNER_TOOL_CACHE' in os.environ + return "GITHUB_ACTIONS" in os.environ and "GITHUB_WORKFLOW" in os.environ and "RUNNER_OS" in os.environ def is_git_dir(): """ - Determines whether the current file is part of a git repository. - If the current file is not part of a git repository, returns None. + Determines whether the current file is part of a git repository. If the current file is not part of a git + repository, returns None. Returns: (bool): True if current file is part of a git repository. @@ -535,14 +548,14 @@ def is_git_dir(): def get_git_dir(): """ - Determines whether the current file is part of a git repository and if so, returns the repository root directory. - If the current file is not part of a git repository, returns None. + Determines whether the current file is part of a git repository and if so, returns the repository root directory. If + the current file is not part of a git repository, returns None. Returns: (Path | None): Git root directory if found or None if not found. """ for d in Path(__file__).parents: - if (d / '.git').is_dir(): + if (d / ".git").is_dir(): return d @@ -555,7 +568,7 @@ def get_git_origin_url(): """ if is_git_dir(): with contextlib.suppress(subprocess.CalledProcessError): - origin = subprocess.check_output(['git', 'config', '--get', 'remote.origin.url']) + origin = subprocess.check_output(["git", "config", "--get", "remote.origin.url"]) return origin.decode().strip() @@ -568,12 +581,13 @@ def get_git_branch(): """ if is_git_dir(): with contextlib.suppress(subprocess.CalledProcessError): - origin = subprocess.check_output(['git', 'rev-parse', '--abbrev-ref', 'HEAD']) + origin = subprocess.check_output(["git", "rev-parse", "--abbrev-ref", "HEAD"]) return origin.decode().strip() def get_default_args(func): - """Returns a dictionary of default arguments for a function. + """ + Returns a dictionary of default arguments for a function. Args: func (callable): The function to inspect. 
@@ -594,13 +608,13 @@ def get_ubuntu_version():
     """
     if is_ubuntu():
         with contextlib.suppress(FileNotFoundError, AttributeError):
-            with open('/etc/os-release') as f:
+            with open("/etc/os-release") as f:
                 return re.search(r'VERSION_ID="(\d+\.\d+)"', f.read())[1]


-def get_user_config_dir(sub_dir='Ultralytics'):
+def get_user_config_dir(sub_dir="yolov10"):
     """
-    Get the user config directory.
+    Return the appropriate config directory based on the environment operating system.

     Args:
         sub_dir (str): The name of the subdirectory to create.
@@ -608,21 +622,22 @@
     Returns:
         (Path): The path to the user config directory.
     """
-    # Return the appropriate config directory for each operating system
     if WINDOWS:
-        path = Path.home() / 'AppData' / 'Roaming' / sub_dir
+        path = Path.home() / "AppData" / "Roaming" / sub_dir
     elif MACOS:  # macOS
-        path = Path.home() / 'Library' / 'Application Support' / sub_dir
+        path = Path.home() / "Library" / "Application Support" / sub_dir
     elif LINUX:
-        path = Path.home() / '.config' / sub_dir
+        path = Path.home() / ".config" / sub_dir
     else:
-        raise ValueError(f'Unsupported operating system: {platform.system()}')
+        raise ValueError(f"Unsupported operating system: {platform.system()}")

     # GCP and AWS lambda fix, only /tmp is writeable
     if not is_dir_writeable(path.parent):
-        LOGGER.warning(f"WARNING ⚠️ user config directory '{path}' is not writeable, defaulting to '/tmp' or CWD."
-                       'Alternatively you can define a YOLO_CONFIG_DIR environment variable for this path.')
-        path = Path('/tmp') / sub_dir if is_dir_writeable('/tmp') else Path().cwd() / sub_dir
+        LOGGER.warning(
+            f"WARNING ⚠️ user config directory '{path}' is not writeable, defaulting to '/tmp' or CWD. "
+            "Alternatively you can define a YOLO_CONFIG_DIR environment variable for this path."
+        )
+        path = Path("/tmp") / sub_dir if is_dir_writeable("/tmp") else Path().cwd() / sub_dir

     # Create the subdirectory if it does not exist
     path.mkdir(parents=True, exist_ok=True)
@@ -630,40 +645,99 @@
     return path


-USER_CONFIG_DIR = Path(os.getenv('YOLO_CONFIG_DIR') or get_user_config_dir())  # Ultralytics settings dir
-SETTINGS_YAML = USER_CONFIG_DIR / 'settings.yaml'
+USER_CONFIG_DIR = Path(os.getenv("YOLO_CONFIG_DIR") or get_user_config_dir())  # Ultralytics settings dir
+SETTINGS_YAML = USER_CONFIG_DIR / "settings.yaml"


 def colorstr(*input):
-    """Colors a string https://en.wikipedia.org/wiki/ANSI_escape_code, i.e. colorstr('blue', 'hello world')."""
-    *args, string = input if len(input) > 1 else ('blue', 'bold', input[0])  # color arguments, string
+    """
+    Colors a string based on the provided color and style arguments. Utilizes ANSI escape codes.
+    See https://en.wikipedia.org/wiki/ANSI_escape_code for more details.
+
+    This function can be called in two ways:
+        - colorstr('color', 'style', 'your string')
+        - colorstr('your string')
+
+    In the second form, 'blue' and 'bold' will be applied by default.
+
+    Args:
+        *input (str): A sequence of strings where the first n-1 strings are color and style arguments,
+            and the last string is the one to be colored.
+ + Supported Colors and Styles: + Basic Colors: 'black', 'red', 'green', 'yellow', 'blue', 'magenta', 'cyan', 'white' + Bright Colors: 'bright_black', 'bright_red', 'bright_green', 'bright_yellow', + 'bright_blue', 'bright_magenta', 'bright_cyan', 'bright_white' + Misc: 'end', 'bold', 'underline' + + Returns: + (str): The input string wrapped with ANSI escape codes for the specified color and style. + + Examples: + >>> colorstr('blue', 'bold', 'hello world') + >>> '\033[34m\033[1mhello world\033[0m' + """ + *args, string = input if len(input) > 1 else ("blue", "bold", input[0]) # color arguments, string colors = { - 'black': '\033[30m', # basic colors - 'red': '\033[31m', - 'green': '\033[32m', - 'yellow': '\033[33m', - 'blue': '\033[34m', - 'magenta': '\033[35m', - 'cyan': '\033[36m', - 'white': '\033[37m', - 'bright_black': '\033[90m', # bright colors - 'bright_red': '\033[91m', - 'bright_green': '\033[92m', - 'bright_yellow': '\033[93m', - 'bright_blue': '\033[94m', - 'bright_magenta': '\033[95m', - 'bright_cyan': '\033[96m', - 'bright_white': '\033[97m', - 'end': '\033[0m', # misc - 'bold': '\033[1m', - 'underline': '\033[4m'} - return ''.join(colors[x] for x in args) + f'{string}' + colors['end'] + "black": "\033[30m", # basic colors + "red": "\033[31m", + "green": "\033[32m", + "yellow": "\033[33m", + "blue": "\033[34m", + "magenta": "\033[35m", + "cyan": "\033[36m", + "white": "\033[37m", + "bright_black": "\033[90m", # bright colors + "bright_red": "\033[91m", + "bright_green": "\033[92m", + "bright_yellow": "\033[93m", + "bright_blue": "\033[94m", + "bright_magenta": "\033[95m", + "bright_cyan": "\033[96m", + "bright_white": "\033[97m", + "end": "\033[0m", # misc + "bold": "\033[1m", + "underline": "\033[4m", + } + return "".join(colors[x] for x in args) + f"{string}" + colors["end"] + + +def remove_colorstr(input_string): + """ + Removes ANSI escape codes from a string, effectively un-coloring it. + + Args: + input_string (str): The string to remove color and style from. + + Returns: + (str): A new string with all ANSI escape codes removed. + + Examples: + >>> remove_colorstr(colorstr('blue', 'bold', 'hello world')) + >>> 'hello world' + """ + ansi_escape = re.compile(r"\x1B\[[0-9;]*[A-Za-z]") + return ansi_escape.sub("", input_string) class TryExcept(contextlib.ContextDecorator): - """YOLOv8 TryExcept class. Usage: @TryExcept() decorator or 'with TryExcept():' context manager.""" + """ + Ultralytics TryExcept class. Use as @TryExcept() decorator or 'with TryExcept():' context manager. - def __init__(self, msg='', verbose=True): + Examples: + As a decorator: + >>> @TryExcept(msg="Error occurred in func", verbose=True) + >>> def func(): + >>> # Function logic here + >>> pass + + As a context manager: + >>> with TryExcept(msg="Error occurred in block", verbose=True): + >>> # Code block here + >>> pass + """ + + def __init__(self, msg="", verbose=True): """Initialize TryExcept class with optional message and verbosity settings.""" self.msg = msg self.verbose = verbose @@ -679,14 +753,80 @@ class TryExcept(contextlib.ContextDecorator): return True +class Retry(contextlib.ContextDecorator): + """ + Retry class for function execution with exponential backoff. + + Can be used as a decorator or a context manager to retry a function or block of code on exceptions, up to a + specified number of times with an exponentially increasing delay between retries. 
+ + Examples: + Example usage as a decorator: + >>> @Retry(times=3, delay=2) + >>> def test_func(): + >>> # Replace with function logic that may raise exceptions + >>> return True + + Example usage as a context manager: + >>> with Retry(times=3, delay=2): + >>> # Replace with code block that may raise exceptions + >>> pass + """ + + def __init__(self, times=3, delay=2): + """Initialize Retry class with specified number of retries and delay.""" + self.times = times + self.delay = delay + self._attempts = 0 + + def __call__(self, func): + """Decorator implementation for Retry with exponential backoff.""" + + def wrapped_func(*args, **kwargs): + """Applies retries to the decorated function or method.""" + self._attempts = 0 + while self._attempts < self.times: + try: + return func(*args, **kwargs) + except Exception as e: + self._attempts += 1 + print(f"Retry {self._attempts}/{self.times} failed: {e}") + if self._attempts >= self.times: + raise e + time.sleep(self.delay * (2**self._attempts)) # exponential backoff delay + + return wrapped_func + + def __enter__(self): + """Enter the runtime context related to this object.""" + self._attempts = 0 + + def __exit__(self, exc_type, exc_value, traceback): + """Exit the runtime context related to this object with exponential backoff.""" + if exc_type is not None: + self._attempts += 1 + if self._attempts < self.times: + print(f"Retry {self._attempts}/{self.times} failed: {exc_value}") + time.sleep(self.delay * (2**self._attempts)) # exponential backoff delay + return True # Suppresses the exception and retries + return False # Re-raises the exception if retries are exhausted + + def threaded(func): - """Multi-threads a target function and returns thread. Usage: @threaded decorator.""" + """ + Multi-threads a target function by default and returns the thread or function result. + + Use as @threaded decorator. The function runs in a separate thread unless 'threaded=False' is passed. + """ def wrapper(*args, **kwargs): - """Multi-threads a given function and returns the thread.""" - thread = threading.Thread(target=func, args=args, kwargs=kwargs, daemon=True) - thread.start() - return thread + """Multi-threads a given function based on 'threaded' kwarg and returns the thread or function result.""" + if kwargs.pop("threaded", True): # run in thread + thread = threading.Thread(target=func, args=args, kwargs=kwargs, daemon=True) + thread.start() + return thread + else: + return func(*args, **kwargs) return wrapper @@ -723,27 +863,28 @@ def set_sentry(): Returns: dict: The modified event or None if the event should not be sent to Sentry. 
""" - if 'exc_info' in hint: - exc_type, exc_value, tb = hint['exc_info'] - if exc_type in (KeyboardInterrupt, FileNotFoundError) \ - or 'out of memory' in str(exc_value): + if "exc_info" in hint: + exc_type, exc_value, tb = hint["exc_info"] + if exc_type in (KeyboardInterrupt, FileNotFoundError) or "out of memory" in str(exc_value): return None # do not send event - event['tags'] = { - 'sys_argv': sys.argv[0], - 'sys_argv_name': Path(sys.argv[0]).name, - 'install': 'git' if is_git_dir() else 'pip' if is_pip_package() else 'other', - 'os': ENVIRONMENT} + event["tags"] = { + "sys_argv": sys.argv[0], + "sys_argv_name": Path(sys.argv[0]).name, + "install": "git" if is_git_dir() else "pip" if is_pip_package() else "other", + "os": ENVIRONMENT, + } return event - if SETTINGS['sync'] and \ - RANK in (-1, 0) and \ - Path(sys.argv[0]).name == 'yolo' and \ - not TESTS_RUNNING and \ - ONLINE and \ - is_pip_package() and \ - not is_git_dir(): - + if ( + SETTINGS["sync"] + and RANK in (-1, 0) + and Path(sys.argv[0]).name == "yolo" + and not TESTS_RUNNING + and ONLINE + and is_pip_package() + and not is_git_dir() + ): # If sentry_sdk package is not installed then return and do not use Sentry try: import sentry_sdk # noqa @@ -751,18 +892,15 @@ def set_sentry(): return sentry_sdk.init( - dsn='https://5ff1556b71594bfea135ff0203a0d290@o4504521589325824.ingest.sentry.io/4504521592406016', + dsn="https://5ff1556b71594bfea135ff0203a0d290@o4504521589325824.ingest.sentry.io/4504521592406016", debug=False, traces_sample_rate=1.0, release=__version__, - environment='production', # 'dev' or 'production' + environment="production", # 'dev' or 'production' before_send=before_send, - ignore_errors=[KeyboardInterrupt, FileNotFoundError]) - sentry_sdk.set_user({'id': SETTINGS['uuid']}) # SHA-256 anonymized UUID hash - - # Disable all sentry logging - for logger in 'sentry_sdk', 'sentry_sdk.errors': - logging.getLogger(logger).setLevel(logging.CRITICAL) + ignore_errors=[KeyboardInterrupt, FileNotFoundError], + ) + sentry_sdk.set_user({"id": SETTINGS["uuid"]}) # SHA-256 anonymized UUID hash class SettingsManager(dict): @@ -774,7 +912,10 @@ class SettingsManager(dict): version (str): Settings version. In case of local version mismatch, new default settings will be saved. """ - def __init__(self, file=SETTINGS_YAML, version='0.0.4'): + def __init__(self, file=SETTINGS_YAML, version="0.0.4"): + """Initialize the SettingsManager with default settings, load and validate current settings from the YAML + file. 
+ """ import copy import hashlib @@ -788,22 +929,24 @@ class SettingsManager(dict): self.file = Path(file) self.version = version self.defaults = { - 'settings_version': version, - 'datasets_dir': str(datasets_root / 'datasets'), - 'weights_dir': str(root / 'weights'), - 'runs_dir': str(root / 'runs'), - 'uuid': hashlib.sha256(str(uuid.getnode()).encode()).hexdigest(), - 'sync': True, - 'api_key': '', - 'clearml': True, # integrations - 'comet': True, - 'dvc': True, - 'hub': True, - 'mlflow': True, - 'neptune': True, - 'raytune': True, - 'tensorboard': True, - 'wandb': True} + "settings_version": version, + "datasets_dir": str(datasets_root / "datasets"), + "weights_dir": str(root / "weights"), + "runs_dir": str(root / "runs"), + "uuid": hashlib.sha256(str(uuid.getnode()).encode()).hexdigest(), + "sync": True, + "api_key": "", + "openai_api_key": "", + "clearml": True, # integrations + "comet": True, + "dvc": True, + "hub": True, + "mlflow": True, + "neptune": True, + "raytune": True, + "tensorboard": True, + "wandb": True, + } super().__init__(copy.deepcopy(self.defaults)) @@ -814,15 +957,26 @@ class SettingsManager(dict): self.load() correct_keys = self.keys() == self.defaults.keys() correct_types = all(type(a) is type(b) for a, b in zip(self.values(), self.defaults.values())) - correct_version = check_version(self['settings_version'], self.version) + correct_version = check_version(self["settings_version"], self.version) + help_msg = ( + f"\nView settings with 'yolo settings' or at '{self.file}'" + "\nUpdate settings with 'yolo settings key=value', i.e. 'yolo settings runs_dir=path/to/dir'. " + "For help see https://docs.ultralytics.com/quickstart/#ultralytics-settings." + ) if not (correct_keys and correct_types and correct_version): LOGGER.warning( - 'WARNING ⚠️ Ultralytics settings reset to default values. This may be due to a possible problem ' - 'with your settings or a recent ultralytics package update. ' - f"\nView settings with 'yolo settings' or at '{self.file}'" - "\nUpdate settings with 'yolo settings key=value', i.e. 'yolo settings runs_dir=path/to/dir'.") + "WARNING ⚠️ Ultralytics settings reset to default values. This may be due to a possible problem " + f"with your settings or a recent ultralytics package update. {help_msg}" + ) self.reset() + if self.get("datasets_dir") == self.get("runs_dir"): + LOGGER.warning( + f"WARNING ⚠️ Ultralytics setting 'datasets_dir: {self.get('datasets_dir')}' " + f"must be different than 'runs_dir: {self.get('runs_dir')}'. " + f"Please change one to avoid possible issues during training. {help_msg}" + ) + def load(self): """Loads settings from the YAML file.""" super().update(yaml_load(self.file)) @@ -847,14 +1001,16 @@ def deprecation_warn(arg, new_arg, version=None): """Issue a deprecation warning when a deprecated argument is used, suggesting an updated argument.""" if not version: version = float(__version__[:3]) + 0.2 # deprecate after 2nd major release - LOGGER.warning(f"WARNING ⚠️ '{arg}' is deprecated and will be removed in 'ultralytics {version}' in the future. " - f"Please use '{new_arg}' instead.") + LOGGER.warning( + f"WARNING ⚠️ '{arg}' is deprecated and will be removed in 'ultralytics {version}' in the future. " + f"Please use '{new_arg}' instead." + ) def clean_url(url): """Strip auth from URL, i.e. 
https://url.com/file.txt?auth -> https://url.com/file.txt."""
-    url = Path(url).as_posix().replace(':/', '://')  # Pathlib turns :// -> :/, as_posix() for Windows
-    return urllib.parse.unquote(url).split('?')[0]  # '%2F' to '/', split https://url.com/file.txt?auth
+    url = Path(url).as_posix().replace(":/", "://")  # Pathlib turns :// -> :/, as_posix() for Windows
+    return urllib.parse.unquote(url).split("?")[0]  # '%2F' to '/', split https://url.com/file.txt?auth


 def url2file(url):
@@ -865,12 +1021,23 @@

 # Run below code on utils init ------------------------------------------------------------------------------------

 # Check first-install steps
-PREFIX = colorstr('Ultralytics: ')
+PREFIX = colorstr("Ultralytics: ")
 SETTINGS = SettingsManager()  # initialize settings
-DATASETS_DIR = Path(SETTINGS['datasets_dir'])  # global datasets directory
-ENVIRONMENT = 'Colab' if is_colab() else 'Kaggle' if is_kaggle() else 'Jupyter' if is_jupyter() else \
-    'Docker' if is_docker() else platform.system()
-TESTS_RUNNING = is_pytest_running() or is_github_actions_ci()
+DATASETS_DIR = Path(SETTINGS["datasets_dir"])  # global datasets directory
+WEIGHTS_DIR = Path(SETTINGS["weights_dir"])  # global weights directory
+RUNS_DIR = Path(SETTINGS["runs_dir"])  # global runs directory
+ENVIRONMENT = (
+    "Colab"
+    if is_colab()
+    else "Kaggle"
+    if is_kaggle()
+    else "Jupyter"
+    if is_jupyter()
+    else "Docker"
+    if is_docker()
+    else platform.system()
+)
+TESTS_RUNNING = is_pytest_running() or is_github_action_running()
 set_sentry()

 # Apply monkey patches
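Usage sketch (not part of the patch) for the new Retry decorator added earlier in this file; flaky() is a hypothetical function with transient failures. Note that only the decorator form actually re-invokes the wrapped callable; the context-manager form can merely suppress an exception, since a with-block cannot be re-entered.

    import random
    from ultralytics.utils import Retry

    @Retry(times=3, delay=1)
    def flaky():  # hypothetical function that fails at random
        if random.random() < 0.5:
            raise ConnectionError("transient failure")
        return "ok"

    print(flaky())  # up to 3 attempts, sleeping delay * 2**attempt between tries (2 s, then 4 s here)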
diff --git a/ultralytics/utils/__pycache__/__init__.cpython-312.pyc b/ultralytics/utils/__pycache__/__init__.cpython-312.pyc
index a595202ee89b836fa583488a18ba7e15198d913e..0951c2a17e89708ec1f153c458b730158bfe0f87 100644
GIT binary patch
[binary delta omitted]
diff --git a/ultralytics/utils/__pycache__/__init__.cpython-39.pyc b/ultralytics/utils/__pycache__/__init__.cpython-39.pyc
index c6c39b29b4853396f807321ae1ca928591641030..30dadc218ef95927602b721b3f7ce718bb6c67b6 100644
GIT binary patch
[binary delta omitted]
diff --git a/ultralytics/utils/__pycache__/autobatch.cpython-312.pyc b/ultralytics/utils/__pycache__/autobatch.cpython-312.pyc
index 239c7074711694f1600258aa916bca1123330810..e198f39f41595ca310a72ca28dcb5c7cd2017105 100644
GIT binary patch
[binary delta omitted]
diff --git a/ultralytics/utils/__pycache__/checks.cpython-312.pyc b/ultralytics/utils/__pycache__/checks.cpython-312.pyc
index 32e65c2818372270539d86ab7c064a48bf619407..aed4c3f5344b961947448a6b3ef5d16f855b52b2 100644
GIT binary patch
[binary delta omitted]
zfQtE6!q$gMSs&;I((wl6qJXU1oS!`nK;$Ipa2yTLD$_=<&! z9d90rRO}3w?^@!7-9TCbPfgOKc+9h=*QK^q1PLSqYC z>mTju@m-8+28J(qiLzR8;X&uwdio(2_dpQkd)SIu9ms=DfWn6S7)sv35uqb>iF;pX zx~g3?#nXlc6Mu~EYq+RIAAo%E_KzD>Dt1tDc0`7)gRTRMY52z~_fGaXYK=<)gZ{I}!t zZ|=Aie`D`_ac!iy_WOqUhLaKaUwm@W27u>NkImciBDTEg(OFwbSYPs<*5;ZplYR8x zcO(lnDynbpHPj+1e4Z!CG~DB&87*mQe`b)KMB`~SeYLrk+Pnt(`{u!9^*Z2q^*{#< zq=w2Z)hg{x9sOEMBmIw-OwC2!U&hn#v{VUzAA=pu;_#kDZ0cigb}()S@XIFWP;qBE zzk$~6%;arzreyMOiyDE}hHJAO! zg2{HJ=5ozbwezOzh$(w|%bcm?J>0nb++d%SuJvE-rvvi|C6R=ZaH)4Lp@)8JcV9)v zvu9^^L^8@}Q!4Is%*8dm9oP4L>4(0zd~*le=Q@tgcXUNMy259AA{{-m2YY8*`e@~z z4qN@}u9-?WG`X`80uZJz?zw#GRi2FtwiVdp*n(%;26rKT6$GFyAJ~A6c?Ji_KSRc+ zI6$1&OWX&h_(^H1WyUpUDh;bkmGv*sM$wh_J1DSzgS7@x_p8u3PZ}52sesjWu4HP* zRYBDX5Pp#-W#AsF==*6dtHjHA{>h%8fFyHFJX2r;i-aD;h*4E|5}~6;)cV`;qyaHa z+>x#LQW5}#aJ>OwI{@BR_j59weFI!50ch9_z8GAT7#86S?>q>KQXckjxXFXl4!ASX zc?O_KD68C<5QBJXi3fqu3Z8U6$ZGxV322yIvUs2h>kqWYbW03&4PlpKJ@tEfyv1UR zX8-`vVaOmCe15OOw-rnVniTSP5Fpmb)DqZ2JnU!AoEwfZR{9Gr8jL+^$7C1upB<5c ze<|Ss9jLz86oJ775D{na0Ur#QvtrmC@_T6SzfdB&Ap$N~0+*aVb>unAd{Rjysbn^( zEUb=SbR|u>=$?63Uc{9*>ngak|2#^^)ha21G z8;?dBkIrs87Ix-$hEEO8pBjz8|NPOgdK8T6$X>Sq2Y@iz6+}uS$zvn%5$S9DkEf%6 z0LJN$>byheeFQw|1T2ahV^EtLTI*|>1Iu&-o>bvI3p~11d{D#zV&h%eV*3XDeZv>X ze?m^2K&M-O*@732B-H>vD2%IkZf%bn+a8kbYho&?CC$xMY>LFopjwrOe$?p_rPUoG zFtWw6Rv9JItbLiOddYCp7~8(VV&KUCkW6rE3D0<7Ezx}k<5e2jjt4tQ(&~yh0n2z? zizhmgMK;mLb!-D!117*4)CAS+E}ac_c5y*%P>(ynbu=^3)=Jo4fU_Ano&F?0jXDk` zTO0%SLC3fedSdL)haRM5z*PhIx7NiWL2`~81X#C09xM(yRAM*4wruqV6<~A%y>;jW z-%8DGY5Z;KZmUW95Qg7CrXh^-1p%G7{otc;Re=00eXK24AnEk0Z52nfBYwE`G7wE) z2hWuv1+J0d=BbOt2?rucl7JK2B#`^aQ|o7N6q5u<^f5kKGAI+Tmln2{g&slxb-NXa zhG>CV)Z7gx5u^)n&yi7i&BC}%3cRkv1`QZs(@|Bg?@UxhJc9)8LW13aYfusZXV4P! z;}}Z@+2QJ1d2%1rw{T5{Btc~nMAEN;$R+MKYCBzSui+PHX~*Xh-ZMMr&0@qX&Y816 z)~x`9M*72!T;5Lqts}V+2EF>=T%cNC?gKS(jbU5^+&I1!Ipzo9!W>-1jw0?KoU${MtiPSQUI9tZu;!%WITAZ1z!j=G*x09&nOWCgGr z>|yyPq=AqKF?Iy*Zw^`H)L5x?bPJ$4h(Ks18{-Oc3$kmM94kCjv9hd;)5)4cZ-6~y zvz&(FEA(gKDZC{`Ud`&yriVJ6bUxqN08QY5q4tG@m{oyAxgo|wa@~xN(E-7Neet*k zpqczwJ}?63$*1{CFh{K3xK=KTl@Ds=lGV7TzcdzKd&g=?2{#jO;Mao(rYH&enLMt8 zna_ZGhy4WzmbJh{quD&DSI6$MPEkKhgs+x>x+AAmavV6~Ms5B3-iDFA22t4Y40?txJW zZ@?UAFJ6Iei^aCa{l%zpiW!&fkM3wJWyg%IU>{7)t%pOq+YYquZ*J{~8kM`PXgOhb zV1^UgF1QWU#hOsVIwdpknWDmh_JFFkvK%TaW(e=PMzxAciRyMM*fA;!IjS0gn|T^$ z(xWQqNI+Fu*%MHUJDZGA74#{p-u`H4MeslDV!0Une>efhg>sAvCLaGP4G*C#*nslTzu}8^ViSMCX~X-Zf2->=Eyg@?_~YYqHhp6q%is`ax^4zuaSILsO1#Qe;%+7tqecc@{uAj|r2xm1e zaUs5m7Z=6sR}9w;;li!6Vr@8c8xnEEW`g z-SDa*T)B6)U|%?Y|J}AFlg4rg@FktW2>6md`;+^ob%(uzA#EWce`4oCeCm^#EpX+{ zkoKt`@s00oX;kk@6z(P_?yAt-E$1PG=o?2)Q)5@W z8WXq6kA_rQ=GOGmZyhf(;JU!A7)Sr*cy%0RtfEnuNENL(Q7Jqw-1eS`3L&_-%&s59 z?gOr|z%6q>VT=wV3mpay4A!>ksMtsgt_d4lB^?4?zKTwxiypN$%F2CbuL4;QWBHp9 zxOHR!<9PN(nlShl$JN8>?aF1uF1Yu?7_$znyPDjfI9mn<$?*)1ko{2Gi%Y458cz4> z?3(y@^||lU3#Z+>G=R50xt~g3Ih`9q?CX9aXGxt)D+`;-;iz3>T)FhHV2hvVgYTMn zj9)k=g^zjxt+n*RXM?+E4DP*e;mj%bIE^O$;TlKSWwoPtWjQs2s&4Zufx+;{WFR+ zU+KKB+uPL*SA=A87MlEflv1=7aA($P*4RI=r_S38BKCr)`5@rb_f>+!IH6rK(#wNi z(tcU#SLV>OA*{=Acm~uc3qWb&0XsJc#lqMGPAY8a;Tlf*tSMvK4oFp4oy{r-)K#Sw zm2Uo1O*veLE_bUx6rnI-K^Df?@U>(Jn`A*9n#LC8+7*b>1Qc@f0YfA4c?L>lxd#%O zs?yECOGS2Kt)C(rCfz*VLC~?k8`wMq*+}ewb8|Ru`w2wh*luia0Lrwug$|{LsHGDbc@LJ)%kNHv11IWjQJ*uc>g9IrtubjwIrh@@Z=H#iKc zj9Qfg{Ma3B#Si%vW^cq`GX@P1MAc`8hX>g8V|H6sS)t7R_&k<*5`!;e@D&WcgTbp9 z{1}5-3=o+PF~pDIcnrE>sD|Pdp)0|ZAkzj>HU2>jlW-=lOz;Sb2E$pS*4^!0E0+|i zx+*Fk7At)%fa7qBt&Rks3jF8%{|~|?gNo<*Wi7`$=3%J(;9P{H(q3l66VTW#_$;dObB<))>iZyj!`#0-?^*3j8jL^-ChL-pMfIG?wq#&YBWHCinOM}!87)h!_L7y;8kcQIT|OgN`Azq- 
z9lZUcLQb&H@{W5#3h!E0ae{M6fQ#HjYv0dL}$caX2<rJwWY$qEf%4P>48{EcOJ8`nnesMe&a(pD&?0Y>7;B3Ba zm{-TE<5hLN_x^f);%R>R*QEHG&1UA{xxewt9e>)zalgVs^0CcjX74y(+*e0J_WgE& z6G+Ih->Igq{cbf)*`K1Op8Xy*P2HagsWz0>lfFNlaFEx9&cAy12xRqQc?T~TPH6X6 z2*rZ&1leC{;*x(tiC_||1T*|B@Ux2Lf(?Gy$12!Q==QG=e1Ze|tQ4Flxc${UCwiZo zlTj!YTqj7fw11UQCb(e??5h=0pl=QD;KYJt12L~V#O~ImjCh7E94Dj-X+kIq8RWMP5nuI9Tw4n>YnBoZ ztI}*`k7)L?k2LGqU(BiOn*A>FI?Hf8tUVGAhuBvfFJ*nQs#P(+Sq#WB z^#_gydpp@RQ!eXv7L!u;dFK{S0gS5{gHpR+3Iv0685DEaZ=Lo0pLka8dV(~uS6uZ4 zOJxTNfR8{T+>@NZ%dSM3<09O1gl#yI<%Vj_Q@ZC!g7F}cQ`swSA4zBLx(i4qn?91m z%2L|do$a~ot0^^eH6N$=NGkJtGT8>t5|Vn^?|DSS-z4m2`ZKPZJPbvV_?(I+*ekOq zGnN??o!s9S64^{fxy=MT#W3iny|^@Y=K2*1Ij->1uw(>|ckZF;DlLI7F>tg!*wZNu z(=EU*ho3agMl)M?ykk!vD;{4uo*pZy{hn~8=DSCy>UK`l?Tppl6X!@95eOuiBAMlg zqXYA%4szSb87(Jk9vVK^bls30=ZWJ0IjMQe;=a5!%R-u8DO~Oj4X+qc|;WgWToWH8C8t4S}vlO)(Msf|0IWt6|8Cw z&!O!pL)6TEmYbPwPA({!K1d>33PI%Xnz`kB^5oldOHlu|{=5q1E{*mE~k^>v+3ThACw_^Fq z=s~IUpip+O0*G&zud8hv&RWd&0l#EXgWB50VZNYtnBP#VkUE8I=tuq9vw3Grb4!!= zmtPux@5Qs;VYfFZd6TDRx%WuF>>W?lzN4LrtHOo!2p^lkGjCB>6i<32f`UK7v{% zRMIw*(8)Pz>0W4b2!2up7?Dx#b{*$%PucS)?D^w{Nqfm?(~R45q4a!dEO*U|C6n&@ z>+Zc*3$O0Iy6vhX*0gtY`&%~GXyY4c1t-ljX&D!~&UeM~H@w(Anbtg&);f{aI+?cf zl;tFU5{5~gau-dwi$=zqCf&t zb1D{Bji+C1x?WKI|5a6=-ZyTzSbW{PV!l=NXVt37xvF?BmtOdOYaVBHPv?|fY`A#G zrIJfovGqHy3Ric>+786-IT$-A#j@m+j@K=Y8TZm>J1(|QE~}k%*Tt-LuUp(RR`=PK zQ!Qf~Z&=HwEw0fWv>0}dwMNs}qZLl(d(^{DN7FAyi&ycvH_@8-h+>d~Jz}_DzIheO ziZmFDZaq1hP6?k{~|*(P`+7dROz5)VBi zYw(j!0~zH$uyaQH;}tjDg)_GF zDVuk~=6yEvRa@Dc*248{Xyfe5W45xmo-1)~BOe-|_`aR9 zx;}J3E{?fRBrC}CkDJrBcr-72bX#&+Uqw27Yk`tx?r&09c@gQsrMR%n* z(6HPyB*|h=V;IeX#@{B%4+fp41ym277j{pii~(feIlHk!4A}9I{+yF%GdxSnj)s| zY{bv3yTNQQBBJfiWuzt5VNQ+_HJdou#hTl5KIMqd)M?>r&|Qh0>#Zlah_Qi7T9Tyr zQES9X-;40{cXHL73>^K9T%F8EjPosRj2z8d6VW0l5bP0~;6Q*e3(jzt)DC7SKnHe#%eQT=A@`h68^wKcM6>%%z zs11Ui?&Apjog-%s9@p&S21yYoJDDRT$Fqc^e${ST1bxUAwL~o4OBrcMF`+|HA3PM+ z1rP+*z~7z49t#??2-oX{fdG0X9vI657|GaO#@@aoW$Qdj#OZsrgIr?jeJIAyWoNQi z_wrPK0shnjUiRNNWwF$1ehkhRjOQx$ zhfO=%rfsgX`%djUb6{-qOx@G0&D(VrJr&?~L%S2#LEns}o#~C-W_fXOqp^NPdJKiaXM3eoY&g&-*9@Totab4;t6N*^V;X{nJTZJD6hY=ZL<6`lO?TF zCA%g{cD?G{{n1QH-Yt&U@~531L<3`+zhIARp#0}|5Ihu4I5G=?Z_1vwI#n6T*%siN zvj2G7%MRCbx?J+ zSa;_N!_`$O+wHoa7}sypYJaLBkpHPRYg>Wgr+GZ)y#(_G8q60L0lj9!HrHIH?RA=K zSy|iHYObx-K|a7;{&D>agj~Daztu=sMsua9!rRuRx>t6%F_py{pQ|VZ2?KNJ53(Ew zCYeJEK8tAb39=om+69Or9I6C!N$_@RIt73QcmQYN?z?Ldc!aa~1#*E9?#v1;H^jZ5 z86-Z^0@^$e=D>ZUW-DYsu?@7#VH)Tf2;!AQLg(rAj;e?Q^nA9-x2XYUkziWxqu+8L zjdC%&IB~adb@Lf%4z}e^Ix4?eJyqQ>QQdIm?#b$=R~=2z;$sON^FuXjE;rukB1L}> z1&YCh1h{-0b4!3Ix;_fGC7+J|GaGJRUG{CLlW_5P?l&dmIE#&uRH7(x$bcNP9yZhb zbPEbIX%}x;#2KqA=Bc`rd&v>ocrbQIh#eFGD0YzQ>FUNS$0n+qV=K4CIet4-#Wk9g z4)Ot~v2{Y8vqB9ZrDe4E^3`n*k%Z+!yF;sxtU!Vrqv)mnj*j4QMHdPWiWIC8>ac1X zKzRKOhN4%X1!xKJ1fIbB4j_s~>OZnzjNoOeQ#T>Y7N)_@V6Q*aj@WeWbreAG< zK^FQG=(rn2c#(alwcMBnT0FWvp32^9y{|;$5=q>s)~F!tQ)mkjD(=hd7|+`7=``hH z+-rc)YUB)PY5`<8%;PYCA*WBh-RfS0mmW=G>L)g%YPo zzi^5aqeBP}_J+cKArW9W#tQbPW#e{0L_v+x;VY&$prQ-k-b%J}udfE~!RNM)dnPrOey((?bp1r>`WvOS>V@ESlk1#tq5FI{3r(e!Po$N{Dh}L8JIH>tw<~qm7eg1@ zCh}HXYM97dGnuvaO4|+RHg@m6He>n4l`&&EEoLu)djVvR@eW9E=H%g1&ppXQW8w0;5I3mQTD{QSKeLTB$*|pTt--1-VypR$#@>~RBr=X_)6exGW{otvj*MX%Dxmi4LknIq1 z$Um@dVO>TLmcEHT0#qR-!v=4C>n>@zm!4tY6P9RN=mYGR!U~ox=CAoJG@+?T5X#UQ z?4g&`OS1Y#q0j}8E(9^O9+`@wqVJQi%(}(Ok-v&u7@89|RQfKCVhw~uKW&HjO<%vf zpo?%xNBc;Bj|68|G@SqrHPqij|A0kpzl2fe4wy`}+UjT$?5OI9&>1D~;n~SU4R)0N z1sXhohZ+tJ9BQ_qV;z}cgB=g0O!TIPeGQl~nLDve|S9_~RB1SqD2&GYy4QMizC-+-S~#r}L>r@^L6ebcz) zb#^r3GobVO=yoc5GO`51wpQnD7iY`9ky9D7RNdkTLGooFVD+?pe!1&YffKl! 
zwF{yYfK3)@E-d`lLzVn{gpEB^n^Fw* zpo!T0F(AYIaxc62P16+yMn$b;X*BDsb#6 zDBF*4pa;}uZBU1~HV6g6ZQ_TJg}}j3PpHjD8hw0QqM4?pt;Sw+6X(%fOJos>BpTpYV`{8>f#q&phfqY$We^1{KeGBt8xJkETSfQW7 zYf9W~ZevS764e@-A)4A33S$r;5s7sz3UODUH<2i>=h)IS{t-p}Dy)9lRlj;BFdu$t zNrb_}m>WidI|km!kDrzj&g(2zUqSK{BsY+}h6Is?>L65ur7EtX6(F#n9!>SMsmF%a zuEZ@G#pbA&{VUG=2Iqc*%YK8i zy}>!&;H+II67_`_jU50$k^ z0S`xUSqpFN%pcaXv0QVanJHZ1EZdT3k@wa_o4jvGY>@Z%L_6+*?8aP2qC=JqW;=6T ziMPJ@Ts5~Vv5P5YV0mOkN$fWJ7e;xgSK;AVl{fInZI;->FPM9I^c}{Vczxqq<~^u$fKT&2-j7lT`6j;kwvre# zjU|Q8@ah2Ha+@u&#MAtFS!wVcm2czQQEA*9SW;F8w1W>}P&$wECuI3Ud?z19Lx=eb zd^aCO(?|Fv*}@*Qu$S*c3rG2{@Q?BR$UDYgg<$zS4! z`4J>d@XP!tKL+j;Kh94e?=(NjCvZPw_RlhYil0W_B)?)do6mGB{0wGgl0Sp4&YI84 zSvkv}#jKpG&CxkNg{LWV_wspueuX72a5k;PU;cm=bSloga4uxpDW0-Z?lbB(*5MY_ ziRS4{u9!8aQaRHqrqZT+Urls(Wvt{}#-1--OQzCxrjSpHQa+!_&$&@;yjpG%=8aND zm^m|VTO;;O+f&jibyE8UaS!1-fy-J3ahT}ijH|a5TUllfTVV_8No9#u)O?GrIpCJ{ z6=gZFqEys5rJ^~=y)s+T9ep{7G~C6L;6q$nVHPJgOf1*C6~BPpupHq5M_*AZ0VjaM z+H#{Ebpkxtt$>EQ6>-PEQ~A8g!@S{5b%inI2FhZRBB`hG1`z|l3n^il?lGh1dS)(P z5O`}No5`D&Vb7;*!?IJtHeSKx7(2G^Fj9Gr>y}}gH*F&|Yny_SM zo5`BVVhR<9E`Y|z&=Vf)0va~m!&^E|jI5T$N?cFPnZ}T1i(#Wsq%}xosmd*TzK}QS z`KVm7Oe2%GY%|4;!YsAZH*91^%#nJbLpw@YTconL>`dC);a=_Qa=#r8zI-y1wqKIX zgIu|Swx7*dHkv>^OaM35FS3!c42}vk43WWH$O*Wbhtyn^b8yuvn(54J#uQhL>*g)X z{bqBQn>)0{)wgU6$Ohd_TRN+fORjHusP-z4jxxSF6IZWZm3*2(j_N-WHXYYAE~^V< zPH~hgEfvNgYKu(h4tqU>1FI-A_yZ?$Ug9FtE zY^$KLmW`p8QoG8>b|pr49o&_?vNL|!dSd9M@xud$UcIt&WrNPT)D~4|Vf@8d8;dfX4Xfe+N<0XRj^Gb!Dkx05B=R3wak5lWrCbA3 zHf+UawrXq3I%kfuqH>jMZ>ZI7Wl3FPmmnLuVh3(rW=e%Ir95Z{p+9s-c^jt5VQ(vU zb=v-ylou7QR@keI2d8V?s4@x;!V^k;0!t*t??fTLXkz214BRZN0u+H%4cLJx!!ALE z($>l3tF}3Z64-)uaRfB1^fPmN8YHW zJa+8GYV6sWE|zw_ZW|5a&V7y@hosY>WFqyCNQNu~uKZ+!Y_jph$*jL_=tk{xSBrCb#AqiHE&_d@)bUrHY=G$=hNNdh|lGnLJNr zvtGl*&9qsxpU0950z+Qtm6U8Zu6m)`OL@$Ed>=IVnyyyh5-M5}Xf>+LfaG^DaT00n zMeS+!m+mLEH{2tK!|vtIusfiCYDjFRj#&EfMjOWG%)EKCC=S1Z0%EJH1@`V=Rb>7K z48wG%taK)mp*~4TDve=HJd67W28h^3WV?Gk(9f>9zZMv+CTwgUJ75QA)g@J&vIAs~ zD@y*jOb;O~>nO|N6-8XLp=OuZ9LopohFYmUJ3`wJyGpeiDQzJNUb`DuVy`p1iBcTx zx-P!Yb(m(22d-=4ckCDs$~qw@fZ}&`S-5$*#oZJf3*TX)&8CN!g9FWDXd}49P)|kr znET0KcXQ&Aw8Z`Qf>$)Q&25i-<~YqpT%*;z>FH2$^PZkIGg+|-=}@qN*=(U8ydcRP zuPqwsEwKgU3&d=7A4G1nY$uk~bG%g?#?AeBW8bzZFr@lWd7Sbm>A4|e)x_P?&BavK z{c_{3DhY}>1`-c>flO}BDho1IUMQEzCwV3ZCUuh-agma>oHggEC?wbNIjT&9@*1Vp zNy-MwNU(-Ml$Jg%BGqSvMOdGzvW|y(FsiB#RXxnwSr3a;)piu=)1FlYM+I>lok$f`0(Y3TeKo5^RyryxG2|Rw&1sc637MlKC$XB- zhVI{&>5BD!Ewuq9-pBl?Z!1m+R~S>ifu1;a8fd!{z;@adsWdqn#`0B~PN#{c@pa~) zdGWpk;7w%u986?abBB zh)d*a0#Wl}19m+HfGx`zs8fQKospWM)cnzA%iDd>UBxS9EZGj+ByIq>8To=OaXO5O z$)`?9$oGH^ms^KkdKn1u$`kRSmoD=W;KZ_e=ulZ5I22dQ>fuAOU|AhM^ne|Hz{bmM z>sua{O_#eUt+u(dn8L&G2V_4+JSMG{R7PGXWAP09ItePJR`A*{_@2g8!9GydiwvOrsyq+{tVuZL+B4ipR?DK+iqEISYXx^l>St6uM#2k@%B2tYIUh~vK(eDk& z&LV70z}aKMV~cWNkK4H>n(p-Vxv9DjxA7 z=|Y|mb~ba(3tOck0Uk>nrGgTTLt3na``*S&jqA*@+tsmYBZH>&O@AbcjytI1*W9xm z{ap@f+lshpvn8VHvStI4erm@3)sBuC=uSE6Pr!441Hh2u-Ol?Qbo+$9aV zHbfe6?K$7FN3rsw3X^{?g7Pr7dV~AJj;{wq7TvmE?95I#3R35yhX`30@o^$Yi7XO% zoycoMN<^ehy9F+;$}BIK1q6jHT>r+IuFad<=#AS^Pzgow#!)pW7b@~l*GHJR?Y`SJ z#Dx1NUDJaLVMpii(ce@>-VQA^IJ%?q(3>iB6X3T$==(>yTiM>#KF~RVBXJhw9`p2B ztcyQ_0ZQHiGKT(P#$WH5oSMP12ISraTGL7*iwf|gvz^N^etF3m(b) zfsi1m!nHTa)78sAalhO91p<=QOY9pY$_SF; zN096=_DSAR=722_K?1sjiN9S5IU$(+FhWHQKq3)N#$Sbz_YHp-e1p@#n-C;w;4uV= z-t+GS2M{D`RFz7k(pc`YW5A=;7ALa8oW>>9`tCEz>u@n|ECRx`+RY1Xv+AwC_qUr9 zT;AY>Xp6px{83z7UvB3VzEqqb4H@t!>O}9TVvD1KZn8H@+Ceb4(qwlocR4yXdMj^p znt(y{b~5a`6RhI4;ck$cF5j`cm%9-ei`hL(03Fu+GqC*T>@IuoqMOx=yT1sWh*4*X1Rkq ze5>r4JOW=z<>LT7o&kAHcISR|aDttE_osubaq1Kh0STq}w9B@4R%eJM;q*e}d1tYo 
zew~sih#`FfX)7sH`s-^((>J72nbHpMO=9er1;|Zt9cg++o^mK+;^_;8OkRZO`3jMm z0*2D|bTU<<+x^k@?QKUX?-&SNmHotj)*ao^Tb0^LM|i*z7E%)tktC-F(H`yxZsc~B zcAIpm$MPOSWQf~>OtwEUzeOBPjMwJx;q^hFikYXIl_Xf9>YTi zwEq^E>Mqvies9NMquxU9iK?sOz8f6+Hv7)~?+t}nKt@R2;%K`&KinIsg-&M>6778A z531YI{zn2qH2E||QlTR77pt&;2=sqldjYoYhRPB9iL)vB4t0BBqxe0RXj)9#1(BX# z%uozxO-ad>{}*!QQLsGdzph=lKeuz3ZT)SeiRXz(n>b3`Gaz1|h!~I^*TvV}_lBSH zUGeuQmnKcT3-TG7C5lig(XiI04YEO%w0{StCB}jegAc{K?&e*8H!p>1l4?<`!1phu zj0LUI>}=+y7swV~F@-NeBo6U_pd}W+Pvjbi$7ZEfd`zb6>ep}Pciq=UI-9;p^;N(A zy!*wGzh?>ebGtj)H}1c;`&ZbJFH(~P^*qfgU8~C-s%W(;$VxOJGCr3}jUFs!lng4SZt5 z>B*Th&mT`7o1UJSnNCj6Oq`qc8Yxhk^k<9EgKTd7>rpE`zI@0$`Tj{70ydbL`wLp| zAPOn!CKgl0e*m}6yt!}hX=eM~uk0DDo+QT^!Rq}seBvcWrytfyL|TG42?=o=PKYz~ z@{TG#znTgaOHUQO=GH1}b;T!rl!zz{!YR<#h~tp%Cyfsq8C_@syaGxDzyfA;l8w;lb&ZAO2uTUX6cgp5gu6%Wd?B;)6&vSZ%1zG$Q*I**Y=)lEQjVqF+H|> zcRb+!bngz;PP<+EHnm7}B~@CWlrLwB?%91qi1^t1o~~{XB+kC#m7WTToWJ5yEx%8X zyNUD?scBJggrcM%h);p@^to)|nrI|8h~OQCNm@@4SX}QJNlgraTHShHHOvdIO^0DFr8pAIr;=XYc`a_bH-^H{8)IQtI*ZP*#!J&4lgYCKK_yP#|;ZkeBe$dg3FI2i0s*TpuseYoB9uXuIGQba* z!4rroLpjxF?D1u-8EG^wi&5tfeQgv$uZ`r2iSOUP_OTFa|4SOn(;%ecxP7Z=ifwLX z|4aR08lPIQ0LJo*837x)k&3nx>2m+~{v%^YiIuzP7&tg9It9enp@vmBRX07EoHbu~ zrA2k~PZ`z!hDwhJ8SOY~P6`=ZKk3tCAz<)rv@FZ16{ss)gO7aD)_L7u9JK?>P-PO% zhGbrN0YKl^U!>l|U-L#B9IMNb6=k8Z60pM*>szU^%UUH!fj-`ZCV=yy$YKtTj(Zi} z4E)>T1f6h~vicmwb3L#1E`oqijm>ro9Q8r?)r_}gTXSud2)c|cZ&(5TfG4ijJ@Xqs zfG7TE>pk&7;NT5#I^f*H^KBNY`aIEVC z%+FErHs}L3R1O&@E7(j30FFijtmP?L{~YiCU#5;!oRf~-LlQq-{fM`JxzbqF{y%EUt? zr76AsBLO8mCB@}SYvMv{;fo8)X%$Q>p7pGr3y}Eu_H!Y`1HICAE@7RdwS)-q@Y_24 z!A2@;CmgKM~ zatx1-+NvXj8{f6N$&(bHm-ABXwR@I(dEW}nL%-xVNq)2B4apBkev9O{N`6rC+a$kT z^1-_@)+`a+j`F1w1w=p7I7Y=Opv2Bl|5+THK%M*I3B8Q4TdtZSj|dOs5Yi#u`RU=> zw-3X{#bf7&X$uU?W==eR^4N%s$WB2SHBL;uFnRI()VYbN88700#zHz2dM=81b`s~A zN!i8!fDDPxgLvxs>9RJycNBGfm50w-ssMBLc;NKG;;Cz zsi}#J$@A0IF~e(EeJZy{YEX6J1-w^s>e#u7F(c9Xk;HOaz2K$u7tfx0_WUKZ(_AZ7 zPbxRp3mrQ#bN(V4iq?TJJ2`@G{5*e;z)zYeZQu4yf9p8n*>ArW6JFzLJk>D_ewnF zMd{FU5eLGlYw$&mQ?B&J3E22?s}gaOMB+s9bf7FK9#>P2kPR5Tc-YrzYh$BfCBjf_ zzSNK_EhFBEAiB(%k3>I_=WR&)5}x$MS=Ks4%8kOViWUZ#i>mPaSXv^yhbmJa>Z%Up zr~X3M^q;F*$Io?L|Gz3OJ)r*#NbqNRP=BBXyB-7|>X8^6$0&}FDfAs-Ti{GmAUupV z!l)lc-7xBfe+I|0jots@*lVoKnDl{`ge~%$0{6A?tM1_OjXI&R`wzw~H7ND@ylWlV z*+BCu@Qt71e)-73O)_JhNl)d9NrV&ASsc*3s=5!4e8_(9{)b2B*a3XKf(W@60j9wL z62648gsl7$wMQP2A4DNrFDQ=eN13-U_s;S5>MQ#Qshaj-36^22EwF`=qK z*gA?x1tkP&O&*4I`ZxtMi&IrGbpOl=o$=41{*-tJREC@|tVeCAxJOz4jL26&paH%| zyqvyua|lI}UyW<4z3osr`0|uxbD|{Vs)P66h+4AyDs@T)7LN*1kTH+2V)da6DveA zAnsRAPd*_V+>Oc@!pI|S$l%G>Wn~mD_q)~88{OVOgFAlaFf z_;*BJCh~8ItVxE9wo%k2-XM0me|)Crs6hp!1IUs97WgK{(`KO#&QdxJ4e^YfG%7i? 
z0VF^WDL9i%;Ul~+sqVH(gB@~DP1?avNFT)g{mH*L`gy92S_l$^*R7p&BS$IkFg4V6 zLdMT8hyuQ`v3$||Q_6kW{mwHzRxos>kUS)*6(Yd>Oyh^=j_h)G98tDo&tUS>HFZXD=4# zAd(Q0I)_^==t&&CCFGSdeR3S!DgPJ}l@=+RRmB_fE`F14n)p53WDvy{cZTy6@hv}# zBxHt;1-?T}oSllRGkz&;YUWh@Ie%yTF*UG{Ug|t?e?=rlj}%@KKOrKw%$LF8Q;JKu zVnNU$oB!DxK0&kR>4}8M3&KqS6cA)d#otg$0AF4dZ;Agx2@Vly5+XQ~DP{|@89L4p zP0T%gE>#uX^dL<~FE}sepGeeC8GXz0Csan-j&b5nQbGM%p^%kd(D9akXCiDw8rwER!jZm6Alw>hiS&nu!eh}; LdwZ-S*7*MbjzY@^ delta 7386 zcmZ`;3vgW3dA{e~yHBm8)p{-KWqBpbSnG#u3*)CGY;4Qe81b+HTgZ`Dd#;kDW+r7I328Dz%AF}QO-W}++96FleYgbr z{pZTE?4-N1|2_9V|M}13f1mx)x7q7g*v3?CZCJr)JaCPF{E(vj6TK||1o5&DKQn%L zW4?xE>xS#N!dbTd@|RaPvh2#?m3Y;&jk#6BtC)heFMIjN4^)_9Z07zK7!UB^1%0@O zAK(#Qdw~sa;RlVa^1IomKg3t^M$q)~RlMnfGJLy{m{a&5Z{e*M z*c=<)&JSbcX1@9bmACUX7;}fwF{c>qXLs;KtHL{YCto|R^DbUlCkO20xAFC0w2R-( zH}H*My_+AAEH;6~X5Ir9d-zelg>OaQUVabn<+r2N$B*&t{0_AG`MrDx--*@$-^F)> z*PTZDIOBWxUf}leA*0sV->mRHY_*>cfZtul0q*a@>z#Zbc7Aa29QO0O@OFr^<4W?r z59x%5oR`&gEbjbJJ>+z2C!H^9dqYX)`h{VYgfqXl)%m2RgT3b@B2VdKg+kVODe{Hm zuG+KJW$CMmXa|dHy$AP<95jsLF(W1Nnf%1aOd(sCx?`l2wL~gAV`b9jNXg1%&5`t^ zkv?bk6ldH}--MAjri)@=16cgK;$&-kPY^fBQmjGC4Q9;Il$p+C+VE!T07^|z6YKE& zFhPJ3Pyy0Dfa{+C8l*59e&!zMm$h4s?H02Y%V+t=)j3rRT0UEuV`U}ZW(7EUo>#bP z1qpXHgtm6YH^-i2R+uohc1{iI}sdzPY~lCS32r;ri1WTkrg7>^au%46JC}c~up(TbE5= zRbAhBwonkR-?T)A8_P~6+?UOmR=SWY4m<#A(cx59oR4->lg=br=W3a}^|^RkLz0;3 zMD6;lQZZ{d`{SD{bUbe0F;85{fa}ZTCd^r}Ribm5{3y@l+(0fhO%1Vy&|1!%a8(dV zA@u;|38_ZPbW}3f46NA+phVlWFblFe7FOHz2-DRdi?bHI#kC-7t7ykmaR6l3d|P|) zy%wA@gqbPi6N5>=8%URgF!Gig6vp{dMi|`H^QoNS>XRwK-2iCE3Z@~N!Qdvr*8oAn zC(dRg4#HBZknTJrZK+ial4kR2F~QY}%^)~+f|Y$-EBkHVLFM#|Q znEB!oIt27LB(ZLOV+k7uc7wz!=7vSA{}MA}yvA0aS7;MFB3q=vwO0b=u&qJWRLgG# z?C>1C)sxKDN%^c`HDrh98B=WkaYaworMjKQRa4HZt45s0rrpk|rjyP$oBEw+Beywc zn)hmJ6KUtw<^oGQiIy(|zS`XCe9$ts*cxjsjmXCfwR$N|?szqW-w5Fx`bFy+%^9Ydnpm`6X*|-o)p~ZbxbV}M!0ZK3 z)`VG;s>62TEX<L{5>DtrkHSCOny%(ETHfJqFc~;sN;2~ zp)-3jB0^Y^SViCrffRu;0*?|nOMuod?jL(fL8=n%1kJ zF{1!VAdIshfy7x%?O`J8)UEAid1vR^CZ^|;(lGO+N< z+6LCUBmhiGF+g+yTxG644nf{<{$d-@->3c#=ij=HB$KMEl|&X~87Ox$EGQsKFh!B@avgbUNkt8Nia?9_rsBN6u6cDB zeqBt1;F|CDo?Sg#v4Q}CMkZIviDu^?YvSwt=#*YKj)tXJtg6l{wp0MFUDP2TefBHP zk=tIZ3>ET5IDyY>;QkqF5+UFOwoj%fEib8wVYJ;~5ds)5h@4wjlw{YHG0DU%5icE{ zx9EtsXpiS`BhRAV*RY^Q)I*MR|iA z;jwdioT3%NEZ~YOI5DQoUb0%Nt@BE`)@qwm5aC`qs_?kXt1#MHeYTyiv}@!j4T1ZB z&Kvovi)wYvJbVXyi5zU#HYw%0rRkdNI^O)e&RgsV2DaWbd7B+@o=mPil(?kMoMB42 z9!u!N$PT&Cgk6tG7Cp}?2(n!Ncs=icvC7s~Wvi0c<$EI(xpU!X$=&*1(!WK;bwg&+ zNN2_~h7g;8h6x9Z>2$WVJcW~196*i&D}JD7W8=^@w&lZ5@tM_oCNh?I90LV&zSeWs zTB+K^=oTdx3E-MN7A;#2`N>^~^LinJ z0|*O+EW(hWH@#3a^3oByF)y+%nF=gLbP|ovEM_wn4TMpq5pNes-1tE_8j%?*RVcn( zkF#zllPea4j6=0vb2KL-G15B3Cu!_%%@Ii?gR<*S6|oHN)<5i}g$D}x@oXk-x%$~c zCNIdlh`j`s{IsiEnVcaSX?QOgx(=IL$RbVldaP@SKBc3BXj%TKY z^oMNR)sbA7E@Ep~jMApCATB;nEa{kCmUo%us@!tfVq~AAKIt%H^0*I-E}!$C(QA?? zQ(7oSP}oKu47dwFzl`^dOod;K0tOM>>kvnx&d;}X)o4*lomE{GFF7r}KVWY!{Jb~F zoYOnDgnAMuCZ+duwr&5bN;kNtp$aUJ8VZk;r9JSJ`mw)aY^*l4?mCmU?)Q!E=g^h4dY}+1lp`yPbdxu9vB` z7Qpot1+u54E?#xs*s-%$viK_XNX3@tP7<>5J?8Ipijj`XVcFA ztVrc0RTMi2>?A;YMWIw0hY(#~w(yuCpr|A`sVPaHl&tu3qTNs6MFJ$tTdc}m3>_Y; zduTazpi;05ND4BGHDEpBb)axMq%Pv9^UALOZg%}%**ohm^~|Jl*=s)e5DN=$>^{jV zpCiiq09?&1jor{(L^zKLx`t~DA~TW6r?R6I%x_&;qi4t7G$lzwbA7nK)_eyZ!@Th7aUt6A82tFP<$4k~%4nAl_KRNzvrBlg%3zO9Nsu$8KRUa1DS zii8DkW!)=`Q9|9Ie!>b>!(6j!Y!tubmYw_hS5|!7e-RlPakA>p)>+J|=Ydvu%~s%A zF%u8*@I@7cw9ovmt%>hiQ4}&13?f%Fa`yQytHDO;&1+F|{{j@h07ados9#oUOByP3 zY#)y{W3~zWc!R?R?o^YybI-u~%`(AH%tjK~LK>C8Od@5WwA8%(Mw+;a%CPfoLGY%$@NHxvZrBh9bH_UU)2@Eu^?tWl;8onjexkZx#LZBn*9%ByLh% zMWvK8$eJMJ7=A&#wLt}RwH@sk)F95pcR+Vl6<^20Vf&sqjC|<`+IUdUb>)+0LHPe? 
zUef0$XkI!BMHO!@ymeQA#owk8BLKKD@^NdXXo&UBPY*o0nyPS=LnZ=Wekvmna8?j$ zjKE6g^uas#j8Rh_oOIEut5dxs{*2JlEiAd0>js(0nUjUbo?_0s2NRVagDx!>g9d%* zkVE*%tn(Tgve-~^tc*9c+7FL8O@o8Z=wKV0ckIDEO&J`JoBy*u{dEtNAg zBt?KE>jsW^*}V7%dW)p9xEdTSl&oUOx?wJ;RgTh#_X*JK!`ehLrsd zf5tJH=c+~Vg+M>|2JIQ#Ez3A1j5%?K0!sw`Z+_qSL~78*y!6V{}W zkiOJ(`tCcU6WV$IMDM~6?|WGF%heopdLG;u-YyrBF7cEz_TcVz*|SV9Q@P?Oyh}QZ z+o(60^YsUR$zEG{`S=7oCjO8{hLJ(RKU+v*yglv)R!~ItlCv9rLl&Z+(3pDUI(Z{a z36yt1(eIpjC|)^EeRcayTzf5b;i}AsM9RpDUlGgyA@C)lt692wFee0Y^^WXgYRL>G z9IP$U4cd z78F=PRDd6{>=D5P;Ya%uF7_-1$GXb9l<}*Gk?N;@J%(po6`js|r+e6Cr}i^nsC3enmt?<8&K^hl?_nyH z$)?7#MjRZy(B~IxF$@XpZM4eu1iA^(PLX3Cq_zZy&>9?s29Rmvk@Fri^w7j>xLH)h z0$x1BVJCnjO*%ohAAd`YBpXbsC%kTL=)_=h&@-;Tp+OC__6^h`FD*VxfO1Rm69Ure zUO>wYkh$Rka4|hbgvQ@9f~-jFe}@Qs`BJVpBmR!swJ3Q|w3vmGfHb@!-~%E}6Og_z zjEkaTwm`KU?vu71Tc!FG=YL026<7W_B>$R`7yDicMFo#{wU(vUG=VYzB1ejD_`N`L zk-4_8SKCjdqfCLqD9Dv;0 ztzTT+$tvSIKBAuV)is2#M_#6UOzx1r`p1#I;f7!g-f(rSR%b1D2iw9mv4&W7%Q6rR~#uQ%geJ8ohy4k3fnl2{E%X#`bJQYzA_Tv}B^rBV(oj%VzwwDy|W zaf%~H&7p_(ltgpDQZHOULKO&vxKteAz=;D!1bG}lZBh(-)Q-L%#$8K>w-T_fgA zV-1n?O|X_BmPDJ3NOZceg8o)wm%sk%I5dAiSep0^{tR{TskT~LT-XSjbm97kjRit! zyG8jzH}Hea*G>tjFXnS{r6WgpLBbArlKtj_&b1r5{M#aE8ejD}tNLB;)k1u!<8$xT zi&w5<$$2fW)xexK+qmxe6wihmLB}U}o#JYT2hFNnQuX~VCe){#5?rlw%5W#76%=ez z#ymnpyn?r0LCC9P*KJjs)OGR73NE|iVd_V?B0e*xON@rHtjbJ^QZ}FX*$NUvQ$R!b z=}T}%+)GcEv$E+Z-=HnZs+_unv|ZI2o0I$ta)i&sv-I2NVnZ8q9y$5FnUCl8=k7aG z0~^Oce;LIl$mDMo_lo`F{pVtk!nqiM(eYdL7-$6;7wnv|bMZ+@Y+6%ShPme-92?)e zCQj~6_G^P&dFbSa`Qih6qECOZr-!-mf1ETj<19egp=Ix)+i361uhzuVykd_5dvtf{ z_R_!@e>#>B;*oV)i-0JO_;2*!T`1d?wyksfC<1`V=h1@1*fzMjw8ZZI@3&KYRD5Yn zPNjMVF?-++An9J}4(RC(YuoIZJ-x2I3o4fQ7o8LDXWoNIe4Cm3AUPS9k|In=Mtax{ zBSb%S|+ag}b-CC1GMmu0U(q$N?<&1iP(pXmP z+NH52$()fR+>o#X{xrrOitp@_^0XN`hj4Tzs_8`bbHJ45QBiBT8IX=o&$3m?sXGeQ&SD)xcxVEMP9Fd# m`OpE#{RJj}2j$otf#py1Lm=Jb;sh)_UPiERSkhrJQT`9@-xK5j delta 1406 zcmZ8gO-vg{6yEW!z3cVQn%E`=ig5`AmV#Y_5I+I(LrSPZX{)NLm+jMq- zgqXr12dJt7+DJi4544w3(MpYaN!6-NPmNTm0fB_IArewmsZ}p1q)LySwQ-30wDY~U z-@JJ<@4cC?Ywr1-zd9TuL*>Jv&l7dxveO49mz#nY83+_}#0(i|QY;xh!UG1fkW++= zHDWE=W<|`{A~tGUkUe37{0%PRfWi$X;)GU;5)>)At^wKhm$AX940P_z7#X{BvjKn~ zv>(9(z-gbD2CLdbW4R0pO?{9IK}1lT5FCqNNoAAT-=>ZV7hNmSp}tCCj(KALHpCK> zm!p5v_S_`GI0ca!=nqUc1~ivt2efN{T24qbMkJuX<3gLJ(kx~$DqkJ_T>VY;SoISaB%>OA_J^#hXXCqs6;SGQLJ_9<$g6Nv} zE%{a&Rx0-yb3ob`8JA~1LFc@leFrZcpF97=rQH|m-dG#n4TZmHTED)Qx!bYPdaBS8 zECf4utyV%#h!(p4bJmaDiT_k zwcWG_v@2Gnn>gq(i$a`bD{WNke*!@jW!6E7PP?@`R`;nxpE5hc5^s4~y0UB?X~;hF zytd)LFQ;r8yC25e-n&5Q2>x3(v?&g!g6FlH%9t!t{z#c*;` z=P^=q$|TZF80maGml>ybM>l6;BtE8F(AAVmR9!$>s7|HGSn*O}3ypz|{^%mQisBO_ zc3DBPpo>#EtiY(6nnqYQd_o4a8jf}~6icA!c#IH)v$_Cj+-N)lb#6R|iOy0}6{ut2 zP_U#aw(8kIN`>iAf1)tQ{LZ-xwquI}9}VmZ&PB(9V=1{)*|=HRxGgm8+MFMgn}fH8 zmxov0TE4I)1$Lz7O{sZHYT2^AJa=x_?Y-5z+`HlL+j95MjTG#@9lL+i?q4C>_J*%K zYjC}KNABH}d$;Ajd!Aovf3Ceh;n+U&&aM<#J-#K$^PKj*ZDY_V>Zr`(MyhpFPSq_K zW$5FTO{J8LY*iD5KmZqCrpT;=T3^p62jpT5AEIL6_idt-i&Ze@Y*pE_sjG`5H`jWxMHl=e2eVH7b zP|yG#rQ8MM5vlzY_E`YHfrSC$L&o}$aTGslOOF`eA585drheZdfcjPTfT8Mfr3ZK) O2kpRn;Aera82>-x_FfPG diff --git a/ultralytics/utils/__pycache__/dist.cpython-39.pyc b/ultralytics/utils/__pycache__/dist.cpython-39.pyc index e79d20c6aadbb0b7b3e741f01305a07573c9f981..ec3b3b51472b1b5008aa98a14d1496d8b9903215 100644 GIT binary patch delta 939 zcmZ8f&1=*^6rY)7lWF#Yet}rUj;+{TTYIpgAgu}(RHQ}FR#=6QW~OawK9+Pcev z?LSa+vx5JEphqtrdiEY~9uzMgJya0iqzKvx@As0)d+#^z_hx%|XQ<{?s}6!S^8Cx% zXS!QEi8pp9r*0E&gr&%8S{QM{&Crf$lVT+LcLhN<=NT8iaz? 
z)Aghsa<(Fv*H%g7L5dfKn~aMPXgTv%m5{8RiiVRs5mE+Rq->6DopqdGmAP*0MZ$I2 z+#DNovmeGjp3B~w<0qs@fmTNrq(V*?zTR+}**EhgHnOeKLT?<-o7;X8D-o;Q)F+a< z?L*K_<^yXY7SdC~<-FzUh~u!GP5~e?MhKz5%@X-lm_vTjYCkqH!4)uqEldt9o8UT@ zb%^)1K7W^uP#ID9x&f9hXLqd8*$&>uT|-&&95-T^XxCKsHtLoX-GG^POm2119^!VV zw1+yz8tqygt7EJZm?X`=O8N~7u z_(n}$1Scmo=sPK=HH-q}NE+IeRU-X$x2=MQVOofi8$qlJWe$a!(J8vxKitJlC@uk= cjS}7t#Z_G`9R{G#SMiVvt{AlnJw>a30Q^1OQvd(} delta 1051 zcmZ8g&2Jnv6t_Jy9`EezN0PRbQmI=CRkzKiO$6dYqy$lk9-u`V95zSJT)FTUa76Ndz>O1!aDe9t2vJ-9y*KuI&(FX2=4j>cTsv;H z0t931pYkigar-j<^7z)OKpdxD4zegd6MsLKP}Q?|MhB2HkLYIps0;OmvMk)=f@ui!RH;=n1#i@kTT&y z^)`F6>%Hot`zzk4es`~RwTzQo$hBJD=$mTI`x?8|&)!D|uY#-bM@6nhu8liRQXy>` zXVA8%OS=M`V=ac9Plwjx5^SsM0EoOy+L-+1+C|AJV_i&fhzU%J$)D6C_y(3Y!E!{b z{Xc_&wCjBjKz&nvORpSgs{Kcd;}_8(UNlYs=Z+2*P41lFxm&u6umnv;nwAb&v`&zW zN@vmL?g>^qx>MrXqbnbAFDIPNJ-u{@<~Hgw_e+nn5ME@aAwDwzps*_O8~^Z}w!jpMNxPGhZw%p=Aj?r@ZG<4y~yjRWfQ zWjy!4GD2oqobz=ErY-;=!s^OhfVS#k1~sJk5|*3QeYSI8U;d!)$ZH@CDU_j7WYBJX za+a4(>G|uLwpm80i1WSa7Mv>=0Fh-Z?N@(E-UT6FvoNsWTd?WKs{qD_r%lpi#}X`& zLDIULrY_`YGThTis%qMdljOSQ?u^P!8(_=Ae@$F#btkiTvV!lW;;#K+)I|UWPyKg> NB*dZ9Zn76x=Wj_1`IP_w diff --git a/ultralytics/utils/__pycache__/downloads.cpython-312.pyc b/ultralytics/utils/__pycache__/downloads.cpython-312.pyc index ecf94e020303b79f8924a8ef96d3260578cf2489..b40e6d8d2b72184dc33bcecc143c500b6a79b444 100644 GIT binary patch delta 9699 zcmcIJYgilCl{1oN^acq8;>`d;LKqMa8wmKp%V2E4b|8-PaEV|>ShkSx%wP;FMdZ;n zG;WP^n}&R;Q?lvT+O@YOA7A>pyM5SBHgUhWsQ@P?s<&yj+5Nh^Z5M*mJhtt(=Z-XB z$8LB3?1eeHk8{sC_jT?$=iYpoyuLsdy=gS+2uP9Me;N3sn;?FT3v!ZLps>22JXN$1 zKB?$e4iNo{17?B<5v-wwH~<(S1#1iv&l5q#^8}l4T6Ic!!k{8fB&&#iWk?Ys#()5u z*g~9yo>Fg()Z2vn0KW2+;)D)xWlZxOm^ukF>9q6zikgg0ke;MF=%k}r(wdxcM}4ML zpLIulHm=v^43N?Ya?yumyQ7@Vdy;%GldWL$;Wvw|WNq-9%~r96@SDTh6$Ga}v=x~* zWi+u4w!ouei=csFolg=E=4pw~|FIj{4bYy?ZemN|w}7o?OX1hHPR7I*unhc_vBfOo zQL`@eV}(_r459ZFHR$&WCs~8il+TursY?5O0ziQq7^6yu0s;qxKwn>!9u4>cM{CEF zWkaGC^tA&!l3Win6`8z!24Ke9@EFNM=PNaRrGW5&i9}{DqFUU)%Cl5 z-ag(fdISDm@1T2t>lNICLq4vTb&Kxa%JO3sm9^4W)$8#E++tN_nSW@sY|J~xLMX4lPca1=nEVNgDH?kbBAc_ ze1H}Dab`NIH<`nj8L+sQ6Nh=fAn{><2Mk`46i-1J7Qi71=fLw4lf~^50t|m^xA-*v zb;fI)T7dw^Xag|w{D@b8nbQ{6#dvN3F1;{`MfnFsaY(3hxkg4t%D7|FN|*Hq2J@Y3 zbN0w1k4Sl>p`Xd9bsY>0a;}jh11^^1hWwo9lD1w&Ir=_bWGRP@XM<087Qi@h(?;kl zbIbIjD!U7B$pn`3aUyIbK@bBx$Jaw;BSr_M1n8@F^i_Qi*)#VW{SyilNXSrO#&eSi z+ms1)7)=R)Va9nEddFN&9z*J+J!CD~pX4xYg%*Ar1`Qb0qmxOt#45-})i6?z*T?a8 zbUCT#^N4lGp6mn~dy-SqB@vCVk$YJ$&-H_ZqkJ!rcxdj?g6q(%J6~9OfUwb+-`3tu!pw?!H5O9qP-i z&y?0rH|)nND6RM?;5-hWa06Y){odwp3THOGG&bA#caL7+E;jz);PtJ$=eO>T?CHL? zwdaF;`;7Qf@p*B!<=oi!!e_(t`CB7~trIP$HZRje7K6UV?APQGI_qTiO&j_HQyKnM zV)9qT=|(j2Z0J;IKGAl=l=^JKsf0*g9@$d3zw)^a6bFpcl}^ur3m+)aKOO|ScyTCe_0z;kga&C<42d8s75Z6ke| zR$-{4p(g*bDYxw&`m(F34od%2)mRNbZ*0_IxRu(WQI8cPt*sb^@6#&!Rp@24Mf(oy z%Hz6c2`&0zUNK79o`(LMmxLbMo}z;7r9!{lo|z2{aeAE85-j~Cov4qM2_mREWI&Hq zCZl`XE$GKP^sk?4??8HcQSlOF1w718enWgR zPDIk0=8GF=_D-9pyH8so`>CLU#wQY!NYrnCiZr7CvKLZ(0n{Ojqt18)GEP0e0e<*u zw9io<4ntPJy8#q(JT>srfip?N0F@~m?v(`Rp9Zwc7+(t;4!AF8s3^+~gXLuBWao6j zq9JF3TuL;Z3{8fng@wfYh-Tl6f3|ye`-P+njX&x9LFa{SSMshj{(Q$zcU-B9*!C@I z_RXsj^Otor!I! 
zvWC#t%!(J9XAfPZFN#88GO`qI!5QdZw36iqB3#^pz z`K^3aBi;XJr&8#q0$V}G7czFbvGvnpep&lg%WQ=m&#hn zFCvYrfhR?2l6H(=SoGaC!AC+%Nl}&!2OBmsU14yj$ z=+EVLr7oyLj*6UoaqY4TDvptYf+Sdld~|~()acPkRVpK$eLbsOzte(*jCPcSE7!d< z1^wQV1v|{W)1nFKr%3bIaUblV^is^VzI0b1|kBxd+oEBn=Jg^5;jZsSkDS-+aS%Y*tH-T%T zH4s?wgc6XkDmOEMO=L~M1Q5b}S{WqRq@bQnW-V+AYeH>RxsJqOBJ?eSRe5VC!KMZi zpC>?Ai7VEftTki`npi9Pa#cO04{Fb+p`TUNq?<*2o}?s}%DJHVkPDHi$>DT%?KYPs z4jQmrYzC`gGtXz8qhozG5uqfpVy(ZP&6Y&!E4dSoJF^hXoNu!R*6y!lPdvV|DkX1nhQ-gY0Fy16$lTk`R zeb5@zz%)C~VWkEsWcA-KGoXrqK|AcK93ATn;%!zOw4$e~UDO5{WkU&GOJ2d2`boA7 z*4h=c4p6M-eEIr_gDTzx_|ixi5YF07jOO6K0-}Nt$PqXo4}cj)!X`UmM|*41$qg80 zm&dLQxKJ7-(Ez}OB7{=#_Cv>OT=rba^JUhpJ9&)3l*bi8V~{@lFbIccYEm;}sCjoo zMaJ5^hCaZ8?pytF{Muk!`n@sGm zHLAvF( zFt=eBf!}ucPKRPF9sRZ>UB^EIlk%Aod=RvGOL`8rMSIRFqG~V|!2lI_5h|P{FWH0~ z@@?s|Nv2>3s-t9+mw-ig()?GT$ai4?XPn@7qhD_MM%aV1K>$vbyG` z{PP$XFu*r`lop0P9`CW3-Hs=O=P|(MbJ&>{GXwAhMU^03l#=#Gl#cmw{L8q30ymW( zjcNzI{@%V(@c5-wlcq6B4@eddk3BePg`%W4N(%f5+>suH@dQ}D3>Q>GeQrLg@$}-6 zaV-C3f=Guo=A=c6YGal?h#*-u=v0FR%`~VSF*D@${mH)z^pe2*5C-9L0~4f!L~k}Q z86Rj26Ww2WaH@GxlQ!M5kX7-`9cQ{<>YUe9B4wisH8t+Yn5p=7?TfWTWf zkDMC4ZqA!%TvjmXg~sPAZ{!!w9K3F?owwIU?rFbLv1spH%z2}aOX;G;HKBc1pEBh>vE@cu{uy#sdvQnP{@!b8kD#pPT-}-a+5Sjc-8Dl!+TGll zA`xC8F!|4b{(N@Df8WeUbInf=)HP{3>J+alN;*oZ*UO9@jQaKZs*Yss8!ipx->A_* z{Ttgh0nU|b6Tqv~EWynd-Kjsv+x#sMmi<6RivQO2}z9`~>h zLiMBd0dAKC=WZKNksqF_S+P!vsYhN z62!9vOR@?dF-V3~Xk}L_`qPeANEMoE-vsdA+eeHPO9x4~fYqm!AsTF7Cwe$ltKcUj zgZMi;E$Lcz&1wy)#Y`Y9r67fJ+f&i+c5YACiv?@7YS}0Nk_NPES9uZ`sA@1xjcmf# zl|kh<$*+(gM;dx*S8_cif;kgQVEg2Yg>nh{V51;WisB~*DKJ~9J7`o<8gQev$k~xu zgL70+3skIkp`wv!-JMFB9lT@)W=cAojUohihD5wgWeb9V9SOlCbiN}u?1jA=JJ_DU zFa(2r0SH3-;Pm&vZuShv&$Z-dg027;8gEJ5TOuCEh6-K~C4~Uz7u1h*M%UN@4@t}5 zuIm~}I{cPfRgSDHu#!Lt8QOxlI}c|=k_Ncq&5j*;EP>?JBhx*Z|Njalu0$k-5+6ao zEUkQY@C-u;Tpl&Y-hSL2M{CO}%F1uiD#gWzN8J2?z!bUtqxk%<$Gi`7qwr<`QFMM_ zco17+Yg5lA#?3%xh!6CGGmBhV>E`utykrB40kr3yRPhuC{Le?~%DPrD-f#4sFL2 z5RPiQ#*#phOR7w8$&=(%jL{gi&NZfvcNtUEx%l&FTW4X4q%kDxFapPjmn=#az0jFX z{~DKV=trH^HQ40hv40j-5BrY{L*$glN(oFr0FvD&nS7EqlB_++_ca=VXm5oyu zi@v@mk%&terDIFOON=nRs6mz)$L=L%hRx9d{-2>yz+Sy4+ zz&P=douD?L*SnrcTuLuF({rXZQrQx*Zl5rqhjt$^M@&0s3eL1#Xr4FiyjZ@hcfom9WZe-56^VX=FK+G zZi?gO$MH-Oo{9S)F==Y!Q=<`08owC?<2eBRcvj@J7FPcnh$v%{u2kT8M$$d*_R|u$M2WRcDN>5&cU{iSEDh3tDgP9NS zP<%|w2^!gq!kVdCegFgQgm2=wcES%spqS*lSrP>9XjxqfSW#&~9HwXw1S8Q;@5wA4 zfybekofaF*h}Y-CDnxJ}{handHFXV6+1ik7@pZzC+XH)kZ7A4x%rjZg`MSJ1xkt=h zkh(yCK0b4?ZnwN1+-hJUL%ky`cOM2r24>7BDIxh70H50xob=?iZiC5_SmgD?%^<%q z!en&~5B7n)(%i)685AdGfp3>v*TyO@_3eBr(6Rcw&FfUK@r7@kHIvJ;HWzm{ysD5s zv!mei4)^U$q`_dLN-K}I2x)8#D?ii0G=idp<=o=Hn$;x+Tu^{2X>qpTN5nf#s2={i zu&HAw56;+H!WtQ-@Dg$EApqbXW8VzyH5|Z>VMw?V2F8u5UwHlaafWnYWaly#$%;!li868S=S@KQe1GlO&zf z1)L?F{#QkX;J4(n{{+hj#~xhYLHAHp2~*B1(Qmu6!#3O^jYRjC>8)ah|nia2v0f3o0nAT zPFf}a+_Vs8%T)8%Q)iUVXD*rwZ(0oy2+kmk=94=ocb@22GMG-bPqv@f1wr71yoiwj z5AmHIXE-ttc*iibq9l?U$d3zRO`i!kF7QfYLend!veuSAv~=>*Z0+i-?x`uQ}w+#Fk@ca?4Grc zADwVdbWf#DX{MD6R90lOJ3?j63@oVn-YwmDjoy3q{xjsOqZcwJQm4oXVXAJbWSU)2 z7hD{Ts0+^Azd-L@fj5PF$@lZB7wMY$yz1%3IHnC=HzIk}^K{J$rJ!n8Xu?stNSB6R z^vsy!D9#0S){JMKE?rS8sWN!VaJm+0*I9a|B95?OL7j7ko~K)jVCeqNgbPih-~>7is5hzgS^R#otNK?ft2}EwV zV396(j-577Sfqg~9}i8l^K`*VqMEX;n1}+$BJFt5HobQuZ7Odnd8&Cqoi<~eryVP1 z4OO&~MC98SY5R*77^ij0Jf)nff^p7R=4ty%vX*izTL_IcLR(i-bX3}MDxqm1BXrA( zRZnfl4>qJlv66)n_U9eUAkC|FR?@n<(MDQUyT~o1X}Li`WVDi3c3f$}Z%S|Oj6C#( z2=p!a?nrEB0Th0g_FA$(5w3gC0VXaDp= z4Pk8}g(lcMmkg<`<@BZE)YfXvWv#JwGkrP7*hbQqtBh?l{i?>;rl()cHny4QS1XNe ZDfDYKH zbuez(AvME{GF*troMAZqS@x9dgjNppQDy|hxzNqbNvQpaWY}Mk4F5Ap16e_4W5l1) zjg(I2(H`}lV3%O*EiQ}TVc=)ApN$pB59hhmu$Sp&Rn;k{O$L5d) 
zHbG`{kUy}eQ~aoSM4s#J;Q@J1$A}`g-^+Xa%3*Jhcc^rPwf76;C-Q6(qo_g6^WRZ? z8|9BUYC1jc!`7~LD=*-#z7A^_Z{4%I(dz2stnMI-55{UhrZM zE(KyX6>gG**D}c$q0RFf!=9EUXswW%J3UtH;RkyJs@zCdzqP%q#{*i<-!g1qLrZ8= z7Qkv9$2r6vm51DrH17}(XZ;_TWL=?g~XVe9~LYj zPnv9wA`s+50H`96o}hrGY3MBjkAM4v#pQ4I5}MpSp(fc{XL)QBM$4CiG5GGb1c$>*{4C}KV@&M!w* zM3;0dW+O1qF?bVX52g+kTuWX}N=DC<50akC-9iZs6zDAQRsb8q1vHDoPD*;5g0w8H zYD0zbUI4#Z`uMHh!?@+4mXxc6wb;?tjY~*RN=2-+JlerN zxa{92 zH7H0X2~~iUu=XZmnB$t$46$!ZB#BBb%Ze;%P0xsByXBJPHvs&R9Qd!KCr5CKXEj35 zeK8{2ttFRAjby=YOy!iED)_a}L;o~QYDRS6)#MYJcEqWlQIq2piAY1LGt6U(QRS#g zl#6IY!7!rgFxUx3!&*)!s<@Cda!w!QaiLHP3)V!{2>4Y=BK)jsR4vM|PA~>WgMQS( zk)J{W*k&Mg2WB6vO1bfYC}M^Uwg}f(h^RBK&ga|e5gE=pWZMXG*acu?aSbhR2Jo{T zU2PAW>L0T*^WFv~ZvpuFbLQ)D#%HNs%-T4!YuYf~bk+>TyF6Xkr^6x;c0-|=?8<%) zg^*9O(}*gkT-^sWn-*shXHMRj2o;{59(0pRX=7lkhD&Y?HYPMC*M6|pEJ%W=5}91Ki2<9KR34E zx`KY8{y@Dj;={6U#-PsYvlazazBux zw)wXni9wO0K=!MeJUrj!?ZZz)4JVOb7u2DrNKWBKluddI<4_KHw$Q456_{7x&li(7 z3&%4V6w`#3oA&y4xAZ~j{3SvZUcZ9TO|DnXO^#NI+ zo?S|!lGD(YdA+nk zp%v8^%;fi_CDGwR_PPio&;waYVtBWM9z$amPWQ0a5p`5A2shMR%!SzV%0P=EX4D|$ zuQ!Kf;NpVqfD<7?)N}}XA2o^*-SoJ4D2c}4dcYDIy6LGRC9+r+we3Fdfy@bq93mts z;E4~8gZ|$-eC7P|M5HCV%BuiRmS?c7cIbx9yA*(zw5EGhw0q&BLvs^EMPvkz8%H1f&FDcOm)1EnMA2y>L(#~eYnJlE6m z`ROXFoMdcr5#I(m*;GWzrxbQvJ-Z26}P%GuKfw)MDacJi@8Geh3U!=fM zpb?axg?9KY5?vctO`|ni7Cu9H)Is7<3b2pCk5Zr(f^`(s6!Iyw1Narahhb6hNitNM ziGDy{u1y_Fr%dV$@i8i)>F@}h9vA1qez~{b(}$ZVRXNztxdadHqH-Qek@JE}@T+*C zmG2ty;72IKN`d-PJWYXytC${)ekDKH-rjXI5bV)8V;YHJFTj}1jKK&4v7w&@?feQU zV)H8l;W_?3aQzDG5%92I)7#b8+BPhBcq#nzD?2#cHRQoG_LkP)kGlMb$CI>;vKOY} zJvB5b{A1~C&G)LVsq^3A77E|)CQbE@@Q=eHPH{f< z24CEUCH01hzavl8Z;8CFj=yOzO$|;DoEpAiNSdf#ktLAN>dzEiO}Wx}!}-Lb^9i48 zz{d+q&cUUvL(2`@mv^>YXNylc-_I>kn`7p-WS1E>{|H@1}M^b+MpeEy1w ze7PmNx`2sDUfqNkwSLNQ+H%VB_c1f*HT5fMUrzm^wf+-z`lpfR=XXr!zg+ra>4fHE zUDTB8MAc18@+_LuENr=M*-p9}5+iTqR4wLIE#++RWp7+yeU{DFLuv_W*lrFCx5`MP z-h*Kmyw%2GeY6j*qZv8Y z^%@_j+3og2Hai*FQGw#gzwD@09)+)rhY8vln^X-&n+nr7CJ?@i(b6~tX~*@Es-3av zS7}``ZfcK`$w@&dPs4|G0xS9u3ggVDs!(!n=Wod$w<;nof?b_v zIK;_%m|ir>B8EmHWMS725le=47Xe<_eFVvYYSSr$eiI*udK4K3$v>p)LPr%sEc7fT zQBGcMIuNZ9Qtnd}!DirswIrrFFG5tn`LDYW5=cB4 z9uA?7E}?U<&F=R0I!26+^?`XuP(ufH6UV-2<)^g&B;wpxTtdAoRs#4{gM9;oUhulq z+5*mA^7E2+m&{CZ?2@1Vh`hZox0+g!O&vH+Zc1IOkBC2|T}b^w9;HmM(6HAS_ECS{ZCq337r^DSl;@mHGd2&t_Mt!hwudWU6O$|U=3jQ_Y=KRg1*dJIlv2w(~NMR z3)@V3+ksq0ZvvAFm<6Kq;ADqm80G?detl(NzsQ}Eh0_5Zr*2F#-VdM^{x=Fgr9e+z zEIBYKBtg+-B+;^d->kby^^g&U`8UNX(FXJVJa({8I+ea4EK(Hl-~o!ok& zaXBRXWW!{`iEVfEOlXo%Z@nE(zRTH~KMFZSpOiMDJB9%Lp7h-JX0?7(C40G2QKL}V zP_qr4lZ|NXo10pj=#!;i-P*dnx@JdHi;Zo@UxSLx^yAV(U9*iPzf3EFTA5HN~6rQ2*ucY0xLnj?~KcMwrkp)l2 z*l%d@a|(w6{Koo*<}G_RwN^KQ-vkYtYqvLHxRSwRCxwF)q`2=SEzt#)J{P853x5IN zXWd6QX~{vM?@{U%0G@i|yu3j0rLOGta)UjdM=^uo!%_R^{}U0hy>#s3(hb*513yyD zGiaKhDW1ukO}S>a`<(qgv;Cs#nrYzU{L&>;*&F8B>UEjaYvznO^P;J2 zl||`VdGGIG&WysB54OIAV|ONP&su^Of_ zXZjf9DhekBok>nDGOWc|ka?{*6-BN&Q3(oP6Ho#&e7OmwqQsS*h_S>in_`v$qZ5|R zaj+kk;}YSGXU(t&q3E@KSqQ2|YY9mxdSwfFr_<7Un+Zjaqq}Q6bc`9p`=+T3{`Kp5 z0pum{FTVRLQ#DWDvoD31YW2!XHhry8dFfGotwnj+sIQGzUe46lCMz#j>1#8T??&ir NbCmDqMAzoa{||UVotFRr diff --git a/ultralytics/utils/__pycache__/downloads.cpython-39.pyc b/ultralytics/utils/__pycache__/downloads.cpython-39.pyc index 15379059b4c246b6dd68f16e3eb72d0e5ba57407..bdac1b6dd3f6a25dd88dded12cd72738dab5df1d 100644 GIT binary patch delta 8130 zcmcIpdvIJ=dB5lG-KSQv-cMV;w)I+DURi!&%TZ#-k0cP>#8MLC0?O5%yYfn_-SxS5 zWm~a96^#0Cw=U$o8w11__#!nQLqxd?X z|KP5#mN%WyR#=zDTX^(IjknfnZK~E@t97VaXRX$yYTdP3kE-?7YJGDgYctWa-Fyr6 z*^2XQ55I$N<>r&@Y%ibW+xZTZ`uLq?KFEhq+|2Ld!+Zp#Ej(dIEB&h)-^mBk2H(X; 
z(YlrI=E*0uvnD^v$N65gY~#oHetrO@?fh>37Jdk&9kr1IydQsu`55m{hxr6Qg1!OH zrnJQIAJSHwPOxf7*ah2?f5yHn->bKUmyO-|+1aZ6u)ec(&xwh5p0Hgz1^I|_w7yQ3YU`0nL>&|X8ogU z^2fnL+uuxwZQxTnoRas1zA$4A>zWob@p%~2VIqG4r`>CTGjB#*WFN$eJUNlbA54A-5*c z&6~5fxm25#n^tXxq|$~oQnIDh)OO~+v^gz`_NZB~MLI7Q!J0EI&NIFqKBozZj&0Y~ zf1NJtf+1Ueq-ke)HlNQzsWY})6gfvtVM07ss??(OCQZyetTY*>P4LyQvQFL<8;8~Q z>^D!kHq$8xu+3;5+bk|*99zyrcTV_oPGXiB?AXZmbFN!(CdS4tT)2?5FItPZ3dvM{ zaeHEIgC2YAF;$ia!=KnPfP;1D)@rSB7GW{g&I}f3hnUI4{u@6FmjXSp2r)Vq8em1v4;Beb0183BmOi%Y*PkGF2yYM~<)1gU=- zQHbChz}NXh`DE+EY`^@c*73fI8}XtmjK>xXanp-()}vLTykV6&H)UVj;f`)M?y*Ya zDlEe_Cc3@AHRg27Oxx7qL!~%xcMUJT!sc6-H1y89O_fc&!;61RaDr<=5NV+OF-3pj=Q0!ztEFZQX9ICNB5%b zIr(DGfqpsw@m3;gLvG*r=VYL_g!6iD?-urH`E2h8_yffGAQ82bXHZ(!lgVZCfxK(y z+>CnAnVe&%io*7fzF@iMCd}o|QPWw-6bf`L?29gJ0X@8AqFp>p;%14cBTdOi`u2wC zDvE>hnZDf1#Lk&ta`cqF3gMZ`-E@tuz;@#A?QPO|(jX$c+{WV7(E9U6X>1sZK zG0sj9ZF9Tc&YJ1o2jaNcP5O}D9WYpXpkME1t@^0mj0M9<^w3pf9vSn4JhOSR)COIL7=h= z){>RUTC)I#v;f>X1uJC(?r*%U|3Sv4<;S=F*5K_I7~l|?gK4|{3V%yBo4e(4^8>6= z{)0If6eHMb`BQVEv4BD%CidV*Ovr=V4zOjJ-PSwy*JyC)ZfT7=@NE?67Qq>JvVJ|z z45O39Uk$yg8-_wZ@$`*9+2#dU`o_+|Ue@kmR5d&WY{A79bZ)R${L1mG#1Qj_csDLedRT7p_GE4*+IR zUSi^5Friclu4?siso|QQoA(;1?V9eqgj*OB^PZLqxgp}ky$pH{prPx*Qrrz!A_}0w z*LAuCUc-EJKE@;Xi(b{sOmiDN)yQL4fmcAk5O3&$m=h5GE~C_VjV;f6LFa2;Babf_ zK)ublf6=(su9ceH#!3@!;+uH0*MtdMu44(j)ob8wyq$OO=DFA^o8MI0Eb;+PR|H13srb8R@28fUkyF;SgF$+Psz%+PnHo?Ot1@$7@&X zZ>{vYeO~(tj)ZAt;PvB=XuRLh3~hy`YaIuMJD=N#)^E`N&HttUryKimRGaCj#0W}T zR<-$lFN~ekh51Ms#^9QM{v~v5T}Am3?fgw(h}hw~3b>uL zyWQQvw<;tuuLo8b_MzhmJ!g82UT7f*&i8HL*tX7ryD-4FLmmzo$F;)?mGLv61ck4M zX+s{-9>s2yIv(#hf7Acn0|+~R6Lv5lL~SLiG(X4(VFgj}z&zI^zqQR>Rw53cecA5lUcqkGE10)=*ZU7+FXK~l(V zoNwa0z&glBaYq8;G~b=mc@k$l=5@^laq@fU;(7-9a`k=mx&W8`FtX#;O}GWhWjc|+ zkb@J=9cAZ0MSk%{|E{1R|FrY&W?ocIuQD`b^yIHQAC)s>gVKsV(CAoc_~f^GoGm)Z`NG`xHRntr>ZEx)cYLs$^~fWGJuz_&m~blq|4zO<*3(aBdHQ+18ZKA@ zw%8F48WKz>OPQ9x96LRr4CEy=SJ~Ye+)ynM67NSGg+Ykc#uanut# z#Yd^HLWGQ`Aea*gBKHv?6ck}1hltFBRD+8P@YKZ9@(bf5>;?J#@qu!ZIwM5hLmbgL z8!oX0C$ehf3wBPNrPfffK!J(K5?_W|49B(HYS?jSoXoN<4pRr=wulq?Fp&l#1tJ|p zl%GPY7UzkO6OiZ@p*>VNBCqZ3=Fg#64Z^7xm#WdlOm1fO5`rOh4r*i0hUSzp6v`)| zJ+HD%l{w-H6+?^AJkP~Gsu+b?OPr0QXJ`#J7f&f>Tcl)o-{|pH24g^L(S~4SSR4*Y zH|$A_^~0_-D~n@*#@Gn!)eZc%>rJTZ;8m9BRwyi&_HA!%Mpr8ac7riQuKZWzXZHPF z+4{0xyW@y!M8+obO;K0DPG!;=o106vAR{>Jn65Ra;yKH4Y=ms|229JmC*$5*oJDw3 z$U7PGs#Od`GHN3!4$9@-b&;bqgSz z&?NE;yEsJ)sj(F9S*|&Z_hySR;vY_$C+xI^CV8m6dh}Ely9W`fMX4(6^F>-GG*8hw^J(m6 z8qN~j>D%Q}{3jpW(R;!zBC10OddyTMGnLP|RwhS&Hf>y8N4plSg@z-;*yMm^ew!m+x~uPNgytg$?vRtH!*O&m}@t zbs4JR+UbZd5|?_P^uWYZ2xQvi-3MD6iu8Uu(WbS@nS&kszlPGS(9)sgcydoFK<%{; zVGvD2!IASGjN**zS#1$+y!`aR!)1d9d*G4vX_W{Zs#!Q!Yb`5?$=skDt;BqnYZYd7 zzM&N4A&*?D@O60QHD@diAD)RH)M{b!;8*oh*o9xd!b;@qlgrkG8NTLjsx7PKVYp07#X++at4pW$BcBohy+qLk7QRCz%q?Xo-WoPLxmWC zA%q=t`DwHgUgJiCa9o8ofCeC&dj4zEVGadgG;|&86t1*yM6>#%BK#Ru`3=fV)xw@y zii(=)ZJAI-v5+yB&+h751FU!de*u>NMGY(k*aH4f2p*~NS)PJZu8s0W!QfT z8$fI4&V>t>m~+e#D|d;|W<7JZU|&LB71=j2S6n2=V{QBSP|d|Lk(Egya7xD1X~3#b zwroOGOk~pjxT9NsZ&wR^qeL0|HdAwaEdNTZc}hMKjSTyFVFxMC;ZdyYO<}2qeZL8o z@TB1*RU=a;r>E~beb1D*>yv2lQ_u|;?9ALb*P-Ola$7x@=7S=o(Z-frVNH}la>nXg zDL;6qoBfge%%QZpH8uCxipNQmD<8_58 zQ>Z|a9JD#{RRxm4iLUa82_$I^T3bBeX9FYf5#V41+Tk-ap%lUIX1yDElaW9ZJP2|@ zhIH}I7$edkDe}weA5xUQgs;v`5EpqX;2VO~c?MhsvXbL{#oz(t(%w^c^()2-d*)sw zE+Un{W$iM%tX~dXHZBL#Ar6>Dk^@mKz!5@L^i}fNBb5+EJ@^g#(GL&R1F`d;8`m6n zV;Gg&kJO#!MqMopd=nq?v^kbTbc_hEw-j~D$ai?`O7IGDD^~(nbRNDMX4?6W@(^$< zVhtO=Aq?1n9!TdZkg;mX@U=u^(wr(53VDI|0+Wq=89D81FVZmwkq=&^JSxmPz)ZlW zMb29Ic(#U41VIPu*>NBc8TT3jQ6d@OQ#bcY5%?2zTyzMGPD34Ygt9fGYbp8K@HIuv 
z3rI2h9v}gkV_wK)vxFXwwe%X`kY5@-ny7mO&a~3F%WDgykf>#AyF9Xc+wS@Z zj>qKsGu1Q4Sh5Uk5Dl zba8PO>ZlFz_ZE;yqR4i<(Sh8KkmE9(+;+#h5OU$joNRF1>Z-i4|F%zls`LIGuZV^pTgVopC_ST z{X=DXo^AN56d9f;+#PanxZb0!KZGmrdh`Xx16Lz>3C3wbBbB^F#P_ydqK3aBLislF z9FZ>)Sq~8rYM=pOXrkK7R723MDqlw>F(`(p;}3|e5cxbvHJZT#bfK#w zbgIkGP98zB4&5&h`KJ8EEZZ)>5=#=dNlqEJ!bq|Z!mtQzp6Ji1!7xby|Llg(axdBDDeLF Nzn{laF*A1Xe*o)ETXX;b delta 5206 zcmaJ^4QyN2b-wq#$H!k$Kcpy$l4VNPA0>OE&s`K;>2jAQeJ&-HH?g7E~=TtU#Kr!?1YEx^_dc5!--eL$eOU77Sb0 zx#_%fE~QxMijsKeoqNu`_nmvsx#v6g!!ObQEcC^zsyqaK^UepX`5Ynth>g8J7c@@6 zW3GIz@6U?06J(7x5LU@aI$;g{)bm>pp) zY#(sVY?QUK{lK-bqpXd!1GkTju};fFfuXr!h~Vuo-*P*lgcDtSk0!hH%3IA+D8LoTx}D#v_5}XuZSOL%dwm#2DDBZ z6qg*Qf;)=Gco;T`*CA;ZZ)**-UOcDGh=-04t+YcTrav$Fi1;o2nTk|QIozVb`Secf z_Z?pGtIk?+skL7Gt8;iq`X${>XT&MjTSNJQv)7EoVk9*mF)a?pMp9#4{|C z*j1sxSV@_d856zUFYjwb_n~D(@lhA`&>+>QkB-s^+2WfF#6w!HaZM4Ayto97o{^B;!bqiDZ2T zJtTg;{&e^W5R_dxo=t*1Cg0Ojyk8%!k(P|&AnA?AMI_WfC&g%}&G8aQC&i1QFwKaS z(DPIi-wh2_f8KU8+C)lj=G~&^?}+AxqxC+^XVX&E76k7G9E30==Y#kl-hfX@8cLkesVIQRyaxBK%MZQ z%zO)oMM~5rTeP6s>Vm@PA~A?6RS=miK#0$y_NW?oq!qG>riQ5Rp$q(i8HTTKf1%4(X<|Fr8>Yce zL+`S48cu5}o8g~@7Vi>g`X=dT#J#>#bWq&y8!P?^h${{cG{O{`>Y;wfMG7P<<)Q1L zrq+S920uk(l)nzWe}vB5arXx&6X@dIAEuKkc+SFO{tEQ7Nr^BD?snU59=5>s;2)(x z3KaHu*jDVKoA=l>qgxOGYqXgFr`mLl7CcM=uY>FcSUT8Usn+p2Y_AIy;JP=F} zPMBRRFAjpY&(@iz0r&~{%!^vVw?Xp@w!?hS_A&3G23aZwapm%nASqN?RV6>GVt!U_ zSHa{pn_xDpwLJjb0TyJ{Nr=#e>Vn_)!&n8(uDMVotlsuRtV2}arkVFxs8DTJvk<5$ zf7+^9sD-2l=GiXRu#Rc&Z7Wa;;t6`?D}@@X-U`_@r3L_EFP;k&;xQJ=cW_)<1+aq>_#DYD|r2Wr1@UvdoyAL8n=8HM$Ohhugq=>WeEVEEb^=x*)qq8)c#gNS0R$4sKju zPH}@p=0U=C28*{xni>$oX5LWB&Rm=u8O!835b%Wf?~$nvi9jpRF4Hq90BRD|`8`xW zjpSJ*&xq9F*Nd11`Ar~EjaMW8B#;MM%!=~|YAl%tWW0h}br0I%#u`wz> zH`;jSw}CA?xMA_tvU@3&nY+4b8Kx`+X7wcFsT&5DB>;M$OjBiQ@;k`tOJH=C@j>yy zXwZFEY7KrxR2@Bd_TQmprh$+aKnUtnBD4_@0db)f9xM{-K<)=g4-EqtL9wf}R4@dn z@>AU*?4#{f(zYnf*v=Tp|>%U7Y^&1EB2JSpqzxCup+ z8R<#Ij9BDT-SR##nt|Iu_rb_c%dE6wvDk~z7{4!`JaL$|i`ys8={LY+ep9@A;+*#s zva&WiBQFZmVq|vRHW5p0g2u?21@e0hsC{xm835p@nfAVNRWsW95%w|$@ z0nl9qNL0n@$^AvG0*)mg)3+%9tDTmUIhkuqDLAYuTLZu(mP3N1AIAQXRb8rKZmZUI zNa*umPTSNlg=G9zoee!9OZg_M5H?n=gX9VapQ1o7y<;!?-+-T zhRV9^nm#u(^TgEoY5toakXfeey=kP9*DP}mPIOes>(?wRH#U%W;@bp985`hVhTeI7 zQhrpNoCwcWP8-hg9aIH&Iu9gM*0trUF(~*4@_~UJv&@0rWH}&9y{TxZTno239#5}Y zse~C@wo+-6e-EeY689z!AHmeX9YD&?<;?ZvEToLnD3YnF3OSjYWR%LRB$L!T;>Q#H z0~mTyS0zWjE2r2oqw)bc51Nk&GRgl`TsznM!t<~{CZd2EDuyu}Yc&W*8V@&| zSfvJT1MR9G&Mlteh{FFCM(_lX1e&t@!)g#86&~{&;)8SHHkGN18oy;JcLAGUnZ#Hr zsdva7dPljV-qGeAqJOfX=q%}o_@HxEke}(hMacCxhlt7dmJpd?%ViVGZIOA1K>n&t zk~9MqBvdzz1(yZYl1=Ywcj2CJS7FX|os#Rn!E}f&SN_5-@|iNd$eWKDnLiG9A2X83 zW)jOBVJy9h*L~|6hdgdZ#v?-zOlgQR6L1jJDH(e+{420bz`B395G>-V1GwsTAQqfs zyxjs-n#isxcU7h=Ql_pu!4z#uMyA}tsI!t~zKzqpjpPrIU^3@22T81hdww}Obj? 
F{2xAa$szy% diff --git a/ultralytics/utils/__pycache__/files.cpython-312.pyc b/ultralytics/utils/__pycache__/files.cpython-312.pyc index e3dbc826ca1503f46adf43c514aa97575126a214..7f4ff2632dd2e60c43af9c070aff150afafc34f9 100644 GIT binary patch delta 1717 zcmY*aT~8ZF6rHix_U`&45J*DJ*Tks;PQle*V zG^dHnP@7(P8Pt7pl;=hxB$n2?-C%z=Wg z>eu6g)n8rTdn;)vF31R>%BnO}+1hog9{C=G)73TqT^Ag$_J?Y|sMNgNvDood*SEdf z;lX-%@Q2n7Tn+!&{kVHG{@!+cq8^{v3{Tdj$?D0Y?H;!UW7X02IS5JRQOAcsZ+AaH?_yX0&5^I}Py3s#i2 zaG625=rOh}V6m~yHbV`ly*VK^?2>1fQrbr;mAq>J%O!~s3k@WOF3z%^qiT| zHARgMqw^ddr7|jULQEYw9DN9rl#*9X1F^N<$X>N0(by|^L(kJBNw7Ph-j5V>_5k*n zbF(rnpro#uN=D0Q>4qS#*;Htnm2#aBkTUc2(Ea1Kdt(ZL&5M zIoeWG&msk}oGWO0^!#MynS4s%iY4Iz`;$MT)35BGFgrIe^)_4{9&u%9Sg2c$xCXk3vh}WoL8CZvGQVRWS_b23E*H zY&5_ndV(D)woK{@ewABf>nzGiJ*f}H3QATj$`|eK;vq%Zk;JPy&S=EE!Rx1%)eBMC z^0R_dMJ!FE5)j zD1XPT;DIvG0$cl<{fr0faSQwx0iNT33*~p8Hn-QhV)f?Ot#dmu?THq*NwWz=`fpF<>+PZw^W2q!%LV+3zZw4o$^g)@_!HG kX63ui*{&CDZg>_x3xMv}&#(Vv{1UBQ{PZO_31c?g|NG1Dw*UYD delta 262 zcmbR1G0BGSG%qg~0}#}?ElOJ;GLcV$amz&Y9$nrPjuyr!o>bm6#uQEUtbyTGo=CDgCesne}D$KtxQ z;YDe~Eh-yIHfwFy->84t$oaaF&qX7j%hJ9VxqLS}iAl3=PLR%GWK5ZSTE>%~*{#S5 v=odFl##@YfnnIJmD-{D}3Y2BJvVg*jKwO+XdAf22_ZJCfMpwon0iXx~6g^FP diff --git a/ultralytics/utils/__pycache__/files.cpython-39.pyc b/ultralytics/utils/__pycache__/files.cpython-39.pyc index 39062d4c6312091498b804160c690627f62695ed..1b367bb8146587cde6dd9efaa71f7aaac5844cf3 100644 GIT binary patch delta 1274 zcmY*Y-EPxJ6!zGTZ*fVz9G>&HMIdjf;&i9>@f1B?w*U0p= zXTUS}{QFiow^uud{@S}3p|gAMTMKh^hR(h~>nG`H=F<8uqLsj+XXp=LJ4Mlbqc#8a zpV#Qr;_{V0mYHI{T9w5AGmN(OWAmGpt~$Ti9>zF*=`5vx9~}1obKWAfw*Sg4PmR#P z+YwwmU5eU0Ft0c|`gL+8?g6%xm-L51RIz_8G(UZ$`8ic5kJEmc1viSENC5pwEL=;PQDJDR(!TTD&VJek(-OUWXIk0^~Z7gZh-+Z2LN|UNdK1#P{aE*pkNBOImrX&# zCE+_N^kew|u(P>imUa}iNneRg_^Yyr%7@?}FKDk)Pll1o?8k%$St%!)m2%ZHTaU4Z zK1M(D#)Fepb56gqf?}REN6x%rKJ;{D-Z=X3y9e_oA3^r@icXRCDzmqyClr4Nm+?8+ zjFVMVHCY`%HRq<^;m&)tR46 G&-ownV0~@? delta 151 zcmbPbbVq|Pk(ZZ?0SMCAf2AxCnaC%@xM-qw9;4XAS(^OfDIzJNEsRluDU87kni3nI zbuo$?x>&`yq^1`5q$U>SW#*;FROaX8mzi#EV!F%B!lj^~Fgc#haPky33kc^7kh6wO wV6z~5Dl+=9$*MFv0{-831C0)Vup(ByE5ipf_blo;hEf0s}M0FxRilK=n! 
diff --git a/ultralytics/utils/__pycache__/instance.cpython-312.pyc b/ultralytics/utils/__pycache__/instance.cpython-312.pyc index c891ce9ee00ecf40c7e6adee7b94149b625fc1ab..6f3eddfdc056735d64a4ac519527bc362fdf78da 100644 GIT binary patch delta 5345 zcmaJFTWlOxb!K;Vy?(!T{dPRIldd101&&j?mF52wk zlxDg5Dvu}}7j0qvnsUE2!O69+@ln5A7Y(?$Sx&Cs;^c;_t|ERL8*7BIrt(<(<@d+i zy;ikqt|!Mfa_h-*b0b%n=2!Tx1N)MCTG_XLA+b+Zl#HgB`*OOfuZ2hH+paay55!3u z00Y@9izEOqt170M*^H`?PI}JW%ePa_{d&F+W~6sO`Wg{_Gh$qo3`1PiiI|i$SxspP zaYfH+vfbEFjFB3KIiFODxJEXU(TQn@YZ6K6Sz`x2Qc6oOsiq!N#NbA5BNrUT_j(fE zl9rdnAgUQ0VN{PfSc$S^N}`d}v#KnvC}M`_YbjZg#gry$*>ol+N`y$cl0Y-2Nm46W zQ!x%PQmD?#iVhNsLkxXHli7mMArXbQwK~c4LnOo0iA`J*$BsKW*rFjlTN!c4FiDY0 zaYSFGe`;-2#i5%u1UuULg^G5I=%!K;a6Xwbie{8`5JJ;U+wKZ9!EkD7Q!-P!CITH< z*)c??yE>{ZFHy`a(IPZ_u#K(>4|@a|{`4JT5AUJ>7J|H&4tRq13Zbga)2FnQnL?7l z$OH=x*{3LzScLkXCR{!HHGAW9ThvEa2lT-l+ zxyT5%k>WPbC>nU0i>JmT5k)dK>uhU{u-+*etBEsZepmQrt;pxQ0Y5^JE+c7MW)ull zktQ7V0I;e}J+|LIf-Tq=)RbXbZdK82bzF?O zzv=ceOZ3ru18sbOt_CLf%k<^IUA1iZi%7VI-U#>)ArGrIu4{40R5W;O>j;)Rt{P+p z5l~H8qK}Eq6*Y2?jbw$P(64$~w6N`b7t_ zJSk09Hpn>`a(K`myLR(esL-7mY)4$4`RTTc^g}NKo`TyA_QWpfqgT4eQ(wR)bEYwD z-G$&Vf@uJt62{iFVkUK&cyTy_-~j|32#^iiZ(CTDcmN*bJb*3kBUhcr_i0a+r}tw& z4fmY%GW0&0@9E6H4j5NBg^Ths7j?;e)GY_(Hd)y6Lz1d?Dgdng4%TfsSE$>nKBXC^q{S6O@_;Q^$i7?GOev)) z1mg@HoebsmRS0{g5;r0C=qryx!@%$_X<{lZC6r=Jy;bOsm14hrXdqml(!i56Ni7mW z9=FbMqMlI*3%hPeF4Wz3=Dc`ZS(UP?DMGl8=o&Caufty{CWC-OE4r=%qZ6u>04e1Z z_%Ei-^(1ULD1|ZCnM-Ny?4E}VC6QJ%P;01?p6)M=v3w=6&}<~7j3O>XhQ;vc=;*LG zwj@fcppZzwg6Rw<>XJ1`0#YD-X;)WZ226!HQUp7eIny_H_2(;WF$Rej#Ul4%P#zPG zgo?~S)`m&2Q<;jr31Vsa$n6dDn3BuraPTUcIcVpBxI?xI?#;R$7#7Eu{tp-2fhjt087WmdN#axmX{bCsiEe72hAAxr2@D)!Xkj4? zPK3`mdzOa9g)ww1c=oerX(=>ZLz_|k%fCQzI!2?TRXp|U=e7TC)!MsF^vcAhgl>3JwZ@$f28=D?x3 zdF!zhg~HNc(T1cU20_W0V<&dm;bO~h5E5BDuNA+zRS5_21N{zyKqhNKU96_^P))(H zD)KQ!NCZH~uEQ9X0i+diT~XCLaj7EP93m{e#4%fxewg#Hc=_Z@0IqN*=3w@_$X^uR z;7hlNi(ITS0)WD$f$P}y>I2v2 z-f2JjwD9%1_c{mQ)$m>yT^2nJ?{$bz3(wZGoEUdeY06K3CC2Z-QZE8hOYmly7TV<+ zuw2d1^ZgqA$V%x5AEq>kNlaBJ_?{@_G4{3Xlg78;^KjQ~?lAI?7Qa1rO7Cj#t z_$Vl?Kh0rP5{BY3aP#nT#U9J=yl!iZkvgwk5>%Os>Kb_a2eg zaL$zr!!zcnNNI9^FF!(`+W%L^X~{KUbHTFFz92A5UxIFlQkH&rAP`~|zjswlWmK}+ zRPiQzWD-s_c@k-~7e)@=?_NZzBmtm!0b#eYiYn#p3{^SwN#dfSQo!i*N@WpYjs9V> zvx~X*Gtl2`s0b5>RVNEghv#_EZgI-b*L8h1{}laTwm*LoQE)3G#0kJ$|)v}32)31%0KvZkS0axWrL1g8O5by-7+m8!7vp=@PWC&Q|SLN>-e z7LarF<&)uBCMeR}4A7s>2KWnwzn$FX!))&umzyntgH9vb<;aWl7pHskc#R1OkqG+& zFi#%DE`I`y(hseofnfF_9B(HN<6soQ0)j;Zm~2QAfaQaaM38*Vob6EIY zuHZLQXMzDmSazkS(b>JAvYTwXUI6`m@9^(}Ncth)j8`_Z?Pyfdq z^xE8UPrb+g$u9b@xl^@lA)<#m=-D&(@N<-&37*3n6ZI|ZG<@BD~E_}QY6 NaPxy7a|oFx{{!_2Hc$Wn delta 3958 zcmZ`+Yit|G5xza%k(5Y1DT$;^Ih15uq8!V5M1DV&Se9ROZ8iM$s1N%pOHi zHgW{No1LAV*_oZ4-TVF(a{m%3|6N&GsQ}OEzPCmiH=Qr{kYAl|d0{ACHCROi#hw)g zt3^SrR!XiC6cdC&7jKsq+BKts;&_V;)+)}yI#Jj!C}mkeDZeV_<=vcD0ldlr&vX8Z z?lzlI64NKgx~9^TV+C?5ZB$WJ3&&G2HKeJdaW$c9xmGevyqh+TCFAPG$&;fS6;)Rw zx=zE9@n~X{y)B;d*Md3f2GG#(ke}AU%dmwK`UI$=^=zA^fq2++mUnjhp(nGoH#w0| zV6?wCIi+g;NGz;r?f#>QSlXYMh^Ny2Q(+nn567S{OjW-&kxC`0t}5;9FY=TuDDY!1 zTGy6I?<& zqeM@Jo-mirg>5Mo)pWxWQxm2;A~$ZxS|kiBgxWBQC~1jL#4@G%DFA&8nA$afS>b-E z;AGB;&Lti;1+5_XjX$+lVO^%Fcs=m@#SWybbfDW+-RquiNpH+|7 zZGl02va}1Rr8uz;myg8R5m!B|?wIRcO9OY>4A>R7+$P+wR-B9j9Vgq6wY)f#QY?=~Psmq%F|S=b9l$ z6N)-Tj{&PF*>b)$xU*P*9HKx!k?rk?@Ly>VX@i?;IqhK zve@R7*Dd;-a?7Gml>O|n)g3cj2@Z|Gzwn?reD3`i;DVqEgG3PqMWsfmRV-O~(4t6C zK_yjI0O$@72Jjz93ACv*M`mim02V9}CJfnU=1JOkLaYgpO_gOp8m0fnphd3WD9H zJ09%=J!R}zO+!FbEZ3x^qu>>BTAUU_{0zb|X`*8t;}1JiQzI#|?qp{U`25$bUp*#Z z%iLsDwLbRSweR)>A$zbunWYiai5>)itVBBcckqxGod7XpMNg;HfCCck^>xiNd=F8a 
z3q1?~)kTLB#*i~AO=^a1B$f>8-Cc%#>0Am}&7#V!MRAyd!yLn~gF_=p8V~DK1&)S6 z#tpZ$lM!@?ykB1+ivfLsq<5p?N?6Ta(B-4 z-?0kb#yRPYvilanQ_s7cH+tVY`16Ce+`H%c7d-ws8G5OQZEu-ghsOlQ5wo3dJ)qYk zjUCIy>GPHVd7s%@{ZR5+TW9LHINzizO5O*pYny(J1Mz$YÚ&9|ptoBp(-@n%Ki zCl$>f*_){+Ck1vAqO;z%gJdgvqs{H+>iBjq1UDej_3Zt&wpCc`=~jSD#c~|^Y4G89 z)}MpiF7NiPGo8QldG@48*g$)fNMjiWNUmg2+UsHJxghU$# z-mLJ~3Kx_5UIz1#zT93O706#ds$9;dW###m!$2&XEb@{)yT5_#VH5j%xkHDU)F0Tq z8>PDdmaa|MC%xg!%WjIv6 zhTMw?hX4%cgr~M}9ohO|<{M_opp+jb-ayfK(DydqM#g8@c>r6lj2KlGN3Hls}21p+SunDI%giR58 zpGTsO4kGgfgclL;^rD#QhCM}-DV6GJGuf~l(`kg$2(Kbwp`foJoI}9err+fu{T`Bg z5MD;WGqNa#UTk_0cp!$5Lii#4wMzh5;l5NOH!eQTLdTzyOXQk+b?nOV0}eh0B?Dge z-{bqpVb(j)@-+WKss}Frvgk6uEH*)#e?`G}VhHZgp^%xPbw2Jbr-z~ wy^wx__7eN$z$%;asi52x6t*zX)aaVEJ+Pi8#Cumje9$W$wUFjT0g*fNe<15_R{#J2 diff --git a/ultralytics/utils/__pycache__/instance.cpython-39.pyc b/ultralytics/utils/__pycache__/instance.cpython-39.pyc index 0aefb93505293909b25a79d270fd09e298b3bc33..5c8b0144c001bd46feb5564ad7778374dd2e999e 100644 GIT binary patch delta 5277 zcmaJFOKcoTwX5gj@z@@J;&0+4?PL>AJLB;uak5I{?8f=*IxMrWakA_}!A#fK2!2i}ih5P;W_F`ZJ;U_CIgg8-yJcaRUe ze~~{vb_izLABZGH75KZXWDU)>m7K+uyk=4ZS^TJHxU!OG)g`-BELz;Ll}(N7R>@Y@ zt&&Lr4+h(~v7o`Cq2FW5r5$f zYg4C;D!QqdrAnb#Ry5AFvYm|jk~$98*Gmqw&x<)qzYY>GTZx+`Ku?-fY*5cDD15V& zfMYur_laxD{8ivkXW)y5(nAGCDBM zWaePe?gk*q?!nNr$`M393LwSDaCi`a+Q}6h3qpv?yAa@sB%TwZW zX;Unuai-bJd*zPTNPZOXQXFZB?Y?DH+}y$OFmgPMUZ-Pa&WHobdvP8#bldR)1~Ywi zWWmS_*;&nC?WzO{CF9HPuc8xs%boLN>4hA50!V}WXc;eT<#IN2o};BvY5*;Bat5#j zb3TQmHXS=M84xa$x$nN!l_ZDvzTWj)c}D1i*+i8+IXUYEAdcEMc3#|ICfhFZOOM=g zjGaZgU`Jt2nZFD(kF2+Eie5x}WUWH+(uXL_1$VIb>}|m=iF08y*(`b?Rwz1UFQ6F) zM+LktaC263m@B&%G=4WxsWdyBC@oVtyYkN<6)c_fBKN(m|AqOv@n+`{8d1_Ket5fY5`MI*7u5InI+P$r|J0wW6; z)j#*;!QRTpFkw=fP$?}_k`7Rn$~5$t&@k=T4W)zCx)ow!8l_#k64a~?Iz@YEY?q`X zw3qflRqLevbO7E_dVmhXyNgPK#ts7gm;V{qT}%HrH3gy0)sw! ziXNp35bUQD^cZ{@UnaN zGFuD5=@gmqie);QZZeKJ$z2{-D^?EDDwa&hQ`Y*sFtD+vXr`hUv<>!u%1NeHPyCb! zC1BZf7&kSeN(gz>%VcYsw2F+2v>t$hs;ftkpD-ZDiN@0R-u4L}-rcYFCwozavs1^a8en)vdn8cY{)sJNdk6z+wk0tz?>5-ecvW1qai9POozb<@DM< z^n6469YUt5uD7tjHlezPdR1zARl5qmi$O*F4nS?;$-!b`Pqy|#@GYwmsS-o0BaL(VYkVRc-&0;R}d80z(tQjOd?l3qJP%3=d4 zUSF=D;rwH;Xcm*ggYnhXIfx#7PK#$Pp{&khJi&8DJZo!eqD})z)Lgx|+C*g`p)BBD zpBnB_npWpvd9ET5eBywx)=IS=2hFmrB3anlpo|lotqPL0wVK4Rwx-sFZH&O6(WubB zy`O76g0+x=W6hvjg5%U4D1PG$>zIdl0kRWxxMgvc@1kw10eKe=$VLfIawCdz`*n|y zS};r@KNL$2wBd#hO&J`wDj!FI`T=zOc1|GKK$_ILLQPF0X5N7NWa#((&`V(57F||K4e>+nPlu1+_98V`t6{%C z5=gfY^CkeVt4U+Nf}a~jP1G~2j-SF~V^b$}#7LNg-J2u%3n*U-xV{B%Q9i5PjNcZv zQt`U09iGVHe|G;fvN69@ZmU9?7?v z$m?p{6WG7@v_lDUrj_yvzl%&X1g+9|o%L$-Z130O$H?kDBGqL=PYSOHnNH(cL&2&{ z!J*ntAldWEAbX)vI%;vfntR%`;_m?xdEGrY`NQag zdTw+-n!H8*+c)NPk(xLwSet&TDKyfSI4vp20Pi5Ps|bW!#kHdDz}jsjxM4x7+vGzx z6F<~-8y(mOTcxo3F#aSZ41sy3PEGi&L9Obnws31A3md&~sR+rEG1peNkDNhL`y>O= zh!@<>sRg*)yfXD!;MH@03F3-N2Zkk##cd^0tDnu{Bh~W&z6l!e3oy7Jcgc+6K8f|X zKS<8c2$3!}QGrW_-A>>YSbtv-aGxb-$g6H*=FAlCREh7?MHuh5>D&G$K(AEMZzAT4 z&>z)=RJ(ok>US!>g!`@_5FQlH6SNvfRHO>LpTHIUI%1X(Tn3QtE)Cc$Q!CNcWADB> zn`qCExcxo%{n?%4g0Ar2eon!K@3_gii3xJIY0wR9pP`li+>(L3c;HQ zP9ne~;YHvFB`BOu*^fy~%A6u#2=WNT6{~=wA_6=E+(zIaSU~Uv1ZP7CQEWd2p_$t~ zfkE$yiH+9rKIe(2TxyJnDBBrY}{u(gv{~9n2 rbFKh@U+^+;tIcHm=ttsB3 delta 3606 zcmZ`+U2Gd!6`ngYw#UD9{(qXJPWqE|noXK)k~VF&v}xMxmaeOobS2y@6W4c~II(BU zopEAEt|HOw3V{l$?mjHiA{7uqeOkZ|Ab|u(2nk+zV4nsg-k?ZG3p^qoIOmS@=VoKg zH|O3t=iGDdJwG=uo%eIGOf(vj;4c^WFCCnD7V9I=pPm2mtzt{Mg-BGR!CficYP8bO z6OwL&cNpI7s}hZTpQJlzG~Kx<(HK3kE722AekvmS*f}$%&;@XQo+z|W3_0Qj$Lmh{iJJZ;qvl=VU+F|8D{3q)#a*Hur{apb@}a! 
[... base85 binary payload omitted: remainder of the preceding __pycache__ .pyc patch ...]

diff --git a/ultralytics/utils/__pycache__/loss.cpython-312.pyc b/ultralytics/utils/__pycache__/loss.cpython-312.pyc
index 75173d754d277862c0709e711c6dc33d9ad11550..7bc1552e0fc75af8a44ad6e87e23f0e0cd8a421c 100644
GIT binary patch
literal 44134
[... base85 binary payload omitted: compiled bytecode artifact ...]

diff --git a/ultralytics/utils/__pycache__/metrics.cpython-39.pyc b/ultralytics/utils/__pycache__/metrics.cpython-39.pyc
index 6aa136b75a5cc520cee554fee350db1bfb238b71..2bb74a9d0edeca14cac470662387a7e6f341bcd8 100644
GIT binary patch
delta 16238
[... base85 binary payload omitted: compiled bytecode artifact ...]

diff --git a/ultralytics/utils/__pycache__/ops.cpython-312.pyc b/ultralytics/utils/__pycache__/ops.cpython-312.pyc
index d0ec5a2276ad8f52ec6d034c37b5966cbe910d2e..48b67ce03d468ebddf029b9e338b4bd7760ec494 100644
GIT binary patch
delta 13332
[... base85 binary payload omitted: compiled bytecode artifact ...]

diff --git a/ultralytics/utils/__pycache__/ops.cpython-39.pyc b/ultralytics/utils/__pycache__/ops.cpython-39.pyc
index 43f7f866df2a94c5674c629d32141385b602d896..3fd77f65bb7ac9904cb4a6008d4732bd328ca811 100644
GIT binary patch
delta 10821
[... base85 binary payload omitted: compiled bytecode artifact ...]
zYwb%Ytl3I0lEr_0iu6B?<14y$gZ8x(Xuh1ZLs?$#Mesa|pY>e;4zWOVLehtn>^ap+ z+^hULZr!Hgr#tE-j3QkPBbtUhI(m)ni-Eu$F9{a{YZ;AQt_R&ta322$!VPc`wL#qX z=)i<0#W&7ed0rFwl;y)LQamYE}@Uol2auLdE+Dx^9@&mINuASDUO zp>Hr8U1igMIB>s(NFz8VFDh2ZV_n^!#BO*2NTfNtWOfBm;v4}wKm4PoRw|$KB9&tO z+SUtB;`ZJYh?UK@*#17ZFmxfC)JB$l8-*ulA5}C9ih?q@dY15HSKNZS2e_xa@u^ zH;4@3sXR71>JRoB!l08P3vH;X;3>n5{O3S9uq{QPgdQ|=i2O~U>PAP_7XE5gI z8DKP!#iNHFd+;}l5exyZjWvZw}%sG-xF?@-`;Q^o-NUia9g+|I)D~kXww?) JZf^+3{|{)&<{tn6 delta 8399 zcmb7J32+?8b)BBQW-pwJ#R9~!1kVLYNCZiVlqnwKDVl;r5)vhZ_G-C302W;Au6hO} zK+k$5QSwoVWZJDbE?ZFmUkT&Hb_^%Z;W$-xq@-e7b|rRfPkcx|;;`gHiCs=CSIoTM zgJT7eRnAP!@9y8n|NZ~(-@kjFdWrqyYiv_49*<~vMnkXgXJ$0*dsL`@qA2XcXPtlN zrd;ia#(muXn8y94Hqm&=H`2sk&x1Vl7#nHk$9b4XkZa*l9(zn1Y2|nEHl73~!P|KX zm^R+QJCP>&5MRZ+P}9!4c@Hot-pkWSJNVsvHSa@BC%=cUk%LB0)XA3wo&@LMp%8h$I^3Cvo)i{FNH9Y4um z$9JPo-Fao6KNsVlzMdw9b$4qmTJV zw(vXoF?1c^?39+d?<$FHD8t;~@w<;4Idn2l<@(2m!ajV~Z2+b=!nih~b2j4R4N9tP z#Lojfh!F!k#0{iDB~fi8v>?$IgU3Ot(GA@r%A4M(?B&kw-u#~Uxeio!LAOXSJ%@(&*_4l z&E6zG8$5n^=wj2MVyy&2+&^U)0l9hN(eB<5FiejG`UNW-S|(x1-KdqGR%7 zp}yLZ)KaK$v@P=|L9hU>9Qxx*edYz@7w2F%y=yF>o}7tY{+8Mu0TyjlL|O zr6~%sw`P16^)i^hArdhHczwbre{S?KN4AHzB^#OAW$|VuC^0}hvwLOU;tWE%}*6Dq_D=L=}H zl5oRh?wpxbl?AMK3vE5CjK)}uiTx=55Fef|@BQHyzd+A7_a0WGT-_}F$#xc#UCEUG zeV@D~IbN#9c+iPW_|I$AxDyv|cC^{RoK~e3732&mY;cUqdsK;(8%k+aW3l8IFap9f z@{nV+Yt<&0ia`oPxrx-)q0NT4KF1t(&R=cjVfk{h(+@X~5WCt{rOxYO9Kjq0EdNBo37#+-(PR!!J(2PSw>yR5(AJ#x0at;WPJ z>;@<3v~>V~86_`YZKrWi(+)ZYjm1N1yref?H3fd!7x>qC;}TD)1>6FkyEWeAr0hm- zB#gD#2g?BSZK};J42%gxXPcd*lNyB+$Aptk(i7@s-3nf24;^%pvn?b#r*RZ(XHGxk zt9Dd7tE-&AWnBpLZgo0IbaU*CUhS%OtMNLVF07%X(}kYhn1nG(tFwEN$Qw$7!7etc_ zWEy$glMiIt?)WesT8TUGobN2vfs zU$qMYG#pqS2z8M-A;g5!2A32EiH}nra_n|ukc7f}*66gRG^YhqVB((I{i7Yi1} z1XGowDeeMBv=G{tuT04|tyx=B+Zv51))PK#mZm06k+V(LS2Rm-e!0?^iBZa>F?7md zD63K|Ah|GZ3eydY7R$LsM|HknUT}l9a$sUL4HwFZG59IljjEsoPODIc&n#Cg*H@af zT)h`~+O6Ht1uy)_tZ^e1Ve&%WrlxZ5+KGtA_fa&HpI+PDLc5R~EKk{$DcjYj3!-1X zw0291f&e#Y<;%jf>K&upvMzPYB~)4y0NR%IY&~m$1I9+p{LBDs!E3M{Kb)i|9uP zb2^jn+}N7IR)oE9MGfJCOE?%BwLZH36dWKdBYN>$^26&Lxpu>?a!0OFeypLnR+!9< znY3%Aujm7Ufw%_-kR!}udecJrW~2jxN`kUOR|3i+RA?i11LFqs#oS~ii@gO~8}=5G zpcp3b>Ucp60@o1*Ns9tI&4{stdXxG_CI*!-NVavtxa9`IU`$@wxXs`JOfr~z%l!8@ zHnWkTa>>lxmL48A)1y-*rTFw@nbS^frEOpfC8#?6V7^=yux}Wr^&oKRN->u=H>c0e zq-g>bSbo~f<;T;@?Ig=|zL7*G9sz&L@)D1tI3ykiL{J=59jxn3-Hobk-)qoK5+KoN z*iREIsi&BDyL@YZHye^4>VG(%@Zum9-T3ADO%JgDkq>XWn;ny1-qg)X@+X`2%DH%A z{*KLQ7FCK-S{#s*TW)Pqa;Jbp+)m&D`N=I?leZu{5-v<`&06ERiYazd)h_v~Ej>qH zts=y#D&|l|ax?MSho)LQhzNs>ypIjSv<=qD`t=5-yA;eisf%~bX9se6@E8r;E1$c0 zZ%s*x){AF5Bq{M2z?GmUAaW~pMien8EA~vbSU6`2qIt!OQN$){wt~s<^vv|k;PlLe z@l(X2GNxz`f}#{&GAZ$U+Wcc%AJp&l_@N;rfn|1x6Hl3nv8PZ(%-&B}qKHGs1$^Tx zD2vmm8K#etpKji#O8B716aHb;$*uca<(9o83&P6CXZL>On6eqgld>6QE`LB|Za@ojBU+!Sr=@K2 z0}Io9fc+AL?pH*f+5hAX7mBzs6(XP{_1jod=6nF3_3r@7?zM`b zS=73)@%ZrhNA*&~4on2`A&>Y?ulf)Pht3#Pe>G4IRzp>z8m>mFQJfPIi*vS0tHzvw z6JE&25e54Zt2a0y2Zy$5Bciqj&RR96kzhMwWyJ43w4$h<@F6FB#>=Y8W=D4#qnguj zNw2mb=+SLsBD_l5rJ=;c^ER7}I58*gG&#*q%V=PZO@vNqU^iNirTdd7bZiWP+%CE2 z0GI!DG9h0+@NA7-r56c3Ji5=@=a}o`g~?3Jvn=vEZm7g_LgZ#B=*Zi-(jYE79ul^9 zln>+xBhqLn=)~*lm4X{QZ`ry0IfOKnWLn)2f(O{8blwa&-CC{z?s3V%d5#XMA^*c|!4RG6lM*&>|Fk zBymzBPi^Q4)&-y4po>qV4yMsVZM>%g39S}o8~qpnwjsZFC{v^3*O|bw+8x6VO&Fwb z=)$1WHB=vVaPUGJfz}MDHVV~J8gs%ba*iX7A%bpjBC6bol*&yopEyF?CYVSw%%=gt zbR+UDgh#$9Rl|vlD$s~3=w3oq;QZINh(|uTI1uvfivuB_a_CGJgSmvsP@GnZt`UE? 
z6BX_DOg2NKzwIkc)BTXrJ z*TH`I$kA1`otXjUcEyJvRBzv-a7ug_AQN3SGVw=1APz1gezgiEZyQWgOEN0)Cj|bK zz()Zx9f&LxdGT?={Vjpd5cn$se@(!1zBS7J4FI+-m=vEGfhAg57|=)vWK<@?r?ZPl z$}i(*>hLp*c=2a6CWWYCp1>yw{5gTYAn+*ye@TGM=#*!oQDj|M=Y?^hwlA{LSI9a^ z7M^uB!5+yr@h!fv(O13tdX~5*KC7JO^T97KhtFR#3tneMK^Uh29Sw} z`zfzRkLJrI9L!9Li(R8wC|MPR#o_@pfI!swO^i?>q|R#F2Hg-&NGhC61C490XOW01 z0piW$5k)f|Ydt51&OyWsJ>f`_(>DCbSfQe0%GDi+RIMYAC1 zPPewuF7~Qh)tG$l^pToA>&IPI)QRE_M#l}AZ{ac&yDT|wbs4%?haFyBlwPkcuy~2; z7_aFUd~KZ_AP0X*pZ}fvC%uSx$U7n}9U7G;#W%q9Vd0WElzhVV@*(WE2+R`rI)QH! z_zr;=0I*4am-62u@O=U#N7q=CjHk#S6sU&699J(uxCKwR_zzXSQ1V2`vuD*pT&eU} zy};j0YFE2y6YZjX@KkiqidU!OekT(zec}yw=_-<5n5~*q=z4QnJXMiBS>D`UAU0nl zutM#v^;~4J7z^jkXO?U*9)kt8&}HwB(Kqj#|3bD$SF3t~3344+GVr$EF?FC$>Y0GY z+*+!!|lH^Ijf9q}U|KOrxk?PWib z?>+kiT%gNG^4s)()#aBSY?oim55R>Voj=G=>NVok+aw>M+@kruY`5kJ6y7 zS2HCywPLz1ehW!jFx-1!BNxokQgNtd2aEa z1ZXzm=Kv7VuP9$9Q#n%Zi3hPAS`ZKRNx|kHFCSw*mu2NwDjRCgt!R&;8_weoXo>#g zwbpesun=HVUDSae%uV?11O1rOcCm2Qi}8XW=KZcC9V$t90S1Z4mH%d~ z>rtySPsQe`B2-hHu?u-?V9G8OEuqUjcCv=4YU(GOxQ%lA2|P@IuG$2xBJnN)DtNqf`V+GR;FI`1=!*yg>3mteY!9x!^etPfh(qu@iB$%EaKC@&`}4cs zch0@Gj}JH9k4AL_w3U4_eotR*>NxiVtTjYY22o5U3uiD$*^;K@jEoyTC8N~O)Qnb_ zflPoYg}A8mB4tA0Z&{sg?+jCwMe+*OR@6+C#;C%=G{Ce%*Xl-RI7UNY>J76s6<$#? zK^7>G7mEoi^Ae5F=p9U(R-{bxoRo~uk9aa3jt%UJC zC|VYSl`OO%Q@J9|V6Ij`Qh5kM-4L)kQRN#^184-j4^^#NkQd z!={g!BkrugOb8b@!`J_-%TI#iG-}gM7ZW6#k7l;p+N*&_k@&6A>!a&5 z)}?P#iF82T)Ue=sD1BPpOmwGDshg)%(3ALVGT{fZS!(C9S^g?ydVTts_6jKcq*v7s z9ex8;@#LJVzyKA-zo5|_6n>04yzliRSHk#BTs>byLT)7X>hft<9Jf|;H3VWq`bz&? v-xF%>Xb=xohieGL`q;*0~k2YyK0?R*{Ajq=815Fi9u`1e!ESg$$We=#UwO(aDM!^tXn>;d)zZEyzls zQ97^Dx=^AtW{Fm!S}d%vI$cSH{*x_T*v4U-s6}uaADC7?cAsW-rgEai7bZ$Vi!LGM z$Q@D^XNPz<0r@!pBe;^mqplX6@~}Gw4LI1~h_ldrK3A?z>bctO@tmeRy6QMqQJpYu zjl0*#Y%GD>>@WOgM-K6~o<rH?G4cG5@xM4=hqC^gNa& z-$Is@VOVbS*g`?8s)d4UN5_~`*kWbe?P%)wIV|xzB6|@7o&c~*5kT#82)f-wtoH^F z7h!qCM*%mEHbdS`#i3(a^%159@rHlNHv{RXSr}Z-`v}w8*o(@uN;8mt62;q}RPI+6 MgeD_G#tMFrKlYEzp#T5? 
diff --git a/ultralytics/utils/__pycache__/patches.cpython-39.pyc b/ultralytics/utils/__pycache__/patches.cpython-39.pyc index 5d8dd57f79efa92bab2ac3ae4a080fbea14d699a..b45678261c46b3d19ebe75177f030024f3dd6fdf 100644 GIT binary patch delta 1041 zcmYLH&2JM&6rVRc`w`o5zDy24tGPhpP=$zIDujd(?gbF3N`%FV%9?m)<4u;`batFz zW8;eE)B_UQ1D7Bbsp_#k^v>&{E{Rj5+qF^1hh^X*nSYsp{_)rs;Va0 zDV47DPTX)_E+OLW*QJkGAmvlUm#PaG zlv5j8E#A??xobekEb5wb3Q#NlVh_F7Q>3`;mXocI_XB+q{T(bA^mEmbaguax9=k9J z@ldKL+bH>OsD_OZ$t!or3)?c9kg?My>F10XFf^hAtZeu6%o#h6NJ_VebVl?DbZdlG zIycC~Fv)&|7WwY+m<=1o9YIV+^cQ#uZ9*~p0l|dc;8!4H7szm9#H4#b^$#QW1sMP5 zKzbv#LZo+ryaga_@`y+}cAvZ7-<#PjvPGV5VR7oMx4+;*O6)7w*=|2KsmgPH9Gf2hl51sjta2_=Ic=qSdYGjuH5klugJ>2- zJWhF6VDc>Dxj0fh5^>Vks@QhV6@PoTCm*`Z*LSllXI!LiVLIbRp)0BhV;gX2s~E_89z z|7z`w;g39Xtg=JXoGY0vSk`al*b7_fYppX~O#Ib}t3|q}%4)}Cy4&m6H_=)-ByUQR=Yi>CZ4@$!zt~o3YTBECg{0O50hQshR~VpR(S>@7xX}sZa=@28&Rq z!5Rd#1{4}lN05QOiWi!2zqwNSosP`9osMoIRJ`|hy)|_7)uJ0*KEH}CqnHK-x8pgh L^~TC_&2#<(rE&)h delta 603 zcmYLF&ui2`6rMLTo6IKJ>~>kINQDTsY+6J?1QC03>p`Rn#b2Qq;?8V0YuuD1taeH1 zWzRy*(Y*-b!E3J`z4;&Hq(}V=#FH$TCctnDv5xk%tg^II?d2G?SS6g(=(-a%&o9u4-+B4!UNw%XAw>8}|jz8UM8sDp%xlr=^K{nhjY`6nI zPoBaCoVnV&vD;H>|E>y>aX8-FJ&2=ta;W}U9_2(~xWG{8PGVEtXCbopjOO7W+#aFRBF#Rk}`a?erCdy?c zuXaoAk0){P1&MhI7P8X#zrTrdY9kPxf`w^Q0Roa$R9x70+okUNGVc3+F|R+o`5n9jJ< zQrih7+ufAf^ia5+Si~e2Y0tFC^!r-Q@**cauj^8xC0FykagzQf;hVhoXyYuMdGDL| z`=7g(D8TLXTQ2C_TW3H2v;DX8KYx{$mZHNGY5$w9Yk#QI{Vn~FADi^?fHUZHmvx+u z)A#94if{c%J^O7qX~5giXY4nfH0i}K^GP!cOX^QPX_5S_CsUYT>d92RjeWNMw3BK5 z=_k|sGfrl(I41aIp0u-Xb6-|}_Q~x2oRc~IxhHe`^G@dV=by|+SQ3}qSI}R0varAC zWKn(F+^SApve!q91%NMiwyLpd`^Kkd5Ur|VkyW8XLZSO>$u{7~p z$~9*3`r2JSx7Ulp(!B$2-qY_H@V5`4c5W@;rO~E)Pz&G7I**R#2j+(Uq>(ecraNiE z$TgysW=|4ldQGQ{oa7O+GdY&DZ?JEW_qo1@3Z*#k9Pjh27zU_5&2$5sfx(EB~YExJS$GW{fhr7?^^Ern2!E;{D<8$xo?;Q*iifcfI<*-bbBsHwQ&zp#{NN8=8^q- z9p_wq!ycbhiAKMl_jU~XJ-+QM0<}!8e|4YN=XVTtI{d>!eIDm_$06xA&Wn=K zGvKK9qSv=Oj-k#2!~Go|o-$yOeW)pGihsRy{COJHWYk-Zg@J8XdkN zkK5bnbxS46(O3i#KAeAK2&E0x4x{X?VzzE+JzxVMAFdI956-|BtE7HRAJ8}HE*ad0 zE}h$OI!~t?GX!)!+B7Vo4(n#X@Q&`gh7Y8$Ny8fjCZwVG?Mi&VS&FVhF7h~&z3`S( zBMsGqJ$|eF)x*|z`Iq?a!V3JZ9=31GX!rWBm+L)Wh+;rwN}a2xatD(7|1!qMi|0k7Zd z>hq3zxQlL-qy5j3%$3juWzCE-iG=nOv;v~h7I+xq>Wo!wykfBC2ecix33=YHZ(Qu z-MTrJRNuI5!-kEqr1k5YHtyXUOKRA+ZCfMqTfcr^LksfRuyNz2Z5v`qn>TLRK)=_o zuix6dFP5}*E8gN0WrFArDH7Jm~x0k2N0T&(|&YSf}w|fwZ>Cd9Xlj zrYsNUL5@^YF;hw`d$4rmNcCX(u^PccsRH|ghf)Rhb=}CCBqos>M=#YENmxA;UnF53 zigR5bGqX8;*lCF+VqamX&(*KjF4c9zes797bCm9eKcdAICf=^Qe^E5!?~!tZ_3R~88`i+Mz?imG=Jw!|4eDfbk#Ox z3!j~~PaDH8+$mT$;}`OGj+@@F-L>Zgk6nIg@};P~48N1^maM$`^wiUTler?6xiXfy zDn=i0#s8v@=xT!yr&#{K>b^5KE&YocU3Tu}>dERa>Izn%Hg_zAzx>>+E7;|umFgSC zd)J!W%1Q(u6aLr=IfRePw2I-q{P8O(0(x~t>!GyEp3x+ot`L4>#(;rO4;VQ;{leQI zy^ZwlG#-WDQJvE?TD4Eyy2Mq2Eeu#W#Er|L*|hGlQIm$m^32(S@0bw_KfeuMP80tW zdFZ_{V@DUy@4*irRS^B|;2S9jo#=M;^~H6f=y9AP?}T$v_n9s8(xHh%SF%HG@8pFm z-d}xf_0%fC<{a05T3H>`2TLbSL03pW$%Pt2uBoO-Mrp)SH|>J+0O@scyN?1}nsna) zbp6V{F|O+b{`Q)MGc+mqh74K2#W=d>%fSV^Zmp+QK|`KvJ>(eVIS=owjjIW**R&)( zI1J}o`hz+>>+91c=qbHgs|nBx0sScAC~x+w2)m)Y`hQ!0S+Cc|thfbB7Dm{BtKZY!9!qI&?;qrb`{>)&-hOu2)hC4{x3_bH z?)G+mFVgWuT*PcXzYEx;hyt=YT-UK|#Y$yUL~?TQ=c|VEn(h~d6tm^?f(CQSgKoXi zy#9WQE-C9527`IO{(({X6f>cuZu}{aJewbU1g~ZIa3RBlOO1X5r)44J(6YmVJoU`Md z=>Y|%!LqY}>i{DG4xRxDE(bBPxe7*^<$zkr!|z-!ANl8jW+}k0{1Jn*DrRjQ7#IZD z26@*O^lu7Hb1h=71Uou`m;(*>4%7j`aP$xQT!gs01~|uo=k|*qL%fIcy8WP3=r_xk zz8`}rd;1;LjO?jpE+!ozy+b1o8GR)**h}OQVf0d-9u4B_?Q>Lj3=Z}I(|w^ExIfTk zL>%^c9O4{yxX(4Ps&k;L2(OQ;7qyliqKc%75YbiZ=Z8HECq{i(#h7p3po?plC>4}P zepC`Cg2r>fpE&cr^N#9Hq$RBWkaxfXoSOqtLhzI-rMAb{>j4r!2r7U`g;UJI(tP?f 
z(jJw=e5jt=Kg@er9=)ECp+Up}n8n689DUyYj&^Ym5c8odkOP~L3UxvipqQG3y+i&l zmu^|1husP$i>W~Ki|vJTJ`EC2RLXKdmY3NwgoztLB&`V7{RDCFym6 zWW0eh2P_jBnjm0+?@ql=r49V4a=w0AU)d9|@V0=yCtd!nz6Vm_Ppj{;N3J!eA4Ptg zVH8+^>L=F@HBo4i30)8N@Z}G`h_Vd<{XmXN@$?Y9mOUzdfIQU}(5BCxo+9~6eWOHL zUY9*`Es>AXUaQW(N{$vt<_(;2Kp!y4F~>{+BcQnTNl;8&ULvT3dX1wGArd+XmO$xDKx54hB7L+RL<6A;B1^3ZW@;kH=WCXYi4dH^S4Xi zcKBy1@AJ97-`Doz$d0DRMJ$)X-105b$&tx*i)h7!C%#kLZYaaJ(?a-#@h*9 zQ2)k`^STSh6FOjW#iKnM`kk0h>Mkm>s~A^G+}3CV?NfIJSKlBuDHbj+;4-_0R%>O| z#YRJ^Xzk_+K3)I%8$Cbtztz2a)ZWO0Z%})Dz)RSFt>53-^$%^cE5H8VuI;{OL(Eb0 zw%Uf;^>J7&t<}_)2YcXrOMh86p-)7;^vL)QV#!ZHvkG#CA|2t}hgp^QHpFonVkX*K zW650}e@DmQ1$gK$Fbcy5Y(wXe@L0>vK39JS=Xz?iLIG8+Od4uXSt-_UBB`$!4p7Ny z?nCc&?_%w4p?3G2K3cnXu70jfsNK(Sw$W!rv;wFIsG$MBw`+KC*jMXl92)8yA%+*& zo{Bd^IjX(=PQoCRYJlf9H0&2~5I~#ukb49O{t$i9l6uchP6jzFl$QN_v>)ChM$kQ) zt@kh?b@wpGbPO!_h=z1#^91GPPB;0f4navoyqA4Z@vIJybe#4_1|`fuiG``i5dF9 z-0tKD`$e{c!Q0OhEMg|c_BIhd#1l+;0wS2$5*~(}{5=0M`B_DtH}J}sO^cB6)B&+n zR6*vJvq8rSH*qe%3mkBzt7GQQJ{P#;*7LmA?{RhXd15B_AQ%(|e`n0#>x@}sL@buX zrlF7DMcJpQm{rWGAWO~^Hlq3SQj`HtJ45l%Qk2+ERbX}J2Ps0b=Yp#r?BJMn033bB zcaPcFAZrI79`)yah~}dab4Et!M9Q0yI-^<2&{Sr~SxT@S{fGJR^8!aWKU(07B?;Ff%n^2Wog+ke;c~dn1nV@MY-*G#? z?Y(_B%s1RK6+fuDS@naOn>A6);ZmV?-<(INJs7P%G~W@eXbYO|mQ*g5)CwiFH+(Z2 zKNy*T`Y9XWHXBGQ{rpwkz>z&Nf&%nCbGPz|jyYhB+WyE$N zQgtB;H@k94u>a=%@3*pw=M{!3u2xM|g&U{VhR;UxoaDQdS9G~+ zvMa2g?4_UglXU5s!L0G8m(nsG^GXjI#`iC!DKGta3;l|JeTgXX3DjTrt8xm#lX@c! zT&m*I#iDgW(Yo8U147XtJTvYVu39Xt5ejQ=uk#3no#Uq8PXCuLn)SMz6`$+$=m-L4 zgHX~CE!jvAyKjW&zxrqc_|w$Jy4FnHCz)lfn~k4r(!+IYkysj&wDvuC8@^gLOhE6a z)q_2cL*MyKpgCGnK}sWUst_yadT7;L_V{cn8Xr(Z2|>NDHelwP17HCWW;~XJ)U?Sh zdwOVTSoSdSMY1Hmz_JZugqCsk1d?Th6~c-PCag$C&Iu5wfboXm18G~->joO+n&2%m z=nRjnQ?6W^`VLU*pI8bQf`5c~2~hnWg*?#YsX*%`^w5}P%o>1@jv@v`G*RFC68uvh z0l7rAOp=(9DZC{C$t_0VDEDFls)ajTH~Z<*;9^tu$BU z5gr%9#)TZxX^0E6(3p|K;zF$2kWRx`>R6Jd{fUU%gzXO`1yUc?7QaHk^i;D(#W^=p zK9E)^j1~j`xu*0raS_>%07wu(tR_K-u16sk)zBNM@~99;*CXbURhmcg7-S&#`COXBJGkvtOk>ebL;Ids9xiI1Y$wAn>NNO>v zs?XKo>8oW3@O4AXEFx;jgToN04tN-P59yhy)7#f~-N1hfKoL2FbPUuO|P=8(73CQ5*efL+YY zz!ftyBp6M;An^xd$vkn5JzUH%R3FoK#PsKx5Fkd7^g$B?{tZ6bixm_WACk}l8Mm{1 zN3&I2R`GlniG2Tz%?h||YHrYdWz*H2Q#%(@tCzBJFRz(g^H$CHJ_!N6vNhcN-QAyN zFkbpdxXy3$iSe` zIT+!6LZ>g1<6kOVdEczFSA3x}+Or_Bu;)^nFQ-qYhYjKSYv!B#!~AsVwc+Wr?~cr6 z%{6|U8(Fn~J~wjw<%q|-=ot_^1Htq=nS=N3kiz^TN0*tuwAwjc`fg`5cipwlu>Ea! 
zsPtQ%;m)8TlDqEbnc0`qCeuRP)vl?o8-@>4uBXgw_@R5DdT*p^-`uhJO_3wdFCIA| z9650(^W@KqR*0Z{XYG5w=?&9ouW$a|E0MwtAGv4CXI-~ef4}$7Hq80v>pwpCr#m8> zo(=8|8t!WIN4*=Shv!Uxk^FJ;!ja>PM@|bzPT$FV8F0UwUv&A^$ydX})2`W#KiYC@ z%RKj!u1~ra@}DO%CI7^udO-wysBQAZ&3q9&)B0=H>Be`{W;^EWw>)#MTi*H7Tm6yT zg9$4$)A7UdAFaK$7QIhZO`S|#$|<jNTMCdq{UoBdXEa{Oq_a246DEhN7=`&9MhWHumQMuKez84!b|4hKx zL%fh>4+l;&rj;#6X z%$9^YUebgR%A5C;^Ei-M&~d(o-xjyv2|9fgvAOUmXoo=@B(d{v4j8 z87fZ8^8N`teg6>?3Yu+sI;cf~bNchVg3CRVJ>i`9^RMND)wq~fE9BKi^VTy?WNP~O z@ax+{_HXW9DqMAS<7eEXzI{yAkVqQmrqQd2n}BiOa-F3HRCOJ?Rl3= zCQCw{)5kx2>H155miregK3=h4e-^m0V1HpeX~~}d9erqHI46`Q*jM9sX71&T$&9c* znprk(Udk&DotrjK{RZkip88pFNoY7cJT)d1*DMxo5{fp>xS~Z{$1|4F3oi{$42Da` z2czi~i|K2H^tI#0&+Pe^izkbra8w>Fj{cb(1GQ zI{Jh5o9%NaBW=eQ+g=pfUR-Q@S!jDXx~YBKbSZfvc~U=~B9RlMvvzpmaA@PzEmK?W z*vf%q+j5v3>AK^`#q|it-SJ!_YC>YW}}P$FS|w#8~z)ZiN|KNVABVUC{G5 z!U9^}a1K0AaKe&DA#WKoX_O);|+mBmZIUOrfB%7 z%J#1CQ&R>~{0bTuP*IcAfK}#Bawgis$I`}Z0UOX9_KVaaIU}KEKK&cd2GZcqke)dI zQ~q4JhENZX5DU)C5L3!OiRGIfNavD!sZ;fD`27lk*P|d{avne-5^mU<_Sxi8G=6Ea zA2^Q2Kq_bPD;(QI2qBOs2`PY5;15l@0EkN7mAFqi25)fL6cigZOn@%RHE4o(`t&xX zCo?p))CV#|2m(36=;4f=Ouu7elyA|aOM>mdYR9~oZjMR|MK_{4DkdQl7 z?L4Zam_fCCE=a^gMNg!5i3f%p)r1y1@dvd=oQ84$Ri$=xdw>R#vLWXBWEhzQlIn11= zFv=tpx`V#hPli^rpH1-lJN)7KY&S!hp~O9fU;Je_hn-oE#-9m=jafawmOVTetNb@9 zRWmt+#4_}n{~pCja(j6<_`ttM-rpb}6JuRZ&~JtV=ZHA4iAFO+jblmTScq99Zgp;Wvc1}`1yVYhyqbA`7(&|Qf}eplanVWAPOJnma_6f zWs_@eXVrysrt`upfg4&fgZl+b@wnw~&WaHC?Pn*N#v8}ae!8m(+6ljL?Kfszx2v{B z^R@|9Tf?Pq`$F~KIv1(hdOL61k5@<9j{KDqBK%d3uzkuOZi8a{vEU2v$jA%jf`MBg zWR!;6q8Y2Fx6B-$t-txAuy)s+J-T-9-R!*J*|#=_+TPr;1e~~SYGbeo9OIDt&C#U- z2Qd&+CR0QN^Sz_f`QLl#qYX3XzrQn5v1e}ed{yM=@r5()#WTIanckmf_JKc}Tk&}g zkkF;<{L9-Xw}(0=pBitvTUZv}`2MbIAOh+q&EP#Z+$|^$HHFKs{HgqK#k6VG|D#uLy*hvP_U^W5?Ga&j z+iWScPU`>QTx2)Y%8o={ZeQr>U+ftcdWIJ>&)qN76*b&1l16Jn0^*mS*X#1Z+pX2r z9n~}D?w=U44s1yJWJT72t(kGu1Yv-Tn%IMnM^O_A)$oX+XM|d;2?_CFr_?(Qqh%xu zQzS>KAgmg?@#|>mbJl_hz=TfxH6(MI#g)TwVMG*ty#-JHO*rh-6PwQB{H54h>5WoI z1@LWO5kheU$YQ%>ov+ZFC!U!Vh^%(83Sp;r|J2yc7SXauZSuiJ0oL zmI&39YN`BAV%iV}a+R=T)o;0m1ZN&Ia!CPDbbNon&_k?=We=2}B{~Rf*n}9_mw>4i zFtvat1gD4Ti%9f}wx~o*e_-Y*Os=nuD1F~ZR!1zgRPHbImZqoP*lyB|*ii2;Un0$h zgzeOph&;{wn|?(*D3Rhn(E!LPfqn&i69@oqSmYLoQ~;x%rP0Xxn2keD)E2;D1Bq5( zJswDfwtzeqqFb*uV@=@hg;kBAQpFMrm|u z$|+KFlD{5N4ulGigdM>&0rVbUCum|wt;G6fbEb_}VSNKiOeb&H=^#nYKMcLpF3tI$ z+E`Kt&Y%wViznXZ<+bXD9gqHZG;Mynq*T8H|) ze*RtZAjFJ+pPb(!=j-G!*68oi*EAew8e3XO#xWMbRo zw#DoUA-f`cESkMWM2wbf>EmZ#Zw|IWlzx2T@axU8nrB8qywE!*nqMo4TjPY~M<7`} zKD`%G-{zTy8Q0BCz>K!dp1rwiu57OD=Ul~KI6rm<4=>~# zTFN05tQivD18+C~!mL|ScHg2Ak%##3#%b>?_apBuFG<0l{_BhaS+?w-Zkw(6QPr)g z`C~tM;gc5@@{dQJe?iE9@&6&&^2lu258XfNz16#rd4K`pt}Wxz!HI)7?VLQ79L@>z zVEb|(c3tn98JTbT$^K9FFTCtpeAz3!?ER^&N8CnfFv|~ZXKWjt#m6JnGqowCl(L*j zI+;XyuqV+5smgleWWkEeI1v*PlRd%GEpJK4M+p_tmg7TE*!DQ+@Odpx2$H_I!=*hK z2(%@cY1A;qb4K-u^No13`~`Aoq5`*hioR(oisua^MQS3GwCIbXkq$LkosWbeq)1&B zZg_9_{a3ENvQV&Lvh0VsKU#Hb)qGZT+ac0{22H_+-^Ta1T-y@OSqn8=tJo#TLU{<3 ztV{ObL)#^Zj9dUr2`~~FOgLJhA9Wn@;NahYe9#brFUHhx5YvDRe$?Wa?sgQVN@tvU z1_O3FS5m-9f&>XBRYo&a`b+dVK&>a{DO#Ulzce~ADyl?BGs+^CvWEv;GNQ89(E|FU z)@csBDcy}^(u1}q*umtsLc!*!Wy_*vr(oGRd;E@N-@`yAU@;)~;NwX^hMFi%0rn)CbB_nJ zybeg@L)B5&Ih?!g_B=G)u-jHaMb1NKv8yl{%_3_8k}`InV)+mhr(w(@C(%L>5Jo9+ zAjIz@1VU0;B@m`wN}WgzK7FO*YR^>9bk2wQ*Yl$*>+e_^L||#>j4~o=e}5vd63#b2 z<-wjrBN6~sMsK2}?uK3lB6M>}*VuT&sA}Sln4IR(e@Kz{ZaVY=`mBpYxwO}7m{<>s z)e*0qu_sAokl1))y64#0U&x`^XcXb+exAk@DZIk|K$gyG zyOcJO7Ubx7$9snNE!Qm5`9C!L$a2fF;Ao7j+B<(Na^mGkkME~Agw#s)Yl7p}jx}1Z z*u;DZ89#BXDN?l_Wr8BT^!VkE$8MB~Xh=@U7cxAB@=J6I0#jeX_?6;7YbP3~nw&D# 
zxbj!xoHX)0y(s12%tZ0XYj}ciCW({7Nlpf}CP0UzeVRFAaau?JQ5lJB&bYz7Z!wJ;#F}X|HA(m)oDShBS$;KsW9~H+%lk&Gv12!qMaxNE67KS!O?WK$MYQbK8BkRM0 z>jhDJ!=in=VBbEwJ!)?mPr8dOL7BYaq|k+#%Gu$_bM1F5XDBFTA|<$y4uD-b8qN*# z*9u|Nq$Ikc_9Np=^K9A8)>+rhL(%n(k&?Z0&k};)^a4%wGaYv4y3JE13myu z=@C!ajG0wq2+#sBigI3gmx2`ev8#qk`Kx#p_BFFuQXok^Gb7d8Bef za3Rtnv_M+pn2(2l?BXb;l9nhjA<6AnUh#{QxDFzqolXVNAy*kMUoBy+zD_=8$zga3ZP-jfO~d+mh5W9OGfmF#!I9O`aEO?i5ld%p1z;jJ!x%vZ zf1h{^!z#iOcG6-g)Ewz3F*N4zeStLR@E%F1e-^We*|W11zR^5&_0{?h(27fRF2rm( zy6jSRpf}Ba>4k|GZl@i)Vh(x4!@KW~T^pNe6RNg|ERi3ryR~j!FKj;`R2>jj9-Max zE1!)PJO|;7P;hMAa5oL-VuP;9ypa9%m&Xk-@RVC{`Ps>5-`h8x^u2>Kb|~A|-|CD! zcVr>wC=NFWInR$4SZoVSX0EzKv+nb!46j5Tc^UCr#H{A4MJGECsNi3V*(vGuFyH} zjhr~Wc;bw3;><$tz+&$?q4(TBQ8wa+L1>`-C?v=mfbkR2LS?a5t%oIK4Jc^d6XGdi zkTJuUeoSL2Ezwd8L`ys{DIE%A3nmsP#;BH9&g3U=7MijXEPVmk1(i~=H_Hv8mq+DB z;b1KTO5&4fdjm>nq>W93AWI9MoQmGsO%K*rEUBMCr)BN(yAa9_KMUu6~Q2kqLukcqdOkJ4Pzw^qC zy&pb({pp#u4-SsI#_PxRcY$Bx1X-vJBK%|F#<1&J)6}Wy(&@(Q6*Kmk`kOh|*G988 z|1j%EMYoFP8s}UeH_g}2w|%nl{Lcu5-&-+h^SZ?Byn#@I(1; z3I$$9rlpfLCc;SXcG&~To5Y!c2IcQC&9ekc1(pG;Ym;^vU<9RNBU#Z)Vyi&cNWa79 z_Y6rW5D3J_t_W&knY<)k4J`?32$>=wX;la28Cgd4aLPoZN$iD2jvMBtUWmF_ zc8^ZHOsmY>-{l)+?SKtMPapE`gdQ1`(c((fsEJ{HI4D_@!@&opJrdwUp_E-AmqP#e(L;XB9mwoXN_flwNWgx69NqST!sYQoIs8Qha9Qx*@S_L zh7th%T&Saq<2qY>!!PeEK|D8*E0vU|fq1S|YTmCI;znxM^07RU>9o84JqEP0p)l4% zJUUHU&>(&#l#`T}X-9F z&dxB>UMv>k_D+IKwzFTCEI`w;#iHNK!Ph+?h2W4#lAQM`jP3B(=!B@sZ2Y zc&TNHo(ao(D2U4PV@2f zabnzT-139nH+Ro%pFbNp@>1mGzUYR2!LnYktczF+BgHk-XYZ~m3vZuxUE391MLN)< zu*EPI-OwCa2}|R}OO;z^?Ly^_$gbx=)(N|gMxO5yc6CL1dWBuRk&3>ErF5yZex_L{ z-4@w#Xnt7OaV+v;Ct2cnu}9d^8*w1U3P}VZF^`sPm7it8gocn=6RB&0{mHuhLgxOZ zhrRBbjcMu7M@lP!w$&GQEBH`3#MB}IcN1Ba2%U^Lch0)zisnzC&;?7^muc?Os)m_* zVb!L{mIL$k!j@+u&$)yxt_asDZ0U@w>_VGAvsH#&f~^`AW$(z^J#%(p?LNUq{4(2q z)T=TD4M)|=8fNwirJEyLTj%!*TMtK$oPps#;Rq*e^+X(hAug0&}uCNOd>0d2nZO8HbSDLBC(0kVGtKYuWJbgW))3Mm4JRvEG^Q? zQ$%@kyi^$|36nCzBsZX=qFM>k#gQOL$9}6RVDC)P1P1ve<{Br#URD-5NJQZhq+@xD zp)xrHE(XjVCnPDpyj07apsdH$5xtWjMI0nhOT>aqiux=S7gQ*u3hCIoTG43=R@EY( z>>^x)XlW9e`#fEAUl_EiBta8ysp`Pi&TiC#UaNT6?XjpH9h6sC9_-Y-gYiiizk|w> z4b94h;hqOzUrh6aaZ#3|ew2r7$zE1(VTB+wa6qYcqKrvt8f|@?2M659KA4OzHhKJX zUX9dqm^YzUaVbO*d0d)fvMND>B$5R&65@}hu!xSufGUu!{Ml1XMcMKTcE;x*xl0eO-VmdGCSRYwcKif2)g7Y+x z+W&R!w&0r3sn9b*=9<}&akH$DJ3Q^0$%ZL`{H+tVaci(Xe(NEbQx0TK`A}Fcdc8d~ zGHssV)FAbZ;W{a5D;!NdKeMA!l@?nz=d2KzU3=;D^XxR^l$5$A? 
zfrA*IC}Z$$;9`;aH*;0AE#uOj*tT@etcjCg+ffDSn)W8(%y1dBExu#oGQSc;tz#)) z5u)Ioz=$cqP=sh!fWh+D2vJSTxNHry5|5Uz2GK;SXNkFHxVwh&1&v%Tdz-ktUt!av z#cJ6D5eLhW%LmspF;6e?2AnuAR;%TCjTATwUEkJjl?o>W)Ht(|&a~8soFywtojf#W zAG;;W$=Y#4j(mGelbn?zuR9Mx0&TG}OM%%k!UagUFEsw_wwm2n$PdT3y@@haOO?eH zd$s$%8cdsce0>;QFX^A+dNPts%8g?%VnOjqnmzK58T$L> z+zwK1`w>S@#E}PMp`_7Q2RX!rQ8ssCp~P}po;`f5{psc-N1tv!(SEe`ndc5QzbGn~ zC_EMM1UNeyw170Su1Sj8)v;9dyFX@?0q7$;_TVKk7(*vTQl^xC+t~Kc>AkXFS1iaKPg6kEIlqte`3{?JjNJMszba3s>7&y(L!zF79Xm7ux7_>hp=W34knEWdtvDQgs}HS#CbCEQg5VxaKSP}ORU|tZxWU}%;JOEBkhJ`gDXjNynINjB#n z%DaM0?TN(^1jLFxDygb_Eh#+`6jc!e+l-Vrag|1)zV>+O`SL;^BSde)z!E7vwgnK8 zN9Sl5@%B9mqcvS*pwklX6lppBI=-RjrvbUBtZh|5z>ZeXMJP(`VyRMmLKPB_b5^C= z+B0#A=RR72e-q_m1uoD*u)P85Gn-)=S%0l%YWK8DD5`_yx{u0dd^qoUZq5jU!$~(^ zoi7zOAB;8}isT&{PmNzF4o$C}8JR0vur$Z}ADk|pfk9+xVNn;Qn(0(n*=W5ZT@vXu zA3i*474JF1{YEXMa^!ZJVszW?S^}Et(3ahTotIP;S`hrdP%shI$8b&2VFrHqQmmNK zKRDEKU5^_mN3Eivpq`;Fy3B~}@|4DDfCOX>{Cuc7?-zVqK$@wWc*T{L7 z96HI!e@@O9;qioRl3mK75zp z&`p`CX3K-3I&+u)L5F^=IqShmeVMu9!PELWxE*?fdGiBHvAI|O`F5kZ=JOPTx%`2} zU~bZryWxSwY97`<$V-J8x(7ui=9K$8+3lKkb1u7Q^S%LJpRd+!*`uEug@V-sUA8&@ z!I~z$xm*9>kfG3=^7-bq=0^P&C-oV)n^UYSt0t@ZDKsKZe@Y>iH)7Wzg$T~jryE1z zcGf6F78(14z46o$ej* z6TfTx0BxZM>FnYc6b$q|+48sg{y_Q^NkKUsw7sxEyw z8)>p)C#*swB{Xtup=;l`Ox2h%XTOmpm!$E{zL6vQ-pEz;u>8exnThPvq1}1ZZZ7{u zfm{=`T1}I{%9-5exOfh^6!py&;$IP$exq3CLUSpEHzYD$Ea%BI+88F%Q>o@8f8pd` zmHgdPEq|$RFed2i10&Am5yO=*{_=_&D`g*?W>_nyQMKNn%eM=PIH^$ZC}g#)k>jZE z05fUaQ>XgKU;cXeE08Mp0<1AwBx{VA0q9=~w~Am53yfBFqm80n2W`3C(3Zm$eXL|! zz?MCnL(Wx?ndILj2dg*nxM8`2X#$$LQucmvz<_&4jw`utk<+Me6%L7VluMM~uvsbB zaGRQk{N>*vf5H9vHO*0S-@QC7#%HCRjx1;P&=R@qdDVhT zb*qpE>pi*#@kX^e3lgV6k2twCNVAsUzB~;F1I~2j8K%AUT;bOZoy=y9f1g}%qH7T~ zm+{q*sua|}_bc(AmcP`uW;`EIedRB1oqx6b-E&C(Qr|afE5V8ZKWLnHZ|4Nu;k&iP`dg`YbPXnRs+s{1~S#MM+_qd$+n^e z78N&*?h{#9bmcP<5KNpw{3UnaFca;FS6Oz0Z8O*fc9wVrG*J)WclqIq>!HPaRu0FK zyj=qjyo|0Uwf9ck!U%JQI8#ZwJ7g`<-3^5c$d91eiYpqs{V+^zaf8n28stY}$^E!u z1-e2&sOZ+p(QK*~vmq$e2&qO>#P3?*Y@?>y+FA$0?Yw*TX3&lHX8^gFeuP=F!U0KU zJ`skk$->=m|A6mU%;XyO55`Q?7M_mA&=oKpK%l$+^N;EIhuvfbdk8lhLaQBTd3f9u z0~w0f)zyweqg>3yT#M*&9{0g8Rz~l6oWT+61>yu22m2VDsE+sMQ6m&$P(U!2?1mf)zt!`F*G#P}*L+VAqdvVHj2#v&up;-#ywm?=keK7heQU zc3lgpy;w+thUJ~iIuMt|oS~9El&x9)ju~7RSRs5LLKx`yPWtMgy#F335m4KUo`*C|^AMPn7V*~&`tQgW^b=(6v+(B`0Y=K8eAMd5sZKtd( zAPr&b-Hp2~nUQ!HKX>`v-5%U3>4O}R(VL#UA&FQpHrtS(Lx} zc;fgS$H?(zV5D8tJ+SD^1wS(tfJbIdTQn65rsB}kH(rS}_1!V`-!<73udzF(T@)$x z8wbC3Fp^an?w%zl zxOi~EGuU#5rIfTwB@-n6H^|ICzh@$A={7fm^WDJOVp!Bo1GYkkukHNaQ_+Zso6YZn9U~O?NY;D2wSF)%;>KlpUs*pr?3`)Keymw^EXS!+s4n1A01!) zwP%Rg`pm>Lp~|Su5#A`+sxCI)P06@aH&J(`jNRc`heJFCw+a@D_Rbv@ik^<9v|ii? 
zt^1HM)D$`zvW7QJ=S-J}clQ32fgu^tD65>;+RUs!d@0#(}RL7$3&(zfCI~*Z&L_>KmCct%>GK zhb9hTls&s>t3oZO&3A0IzcA{uHc}@PkserXFd*26_Rn|O4(FzfVouYB(shLHcy7DDn+Yvij=5>|lOGTOBIETb z%%wQStM1EP+v&?y4+JG5m{r6RD5q5}Hj#N0NR1U@SdIr22xy{=8p;R>NES1{2}m+d z00IfdnLa{-84{(a>1c8=%jra2=tTDifhmhcg`VZP(u%p`_eXxf^E zpUWQAtbHOPTqa54AeOf*3;YZa@n7(;zYH%B`Fa&e1fm_M+!GfPcZK8E4RK*{mpI0S zFmzak9C`x%5lG^pE;InPJq$82c03)|=LbQ2B|uUW-bR$lrQWc~Z2&P}D!0%NR9bvg z@-h)Q&M5}=<+_2iek_3wQY)Qm&uYy^lAN+IP!wP^B{(yrv zZi-8p4FSO>GuNcegcE6`q%T(9_)=-;RYtubm{BCGuSG&j9&C>(cfDGwIGUbhVP2aT3WSw; zKoW{<13v0YvhOxPm6FD0Jam`cjI?+Ektdawpu=eFP)c#=pij`jpv3@g1VAz2u|-ms zKdH>e3>E@31nIr;WoV4xKXed0;b@L4L0V~=#jEMJvz36>xHD(Y42}4^2L}`%W{eRF z8t>{GVI)3oqQVkJxI3F=-hNtS*syK~InoYtLX|eM*}KbeY7?&0qQ82{QE%45+}q{A zWu~zEh~djsV=mA+A)q)_s_inV>bU#~kqMe|N!3(94c0XnKf}D-RVBq_>2cv0TdLS4 z0BUAvB5Ry@dbJHZz+py`izF)%ub{T+C5nrr?+%T>HV%=v$3Sp{7^Eu(Ks4LD{AkN^&pCXy1;(y2Ju~f_kxh)U!ee^#bT*+ zp+&oIxUw!8TM8P)!6fX_#iBgBhO>1=sH?h3oKY9ZBmHzViLD-*ATHbyfX<0cXoidLVz zlisi)+!)PU9ko>STf4*cFfPBCT_#&FYg&b0&I z%ZQ{y<74L7Y{ku!-(Q7`PyO>vACLa&?nwO+;h0xAeMW%NC$4xs=8dF#h18ylO`j&G zF2QIz6i*f{g@UCpTrraqv4D2k41(wF^pHQAS+SV8UdUWOlNZh08nta3*Gm>AL#EKt zYZaiXDnciwGNP91Maw!Ih{0v4S@ZUYW!-}1Ak6t*JSa-eHlm~CQN0e|qqy1()67RB zlqX(q7yj@>cDuETdxY8>@%P{<44rSR`zoXaUjq_=2$E%wj1DKVai@f(E=z=;a+ucC zNx<;s$Qphn5Z1@l<#FX1RCUpS@k^Iy{L1BN45b9jU$VS<$p5}#`x239s&(!UFkR`< zo&*UbeaTv_ggoRc)=GO+9b^wJyzEJ&pCc0&=wqs?6Int_>>1E<0--KL^PB55@e`1IAnq8kCCBGXy+v5Y&G0vmf&j%F zP^7&*2wDE9CqX9G1?*axO6aLjcF616krXXXUJ5CK!o?f8#KuE$OHW=DI4+#+_MNaxYTISbiWM(8hScIx2Z>wzsWve2W0!5@~`oAoq8_iH4C6=?=& zhqldS56a3a(m~#vBab^UGZbmJ%EI87Nh;Xqc^&sJULTXzKWIzZVBvyrXb5_Y(>EIH8Z4o>=)QG*0Gg=6^Yg^E|1FP?5rNE0;~93 z|2DZ{J=^6k^&MCt*JdC`6O$&;vPX?Mwn9^1B|ZN_V1+pU;N~kVJ*v*K=L6|CSHM=e zN~9|U^Tx_q$O^b}Q%DxAe9E0J6o>jhK8{@*XCP0R%XXHhR1YP`d~NJUl$xu(pM=GE zxYRst9KWJhfF4QE-XEN9mg8p!a#2F!=R}G!C(MD$0Gk10m2o-6k9ciP z@#B1z41q+gmirTZiT!)bK_lwbnaTf_M93ne+8=K+V>ol z{nU4$3Tv0CQ(4<#2^WE5)tWqysj=m+z-pv3-YAv#pRv`N_@qI)>_Pl$Y9r}DB}_va zmCGLV$jN11_^Gtzaiia<;s%-#v06?mzu6tN(4Kt6n8?tqUebytr^FhzS{xaP`jqJy ztr=SzSQD@_t9RzW+CUb4@y{vg9={$Z5NRl7*E|7Rs}XTCc^w_A3DhK9{a=!ytL8jV z@Ywn#NZ}{e5_@{iNFSP)zeD~Zm$8X3Bj=N_WLlmAHRN&?ge_`D7boYaz60fQ?p!6s zrV;#Q%OG@ua0AXN!%L5wi{D5ChF;jyrIsOo1)OqOh-ZFdE8^LFt2Fidl}GYXDk;H8 z@A1lIs&7@i2Pmd6eX10#--va@TCS_d9(f+RkS73Z)*Al>;|bk)y>s(n1)v}@%NUirGNMuprhxTBR83}ze>BJaeqKr#PL)?+!%G3iEhe_%YX=T9S3p4MRK_#DQIKLZEK9zGutdJe}$ zQBRy!xKYtKjKn|M**6H=Jie!DcXoLOJQs%er~Y?T-&cf)|MK^`f4QiOI9qO=!%3yEO zcRxAhs4bN1@6z{QQq9Q$d+S~DxIqq~J}Q5=OFZ}Yi`}*SO2iyB*4B3-IxXV-AHcyk zOZC68Z&8P>%lXo!Kl#V%`@5ZnT9FZ~=dU5LG%WASmXzQl=Kkq!-a*yaw2Y(6|1olf zHca23%g_HODiWhZs-d5PjQ!pLQNzgO_IUdklNZd_;l4hm*~7n$=s2TK!VcK{0Y?_H zt8Qi&0P0{KlJxLLC|erFqQ&ZK^u>Tdy@yL*q17XrX2gQ4i=`7snP<90jN41RWM;mR z-NCVWBY&0Bk5g1*m&Y%jMR<+;*jWUg0+L)z2Z}dRfC7?gIb2tOz>F0@WmT z=d4dX84)v+(9OsHA+_Q6$eE_-HmL4_$1SRIF; zv8&i(N&SO9m)9*CqC7(tBxW@j^UMg9p%eOEF8P{+n2~Du8x)=-PGtT)s>EkVhPL?H z2Rk}q`W{g;Y7gbb2Ho#du$k__ZI7k07PphJ2H4RZ=%Y@6fiQLw2nS2M@pqozL79F6 z$5)G_j$h^3qgA#Nq02u&PX80l(~q&}V3K}Op|fNKS6`{TS~FF%kX*H7&%azWS@c%P zqP<+Omq+cDi}tmGeQngf4wo?`+k@qA4_^*U1{RVlmeTE)x+c0__g)&D7>uT`8aGn- zVV^uT*=FBOE^UCq7FHOC)ki2FI_jElo`OI6-F6KCe94Fn> zW&JE8`{L6}Sp}DCCTqTS@Z#QaJ*)$}NHOe4uq0|-d2#PuYwD%^iF}weKRTHowHCrB z+4_xhUpqJ68El*IMw1Idjkl9mELqZnY0VIl45i!F-s*4>P(V0}m*Ix>|!xeK*NqO;SDGluIe*8`*r8ynq9)0U9b#gpKFY+!6g{uXM^>@vy+>~M?$3`SGaV_6LP-W7RgwB z+fsGk3>~lgDWqP+PdsyFUsxZmf2UQjRYJcl**1RmH%IQmgnH{lYtR#NO?sh41>?vM z``B}++e0nyHAkH-b9z|OIERmDMN8DapS3w9ZQKm01*kF$L#uBb zpJ|vi+}!fP%X9p^KXSTbA=7=al{NelwAQ{%!!35{?$|b}KlH^m;pD07dS#?!{Y=Gd 
z2CRQsiWe+xpQdKwoa?2-6Nf{MVf|FI7&Y|NM<(%Z=I^I3uHQ$7;rGoQiL8G*a?Cw` zc)`X&kTdRkeQVJ5`p%`SVpx)|9pATnNa4z`{X2zXga+8SZU}X~lRjN3 z6x6+wj;R%Nhw4KcL$1)qpf{T72sa9um3Tio_Zu{uS4_>_aVzm0H#p-6E zx;a|C|EDXTo@@y=27OBfs{r_vVB_7AG8$;X=1tSzlYJ#)#uaS*G=GI; z7G-<1bknD6)`#n_mCxE|jkEQ$$6#IMg}I}%&qOvK2$`1(N*4>(3Up7}Ozx~_{`pA3 z+B*fuevzhI)vAZNm8{(03vZYFtkCiJ0%_*c1(A*6voP!Lnm#&ve4Yy)zLRtGezLBp z^b4CVGdsBPjS<{C89F*;y_!Ci&TgJmyC(Gf};2({^<9J89ZqSJKq(2y3rF?Uhaf0=8kJSrq5n` z>UMVhQd!ls@!FcngP-BUPM^VpONAwL)%81t!M%4&R$gtLY7KY1^UNZQ>y^~wPP((R zr88@yCA;SILP=w=`EFUo`%c^_eh}K2r3+SzFXN^8)- zm5ZgDgwjn@Ng+dM?^5v!4CpQ4j_JmCyOzppscDPlTZQtiqKapw_n?i zJH*>&^)oHe@*Sb3rSd8Q#SK5!(W$1-;!@^B%b!B36;+EX)(b1ve^hv9#g5M$7`s?~ z>#q%mw*fmX#SNyF)80R}N6NPgjvcd2f}?S6pWxX4JMP6zb4zvr7 zc4%j=z*QE~(lbKq6wc6lrk#J(7Fo4fDA_V=5=wUcvr)Hlr?6tj{ajs9J?^--7cFL% z3z_BNy+6&Yx_{b$Mf(>o8*rKUUFr5kCvCvdjH-yGYRQyFYNv~)T)~tZ^emW4mg$E6 zvt7*mU)lAr|D~+a+L4v~H(BP6lGMbPZ7+d6t{jbo(Rj|-r|-w%d5|+YFob~$h>xn> z*#lwK@@F7L6H_^U1MN){U#c2&Pjn#}F+0fUn0h5MBi3Ol2gegF@9M`=2mbHot~MyH z>pb6G_IsE80tpa8S_Bdx0W$iq5Rw%kh(NMJ^kqqwV=IVV5D0;|3rLWahQw|Qv0W{e z5-qMXS~)+2YIQ=}nHo2f26xh_Gi^G%Ua;_5o;c&qbUf*_SaRyxZKr+SbMDu!R!GUT zGuzLy-0iv_6u1unS1&b!%KPE3sWm)!A~_}3Ks0ef&|jY zlhaeTa&7=>b75*Ko%jx1LIaZf6-tq9B@?#{kYq^VTVg@fnQ3_uCWkJ`A>{LjI{D^705sbOjQeEYP?)t zQX@z~Dv7e(rv_dkH~*v9}tZ2n?)4>kOZHk6+}(zp0mv8Bx= zfd7>xFEuXIrO8B6NnKO>Gc1E?(ls(D#28YZD!E}*Ov_y7gc6~jfDHC46~r}IAgq%A zchP(%?dM+^%0_+lg>oT4X^`taK`MUzh1`p|fQX*sd*^iL1PNYQ-Fgt+n~i2h3}(T6 znVYd;z6bUiSgG8pu>7~-&=E!%tM(DaQ6byX^b~Y9l8hERdj~+zgipO7Q5u;(p=t~# zi6Amv!b+AcP0oFi+Y)eYIX{+}Ty?fHx@K=rH7gqkx-f@hJW%2gmq|scKHJk#Msz~r zj)JoVAwrnLY51g6WDtW1ae?@v#NbP1%W`rm%!yK1qx*@M3KAhDEZ;X2lY084M?}Zd zpI0i`uj6S`R_q<@ z^xsF4)#N8V{0yAR-QBGq{t_D&HePs4lsbE!XOsDrr2ov*3O&XmY14QvwDZbPuwS{T{P4s`Af68phq}r+SPqjzO zGge}dM7f;lgIJ=}A-2GD)6>y95Ddbj(Fxozo_nry_M8IoMPPmURDpQ#lJ)OlDcZh z!I;jpil+mjq-jbzK8%@8I;eD<0j2aN@GgzP&WR8Xs2+zm@od|U#!+L1T(M=EANKLGtVGW28~%sJRfgZZRcvZV!H831JO7cb#5F^m9Q z8%71^R2G9sw9FMH-=!eUpMe zp})In@1JRpVXC?S18#$421UKZ{XY&~I<)Or%t4dfgXb6AfE}2MR>D;DL&N%Cm6pHM zA8sUQ(D$y(cgsJu@r}-r=J(C6JKiOP{9(FQJb7fMbk^H6W`+^q!K8cnnDKq9TYLWK z%%)jyvwXgZ&SyXP{MW~f|ENhk^;SPvUO zAW{P_)x`|Y#R|4eTW;4j$7`DtwR>Y*o{T;DY%K4&gyFf-x(~9wqq+rswzW|=pR?rZ zw()I|zRNpC8|O>QZ|r-0U&38;eP5(6>>)k3?-fM%jn>E9HS_M=tM+mGtM!pS;B@Zd z(fT|2#nIL8<(H2e!n$zHd~W{LC&!b*s?&h%WBR4?GapP6n zxDDV@>txnc&6|(^>Ekm8Ag3F)&AOkO*G7o10NZ-nGrMBj^r3ew@WDt?o|B`v#$5pO zimw|c#jtD6z2Q^4P*Aa87o0f@D_~{ItSK?geW3*+d*KTZ*aKpznnsd6=px3E#e!$U zze7;pU2s>>;rZNgcnKDa6>Xg^p3a%xHQhJe7AxF!+tKi@qv1amvMA1%!F8x@KeMfD zuG9alBCmO~>2HKx<@oc?W+R1l);$%*pJx^BDK-AQ)QJ6*$+QF4Qq#Q`MJ3kqp)+f< zgZrh2hj_XIzi(hmy66ywrG-28(@dt7^>um3iwTxhnqUp!N-G$*R^>Y6&wrWCE@MXGet`|Ev!;xbbFP2RV#t$#N6t+rULH9&K4HT&@f@a!Hn`|f zCeNAH`N>QYQ>HAi-;B^+GVy1DsfSt8w6H?c!qWOJbP>v=fGp#%iUS{Pd=$eMV7b#U zt{{p*&Pmu%LD?Jw8Q+FR9~t3u6^Bg5X@kCUW-nIN-FcdvEgos@9>8pYz6~_!JYPkI z_v%2Xeevi+Y0*iL;(m*)7R4HiotN`c3z{<4+MqGDYJ)W3{loMIlH#K?a!T4KlsaxL z8|=igb!SjB5^wE3drJ0;nKGEHXD1$>=3~{h>H`Y+I|58c0r#CgiTNtd@t34N;6;ks zgFzdd^m39n;@?hHc7g9(zo+6QVfnj}Qkk5?uV@gjAgJ zL0BX4bi^(+)591{e22byY$0|}EX`N_{u9-5-x;Ibj;_j`}n|@mN zR^3eNTTj5v$+mq72MGZ@dSrC?*1c zQ%h#*-ZQj(aW@BNnZ5m=*Vi@J4Jm938bDE3dQU)iqadj;8IX`vjKDH==VY;+$;?H? 
zSubSy4e(i;m8mPoN5H5-*^Dsppn8<{dqeuqQMNYy1<->1dPu@SoHUaN9nKrm5I3`m zM|ROHCGDq*m*MlJppcm;@fupaLY;)c0-m1yVMFI$C?}uEz<^Zc;og&)3VwL13dvA) zPYV1JYzl@T+kmx7cY{v?Bhg7Hq9f!8F)Mh8PN_NiSh6Vrq@2jx9=lflTq55RITF%= ze?s8SWW_Fhym4Mlvs8|$ZbR82b61)AU*41774T>0E!P_cemo_$ayhQL4cUQ-kuN_F z(QN%*e@>?kBy{x|sbyKKUM27Om-v?guT)~=IayO&z~0c@OQn33y6sTj9dO5=0CN;F z(ewTJ${TiS(tAuzN8SrL{ROv{spZkc-XOOV0)7P3luzE?=o`{mawpr5Zca`!}M_6u|?+id!omxy~4Y73*=G z4+KxCR7BkLm)=^PF^vp0#jlVpQ|c6bQqq2Yg`S!cb9-7e{Pma1Pb?+ojtnuCXPXjp z=lwC&xB7tEq|p5?{6DEp3aV|y;Bsy2Pw;WI(VTTZ5&LMw7Z`-X7*kx300C&DK?jY& z7f??q>mPuHw2T!HhMEb3CMKOos>>LHYDYM1gcfLRJ4N1wS-uij3{01tE@B*4)dt7E znlWs9XFnvLf=C9XfbPU$l<_1m>Z-E=5o0qj&pHSQ#{7K%IRdrj*z_r=baj(b7as}J zozV;&Ls^z>D&~yaAV>@r?&(9eN5Y*4nAn{BOi~7~_dl*h2XqW1&^}=m@zWk9Xi2gy zXJ+0l;%}6}Ssb+#pw?+@Siljzq*)sHGA%an=`gkgQ8IN{HLO3qj^T3{?foYb9L~bn z5mdvC9b%A@Blruda!ihm>5qXN2R0gq1sSz4Y2_3$4C;oLvJy*gpGk1R%*IQ|vjV5X=;mouEZ zTdFmKLEj$Cl~~0{TGd^%6k*uSN-58CYmKO9~*UwW=iEO#rzEdxHN*Y+6frELzEB$TCTHZ zWp?h2`lvu$Oe0SGF6F~mm8SFTiQ^i+H4rUVv?KKj7ZeGlimidWMku=+2x6RodkhhR zhcxmhNkN8jK^HJE6ckre=@`gGA0IxlzC(Mw-m?3X|Wy!VBtD%Iw zav~G~1azrstm)-Fkx<-T8LQeEtE!LLcOfpfKp&}}ce-8yG!!;;dsiV`bF<0cs zWZ#^nX5J3=+t}uC{fl*^A+~pHZ&-Y}Wg$oK_!gE5j@;Y!qPV>%w&L-GeG6>;CFIEv zDVZ~@P(-=z7;J2=uqKilF@;^T1|J^0)j8UFB`19J^75!I?kP@KR;kb9+~nao!v?y( z`P|3*j zLV15^)=(g)aQKB5>F^8tKCAo8Wy`_Z&1?I(Z!yA&);jR3np zgFO6UsF{)Z6Ew>)G7(dNYf*XAev)5511!C=Kt5^sp(CVv!Uw zwS(*;=cf*xUiuy^nAE$_YAs0RhSMpdeBGilV&q+r1i28OPj;05xL2YBDE~2;lHY@J z%*^Ao8z6CI`SN%}Z_gl*1UOdq!x%99NMsjjK8F# zzovl6qWlAO*NiD|f@=#bSwO22RC;>X;+u5t$8@bVY2w9(ek{1(@OPIq^U?=-4_u1O zI1l?Hx{_GstdkBBq^k78ya#skq)znrp5SSSU%}NMQwK~Jj|d+*-RL5T_WGV1T8ym2 zD_9n(jWk7SqX$9XJQ&$NvGR67WxSv=QBZxmU{kzc)6~;5xpcw|ln&xl)oti%s?_d}R;rw=^T0TwFzW4}D zTj>%q=@rTy;sv#7r#EHXM@rKMo9@1*ZPGMtv$z!%7rrPmHb2%7)7qeQ@ir*dQ8U^` z0c&hKkTH3a0w6w3l!O{O& zOZ&u!I~#V113kg@=(oW8Sr1F96!#X?M^P)fN4lJZJmQm!Hb5v|NH<7k*%o45Fa0Lp9^8U;CY#Ty)U`$9cQ;YShSRPZHL`=&MjEqrcQwE@Un4TH_%IW)|cx>?#ULzA51nL+YIv zl4!aB#BkBiu0vA-TDDI6L2;5WJ=9tH0Rf@r%6{m7&;sa(cef4iE*qF)Hl6NLP$YJm z1tGh1ZV^VMoF2G+a!LwLs;1=ENWQ;BhV8}jxvo|6p1SpWLN>fvZzv04PRNWf_Xk=; zN5P)EO4Y06Js_wYXOx~P?q7|+MvkR!<+qP`74p%pwep_2Rr!M8)UH>L%X=#BC_fWO zYN$#$s82lK`ktGlbi3PgVYkKZqdHfnyGtUg1(da>@+&_kk zH0n!TGTf*Um}E+Dj4DXLP|9kd$`wQ^6i?v+r2s;7U3pnrM_E?EOI?wbG;#mUNyC03 zt(XsS4h#4D#FX_qxQk=)3;Gz*m~%M$gme=eU!jI8{sjdD%+Si+IYgCAJa57nG-*1= zMc_@R*jD^D&JUXb#?VV1Hj*!b@}zEkQdjXwB~B%cl7|DapZ?j&j`0L&q|l9rKw!_= zp^5^&g7<66lm@)8aXdB9bDG(WinP|JVF;->32}v>B`=vcp5W4|e339pr_2!R3k0w{ zc%4$9@e;>9R4jx57-kyD#Q_P-LoVe!H8Sw3S#_-yD0cqDy*dqz8rcW zYRjDkyh2fk`I9uA1X1;Lkn@6QR1l=uj$C#5mVf(~NN<2d=~*3TfRjjS@L!-K3DO$? 
z=CM?8uEW6C?g-o8wHLnc@RHHnx&@Q4s^muA^}M)Y#Yp|g`S8-nl6iA>Soi!e*}7fs z1g|8rBr*``z|i+wx65kdWwp~eiLxi)53;O2wr1B%RV;6R+_4|ZdY+|M3&#s12Vl>4 z_}a0FV}EpfGJmRX*0U8Riw+lPGf?1L4pTOF&Q-^_?_Tq5{b#SY;=%y*doMrPJ&(d(;_w!Q~!WaXdIfN_R*;-F-g%_ zZT@}iF;@yeoKJFE+z(Mvk{--BsXxKd>a#<%2Te=1FM!`h{NzZ)(ro2hexQfptF)k29V)ww`$Eg5ST&k`jfpnsq0uSAthxT$P7^UDi z1?0Ja30o$OXl}hh@vrFUb_&SOmeJKXY)L2{n@Nf^sAUyA%@bSFvYofO9U`oQFn&Pw ze+nU1vUR$9CPC-@K(K!xxIPr}KNf5s3M)SlaK!OTVO?BU_o1-%pM-sL!oFV$TjIi& z4~62-O?z~@Cf%n3-yG8mHusD9Bc{(yy=HctZ~QJnZ-2hnRE9O^~-t3>+^~U*` z+`Ga?T^_u(>D?4$FBtIm({7zmygGX9hfe5RitntBJ#{?R8n`D^>D&w7)CqbUj8Gl9 zqZ=;Oj@5?iBL}Y?pEy2iFQ2zL=WX86RhP=g%3=Sy>ss?f^Q>+4ywx-6C4yYIEb6_n z?E11J472oUES#*2hgpNX>eVZPC+YtNek;KlUp9w2;JGoQ2{{gSRd*1*6 delta 12227 zcma)i3s@XgmT*<|yPIxkprL_=<{9WjNq8qFCLwQ)M#$4pW5B5CL}zSF#*pCbkk$Ppv-^+HUH@r9W`ey@<3E0*ncW$QaUMH6j{iATG!RF3 zw-Qd@y64<;&pr3tbM8I2esoRsf6sA+|6(>98F;?6?qtKZ7KZs1zQi8=bm8WjvxNbc zUSref8*IJ0LC2@j+qkqcC!hWpdup0!S0SIxXFSI4D&id+<5BaOr&tBm!0}m7@9JV* zIkb=al07lfr{dX>Kd47ltc#w}zmQhBu*u)%S$LqMaUt&!J@ukU-1RM9Un6ZbEJ?`) zKEl%g4lz?|HbG>2)E`zYrLP+nuRGax?!lkdUfN!N$s>QlFcoA6( z#VFe(sNgp{`w%p`5?2z$ZiL$a{OJ?sPluRD@|yFm;hMkOGGHAj8R!_S2w6ilCv0Ks ziL^-Cg7D@mwngV%|0(zUXl?=PQV|Z?61h(l$Hhi9VrP4+$HkIO$QqTtkE{epDgbm_ zR{LB1L?~|Z94sY`bvb2FDWm{^&Og^O3APE80Ki22nuvZ4*+9eQs$ey?Uk@M}Cx~v* zTi@mpoBX`Ya)==t5WNyXQViIjAQS?4jJd%XHTuu&Moq;{yGpZY%E)LFZ*ZJuEqhZn zQ&f0h^LqH3d2nb9isP7oPSPA?1&(DT-C?FyMSY3R_)kT{3@s}fY0W(j%PD0L4B;f! zmrTcA%c7s&WsHM1x)?3p{eBjEm{$Kk8+~JgehZ-FWr%14Bc{g4Bpu;+&c{mXm>2=I zqyixpvy7M)qY^V_N|~{e!u$NAHafOJ$MGuKo0Q4wY1YSC^kR|?O7AC?867=L0}DFS zAEv|EHo7R;v_vPdbLEsc=&bmE>P%vrOX=AkrU!Xdw~1kReK#mDCNZyy=>r(2$K>M; zys?qvjl2e+iH`#q$D0AdtOB&i_3_i?c)2D4U_5UHn7}6jwDL&+ReUl)8?OR*yG?#k z@hR|<%%=iO;Vl4Dd0^3)1Om*eYB9C^!eMV@c>75fRSC$fm~^6yECOWO4AL1xcBSVr zVo~fZpCJxfoKng4{)INB?B}{`=ohKgMMbV`udlsR%=Wgq8$CjCx3!vh-L1vjeBOFL z?x>*%Jw9RI(R2o?6E4q7$k4 z@wf~d|NO1y*G1a2f^4Ka?G=^R(cKU;0_1odP1%)1ny}(4paJU?Koin3@7Rd%|1EG? 
zza9_GbMO5M_gh~#xw4~b-YvSL#(Ef_x3SaTDUbx{F{*0!wnmL!!RrH8DqnM*81?69)HsE#CU%fx`!`#OGX!&xz>w zNAmVh0F2MMYR?X(PS{v*rKG$8SC6_^KWU18a`BPHR~_?0hDt(pp~7%= zxFcLSTs_<|Tsc}j+A&%=Rz21+Rtfw%E>=diZj0>L8QJncWY6A6?Y_uv_mqlFtB2^_ zYyMnI-*XgIWiZK^pE0bt;;N(IilcPgQ99u$?^8c*07`Skp9KsLKh3OOxIUkGGe38O zL3L5j0;DCIQ?x~3*+&`r&hawZo4MF-62bE*kHp1%kMN#EYhJg}OAC_07t`ucN}{AX z&7Q(F0uOj{j+~<(W~b1lP8A+)ypHH^ zrS++2-eWYHZs5do2*H`p&?_}Q;|#r4p`)jq3)ocp@aiO2jKiE(ibS)$G;k6psb%?H z*=CKY%Os{zXO4a6Y;1^i5YtElcMDF71Bu~o!RY2<8n}QVpcgafiT5+GbTQj`Y*|Q@B#$k+u4Tn4`SGQUgs}tbH&l$<`q^AaZ$|yFE2Jlb^cDV)$8*pt{k2J5DUS<+uAzBk?+APksX&D zI$CH=evzdgDzASD&nEEq?ex3(1-nJ4iW&%PsNKHCRu4He)9zb{QH!2~qP%5|s_OTb z5`vYwd+yq_wzj&ObmE%{0?}5W+T!i@M0F6*@Wo{!cha(g++y~Cyy=vM6}&w7;`S2w zD~)nugMjN%_LvHQ{cmqUp4J1k!h`h31uo~aD<- zZ6wZlIc^o*I4?7pIp7|g7b+Q?A7%#^_UR@Ql8+kt4ObHK#uM@;ZT10c|MsD(fh{5H z;P#OFnO&o*(dzTsG1XZ01?@%a1>?n<3zm2D=J&z&x_|qC`>9>Ss^RLh+EMFS<7mxU z3%D8EO7>b>)|8I1=6}wpt%*}{jMavH9JTaYLP1ri`h@muV^|fg9?}k5Up9`Fjdfh8 zj5zPUxHhuW9cgO4($q2D)X``8MFN>hg8lU8DNJhmKpx6XTV5;+3E`3x`(IcZu`PTx zZ@7Lm=WN5sg4YFw&1(-uN;dX!J*%$9#XouP;d=-2j~5OWhDt*uTrs-ljrHf3tpC^g zpSQf#@{71#@5U!zg#jlYPaOpJJD3r2PuPn3xT~q@M}z&rP-oaZTK-1W`Kqy+^GhR} zx6&o+_R%|wl6DNNIlg{y{iGu&v?|nbV(ocdxHRk@su(VLdCA!3i|mDMprZ#OHJdN& zjpS{abYxCCvavzPJ-BhwQ7}2L=xRaHOS@0({)1U-EuAvM2<5H&$XL;RT6Q%}SeP6v zhdFJ8KZd#HUrAcv7i?9`suX>yn)OQ0Opxdk^N?k>gV)b;-ME>a~U z+C8MLQ}hrYzNzZlJEIz3XB(hj#tIIC?bBG$iXNZfCsAEPD}8=pI@?LlFPxti1Qh82 zfF_D$nl`u4B9KSuUltY{EMDLKT6b%E6D?mvOcvqoH(9-+qm_%urO{d?a@vqr46j^Uq+)P9(Du0 zr!8APCqMZpdXjIj2du@TAphuVyM0BcQ!0A;H?b21|4m{BEcbsW|X(EI(l!F zlYX){j-D&ehPR3G8)*u4fy7GjiofI4vOCq#_bSp-H4SpS3M5I1ytV~gg`km=cf_4%L8jrkpJ0og;dckKqrVYb%t= zW64wSM*0A3awX2rBzXm^(Q}Y92yh0X1`kY`hqz%HAH&Ku2p&Ozb4-o_fQ_1$)Wf-z zJd0HiW0hKVc#q<{Jcr539I9dGCXgWg@{R?DwMfUdkXl-{bkDq%NcuK_Zp-WxA~o8I z@Nb|BNExKl-bMNJ-K8nI`JrSySdaW+>9d?sm3Vi#_N}bMyO+>smw$T`x=Jz#z@<@o zVf^iVzl?{m;bEG&h ztJoGsC!2|WItb3zLXW@;AOt}(bn>v|q<>kJqBF~(FOmLewS!iywtur!_f(KC4QmxkmnG{RwtX!^YH4jDBJpfL)0@27$P|d#i$kz6;0qN3~-*R6~r!xgD)Tn zrc0t4G&v#4){?id?j-~-BKRQy!2&>uUNNZ5JW4dzrDj}86{cK)#OH(C33j+ob` zu;u@a#}>miqxo+Xo-Z7$KEEJRv*mK?R@j1%r|zKp=Gp0A3s=y=b&0F3$$eXo?&#k! 
z(9yp;lrv$?SAxaH!y5W9cmd}61ji#<-Pl^>}?v~+w{vg zFJyGyUH9eob`VcKS;hq@&RP%IPRzL(O+(vK(D-R~Ase6HE)LqL3yd3$V>BKw|+Zeg;45 zTq+lL{Sd5Ravh0U0c>@tHf@skL(o(Pk|~x@@y)ni~DdxlOPDPXW+Ah}WS zh_zjQY@;vz3pVyPzR~ziQNy#BQv0II8-Im zs`9oqlH=Hfbw^nts%h}K+dP65YGR9b=2xESsHwfxFV@N;2;>gnrejB5d!kImdPhF( z`;=wbkq@3~XVpK4-bZ%+T|^Tnd#(F{;U==0mOqn|hMIyIkhOv!R$fhg&#W}HLyf#y zAxG)?XL1T<7xwp9SqPnhqWr!_NQbm~+iSr!!GME^TSM7nNsG`o6O=t_I^Y$XYK6{* z2JgYy1sqii?)@H@DXKo$1%Zmt)yHymcOhH0j{1(J=dZ-JaY$4P%ES)H7C^6nKcOA~ zWPg{@)5l)eq1JtO)5Dwk$_EmU+XwBJ)wz=}OVi`Gc0dv3-h!>M$ok?+$ zDaJ1lC)#Mu$xLe;Brx@}2jU|xdMcEYqfUcHu|qPiX-S8v(MSv&{1Rd3#z0$)hHYDo zrH5UTWp3_sHogNUelCXBHK=&4m`va3aaiCmh~tJQkRUo~?Rz#CxG?$TYYikyR>>sq z79sQO+c?WoikGa1U9|s&j0_8wy0xHNu+^BPfn?kf(nSkw^%_XGs%QF0loF)`DH;A* z(9FKgNlEZ8UMWLL*7$9%L51G0cZV8I`rM0Y1u3zy2GZuVRmE!iSaQ3#l-B$pbD>JIgXF1TDDu%GM$x=H z*5mANARQ7#C*U5${JzeaD)*QnWx&0L(WjM=PwMpX#UZ659b5_Eu*foaJs!@aV?WRa zol-il<_(S9LHLJQn0r`z^{RRf?G&&YUN;A8nTv(g4A^H2_-9zP>5@%ewdsGuF{vAl zK3~sKjWa;qLn&%0y?Hq;c_oRhru~`e%VXnS!|dHPywn;C=&4Ssb( zEmA6VpGq#3a+R5mHDa0GyjyfObMq%~eX3p-Rw}F!Juk;3h=TA^q`b%>LMy2iy$14p z1j^P%`EDzaC)u0t1(VG%=ZiMk*Z?$dLW?ow$NFxr!S}p;SWKFRnN_cN!9c;BcAJsT znkVJ+>eJaVOAF-D3okouc~SvN!sqb0b1*kvcB)dFcfuuM27TnEB%JYtK%(f3i4weJ zUELRx2z)NP7nY?Q3Snh^ttu1etg`8q7i$k|Ef4fdo%>U>Wghbah0;99(gJA+NRkvv z@mM1FBNfd&y@D;m(3Di91ftlw3=~O4wEu^;OjzT}GsC~gXP0|`YNT*}4TXq#TKQ>y zke#h7IrCA6^YK7-Oe#Jf@p9bwUvfn!ih-Xu z#>UKo=6e$${HHd1kh?80#vL%4-Z&YvQ+9=oh+miU{YI0Ac&0N2^gCy>gAZY=I}mgrz*CHCepIWR5+H5k7VDd8z5GG4 z5HL|K-iyOw>2;*V4x^fOx7Z|*B{+gb2uhJo-{yvN4_;fu$)(OZJnjimu1+Ao!nTHFty zo41mz#Ma7TRnZA~7|=c>>DDP9^4Mwl$VciV+o5}C>Z^rXp+9ZHkU`hHdcWP8K2Z5f zYwlr958G2ZX^QKMyJT{_8)v1@zM8+p9RK8=!+QpD25Lg=ggLi|gLHq7wx^@d+E>$W z>p3vM0?s{HFrYhWjl}0((&tT?n8duP1p4P!pVpQ1b@W#}xraXU>O6Y>$BzX+icjpT zeBz<2_Kf4^L35}fyk$6dxCAbncbxD~*ekEt7mwQ)_h}~6oW~u54!B@&hO5WZZtv5+ z>n!LO;UYN?E|TXBE$>rbOR^tb+P`$5v%#gHBTnk`fjbcnpGIqoY4*G&RB*l6Ge-EnYD!O z`ALPU{Dmy~{hur_N9+rJVPA8@%sB7(+(IKinUXR8Jo=9XUuk$HYf1mQzUn^V-PBCT z!MRVQhLOi}=Z^#@mn|Rd7_*MmT(FHExX512x#)hY;G*tQ&F098&Ew0qM7Hi6Uk110 zwd2cbFW2!`>YB&vnjq)%<<7>$L#^$*T8gF^jh z0pcn1fx6n@PSSmtq;Y4Cu&@1I&2|3^{jWE&<6*gWwu(ab>Mh%|;j)pw@J~mAE@Lcl zvTxr$xh(szO>nzoZFF}Eg4gY<1!Bndm+S^@&c#?*_rUP)Lzz0rC-^{H+=T6EH z$fA}&DXMDlHWE1>7{FHz7$Zc8%}CUSyk&0=-^?kgLehn$Pq9DN13Af8-gGWliEnXo zLU3C5D~{sN5uD@XA{`i z*|mV+Y3T=h_sRyLQWk-1gR=nnDQm~1Xbi(X`2fMw2p&Y4gHVFd+TISTfxA)I z5MbA272?j(8y{pZSc?>_UkN}MjpU#yk68YUDYlZcAt%55T8i<9(6kWx7AEL}*WR|A zT{UbQ%8Hn(BI+u#VdO_2eUH^Z>P#}zmA}qZnME_C$sAPr^#YAWGD{YE`qvpc3+_pq zGx0ziKW7sN=rkIj_L2CH<5eZULQ%eha(*95-R9Z`*b8)ms*BrwjmmcK6q4YQ*b2tV z())hnWKGono1)-fp)P8|un!+u)i-&BZgsJ*y$kM>Q6IB9ifZbcI(;qjhKcOKzGkg= zAHMEIfOZ^>7pAuZwQjz-6O2{f{>(>|PUz_LcwmFH0I9($gA7T=;*2TEJ1u#eLb_3a z4T!~^nJZq|iJIE{d}phtR^FwZMl60(Ok`7K+oBqIM-$c8H^F|w(<&&r_{Wh1O%`@I zt^Rtrv_6I{4QwVroK_(oz|8z91(?Ot|DK_J9VkmV`p&=SS?3kLwBy8%adqyYwLOdb zwhwHh?0@XbO|%`ot^c-X#p7LrU14juX2>?-TsUkUOTL`A?y#;$)w7vC`X6t>-ORne z&8YnZ`dPPb9r$;078twclV+4eR$&5^;5!i5aJ72t6hCnc$fMRaw}`6;YKz;uuoTrQ zzse+_dKA4qO>g`*lMT@L->q+X7WrBcg?U+dOQ~#0F&i)EtD_p23BN#ojQFJp9!3aT$nQ#uKW3`NnX31goIh%F*tP5pMusqd#)QZ1 zhqQmxZcwp#Hy9ai*V3|2tAn?(2~)84IOCL_v1j*@qX+vBhS+f8ne?Ib%cocxIsZW@aOHHgkon1VY!jxBDP)F8opIEu$b0&*j$;!7&{~xpk BL)riU diff --git a/ultralytics/utils/__pycache__/plotting.cpython-39.pyc b/ultralytics/utils/__pycache__/plotting.cpython-39.pyc index 01ed426ec1e5da03bf8a03012279bae56858e058..08c62be89ed3fc65758ad153d0811ce125439f2d 100644 GIT binary patch literal 36606 zcmb__3v?XUdEUb1Vr)|PEO&@7eC#Pw1(sz?IJ#E^PPSc*&X_KIJQ@3eaSMiDZ zegB==7a>|FEqQQv=FWZJJOBOv@Bi;V#s2MM&|KOKquHg7urWpVQq{+u7O 
zA`ul)RyC40zgFIodpmFA*RDoaWBHh6p2hQVd6rmB=2JmjI-ilazI-2kqt)zcf4+Zp zAV07=m>-lhF~kk!hvhn69a$aCkFJj8$5waccdd@+$5(gfcjH+?C94ywd-8i$_vZJm z?#u6!xKwrj>VfaCKZgs%F$lTs^2}(Y8~!tOZ-0R%Z}1t*nUe&YF$KJ(1eFZt;!dgJMvrSh8VN9${j zpIxiE1*hz~mD-}?r`#o7E~>KH#!85wl1yo-Tv{nCpiIBtTnDB4sftr5I;Bbll?_yC zC0$-E*WAJyT32FQJO4#-^A!G^83g4BR~1&2m5(Z0Mc<6%V=ANKDuLd{%b0tWdNYDC zOsX_`-sdMS)~j{x6n`9z%H;6xm8z?Y)eW~&a&oG?P+YINxf0Xl){0g1rJP%G-8JX& zlPB>cOgU4kub!OANC8)tDo(CcEjmtaP1mniRN2X`mffYga&m4xccZRXayKgOQqEl} zch%Na%XALiuC6N#9jdQkU^t4orSi?7HKjTWcI#5-OV3=$T`yMG%TCaU^RBBa^XqQe zd0Y~(XJPv%s};x1)faN^`dYO-^?2_2;GU(JMucTC8~@dElNHST*5B=IiGXZ%B9Ler4-Z{CX)nae3ZMfhT7I< z)=~GPrq~kKPWaEp#W?=B0-V2&Kv_-ebCCs0W`F`{B$ zvJl24Oh}lNFePDH!i9F}lI!chswB-|z8xP-eAj-mEA-*mJp zq4r<}?rl8%T&?0(iq%S^91dvFk%|6LZnfyHRqJlGGLIF;bvkq9{DtSAKL1ML;zytV z=u0opn2DO4nwc8QCoW$;^XQpJ`TN+V#~$JDBeSP2^Y_B(Q5KWqqi4@N%KL{-&(1#j5bC*L?$M5iE=qgOJaX}o zyQVII85`x)?MJW}@gi@GH8D|}*aAH3i;;KiE$jR2YcULx^{({*C_xgd|FmA5pR#>> zX4bb#cR;b6a&_TOxhttxsC{V*jcEQ zArTCMhM7W{IgF5e41#t@o1{gEXf;8u5YcLaT-}LEmrNGRl);~NEV4*DTCvC?5v>3d z8JE|6ZYt>~16trmtL2(+)qK0Q=EsFF_;H~H*jYMA%apBmqrQ9$%frzVteR9o??K?F z!l`60oQDxa?AZ92m9=7a!umk}ziN-iwi|;zOXG5hWGDO=!v+5?;?FsazyrZ?Ef3o@ zuI%y1o=7w5*?Pu{Dr-*!p)Fx_Ph=tbGVZa;V~t}M4W&1)zr67v35L$+I%xbhDW4{5 z6|Sa9T>DYbNqrnwQ!yD4->Uf0`9-aFF{KQQ1mVD7WmgM@SMbnb+avav)zMA!wCk|Z3>80fA54B6$W6jyMqxLPh0{7j*+T372;US|u1YwN{o@FZC% zsCubT&?ixlK8wK5I&Kk6jNv?_s*V-VS_XO;X*miG+4z(d%cjm|;H%E!tIO($I*O|S^-RFv4ytF>2{nVbq4Kagsb+<(8^NZ2 z>PwayQj=iH4U;>no>OPlLr6KMJ`4u=5nSy8Q~Vfe9akSwPpA(eX1B7jR-X6M=WDe( zhOVxQk6>Pend#&STaR-K;0eJ2R%$1)3+GnrPLVugv8Hk#e(@P|v8KzaQgXp4^Ipp3 z^*xxh%4%*>xb7JV@j8&-THDBloD(^)3b}JKlR=r~4xX!0%}vhN>s2sWuPuS?1gD0C z>rOdm-mYBf`e|vp0$Wd(va4N&-OC|C%?%nto`1&G>t$ib&>m@+#8&G?RS1}5)W4`O76NYOBpNWjkP+`V3@=1R4c3V1w&6wIV=Jt$P3qEjM)ID?8vyX=6)Xli=!eu{NoG+Di0xzTc{k?9-GrCYk9yW}vKB*_q6E4RL~jh_S6R2nTc*M9M)kW$_pP2)OS$%D z#Y&v+kgm)NiV2&GurqtY*xMuKh(*3sWs8gVJMCdK|4vyF;79 z?g&$R=`Gea`dXy6KNfMvHh#+j*|j1_v1<#&xiI>PNX?#&xa00_mAGZ=ugi8p84NvI z9xY7No<$G#Ebo2WaW9?+xw`t9>9V`y0z0Ts{TA z^C6W9L`9JZ&3jD>QfBh^51fQVCh_A z*et|XY8A2)T#$=j{n(cee&)Yyo!cf0(TG1bb9!dBYs2L(#1}R^Q=|H2M)U+9 zV_Z8BzF?<-)k_=RH<%iJ-$#bs*r`~F>uFN=?cCH6&CdFnic_h9Td0-Fnj_*z*D6&% zx>~&H$8~+Zru?Y0z6wbT5~?3}mWpd-->w1@TF~`X!3!D9P3O8OEgzQiSc$&vx!3np5HTp$Q?zBuh#vkExCSc+eA`*@Q{n#MC*O_`4Pxl2iTT&a0Is{ zF7WUl*EARRMf^G62F3?FlI2?5hx8a|aC9ruq$m?}4!d#BzGZ=8ANS(=MGq7K!c79t z63a=0xf$dIF)0W-vD;BAQafzYA&f)hv4iw!k7c$}dG_13ZXhPJ+2=w0f8A~-u?r@c z$4i0kTPyKqT53uomV#7T|6Kbyb_3Yiepsfu#ZZPTX=59hUlzZ_w7L`a+T%Pf^B1zn;Ex%nIkah zCA^>{#Eh!`+X?hQ#t-*n9(FCH+J(PyHE=t|R^2t4SsBgNSb1rTe&=Xs(eKXD%I@aO zp#_^AEl`83e+#wacek|6ZjGU>ub44ZeLZFMv8EVQLyYTp4Kx?}rsLp751bs%@Ovg|dG1Y--8Y8BOeJIf>u z{F64DD2Co(AUkVod(~|5&oiF#sy>R~irJw*f=e_k?JiYHE48u%G@dQH0)X|7ofq=< zGUWZT=DxiB9{$ZuP3YHf1NpVCOG|#@O1b9LwV!b7M8bpw%J!k>nJT$fuWl^XYx-Mx zm)5}HinT=)$bRZ2M#fwS1b!6aho5F@Hoy^Ds~?miijHh1BAffk=bxKRV@NHA#)loY7N#2I`1nQvA5giXRtJkKW!4n0G&^DbkgdZ?B#5tsD9=DFXB3 z0G>zW>7J4F)2%uF5|TR4B8b>oD+K}_vr=}>9s*u}bW`bl`SzFP~%R+rgjrG!xn;a(_VUW ztlgVtM#%2Cm#Gc7yEgI5zAf+Gl96fk5#t5oe94P!PM8q`d5MF(WQ)Is;cUr=F9H|_ z*zT-2pg4xe1Ts1$$n$!!Lyf{1lXTLRcLH+ci$>0eX`febMbkNrH?@p5&Q9k}br)Bxt}PYYvib`cAR-<$XRWBqwbI7px!IZ7 z+`@VlOn5uqMLCGBuNAMYm%BQw%Rt|Yg<$k{Mk$smi%U6OcIs7N_Lz)dKmmi)-I?4a zqmdI$+GPOq9mt2_g#{F)03-v zgJ)BF3~3`t(%-}2DuY!9QwTum%6b)wHNDKcXlZTTj|)Ph-(iFea`{K(Yds`U|7BG4 z>-cksHAngq;7+q3Y+3v#mkLf6F$4Ahu7`wB9tNRo47Q1#G~KnC@$D{p?Eiu5O3{{6c6+%4u&QUMlw#iSDyu=VP6M;0`@HVLZlf3OahvCDe}gT zAzcdeBdO;>6=Pu1BFjlHW!SV@!cBRAP(V-8!e*vbVowCDR$nvgfeGc^IA{i_ZUm9`Hcx8)76Zj}HV9AAI{E@gJcCQK|?w80=f 
z+P*{jFt(kZj4pZsO8HY=^`uy06U-~>4s)5f5pS6NbiUz%tz99F{i$o;@CIcB+hbi~ zxJt?B3&V{1xI@_5#SSsdJX;oahym;ngT*(n{95Eoc8E6G0>VTTM_$c)?i3I%oep~3 z-YD8-2{$VnMedWQ9+}Rad1EH`bg{J50(WvtPzX=noX%}b6a82K&jML}sts>)Gcg5B zhB|#i_)R2o7mcz%H_2US3V*wl|glh)~ei%Fix1*?mBc)Ro2lIpr_NI z@1)N~0?jX(c$yTTFbe;*R^x%v1Voz!Kh#|yMQ`$f2D0ud^SOoG6`%&wQslyVO;B{e zVAZNrAP5=kEb6rycunHitpRP#8HeQ_|A?P0t*;hqIcgKTn#r0wrgg6FmfLm|Eb?mD z@((fTRqU-AJEDtyuwFyptgo%rHD=f*5L28!y>sO%3iI%z{iPFMVBCnoMQC%GFW4DjO$Ko-pj5!YUGF* z$3r$-KVg=8J{8WjAFJUKA-Dx>nE-jPZbW_PTPyjbuychKKY0^-E!3EPawE6^CRHOI zwUUn^%PF7X={&@g+QKRG(<^}JFQZ|9fIsI+1d##mwwYjG&0&YlVb@LJKST?N9T^BC zv-r>7F+8=c3&Gzs_uzr<*%o=UYuDutOfX1xU1C+Fihz5`)knS97R~ag*R>mmD3rIP zyS79=3FXoWF8!$)@LVDj>hD59Q*kq__RS3=_cjr!JqA59&(w!8^F|qJcS=dE`uwO{ zU+a>Ph%sjHCoQLmp$Dh~`|)(p0|O!@z!dIP0BB?bQZjJ_m~bZ$Qj8FiiZ=l!dEi6zzw+!&n4LlXNp+Q;Mq5(g zN{)l$U@hQM0w5=1Af>RSUedJk+E3QTTPZmom>ZL__UA+fT62?t&>->Q34xi6N&)ae zYr#7NJx9yYYwXnVCgeQn6?)?JVb8q~_Jz*{q-pmpEr3tfO>PpRK~8W$QE)HGOu@(fa6&;qtY(tyJuoRh`M>2Dmv%E4wsrJyJ-11M)5*z;nCYI4R;%)>~hzG{^A zn9+#-HU>u!_*O=Lnm2DU5VAyK6wrx*HrQY&#yp>KT>-UTt5#h7-Haen&~Gznp$Ue^ z7uxWBcrev(aNxhoM3U!E;mS_{AzuOnBG`cAZh)rz8Kw|0Aue!+Zx!_4WZVxk_z?y@ z%bdkca+y!#B4QIzAdnEl@1r)Lh2Sk8x5n%Vyx9SafG3@AF-Nh)eIxwG>nQ%@H6X$r zptsNl0>z{TkdOg{aRCcrn{iALSRLE}{WCB2D{BDP%Q>1;%|HoFY(H1*)IH zN&+F8X50w;>mmaVINp9*(-&&|>hL|ioS&lppZ4GB&lkp(3b;xoga6QukT3>cxFZN+JP6pO)47I7I zd3e`|CsB}$`0=1!(6e&!$BdSYAk1f?WT64n0H6`pl12ux#-5j#>NkY5DH=Us*zTE` zIU|N9I!3v>{ZPapb1=++=PE81V$VST8X7d$(U*=LfSKP-8^(2cZSk&7we|rPDT57l zbl9ye(m1nVj=fyG4jTmUdp%v?>wOGRXPLGs$FOSRW*>%(DO#GI=?FWL3bXX|1>HqO zFTGD^@JJs zsL8@sw@46zFLwt&csC$~bOqt`367E@Hu+u@I|LvoVp#w__cVA-nkGyOwiA?oHi(>R z)lC*t*a?UZfst-dq98-Y*0(El3)u}Vb%XQ(YKoYp0QT+4XdvP{(LC&}FIK_KDE)&N zELzW?uCI+m*s*sg}IFyxB6>Mb1jaCD8SSOO(F$M&(gt`*T(?WYa1ZLe5 zZSu6A2`tsh4LT}&EordK$7TI5cqx%GQ>`a+P^5CL_=zQ`l}jtwIk8;=H~S4nVzr1^ z+Pk`bj-{PK5b}(1X_YLh{9sSR{e*dsdKcP+ETeEr3J_=ulG7=2)sePE(i6a|z{rLM z7&GseMI<1sMd@5C=wh7vP+p7u{GYf1J%PPB7FJ~Kv?)qxh(|hV(7@0Uib(<*=P#Eo%Oy)ny=ZJN0rDj8 zp`SBL+Xf^Bun2GyLI6)`!s!YuE7(m*Y3>L~0mR4hM$Kd&ZBskax6(mV7q*3rAX8=l z=aae1;FahrlA~#@6oE=gFdTsVOTrf{Yl$iAd%h1ru3%M~j1=;vtnT$_mOdU7sOF(= zKBRoYV@d>T4>Es%H)kG8bx{l+0@^Dl9Hi=yWSouznNkM<7vSHX<+9-$2|P5MS!NG;Ku8j-zxx?>$5=q*fo9|SX7Ibw``Tqq!J>Y1YJAnGii~6u z){Hd}Js3@a$xTIL*6e#RgvZ}&jJIFE&a7QF)m=3H`%#=>{kEXUrED7ff)R`ouZ4L+ z6SN;}t}P0DynK70gTftOK9OlAB3mW}co>N@DImjjrkH@v3FT1l6dfb$fL9PwTnv(B zbS0+0N8t)-5Rzdip6Xos7mL->dX;=M86(02*bkrv?RXFFx&B+qmYg|iBTJqqZKNn6 zdAVZiMdK?l-&W6DL6tjYnD5F^+A%|C1toKiiKQFnx<~nCdNirZi@MC?Lzm>DUA1{W zUv_Vl%Y;dhD%i?e4QUH^MkDj-g{LY@7)X%*Hj|MoYHE>B(|po_lD2}QW>!%*uoA?g z9XkndM`)v<_7GijHxulSmK_&-u8T8BmkL*4JC%jHxUrgZ-11sLd3j)_y(I4DecPEsMuto z8O+W?Gi5-Dru#<6_w!77n!ztJ_}dKr4ufA}(6h$Jgn0?)5euEd#U>Z|7*a4?t?p~eD-s;*CRW*HLNk`l5E#nK4pcZJ7^2=)PJk2T??>PNeh;C(V_Gi1N)3QV@ZP9mKnq{Li+?X!UZm?^?Ncqcf)$8mX6s@;{#Y-81s< zx6hd^I&%1_4mv1%;S~}jA%;hIC3NuL;%b|;XPdxF3q*LE34{g;hkBK_WK*Yh3tE`Z zv?nqT`(9z|F(>&=zkY8AN0MDAGEU5`v%?Ewkv>WB&J z>WFIBWg>U1>#j$vt9P$C$er7;Q6gBI22#Y9>_5w0=XDHgr0+0nmMLZPzZk&5DrDe~ z?U-%ZUyo%b_>9JKe;kWV0NI9N++EKPBj?Z#&l;2M-Rs`X>0;7SXjN+;%b=lcL=dVC z7^7{`AdNc!_BEn`UQExB(dW*FlQB_9>D)Q6uaUjtPDa%V;s`E@#4N@c0wRI>(Y(Rl z3-N>d7v*V*kzEc?@f%X!*0Y|RpxB_qt*6MrdtvZ!HYM1}1O@QotyDc_^wv!E4kUT@ zpikM{^^9t!_*^G-z8tFBNh-)7P(S087wLcjD0Lvr!<=j$Z>hK^plPn~VE3fdwEk)F zcVY^Z6Q)p@JJuDRK?p0u=|vDv!7qhn?4{f+BRrcGf}~Oj3WG$#))_gzi{7g;EDmTn z9Flu;Xk0*Ei6(XI%3+YFt~^&lgwDivWeF^9iH$Wfk}&t&rN?tZI~Nz^-|w6=@{iC@ zp`=1ONkr$S(!IEt2t>buXGUoy^iwFQ(9e|7ug2K(e5O4nI9D0=G~d_J1^ojk38t|W z=jA0QA?$tAQ<(1p5ApmF-)SSt=b(WelQ;5l7`Ea~{-anbWJAm8sV#?;VVb*7t{jyl1Kd3f8%g)^}SD>9p$Of9qUX|_<~S3 
z25v8^b|H+Z@s77Qc&2uPvV{E+fz70D$Eyj5aC?f>aJLp`i;u!yE~j?{d*;SSBi02C z^~*iq$S_+0M^3}zn3uw$TH^Z+2yJ}BLxx05NpY$e#unYSJniEC1m(f3r?OL_?OH?8 z@IoiqX+I_{>28m32!Rp>XXgo2gq@Qyl1JTH!s0@*VzTX6qaiKr?2T-z^cV29|5v_c z*6Q#6$Pe@1_Bmmbpz4}K{PrFMa;1d=e4V%djX{fzLP|z#ix^0js?pyr9_9P?>S|cp zN0{@E5QLco2*g4OJOs$|7jfI7Eo8?0vCAKwyHa@e>6c!9_UTU)UViR{7oUIn;|4#x z%Dkj5##}}e+LUAiI1xv+gMlop;<$F}dNrR82hhpmgcUeZ8DqeXfr=!!&kFSs`!bZ! zFL#D>DXbdC%)|vLR`Ua0NmhHw)wpm1t%i^zVeR9PSB?2hU(6bNFB_c{W+V&ZMO%FS zk9|FM&u9C=hBVq|2xoS#=uUL|zhHQ_{|$%i6$YHwt^>bhKwA2$HQW#-3Q$NOKp-(T z6#^1VP+xRXVJrkP91Au!|4=X_Kt$p~M8Y_bNDz!Lju1VBbD~QSZmG9QfOsT}zuZYY zy0-Z>99tuDV9eD{TsLw{d7}*L>oq*&A#v!pLhmW}0)t2@c79c95(VpTxFl*(FhW0E z8xVa={Vi>$i8ZjH$UPlxx?U;Y=q^IsGqv)D`i5Z9wW*>Z2Hn^Wn}8mMZ+yr|nCdtJ zreW`wwXH1+bQT=Nw(u#0rj@3v(WulZbfZwcmEHr4n&m1Jg`2$INgs4;lP1 z0~lCzvSR}G$$mlhsf0i0I0888=+B(WoX8wT7|R?;#Zxi-#xi}G{>(w!%8ZWf%3RLu zOP$cnXJ(Bf-6a(bBcdf0{hXkC@WTgCHsrq}uFDW2C? zLv9vl@<(pNRG#iaZd+;>0c@NL96%l8@Nlr3p0gOUIk-hH1O3ZGa3TUXBFMiF8d8|g zZw}*}DnW9<=*u`!>5guWfj6Yy6z$oHs{N3>;79Lv6nfHK1kPcKcNF+yM(sh4aSsQN zHg}5yl88G&!+oCRq~*@F|7A+u+@tOnOCHPJ%X6?Xm?w$x`m^eg{C>IybIID-AoV_X zmq}+z30k(_Jpl8-gIg$DJ)jP&BXUM|Ajor{I?5U_HaU#VF*S*Jw2mGN)Rdg_G*6DJ z2k~UO^NHFoLL8@Z4N@aJb>{&;SD0kjCz=9P{soOzSXR#=8(+%Q17hEaf!c< zLmor7kVl-hJkq#mcwstY;cjL0kdP=_9`wW=9~|?!^+oW-#yJbODlpD2V&S;OoFtv| z6P3jp1jxows?!$WXaIKdFu5RvNHhG>5-=dh!B9uQ3&E0$14gM5HnqjN-td#FI9dtF z5(EZoAkr9Rv&1+(Xc2NXGUj>)##fEl%*;$qC|l*J3xgzw&*7c)tqn1eh5dxs<>P=7 zjf>Y;Yt9uvR$O=MevEz5{|AYAuyh{OdhyqOmH)naj>nnSaCmF7!^j!3V5PWN zfKxQ(#~9{QCgO&ZPYFx2a>Hqin|5I*geiMf#))m*{zs;gs9LLpTG>I{y zAn9jiJRn?itb~Z`|BWOd$61U8EbN4x>oV>G$=Ug+E*4^BUnKPYW}JlyDf}7MELUyH zNcpsmqRctn`=mp`81HaC*H4E)gR^}QIk)pHk_;QCQqGA24DeF~6s2ED@#To4s+<}P zibO4C>)N-AH>HERpOo8U8XzmiI(7T^qO#4vttbxPhp}RHC4H*=PsV);dEhW}VFA;R z`(R&}EsL8wV!7SH7X#ND?AFT*uKxRoxjc0eW-#E)ia5aK!21Cv+fUsnYQ6)GKLCR| za8|ARzE@zZ{+fB}_g`M8zntJ!lQs2Y(3I5;3-Jlwr!PUKx=b+=tE5=b99W)|E8!F` zJbR)}Sl?mxao(1;WAL@GKF>7J$7(#OY65M>5GVyk2!{qs@E;+k4e8^}h09NPZh(JK z!+eR|zMtX!hD@J1ZzmBq@rJHA{vaR9wV7WU@#8hP!`7N~5}!};?O;>PIlA=CjT=j+ zd5pYmRO!cXwA|4rm^Ip{tcm*yW5vqd9AsdXmXdhgZdM#W;ea7RYx~#$GBfZrF*WSR zK|&B4EmhHHnmgiLH3y}jD%wxND})T0y@qqg4&B(5%(~gnap6KTX9uFtSPEyR{}XGr zP}Twq>sjaUiGdy&@XC1w>)rVPaqe>7C5`3uLx%xf(q2q*iRy^*csz6GIo_Mm}y|QkK>>l{3O6GmWU5R zTE~Kmh{IwQ8nMk3uX14oCdUcYKXh0M<2Aq#oversE(M_ z8vEsvuCLLE`BOBDZ3gat7)P3vTZOH11-?gepIXG0*Ax-wQ)@-lge-Hj45P!G6b0IF z638k}gVIErMFxP-6}%ugw;LR)a*Hd_y%)O?T}pg78YQinmI|P-%3jrLH1?;O@wYFOd zhlTYU#;S}Rmu8{ct*VM@dEIddRm%zCyQ(#kYqtr5xR;m|F9$Qm$pM|Nk+1+VIuC6b z{T_r-7)u<;c2EKNjNbCWaTsqEprbH6X`@5*>L!PTJ6zyi;c&Hm615X^#&eVO#N2ft zj_K(DIXJt=y2D8@qb_ITIDD8Q)jG~5198l})yLYE$nY81ud9~x;o7QztEs@kd!bmu z*pS~5Kg>m#G_h(~hQXwF&ur%rZJYx?-L4Zeb@$mpb_+ujcoa6vm$hM5g~`HWVH{+x z4L=kvwtLh&+pg8Drf+t_u(K?4$`B(7+`Q&+E@_@$W21t#**&^F2hlG)^U^n~ZHEct z+F?$!bTB6vWw1;bZae@GdCw|2<=?E%dn^_XG~lz9?m9XSLEdEtw&QBPa7vm z?bDN2uU=i-fH&+~3k{K@CLPXtXT({rU~Tf2RaQx4WLX!mX&12Rwh<_Kdq0%>73Dse+zcYz)_k2HrOx86!XVm8o{;jdck#P8Zv@cz?*_) z;t^A@7-g^|ZlS&c@d3S<*(T_~1at^maW}xFtj|02O$8b?HEz!Ae3sRH4gtGz(z({4vPZ#=d_ID^EEplMHekVGE0+FuxQjnV z)(Jie+~iv@Q=;ofxf&5CkO_EJjpOM6P9a8{qi`TOC@{*$n`2N#k9cEmMfSqPoq4=r z-i^y0Y;<=uagx!4G7O$!Vsx0b74=5Far_^q=UxxSM&1ao@Ck-An8WB3@CtBkz#DiA z(ja^q40|KDfRi_Od%MwAdJOnwcNo3d%{F43hJ!n_6L%x-$mXaw>W+!i$&BiIGlkX- zF96V>pJFhCqtZgu_!e5IvTSp60-ouXook>5Dvih0J6e3-LjVVBK@d%HO8kT z$TYh9$Y|0Q8U6n{4B5t3>?WWnZ!g9bpPE3=_d!FDUXL{QBW)C}+G6@FbOCtYJRnaj z)cOwqp&$mVZMu2TJFuKmy8$GPpsf46`|$kt-2IyeyaQW2Jh*=oDNybKly?B-MYZ-0 zm^nf?5wP*uEx5s4;nNrKbT8Om_{87=IG(%pMjpe916H0-I0o$}!{$ltP%x{eboN_H zmfX8La(9=zD>%!0ce&%{iF==S0Pj&|IR~NOenM`5psoYZ$~Euz_IWvs$|3iVcL;O; 
z09>F&^?!t?p$9y~Y|-@NQ1g(t|LrI|SsY|quOH(cg};ye7}b<_=!=os2t&2cJXiZ+ z92spM#+)AZ?)MIR4|w;f1L7(!dhHi#&tnAl$&7s3+lO{M(N%X0;!$tivNpVNSoWe8 z?*RxD2c@(aJdEt4eem0HJqP^{Xa5K&%n`&Mks7}49Z~lomh+Osyu_TphnV|2WAyJZ zP1pP&h1`d;G7`*63~TL(hk0loY2^vWKFkx2_@HTnjPztPhp~z~Pjrl5Tw{DTkD@$` zs6c=5Ylo31j&^fR@5O4Q7A$Ul;h7|^Z+dZA!AEOzxEfmoz}r0P?cF?vJ|6=WoAi!h zey6-+SY5FNK+y`~VRI7me%w2X*r+-LYIPJTAH@CSicR2k%fw7$woV{7$8XAI>yMH` zhzFKb(EiT(kL#~s#mL%`Rdba60JXK7$D0p&$Gzc|=-UanM|{v5;gxR|7 zK@D4+k-`1Q7a7QmoY35k0OA>L$;1KS7=FZf`XfodWd&~F>8;k9(+{&KLWlrO?OO1y z0MV{Ij>z9{VTi`kEno`T7-Hz=eeqLcKmE*qXuN5_kt+b`To$~};NuLgGoZQ&K(HW! zuQK9mjEHXk7O&^YhcEyi^4*O1Qw&6X_ZeQ1XHqqi{(a_usE19|UqUS1 ztHMpU{&8IE-((;yBo7F`QI(nj_hO|ooSh0}7^5#R0oC*@YK5JVcnc#s2n!_~1)*+I z4qMZ{@Ix%|A_Ku-iMyG>z(jyM6bx*Nk!ju!kWJMB;|U8%2373sooU%IK0chb#?X&bk_V*BKAdN85kWo+ z*dC0tK~aSPmR2mlq8F9>DS1q+-JW;EP(pkUh!+AsVI0t#lLODOfkYU=?b9aKw-=mz zx)>gL#Frw_LVWlNEi_1~KF3D<60)H$PN6DHMkIoG=kx@b2ZJMOQj19`w)R(MgUu^+j8__`; zsN!fy*O3MvHA0#TUR3{3kOuD4d(z-DM@Vxpcn$?^&vShAh>FwABzDaVy{K)b)rf$f zSvNy(XU^}yrTZx2`?jbc9(xD8DBQbe!S{Tv+28nSNHSRuaA|V@A2=I_U$%kee)8r} z4-P=OfUzFcnBs$M2)wr_W8ZMmPslUv6yAt{$WRobq*3fS!`O?5y&)WCnDF|SM}*tl zBWlz3vm~DGh0@qo`=Jhwfv2L;uX#QSNofQ-DZWBMXcrR5;nyP9{&Nft?%%dHe$xW) zZ$%o1=yg)v@Aac+hrBTu3dgwfy@vf#+0C(U8@u+O&F^ev7P$fYJ^&72_ZD_jfYyg0 z#q9FZ!uR9L76Kv1H}}ZXIJoaI@Ho3#3eIE2mtp}#EPE)Zs!?dFj%}mcjR1lqe0?lj{A;zvV??ru9Re=EId>>bJD^N98JILbw*)ie06gjjY7H(5 zeNPu)Nt`qU3d4TGIIrdgL&OSu$EzFXI<~`$+JTg!wPGvWRqLhq>4LPe-H%*l!T7>J z-yQK@AAAz77oY8@FBtqC;JnN5bevUVqh~SPvzlOyZ2Sqk)4Rkv!R4L~1?slkBsM&3YE`(7>u^zrletwMeQ3W87)U_8 z<(uUaym>;`u>fUh+r}+-MU*0(Px2w+Plnd$i*PlD@1eB7%N+i7HfjzC<^Ol+Qxt!} zo79FrJ7XtzT*Sa9jp6?L0hHLM_&Fqhc(J(dI5hId+pmG6)+iCBDoDR~USh!Sf8}7(B$_V+>fPv8KGt zE9O8I!i}&9xP_CJyl)j!h*a?k^Y>Ko0^`1mT#3{p?U8zCHQHYHi}}#$EgFxQzI0*)r~uq#ns}w*5iP5SL^839b2;Lw>jb*R>&oL zaoyPe+WD8}o|}6HK9sS02;Pq>K*&hU8a7O+Im?Z{u!-15G`{uVX30O5?ylM63$T zdEVb-hhu!*;Ec#EuNtET<9JEr0f?^fYZ6#mfTb=&QdIgumNJb182l$#X%t%tEj8pU zjW_{Br6GY72|!^$O(siX#R<5ad7;yEtEt7Yz!~0P`UX9G+{ji$37SNob@^TbJ5Sm7UDdZ^(M}|yV19ReiEAjpO|oC%W=sE`)A;paF;?pPdt1l zil4#~5IW73MV&Q7A)GunMBac8=Df7A<;NZ{5A7zuIe@gMFXdY1Vmopp*K?ENVC+P- zvO+Hiugd3FW!aMDxOswxBkg@+;oiwNdXm$k>uz1?TuVLnGr?C~xrvt5Vp*)L(eGX! 
z?`b?UlVq42cKRMo<$6OqW$pw0V}}ciC!c&WcO6} z8h+qd8eIsQGkG0)LQ{_J!~1X-38w;Sl*|4q0{Jq<$MMlq82(d@^A>#c;VV*61yK-g z_w9+uTbA+i!|Pj69!V-FeUK{FlM41FCKbh@z;rHl1$0*Ysmfl725|>hd{H07wGTVM zMI3Df;TFB#BZ#{bH}oZgM80+1Pu%3pzyxYTe$95$+DYCa4Pn`dxcv>=I%*w#&wj6Q z1WbGQdl zkVkCm-yp6?&MxB58EOmiP{Rd+Jj6&N#M1_e&NLya!cRg|gm(zO@JL632IY)=TdNsI zOrpti^qoR*5{@|GGz585ZhAB0_7N~wNh#6XJ3PVtP;# zL|nk)B2vR4IbCAIsyDkydtCZDz_YC=+B$@1_y_~Psip=f8l&`Kuir~s@lkb`t3cNz?Q3UE3R}Z#i%xF7*bo zH$-GQTL9fZ!Oy{U?0E|nmtTefVMQxoL3Ra1b{-=C2{>*;X@Gm>OXOk_Edsw0_4&Pq zDlJpZ&PmctCL{EF*Db>1&7p(=Ta{iC|AEB#b+PUaU&F4=?(X5b=LR5rQ0EA@1*pzg za_)hZh!||l>9EMgMQ)6qWNYS zyD?=M8)&ij1tJaznvxDGqdo!rKTk`O=WmQlD+5X_Ca=y+7n|NlyiAj{T-Z>GNWN94 z>9{O8wC{UOnKZH$$GShjwts}d_v4BFK?M1f5hdWHa+VKx7Pc{VIasyVo!oQa?POu* z)9uS-kf32mcu*tTHUfaJDKCgQwm!xy?swRNWw4RQ z{Klv~(|;C^rUoHE1n*Rb>_FKdKMJe50?yQG9Bv?!!s7!l|1WO@mh3;z@d@(H99pf~>s-t&q6hiJq$2cyeDZ5p+3f0mv0mvOOZOlq)qVy{!!>!RYQ z4X4Wq9dkDoaM*(R4efbjTlVYlE$0zuuO5js2)?L{pcMj!Q?vtT%2?U(yS)gW`&Y7=5+>np?(4YJp z!pn1#C`-eeFkd#jV1mC06W9(gx&eqoog+^7{wyQD#NZz?_$-43qze4u<18rTT9~^- zCCHi3ALFwh$JJEYPnc%o3=DKw@IBxsP3s-AvcAlu-_K0H#DLl{v-o->)FD$$!L1ie z{@>>E8bBU=w;fCAC>GHn{OVsv30-T0>!P(bzKol$wUO^$8-U`?n!u{SnuwV-0lq3+ z6?6#;(-m12>DH=%^*4ZdoLYlGihNxw0xN`{+Ui&l-D^NRLAT}~XXtuXgnW)gRzy<& zV|0$k5_g(4nLj-5oVxVAr%wM94)?DxklD)UU*$DFfu;W`19|y(nxfxjJwhaU=4qlm zPr!c@*F5 z5pcxtpbfYBF^Ko@p?yvKkYe7%TLi4~VK5>FQIEq|E4;#6OCMzlIpO^w0fPuw{^LIN z4!n~=WhD;u6zu+Kwg=vjymPeK2UmLd1Q|HXsGER;U{Zmo!dHfoUpQg_6!iWA{#htM z7ODDRmV+~lhHs_G71}(Bj{!lwh0^=sos7Q&Tf*}OvEZU*{d}3QiIAY ziE!*4aQfhP6FA41p@K~97W1#f=BV(+W1G8Rrk25J#R)3iV73?ZgnaWBIBwxQ_Y{8= zgD4!}s}+X^qBuUV0JS&IwL$I@^-nRk2vcu-YfI>l`Nn)st*&u}G$g|k%E|!hD|9kkdl2|wT3qdCg zuZ8%C{xJq*(kS&qM;Tg>+<`TSpSWQTV8JId4`S(qc-}~YG2HU297J zBRryGc{$#whk4gWLk2j7<(Vy*9oN=2@S!xp3|cz{j)5($*H+B_ArM0wcx^LIgkW)w zE3k1Wm;)O|tp651O2Pz~-$)ZKvx53_+m z!ru^2)&Glme+2>9CLH9ffrB-t^~6et=mAVNpo3g-4C*R$v4`^Q3I?9%z+`C)&-s5y z_70=uNFO0nf~I3u&Yl2Yb5QQG*tmvle$N5i%~wzp@Qu&0O*fF|^5x4U_XJOSd3Q_y z4JNi^R>GeXn&ODzM<2kIoTq~rHg90>FQ6QM7(4?PHezSiHh2Y-ZTU5bILAy77W2Kj zD8ArgthFh~$akA{f>(=D*j@JdW^M?37+!!|ukK^o1QrQLu%9uc=jJ=1f1WKK;_Wa4 zL1akf<=mMVX8Uo>d)?80fw8o<7DEaxxH4fFVG3pAm)%50v4cOAc@ktN^@LdiPhwIc z4wX4(PO>=cM!=fxh_e8(5k?1|Pecd4J;ML%F>d{1!7!OpKe5Ap9TMZ;xbS PR`y&bmw6mV?fd>OHdj*! delta 8715 zcma)CYj7OLao*Y2y$`?v#FGS$00`np8hlCAixx?Vlth|_#D^#eA`KofdxrxKcgNWU zAJCjm5=fbjO^L~->_oCEKz`U(B9|O7b{s#Fl%sO_6-BO!9eK-kT#+jl6Ia4=Ipvfr zS^0Vvd|0+q!PWN8O!xH6^mO-h&*E3l>EC><9%WaSeZk$eaBBX-#{bD%bx-QQ3~i zt^LK?R_1SKgSQtVyoWD8!{*u8kkdY|@zuPS_o@17KFnA0{xfWBm=8E6ADm|eov-3U zXvuMQK+EMWew7)FEnfVF&e_Ec=0keN4Lff=ywh=Nd!4+flq$u;r>o`ai5vZA?KN!Z z;#ckWGM4kr=qFb6(>&S$G~LwOSd#U#Ui!VQo90{orRbk@tp+YIN)=D!P|BI2gBnLywNHv4xZM(q)*Zt_2xSl_e^MVF)l01EnY*^lwXc zSvq99>f*PP1-%giPg;w*vgUPht7Mg^XD!&0EizCzUQrX}gfXjF%U`7=2~`$QocNLf2A~0 z<<5xb6$Gi6|Jj!J*;EiW`*vm%8}o0;j3kP0K05U!&!5~THv2DUZrpN-c33188NekA zAlE8Jso`Byh)aEu9oFks z9hSoNE=}5<8aJjb823^?ZlE01M~l zc^!ij!g>`FFX&IFX2JYOwMe`}|LJ4X00{@zOi^8--Um2B#jrxu95FNNIpQGI zjEUN8U{z*kfPNYcHWphIkNE9y>HEZv_A0wKrRu6Bi2mbop!|}n#SzX%y^MF z3FA46qtxOt|K634+=f2T))(Mp z(%kCONMCuR086O1s5Thrb!{2h!lTog`MKYp-7{~OJJan+WL+Q_G< zS?vuIr- z7@_Tnl9+%cP)sNuCqOC_n9Ar%R9Cy$8t$SwZPh$iIR0xxYr83Q4A}HoL>)ZiD4fE! 
zRRtgOSFGOJ^a9X-hR3}fKuh_Pt6$8L{fS2ba#qMtKXp1ZV?mhXSYfM8_(1dT8{Uvk zqZr3S$CUd8|Eb~2jju@yoA!i9?&z8iOn9*=oC-_6B?=#yxXuUKaCxCyk6??VI473e zvV=>LN$@*(Jzlx3C3+kKQRM)1e`W% zsx&D>VxoS}nmr8{D-weM0gH=Ss*r(-(*$Vimv%QD`Zw}U;XYrLU|_kma>)}7LXm3< zo!MhldV+w`1GS^`sOHjPFz{JIHMDJjA4D;*5sge%MGzy`^9JAa}H+&;DoD^)cOlb=~qt&KeyR+kv`E>;TLeqC#|40wheZ zBjdRXh)+=4et?5H1D%qDH58HfoFYkzTZ!a00_2IsdkKsXI8T7QjUXjf3ajKC4c7r% zZ9LV!PC$qn^>i};$Zt~dp7_f6V0=T|jQ2$?g%6K_%EeFLTQ>B+)BK(TTVlIF9Q|LX?_8pKq5g&0`kYB|vJr_cwOuM;><;O_~LvR!%uTLYPVj~WzED!7WObe5EU z&QPkM6d2V)0f{B*OQE;&Q$&td5@(RMM5#FG!ErbRPyCgC`u>M(VR*e`NlnrD+#OA9~eFD!aQGJVwQOf6e&HF10{CTsEy88=yq6-sj`n6H2=u zp&3a3#qpK5DIOTj7B&wAnGJ$fbHV)&$iX6g`Py)M_7f!_steI(E65^rEp1&LDWWfvrOr zk!Cwpz>v`gndk$RpzXXe?>?00;!9~51TsOs^~zrk1*((1x<9r3Umo4J*<1?Jr=&?i z`T~yN1=~Y5TsINITT9p{q;&;bxBN!FrxE9RJwaaYuvt&mQ}uMc$&1OPOdwoMdhvyX zOmd@DTVx;7P)kZA%w4E)Ynq9FMa{lNs{=EY@{p&?G^M<%)TEN%NWtJXuxSL2M!nf< zlEClOj+7DZw$yP>H)KK`?Q19qKmHh;bCYa2r*-3ofEdX%)!J1JF?VadO=cFFku%4* zxoChvMmEbf{L&rTSzWf`*F+^`+wrq{!PrV%GctZo>(LOur)AT;-lNr*$z@moaH0`- zEm-3+nmJ^ZUaDdCs5N<+g;v?@wW+k9t~;WbUb>)D1ToJRb*zY{h#}8miabS=uD5&3 z(5GFt^K=jDcsC)+Za#0-JG>4=Cdg(;1{cgDpR2bt{vRvunCis3+QPMkYb}~sVGE>? zq4Y)C2$%jqc1(52R^EzpvW*~emu_jx7Rp$rdO+WH76B8k3EC+`^OjTg#vA4|#ac-1 zWXrT(@5CwC*#*tv9aw)Ch>Ur5!4#UeVxbpjVkg*iLa<=8$fo-0-O|9(P7zrrdiTg4 z@Vd<7mAInqR1w;8*&WhcFvNb@y^vhQZNxM~ZM*DlaFfT1x}_oOX}Sd(y;YOl*x$%f zTRLp3WR`YJv5?E4blsr4*0=Sh-|X8Kwn6E-r}`GMi%RTAJgDA^davw~D-b?5`n;90 z54)C?QOxKi8C~d?eX?(n?v48DeOQsBL-xzTq(1B1&R8Car|6mFJy^}A;@yC?1BYst*eodW2&J|vFje)I zwGrHT3P!_ltX@+lj2TSZkvJSPZ4^&1`YANC>t3Dt*7lyFvLAa&Wa&VC?49h`i zj#V(oJ#}dHRd75xIe=D^uY~&<0OcCgS55125tTwq6% zOX@=Fc;&ivSmHC#9Hm5R3kPUD@S=KssJ>bb$@H{=9&gFjvI!+y3}awWz$e2S!nTH$ zZy9cc4h61cSPldE9zr1%ze23)-pImwZ^I&cdJ49l`M>-fb6yTK;v;!Gqri>Y$dZ%yI{L7ZP1wdJp_2cI{q2#-6ok?WT@%K+emT? 
znp#w1tqv_DY8D5#(o()qc)RFw^ z75CaTx3~$7&oTd%$&HPBQ9OuH1|I`7Q6`}73}&dL;z}0AP83gG8V1N2m%dNP0Xkv{ z;E1JgSOwK#GDU$zF?aasul)D$B6OSxQt(2p*ik>BLKiOKO2KPBE$ zjE{ve*&@->c!5>J;#`4*P7`la>pRpM#qF%RDjiFz((JKXx$3!re#{kLCaNt2&S}~N zCOcDg^Q8$DkcP?5E<#d}ifnBnU&iH70aM9WDpfC!8xusWn9n~G(JzU}RspdqUZM({ zQt_-HbEb&rISuNxR`rhx(kQNq14N1Wj)|TyzDk?zhmYC2=E*TiGkW z%#{){H4`yo98ZQtjbTvHU2Z?>LqTjUQOW}PUoLOy?L;LcqbshYo<{9abW4Jt*nP2M=007Y`=(#6W}C3jvt`fC z-g)3q7T0`!*9Z3d1@~tE1-G+-d#jdWe%5tM`N}wuxYF3j)u#iP(fde31P+Q<0Ab8Y zY9T9BB%x!V)GDW6qJ&Km=bA&ZtNBdWf$TiwI7%gQ4`Pq9@4KnEn}G5_d#R-Iwfm@a z2LbAWG1R`%gm6Wprt&+-P)yVqipjf%xQBS|#a12)2U0Uq1MMLUO%=;m4QjOb?;cG} z^xBG)$oBf3voZnETCOLMY=x<1tL}e(cFo~c7&c%dITN~_7t@XqXE9t5qkDnyf;Jwu zD_@|}C1O-6&g9=@w+6O5Q>}WFm>zW#`$$V^qHuoS)%@E}+_dML)c!4M>J~r1Yb=Ei z54F=H72Hq4ZpHhki%vutD*YRkE)n>50zU%?%woBE3`ykQp6EF6GPOQOfLxatqUoDt z5%{R$IWs|w(oV1B;g%y$1QCEBax(k`3-fdE;ggG>5b6zpT)XJ-b0_=vkt+~5te_MG zens7kLa8WTBg8Hm#)3j3$Ll5h0cy3Vq#VFTs@_S}uTV$k2;2Yo$!ycl(B$5QCM_9- zE%@I&xq~_W*qMIzmwx@s3uzX{#03MVUS!VSb@s>+wZlfJq)Mr-0Wb?S9apYu^>hM= zzb5bqfgDZ1K?zQ^R>L}Rjf=A(Mz}QLKIWTGW;+@)%=ftxL3!u;g>$#)hM7NzEsnN1a$8oKoca$-;bAY+2y!%=18S>8W-OrjcXDL ztcl6l%CtH;#cCS+n!2r}>M#LPyr9WlItRw{d}tJIOJ&L?q{5N*Vkj_6#I(w87HQ{}T|XIvfEv{?0FKh!!l6Ikjf3!)Q~I7pnb z8{X&~Csw3=ft@*4u1?^x{TOvs8oJT{(MNLnCf!e*U$^yfH0Nxwhp-j&y6djHMj=8H zT5gTFk($+V{{^@pLZ@Q6bSyN&$NZ0-?;BBKp$Ug8A?*r``*!aQulm$&jYU|Sz=Z$f z^LOu@Ag;|+Zy`XZei${-{fxS(2rP)K>WXIwwT{4ULOBFx2t4jT`qa=pJAt9|1qP#& v(NGD8i@u!xH^N8{+D5ve?WJR_b4&Mn-J~lK`Y@?}zeqMT(`3mT;)(wS&1weT diff --git a/ultralytics/utils/__pycache__/tal.cpython-312.pyc b/ultralytics/utils/__pycache__/tal.cpython-312.pyc index 1b9e9bb69661fd4e6b96928976620b0d31371d8b..6c9ad96330adbf4df06ff62801e46d0d9fd69218 100644 GIT binary patch delta 6768 zcmZ`-3rt*RcE0z{`^C&K%mBmd@-Sw23@;m#U_-!;?Km-s6FW|bo$)aD0%L|5eD7cb z8FrkNs}9{rhWzb8dJGSKX{O($3Ae8hYjII#JrnRh4CJOWm#7 zP0zU(F!nkZapwQe|2~iJod5sb75by=bl%TxHZuiZCh*1ZkGtpdis+xr?`|#}GYlAL zieork#5iUeFwqo4@mij+ez%v?-(+wIMGaUO>IlUdu27tjXU-Rq&%G}43#Z}f0h`ib zf(A2hyQ$gip2yia%N2US&N(6>7lAIk`Nj?rDEMeOZ*uC=h|o4UrT$IpTm7vq{+5tBxk@)zqVE4G)c=@Tpm=J4=BNRNqX#sc zg)Eq#?bXAYX3h%RrUxvX5o%W6#@RU275e7pTmyN)F<+s!>-H2i3HyD8^5w}cQd#68 zM$31L*h?ef&vKp+$eieexj8lVot;k*1lZOtRGF~0fIh2dt7=f|Ubym=}XiCt`MX`YD0go#b{_!cn3gf3sPX@)&-bk3#71?0!a5Nkb1WL*O=Jhi7DbiyvHogy1 zTA&j2yH6DqaC9*YN>BkBev~hv1HPGKHu(&po61){vHz0RCaCjn^105n^0&^DG<}Vj zT`jxWm-!)H;G-cP#K7@vSY(42gW*VUFv7FJI7$OQ@`CF%a(Z{!%9`s7=EU8v;XCPL zWDOS{lMSPAJ}BJ%J#c*vStpJJ$9b6<6lF^^FczE)fHCl*tmpWP;SewD;xQpKBAbRL zB9TBOJjx4p^6#F?!*U@Anh(VTBjMo@UW^A~7kMEP9EWL!`FJ266yQgcwLzG2Q}h#2 z@xlbpPxG>NFcypWT!M?V7q+y^rr~%1Z4j0XnVl4neXCfP7F3_BJ_$ztI&?Ix}wU$Pd4iR&QV%hX0aVp{lt`A6#S5QW*mZY#&b>==?@As1(&1jP&)d_crCNz? zUEcF|T|er|I(l-BKFQIy){%7_yX|o29IWJE)5TdwL(bus9R8(?$*iN}$t{mIEOkhg zZCOWq&e0(`I+ibH9o;*IJ)w2OqA$qXp!Bx94F}cOl55y6HSAxvy62|SzNOb!&1*;Q zYnVp!F3DJzGR`%ojT=U#+YzbZ$j8Ra%LAD+=W=Hv(wRuMVT=qG|0C@|>tw#7$ZxdX zmFK+-?J5GqAUGlUqA6qkJJ;z8_>7?ERO(8vz8Vi`;YgT?ydQL+p$dB^fMF~s7yoI6(LqfM7 zIwp+gE5NgEP=i^7+J@~r^b23D(@m5`)J{? 
z@&mX7QbExP2!{eT_WSaMQmmky1Qv*_o92a>D3l?i90`tAja(avMTcd5PzXkc!8I<1 z`Ab4Cjxm6{yomlH)M3>Qz#Iq#BcX{%FdmLYWzBf((nEan*}hpHiYl0BP-r8SRrOV< z2N5B1r^s1V<~_ikt_IL8aHAh%WKf+>9KuoGU-@p;Ioh#a%H~R2q|%nuoPpm3D*h1T=mbOrj`@M+rWTsuJFg+VI+sy;ym#Hu9g^Q;2Q_Fi-p_NM?HpI!w~pfoR11GU`NHbU&E5370feaqZq;-Lg29Kt$KkU=Yw%scWDG7w$H4Z z-9w0M$KiW$q-ii14~;$-& z_T1=MZIc?hvqe2?HmRup-F+oJ>o#xB#!5Ce?UZcQ>9e_Aozkw(Rj0J8YxQLAz;Wro z@rSz7Ruv2AQ173D0fA!phhaF zNuQ7kcBf41#Z}iUu2szDrI^%_xfAP7??U-}d74T0rY~fjzMQjJayBp1%dJ`G-dR0# z>bvbJSqRUE)83^+Qf2FMpH$hg%B&82tOc=2oyRk5f9B+=Tb@^Qo;M`V8<{gLZ)VO7 zXTm}zelhF0l(O8hdvo>*$zGABv-YZ-y;iczBYJBfv1f4jdl_?Je?FUKB-o% zNFGhwXOlJJ#F&6MtQeyjP9VYzBc8Cx-iDKho+3oBAqqs+4Mk$XxX-8rXC-2*zM$A^ zo)WvqATJIFNR_F!TtHhEjX=;0@KY7F3pmpasHo&O5vTmuYdsSHL%DnV)w(ONu z|9Zvl>&a`$r3=}L){M3E%kt{$qt`~4>VD`?-tey(f13AEUgmTlcX~)VJ#?#lIQ8PX zwIpY)k*r`XnYzwgU7u9fx7M1idv@)@E$ee%7M3o&HUCz+d%0)rXtwZpuJ9G9@Re-g zsnpSRPx(S*K9X+BdTLWg?i8`t?bqx}Otz>YWm~tGE)J(BGxp|;v02f$Pe*>Xuii)t zvv5+MlRVhxPh!HOXhJqbV}W5I$O&D@szHJ=Scyc!Ggv|ZkgZ}IqJ9YAeI&*S=mP@E zSGH|lXhJ;X=K+W*1;7WcKp}u~xY#!i7-7YdL1e^-njY@o8o6%P}}HxB=Y%_Jj~Rwmt+F ztlxhX$m5B|R(ygK!`WRxY&s*E_ZE<}-5i1JLNnd)r0haamwG(4co;W)8bJ?>DXD~@ zAph%7-I+UVrVjNFcU_QtEQ8R4Rzy@9W)L3C+_Uz^oPCdE-?I$aLubz3BiVb_05|$F z#y&-op)J9|C~EuM`(dI-&%K#QQ8N&%&u__!6m{DUC~4Q`EN5Vb8O@A#MmM8RXcL-g zGt?o1ZbFbMA?o1pnR)8JK7!zU__yb9p?Xtc60dwt~p zd_D|VNd=`)P%40mSW*BKMFrZ%#{sw-k4M6B;caB)2~*ey^G4YaiV0C(5M??j(}O|+ zc{C?8!9n=P-*NP6VJrZQ%>pNzg5v=$7Dt{b=-(`JA|?;GnTOwwDA&Hjng|9vGaO(1 z2N-ghdf=s8m2mmYZ%7&LxJwt#&Yw-UE>+E)&AOXr%_%z7nhMT2*KK)o&e>BbX1lPV zr}ByxD`(#Xp3A++%F(lc!jFD4pyWgi5{sfL9dsWA<-_C7509Gx%^3(mbH00f1 z9Vj;ZCvf1sh6w`s_L@#<6nfH};NUES?YX4(R7X#|JWEADI} z&aw)U>dq(mW#weGJD=8&pLe(R3bzTco|mcT_on2|KpogN=ZuxEJ+-p$)F(7T4A8+3v+^$x@!K{M1sN(bi-v)$2C* z)i-C~Ts)Dc7Z0ZoW^Hw3wkH7Rz1x#bDq`Puz2UbC-aqpB^pzgi<^-o?gj6>cae1U z_Lg{n6#{VF{%{&K#E=bbSwTYc17<6*oB@PD2qod}3JbjjjByRAY%2F|Rc8!;<1x53!CPdMV-;D42jQhO7WJEP>LlAM42w@ItKdqwL;$<7b;{eK z|0Ew3V}kE#_NDE`w=f;Mp-DEfb%SCfJ5qnCk=^6-KROq@o51tuH%@jE_5~ewPSLt! z3iSwq%_BTMORsVV**qXjq3oc|=O>cElYG4MAMXFrw~C!x*gA#clY$a|P`6;<+m0z~ zLh$mb5yMf{MQyTaTXwQ}Tu6c(m7}Q+i`VQ;U&YctA^8aqnVB4tnW-U}o|Ne+s7-yt z+mEa|wt$ZP2@E`n-f|NPkez#|yuz!2*#P+P!BhY|`0c}&lYJ@Xw!Pr$)a=xvH*2r? 
z3;wUd?}eAm+1jp@>+-=ncIRB*>?H^_V>w%uWUGSkRGqO^Wo>>K3}e?VwmHpg=iK08 z`}_zZ29ti}J>yc_vgSso#I|3)aJl!kDer3WZ1G(0+=aQ0Me&xYIz1+t+AsIrwm7bK zJ~jIk46a%fuI-*XyQH~gY0PLF72NmH!?17o?)3PWlM22=YK$lYug)y_^WKVN2rBCP z5N39=CN?grQQr~>z*Dve_8SNYe~E4S<1ua`!YgU5pah~1a4>?O0xOENg!izDNTNcI zasQYys$0kc4@60c=YVWzXqvvSr_R#0&nWl5QI)@x=7Vh`aR=WFsxtXrJ=he_b`hnF+Hz`~A Ef0K6|Jpcdz delta 3996 zcmZ`+eN0=|6@Skk&(Ai-fDJJ=pTP-nOfa8;5<&_kB%}!;A1Dd=aE#x>b}%-%&jdnf zp=zd5sZxWkHpr?iv|TsaRB0k6m1@(H{WGQ7r1{0!jNVc$ty*bm)h#MfrB3@g=eZPV z+OFi|d(S=hzVpuco!`Ch*Vou@F04D&33RqnXU90SVqH$I*|q% z^S224>l!p+n0{Wc2;611-zXR^GyNtFvxgD*%Zy+YHRlRwuE{q09`DK$vIP?uSp=(K zhW~6K3yiH7Sx?UF<6;}D@B;(!aepK}I>FUe*Va_mEWKWHjMXGf^k21gTssL#EhgGl zS5HUlswws6(tAxd`un=PDT01H`ZqM9#hwlY>Uun^q7@>;k5KI~D0(Rz7!eiSg@`z& z=!4>ZkrX4S&^u95wBiViWBx!a z7>>h~W`LFG$@-f5zt{T0@nO+7c0S}2#Do}3BuF4Q6p4j=qfzKaFD4>E$v2vaL?vG$ z5Un1$NdK1SoFXOQTo(MK*Fj7(j|-SwS30Lm&M8~Sm2)blyI1l{=KQn%7kZ{UW(-$y zS1bjuo_YDqe8&R2P@A%BN?W|L#k<(Pcs^yReW44CG!ODN%z0)#^ZkoDxp>=6ja*!J zvt!wLr&E5aTW;u17VS&+9=e}*IGuN_PR=`)Jl>x?6-0hG#8}JF4^pw zZ%>&^(&mk_dE-KT%3PV$Rjx8i@rDwOevD-p6aB&V1Et+%W07dIY(OlN#3;PCC{&wF zWzfFO*Dd9HD|`H4tlexfS#}v()XLJ^y{X&sYm}>OC2#l2;SBxdLeNU62%GP zg-B5JSQM=^I;?1i1LKNuG$x%N6~zgd7RN^dF+t%1Bm`5$3X2u}gh=8N!GMwq6mHb= z666v&ex@~ zOQ!d%7_wiw^unbVpSyDYp24x=EKfVVvJ;Y-a#r8Wm7R6z!WOx(Ww~zo(%p_!;lY{g z71vb7wOvy$|NP?A9X@62PTLO3wu33#p&5Q9 z&pB5$TQv`m*f?W&XfK*OID2rRBh_KAoIbE(cFb2UG$+m8q|Qt54TJFeu3^DpTaAKv ziJ4-OxPLRFHhw73x_BQ6D6&Fd#co>_YK0ihO!Y?=+~nAv-9`m9x*%3a-5( z<0+zStzjCM2XQKt0BSRDw`+1Mzs&V|;k?Rj7GFw-pDk~6;gaF`Tf<%JEhszsL)PkTT)(N(lUbT8WeK3^%1W^Fg zZe?smbJere3%RMhiW%K2h836lTHDpO#qyM^ihf}&nR;L^RN>LRI4HXtZzkTqbn8;e z{!H55CEL61Tu9k_?)Ubkd(Xt}%J!RPszH8Ie|YcCKpKsxDUMx6JTY zsuuVs98>iSd#CyC@#L|S>0=@JSSaNU)6a5lZ&YFaKf&_}1Z$2-uuJ!HO{@b=;OH?X z;v+*Ub$B?H<*Cu;nIco84~kwCm^^Y2#UT)iSxsXk2APz|VN_$@ z6r-xf#}G`dgsg{PTx*#E=mzaX*--#Odo)6SZY#{cjK(j5m}WjLs9u~{ekxV4oBqW% z&X&@Ey-?GjTbi_=)jAVMZfct}PnraiV1762x(*feym14^UB@D!UsgKk?`n-%lZHt> z7ULv;uACk+*ytfgv2N0k8EBbvZoCT6+oc8?b68mmecrKu3JZa3L}3KsVO7q?V_S>~ zYVD{;x_ZL*bU_^{y77x+VMX^$cf1dvuF^b)QLP%X6?^ODRDnY`2t^l-$3l2e1Y#kP zSWu5?P!1ZtJxtUJB6`$u!DwK3#II;a;$!QS;<5byJgF(t!~(=e=L@$MV}2xj##$QG z|4c4U(LWb%;jW;?2TK;`S$6Y-BKN}a#qnkFuKRw`!F16vxd;#BRME+F(STe8P#01~ zB7M%)#Fj3Zi??yRRCuj1N)*XJO7t9s*mc6TYN#?1MOyP%Gm%kAax>27)3J);Ts2Y{ zJw+FZC7OwwzE$Ba+k*xeHIGq!Tc8h>B_tBZnUnNUMF)qNAg5`q+sD2|1MY{p?RVG@ zb}v`nJRooFxYPNEtZsUEQ;kb477XZGcAJjPqQZJ=anukCvdc_vVfLs@s{d`AX<|=++x37^Bh0P0fPBKEuKP4;Vqx&rvk9o z`YpnvsqV>O5)X|`pmFg!etSH*nQ)7!Mj$GB2=DfW4E?GcFQC>i7{&WYECa<5T7Q?3 z%vt6IU1x^ZIU&H6n)g{tH|VRA8q!z zxeq}r)zAT7*jSr#Y+D?b9j)|%&(*$gI=!h`-qgIDD{tDdd^)|eSKis1Jakyzc_i6) zBHbs-ePYr*_+YE=J@4Dz6>oFeyG!=&dZO19wzD&q#~j^W69NMH$5TEi?)sMMQ#;TB zD+t99i~B<)AdnNNLP8>^P@G1w4}@az`=Rb6DJa6Z=Lax%fL20&c{)=$&1i}Op)9pi zrKPm0O5e|}Y8fv7u?D+eonlS;ny(x-{pQuJ8}-#+?`5~?cc^DRX#}4@^O!1HbDF&Ts=Ak8D z{qAU=mr|ArdUCHH9R0=K)RY=8Bqg;ZLYM3}_Ix~Xd6!4`Uv3b@xks;h0GAbbgkm5f zUarT<8@T*U5E4?xI$;8+!LAOSBJaSA9sNPTfIqqUbIw`kJhkj=SC3`@&1IPJu=pHk z4|jPqN7a;ia)_1wrl)l30;n@AZ}$7)wl78EF~6U@ifa50%iOZK194$ADynEx6QhPg zeg4}_22N_M_|{?7u#Mb;wTM*-x&N`2W!Y7ZVJ#mq`JXZ6A2G!rTb%QaY1hsw*;Ta| zPFb3!O{-iEyXC9RPPXbv%Y=sAF~9Me=c?xkgZje^i%Sp;*8zL@6X;RARQv|mAYTK^INN^3bR}emfc3MB+_jb zl%WC7dzM#j)Y*~cmE>>Lm?_R2E*|DTH^%#~!xX+!B2+1{Kr;z;`V= zK?mp{d^byn=rCwx~LHd1?`QQhCoc?>Z*!ML5eMZ!T=C?z~5PIl0tE?9m z#tJCc{?#U*hH+Ul6XuV%lSkCinmHe)MR=GArn z*X*n0O6M$l?M1jR{PDZFuae?DlK(`#djC`S zU2{~S(Z-@kFZg~N8uvc|ty2)GuBRSO~hB{1o;IE(bf2^B2$$e_+!~C_r^Z61C z0s|{?D%RKepZhuk4Ei*s?hssaow&IiBJorLM`sv1R8!gZpJuAVg}_XFt?Fn@hZ}nagfM_W7TfM!E!nh;9vMvA_VO$Yeg&=&oujC$dw|&nw_Ebw 
ziQ%^1m0_&19WYL$n+U-$n!N^&#$C!4u+!L*?(daul~FCUEtDp~?s|YZj8vbx)doPH zoyQk3B!u2*MNcG-&ji4zk`_dYd3<_D|B<~oP6k#+ZVVtTK^wBG_~c6(-|L~;IstELM2U53QuABthOENeYrQl@Q#ixA*2zT5S|QHR5q&|A zP52;AWB}aEss_Q3x%ckG`>8HQ&Q|z4YH>D!5BfO4&}nR)1u^UNC3e1u8U&9>f&Gpj zlLkzHbOE*LDqte=TkH}Jl-PS7Thi{BgoGJZa0GKNCTKF4-}b#oq;Aw9B{)_RM_W|% zIKF-y-^`S~s8V+u0P_>5d;-Ojd}Ys`J^P{gQ@A7$l<{#O%z`$oS(;9UiB9zY8Y%wk zJ^Swb?fb?*Wp?4$$q}B;5A$D;gM7Sa7yl0#;P2@}ykHxAO5M}N{FB7mCZ*h$Q7_GL zm&owEk05l?$v$z%B2}p`wH6Yp(O{#PIvT(bfOE$Z*7Ay5@msL_FyUFu!=+5i^;#8x zVnR!$bjvMYka>i-LTyGuWo40-^$7Kwkr!@f+oxcbci~FdHfNC>87D<`^$=o=+uA6< zqTS291%6HTZt9l@Ub1fhAndYlCmGPEMNaY)Jl2o%ck~kvY@zdi=|%o>?X*B612fo50(OIQC6^PzE7V)n?#Dj+rDA z%rHqzq2EhAGS@6ZG174Pj4}NLlQFT4VD`b#58w*Xrb;gwVU$>qJqOi(xLaiPAe!UU zBND!oVkW%DGAJ->u`G%l2uHm(3+yACeLS_dcmd0w#K-AY;MSN&*-2Dk+GEJt4C#&` z8!!w_kqaY86&2u+dO#Wai|ylI-#ghI#->b}X=vr)3el`4(R+FoY%$!_#nuHi)e%l&Vl^YQ9c2s>|9EC<2p#7=Iyv1^E=q%g?73ob4RW zhO0}`%1sp#3Xus#l?g>#J^23;3QOkQo=vuh1(HV!W|hfJ7#s&bbPeKEuKx{a;QDWpjtW!^JCMmyozpv7NAILMMy$u$s3c~2 zH)BQKGr*PUL8w|O*zPvzU=?d_^sj&-8^e$d1@v>!-y8t_e4L}Hg%s2<)&e=L#Ch!D zJ9q^5~E1yV0Ww*pi*u( z8xd=xs)sG$n8T<~R|3`oUJ}F;q|BPABUGqf87^8;2(j=y4lEQ<()uRu27x`-EtYfec*A}5zLLm{gjIF2mZ=8`~ zwhHYOPK*1333UL+>b>>7=SdcwpJb*p{KGE{9+y@)i%Myp)6jw{?aam~k?~tgi#`KH zBvd|yPcHJ4(@yRZG=+}m23F_6^u^(=w@5=6Ayo1djtu;F(?xZf@Q_$-B#=uB$*YK_?9Rmt1I1G)#$Q?Ld+Z=Cp0G_cis3fo*?ZT-8@e5s0 zjwtZ5)yYO=UWdbKwgx8055?*ToI|8G2Skw?Q8u0ces%AAum_34^S(UAwwX^G6<}36 zD9^*8@-rj|2i<@{eGM@d+SMr*EZRxZx7z_!Qg8rnQ5!wQ8f7?Y2Q9u%hPpVVi&}fX zJi!*P`K>Tu&Pn^q=EpWTW5dmd?EU5NkX=7;7X47^htIfJ#N4Yl{m=DK4OvHYS;wK=4x?C0NN>z|$K~|u* zvW?6{2F-kgL0mtBA&fNuH#T9dsEeJB=Q#hVH&kaWi}Ys0?^rnu+qy8Jch zBu6Mrg+dG3VM5MX*Ms&>b3Hw@jv!_txx~} delta 2983 zcmZ`*OKcn06`eOf4u=#aQIts2*3YIt6HBxu$9C$t{zQsDjjg1y93-rO!H922QKE*- zo1rDSBdD#E)_@VX#%sFhN#P8TiEEbW6nj`SL%d$q8M$?}QoMySbCJOxW!9pPoYyHSGDU+m0k(6at1RMY0;I6g; zw>NqJp_{|kM^+lm=E%G|;=4_^8o2a-`CBes;J-d}3V7?~`BIr*FCF8Z(&Wa|=P#&Z zI_&yL78(uKfGC~1cN$gK>Jp0ISr*E&vnrAu&;PLFx_4pNU2Qua6}rRfFvK#jSZH@$ z*78{j+H4SE2(5+>`~sD>&{h^)<_fLWY&n5t2xIxE<1g9mmM@If9hWtowl7ki4I6&W zX`)t#p1;Hg51;x#bOrD=H?YAY4Z0f98pA&PPCmZ5z0jyHxPD;2*l`#J{TzUlK?V!N zlm|#o&cfRyY#i*on-sIN&~T*+p;ASrq@vJ%{yG^S9H6~43mP_QAI*V=Q{RzPsgk}W zS$WaNALsk{&&c;xlq!Em&h0M9Pl(1e-1L?zrdS`eEM2H@wi-JP9d-ucHH0rA;Dkp+ zByGFebbQ~o_in^qg|WBz_q3V95pZ-!wk1k-NGRDP)SxZz5dKWNmkP|#2+dGhlsEYU z{af;;!v9>z@uP|T*(iiCjNWx?K)qLWc_Z;Y|7PET-9IIs$kUP2XDyi};Zq{91p0)S zVf&$H^}wkc;8CQGy@G)5j6I#Kb(&4P*;sPfS>Br(9gBDrDfCY4Bp)LRgn{>CqX0sk zZ?&3MA3MiaQzuWeEZP$2T<~YS^)WQ1oo1sB8gK*Fs4@$ek0Bff;Qvmo9lQ+<{|JE8 zpC)PWp-xINCvI;Qlu$Mj|1tG zE^C7wgwfbPZ{oZ(JBNlV2v-rVBjAzQHH5DKR8rC58y=kBXK$eyPg_YwX8a7p3w=SR z>h+oQFB$NCbnSZN{k+n;PKvM5vvYLSwrJ zT-}TPsFmFzIcYr|ro%){23dCTA9vr_Ko7D52nK*fqIkrV=z27AdXz4s>^XA<1R&Ja zwUvd6I(5Bu8+;UnFftLjo0YJ&r-QxNR+}xaj!D4r>Mp}v6)`E^PJEsyBG)q&280d* zUvAq%X}4DP0>J9}|5PK(LiQYH@y9*G*D404V$8%b+n84WD_Xt^-S5KVV|tW87X}$3 zLvj(+k&zE72OxKj$OUru5JYW`pYQG7J(q2(7b9J~2!JLg7(bYq1;)M%yF>)VJYrF? 
zx??_~Kpw&IKK^uIus7ldITLEb3tnSGJfAO3Ucw2;m6eR{2x^Z+?Xx)?c?Y19j^!BJ zD||UWp}h6i<^%rR}%ASY8HXSDwow;q-2^i)B%Z#P87ppH5he*yl zL3P1yfckIXjyDlz_}1{D(J5&D5}sH$#Uja&9;HVyl_F7z`dnA|uZE9~S{gfnYfd6e zAWR})rnltT83?;S94;Tjj2&mlgx9j`%%SXUG-1#)6evUQvMD}uWYBsYn^?M78Q?rT zeh+{_)J(?IiCH$%M!#8N<=w}xOp{bZc$9(%dCyqx3<@`j?meO*m|l8BBweqw>@o*R zZ$urIX!ahjj9F<6RH0%HXWjh4*p-33Riq(wA5t%MSwH`DtStXa;`_&k5}UAOmg)S` z_@IeC2_>vXe0_ZI>@^fe9(wyF5DKWYAc2O+whUz{Sp)6vKwgUaFz-d*lKJn(C(cIM zNnu@hw&;M}(6UTn(1stt|16hNHG^QrQw{RXHkVoZ{IAlQL7DDMX8A?zOK z<@E{NJJux1vHtbkPph9|5>ZKyL06wESkzY(bf13LsL?eAN`j?*r$5QpW$tfc z2g@8RgJh@64$FBFc^8DfMWUdTDXBgV>pZ@k;|sL0j*NMH#Z7qIJbv+w_(}UbzEUyB znM&25C}$njpVuZ8`aE*>QK7F|&H;Fvyj0FTO8RPK2h^6xPJp#?9>Aq?otzIXb#ejJ z>g7U!4YCX1GPzzZf|lj*cEh_-E(Yuhxj`<0+R90}6sk?~GPw+@t7Hjk&GK@20n}E@ z3!%0~UIeg3Zj{Rbw^m*uSHOFnC~Oks#YY9Xa()4~%PZw7B|}-SSO!HGEI{@s>*Z>h z3>xLzjvD$}h$j1xTCwHtCPJe7L*(7YwMTQ){Pq) zYU|)-tgEe0zw1-=Wz(OEgEPF+!vo$cVw>=UDny==^2@3%X! zJJh{3hNPvZYsZE=y!GByGzW$yU4GT;4-G0*2@NRTU|3eXO32qAP-NC++Pk9x`lxFG z91;E{!}&zsjH@Z(YWl#{JnP6i*6|C+<`+61+c@K_N;s=7J2t=7dfCwxw{^YJ_-f0` zEjJ*pB8Sfj9zqwe-&%b~gyergtgBf=GpGY(9yYSh-rCx<4rsEFNz@1(`13767Nh1HYPytcN;53>j5Izb zoF{CJypZk8$YQP3$;L9uvPXppQ_QG}F%!*`EzxZDWnZx*Ti3v#QzRtC0Me3}AZHw? z0_H^7HX-m%CG0QU4wi4rA$&--HYA$TG+^9UoX0-4yKQvUEyy{yJ0n^$&(vR3IGXAkTJQJr8bE6SM5ZnNA>5XQ2*?K$p>^l&*RCcx@*Z<*s`dufK_795WK+`i}* ziol|Yz8@24PE6w@hm^1fGFOob_DW{&=Ieaw(+tY_<1qWUK@cWv(aJOAWLP1x5X z2z!C?3HwCmElhkr>R?B+ZX?y~#Vi-8;hym0tYXJ5kbi3w_ntp=4Q$}P#l`HU5-0n} zUTmb!m|iYtN3!!A6;JBM?Lb^FFP_K_IRIaY_?+u_(?oVmM=QAuzs$C>g`;kEvc{|@ zLdv4HJJS`YD5|E4~}7R z5+_R$4pZn7#i?r3nEw61k>m@>5>=ujG=xO{ObW`9_{X|7wR0!pADNHewja43ao(1$8@Gi_vz{U}g4S#{%ngFa{` zJFjpl-P*y`6tpY#?DPdjl#P^zX|n_i#Thjd^v{pPO}I%-Q58~Zt?dMgd~&{<6a>Za zN5DQ(VwpFc`B`ytZDA@XixPsJ!u^^b2-=W#78?)yf>uE!d2T><`Jx;g&j&=7o^jpCbkO|38u8O?-;Za8$X@QMVFRcX8QO zZs=`W@7>wa-O;*bqqlA2&W;TmyDKFnH0r0}5IR9=)CaB;y$5XQjqXk}8eBQr6sWGI zFzm&gZN-h?@@Wl#qz>#8qAgu8qK+E?G}lc#a-Su{!(Mhg=NmU{-PSEtjf);DZGrCN z2DGOtPjS-f2MMcDAA~hpq+`@1MDa>MF#t&FKy{ObK=_bCX&oZOu$nZbSf?1D&~@xx zr@JI+P$L0E3-W}iKjeGBjn|*9k{%T{~Ssu5Rv#;cbCSz~M;(NW9yY9KX z+4p{}?;~68$<4<%PmvRy*UW;`b*A}r^Ngc9;i$gssCkjR@2LOGKysGP8HN19Ig4O- zUb6{~lG)tKbH=lW=LB6{-8G|7zv3n91?x=RrbOMQ%XJ;`rFYKeF8#jYPmxU*-31pd zmvUzvMW;M-c4JP-Rh`+9IVYIynV&h?qXiv%XLDUs6;lOqsr6F*rA3$a#k+UM_wGw{ z?~C8#OLY6JvpZao3XglD+YLcf;yn<#Rfc(@(DI zbVyxi&zv(0IZn2$aI)a*t|vyGy6^G(X3FXlW%V;$vRe2R9vUNJ4K?7w6YgU z8d;>op_2_R_FpCUv0s%q^f9u31v_2(30VbUOeKd-_Cnbk8MwvN1HeRD&unJuTc%;vey>^r?Ljm*irmSeG4fA18m zg~v;$43{mXpI9=E=N@ZAMVYacCTyjbEoE~q!H_v832f2AuQhj)9pt?|Z|{k3z57J( z$-9o#AzFZr4b1F1v-l05CXolhp{w<0B==uGG7&RO?@8FyX6 zT{oNOKI1*@O}BzZIIN%%`L}9B`^l}xx6at4giW$uwk())3x=$@h3u8;lWTU6-DGC_ z-o*C3@$GU-C-R>vdAwxCRg-YloZFsoExl-b)&8=5rfEx}Y0J;-TjTp9Gy6vp`$s@0 zHnDeaYat7#Z8hg~C0>5cY{j4j4zBBmKR>+Kn@iU^w1Wsdwr@*Hx9?Q@0i)XGqHW=uk3{x#ZRr|q3UQ6$0R{w;L$Gu6anU{Z+!Gm#4u?bY7}!W2$hWYH z<|-nwwq_4Nxw!%0qs@%~Uv91|$7l)0@=+97_mHH08x0To0}5ACl|~p@{cy)S(3aE( zd~m{}D@f`ha98AlL1`roG&~ZL>36Yg8P#rBVF)`Q=u~8nE@_IwMJfal$UE$V)eQ@0 zp&z6c1r;TJM3^fU%voQJ-5;AVx!*UrKQ^!ZC97J~ZPGcc$4uAE?7=nFuw~y}}KbU1nm2^Wy)<17JuhZ(X zk>~47;~9{(Gh+f_K->dat#ktJ7JhU@*dvUQbgRe0-dS5Z`EMF6&=)*lxtDK$PD~p8 zAvhLPiYCc-koQym;UCu1YCw7HNj>OAB$)v@FEsf;KnW!EA?0wCqVVZP1b+h{Y1x>* z-D;tvf#3e<{n(*h0M|j^!=fI5N6(FfP9uWbx#$ma2ZRcxSq!r~O##M-U6FIlbl?^SQjg&bpX; zQGZGKz_I16?Fq=&?dVN7dgHtIBpiF*x9yoTA=QrmggDWbcSRru$A`d|4&HxIF-Sh% z)Y>S#CK}h9h1bmMD_a|MuP-HldcB@txj~2JM#~1GdqW@q*@ZRv)4#p_0Wt3YjQ&&t zz{Q{1EB3&Ke$8JH`VzKX0&ugd0K|A5vu`Y6VHxAov9W z+`Ob|*r%dfhKqyma8d_1XD!;w!5$AFNbMf9`XlxVJ-b>t)09#`CCQ@Jt9XQO!A~J4PUB 
zZ(<*Hte!N-3^Ai@L2DWZYnRP2J=D$V`jB3>KA(}cx^BT^8$a1S;Dc)>&*ebqgayeT zgyl+6C8WYt4KB0lurHzjcBraftyxwxR&&^MJ>@mzABBr7#Vfj`U`A+&`elh{BZk!L zd5XZ8=0o9_^oP>vs4(>p`61h&&7fWUQf552j_XG!T%`P%q43dmaFcqZ%lvmf?A8vK zq#kku!%6*s;tzn$y$0*1ud~d}CFBiOvv~mtvd+!x;F9>Y&4ot3)4yTQZGEM7T}r5k z;2faKmZT$^4|{`^_EeH%ti7kYya0$Kb>Kg_O?eDSQTD;FpO!!aA4^NwV?E`D8mNC| z`fqzS5F;1iL1x_9NZw>?b`~4zq3sd2Z)ZJ7>EzCDS7JSBgdElg80MQW^^L&+H+BT4 z*uGs0V8rCEXUj}bcfjxJF#uoJ!C?p_vc_7j*^e0GZ4R`a&bVfLDch!i$^G z&Aj{e8rLI{P3*OO#fA9O#0jr-T6UV#WEsZ)Z!q-i9=bH%g^C*lX@2(knD6aM+gV+5O30)v{r ze8d$sjna`r!qlO~=j`m5WirKMek;)0+RhI3JUVvMwf$5?T~BvA5fcH^^WC z@NLNOq%Iwi5n;?2h3HFrVlY69$kHSrt!}@mm1S=#VDGujYtntaXn6dAQ%4*XsNE0W z!_v5C3H0OV(6b~&Wa9)dwBLard0H)L9w>~{&Mu^h$uZdqpNxPme9@HZ#2@DOF<35Q-7&`lU8^e~ATCkJ8wm#w4#;^4zX zg)jaP;ELKv_?{%ORTahTK!uZizTaWV#L#xaq{+Wk&f@aVjuC2v?{ozABKt`l$R!s_ zj+jYyeoHUsJ!wiy26qSlQlL$LNbuxOfAhc+F&~+L3yCf0i+BU!0iGtO7uY`q>ztkR zU1&^N`MtsmpQ2Q{gDnn~<#Ws7=9h&X3`!(CFx<`pq4LS~fEc$*Dg-?6^-7g^>Xq^& z>|bJEacir`kvdqw*kmo~fj)_+Ln+=@lX`BSI5|a);0e^Ee%J>-fV+q15Xmjq%)8@k zv;&K*I-Iq{Ock_(J=p7Y1e_9{JUou3rO=ubgZ(OwW`S^7&In-elB^yE06DR$>1V=4 zME8N-#omZ)EBxZBMJVVZDyqO6&DrgO@$WJW0H?bSo-iAE0w=k#7Ukf`g0rt<3St7Ex@BKzGKZ9Szf&W9lT9G{w!|7g$Ets4SMzl?laoqKtL1wgM z6&F8l6lX{Co+j=(>sQbSa7h0@Hu9UyFQEiCS6>LO(!+>F5onq1Lp};A5I&CL`BOph z%!N}(35MyI7cSQd1ufE&Y(}^#a^();RKW-}b8+Kw%qsye%T zs_b0pMSZ-gCr?TK`WAR+m=?une*6By? zlXc_(`;Yq%Fy#T2yvRO$z(Z=8_~2J2X%bmNE63Z!U_0*c0p!ivkKq(ICi)N}%n>k0 z{5wU`4yi@e51&4~YBU@H72;}DN0-1+x$0;qfg8cGhpK_C|MSqt#4~;BD_LYQazej>0IAa{1oKJ&obrhS?8jfd z)3^yKo@05(h9_rpx4tF67koSTmKW0P`$)rV!-|)dURWBhZ-x2>vV3-V(@R?~Y<+PZ zulA5RgD$6+T+Zc*XeuM%XWx@py7}0fqzsV= znvf}UOGkwd^&?mHR>PvJg>HjWJMgdvoWM2E&Uc`_$D?cS`Sb`3g1?kJg{?aJM`&%Q z|AK75p^IG+Ks$fwwEe?QpQ4FCA8{$X+dbmA33sqI66F(k#80h&@APccLirA;(A$yE zA@+mgt&sQs;&>f7$f_T%$T0&tnZZ9pj>6}raENt3T*)RMc2DAgO40Ffqe^CU1S4S@ z<)0lXo-s*l`hPg}%lQ2@navUSAr1ahROhX)`x8w4pJ0}0_YOR;X*q&g1hBkGK{H$Y zYCth(OfjpdWu>0Q(kz1SBKREwymQhlT%-d54}0)xKrv#c7>aB6YYayzhLc*vvI&Qw z7uLK6-%T!gh@tL$q&VkkXaf5p61wx0_YHvUL?{?CQne-nBy3%#ERt#f)E$^N-M z>oY66@Q7p5UNqG>+DGsRL%*AxTw?KmwK-9`qiDJi8y{gN~(b) d0l?>T>vP!iC)`;Zox+curj4b#AD0k-{|x}eELs2n delta 8735 zcma($3s{ubmH(Z8UJQ?6nBn~z-V7)&0bigZiYN*w4|PDtfq#I(d3k5>!486n*=S;` z=GqNeV}sqM;%X``=|-D2AypHz*=%M)Qpm(?O`Eo{+3pgtU(z&f_ndz~lk9%q?*Gky z{(H|o=RVH4=iGD8eDVXy&!3V+yb&I*;@~-0^S!QkVa71AOl;xC2%sf4wNWj8BkdYv)Lfg^t~2W9 z%F%XRr-X3!7-Ou6k=x?z@y2+2f-%9Kc=F%mq4s2Bve=@qo%}@%ukKyOE=#p6ltj*# z2SYAMaPAyu%&^D>l_j%FB7`26UI-#NW0nwhm@{Sz;Q(`ld?Dg6G3E+ts2PMvfO&!j zV7{@=hrO0al-n6WVcxfo0wmQ1m2eLIJbv#|JBRUVv@JUX>dB~isIU3K@?I~VsfDkO{YKHGkp@DV8@5!}nu<+JsRoFdMcKY`Ar##C0EW2R>UGbBoRUpNX8I#D zC)Mk2N+#8HK4snWeJ>1L9{37k7DqbI83@%gZAey122}Ilb*ljkad+hs*+w#>Ll2QO<_nFFIVHyx2n1IWd``o5XA?Vd4N5swc#oV7 z?MjM)qV$YnKJl^G+#NU$ehX3s=uv}yDFg2v0i;-Vgm zgv-|6A1jqrsP%KqFbx%b+sjel>CHe()I`jW<{+>Xl_!y1Q!fSS{#o6U_F#_Ec`6~M z9|rE1ft12LSPc-!uO_HGpgxTal<%y36v=)4X zOd;z6A1u?=9{CfQN$m3lI+RxwP4JY=(@~vQDrB=~G|}3eGkkwI3@;Uw1Cb6b;Byh5 z@c^$Fi1hNb$WuPgEf_>y<%psTi&BxL>`+(&8>?2bAMXk$;F1<;nK3GCYhHhIlf?$s z;?_4FXm--h9{n22UTdeNK;Pim z*R2PQgU?VUK3z9lIU4?a&s6y)fBB}#@&;d7qpxx6JKJ_lZL|8fS$$i3eZ9MVN*gWL@{$1>Lv8qK?-cts5rC>OZPn{j%|@ajLS} zU)ela+3GX4yr53}r&B9_ z((*r%YbUxBCbr+~nAYbEAD9W3r{v9Ya&7($ClAm6G?KlRyne^Zr{QNU*qPR zZr>J@Z>W3Am0;&ZN2z4KDboRi$u=ZO5QjgVc zq3PI&zJ|fJj z*_lkOLXe5>gQaF3WZzm?!rscf&K@p^Vdt}=*oA@;_WP_Kgl+|X3LX{qIs0yQZnO>S zb_5OtP6RIYezu-$WnX63Y4!jjAnUhK*xph%EP!pvaSxsV?0$INSHXu4aUVrSJsEN` zWH@swAWt~ern>d}GeQv?o1=ATh=PJ)u&LdMZ z?x_`;tj{92?9#J4$JS0}Eql4-YRMJh<=t0zPpxS5uV|cHzIjH?$s%TSTtw9IHddSa z*s@JzGkI(M>+5|RcN}Xu-gdNYSocigQ;AdY27kO^G;K0|!C3PPZI|1o%GUYI*1a2E 
z>+9^E>a_VgZ7j@i7?gC<5Kl^&U;oqbp1d|m_!`fBU*B8GHupxem-BQW#|Ls%FFXh? zt$4Fe0^!x6FfB73Qm~g6-XN9Wfpu)SK+CcRwCtsV*1@lfH4x$K^SJsvCYMLtmFM_Z4PoF?sPU*Q3OW-7!>GhI$ z`Nfwi&sR<_D7jRBzJ7Yq(y2wY{zbLZ3m087oj1+58W!HuD5DBLi{y${oIiNQGFiCh zwJon~xha1u{Ppmu+ID|!`{ddkGja_lQ_sY(>7wHmo5@x()!625Z1XjC&TWdAXOf>v zo{G=+$LEh$O~x0D?Zr@OYH^)^aoxM&8+}&il-1)0R~%+o$9`8_Nz%r1OGbHZ<0hwI zvFWW2JOj*lC>5~Zm#v7X2d{5FV4?kz6=k%Ptteke9%i=k9FosYmA8e8h8?tS_CSt?9;`Gq>zO#Nr0|$m+0XT2L@WenwF$j z8lf1FS?%5K{s8YVxe#@-1K|wpO%zG{S+F{!|V2m z3i>cKiv4McS3yENnwTHU0SXjhRVihFVvSDja#2Bcp*12Ci$&w#(c;-2I} zzH+I+KpR@&flwJ~#K+4Tx|1OW3ndCxv@SOEeq8PeWhr?n?2X z_oy)ED?XfyOj zNQ_1=b{vpc?er?t+_*b}yEkZ1myra)FR}Gw0I=_*Au`Ra$i(5blac91WUQo4ms3`8 zrDdY_ow7}*w5OVeqn}B7Dk*r5PR8d?MlTrcfC$Lf)WY0#IfEY1`vH z)26YGvF3@gcOuu{O!G%>yy^BwZt=CWz8%@>6D&t$x8gHK)?O@qC%)*UY^?8!Wz2Cz zHm%m3=o{XAa=%}lI+E>I=Z=<*Y0fY4s~3%R`PCIZWyR;WLUngJBGbI5j)dq;ruo7h z1#^F>dSP`@79T;Wvzd4ON z>&qm6=SH=b1Y zmL%gJ#YWEo7o;SxBfOCZLRXtTojvt%P(hF|$Rg@jlX14WK411{Xyw?U`hIZ4U)FCR z99!35k@14`qKsW`h=x1mYYoe{D8QcNf>N|6h%p6)R|@r@eL=ljDui5A&0A~}X9%qq zO#vpgQj|DcT&y;$!(yg2240LI*l!viBNJ@TW}W;R&hk2Yd~-6n!Om|^Bhl=Q%_~!L z0p0!9XHCC)_Vlfl1`?3%#k-L~7Bq~Hm|=@PSpn`WpmLirH109OiXpYe1xQbT?4xb0 zYl~hR35alb-6x^DKf+x{5ynn$IR>lEZMtI+C5=i7$eS$=x042SCu&g&T$ljDgXKC~ z)@+bLu8oUlJ{93a>ND<3zO_%ARNzWQuo8j2_erp{`!94(s;@XO!{EfpX zd|NJr1LfP6SK~4bQgP!1qj>aWAlUp;GYold25%!2l-@Fpq+>3zi z^C9lUo~KKnS@P79k*bT*ON#S~vvp%x5Q9!8F7d}Mxl-kishmikps(ckqc_~#@^&;N z!GLdGw9B9)x*Wk$Ms_8|RY0*4UUvu(eKExSI<&zj#k^VBt}A>jUi1QLyV=oawwc@( zi(o=ww=k*Dp0gS|sYO=wEpy=tczB1^Ba*r)7$vuwJu3L5X+Xg)JsLB<$D-nO;?9xG zt=F>O1scE3j`t+PTA%C5B^~VNJvoucQqx>-2oX;XdYgq?<3byu$Dq^N$BL}c(HM_V z5xE&#nh^-hXe}Z;+2ht-u($rhdW9^)0qYGa+H#*re!UK6?t|bFd;|>j-VRxPE9u=4gn-c z9H|j$^SA)%0`Kv2M_S}+uo(=Y1PFL++<=7rWEbp51v7h-*bmcG`me+{ydd-PdkN&q z!l57W>5ag=+iJO(7F3+!=Yuc89guj?>xhpRIMJ~R%K2D{>OE)&;v=@Q%N?<7=OJaq zJgo_AAovI&Fe*pGt=xn7>RAGGA=u}$L=4rWUcNV8oQrCH4pa#=Dx|?aF`FcWuHklo z1P5e-1cEpbcPvE831L7Hjuc24&xRsW!R-dih3T=F-es3uideZ2;gy@zKqo%OLZzHn z+6$htIcei@*>OcDZ-qd02dujtbTlA?*NO@$b(Tr zaAXh6EBN$gVvaBw0=@mj4S#reIj`iRdj9AkoF^Mhq_`}R6=rDJ@jY5?B!+$i%5Q3A za4uQvB~&hGy#y1tXLyj74ocBpr4VykD#V^q%*zXP2EiikF7bh25&ycueIXf;Y=>O* zv6J&*BmNN+oc}(~5NF2!6;v2=HDO*(?}RwbZXHjhu@W{^C5@}6%b`a&;KJ-?m&xYr z6t9wWoNe?JX{is|0xI~=!0IrWX}6m;u@j!u9MQx?J4Jy*$mnu*_S8Tp;Vx_EZkxpv zbiC|_Cv$KWQ1q+xZip`gz1gi7bDvbqf}TZY$)?qYn1EEU+H8T)^>&w&dV(29d9bg! z1Igwz3&A`A`ZzIPDaJg=2}X|-rKlz`wHc81m~CC6k5L4#WpS*zFJmx-cA?R=Asxli zbu5Xzct!+6 zav>4NRYz4PioaiWuHtOPm~1kmeEj1+IgyRZ-<1}!Pxm%fe15Nx3yZ}-GUnfv(HKa^ zd^wxV#WoN(UN0V!)+o4ZDY!05Bbleq1*hx7i2WIW|D((I2%9|gcexWt{>uN8CqUQ_9>Qt7 z`cT^7gW)lfrCkUfYzCZUFr_5k!+(iYoF*NEQa}N@2eYTRoc1B=H`wS$@G&;ZAcfU! 
z5i^^jn1YG!eP${^vI6-i9-&#mWU1P2u{-Gj6ScT46!vs56()yl42Bw~+2arE%MYS> zc!-hcE*wOh?t54g_uHSK6p(nj;O(GaBKj(V81~`A@kC&MdN@NR&JDo@mV3CU^2Czi zH75#3az{%?QpZvzq`soH6AoX?cHdf~udUa&+vzj9rnL#fr6&qT;zlz^)UcN{#rI%i zmE4n$f9J5k%lkwhz`l5Nw=7t)9C4FN?7brfHuiWDOFi0~hw>F4@f5>*VOI^R-G-}C zbzs;h8am|JE*(uR70<>q(DpUGzx@^~K1;scU9{_SIGl8pn>rx9TH^t%Y|D;g#wj%;VwzO_OA2WWFY z$F$$>8T2Kz_*#uqt)2eXPG5@vnQ{wRHoa`c%Y|19eM>h$y^d5&SFCz@)74E^R*Kbj zNCxroJIFnb58sJ@H%97?go;OWy@A}pf1Th$+kwM(A`lQ*5Qr!HKVT^xh(wOTU}QUt zNCY3j>()T)VeV6jM5dfos$?0naq%);Fa|&xh%oYzA6ztqNd~^A`OXlO!CIfiR-XP9 z8|W>7V7lT^2%ug3@bvG2OeDG^FnT;T6MHjA?%?@cZ>SQ3GrArsbS;7^q_(or_LVktcq7oyj4&e6Y1n`vRX|x0Ja|^naiD`A=f;o@tbveOIY+|Y=R)4+)NJBp=HlOSZSQhz zA8;izQl3P8R^ zk&@WE5z!?2Zcz-0nJtMX`nyFEQhrw@A?wLqm5yj<3qpxzHYuEh-_@%~@@zr~S$Q{( z`JTwAZzQv-D6;6YJUvBnkfFdh5-C;^vRxad)wLqLqiw=<&5q zn?ra7@O5Q80(?F2m7qs~Zz$uHz}w4s74Qxx=ydYx4>DfEYtI^ISUkp;@w&5Gd?R1Z z>+#;iZ|9A?=`4$P@n+rv%4XimSDn?0@ov7Fw}G&QALVQKT6F2*>-c(5die(4j&~pL z;GKB)^J6^5H=@T@ew=UOUBI^S&Ac0J*YP`eFYg0oJMZUPLD|7i@ay(gH)6~YK8!KL7;_V6$F$fz@&^4nX2?_e z=DVYb#Iz&aR4$uH#Pm}2@ZHC6J~EUzap35&Lq~>6RmpKDIl0(nHrU&}v%6>3?CtLD z$2-{9-9w)}-Mx$N-tu?(U7hcMrvarHbQ6j^224qBpTq)PP5+ zDdi?o*-=M0*`$-mux_w0+rb3U#X5O8)K+}EPRlQ7Sg3wR_m|I^7r3Bt z-7^3Kv=O|G$3t_57o@Gsn+w{U#)CI$_twsVG9M}gxw)XvX(tB?8V^nCb0*&5X>Bf4 zFkM~!YD`$es~6ZAHW@t6+*+@8ep$g-V2m~qokt&DGpUObug&V*oU8Ea__BhAEz~V? z1HRnvn_ZxW`a&3RMWNz?_ITtBdw>tQqU*7|LaxjxMVsnV-T7!#k2w+%BU%9g5P^ z)!WtA)!)&L$wuv|bdgjdhdojrtF?a=B=X{wZdPV0 z?WOWCkg`~=*v>Ut;P1?ZvN&0s6rI4`NrI=2MT$a2Z90{By`3oH3)&9-5651*m5p>np5E+gRw{B8l9iS9|K_U?fWb*E8 z5MZF^(q!~0#w@w4>SA#=c-!zXt8-S5MMOWk&j!hWI%BP+ND3l#JsTN_pOl!sNJIs( zrYxcqfUcH;>D(zthz=s?IkyxlFI}`#zikATQCp=yJdw;zWxY}$?PNh6qc_K`m zV$>g^lLiBVsorxYGRz6D!x)jCz)QZnHKL@FO!CU(hxs_rx?F%tbUP;CLPX`A#(+`mbarpn>lQThAg!QS77m18HH`2pMo|B~Vd}p# zE*p_XR*&9mna)~S^<{(Um(9zDQOjzvhb;W9TMpWFn3^lkyuiZ(-YHd+oro zCut>qwidyB;$C^O_6zM3E4kT(WNRWiQOQFO>sbSMBT^`YW<1gPX22GCd|52o$_*Y~ z&=>UiR(WpOXQPzH2@4>$O5Re}x#B+hP7t`CfK6aTo~yIiE_t!;mS_@$QgGH0Iadak z=eK_h-)*3HA=^>Hx!*!mQ~2?CP@l+4nEWe8A~ zEwb|BijMU(znCU)ioj_CGXSwr92%d*{Nkq;oyK>t%W}A>mkr1VnjUG}kM)#{bjtNgAw;Yxc)lh35(i{Y zbHjwP)hfy;Qpjo&I1OG3!pS;mF{OrQ96L+a4KgPWSgIIvyu@famxp}=wLMSx_D#h` z$v|F`iPe;XDn!P^96eGfR>Y8ey16-e0)50m0(Z%OXl`Kdkw0(Vxbx>A_~J831Ia9F zl`ihMYy=u%S`Z@ZybNOtMp!dzfB`nLE+(FoGpz&i9W5u>`{jSPw3{gDHSv&aYwhlO z7g%0o;t86n;R;wu*rNcr2+&;AI4GOMqu}znEl&ZD>EeBOiT4xu2!YoP{U~VF{v^p& z8#j{mh>yQXzsEqUylRTaKT`^c?Frgvo3VJlaT7(IqkRaPBn zslYiP=(*Nz`LWfV@Xg;?9Ru9k*6>(ctB=T7TbsgqLKJ!KzyE$Yfi^B5Y?~a9w_>!g z(=%Qw>2^&RR8GDY@1FT6nr>9Zi|Od zAcgBSQ@dNc6ZLryV76Yd{xI=npwsetG+u!kCNkT4R8zoy^^%#&!ppg02yFsl(o{d^+uzR#h^*@mya1*P8kwsjIl2fCMO299Ycx z{dgNxIkB=x-+g_Gx&{DZhOgs)MufX)te(oeUTx_#H`7VA0?|?#7Nn_(8I z)GgKxg1A7g{nFRhwy_lyMt)%dsP&dNCtqDRxKY^(6Z6#ehs@*%=7TL})?Zir0`(&a z#OlN$!oNh|uL;n;OQCVwB{zb^#6DdLq_UnL!jxy8rf%ele02?@5*&p?5kiT8w5U`$ z>C7Z-WWx3sJlqv2#zMYmTCT{(XZvLuV-O!PFL!R})d!hOZ?M?6rL}VqzN@XAs%X6|WvPC4&?zo+ItW+^G z);Z#K^>@v5osKQl*<-0`WY2;M1I1-bq`0LjpfPu;ib>C?ScEzmeqgZ#&WY4mDr=|x z8T{N+`~ds+RUHXiiYg*o48{T_!yu;eM;&+7`%brHpc)x38A&IV79WwfcD6*w(Pn%N zI@#ILX5n+}?_WChA5UHQx4kh|3c@o@IkBLx7+;g0>9p?n3Hrn%uAOlbYMUMR|z~v;NJldJ1)xa#5OHEN!(0O=td3}^=lc~_&nP$f4{Mxb;>Q9tmUVv2RT9w znc`X)qj*`4Z2I4g#1(A*0x!`?;7y{>8H|Nly|XJ8wy3)yzA8W6HBj{q;#m%8ugK?m zR$Xe|ypE};=CdI?ML=BQt70L^`J7@2WOlRsVfQ-uNO#?KGPuR1yhJ1QlFmoOi$KMz z1e6FPyLc+;C3Bhl9-oQ)e)p!<=V@#J{cQmks`-P|RxWF@ZA)YIyRKSmfXUg`wQ_9B zpsB>{$V*$cv5(7NY-y`~_f`EUV9PDR6>@XW$?fz}GX0e1P2(>)nDbYSRu1P``GcOP zN9kmi<5BsB2mektwCbWWP_LcST5xj(E7K7f0%!Dd`Z;`o&Q>Bm#Ef!Ux(4HYhX(Nz 
zPFjHK3MG%8PeIBT<-hi}Mg9oX_hS_}FQcz>MGc{~1XK#4BvK{!^)0XZ6A%{jGdjb(+L-`H({sK;*h*~>soeLPHt_E?xl%uAh27W-&$|h6X$06(XFf5 z2jthcZeXpKe!KMneTRiEI7)ws&gs0M#02LVL`G}RlmebTU7#bJ(3o8skKoR^5|_T< zRxoyHCm-Tbv{dd=?M>QzWI;pLMV;{xUC_`yG8g1kgQ4IaU zyJ%O{V8`-5Eocim^0k@?x|?VD7xB$_ZHqQ%sy&wP;|mN|+pTCm7mvvmfU9`)Kd*9NzXg+BbJMN1h>_$CDwu9S8MMER?=_Vk2FX{6C<6oxu4I4sZ)%=SE!Sr41v}y$X}`Y zs$`7Y=~3Sd-$z6;5!73h_>eC^N`olcu8J}Wut6^4i3savq3e+^cXaWXkElc1jy1k}a9=P+>(fKuH* z=-tHB#LpzS?J7l86_7~U_~DXLX`DJr0$L484>?NjCISEw>+497Q>aoP)0vGs7{zx98sp`5%&{NV@C+PVqwrU#deqc-B927K`4d0mbMzjHg!tgO{DO(c<3qu@Uo#B zui=)@oNYK@i}cJfEG>`3w>yE8NT(Btp%@eQVTzb3`e>$p0_4-hHUcVmI21Py-iz-Y zG$9$anyyk14UtKC3832G@mOH+xHtn8e6M~5R7O3BO5(RinszJksua6|IOs~$gLq$u zPsI-SAn>8s0Y6Js3Bz(~y+8i#`cLDsHnt%%auMifXjZ@<2 zSD$ooAzBGg*cN1G;s9aFmg!J@m6LyZq+0&!_8Nm6;$ugfZf9Y=k=3GprsO^nAXxqn y=&Uvpt_nvY_k}~@)#0{CTR4dS)<{#NHe3~{4u_B@1S3QPjI5|$*SaZk{r>?H|Gl#S delta 7331 zcmaJm3veCPb$fUB?OW}8`v3pWvVKpNpY^e9SvI!ij|9e+<-d72yga|VlAiQ_zPl1g z-B$(V4or%v;nEBg93&=&Qa2<)%yg1TV4w*lfuwGx?F`$b11*^hok<6p=?sQ8?m2hm z7+hMt*|TTQz4zRE&pp3;^wxLCyWb#1@xnqwg1-s>C$u*DXi);`LE4BK0{5i?>0mS{aFQyF7C~Qi ztl^dE3Rxmjx++>NC|ar}T^p@U*G22n^^acF0_nzRqiFG`9(^yeSUlZG^(8slVm6zI z7XZHA)XY|DJWXhT2G6TYB-%y`Y0-Hp+D?mU3A{V#AzDEz&y#2;t)kUH3DX){3-2yk zN9*CefsWBe+5|niX)_H0Wg}fjTi_j`>uD>zH_^kiZIQM^mmWG!J7_0hy);a_ps|l0 zp&MxgDE)L3?E%VWdX)CjexMA{&2#`LTj(*mg>D7PRys(x0cDU5(e3cwMvv2-bQiP? z(Gzqx9R_Uss6_YBz2_ym7q+;Ao-~8zZDzkQA=gUgZIsacPb+kUj-K~LchXbz4tf9v z?Sesf(t|MQAPm|~$uTK>n&;###K)hPH*Qjo4v*dG24b-}lUd1ZCKd}5w=gkfCT4Ds z7JR*tZIPY@UtgqWb)zyts<-YNI&Ea zY>62*nU*suKxIjCq$Ql4@Spj{%fhN#aQyJmy|>4DW7}9Ubms@PL1QBzmJC4Rk7>Q6 zk^fNZEbc<0ibO?MS<~vL+5tk=^AWwZsTGKuv03P*#2pq8at%SvyOC;FzgYSk=Yy#<|S-fUM!#nEd(~YBM8_TCO4J? zD-wHIV8Ip1F2o6G5l*;7W_%ZkrP)Qx#g4i{h}R!_(PChIm6l{=s^tsp5?bouCdzK` z1HAkON5Cr_{V8dYRyqYwNtcWz@&IwPi?XFTeuOs$U6gUvuy)wxaw#oZA;3PEH(*IH zCLeGDv}#6$_9q+y3s}RPKou@WK{_m?+95DujS~P^>kubUCpm!%TpW)WEk_C+ut8c? zEmcVQpk0QHLCj-kDO@3W*soKFy9Bm>PqmEf1c61PQv|TdDW0x?cQd>z z`HE4#_-(pAUy?bA+hHSim95ZL+P0*KFJ-!Vx&~z|zG@LVDs8vxmT*xWmz1qiUZpyW zuL64KEp&gr#6dpi?Rpx9)gXqaf!KnhW;91#ZkSq;yu(>;bV^oGwYwIMA2U;C!nQ)= z^T)F+F%{Zp&LtCOBy>1ugLPSIT`_L0pKdo+f~vzYqh=fG692oyX=L+PT)&x_ zOR{W+wE=~py|Gek3wk?=C9-X^8QWD-W`^n5+KH{k$#`nk+|O7RI9j<>(&j}KTX*{> zO*?MGEc)4aV#dr+i}hjmHFwY#>+!6&0Errd^(1qy=FJo?We0h-qEqX|S^D^;isQkZ zfLauQBsXzVS$m+J7;^BsM&#?VY$*DFtE&EAs!#q@)#Oify@phPi)bNDr0BW|eGIvT zl#n3VK{V1Smk|vf6=*et-OW>#F|wJzT{%>G4rkHCECw+MyN6d-{Xko;1A`^pt*ZCc zJ7l_ne^M3pHBwM49)Qt|0&1Zx-6}}Xg^x8|{#ezukJZ5EM0tL7UBgqPeEd+BnyFASgVqyAbB^$L z8h4hDfxX1%O|~HK9ALssNBE|uPBO<&H+7H}{*|Wt$#=Qj+)K9c&CP#uWEduQ)l|~5 zU4J@DXH%wi(`wj0Y%R&e(Mv1H1m>@c$PCDW7ELR87batJ07q}VAOFoNJN(P<);%~`H*V%9RL zD^FPL5&rd1*T4(VxF#`FJRN=|LIDS_t8xuk4%p3gT`@=D*M$H}@YDf{a@m~Ns*r)}W7*jZWtXt2U@o3Y(zxw8OVgxb#q|U0 zWClF3;NypbhuN&p?44h1Z&-W__frNy3g~bgbtI_hr092=QbpJgfSQo8ZS4GA9A!d9xh@(yt3fa^Jh5BnDhpLxoL4^d2{CS^UtcKrVM|HJ?{k%8#`e58rmn8fOy}%*Cj1i|^dcNOgw`B;85)^_J&dH4%vg3j zlQ7wzBB5aaeF;+}1!2vzh!yONrYIP@ax%@H#jjf|1-0d0;IS$INU{v}7!>iW3+%56 zaCT|+vCe){`DJKkuOJ8^XhvXiH$2iMOp18i|1WF0hTR|L2fDT_qFoAUh0E9h#9v1s z>>5SGeu&^l2+*`{ipBHNH-v(z2g^=?S7M%AUWCqWaK@aE#UTZWPl6l#F^)&^g#9;V zQhie>40YXAkq>*8*KO#PKa}{P4Iy$B?6`tIu;HckUlTp-`JBlnklQ4F2lmA6#MBu0 z?LNvFKh?d5Jj4I2yL$uZ&zkMXK@g-tWOy{d8VLIZj1>AINgwWFukeq$Hxr4sZ``n` zK&bRxQ1g-_TT>5850VK;$f^Il4(Y3JLf!`&52>vRx-Cybz@<89vqlSQr5eOlz;(nY zRk#`iE-0Y3!7=trTx|$ftDjA#!0h8}B&>)a=c*7VOt%m`#AGIx;AbOuRERT_p(Sol zxoW~prq~Po)kwX8uCNle1&8;3qyy5!;HDZ)9KsvCYg4G>SI{0cEC?(yYbp+6dLQQ` zQ!crejRWfG+rRR2o5~vsQ9LC;^rB@Mcz4K-P&mJl_? 
diff --git a/ultralytics/utils/autobatch.py b/ultralytics/utils/autobatch.py
--- a/ultralytics/utils/autobatch.py
+++ b/ultralytics/utils/autobatch.py
[start of this hunk lost with the binary data above]
         if None in (r, b) or b < 1 or b > 1024:  # b outside of safe range
             b = batch_size
-            LOGGER.info(f'{prefix}WARNING ⚠️ CUDA anomaly detected, using default batch-size {batch_size}.')
+            LOGGER.info(f"{prefix}WARNING ⚠️ CUDA anomaly detected, using default batch-size {batch_size}.")
 
         fraction = (np.polyval(p, b) + r + a) / t  # actual fraction predicted
-        LOGGER.info(f'{prefix}Using batch-size {b} for {d} {t * fraction:.2f}G/{t:.2f}G ({fraction * 100:.0f}%) ✅')
+        LOGGER.info(f"{prefix}Using batch-size {b} for {d} {t * fraction:.2f}G/{t:.2f}G ({fraction * 100:.0f}%) ✅")
         return b
     except Exception as e:
-        LOGGER.warning(f'{prefix}WARNING ⚠️ error detected: {e}, using default batch-size {batch_size}.')
+        LOGGER.warning(f"{prefix}WARNING ⚠️ error detected: {e}, using default batch-size {batch_size}.")
         return batch_size
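For context on the autobatch hunk above: the patched lines sit at the end of a routine that fits a first-degree polynomial to measured CUDA memory usage and solves it for a target utilization. A minimal sketch of the idea follows; p, r, a and t mirror the names in the hunk, but the measurements, the fallback batch size and the 60% target here are illustrative assumptions, not the module's real values:

import numpy as np

# Hypothetical profiled memory (GB) at a few trial batch sizes
batch_sizes = [1, 2, 4, 8, 16]
memory_gb = [1.2, 1.9, 3.3, 6.1, 11.8]

t = 16.0         # assumed total device memory (GB)
r, a = 0.5, 0.3  # assumed reserved and allocated memory (GB)
batch_size = 16  # assumed default to fall back to
p = np.polyfit(batch_sizes, memory_gb, deg=1)  # first-degree fit of memory vs batch size

b = int((0.60 * t - (r + a) - p[1]) / p[0])  # solve the fit for a ~60% utilization target
if b < 1 or b > 1024:  # same safe-range guard as the patched code above
    b = batch_size
fraction = (np.polyval(p, b) + r + a) / t  # predicted fraction, as in the hunk
print(f"batch {b} -> {fraction * 100:.0f}% of {t:.2f}G")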
diff --git a/ultralytics/utils/benchmarks.py b/ultralytics/utils/benchmarks.py
index ad1bcf3..0286990 100644
--- a/ultralytics/utils/benchmarks.py
+++ b/ultralytics/utils/benchmarks.py
@@ -1,6 +1,6 @@
 # Ultralytics YOLO 🚀, AGPL-3.0 license
 """
-Benchmark a YOLO model formats for speed and accuracy
+Benchmark a YOLO model formats for speed and accuracy.
 
 Usage:
     from ultralytics.utils.benchmarks import ProfileModels, benchmark
@@ -21,34 +21,29 @@
 TensorFlow Lite         | `tflite`      | yolov8n.tflite
 TensorFlow Edge TPU     | `edgetpu`     | yolov8n_edgetpu.tflite
 TensorFlow.js           | `tfjs`        | yolov8n_web_model/
 PaddlePaddle            | `paddle`      | yolov8n_paddle_model/
-ncnn                    | `ncnn`        | yolov8n_ncnn_model/
+NCNN                    | `ncnn`        | yolov8n_ncnn_model/
 """
 
 import glob
 import platform
-import sys
 import time
 from pathlib import Path
 
 import numpy as np
 import torch.cuda
 
-from ultralytics import YOLO
+from ultralytics import YOLO, YOLOWorld
 from ultralytics.cfg import TASK2DATA, TASK2METRIC
 from ultralytics.engine.exporter import export_formats
-from ultralytics.utils import ASSETS, LINUX, LOGGER, MACOS, SETTINGS, TQDM
-from ultralytics.utils.checks import check_requirements, check_yolo
+from ultralytics.utils import ASSETS, LINUX, LOGGER, MACOS, TQDM, WEIGHTS_DIR
+from ultralytics.utils.checks import IS_PYTHON_3_12, check_requirements, check_yolo
 from ultralytics.utils.files import file_size
 from ultralytics.utils.torch_utils import select_device
 
 
-def benchmark(model=Path(SETTINGS['weights_dir']) / 'yolov8n.pt',
-              data=None,
-              imgsz=160,
-              half=False,
-              int8=False,
-              device='cpu',
-              verbose=False):
+def benchmark(
+    model=WEIGHTS_DIR / "yolov8n.pt", data=None, imgsz=160, half=False, int8=False, device="cpu", verbose=False
+):
     """
     Benchmark a YOLO model across different formats for speed and accuracy.
 
@@ -76,6 +71,7 @@ def benchmark(model=Path(SETTINGS['weights_dir']) / 'yolov8n.pt',
     """
     import pandas as pd
+
     pd.options.display.max_columns = 10
     pd.options.display.width = 120
     device = select_device(device, verbose=False)
@@ -85,67 +81,72 @@ def benchmark(model=Path(SETTINGS['weights_dir']) / 'yolov8n.pt',
     y = []
     t0 = time.time()
     for i, (name, format, suffix, cpu, gpu) in export_formats().iterrows():  # index, (name, format, suffix, CPU, GPU)
-        emoji, filename = '❌', None  # export defaults
+        emoji, filename = "❌", None  # export defaults
         try:
-            assert i != 9 or LINUX, 'Edge TPU export only supported on Linux'
-            if i == 10:
-                assert MACOS or LINUX, 'TF.js export only supported on macOS and Linux'
-            elif i == 11:
-                assert sys.version_info < (3, 11), 'PaddlePaddle export only supported on Python<=3.10'
-            if 'cpu' in device.type:
-                assert cpu, 'inference not supported on CPU'
-            if 'cuda' in device.type:
-                assert gpu, 'inference not supported on GPU'
+            # Checks
+            if i == 9:  # Edge TPU
+                assert LINUX, "Edge TPU export only supported on Linux"
+            elif i == 7:  # TF GraphDef
+                assert model.task != "obb", "TensorFlow GraphDef not supported for OBB task"
+            elif i in {5, 10}:  # CoreML and TF.js
+                assert MACOS or LINUX, "export only supported on macOS and Linux"
+            if i in {3, 5}:  # CoreML and OpenVINO
+                assert not IS_PYTHON_3_12, "CoreML and OpenVINO not supported on Python 3.12"
+            if i in {6, 7, 8, 9, 10}:  # All TF formats
+                assert not isinstance(model, YOLOWorld), "YOLOWorldv2 TensorFlow exports not supported by onnx2tf yet"
+            if i in {11}:  # Paddle
+                assert not isinstance(model, YOLOWorld), "YOLOWorldv2 Paddle exports not supported yet"
+            if i in {12}:  # NCNN
+                assert not isinstance(model, YOLOWorld), "YOLOWorldv2 NCNN exports not supported yet"
+            if "cpu" in device.type:
+                assert cpu, "inference not supported on CPU"
+            if "cuda" in device.type:
+                assert gpu, "inference not supported on GPU"
 
             # Export
-            if format == '-':
+            if format == "-":
                 filename = model.ckpt_path or model.cfg
-                export = model  # PyTorch format
+                exported_model = model  # PyTorch format
             else:
                 filename = model.export(imgsz=imgsz, format=format, half=half, int8=int8, device=device, verbose=False)
-                export = YOLO(filename, task=model.task)
-            assert suffix in str(filename), 'export failed'
-            emoji = '❎'  # indicates export succeeded
+                exported_model = YOLO(filename, task=model.task)
+            assert suffix in str(filename), "export failed"
+            emoji = "❎"  # indicates export succeeded
 
             # Predict
-            assert model.task != 'pose' or i != 7, 'GraphDef Pose inference is not supported'
-            assert i not in (9, 10), 'inference not supported'  # Edge TPU and TF.js are unsupported
-            assert i != 5 or platform.system() == 'Darwin', 'inference only supported on macOS>=10.13'  # CoreML
-            export.predict(ASSETS / 'bus.jpg', imgsz=imgsz, device=device, half=half)
+            assert model.task != "pose" or i != 7, "GraphDef Pose inference is not supported"
+            assert i not in (9, 10), "inference not supported"  # Edge TPU and TF.js are unsupported
+            assert i != 5 or platform.system() == "Darwin", "inference only supported on macOS>=10.13"  # CoreML
+            exported_model.predict(ASSETS / "bus.jpg", imgsz=imgsz, device=device, half=half)
 
             # Validate
             data = data or TASK2DATA[model.task]  # task to dataset, i.e. coco8.yaml for task=detect
             key = TASK2METRIC[model.task]  # task to metric, i.e. metrics/mAP50-95(B) for task=detect
-            results = export.val(data=data,
-                                 batch=1,
-                                 imgsz=imgsz,
-                                 plots=False,
-                                 device=device,
-                                 half=half,
-                                 int8=int8,
-                                 verbose=False)
-            metric, speed = results.results_dict[key], results.speed['inference']
-            y.append([name, '✅', round(file_size(filename), 1), round(metric, 4), round(speed, 2)])
+            results = exported_model.val(
+                data=data, batch=1, imgsz=imgsz, plots=False, device=device, half=half, int8=int8, verbose=False
+            )
+            metric, speed = results.results_dict[key], results.speed["inference"]
+            y.append([name, "✅", round(file_size(filename), 1), round(metric, 4), round(speed, 2)])
         except Exception as e:
             if verbose:
-                assert type(e) is AssertionError, f'Benchmark failure for {name}: {e}'
-            LOGGER.warning(f'ERROR ❌️ Benchmark failure for {name}: {e}')
+                assert type(e) is AssertionError, f"Benchmark failure for {name}: {e}"
+            LOGGER.warning(f"ERROR ❌️ Benchmark failure for {name}: {e}")
             y.append([name, emoji, round(file_size(filename), 1), None, None])  # mAP, t_inference
 
     # Print results
     check_yolo(device=device)  # print system info
-    df = pd.DataFrame(y, columns=['Format', 'Status❔', 'Size (MB)', key, 'Inference time (ms/im)'])
+    df = pd.DataFrame(y, columns=["Format", "Status❔", "Size (MB)", key, "Inference time (ms/im)"])
 
     name = Path(model.ckpt_path).name
-    s = f'\nBenchmarks complete for {name} on {data} at imgsz={imgsz} ({time.time() - t0:.2f}s)\n{df}\n'
+    s = f"\nBenchmarks complete for {name} on {data} at imgsz={imgsz} ({time.time() - t0:.2f}s)\n{df}\n"
     LOGGER.info(s)
-    with open('benchmarks.log', 'a', errors='ignore', encoding='utf-8') as f:
+    with open("benchmarks.log", "a", errors="ignore", encoding="utf-8") as f:
         f.write(s)
 
     if verbose and isinstance(verbose, float):
         metrics = df[key].array  # values to compare to floor
         floor = verbose  # minimum metric floor to pass, i.e. = 0.29 mAP for YOLOv5n
-        assert all(x > floor for x in metrics if pd.notna(x)), f'Benchmark failure: metric(s) < floor {floor}'
+        assert all(x > floor for x in metrics if pd.notna(x)), f"Benchmark failure: metric(s) < floor {floor}"
 
     return df
 
@@ -154,8 +155,7 @@ class ProfileModels:
     """
     ProfileModels class for profiling different models on ONNX and TensorRT.
 
-    This class profiles the performance of different models, provided their paths. The profiling includes parameters such as
-    model speed and FLOPs.
+    This class profiles the performance of different models, returning results such as model speed and FLOPs.
 
     Attributes:
         paths (list): Paths of the models to profile.
@@ -175,15 +175,30 @@
         ```
     """
 
-    def __init__(self,
-                 paths: list,
-                 num_timed_runs=100,
-                 num_warmup_runs=10,
-                 min_time=60,
-                 imgsz=640,
-                 half=True,
-                 trt=True,
-                 device=None):
+    def __init__(
+        self,
+        paths: list,
+        num_timed_runs=100,
+        num_warmup_runs=10,
+        min_time=60,
+        imgsz=640,
+        half=True,
+        trt=True,
+        device=None,
+    ):
+        """
+        Initialize the ProfileModels class for profiling models.
+
+        Args:
+            paths (list): List of paths of the models to be profiled.
+            num_timed_runs (int, optional): Number of timed runs for the profiling. Default is 100.
+            num_warmup_runs (int, optional): Number of warmup runs before the actual profiling starts. Default is 10.
+            min_time (float, optional): Minimum time in seconds for profiling a model. Default is 60.
+            imgsz (int, optional): Size of the image used during profiling. Default is 640.
+            half (bool, optional): Flag to indicate whether to use half-precision floating point for profiling.
+            trt (bool, optional): Flag to indicate whether to profile using TensorRT. Default is True.
+            device (torch.device, optional): Device used for profiling. If None, it is determined automatically.
+        """
         self.paths = paths
         self.num_timed_runs = num_timed_runs
         self.num_warmup_runs = num_warmup_runs
@@ -191,36 +206,32 @@ class ProfileModels:
         self.imgsz = imgsz
         self.half = half
         self.trt = trt  # run TensorRT profiling
-        self.device = device or torch.device(0 if torch.cuda.is_available() else 'cpu')
+        self.device = device or torch.device(0 if torch.cuda.is_available() else "cpu")
 
     def profile(self):
+        """Logs the benchmarking results of a model, checks metrics against floor and returns the results."""
         files = self.get_files()
 
         if not files:
-            print('No matching *.pt or *.onnx files found.')
+            print("No matching *.pt or *.onnx files found.")
             return
 
         table_rows = []
         output = []
         for file in files:
-            engine_file = file.with_suffix('.engine')
-            if file.suffix in ('.pt', '.yaml', '.yml'):
+            engine_file = file.with_suffix(".engine")
+            if file.suffix in (".pt", ".yaml", ".yml"):
                 model = YOLO(str(file))
                 model.fuse()  # to report correct params and GFLOPs in model.info()
                 model_info = model.info()
-                if self.trt and self.device.type != 'cpu' and not engine_file.is_file():
-                    engine_file = model.export(format='engine',
-                                               half=self.half,
-                                               imgsz=self.imgsz,
-                                               device=self.device,
-                                               verbose=False)
-                onnx_file = model.export(format='onnx',
-                                         half=self.half,
-                                         imgsz=self.imgsz,
-                                         simplify=True,
-                                         device=self.device,
-                                         verbose=False)
-            elif file.suffix == '.onnx':
+                if self.trt and self.device.type != "cpu" and not engine_file.is_file():
+                    engine_file = model.export(
+                        format="engine", half=self.half, imgsz=self.imgsz, device=self.device, verbose=False
+                    )
+                onnx_file = model.export(
+                    format="onnx", half=self.half, imgsz=self.imgsz, simplify=True, device=self.device, verbose=False
+                )
+            elif file.suffix == ".onnx":
                 model_info = self.get_onnx_model_info(file)
                 onnx_file = file
             else:
@@ -235,25 +246,30 @@
         return output
 
     def get_files(self):
+        """Returns a list of paths for all relevant model files given by the user."""
        files = []
         for path in self.paths:
             path = Path(path)
             if path.is_dir():
-                extensions = ['*.pt', '*.onnx', '*.yaml']
+                extensions = ["*.pt", "*.onnx", "*.yaml"]
                 files.extend([file for ext in extensions for file in glob.glob(str(path / ext))])
-            elif path.suffix in {'.pt', '.yaml', '.yml'}:  # add non-existing
+            elif path.suffix in {".pt", ".yaml", ".yml"}:  # add non-existing
                 files.append(str(path))
             else:
                 files.extend(glob.glob(str(path)))
 
-        print(f'Profiling: {sorted(files)}')
+        print(f"Profiling: {sorted(files)}")
         return [Path(file) for file in sorted(files)]
 
     def get_onnx_model_info(self, onnx_file: str):
-        # return (num_layers, num_params, num_gradients, num_flops)
-        return 0.0, 0.0, 0.0, 0.0
+        """Retrieves the information including number of layers, parameters, gradients and FLOPs for an ONNX model
+        file.
+        """
+        return 0.0, 0.0, 0.0, 0.0  # return (num_layers, num_params, num_gradients, num_flops)
 
-    def iterative_sigma_clipping(self, data, sigma=2, max_iters=3):
+    @staticmethod
+    def iterative_sigma_clipping(data, sigma=2, max_iters=3):
+        """Applies an iterative sigma clipping algorithm to the given data times number of iterations."""
         data = np.array(data)
         for _ in range(max_iters):
             mean, std = np.mean(data), np.std(data)
@@ -264,6 +280,7 @@ class ProfileModels:
         return data
 
     def profile_tensorrt_model(self, engine_file: str, eps: float = 1e-3):
+        """Profiles the TensorRT model, measuring average run time and standard deviation among runs."""
         if not self.trt or not Path(engine_file).is_file():
             return 0.0, 0.0
 
@@ -286,39 +303,44 @@
         run_times = []
         for _ in TQDM(range(num_runs), desc=engine_file):
             results = model(input_data, imgsz=self.imgsz, verbose=False)
-            run_times.append(results[0].speed['inference'])  # Convert to milliseconds
+            run_times.append(results[0].speed["inference"])  # Convert to milliseconds
 
         run_times = self.iterative_sigma_clipping(np.array(run_times), sigma=2, max_iters=3)  # sigma clipping
         return np.mean(run_times), np.std(run_times)
 
     def profile_onnx_model(self, onnx_file: str, eps: float = 1e-3):
-        check_requirements('onnxruntime')
+        """Profiles an ONNX model by executing it multiple times and returns the mean and standard deviation of run
+        times.
+        """
+        check_requirements("onnxruntime")
         import onnxruntime as ort
 
         # Session with either 'TensorrtExecutionProvider', 'CUDAExecutionProvider', 'CPUExecutionProvider'
         sess_options = ort.SessionOptions()
         sess_options.graph_optimization_level = ort.GraphOptimizationLevel.ORT_ENABLE_ALL
         sess_options.intra_op_num_threads = 8  # Limit the number of threads
-        sess = ort.InferenceSession(onnx_file, sess_options, providers=['CPUExecutionProvider'])
+        sess = ort.InferenceSession(onnx_file, sess_options, providers=["CPUExecutionProvider"])
 
         input_tensor = sess.get_inputs()[0]
         input_type = input_tensor.type
+        dynamic = not all(isinstance(dim, int) and dim >= 0 for dim in input_tensor.shape)  # dynamic input shape
+        input_shape = (1, 3, self.imgsz, self.imgsz) if dynamic else input_tensor.shape
 
         # Mapping ONNX datatype to numpy datatype
-        if 'float16' in input_type:
+        if "float16" in input_type:
             input_dtype = np.float16
-        elif 'float' in input_type:
+        elif "float" in input_type:
             input_dtype = np.float32
-        elif 'double' in input_type:
+        elif "double" in input_type:
             input_dtype = np.float64
-        elif 'int64' in input_type:
+        elif "int64" in input_type:
             input_dtype = np.int64
-        elif 'int32' in input_type:
+        elif "int32" in input_type:
             input_dtype = np.int32
         else:
-            raise ValueError(f'Unsupported ONNX datatype {input_type}')
+            raise ValueError(f"Unsupported ONNX datatype {input_type}")
 
-        input_data = np.random.rand(*input_tensor.shape).astype(input_dtype)
+        input_data = np.random.rand(*input_shape).astype(input_dtype)
         input_name = input_tensor.name
         output_name = sess.get_outputs()[0].name
 
@@ -344,24 +366,39 @@
         return np.mean(run_times), np.std(run_times)
 
     def generate_table_row(self, model_name, t_onnx, t_engine, model_info):
+        """Generates a formatted string for a table row that includes model performance and metric details."""
         layers, params, gradients, flops = model_info
-        return f'| {model_name:18s} | {self.imgsz} | - | {t_onnx[0]:.2f} ± {t_onnx[1]:.2f} ms | {t_engine[0]:.2f} ± {t_engine[1]:.2f} ms | {params / 1e6:.1f} | {flops:.1f} |'
+        return (
+            f"| {model_name:18s} | {self.imgsz} | - | {t_onnx[0]:.2f} ± {t_onnx[1]:.2f} ms | {t_engine[0]:.2f} ± "
+            f"{t_engine[1]:.2f} ms | {params / 1e6:.1f} | {flops:.1f} |"
+        )
 
-    def generate_results_dict(self, model_name, t_onnx, t_engine, model_info):
+    @staticmethod
+    def generate_results_dict(model_name, t_onnx, t_engine, model_info):
+        """Generates a dictionary of model details including name, parameters, GFLOPS and speed metrics."""
         layers, params, gradients, flops = model_info
         return {
-            'model/name': model_name,
-            'model/parameters': params,
-            'model/GFLOPs': round(flops, 3),
-            'model/speed_ONNX(ms)': round(t_onnx[0], 3),
-            'model/speed_TensorRT(ms)': round(t_engine[0], 3)}
+            "model/name": model_name,
+            "model/parameters": params,
+            "model/GFLOPs": round(flops, 3),
+            "model/speed_ONNX(ms)": round(t_onnx[0], 3),
+            "model/speed_TensorRT(ms)": round(t_engine[0], 3),
+        }
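Between the profiling methods above and the print_table changes that continue below, the measurement pattern is: discard warmup runs, time the remaining runs in milliseconds, then sigma-clip the samples before reporting mean and standard deviation. A self-contained sketch of that pattern, assuming an arbitrary run_once callable (the class itself times ONNX Runtime and TensorRT sessions instead):

import time

import numpy as np


def iterative_sigma_clipping(data, sigma=2, max_iters=3):
    # Repeatedly drop samples more than sigma standard deviations from the mean,
    # as in the static method above.
    data = np.array(data)
    for _ in range(max_iters):
        mean, std = np.mean(data), np.std(data)
        clipped = data[(data > mean - sigma * std) & (data < mean + sigma * std)]
        if len(clipped) == len(data):
            break
        data = clipped
    return data


def time_runs(run_once, num_warmup_runs=10, num_timed_runs=100):
    # Warm up, then time repeated calls and return (mean_ms, std_ms) after clipping.
    for _ in range(num_warmup_runs):  # warmup runs are excluded from the statistics
        run_once()
    run_times = []
    for _ in range(num_timed_runs):
        t0 = time.time()
        run_once()
        run_times.append((time.time() - t0) * 1000)  # milliseconds
    run_times = iterative_sigma_clipping(run_times, sigma=2, max_iters=3)
    return np.mean(run_times), np.std(run_times)


mean_ms, std_ms = time_runs(lambda: sum(range(10000)))  # toy workload for illustration
print(f"{mean_ms:.3f} ± {std_ms:.3f} ms")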
 
-    def print_table(self, table_rows):
-        gpu = torch.cuda.get_device_name(0) if torch.cuda.is_available() else 'GPU'
-        header = f'| Model | size<br><sup>(pixels) | mAP<sup>val<br>50-95 | Speed<br><sup>CPU ONNX<br>(ms) | Speed<br><sup>{gpu} TensorRT<br>(ms) | params<br><sup>(M) | FLOPs<br><sup>(B) |'
-        separator = '|-------------|---------------------|--------------------|------------------------------|-----------------------------------|------------------|-----------------|'
+    @staticmethod
+    def print_table(table_rows):
+        """Formats and prints a comparison table for different models with given statistics and performance data."""
+        gpu = torch.cuda.get_device_name(0) if torch.cuda.is_available() else "GPU"
+        header = (
+            f"| Model | size<br><sup>(pixels) | mAP<sup>val<br>50-95 | Speed<br><sup>CPU ONNX<br>(ms) | "
+            f"Speed<br><sup>{gpu} TensorRT<br>(ms) | params<br><sup>(M) | FLOPs<br><sup>(B) |"
+        )
+        separator = (
+            "|-------------|---------------------|--------------------|------------------------------|"
+            "-----------------------------------|------------------|-----------------|"
+        )
 
-        print(f'\n\n{header}')
+        print(f"\n\n{header}")
         print(separator)
         for row in table_rows:
             print(row)
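The benchmarks.py changes end here. For orientation, a minimal usage sketch consistent with the Usage block in the module docstring above; the weight and YAML filenames are illustrative:

from ultralytics.utils.benchmarks import ProfileModels, benchmark

# Benchmark one model across the export formats listed in the table above
benchmark(model="yolov8n.pt", imgsz=640, half=False, device="cpu", verbose=False)

# Profile ONNX/TensorRT speed for one or more models, as in the class docstring
ProfileModels(["yolov8n.yaml", "yolov8s.yaml"], imgsz=640).profile()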
diff --git a/ultralytics/utils/callbacks/__init__.py b/ultralytics/utils/callbacks/__init__.py
index 8ad4ad6..116babe 100644
--- a/ultralytics/utils/callbacks/__init__.py
+++ b/ultralytics/utils/callbacks/__init__.py
@@ -2,4 +2,4 @@
 
 from .base import add_integration_callbacks, default_callbacks, get_default_callbacks
 
-__all__ = 'add_integration_callbacks', 'default_callbacks', 'get_default_callbacks'
+__all__ = "add_integration_callbacks", "default_callbacks", "get_default_callbacks"
diff --git a/ultralytics/utils/callbacks/__pycache__/__init__.cpython-312.pyc b/ultralytics/utils/callbacks/__pycache__/__init__.cpython-312.pyc
index 224d9a6a1b28951c634802391b1c058462a8c5c8..9cdee6c01d7d8254b144ec90d50d1dd82b9f1e1a 100644
GIT binary patch
[base85 binary deltas omitted]
diff --git a/ultralytics/utils/callbacks/__pycache__/__init__.cpython-39.pyc b/ultralytics/utils/callbacks/__pycache__/__init__.cpython-39.pyc
index fa7f54213728601c708e87be8a4ba9bb7276a424..f4edc7aba9f127265e93b56f9168465f068340da 100644
GIT binary patch
[base85 binary deltas omitted]
diff --git a/ultralytics/utils/callbacks/__pycache__/base.cpython-312.pyc b/ultralytics/utils/callbacks/__pycache__/base.cpython-312.pyc
index 0313f4068616af981d6330d09cf9cee03d3f5748..02e8cd17c291daf16a09177e55a692030e380fd0 100644
GIT binary patch
[base85 binary deltas for compiled __pycache__/*.pyc files omitted]
diff --git a/ultralytics/utils/callbacks/base.py b/ultralytics/utils/callbacks/base.py
index 2e676bf..d015457 100644
--- a/ultralytics/utils/callbacks/base.py
+++ b/ultralytics/utils/callbacks/base.py
@@ -1,11 +1,10 @@
 # Ultralytics YOLO 🚀, AGPL-3.0 license
-"""
-Base callbacks
-"""
+"""Base callbacks."""
 
 from collections import defaultdict
 from copy import deepcopy
 
+
 # Trainer callbacks ----------------------------------------------------------------------------------------------------
 
@@ -145,37 +144,35 @@ def on_export_end(exporter):
 
 default_callbacks = {
     # Run in trainer
-    'on_pretrain_routine_start': [on_pretrain_routine_start],
-    'on_pretrain_routine_end': [on_pretrain_routine_end],
-    'on_train_start': [on_train_start],
-    'on_train_epoch_start': [on_train_epoch_start],
-    'on_train_batch_start': [on_train_batch_start],
-    'optimizer_step': [optimizer_step],
-    'on_before_zero_grad': [on_before_zero_grad],
-    'on_train_batch_end': [on_train_batch_end],
-    'on_train_epoch_end': [on_train_epoch_end],
-    'on_fit_epoch_end': [on_fit_epoch_end],  # fit = train + val
-    'on_model_save': [on_model_save],
-    'on_train_end': [on_train_end],
-    'on_params_update': [on_params_update],
-    'teardown': [teardown],
-
+    "on_pretrain_routine_start": [on_pretrain_routine_start],
+    "on_pretrain_routine_end": [on_pretrain_routine_end],
+    "on_train_start": [on_train_start],
+    "on_train_epoch_start": [on_train_epoch_start],
+    "on_train_batch_start": [on_train_batch_start],
+    "optimizer_step": [optimizer_step],
+    "on_before_zero_grad": [on_before_zero_grad],
+    "on_train_batch_end": [on_train_batch_end],
+    "on_train_epoch_end": [on_train_epoch_end],
+    "on_fit_epoch_end": [on_fit_epoch_end],  # fit = train + val
+    "on_model_save": [on_model_save],
+    "on_train_end": [on_train_end],
+    "on_params_update": [on_params_update],
+    "teardown": [teardown],
     # Run in validator
-    'on_val_start': [on_val_start],
-    'on_val_batch_start': [on_val_batch_start],
-    'on_val_batch_end': [on_val_batch_end],
-    'on_val_end': [on_val_end],
-
+    "on_val_start": [on_val_start],
+    "on_val_batch_start": [on_val_batch_start],
+    "on_val_batch_end": [on_val_batch_end],
+    "on_val_end": [on_val_end],
     # Run in predictor
-    'on_predict_start': [on_predict_start],
-    'on_predict_batch_start': [on_predict_batch_start],
-    'on_predict_postprocess_end': [on_predict_postprocess_end],
-    'on_predict_batch_end': [on_predict_batch_end],
-    'on_predict_end': [on_predict_end],
-
+    "on_predict_start": [on_predict_start],
+    "on_predict_batch_start": [on_predict_batch_start],
+    "on_predict_postprocess_end": [on_predict_postprocess_end],
+    "on_predict_batch_end": [on_predict_batch_end],
+    "on_predict_end": [on_predict_end],
     # Run in exporter
-    'on_export_start': [on_export_start],
-    'on_export_end': [on_export_end]}
+    "on_export_start": [on_export_start],
+    "on_export_end": [on_export_end],
+}
 
 
 def get_default_callbacks():
@@ -199,10 +196,11 @@ def add_integration_callbacks(instance):
 
     # Load HUB callbacks
     from .hub import callbacks as hub_cb
+
     callbacks_list = [hub_cb]
 
     # Load training callbacks
-    if 'Trainer' in instance.__class__.__name__:
+    if "Trainer" in instance.__class__.__name__:
         from .clearml import callbacks as clear_cb
         from .comet import callbacks as comet_cb
         from .dvc import callbacks as dvc_cb
@@ -211,12 +209,8 @@ def add_integration_callbacks(instance):
         from .raytune import callbacks as tune_cb
         from .tensorboard import callbacks as tb_cb
         from .wb import callbacks as wb_cb
-        callbacks_list.extend([clear_cb, comet_cb, dvc_cb, mlflow_cb, neptune_cb, tune_cb, tb_cb, wb_cb])
 
-    # Load export callbacks (patch to avoid CoreML protobuf error)
-    if 'Exporter' in instance.__class__.__name__:
-        from .tensorboard import callbacks as tb_cb
-        callbacks_list.append(tb_cb)
+        callbacks_list.extend([clear_cb, comet_cb, dvc_cb, mlflow_cb, neptune_cb, tune_cb, tb_cb, wb_cb])
 
     # Add the callbacks to the callbacks dictionary
     for callbacks in callbacks_list:
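The default_callbacks table above maps each event name to a list of handlers, and add_integration_callbacks extends those lists with integration modules such as the ClearML callbacks whose diff follows. A sketch of hooking a custom handler into the same mechanism; model.add_callback is the public ultralytics entry point for this, and the handler body is illustrative:

from ultralytics import YOLO


def my_on_train_epoch_end(trainer):
    # Same contract as the defaults above: the handler receives the Trainer instance.
    print(f"epoch {trainer.epoch} done, last lr: {trainer.lr}")


model = YOLO("yolov8n.pt")
model.add_callback("on_train_epoch_end", my_on_train_epoch_end)  # appended to the event's handler list
model.train(data="coco8.yaml", epochs=1)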
@@ -29,12 +29,11 @@ def _log_debug_samples(files, title='Debug Samples') -> None: if task := Task.current_task(): for f in files: if f.exists(): - it = re.search(r'_batch(\d+)', f.name) + it = re.search(r"_batch(\d+)", f.name) iteration = int(it.groups()[0]) if it else 0 - task.get_logger().report_image(title=title, - series=f.name.replace(it.group(), ''), - local_path=str(f), - iteration=iteration) + task.get_logger().report_image( + title=title, series=f.name.replace(it.group(), ""), local_path=str(f), iteration=iteration + ) def _log_plot(title, plot_path) -> None: @@ -50,13 +49,12 @@ def _log_plot(title, plot_path) -> None: img = mpimg.imread(plot_path) fig = plt.figure() - ax = fig.add_axes([0, 0, 1, 1], frameon=False, aspect='auto', xticks=[], yticks=[]) # no ticks + ax = fig.add_axes([0, 0, 1, 1], frameon=False, aspect="auto", xticks=[], yticks=[]) # no ticks ax.imshow(img) - Task.current_task().get_logger().report_matplotlib_figure(title=title, - series='', - figure=fig, - report_interactive=False) + Task.current_task().get_logger().report_matplotlib_figure( + title=title, series="", figure=fig, report_interactive=False + ) def on_pretrain_routine_start(trainer): @@ -68,19 +66,21 @@ def on_pretrain_routine_start(trainer): PatchPyTorchModelIO.update_current_task(None) PatchedMatplotlib.update_current_task(None) else: - task = Task.init(project_name=trainer.args.project or 'YOLOv8', - task_name=trainer.args.name, - tags=['YOLOv8'], - output_uri=True, - reuse_last_task_id=False, - auto_connect_frameworks={ - 'pytorch': False, - 'matplotlib': False}) - LOGGER.warning('ClearML Initialized a new task. If you want to run remotely, ' - 'please add clearml-init and connect your arguments before initializing YOLO.') - task.connect(vars(trainer.args), name='General') + task = Task.init( + project_name=trainer.args.project or "YOLOv8", + task_name=trainer.args.name, + tags=["YOLOv8"], + output_uri=True, + reuse_last_task_id=False, + auto_connect_frameworks={"pytorch": False, "matplotlib": False}, + ) + LOGGER.warning( + "ClearML Initialized a new task. If you want to run remotely, " + "please add clearml-init and connect your arguments before initializing YOLO." + ) + task.connect(vars(trainer.args), name="General") except Exception as e: - LOGGER.warning(f'WARNING ⚠️ ClearML installed but not initialized correctly, not logging this run. {e}') + LOGGER.warning(f"WARNING ⚠️ ClearML installed but not initialized correctly, not logging this run. 
{e}") def on_train_epoch_end(trainer): @@ -88,22 +88,26 @@ def on_train_epoch_end(trainer): if task := Task.current_task(): # Log debug samples if trainer.epoch == 1: - _log_debug_samples(sorted(trainer.save_dir.glob('train_batch*.jpg')), 'Mosaic') + _log_debug_samples(sorted(trainer.save_dir.glob("train_batch*.jpg")), "Mosaic") # Report the current training progress - for k, v in trainer.validator.metrics.results_dict.items(): - task.get_logger().report_scalar('train', k, v, iteration=trainer.epoch) + for k, v in trainer.label_loss_items(trainer.tloss, prefix="train").items(): + task.get_logger().report_scalar("train", k, v, iteration=trainer.epoch) + for k, v in trainer.lr.items(): + task.get_logger().report_scalar("lr", k, v, iteration=trainer.epoch) def on_fit_epoch_end(trainer): """Reports model information to logger at the end of an epoch.""" if task := Task.current_task(): # You should have access to the validation bboxes under jdict - task.get_logger().report_scalar(title='Epoch Time', - series='Epoch Time', - value=trainer.epoch_time, - iteration=trainer.epoch) + task.get_logger().report_scalar( + title="Epoch Time", series="Epoch Time", value=trainer.epoch_time, iteration=trainer.epoch + ) + for k, v in trainer.metrics.items(): + task.get_logger().report_scalar("val", k, v, iteration=trainer.epoch) if trainer.epoch == 0: from ultralytics.utils.torch_utils import model_info_for_loggers + for k, v in model_info_for_loggers(trainer).items(): task.get_logger().report_single_value(k, v) @@ -112,7 +116,7 @@ def on_val_end(validator): """Logs validation results including labels and predictions.""" if Task.current_task(): # Log val_labels and val_pred - _log_debug_samples(sorted(validator.save_dir.glob('val*.jpg')), 'Validation') + _log_debug_samples(sorted(validator.save_dir.glob("val*.jpg")), "Validation") def on_train_end(trainer): @@ -120,8 +124,11 @@ def on_train_end(trainer): if task := Task.current_task(): # Log final results, CM matrix + PR plots files = [ - 'results.png', 'confusion_matrix.png', 'confusion_matrix_normalized.png', - *(f'{x}_curve.png' for x in ('F1', 'PR', 'P', 'R'))] + "results.png", + "confusion_matrix.png", + "confusion_matrix_normalized.png", + *(f"{x}_curve.png" for x in ("F1", "PR", "P", "R")), + ] files = [(trainer.save_dir / f) for f in files if (trainer.save_dir / f).exists()] # filter for f in files: _log_plot(title=f.stem, plot_path=f) @@ -132,9 +139,14 @@ def on_train_end(trainer): task.update_output_model(model_path=str(trainer.best), model_name=trainer.args.name, auto_delete_file=False) -callbacks = { - 'on_pretrain_routine_start': on_pretrain_routine_start, - 'on_train_epoch_end': on_train_epoch_end, - 'on_fit_epoch_end': on_fit_epoch_end, - 'on_val_end': on_val_end, - 'on_train_end': on_train_end} if clearml else {} +callbacks = ( + { + "on_pretrain_routine_start": on_pretrain_routine_start, + "on_train_epoch_end": on_train_epoch_end, + "on_fit_epoch_end": on_fit_epoch_end, + "on_val_end": on_val_end, + "on_train_end": on_train_end, + } + if clearml + else {} +) diff --git a/ultralytics/utils/callbacks/comet.py b/ultralytics/utils/callbacks/comet.py index 2da71a9..1c5f585 100644 --- a/ultralytics/utils/callbacks/comet.py +++ b/ultralytics/utils/callbacks/comet.py @@ -4,20 +4,20 @@ from ultralytics.utils import LOGGER, RANK, SETTINGS, TESTS_RUNNING, ops try: assert not TESTS_RUNNING # do not log pytest - assert SETTINGS['comet'] is True # verify integration is enabled + assert SETTINGS["comet"] is True # verify integration is enabled import 
comet_ml - assert hasattr(comet_ml, '__version__') # verify package is not directory + assert hasattr(comet_ml, "__version__") # verify package is not directory import os from pathlib import Path # Ensures certain logging functions only run for supported tasks - COMET_SUPPORTED_TASKS = ['detect'] + COMET_SUPPORTED_TASKS = ["detect"] # Names of plots created by YOLOv8 that are logged to Comet - EVALUATION_PLOT_NAMES = 'F1_curve', 'P_curve', 'R_curve', 'PR_curve', 'confusion_matrix' - LABEL_PLOT_NAMES = 'labels', 'labels_correlogram' + EVALUATION_PLOT_NAMES = "F1_curve", "P_curve", "R_curve", "PR_curve", "confusion_matrix" + LABEL_PLOT_NAMES = "labels", "labels_correlogram" _comet_image_prediction_count = 0 @@ -26,37 +26,44 @@ except (ImportError, AssertionError): def _get_comet_mode(): - return os.getenv('COMET_MODE', 'online') + """Returns the mode of comet set in the environment variables, defaults to 'online' if not set.""" + return os.getenv("COMET_MODE", "online") def _get_comet_model_name(): - return os.getenv('COMET_MODEL_NAME', 'YOLOv8') + """Returns the model name for Comet from the environment variable 'COMET_MODEL_NAME' or defaults to 'YOLOv8'.""" + return os.getenv("COMET_MODEL_NAME", "YOLOv8") def _get_eval_batch_logging_interval(): - return int(os.getenv('COMET_EVAL_BATCH_LOGGING_INTERVAL', 1)) + """Get the evaluation batch logging interval from environment variable or use default value 1.""" + return int(os.getenv("COMET_EVAL_BATCH_LOGGING_INTERVAL", 1)) def _get_max_image_predictions_to_log(): - return int(os.getenv('COMET_MAX_IMAGE_PREDICTIONS', 100)) + """Get the maximum number of image predictions to log from the environment variables.""" + return int(os.getenv("COMET_MAX_IMAGE_PREDICTIONS", 100)) def _scale_confidence_score(score): - scale = float(os.getenv('COMET_MAX_CONFIDENCE_SCORE', 100.0)) + """Scales the given confidence score by a factor specified in an environment variable.""" + scale = float(os.getenv("COMET_MAX_CONFIDENCE_SCORE", 100.0)) return score * scale def _should_log_confusion_matrix(): - return os.getenv('COMET_EVAL_LOG_CONFUSION_MATRIX', 'false').lower() == 'true' + """Determines if the confusion matrix should be logged based on the environment variable settings.""" + return os.getenv("COMET_EVAL_LOG_CONFUSION_MATRIX", "false").lower() == "true" def _should_log_image_predictions(): - return os.getenv('COMET_EVAL_LOG_IMAGE_PREDICTIONS', 'true').lower() == 'true' + """Determines whether to log image predictions based on a specified environment variable.""" + return os.getenv("COMET_EVAL_LOG_IMAGE_PREDICTIONS", "true").lower() == "true" def _get_experiment_type(mode, project_name): """Return an experiment based on mode and project name.""" - if mode == 'offline': + if mode == "offline": return comet_ml.OfflineExperiment(project_name=project_name) return comet_ml.Experiment(project_name=project_name) @@ -68,18 +75,21 @@ def _create_experiment(args): return try: comet_mode = _get_comet_mode() - _project_name = os.getenv('COMET_PROJECT_NAME', args.project) + _project_name = os.getenv("COMET_PROJECT_NAME", args.project) experiment = _get_experiment_type(comet_mode, _project_name) experiment.log_parameters(vars(args)) - experiment.log_others({ - 'eval_batch_logging_interval': _get_eval_batch_logging_interval(), - 'log_confusion_matrix_on_eval': _should_log_confusion_matrix(), - 'log_image_predictions': _should_log_image_predictions(), - 'max_image_predictions': _get_max_image_predictions_to_log(), }) - experiment.log_other('Created from', 'yolov8') + 
experiment.log_others( + { + "eval_batch_logging_interval": _get_eval_batch_logging_interval(), + "log_confusion_matrix_on_eval": _should_log_confusion_matrix(), + "log_image_predictions": _should_log_image_predictions(), + "max_image_predictions": _get_max_image_predictions_to_log(), + } + ) + experiment.log_other("Created from", "yolov8") except Exception as e: - LOGGER.warning(f'WARNING ⚠️ Comet installed but not initialized correctly, not logging this run. {e}') + LOGGER.warning(f"WARNING ⚠️ Comet installed but not initialized correctly, not logging this run. {e}") def _fetch_trainer_metadata(trainer): @@ -95,18 +105,14 @@ def _fetch_trainer_metadata(trainer): save_interval = curr_epoch % save_period == 0 save_assets = save and save_period > 0 and save_interval and not final_epoch - return dict( - curr_epoch=curr_epoch, - curr_step=curr_step, - save_assets=save_assets, - final_epoch=final_epoch, - ) + return dict(curr_epoch=curr_epoch, curr_step=curr_step, save_assets=save_assets, final_epoch=final_epoch) def _scale_bounding_box_to_original_image_shape(box, resized_image_shape, original_image_shape, ratio_pad): - """YOLOv8 resizes images during training and the label values - are normalized based on this resized shape. This function rescales the - bounding box labels to the original image shape. + """ + YOLOv8 resizes images during training and the label values are normalized based on this resized shape. + + This function rescales the bounding box labels to the original image shape. """ resized_image_height, resized_image_width = resized_image_shape @@ -126,29 +132,32 @@ def _scale_bounding_box_to_original_image_shape(box, resized_image_shape, origin def _format_ground_truth_annotations_for_detection(img_idx, image_path, batch, class_name_map=None): """Format ground truth annotations for detection.""" - indices = batch['batch_idx'] == img_idx - bboxes = batch['bboxes'][indices] + indices = batch["batch_idx"] == img_idx + bboxes = batch["bboxes"][indices] if len(bboxes) == 0: - LOGGER.debug(f'COMET WARNING: Image: {image_path} has no bounding boxes labels') + LOGGER.debug(f"COMET WARNING: Image: {image_path} has no bounding boxes labels") return None - cls_labels = batch['cls'][indices].squeeze(1).tolist() + cls_labels = batch["cls"][indices].squeeze(1).tolist() if class_name_map: cls_labels = [str(class_name_map[label]) for label in cls_labels] - original_image_shape = batch['ori_shape'][img_idx] - resized_image_shape = batch['resized_shape'][img_idx] - ratio_pad = batch['ratio_pad'][img_idx] + original_image_shape = batch["ori_shape"][img_idx] + resized_image_shape = batch["resized_shape"][img_idx] + ratio_pad = batch["ratio_pad"][img_idx] data = [] for box, label in zip(bboxes, cls_labels): box = _scale_bounding_box_to_original_image_shape(box, resized_image_shape, original_image_shape, ratio_pad) - data.append({ - 'boxes': [box], - 'label': f'gt_{label}', - 'score': _scale_confidence_score(1.0), }) + data.append( + { + "boxes": [box], + "label": f"gt_{label}", + "score": _scale_confidence_score(1.0), + } + ) - return {'name': 'ground_truth', 'data': data} + return {"name": "ground_truth", "data": data} def _format_prediction_annotations_for_detection(image_path, metadata, class_label_map=None): @@ -158,31 +167,34 @@ def _format_prediction_annotations_for_detection(image_path, metadata, class_lab predictions = metadata.get(image_id) if not predictions: - LOGGER.debug(f'COMET WARNING: Image: {image_path} has no bounding boxes predictions') + LOGGER.debug(f"COMET WARNING: Image: 
{image_path} has no bounding boxes predictions")
         return None
 
     data = []
     for prediction in predictions:
-        boxes = prediction['bbox']
-        score = _scale_confidence_score(prediction['score'])
-        cls_label = prediction['category_id']
+        boxes = prediction["bbox"]
+        score = _scale_confidence_score(prediction["score"])
+        cls_label = prediction["category_id"]
         if class_label_map:
             cls_label = str(class_label_map[cls_label])
 
-        data.append({'boxes': [boxes], 'label': cls_label, 'score': score})
+        data.append({"boxes": [boxes], "label": cls_label, "score": score})
 
-    return {'name': 'prediction', 'data': data}
+    return {"name": "prediction", "data": data}
 
 
 def _fetch_annotations(img_idx, image_path, batch, prediction_metadata_map, class_label_map):
     """Join the ground truth and prediction annotations if they exist."""
-    ground_truth_annotations = _format_ground_truth_annotations_for_detection(img_idx, image_path, batch,
-                                                                              class_label_map)
-    prediction_annotations = _format_prediction_annotations_for_detection(image_path, prediction_metadata_map,
-                                                                          class_label_map)
+    ground_truth_annotations = _format_ground_truth_annotations_for_detection(
+        img_idx, image_path, batch, class_label_map
+    )
+    prediction_annotations = _format_prediction_annotations_for_detection(
+        image_path, prediction_metadata_map, class_label_map
+    )
 
     annotations = [
-        annotation for annotation in [ground_truth_annotations, prediction_annotations] if annotation is not None]
+        annotation for annotation in [ground_truth_annotations, prediction_annotations] if annotation is not None
+    ]
     return [annotations] if annotations else None
 
 
@@ -190,8 +202,8 @@ def _create_prediction_metadata_map(model_predictions):
     """Create metadata map for model predictions by grouping them based on image ID."""
     pred_metadata_map = {}
     for prediction in model_predictions:
-        pred_metadata_map.setdefault(prediction['image_id'], [])
-        pred_metadata_map[prediction['image_id']].append(prediction)
+        pred_metadata_map.setdefault(prediction["image_id"], [])
+        pred_metadata_map[prediction["image_id"]].append(prediction)
 
     return pred_metadata_map
 
 
@@ -199,13 +211,9 @@ def _create_prediction_metadata_map(model_predictions):
 def _log_confusion_matrix(experiment, trainer, curr_step, curr_epoch):
     """Log the confusion matrix to Comet experiment."""
     conf_mat = trainer.validator.confusion_matrix.matrix
-    names = list(trainer.data['names'].values()) + ['background']
+    names = list(trainer.data["names"].values()) + ["background"]
     experiment.log_confusion_matrix(
-        matrix=conf_mat,
-        labels=names,
-        max_categories=len(names),
-        epoch=curr_epoch,
-        step=curr_step,
+        matrix=conf_mat, labels=names, max_categories=len(names), epoch=curr_epoch, step=curr_step
     )
 
 
@@ -243,7 +251,7 @@ def _log_image_predictions(experiment, validator, curr_step):
             if (batch_idx + 1) % batch_logging_interval != 0:
                 continue
 
-            image_paths = batch['im_file']
+            image_paths = batch["im_file"]
             for img_idx, image_path in enumerate(image_paths):
                 if _comet_image_prediction_count >= max_image_predictions:
                     return
@@ -267,28 +275,23 @@ def _log_image_predictions(experiment, validator, curr_step):
 
 def _log_plots(experiment, trainer):
     """Logs evaluation plots and label plots for the experiment."""
-    plot_filenames = [trainer.save_dir / f'{plots}.png' for plots in EVALUATION_PLOT_NAMES]
+    plot_filenames = [trainer.save_dir / f"{plots}.png" for plots in EVALUATION_PLOT_NAMES]
     _log_images(experiment, plot_filenames, None)
 
-    label_plot_filenames = [trainer.save_dir / f'{labels}.jpg' for labels in LABEL_PLOT_NAMES]
+    label_plot_filenames
= [trainer.save_dir / f"{labels}.jpg" for labels in LABEL_PLOT_NAMES] _log_images(experiment, label_plot_filenames, None) def _log_model(experiment, trainer): """Log the best-trained model to Comet.ml.""" model_name = _get_comet_model_name() - experiment.log_model( - model_name, - file_or_folder=str(trainer.best), - file_name='best.pt', - overwrite=True, - ) + experiment.log_model(model_name, file_or_folder=str(trainer.best), file_name="best.pt", overwrite=True) def on_pretrain_routine_start(trainer): """Creates or resumes a CometML experiment at the start of a YOLO pre-training routine.""" experiment = comet_ml.get_global_experiment() - is_alive = getattr(experiment, 'alive', False) + is_alive = getattr(experiment, "alive", False) if not experiment or not is_alive: _create_experiment(trainer.args) @@ -300,17 +303,13 @@ def on_train_epoch_end(trainer): return metadata = _fetch_trainer_metadata(trainer) - curr_epoch = metadata['curr_epoch'] - curr_step = metadata['curr_step'] + curr_epoch = metadata["curr_epoch"] + curr_step = metadata["curr_step"] - experiment.log_metrics( - trainer.label_loss_items(trainer.tloss, prefix='train'), - step=curr_step, - epoch=curr_epoch, - ) + experiment.log_metrics(trainer.label_loss_items(trainer.tloss, prefix="train"), step=curr_step, epoch=curr_epoch) if curr_epoch == 1: - _log_images(experiment, trainer.save_dir.glob('train_batch*.jpg'), curr_step) + _log_images(experiment, trainer.save_dir.glob("train_batch*.jpg"), curr_step) def on_fit_epoch_end(trainer): @@ -320,14 +319,15 @@ def on_fit_epoch_end(trainer): return metadata = _fetch_trainer_metadata(trainer) - curr_epoch = metadata['curr_epoch'] - curr_step = metadata['curr_step'] - save_assets = metadata['save_assets'] + curr_epoch = metadata["curr_epoch"] + curr_step = metadata["curr_step"] + save_assets = metadata["save_assets"] experiment.log_metrics(trainer.metrics, step=curr_step, epoch=curr_epoch) experiment.log_metrics(trainer.lr, step=curr_step, epoch=curr_epoch) if curr_epoch == 1: from ultralytics.utils.torch_utils import model_info_for_loggers + experiment.log_metrics(model_info_for_loggers(trainer), step=curr_step, epoch=curr_epoch) if not save_assets: @@ -347,8 +347,8 @@ def on_train_end(trainer): return metadata = _fetch_trainer_metadata(trainer) - curr_epoch = metadata['curr_epoch'] - curr_step = metadata['curr_step'] + curr_epoch = metadata["curr_epoch"] + curr_step = metadata["curr_step"] plots = trainer.args.plots _log_model(experiment, trainer) @@ -363,8 +363,13 @@ def on_train_end(trainer): _comet_image_prediction_count = 0 -callbacks = { - 'on_pretrain_routine_start': on_pretrain_routine_start, - 'on_train_epoch_end': on_train_epoch_end, - 'on_fit_epoch_end': on_fit_epoch_end, - 'on_train_end': on_train_end} if comet_ml else {} +callbacks = ( + { + "on_pretrain_routine_start": on_pretrain_routine_start, + "on_train_epoch_end": on_train_epoch_end, + "on_fit_epoch_end": on_fit_epoch_end, + "on_train_end": on_train_end, + } + if comet_ml + else {} +) diff --git a/ultralytics/utils/callbacks/dvc.py b/ultralytics/utils/callbacks/dvc.py index b5bfa9d..ab51dc5 100644 --- a/ultralytics/utils/callbacks/dvc.py +++ b/ultralytics/utils/callbacks/dvc.py @@ -1,26 +1,18 @@ # Ultralytics YOLO 🚀, AGPL-3.0 license -from ultralytics.utils import LOGGER, SETTINGS, TESTS_RUNNING +from ultralytics.utils import LOGGER, SETTINGS, TESTS_RUNNING, checks try: assert not TESTS_RUNNING # do not log pytest - assert SETTINGS['dvc'] is True # verify integration is enabled + assert SETTINGS["dvc"] is True # verify 
integration is enabled import dvclive - assert hasattr(dvclive, '__version__') # verify package is not directory + assert checks.check_version("dvclive", "2.11.0", verbose=True) import os import re - from importlib.metadata import version from pathlib import Path - import pkg_resources as pkg - - ver = version('dvclive') - if pkg.parse_version(ver) < pkg.parse_version('2.11.0'): - LOGGER.debug(f'DVCLive is detected but version {ver} is incompatible (>=2.11 required).') - dvclive = None # noqa: F811 - # DVCLive logger instance live = None _processed_plots = {} @@ -33,108 +25,121 @@ except (ImportError, AssertionError, TypeError): dvclive = None -def _log_images(path, prefix=''): +def _log_images(path, prefix=""): + """Logs images at specified path with an optional prefix using DVCLive.""" if live: name = path.name # Group images by batch to enable sliders in UI - if m := re.search(r'_batch(\d+)', name): + if m := re.search(r"_batch(\d+)", name): ni = m[1] - new_stem = re.sub(r'_batch(\d+)', '_batch', path.stem) + new_stem = re.sub(r"_batch(\d+)", "_batch", path.stem) name = (Path(new_stem) / ni).with_suffix(path.suffix) live.log_image(os.path.join(prefix, name), path) -def _log_plots(plots, prefix=''): +def _log_plots(plots, prefix=""): + """Logs plot images for training progress if they have not been previously processed.""" for name, params in plots.items(): - timestamp = params['timestamp'] + timestamp = params["timestamp"] if _processed_plots.get(name) != timestamp: _log_images(name, prefix) _processed_plots[name] = timestamp def _log_confusion_matrix(validator): + """Logs the confusion matrix for the given validator using DVCLive.""" targets = [] preds = [] matrix = validator.confusion_matrix.matrix names = list(validator.names.values()) - if validator.confusion_matrix.task == 'detect': - names += ['background'] + if validator.confusion_matrix.task == "detect": + names += ["background"] for ti, pred in enumerate(matrix.T.astype(int)): for pi, num in enumerate(pred): targets.extend([names[ti]] * num) preds.extend([names[pi]] * num) - live.log_sklearn_plot('confusion_matrix', targets, preds, name='cf.json', normalized=True) + live.log_sklearn_plot("confusion_matrix", targets, preds, name="cf.json", normalized=True) def on_pretrain_routine_start(trainer): + """Initializes DVCLive logger for training metadata during pre-training routine.""" try: global live live = dvclive.Live(save_dvc_exp=True, cache_images=True) - LOGGER.info( - f'DVCLive is detected and auto logging is enabled (can be disabled in the {SETTINGS.file} with `dvc: false`).' - ) + LOGGER.info("DVCLive is detected and auto logging is enabled (run 'yolo settings dvc=False' to disable).") except Exception as e: - LOGGER.warning(f'WARNING ⚠️ DVCLive installed but not initialized correctly, not logging this run. {e}') + LOGGER.warning(f"WARNING ⚠️ DVCLive installed but not initialized correctly, not logging this run. 
{e}") def on_pretrain_routine_end(trainer): - _log_plots(trainer.plots, 'train') + """Logs plots related to the training process at the end of the pretraining routine.""" + _log_plots(trainer.plots, "train") def on_train_start(trainer): + """Logs the training parameters if DVCLive logging is active.""" if live: live.log_params(trainer.args) def on_train_epoch_start(trainer): + """Sets the global variable _training_epoch value to True at the start of training each epoch.""" global _training_epoch _training_epoch = True def on_fit_epoch_end(trainer): + """Logs training metrics and model info, and advances to next step on the end of each fit epoch.""" global _training_epoch if live and _training_epoch: - all_metrics = {**trainer.label_loss_items(trainer.tloss, prefix='train'), **trainer.metrics, **trainer.lr} + all_metrics = {**trainer.label_loss_items(trainer.tloss, prefix="train"), **trainer.metrics, **trainer.lr} for metric, value in all_metrics.items(): live.log_metric(metric, value) if trainer.epoch == 0: from ultralytics.utils.torch_utils import model_info_for_loggers + for metric, value in model_info_for_loggers(trainer).items(): live.log_metric(metric, value, plot=False) - _log_plots(trainer.plots, 'train') - _log_plots(trainer.validator.plots, 'val') + _log_plots(trainer.plots, "train") + _log_plots(trainer.validator.plots, "val") live.next_step() _training_epoch = False def on_train_end(trainer): + """Logs the best metrics, plots, and confusion matrix at the end of training if DVCLive is active.""" if live: # At the end log the best metrics. It runs validator on the best model internally. - all_metrics = {**trainer.label_loss_items(trainer.tloss, prefix='train'), **trainer.metrics, **trainer.lr} + all_metrics = {**trainer.label_loss_items(trainer.tloss, prefix="train"), **trainer.metrics, **trainer.lr} for metric, value in all_metrics.items(): live.log_metric(metric, value, plot=False) - _log_plots(trainer.plots, 'val') - _log_plots(trainer.validator.plots, 'val') + _log_plots(trainer.plots, "val") + _log_plots(trainer.validator.plots, "val") _log_confusion_matrix(trainer.validator) if trainer.best.exists(): - live.log_artifact(trainer.best, copy=True, type='model') + live.log_artifact(trainer.best, copy=True, type="model") live.end() -callbacks = { - 'on_pretrain_routine_start': on_pretrain_routine_start, - 'on_pretrain_routine_end': on_pretrain_routine_end, - 'on_train_start': on_train_start, - 'on_train_epoch_start': on_train_epoch_start, - 'on_fit_epoch_end': on_fit_epoch_end, - 'on_train_end': on_train_end} if dvclive else {} +callbacks = ( + { + "on_pretrain_routine_start": on_pretrain_routine_start, + "on_pretrain_routine_end": on_pretrain_routine_end, + "on_train_start": on_train_start, + "on_train_epoch_start": on_train_epoch_start, + "on_fit_epoch_end": on_fit_epoch_end, + "on_train_end": on_train_end, + } + if dvclive + else {} +) diff --git a/ultralytics/utils/callbacks/hub.py b/ultralytics/utils/callbacks/hub.py index 7171fb9..cdb42b9 100644 --- a/ultralytics/utils/callbacks/hub.py +++ b/ultralytics/utils/callbacks/hub.py @@ -9,51 +9,67 @@ from ultralytics.utils import LOGGER, SETTINGS def on_pretrain_routine_end(trainer): """Logs info before starting timer for upload rate limit.""" - session = getattr(trainer, 'hub_session', None) + session = getattr(trainer, "hub_session", None) if session: # Start timer for upload rate limit - LOGGER.info(f'{PREFIX}View model at {HUB_WEB_ROOT}/models/{session.model_id} 🚀') - session.timers = {'metrics': time(), 'ckpt': time()} # 
start timer on session.rate_limit
+        session.timers = {
+            "metrics": time(),
+            "ckpt": time(),
+        }  # start timer on session.rate_limit
 
 
 def on_fit_epoch_end(trainer):
     """Uploads training progress metrics at the end of each epoch."""
-    session = getattr(trainer, 'hub_session', None)
+    session = getattr(trainer, "hub_session", None)
     if session:
         # Upload metrics after val end
-        all_plots = {**trainer.label_loss_items(trainer.tloss, prefix='train'), **trainer.metrics}
+        all_plots = {
+            **trainer.label_loss_items(trainer.tloss, prefix="train"),
+            **trainer.metrics,
+        }
         if trainer.epoch == 0:
             from ultralytics.utils.torch_utils import model_info_for_loggers
+
             all_plots = {**all_plots, **model_info_for_loggers(trainer)}
+
         session.metrics_queue[trainer.epoch] = json.dumps(all_plots)
-        if time() - session.timers['metrics'] > session.rate_limits['metrics']:
+
+        # If any metrics fail to upload, add them to the queue to attempt uploading again.
+        if session.metrics_upload_failed_queue:
+            session.metrics_queue.update(session.metrics_upload_failed_queue)
+
+        if time() - session.timers["metrics"] > session.rate_limits["metrics"]:
             session.upload_metrics()
-            session.timers['metrics'] = time()  # reset timer
+            session.timers["metrics"] = time()  # reset timer
             session.metrics_queue = {}  # reset queue
 
 
 def on_model_save(trainer):
     """Saves checkpoints to Ultralytics HUB with rate limiting."""
-    session = getattr(trainer, 'hub_session', None)
+    session = getattr(trainer, "hub_session", None)
     if session:
         # Upload checkpoints with rate limiting
         is_best = trainer.best_fitness == trainer.fitness
-        if time() - session.timers['ckpt'] > session.rate_limits['ckpt']:
-            LOGGER.info(f'{PREFIX}Uploading checkpoint {HUB_WEB_ROOT}/models/{session.model_id}')
+        if time() - session.timers["ckpt"] > session.rate_limits["ckpt"]:
+            LOGGER.info(f"{PREFIX}Uploading checkpoint {HUB_WEB_ROOT}/models/{session.model.id}")
             session.upload_model(trainer.epoch, trainer.last, is_best)
-            session.timers['ckpt'] = time()  # reset timer
+            session.timers["ckpt"] = time()  # reset timer
 
 
 def on_train_end(trainer):
     """Upload final model and metrics to Ultralytics HUB at the end of training."""
-    session = getattr(trainer, 'hub_session', None)
+    session = getattr(trainer, "hub_session", None)
     if session:
         # Upload final model and metrics with exponential backoff
-        LOGGER.info(f'{PREFIX}Syncing final model...')
-        session.upload_model(trainer.epoch, trainer.best, map=trainer.metrics.get('metrics/mAP50-95(B)', 0), final=True)
+        LOGGER.info(f"{PREFIX}Syncing final model...")
+        session.upload_model(
+            trainer.epoch,
+            trainer.best,
+            map=trainer.metrics.get("metrics/mAP50-95(B)", 0),
+            final=True,
+        )
         session.alive = False  # stop heartbeats
-        LOGGER.info(f'{PREFIX}Done ✅\n'
-                    f'{PREFIX}View model at {HUB_WEB_ROOT}/models/{session.model_id} 🚀')
+        LOGGER.info(f"{PREFIX}Done ✅\n" f"{PREFIX}View model at {session.model_url} 🚀")
 
 
 def on_train_start(trainer):
@@ -76,12 +92,17 @@ def on_export_start(exporter):
     events(exporter.args)
 
 
-callbacks = {
-    'on_pretrain_routine_end': on_pretrain_routine_end,
-    'on_fit_epoch_end': on_fit_epoch_end,
-    'on_model_save': on_model_save,
-    'on_train_end': on_train_end,
-    'on_train_start': on_train_start,
-    'on_val_start': on_val_start,
-    'on_predict_start': on_predict_start,
-    'on_export_start': on_export_start} if SETTINGS['hub'] is True else {}  # verify enabled
+callbacks = (
+    {
+        "on_pretrain_routine_end": on_pretrain_routine_end,
+        "on_fit_epoch_end": on_fit_epoch_end,
+        "on_model_save": on_model_save,
+        "on_train_end": on_train_end,
+        "on_train_start": on_train_start,
+        "on_val_start": on_val_start,
+        "on_predict_start": on_predict_start,
+        "on_export_start": on_export_start,
+    }
+    if SETTINGS["hub"] is True
+    else {}
+)  # verify enabled
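The HUB callbacks above gate uploads behind per-session timers. A standalone sketch of the same rate-limit pattern, with assumed limits rather than the real HUB session values:

```python
# Timer-based rate limiting, mirroring on_fit_epoch_end/on_model_save above (values assumed).
from time import time

timers = {"metrics": time(), "ckpt": time()}
rate_limits = {"metrics": 3.0, "ckpt": 900.0}  # seconds between uploads
queue = {}


def maybe_upload(name, payload, upload):
    """Queue payload; flush only once the per-name rate limit has elapsed."""
    queue.setdefault(name, []).append(payload)
    if time() - timers[name] > rate_limits[name]:
        upload(queue.pop(name))
        timers[name] = time()  # reset timer
```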
diff --git a/ultralytics/utils/callbacks/mlflow.py b/ultralytics/utils/callbacks/mlflow.py
index 8d4501b..e554620 100644
--- a/ultralytics/utils/callbacks/mlflow.py
+++ b/ultralytics/utils/callbacks/mlflow.py
@@ -1,70 +1,133 @@
 # Ultralytics YOLO 🚀, AGPL-3.0 license
+"""
+MLflow Logging for Ultralytics YOLO.
 
-from ultralytics.utils import LOGGER, ROOT, SETTINGS, TESTS_RUNNING, colorstr
+This module enables MLflow logging for Ultralytics YOLO. It logs metrics, parameters, and model artifacts.
+For setting up, a tracking URI should be specified. The logging can be customized using environment variables.
+
+Commands:
+    1. To set a project name:
+        `export MLFLOW_EXPERIMENT_NAME=<your_experiment_name>` or use the project=<project> argument
+
+    2. To set a run name:
+        `export MLFLOW_RUN=<your_run_name>` or use the name=<name> argument
+
+    3. To start a local MLflow server:
+        mlflow server --backend-store-uri runs/mlflow
+    It will by default start a local server at http://127.0.0.1:5000.
+    To specify a different URI, set the MLFLOW_TRACKING_URI environment variable.
+
+    4. To kill all running MLflow server instances:
+        ps aux | grep 'mlflow' | grep -v 'grep' | awk '{print $2}' | xargs kill -9
+"""
+
+from ultralytics.utils import LOGGER, RUNS_DIR, SETTINGS, TESTS_RUNNING, colorstr
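An end-to-end sketch of the configuration described in the docstring above (values are illustrative; assumes `mlflow` and `ultralytics` are installed):

```python
# Sketch: drive the MLflow callbacks below via the documented environment variables.
import os

os.environ["MLFLOW_TRACKING_URI"] = "runs/mlflow"  # local file store, the default
os.environ["MLFLOW_EXPERIMENT_NAME"] = "yolo-experiments"  # illustrative name
os.environ["MLFLOW_RUN"] = "baseline"  # illustrative name

from ultralytics import YOLO

YOLO("yolov8n.pt").train(data="coco8.yaml", epochs=1)
# Afterwards: `mlflow server --backend-store-uri runs/mlflow`, then open http://127.0.0.1:5000
```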
 
 try:
-    assert not TESTS_RUNNING  # do not log pytest
-    assert SETTINGS['mlflow'] is True  # verify integration is enabled
+    import os
+
+    assert not TESTS_RUNNING or "test_mlflow" in os.environ.get("PYTEST_CURRENT_TEST", "")  # do not log pytest
+    assert SETTINGS["mlflow"] is True  # verify integration is enabled
     import mlflow
 
-    assert hasattr(mlflow, '__version__')  # verify package is not directory
+    assert hasattr(mlflow, "__version__")  # verify package is not directory
+    from pathlib import Path
 
-    import os
-    import re
+    PREFIX = colorstr("MLflow: ")
+    SANITIZE = lambda x: {k.replace("(", "").replace(")", ""): float(v) for k, v in x.items()}
 
 except (ImportError, AssertionError):
     mlflow = None
 
 
 def on_pretrain_routine_end(trainer):
-    """Logs training parameters to MLflow."""
-    global mlflow, run, experiment_name
+    """
+    Log training parameters to MLflow at the end of the pretraining routine.
 
-    if os.environ.get('MLFLOW_TRACKING_URI') is None:
-        mlflow = None
+    This function sets up MLflow logging based on environment variables and trainer arguments. It sets the tracking URI,
+    experiment name, and run name, then starts the MLflow run if not already active. It finally logs the parameters
+    from the trainer.
 
+    Args:
+        trainer (ultralytics.engine.trainer.BaseTrainer): The training object with arguments and parameters to log.
+
+    Global:
+        mlflow: The imported mlflow module to use for logging.
+
+    Environment Variables:
+        MLFLOW_TRACKING_URI: The URI for MLflow tracking. If not set, defaults to 'runs/mlflow'.
+        MLFLOW_EXPERIMENT_NAME: The name of the MLflow experiment. If not set, defaults to trainer.args.project.
+        MLFLOW_RUN: The name of the MLflow run. If not set, defaults to trainer.args.name.
+        MLFLOW_KEEP_RUN_ACTIVE: Boolean indicating whether to keep the MLflow run active after the end of the training phase.
+    """
+    global mlflow
+
+    uri = os.environ.get("MLFLOW_TRACKING_URI") or str(RUNS_DIR / "mlflow")
+    LOGGER.debug(f"{PREFIX} tracking uri: {uri}")
+    mlflow.set_tracking_uri(uri)
+
+    # Set experiment and run names
+    experiment_name = os.environ.get("MLFLOW_EXPERIMENT_NAME") or trainer.args.project or "/Shared/YOLOv8"
+    run_name = os.environ.get("MLFLOW_RUN") or trainer.args.name
+    mlflow.set_experiment(experiment_name)
+
+    mlflow.autolog()
+    try:
+        active_run = mlflow.active_run() or mlflow.start_run(run_name=run_name)
+        LOGGER.info(f"{PREFIX}logging run_id({active_run.info.run_id}) to {uri}")
+        if Path(uri).is_dir():
+            LOGGER.info(f"{PREFIX}view at http://127.0.0.1:5000 with 'mlflow server --backend-store-uri {uri}'")
+        LOGGER.info(f"{PREFIX}disable with 'yolo settings mlflow=False'")
+        mlflow.log_params(dict(trainer.args))
+    except Exception as e:
+        LOGGER.warning(f"{PREFIX}WARNING ⚠️ Failed to initialize: {e}\n" f"{PREFIX}WARNING ⚠️ Not tracking this run")
+
+
+def on_train_epoch_end(trainer):
+    """Log training metrics at the end of each train epoch to MLflow."""
     if mlflow:
-        mlflow_location = os.environ['MLFLOW_TRACKING_URI']  # "http://192.168.xxx.xxx:5000"
-        mlflow.set_tracking_uri(mlflow_location)
-
-        experiment_name = os.environ.get('MLFLOW_EXPERIMENT_NAME') or trainer.args.project or '/Shared/YOLOv8'
-        run_name = os.environ.get('MLFLOW_RUN') or trainer.args.name
-        experiment = mlflow.get_experiment_by_name(experiment_name)
-        if experiment is None:
-            mlflow.create_experiment(experiment_name)
-        mlflow.set_experiment(experiment_name)
-
-        prefix = colorstr('MLFlow: ')
-        try:
-            run, active_run = mlflow, mlflow.active_run()
-            if not active_run:
-                active_run = mlflow.start_run(experiment_id=experiment.experiment_id, run_name=run_name)
-            LOGGER.info(f'{prefix}Using run_id({active_run.info.run_id}) at {mlflow_location}')
-            run.log_params(vars(trainer.model.args))
-        except Exception as err:
-            LOGGER.error(f'{prefix}Failing init - {repr(err)}')
-            LOGGER.warning(f'{prefix}Continuing without Mlflow')
+        mlflow.log_metrics(
+            metrics={
+                **SANITIZE(trainer.lr),
+                **SANITIZE(trainer.label_loss_items(trainer.tloss, prefix="train")),
+            },
+            step=trainer.epoch,
+        )
 
 
 def on_fit_epoch_end(trainer):
-    """Logs training metrics to Mlflow."""
+    """Log training metrics at the end of each fit epoch to MLflow."""
     if mlflow:
-        metrics_dict = {f"{re.sub('[()]', '', k)}": float(v) for k, v in trainer.metrics.items()}
-        run.log_metrics(metrics=metrics_dict, step=trainer.epoch)
+        mlflow.log_metrics(metrics=SANITIZE(trainer.metrics), step=trainer.epoch)
 
 
 def on_train_end(trainer):
-    """Called at end of train loop to log model artifact info."""
+    """Log model artifacts at the end of the training."""
     if mlflow:
-        run.log_artifact(trainer.last)
-        run.log_artifact(trainer.best)
-        run.pyfunc.log_model(artifact_path=experiment_name,
-                             code_path=[str(ROOT.parent)],
-                             artifacts={'model_path': str(trainer.save_dir)},
-                             python_model=run.pyfunc.PythonModel())
+        mlflow.log_artifact(str(trainer.best.parent))  # log save_dir/weights directory with best.pt and last.pt
+        for f in trainer.save_dir.glob("*"):  # log all other files in save_dir
+            if f.suffix in {".png", ".jpg", ".csv", ".pt", ".yaml"}:
+                mlflow.log_artifact(str(f))
+        keep_run_active = os.environ.get("MLFLOW_KEEP_RUN_ACTIVE", "False").lower() == "true"
+        if keep_run_active:
+            LOGGER.info(f"{PREFIX}mlflow run still alive, remember to close it using mlflow.end_run()")
+        else:
+            mlflow.end_run()
+            LOGGER.debug(f"{PREFIX}mlflow run ended")
+
+        LOGGER.info(
+            f"{PREFIX}results logged to {mlflow.get_tracking_uri()}\n"
+            f"{PREFIX}disable with 'yolo settings mlflow=False'"
+        )
 
 
-callbacks = {
-    'on_pretrain_routine_end': on_pretrain_routine_end,
-    'on_fit_epoch_end': on_fit_epoch_end,
-    'on_train_end': on_train_end} if mlflow else {}
+callbacks = (
+    {
+        "on_pretrain_routine_end": on_pretrain_routine_end,
+        "on_train_epoch_end": on_train_epoch_end,
+        "on_fit_epoch_end": on_fit_epoch_end,
+        "on_train_end": on_train_end,
+    }
+    if mlflow
+    else {}
+)
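For reference, the `SANITIZE` lambda above exists because MLflow rejects '(' and ')' in metric names, so keys such as 'metrics/mAP50-95(B)' must be stripped before logging:

```python
# Same expression as the SANITIZE lambda above, applied to a sample metric dict.
SANITIZE = lambda x: {k.replace("(", "").replace(")", ""): float(v) for k, v in x.items()}

print(SANITIZE({"metrics/mAP50-95(B)": 0.42}))  # {'metrics/mAP50-95B': 0.42}
```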
diff --git a/ultralytics/utils/callbacks/neptune.py b/ultralytics/utils/callbacks/neptune.py
index 40916a3..6be8a82 100644
--- a/ultralytics/utils/callbacks/neptune.py
+++ b/ultralytics/utils/callbacks/neptune.py
@@ -4,11 +4,11 @@ from ultralytics.utils import LOGGER, SETTINGS, TESTS_RUNNING
 
 try:
     assert not TESTS_RUNNING  # do not log pytest
-    assert SETTINGS['neptune'] is True  # verify integration is enabled
+    assert SETTINGS["neptune"] is True  # verify integration is enabled
     import neptune
     from neptune.types import File
 
-    assert hasattr(neptune, '__version__')
+    assert hasattr(neptune, "__version__")
 
     run = None  # NeptuneAI experiment logger instance
 
@@ -23,55 +23,55 @@ def _log_scalars(scalars, step=0):
             run[k].append(value=v, step=step)
 
 
-def _log_images(imgs_dict, group=''):
+def _log_images(imgs_dict, group=""):
     """Log images to the NeptuneAI experiment logger."""
     if run:
         for k, v in imgs_dict.items():
-            run[f'{group}/{k}'].upload(File(v))
+            run[f"{group}/{k}"].upload(File(v))
 
 
 def _log_plot(title, plot_path):
-    """Log plots to the NeptuneAI experiment logger."""
     """
-    Log image as plot in the plot section of NeptuneAI
+    Log plots to the NeptuneAI experiment logger.
 
-    arguments:
-    title (str) Title of the plot
-    plot_path (PosixPath or str) Path to the saved image file
-    """
+    Args:
+        title (str): Title of the plot.
+        plot_path (PosixPath | str): Path to the saved image file.
+    """
     import matplotlib.image as mpimg
     import matplotlib.pyplot as plt
 
     img = mpimg.imread(plot_path)
     fig = plt.figure()
-    ax = fig.add_axes([0, 0, 1, 1], frameon=False, aspect='auto', xticks=[], yticks=[])  # no ticks
+    ax = fig.add_axes([0, 0, 1, 1], frameon=False, aspect="auto", xticks=[], yticks=[])  # no ticks
     ax.imshow(img)
-    run[f'Plots/{title}'].upload(fig)
+    run[f"Plots/{title}"].upload(fig)
 
 
 def on_pretrain_routine_start(trainer):
     """Callback function called before the training routine starts."""
     try:
         global run
-        run = neptune.init_run(project=trainer.args.project or 'YOLOv8', name=trainer.args.name, tags=['YOLOv8'])
-        run['Configuration/Hyperparameters'] = {k: '' if v is None else v for k, v in vars(trainer.args).items()}
+        run = neptune.init_run(project=trainer.args.project or "YOLOv8", name=trainer.args.name, tags=["YOLOv8"])
+        run["Configuration/Hyperparameters"] = {k: "" if v is None else v for k, v in vars(trainer.args).items()}
     except Exception as e:
-        LOGGER.warning(f'WARNING ⚠️ NeptuneAI installed but not initialized correctly, not logging this run. {e}')
+        LOGGER.warning(f"WARNING ⚠️ NeptuneAI installed but not initialized correctly, not logging this run.
{e}") def on_train_epoch_end(trainer): """Callback function called at end of each training epoch.""" - _log_scalars(trainer.label_loss_items(trainer.tloss, prefix='train'), trainer.epoch + 1) + _log_scalars(trainer.label_loss_items(trainer.tloss, prefix="train"), trainer.epoch + 1) _log_scalars(trainer.lr, trainer.epoch + 1) if trainer.epoch == 1: - _log_images({f.stem: str(f) for f in trainer.save_dir.glob('train_batch*.jpg')}, 'Mosaic') + _log_images({f.stem: str(f) for f in trainer.save_dir.glob("train_batch*.jpg")}, "Mosaic") def on_fit_epoch_end(trainer): """Callback function called at end of each fit (train+val) epoch.""" if run and trainer.epoch == 0: from ultralytics.utils.torch_utils import model_info_for_loggers - run['Configuration/Model'] = model_info_for_loggers(trainer) + + run["Configuration/Model"] = model_info_for_loggers(trainer) _log_scalars(trainer.metrics, trainer.epoch + 1) @@ -79,7 +79,7 @@ def on_val_end(validator): """Callback function called at end of each validation.""" if run: # Log val_labels and val_pred - _log_images({f.stem: str(f) for f in validator.save_dir.glob('val*.jpg')}, 'Validation') + _log_images({f.stem: str(f) for f in validator.save_dir.glob("val*.jpg")}, "Validation") def on_train_end(trainer): @@ -87,19 +87,26 @@ def on_train_end(trainer): if run: # Log final results, CM matrix + PR plots files = [ - 'results.png', 'confusion_matrix.png', 'confusion_matrix_normalized.png', - *(f'{x}_curve.png' for x in ('F1', 'PR', 'P', 'R'))] + "results.png", + "confusion_matrix.png", + "confusion_matrix_normalized.png", + *(f"{x}_curve.png" for x in ("F1", "PR", "P", "R")), + ] files = [(trainer.save_dir / f) for f in files if (trainer.save_dir / f).exists()] # filter for f in files: _log_plot(title=f.stem, plot_path=f) # Log the final model - run[f'weights/{trainer.args.name or trainer.args.task}/{str(trainer.best.name)}'].upload(File(str( - trainer.best))) + run[f"weights/{trainer.args.name or trainer.args.task}/{trainer.best.name}"].upload(File(str(trainer.best))) -callbacks = { - 'on_pretrain_routine_start': on_pretrain_routine_start, - 'on_train_epoch_end': on_train_epoch_end, - 'on_fit_epoch_end': on_fit_epoch_end, - 'on_val_end': on_val_end, - 'on_train_end': on_train_end} if neptune else {} +callbacks = ( + { + "on_pretrain_routine_start": on_pretrain_routine_start, + "on_train_epoch_end": on_train_epoch_end, + "on_fit_epoch_end": on_fit_epoch_end, + "on_val_end": on_val_end, + "on_train_end": on_train_end, + } + if neptune + else {} +) diff --git a/ultralytics/utils/callbacks/raytune.py b/ultralytics/utils/callbacks/raytune.py index 417b331..f269455 100644 --- a/ultralytics/utils/callbacks/raytune.py +++ b/ultralytics/utils/callbacks/raytune.py @@ -3,7 +3,7 @@ from ultralytics.utils import SETTINGS try: - assert SETTINGS['raytune'] is True # verify integration is enabled + assert SETTINGS["raytune"] is True # verify integration is enabled import ray from ray import tune from ray.air import session @@ -16,9 +16,14 @@ def on_fit_epoch_end(trainer): """Sends training metrics to Ray Tune at end of each epoch.""" if ray.tune.is_session_enabled(): metrics = trainer.metrics - metrics['epoch'] = trainer.epoch + metrics["epoch"] = trainer.epoch session.report(metrics) -callbacks = { - 'on_fit_epoch_end': on_fit_epoch_end, } if tune else {} +callbacks = ( + { + "on_fit_epoch_end": on_fit_epoch_end, + } + if tune + else {} +) diff --git a/ultralytics/utils/callbacks/tensorboard.py b/ultralytics/utils/callbacks/tensorboard.py index c1fce53..59024ee 100644 --- 
a/ultralytics/utils/callbacks/tensorboard.py +++ b/ultralytics/utils/callbacks/tensorboard.py @@ -1,17 +1,25 @@ # Ultralytics YOLO 🚀, AGPL-3.0 license +import contextlib from ultralytics.utils import LOGGER, SETTINGS, TESTS_RUNNING, colorstr try: - # WARNING: do not move import due to protobuf issue in https://github.com/ultralytics/ultralytics/pull/4674 + # WARNING: do not move SummaryWriter import due to protobuf bug https://github.com/ultralytics/ultralytics/pull/4674 from torch.utils.tensorboard import SummaryWriter assert not TESTS_RUNNING # do not log pytest - assert SETTINGS['tensorboard'] is True # verify integration is enabled + assert SETTINGS["tensorboard"] is True # verify integration is enabled WRITER = None # TensorBoard SummaryWriter instance + PREFIX = colorstr("TensorBoard: ") -except (ImportError, AssertionError, TypeError): + # Imports below only required if TensorBoard enabled + import warnings + from copy import deepcopy + from ultralytics.utils.torch_utils import de_parallel, torch + +except (ImportError, AssertionError, TypeError, AttributeError): # TypeError for handling 'Descriptors cannot not be created directly.' protobuf errors in Windows + # AttributeError: module 'tensorflow' has no attribute 'io' if 'tensorflow' not installed SummaryWriter = None @@ -24,20 +32,38 @@ def _log_scalars(scalars, step=0): def _log_tensorboard_graph(trainer): """Log model graph to TensorBoard.""" - try: - import warnings - from ultralytics.utils.torch_utils import de_parallel, torch + # Input image + imgsz = trainer.args.imgsz + imgsz = (imgsz, imgsz) if isinstance(imgsz, int) else imgsz + p = next(trainer.model.parameters()) # for device, type + im = torch.zeros((1, 3, *imgsz), device=p.device, dtype=p.dtype) # input image (must be zeros, not empty) - imgsz = trainer.args.imgsz - imgsz = (imgsz, imgsz) if isinstance(imgsz, int) else imgsz - p = next(trainer.model.parameters()) # for device, type - im = torch.zeros((1, 3, *imgsz), device=p.device, dtype=p.dtype) # input image (must be zeros, not empty) - with warnings.catch_warnings(): - warnings.simplefilter('ignore', category=UserWarning) # suppress jit trace warning + with warnings.catch_warnings(): + warnings.simplefilter("ignore", category=UserWarning) # suppress jit trace warning + warnings.simplefilter("ignore", category=torch.jit.TracerWarning) # suppress jit trace warning + + # Try simple method first (YOLO) + with contextlib.suppress(Exception): + trainer.model.eval() # place in .eval() mode to avoid BatchNorm statistics changes WRITER.add_graph(torch.jit.trace(de_parallel(trainer.model), im, strict=False), []) - except Exception as e: - LOGGER.warning(f'WARNING ⚠️ TensorBoard graph visualization failure {e}') + LOGGER.info(f"{PREFIX}model graph visualization added ✅") + return + + # Fallback to TorchScript export steps (RTDETR) + try: + model = deepcopy(de_parallel(trainer.model)) + model.eval() + model = model.fuse(verbose=False) + for m in model.modules(): + if hasattr(m, "export"): # Detect, RTDETRDecoder (Segment and Pose use Detect base class) + m.export = True + m.format = "torchscript" + model(im) # dry run + WRITER.add_graph(torch.jit.trace(model, im, strict=False), []) + LOGGER.info(f"{PREFIX}model graph visualization added ✅") + except Exception as e: + LOGGER.warning(f"{PREFIX}WARNING ⚠️ TensorBoard graph visualization failure {e}") def on_pretrain_routine_start(trainer): @@ -46,10 +72,9 @@ def on_pretrain_routine_start(trainer): try: global WRITER WRITER = SummaryWriter(str(trainer.save_dir)) - prefix = 
colorstr('TensorBoard: ') - LOGGER.info(f"{prefix}Start with 'tensorboard --logdir {trainer.save_dir}', view at http://localhost:6006/") + LOGGER.info(f"{PREFIX}Start with 'tensorboard --logdir {trainer.save_dir}', view at http://localhost:6006/") except Exception as e: - LOGGER.warning(f'WARNING ⚠️ TensorBoard not initialized correctly, not logging this run. {e}') + LOGGER.warning(f"{PREFIX}WARNING ⚠️ TensorBoard not initialized correctly, not logging this run. {e}") def on_train_start(trainer): @@ -58,9 +83,10 @@ def on_train_start(trainer): _log_tensorboard_graph(trainer) -def on_batch_end(trainer): - """Logs scalar statistics at the end of a training batch.""" - _log_scalars(trainer.label_loss_items(trainer.tloss, prefix='train'), trainer.epoch + 1) +def on_train_epoch_end(trainer): + """Logs scalar statistics at the end of a training epoch.""" + _log_scalars(trainer.label_loss_items(trainer.tloss, prefix="train"), trainer.epoch + 1) + _log_scalars(trainer.lr, trainer.epoch + 1) def on_fit_epoch_end(trainer): @@ -68,8 +94,13 @@ def on_fit_epoch_end(trainer): _log_scalars(trainer.metrics, trainer.epoch + 1) -callbacks = { - 'on_pretrain_routine_start': on_pretrain_routine_start, - 'on_train_start': on_train_start, - 'on_fit_epoch_end': on_fit_epoch_end, - 'on_batch_end': on_batch_end} if SummaryWriter else {} +callbacks = ( + { + "on_pretrain_routine_start": on_pretrain_routine_start, + "on_train_start": on_train_start, + "on_fit_epoch_end": on_fit_epoch_end, + "on_train_epoch_end": on_train_epoch_end, + } + if SummaryWriter + else {} +) diff --git a/ultralytics/utils/callbacks/wb.py b/ultralytics/utils/callbacks/wb.py index 27b3874..25a1b64 100644 --- a/ultralytics/utils/callbacks/wb.py +++ b/ultralytics/utils/callbacks/wb.py @@ -5,10 +5,13 @@ from ultralytics.utils.torch_utils import model_info_for_loggers try: assert not TESTS_RUNNING # do not log pytest - assert SETTINGS['wandb'] is True # verify integration is enabled + assert SETTINGS["wandb"] is True # verify integration is enabled import wandb as wb - assert hasattr(wb, '__version__') + assert hasattr(wb, "__version__") # verify package is not directory + + import numpy as np + import pandas as pd _processed_plots = {} @@ -16,9 +19,89 @@ except (ImportError, AssertionError): wb = None +def _custom_table(x, y, classes, title="Precision Recall Curve", x_title="Recall", y_title="Precision"): + """ + Create and log a custom metric visualization to wandb.plot.pr_curve. + + This function crafts a custom metric visualization that mimics the behavior of wandb's default precision-recall + curve while allowing for enhanced customization. The visual metric is useful for monitoring model performance across + different classes. + + Args: + x (List): Values for the x-axis; expected to have length N. + y (List): Corresponding values for the y-axis; also expected to have length N. + classes (List): Labels identifying the class of each point; length N. + title (str, optional): Title for the plot; defaults to 'Precision Recall Curve'. + x_title (str, optional): Label for the x-axis; defaults to 'Recall'. + y_title (str, optional): Label for the y-axis; defaults to 'Precision'. + + Returns: + (wandb.Object): A wandb object suitable for logging, showcasing the crafted metric visualization. 
+    """
+    df = pd.DataFrame({"class": classes, "y": y, "x": x}).round(3)
+    fields = {"x": "x", "y": "y", "class": "class"}
+    string_fields = {"title": title, "x-axis-title": x_title, "y-axis-title": y_title}
+    return wb.plot_table(
+        "wandb/area-under-curve/v0", wb.Table(dataframe=df), fields=fields, string_fields=string_fields
+    )
+
+
+def _plot_curve(
+    x,
+    y,
+    names=None,
+    id="precision-recall",
+    title="Precision Recall Curve",
+    x_title="Recall",
+    y_title="Precision",
+    num_x=100,
+    only_mean=False,
+):
+    """
+    Log a metric curve visualization.
+
+    This function generates a metric curve based on input data and logs the visualization to wandb.
+    The curve can represent aggregated data (mean) or individual class data, depending on the 'only_mean' flag.
+
+    Args:
+        x (np.ndarray): Data points for the x-axis with length N.
+        y (np.ndarray): Corresponding data points for the y-axis with shape CxN, where C is the number of classes.
+        names (list, optional): Names of the classes corresponding to the y-axis data; length C. Defaults to [].
+        id (str, optional): Unique identifier for the logged data in wandb. Defaults to 'precision-recall'.
+        title (str, optional): Title for the visualization plot. Defaults to 'Precision Recall Curve'.
+        x_title (str, optional): Label for the x-axis. Defaults to 'Recall'.
+        y_title (str, optional): Label for the y-axis. Defaults to 'Precision'.
+        num_x (int, optional): Number of interpolated data points for visualization. Defaults to 100.
+        only_mean (bool, optional): Flag to indicate if only the mean curve should be plotted. Defaults to False.
+
+    Note:
+        The function leverages the '_custom_table' function to generate the actual visualization.
+    """
+    # Create new x
+    if names is None:
+        names = []
+    x_new = np.linspace(x[0], x[-1], num_x).round(5)
+
+    # Create arrays for logging
+    x_log = x_new.tolist()
+    y_log = np.interp(x_new, x, np.mean(y, axis=0)).round(3).tolist()
+
+    if only_mean:
+        table = wb.Table(data=list(zip(x_log, y_log)), columns=[x_title, y_title])
+        wb.run.log({title: wb.plot.line(table, x_title, y_title, title=title)})
+    else:
+        classes = ["mean"] * len(x_log)
+        for i, yi in enumerate(y):
+            x_log.extend(x_new)  # add new x
+            y_log.extend(np.interp(x_new, x, yi))  # interpolate y to new x
+            classes.extend([names[i]] * len(x_new))  # add class names
+        wb.log({id: _custom_table(x_log, y_log, classes, title, x_title, y_title)}, commit=False)
+
+
 def _log_plots(plots, step):
+    """Logs plots from the input dictionary if they haven't been logged already at the specified step."""
     for name, params in plots.items():
-        timestamp = params['timestamp']
+        timestamp = params["timestamp"]
         if _processed_plots.get(name) != timestamp:
             wb.run.log({name.stem: wb.Image(str(name))}, step=step)
             _processed_plots[name] = timestamp
@@ -26,7 +109,7 @@ def _log_plots(plots, step):
 
 def on_pretrain_routine_start(trainer):
     """Initiate and start project if module is present."""
-    wb.run or wb.init(project=trainer.args.project or 'YOLOv8', name=trainer.args.name, config=vars(trainer.args))
+    wb.run or wb.init(project=trainer.args.project or "YOLOv8", name=trainer.args.name, config=vars(trainer.args))
 
 
 def on_fit_epoch_end(trainer):
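A small sketch of the resampling step `_plot_curve` performs above: per-class y values are interpolated onto a fixed x grid before being sent to wandb (the data values here are illustrative):

```python
# _plot_curve's interpolation step in isolation, without the wandb logging.
import numpy as np

x = np.array([0.0, 0.5, 1.0])
y = np.array([[1.0, 0.8, 0.2], [0.9, 0.7, 0.1]])  # C x N: one row per class
x_new = np.linspace(x[0], x[-1], 100).round(5)  # num_x=100 grid, as in _plot_curve
y_mean = np.interp(x_new, x, np.mean(y, axis=0)).round(3)  # mean curve (the only_mean=True path)
```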
@@ -40,7 +123,7 @@ def on_fit_epoch_end(trainer):
 
 def on_train_epoch_end(trainer):
     """Log metrics and save images at the end of each training epoch."""
-    wb.run.log(trainer.label_loss_items(trainer.tloss, prefix='train'), step=trainer.epoch + 1)
+    wb.run.log(trainer.label_loss_items(trainer.tloss, prefix="train"), step=trainer.epoch + 1)
     wb.run.log(trainer.lr, step=trainer.epoch + 1)
     if trainer.epoch == 1:
         _log_plots(trainer.plots, step=trainer.epoch + 1)
@@ -50,14 +133,31 @@ def on_train_end(trainer):
     """Save the best model as an artifact at end of training."""
     _log_plots(trainer.validator.plots, step=trainer.epoch + 1)
     _log_plots(trainer.plots, step=trainer.epoch + 1)
-    art = wb.Artifact(type='model', name=f'run_{wb.run.id}_model')
+    art = wb.Artifact(type="model", name=f"run_{wb.run.id}_model")
     if trainer.best.exists():
         art.add_file(trainer.best)
-        wb.run.log_artifact(art, aliases=['best'])
+        wb.run.log_artifact(art, aliases=["best"])
+    for curve_name, curve_values in zip(trainer.validator.metrics.curves, trainer.validator.metrics.curves_results):
+        x, y, x_title, y_title = curve_values
+        _plot_curve(
+            x,
+            y,
+            names=list(trainer.validator.metrics.names.values()),
+            id=f"curves/{curve_name}",
+            title=curve_name,
+            x_title=x_title,
+            y_title=y_title,
+        )
+    wb.run.finish()  # required or run continues on dashboard
 
 
-callbacks = {
-    'on_pretrain_routine_start': on_pretrain_routine_start,
-    'on_train_epoch_end': on_train_epoch_end,
-    'on_fit_epoch_end': on_fit_epoch_end,
-    'on_train_end': on_train_end} if wb else {}
+callbacks = (
+    {
+        "on_pretrain_routine_start": on_pretrain_routine_start,
+        "on_train_epoch_end": on_train_epoch_end,
+        "on_fit_epoch_end": on_fit_epoch_end,
+        "on_train_end": on_train_end,
+    }
+    if wb
+    else {}
+)
diff --git a/ultralytics/utils/checks.py b/ultralytics/utils/checks.py
index 28cad0d..c44ac0b 100644
--- a/ultralytics/utils/checks.py
+++ b/ultralytics/utils/checks.py
@@ -1,4 +1,5 @@
 # Ultralytics YOLO 🚀, AGPL-3.0 license
+
 import contextlib
 import glob
 import inspect
@@ -9,20 +10,96 @@ import re
 import shutil
 import subprocess
 import time
+from importlib import metadata
 from pathlib import Path
 from typing import Optional
 
 import cv2
 import numpy as np
-import pkg_resources as pkg
-import psutil
 import requests
 import torch
 from matplotlib import font_manager
 
-from ultralytics.utils import (ASSETS, AUTOINSTALL, LINUX, LOGGER, ONLINE, ROOT, USER_CONFIG_DIR, ThreadingLocked,
-                               TryExcept, clean_url, colorstr, downloads, emojis, is_colab, is_docker, is_jupyter,
-                               is_kaggle, is_online, is_pip_package, url2file)
+from ultralytics.utils import (
+    ASSETS,
+    AUTOINSTALL,
+    LINUX,
+    LOGGER,
+    ONLINE,
+    ROOT,
+    USER_CONFIG_DIR,
+    SimpleNamespace,
+    ThreadingLocked,
+    TryExcept,
+    clean_url,
+    colorstr,
+    downloads,
+    emojis,
+    is_colab,
+    is_docker,
+    is_github_action_running,
+    is_jupyter,
+    is_kaggle,
+    is_online,
+    is_pip_package,
+    url2file,
+)
+
+PYTHON_VERSION = platform.python_version()
+
+
+def parse_requirements(file_path=ROOT.parent / "requirements.txt", package=""):
+    """
+    Parse a requirements.txt file, ignoring lines that start with '#' and any text after '#'.
+
+    Args:
+        file_path (Path): Path to the requirements.txt file.
+        package (str, optional): Python package to use instead of requirements.txt file, i.e. package='ultralytics'.
+
+    Returns:
+        (List[SimpleNamespace]): List of parsed requirements as SimpleNamespace objects with `name` and `specifier` attributes.
+
+    Example:
+        ```python
+        from ultralytics.utils.checks import parse_requirements
+
+        parse_requirements(package='ultralytics')
+        ```
+    """
+
+    if package:
+        requires = [x for x in metadata.distribution(package).requires if "extra == " not in x]
+    else:
+        requires = Path(file_path).read_text().splitlines()
+
+    requirements = []
+    for line in requires:
+        line = line.strip()
+        if line and not line.startswith("#"):
+            line = line.split("#")[0].strip()  # ignore inline comments
+            match = re.match(r"([a-zA-Z0-9-_]+)\s*([<>!=~]+.*)?", line)
+            if match:
+                requirements.append(SimpleNamespace(name=match[1], specifier=match[2].strip() if match[2] else ""))
+
+    return requirements
+
+
+def parse_version(version="0.0.0") -> tuple:
+    """
+    Convert a version string to a tuple of integers, ignoring any extra non-numeric string attached to the version.
+    This function replaces the deprecated 'pkg_resources.parse_version(v)'.
+
+    Args:
+        version (str): Version string, i.e. '2.0.1+cpu'
+
+    Returns:
+        (tuple): Tuple of integers representing the numeric part of the version, i.e. '2.0.1+cpu' -> (2, 0, 1)
+    """
+    try:
+        return tuple(map(int, re.findall(r"\d+", version)[:3]))  # '2.0.1+cpu' -> (2, 0, 1)
+    except Exception as e:
+        LOGGER.warning(f"WARNING ⚠️ failure for parse_version({version}), returning (0, 0, 0): {e}")
+        return 0, 0, 0
 
 
 def is_ascii(s) -> bool:
@@ -33,7 +110,7 @@ def is_ascii(s) -> bool:
         s (str): String to be checked.
 
     Returns:
-        bool: True if the string is composed only of ASCII characters, False otherwise.
+        (bool): True if the string is composed only of ASCII characters, False otherwise.
     """
     # Convert list, tuple, None, etc. to string
     s = str(s)
@@ -65,16 +142,22 @@ def check_imgsz(imgsz, stride=32, min_dim=1, max_dim=2, floor=0):
         imgsz = [imgsz]
     elif isinstance(imgsz, (list, tuple)):
         imgsz = list(imgsz)
+    elif isinstance(imgsz, str):  # i.e. '640' or '[640,640]'
+        imgsz = [int(imgsz)] if imgsz.isnumeric() else eval(imgsz)
     else:
-        raise TypeError(f"'imgsz={imgsz}' is of invalid type {type(imgsz).__name__}. "
-                        f"Valid imgsz types are int i.e. 'imgsz=640' or list i.e. 'imgsz=[640,640]'")
+        raise TypeError(
+            f"'imgsz={imgsz}' is of invalid type {type(imgsz).__name__}. "
+            f"Valid imgsz types are int i.e. 'imgsz=640' or list i.e. 'imgsz=[640,640]'"
+        )
 
     # Apply max_dim
     if len(imgsz) > max_dim:
-        msg = "'train' and 'val' imgsz must be an integer, while 'predict' and 'export' imgsz may be a [h, w] list " \
-              "or an integer, i.e. 'yolo export imgsz=640,480' or 'yolo export imgsz=640'"
+        msg = (
+            "'train' and 'val' imgsz must be an integer, while 'predict' and 'export' imgsz may be a [h, w] list "
+            "or an integer, i.e. 'yolo export imgsz=640,480' or 'yolo export imgsz=640'"
+        )
         if max_dim != 1:
-            raise ValueError(f'imgsz={imgsz} is not a valid image size. {msg}')
+            raise ValueError(f"imgsz={imgsz} is not a valid image size. {msg}")
         LOGGER.warning(f"WARNING ⚠️ updating to 'imgsz={max(imgsz)}'. 
{msg}") imgsz = [max(imgsz)] # Make image size a multiple of the stride @@ -82,7 +165,7 @@ def check_imgsz(imgsz, stride=32, min_dim=1, max_dim=2, floor=0): # Print warning message if image size was updated if sz != imgsz: - LOGGER.warning(f'WARNING ⚠️ imgsz={imgsz} must be multiple of max stride {stride}, updating to {sz}') + LOGGER.warning(f"WARNING ⚠️ imgsz={imgsz} must be multiple of max stride {stride}, updating to {sz}") # Add missing dimensions if necessary sz = [sz[0], sz[0]] if min_dim == 2 and len(sz) == 1 else sz[0] if min_dim == 1 and len(sz) == 1 else sz @@ -90,66 +173,88 @@ def check_imgsz(imgsz, stride=32, min_dim=1, max_dim=2, floor=0): return sz -def check_version(current: str = '0.0.0', - required: str = '0.0.0', - name: str = 'version ', - hard: bool = False, - verbose: bool = False) -> bool: +def check_version( + current: str = "0.0.0", + required: str = "0.0.0", + name: str = "version", + hard: bool = False, + verbose: bool = False, + msg: str = "", +) -> bool: """ Check current version against the required version or range. Args: - current (str): Current version. + current (str): Current version or package name to get version from. required (str): Required version or range (in pip-style format). - name (str): Name to be used in warning message. - hard (bool): If True, raise an AssertionError if the requirement is not met. - verbose (bool): If True, print warning message if requirement is not met. + name (str, optional): Name to be used in warning message. + hard (bool, optional): If True, raise an AssertionError if the requirement is not met. + verbose (bool, optional): If True, print warning message if requirement is not met. + msg (str, optional): Extra message to display if verbose. Returns: (bool): True if requirement is met, False otherwise. Example: - # check if current version is exactly 22.04 + ```python + # Check if current version is exactly 22.04 check_version(current='22.04', required='==22.04') - # check if current version is greater than or equal to 22.04 + # Check if current version is greater than or equal to 22.04 check_version(current='22.10', required='22.04') # assumes '>=' inequality if none passed - # check if current version is less than or equal to 22.04 + # Check if current version is less than or equal to 22.04 check_version(current='22.04', required='<=22.04') - # check if current version is between 20.04 (inclusive) and 22.04 (exclusive) + # Check if current version is between 20.04 (inclusive) and 22.04 (exclusive) check_version(current='21.10', required='>20.04,<22.04') + ``` """ - current = pkg.parse_version(current) - constraints = re.findall(r'([<>!=]{1,2}\s*\d+\.\d+)', required) or [f'>={required}'] + if not current: # if current is '' or None + LOGGER.warning(f"WARNING ⚠️ invalid check_version({current}, {required}) requested, please check values.") + return True + elif not current[0].isdigit(): # current is package name rather than version string, i.e. 
current='ultralytics' + try: + name = current # assigned package name to 'name' arg + current = metadata.version(current) # get version string from package name + except metadata.PackageNotFoundError as e: + if hard: + raise ModuleNotFoundError(emojis(f"WARNING ⚠️ {current} package is required but not installed")) from e + else: + return False + if not required: # if required is '' or None + return True + + op = "" + version = "" result = True - for constraint in constraints: - op, version = re.match(r'([<>!=]{1,2})\s*(\d+\.\d+)', constraint).groups() - version = pkg.parse_version(version) - if op == '==' and current != version: + c = parse_version(current) # '1.2.3' -> (1, 2, 3) + for r in required.strip(",").split(","): + op, version = re.match(r"([^0-9]*)([\d.]+)", r).groups() # split '>=22.04' -> ('>=', '22.04') + v = parse_version(version) # '1.2.3' -> (1, 2, 3) + if op == "==" and c != v: result = False - elif op == '!=' and current == version: + elif op == "!=" and c == v: result = False - elif op == '>=' and not (current >= version): + elif op in (">=", "") and not (c >= v): # if no constraint passed assume '>=required' result = False - elif op == '<=' and not (current <= version): + elif op == "<=" and not (c <= v): result = False - elif op == '>' and not (current > version): + elif op == ">" and not (c > v): result = False - elif op == '<' and not (current < version): + elif op == "<" and not (c < v): result = False if not result: - warning_message = f'WARNING ⚠️ {name}{required} is required, but {name}{current} is currently installed' + warning = f"WARNING ⚠️ {name}{op}{version} is required, but {name}=={current} is currently installed {msg}" if hard: - raise ModuleNotFoundError(emojis(warning_message)) # assert version requirements met + raise ModuleNotFoundError(emojis(warning)) # assert version requirements met if verbose: - LOGGER.warning(warning_message) + LOGGER.warning(warning) return result -def check_latest_pypi_version(package_name='ultralytics'): +def check_latest_pypi_version(package_name="ultralytics"): """ Returns the latest version of a PyPI package without downloading or installing it. @@ -161,9 +266,9 @@ def check_latest_pypi_version(package_name='ultralytics'): """ with contextlib.suppress(Exception): requests.packages.urllib3.disable_warnings() # Disable the InsecureRequestWarning - response = requests.get(f'https://pypi.org/pypi/{package_name}/json', timeout=3) + response = requests.get(f"https://pypi.org/pypi/{package_name}/json", timeout=3) if response.status_code == 200: - return response.json()['info']['version'] + return response.json()["info"]["version"] def check_pip_update_available(): @@ -176,16 +281,19 @@ def check_pip_update_available(): if ONLINE and is_pip_package(): with contextlib.suppress(Exception): from ultralytics import __version__ + latest = check_latest_pypi_version() - if pkg.parse_version(__version__) < pkg.parse_version(latest): # update is available - LOGGER.info(f'New https://pypi.org/project/ultralytics/{latest} available 😃 ' - f"Update with 'pip install -U ultralytics'") + if check_version(__version__, f"<{latest}"): # check if current version is < latest version + LOGGER.info( + f"New https://pypi.org/project/ultralytics/{latest} available 😃 " + f"Update with 'pip install -U ultralytics'" + ) return True return False @ThreadingLocked() -def check_font(font='Arial.ttf'): +def check_font(font="Arial.ttf"): """ Find font locally or download to user's configuration directory if it does not already exist. 
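Since `check_version` above can now take a package name as well as a version string for `current`, a short sketch of both call styles (assumes `ultralytics` and `torch` are installed):

```python
from ultralytics.utils.checks import check_version

check_version(current="22.10", required=">=22.04")        # True, plain version strings
check_version(current="21.10", required=">20.04,<22.04")  # True, comma-separated range

# New path: a 'current' that does not start with a digit is treated as a package
# name and resolved to its installed version via importlib.metadata
check_version(current="torch", required=">=1.8.0")
```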
@@ -208,13 +316,13 @@ def check_font(font='Arial.ttf'): return matches[0] # Download to USER_CONFIG_DIR if missing - url = f'https://ultralytics.com/assets/{name}' - if downloads.is_url(url): + url = f"https://ultralytics.com/assets/{name}" + if downloads.is_url(url, check=True): downloads.safe_download(url=url, file=file) return file -def check_python(minimum: str = '3.8.0') -> bool: +def check_python(minimum: str = "3.8.0") -> bool: """ Check current python version against the required minimum version. @@ -222,13 +330,13 @@ def check_python(minimum: str = '3.8.0') -> bool: minimum (str): Required minimum version of python. Returns: - None + (bool): Whether the installed Python version meets the minimum constraints. """ - return check_version(platform.python_version(), minimum, name='Python ', hard=True) + return check_version(PYTHON_VERSION, minimum, name="Python ", hard=True) @TryExcept() -def check_requirements(requirements=ROOT.parent / 'requirements.txt', exclude=(), install=True, cmds=''): +def check_requirements(requirements=ROOT.parent / "requirements.txt", exclude=(), install=True, cmds=""): """ Check if installed dependencies meet YOLOv8 requirements and attempt to auto-update if needed. @@ -253,46 +361,43 @@ def check_requirements(requirements=ROOT.parent / 'requirements.txt', exclude=() check_requirements(['numpy', 'ultralytics>=8.0.0']) ``` """ - prefix = colorstr('red', 'bold', 'requirements:') + + prefix = colorstr("red", "bold", "requirements:") check_python() # check python version check_torchvision() # check torch-torchvision compatibility if isinstance(requirements, Path): # requirements.txt file file = requirements.resolve() - assert file.exists(), f'{prefix} {file} not found, check failed.' - with file.open() as f: - requirements = [f'{x.name}{x.specifier}' for x in pkg.parse_requirements(f) if x.name not in exclude] + assert file.exists(), f"{prefix} {file} not found, check failed." 
+ requirements = [f"{x.name}{x.specifier}" for x in parse_requirements(file) if x.name not in exclude] elif isinstance(requirements, str): requirements = [requirements] pkgs = [] for r in requirements: - r_stripped = r.split('/')[-1].replace('.git', '') # replace git+https://org/repo.git -> 'repo' + r_stripped = r.split("/")[-1].replace(".git", "") # replace git+https://org/repo.git -> 'repo' + match = re.match(r"([a-zA-Z0-9-_]+)([<>!=~]+.*)?", r_stripped) + name, required = match[1], match[2].strip() if match[2] else "" try: - pkg.require(r_stripped) # exception if requirements not met - except pkg.DistributionNotFound: - try: # attempt to import (slower but more accurate) - import importlib - importlib.import_module(next(pkg.parse_requirements(r_stripped)).name) - except ImportError: - pkgs.append(r) - except pkg.VersionConflict: + assert check_version(metadata.version(name), required) # exception if requirements not met + except (AssertionError, metadata.PackageNotFoundError): pkgs.append(r) - s = ' '.join(f'"{x}"' for x in pkgs) # console string + s = " ".join(f'"{x}"' for x in pkgs) # console string if s: if install and AUTOINSTALL: # check environment variable n = len(pkgs) # number of packages updates LOGGER.info(f"{prefix} Ultralytics requirement{'s' * (n > 1)} {pkgs} not found, attempting AutoUpdate...") try: t = time.time() - assert is_online(), 'AutoUpdate skipped (offline)' - LOGGER.info(subprocess.check_output(f'pip install --no-cache {s} {cmds}', shell=True).decode()) + assert is_online(), "AutoUpdate skipped (offline)" + LOGGER.info(subprocess.check_output(f"pip install --no-cache {s} {cmds}", shell=True).decode()) dt = time.time() - t LOGGER.info( f"{prefix} AutoUpdate success ✅ {dt:.1f}s, installed {n} package{'s' * (n > 1)}: {pkgs}\n" - f"{prefix} ⚠️ {colorstr('bold', 'Restart runtime or rerun command for updates to take effect')}\n") + f"{prefix} ⚠️ {colorstr('bold', 'Restart runtime or rerun command for updates to take effect')}\n" + ) except Exception as e: - LOGGER.warning(f'{prefix} ❌ {e}') + LOGGER.warning(f"{prefix} ❌ {e}") return False else: return False @@ -305,134 +410,211 @@ def check_torchvision(): Checks the installed versions of PyTorch and Torchvision to ensure they're compatible. This function checks the installed versions of PyTorch and Torchvision, and warns if they're incompatible according - to the provided compatibility table based on https://github.com/pytorch/vision#installation. The - compatibility table is a dictionary where the keys are PyTorch versions and the values are lists of compatible + to the provided compatibility table based on: + https://github.com/pytorch/vision#installation. + + The compatibility table is a dictionary where the keys are PyTorch versions and the values are lists of compatible Torchvision versions. 
""" import torchvision # Compatibility table - compatibility_table = {'2.0': ['0.15'], '1.13': ['0.14'], '1.12': ['0.13']} + compatibility_table = {"2.0": ["0.15"], "1.13": ["0.14"], "1.12": ["0.13"]} # Extract only the major and minor versions - v_torch = '.'.join(torch.__version__.split('+')[0].split('.')[:2]) - v_torchvision = '.'.join(torchvision.__version__.split('+')[0].split('.')[:2]) + v_torch = ".".join(torch.__version__.split("+")[0].split(".")[:2]) + v_torchvision = ".".join(torchvision.__version__.split("+")[0].split(".")[:2]) if v_torch in compatibility_table: compatible_versions = compatibility_table[v_torch] - if all(pkg.parse_version(v_torchvision) != pkg.parse_version(v) for v in compatible_versions): - print(f'WARNING ⚠️ torchvision=={v_torchvision} is incompatible with torch=={v_torch}.\n' - f"Run 'pip install torchvision=={compatible_versions[0]}' to fix torchvision or " - "'pip install -U torch torchvision' to update both.\n" - 'For a full compatibility table see https://github.com/pytorch/vision#installation') + if all(v_torchvision != v for v in compatible_versions): + print( + f"WARNING ⚠️ torchvision=={v_torchvision} is incompatible with torch=={v_torch}.\n" + f"Run 'pip install torchvision=={compatible_versions[0]}' to fix torchvision or " + "'pip install -U torch torchvision' to update both.\n" + "For a full compatibility table see https://github.com/pytorch/vision#installation" + ) -def check_suffix(file='yolov8n.pt', suffix='.pt', msg=''): +def check_suffix(file="yolov8n.pt", suffix=".pt", msg=""): """Check file(s) for acceptable suffix.""" if file and suffix: if isinstance(suffix, str): - suffix = (suffix, ) + suffix = (suffix,) for f in file if isinstance(file, (list, tuple)) else [file]: s = Path(f).suffix.lower().strip() # file suffix if len(s): - assert s in suffix, f'{msg}{f} acceptable suffix is {suffix}, not {s}' + assert s in suffix, f"{msg}{f} acceptable suffix is {suffix}, not {s}" def check_yolov5u_filename(file: str, verbose: bool = True): """Replace legacy YOLOv5 filenames with updated YOLOv5u filenames.""" - if 'yolov3' in file or 'yolov5' in file: - if 'u.yaml' in file: - file = file.replace('u.yaml', '.yaml') # i.e. yolov5nu.yaml -> yolov5n.yaml - elif '.pt' in file and 'u' not in file: + if "yolov3" in file or "yolov5" in file: + if "u.yaml" in file: + file = file.replace("u.yaml", ".yaml") # i.e. yolov5nu.yaml -> yolov5n.yaml + elif ".pt" in file and "u" not in file: original_file = file - file = re.sub(r'(.*yolov5([nsmlx]))\.pt', '\\1u.pt', file) # i.e. yolov5n.pt -> yolov5nu.pt - file = re.sub(r'(.*yolov5([nsmlx])6)\.pt', '\\1u.pt', file) # i.e. yolov5n6.pt -> yolov5n6u.pt - file = re.sub(r'(.*yolov3(|-tiny|-spp))\.pt', '\\1u.pt', file) # i.e. yolov3-spp.pt -> yolov3-sppu.pt + file = re.sub(r"(.*yolov5([nsmlx]))\.pt", "\\1u.pt", file) # i.e. yolov5n.pt -> yolov5nu.pt + file = re.sub(r"(.*yolov5([nsmlx])6)\.pt", "\\1u.pt", file) # i.e. yolov5n6.pt -> yolov5n6u.pt + file = re.sub(r"(.*yolov3(|-tiny|-spp))\.pt", "\\1u.pt", file) # i.e. 
yolov3-spp.pt -> yolov3-sppu.pt if file != original_file and verbose: LOGGER.info( f"PRO TIP 💡 Replace 'model={original_file}' with new 'model={file}'.\nYOLOv5 'u' models are " - f'trained with https://github.com/ultralytics/ultralytics and feature improved performance vs ' - f'standard YOLOv5 models trained with https://github.com/ultralytics/yolov5.\n') + f"trained with https://github.com/ultralytics/ultralytics and feature improved performance vs " + f"standard YOLOv5 models trained with https://github.com/ultralytics/yolov5.\n" + ) return file -def check_file(file, suffix='', download=True, hard=True): +def check_model_file_from_stem(model="yolov8n"): + """Return a model filename from a valid model stem.""" + if model and not Path(model).suffix and Path(model).stem in downloads.GITHUB_ASSETS_STEMS: + return Path(model).with_suffix(".pt") # add suffix, i.e. yolov8n -> yolov8n.pt + else: + return model + + +def check_file(file, suffix="", download=True, hard=True): """Search/download file (if necessary) and return path.""" check_suffix(file, suffix) # optional file = str(file).strip() # convert to string and strip spaces file = check_yolov5u_filename(file) # yolov5n -> yolov5nu - if not file or ('://' not in file and Path(file).exists()): # exists ('://' check required in Windows Python<3.10) + if ( + not file + or ("://" not in file and Path(file).exists()) # '://' check required in Windows Python<3.10 + or file.lower().startswith("grpc://") + ): # file exists or gRPC Triton images return file - elif download and file.lower().startswith(('https://', 'http://', 'rtsp://', 'rtmp://')): # download + elif download and file.lower().startswith(("https://", "http://", "rtsp://", "rtmp://", "tcp://")): # download url = file # warning: Pathlib turns :// -> :/ file = url2file(file) # '%2F' to '/', split https://url.com/file.txt?auth if Path(file).exists(): - LOGGER.info(f'Found {clean_url(url)} locally at {file}') # file already exists + LOGGER.info(f"Found {clean_url(url)} locally at {file}") # file already exists else: downloads.safe_download(url=url, file=file, unzip=False) return file else: # search - files = glob.glob(str(ROOT / 'cfg' / '**' / file), recursive=True) # find file + files = glob.glob(str(ROOT / "**" / file), recursive=True) or glob.glob(str(ROOT.parent / file)) # find file if not files and hard: raise FileNotFoundError(f"'{file}' does not exist") elif len(files) > 1 and hard: raise FileNotFoundError(f"Multiple files match '{file}', specify exact path: {files}") - return files[0] if len(files) else [] # return file + return files[0] if len(files) else [] if hard else file # return file -def check_yaml(file, suffix=('.yaml', '.yml'), hard=True): +def check_yaml(file, suffix=(".yaml", ".yml"), hard=True): """Search/download YAML file (if necessary) and return path, checking suffix.""" return check_file(file, suffix, hard=hard) +def check_is_path_safe(basedir, path): + """ + Check if the resolved path is under the intended directory to prevent path traversal. + + Args: + basedir (Path | str): The intended directory. + path (Path | str): The path to check. + + Returns: + (bool): True if the path is safe, False otherwise. 
+ """ + base_dir_resolved = Path(basedir).resolve() + path_resolved = Path(path).resolve() + + return path_resolved.is_file() and path_resolved.parts[: len(base_dir_resolved.parts)] == base_dir_resolved.parts + + def check_imshow(warn=False): """Check if environment supports image displays.""" try: if LINUX: - assert 'DISPLAY' in os.environ and not is_docker() and not is_colab() and not is_kaggle() - cv2.imshow('test', np.zeros((8, 8, 3), dtype=np.uint8)) # show a small 8-pixel image + assert "DISPLAY" in os.environ and not is_docker() and not is_colab() and not is_kaggle() + cv2.imshow("test", np.zeros((8, 8, 3), dtype=np.uint8)) # show a small 8-pixel image cv2.waitKey(1) cv2.destroyAllWindows() cv2.waitKey(1) return True except Exception as e: if warn: - LOGGER.warning(f'WARNING ⚠️ Environment does not support cv2.imshow() or PIL Image.show()\n{e}') + LOGGER.warning(f"WARNING ⚠️ Environment does not support cv2.imshow() or PIL Image.show()\n{e}") return False -def check_yolo(verbose=True, device=''): +def check_yolo(verbose=True, device=""): """Return a human-readable YOLO software and hardware summary.""" + import psutil + from ultralytics.utils.torch_utils import select_device if is_jupyter(): - if check_requirements('wandb', install=False): - os.system('pip uninstall -y wandb') # uninstall wandb: unwanted account creation prompt with infinite hang + if check_requirements("wandb", install=False): + os.system("pip uninstall -y wandb") # uninstall wandb: unwanted account creation prompt with infinite hang if is_colab(): - shutil.rmtree('sample_data', ignore_errors=True) # remove colab /sample_data directory + shutil.rmtree("sample_data", ignore_errors=True) # remove colab /sample_data directory if verbose: # System info gib = 1 << 30 # bytes per GiB ram = psutil.virtual_memory().total - total, used, free = shutil.disk_usage('/') - s = f'({os.cpu_count()} CPUs, {ram / gib:.1f} GB RAM, {(total - free) / gib:.1f}/{total / gib:.1f} GB disk)' + total, used, free = shutil.disk_usage("/") + s = f"({os.cpu_count()} CPUs, {ram / gib:.1f} GB RAM, {(total - free) / gib:.1f}/{total / gib:.1f} GB disk)" with contextlib.suppress(Exception): # clear display if ipython is installed from IPython import display + display.clear_output() else: - s = '' + s = "" select_device(device=device, newline=False) - LOGGER.info(f'Setup complete ✅ {s}') + LOGGER.info(f"Setup complete ✅ {s}") + + +def collect_system_info(): + """Collect and print relevant system information including OS, Python, RAM, CPU, and CUDA.""" + + import psutil + + from ultralytics.utils import ENVIRONMENT, is_git_dir + from ultralytics.utils.torch_utils import get_cpu_info + + ram_info = psutil.virtual_memory().total / (1024**3) # Convert bytes to GB + check_yolo() + LOGGER.info( + f"\n{'OS':<20}{platform.platform()}\n" + f"{'Environment':<20}{ENVIRONMENT}\n" + f"{'Python':<20}{PYTHON_VERSION}\n" + f"{'Install':<20}{'git' if is_git_dir() else 'pip' if is_pip_package() else 'other'}\n" + f"{'RAM':<20}{ram_info:.2f} GB\n" + f"{'CPU':<20}{get_cpu_info()}\n" + f"{'CUDA':<20}{torch.version.cuda if torch and torch.cuda.is_available() else None}\n" + ) + + for r in parse_requirements(package="ultralytics"): + try: + current = metadata.version(r.name) + is_met = "✅ " if check_version(current, str(r.specifier), hard=True) else "❌ " + except metadata.PackageNotFoundError: + current = "(not installed)" + is_met = "❌ " + LOGGER.info(f"{r.name:<20}{is_met}{current}{r.specifier}") + + if is_github_action_running(): + LOGGER.info( + f"\nRUNNER_OS: 
{os.getenv('RUNNER_OS')}\n" + f"GITHUB_EVENT_NAME: {os.getenv('GITHUB_EVENT_NAME')}\n" + f"GITHUB_WORKFLOW: {os.getenv('GITHUB_WORKFLOW')}\n" + f"GITHUB_ACTOR: {os.getenv('GITHUB_ACTOR')}\n" + f"GITHUB_REPOSITORY: {os.getenv('GITHUB_REPOSITORY')}\n" + f"GITHUB_REPOSITORY_OWNER: {os.getenv('GITHUB_REPOSITORY_OWNER')}\n" + ) def check_amp(model): """ - This function checks the PyTorch Automatic Mixed Precision (AMP) functionality of a YOLOv8 model. - If the checks fail, it means there are anomalies with AMP on the system that may cause NaN losses or zero-mAP - results, so AMP will be disabled during training. + This function checks the PyTorch Automatic Mixed Precision (AMP) functionality of a YOLOv8 model. If the checks + fail, it means there are anomalies with AMP on the system that may cause NaN losses or zero-mAP results, so AMP will + be disabled during training. Args: model (nn.Module): A YOLOv8 model instance. @@ -450,7 +632,7 @@ def check_amp(model): (bool): Returns True if the AMP functionality works correctly with YOLOv8 model, else False. """ device = next(model.parameters()).device # get model device - if device.type in ('cpu', 'mps'): + if device.type in ("cpu", "mps"): return False # AMP only used on CUDA devices def amp_allclose(m, im): @@ -461,23 +643,27 @@ def check_amp(model): del m return a.shape == b.shape and torch.allclose(a, b.float(), atol=0.5) # close to 0.5 absolute tolerance - im = ASSETS / 'bus.jpg' # image to check - prefix = colorstr('AMP: ') - LOGGER.info(f'{prefix}running Automatic Mixed Precision (AMP) checks with YOLOv8n...') + im = ASSETS / "bus.jpg" # image to check + prefix = colorstr("AMP: ") + LOGGER.info(f"{prefix}running Automatic Mixed Precision (AMP) checks with YOLOv8n...") warning_msg = "Setting 'amp=True'. If you experience zero-mAP or NaN losses you can disable AMP with amp=False." try: from ultralytics import YOLO - assert amp_allclose(YOLO('yolov8n.pt'), im) - LOGGER.info(f'{prefix}checks passed ✅') + + assert amp_allclose(YOLO("yolov8n.pt"), im) + LOGGER.info(f"{prefix}checks passed ✅") except ConnectionError: - LOGGER.warning(f'{prefix}checks skipped ⚠️, offline and unable to download YOLOv8n. {warning_msg}') + LOGGER.warning(f"{prefix}checks skipped ⚠️, offline and unable to download YOLOv8n. {warning_msg}") except (AttributeError, ModuleNotFoundError): LOGGER.warning( - f'{prefix}checks skipped ⚠️. Unable to load YOLOv8n due to possible Ultralytics package modifications. {warning_msg}' + f"{prefix}checks skipped ⚠️. " + f"Unable to load YOLOv8n due to possible Ultralytics package modifications. {warning_msg}" ) except AssertionError: - LOGGER.warning(f'{prefix}checks failed ❌. Anomalies were detected with AMP on your system that may lead to ' - f'NaN losses or zero-mAP results, so AMP will be disabled during training.') + LOGGER.warning( + f"{prefix}checks failed ❌. Anomalies were detected with AMP on your system that may lead to " + f"NaN losses or zero-mAP results, so AMP will be disabled during training." + ) return False return True @@ -485,8 +671,8 @@ def check_amp(model): def git_describe(path=ROOT): # path must be a directory """Return human-readable git description, i.e. 
v5.0-5-g3e25f1e https://git-scm.com/docs/git-describe.""" with contextlib.suppress(Exception): - return subprocess.check_output(f'git -C {path} describe --tags --long --always', shell=True).decode()[:-1] - return '' + return subprocess.check_output(f"git -C {path} describe --tags --long --always", shell=True).decode()[:-1] + return "" def print_args(args: Optional[dict] = None, show_file=True, show_func=False): @@ -494,7 +680,7 @@ def print_args(args: Optional[dict] = None, show_file=True, show_func=False): def strip_auth(v): """Clean longer Ultralytics HUB URLs by stripping potential authentication information.""" - return clean_url(v) if (isinstance(v, str) and v.startswith('http') and len(v) > 100) else v + return clean_url(v) if (isinstance(v, str) and v.startswith("http") and len(v) > 100) else v x = inspect.currentframe().f_back # previous frame file, _, func, _, _ = inspect.getframeinfo(x) @@ -502,26 +688,28 @@ def print_args(args: Optional[dict] = None, show_file=True, show_func=False): args, _, _, frm = inspect.getargvalues(x) args = {k: v for k, v in frm.items() if k in args} try: - file = Path(file).resolve().relative_to(ROOT).with_suffix('') + file = Path(file).resolve().relative_to(ROOT).with_suffix("") except ValueError: file = Path(file).stem - s = (f'{file}: ' if show_file else '') + (f'{func}: ' if show_func else '') - LOGGER.info(colorstr(s) + ', '.join(f'{k}={strip_auth(v)}' for k, v in args.items())) + s = (f"{file}: " if show_file else "") + (f"{func}: " if show_func else "") + LOGGER.info(colorstr(s) + ", ".join(f"{k}={strip_auth(v)}" for k, v in args.items())) def cuda_device_count() -> int: - """Get the number of NVIDIA GPUs available in the environment. + """ + Get the number of NVIDIA GPUs available in the environment. Returns: (int): The number of NVIDIA GPUs available. """ try: # Run the nvidia-smi command and capture its output - output = subprocess.check_output(['nvidia-smi', '--query-gpu=count', '--format=csv,noheader,nounits'], - encoding='utf-8') + output = subprocess.check_output( + ["nvidia-smi", "--query-gpu=count", "--format=csv,noheader,nounits"], encoding="utf-8" + ) # Take the first line and strip any leading/trailing white space - first_line = output.strip().split('\n')[0] + first_line = output.strip().split("\n")[0] return int(first_line) except (subprocess.CalledProcessError, FileNotFoundError, ValueError): @@ -530,9 +718,14 @@ def cuda_device_count() -> int: def cuda_is_available() -> bool: - """Check if CUDA is available in the environment. + """ + Check if CUDA is available in the environment. Returns: (bool): True if one or more NVIDIA GPUs are available, False otherwise. """ return cuda_device_count() > 0 + + +# Define constants +IS_PYTHON_3_12 = PYTHON_VERSION.startswith("3.12") diff --git a/ultralytics/utils/dist.py b/ultralytics/utils/dist.py index 1190098..b669e52 100644 --- a/ultralytics/utils/dist.py +++ b/ultralytics/utils/dist.py @@ -1,47 +1,53 @@ # Ultralytics YOLO 🚀, AGPL-3.0 license import os -import re import shutil import socket import sys import tempfile -from pathlib import Path from . import USER_CONFIG_DIR from .torch_utils import TORCH_1_9 def find_free_network_port() -> int: - """Finds a free port on localhost. + """ + Finds a free port on localhost. It is useful in single-node training when we don't want to connect to a real main node but have to set the `MASTER_PORT` environment variable. 
""" with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s: - s.bind(('127.0.0.1', 0)) + s.bind(("127.0.0.1", 0)) return s.getsockname()[1] # port def generate_ddp_file(trainer): """Generates a DDP file and returns its file name.""" - module, name = f'{trainer.__class__.__module__}.{trainer.__class__.__name__}'.rsplit('.', 1) + module, name = f"{trainer.__class__.__module__}.{trainer.__class__.__name__}".rsplit(".", 1) - content = f'''overrides = {vars(trainer.args)} \nif __name__ == "__main__": + content = f""" +# Ultralytics Multi-GPU training temp file (should be automatically deleted after use) +overrides = {vars(trainer.args)} + +if __name__ == "__main__": from {module} import {name} from ultralytics.utils import DEFAULT_CFG_DICT cfg = DEFAULT_CFG_DICT.copy() cfg.update(save_dir='') # handle the extra key 'save_dir' trainer = {name}(cfg=cfg, overrides=overrides) - trainer.train()''' - (USER_CONFIG_DIR / 'DDP').mkdir(exist_ok=True) - with tempfile.NamedTemporaryFile(prefix='_temp_', - suffix=f'{id(trainer)}.py', - mode='w+', - encoding='utf-8', - dir=USER_CONFIG_DIR / 'DDP', - delete=False) as file: + results = trainer.train() +""" + (USER_CONFIG_DIR / "DDP").mkdir(exist_ok=True) + with tempfile.NamedTemporaryFile( + prefix="_temp_", + suffix=f"{id(trainer)}.py", + mode="w+", + encoding="utf-8", + dir=USER_CONFIG_DIR / "DDP", + delete=False, + ) as file: file.write(content) return file.name @@ -49,19 +55,17 @@ def generate_ddp_file(trainer): def generate_ddp_command(world_size, trainer): """Generates and returns command for distributed training.""" import __main__ # noqa local import to avoid https://github.com/Lightning-AI/lightning/issues/15218 + if not trainer.resume: shutil.rmtree(trainer.save_dir) # remove the save_dir - file = str(Path(sys.argv[0]).resolve()) - safe_pattern = re.compile(r'^[a-zA-Z0-9_. 
/\\-]{1,128}$')  # allowed characters and maximum of 100 characters
-    if not (safe_pattern.match(file) and Path(file).exists() and file.endswith('.py')):  # using CLI
-        file = generate_ddp_file(trainer)
-    dist_cmd = 'torch.distributed.run' if TORCH_1_9 else 'torch.distributed.launch'
+    file = generate_ddp_file(trainer)
+    dist_cmd = "torch.distributed.run" if TORCH_1_9 else "torch.distributed.launch"
     port = find_free_network_port()
-    cmd = [sys.executable, '-m', dist_cmd, '--nproc_per_node', f'{world_size}', '--master_port', f'{port}', file]
+    cmd = [sys.executable, "-m", dist_cmd, "--nproc_per_node", f"{world_size}", "--master_port", f"{port}", file]
     return cmd, file
 
 
 def ddp_cleanup(trainer, file):
     """Delete temp file if created."""
-    if f'{id(trainer)}.py' in file:  # if temp_file suffix in file
+    if f"{id(trainer)}.py" in file:  # if temp_file suffix in file
         os.remove(file)
diff --git a/ultralytics/utils/downloads.py b/ultralytics/utils/downloads.py
index 6e310bf..6191ade 100644
--- a/ultralytics/utils/downloads.py
+++ b/ultralytics/utils/downloads.py
@@ -15,20 +15,42 @@ import torch
 from ultralytics.utils import LOGGER, TQDM, checks, clean_url, emojis, is_online, url2file
 
 # Define Ultralytics GitHub assets maintained at https://github.com/ultralytics/assets
-GITHUB_ASSETS_REPO = 'ultralytics/assets'
-GITHUB_ASSETS_NAMES = [f'yolov8{k}{suffix}.pt' for k in 'nsmlx' for suffix in ('', '6', '-cls', '-seg', '-pose')] + \
-    [f'yolov5{k}u.pt' for k in 'nsmlx'] + \
-    [f'yolov3{k}u.pt' for k in ('', '-spp', '-tiny')] + \
-    [f'yolo_nas_{k}.pt' for k in 'sml'] + \
-    [f'sam_{k}.pt' for k in 'bl'] + \
-    [f'FastSAM-{k}.pt' for k in 'sx'] + \
-    [f'rtdetr-{k}.pt' for k in 'lx'] + \
-    ['mobile_sam.pt']
+GITHUB_ASSETS_REPO = "ultralytics/assets"
+GITHUB_ASSETS_NAMES = (
+    [f"yolov8{k}{suffix}.pt" for k in "nsmlx" for suffix in ("", "-cls", "-seg", "-pose", "-obb")]
+    + [f"yolov5{k}{resolution}u.pt" for k in "nsmlx" for resolution in ("", "6")]
+    + [f"yolov3{k}u.pt" for k in ("", "-spp", "-tiny")]
+    + [f"yolov8{k}-world.pt" for k in "smlx"]
+    + [f"yolov8{k}-worldv2.pt" for k in "smlx"]
+    + [f"yolov9{k}.pt" for k in "ce"]
+    + [f"yolo_nas_{k}.pt" for k in "sml"]
+    + [f"sam_{k}.pt" for k in "bl"]
+    + [f"FastSAM-{k}.pt" for k in "sx"]
+    + [f"rtdetr-{k}.pt" for k in "lx"]
+    + ["mobile_sam.pt"]
+    + ["calibration_image_sample_data_20x128x128x3_float32.npy.zip"]
+)
 GITHUB_ASSETS_STEMS = [Path(k).stem for k in GITHUB_ASSETS_NAMES]
 
 
-def is_url(url, check=True):
-    """Check if string is URL and check if URL exists."""
+def is_url(url, check=False):
+    """
+    Validates if the given string is a URL and optionally checks if the URL exists online.
+
+    Args:
+        url (str): The string to be validated as a URL.
+        check (bool, optional): If True, performs an additional check to see if the URL exists online.
+            Defaults to False.
+
+    Returns:
+        (bool): True for a valid URL. If 'check' is True, returns True only when the URL also exists online.
+            Returns False otherwise.
+
+    Example:
+        ```python
+        valid = is_url("https://www.example.com")
+        ```
+    """
     with contextlib.suppress(Exception):
         url = str(url)
         result = parse.urlparse(url)
@@ -40,7 +62,7 @@
         return False
 
 
-def delete_dsstore(path, files_to_delete=('.DS_Store', '__MACOSX')):
+def delete_dsstore(path, files_to_delete=(".DS_Store", "__MACOSX")):
     """
     Deletes all ".DS_store" files under a specified directory.
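As a reviewing aid, a few spot checks of what the reworked `GITHUB_ASSETS_NAMES` comprehensions above now expand to (illustrative, not exhaustive):

```python
from ultralytics.utils.downloads import GITHUB_ASSETS_NAMES

assert "yolov8n-obb.pt" in GITHUB_ASSETS_NAMES    # new '-obb' suffix
assert "yolov5n6u.pt" in GITHUB_ASSETS_NAMES      # P6 variants folded into the yolov5 comprehension
assert "yolov8s-world.pt" in GITHUB_ASSETS_NAMES  # YOLO-World weights
assert "yolov9c.pt" in GITHUB_ASSETS_NAMES        # YOLOv9 weights
assert "yolov8n6.pt" not in GITHUB_ASSETS_NAMES   # the old '6' suffix is dropped for yolov8
```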
@@ -59,18 +81,17 @@ def delete_dsstore(path, files_to_delete=('.DS_Store', '__MACOSX')): ".DS_store" files are created by the Apple operating system and contain metadata about folders and files. They are hidden system files and can cause issues when transferring files between different operating systems. """ - # Delete Apple .DS_store files for file in files_to_delete: matches = list(Path(path).rglob(file)) - LOGGER.info(f'Deleting {file} files: {matches}') + LOGGER.info(f"Deleting {file} files: {matches}") for f in matches: f.unlink() -def zip_directory(directory, compress=True, exclude=('.DS_Store', '__MACOSX'), progress=True): +def zip_directory(directory, compress=True, exclude=(".DS_Store", "__MACOSX"), progress=True): """ - Zips the contents of a directory, excluding files containing strings in the exclude list. - The resulting zip file is named after the directory and placed alongside it. + Zips the contents of a directory, excluding files containing strings in the exclude list. The resulting zip file is + named after the directory and placed alongside it. Args: directory (str | Path): The path to the directory to be zipped. @@ -96,17 +117,17 @@ def zip_directory(directory, compress=True, exclude=('.DS_Store', '__MACOSX'), p raise FileNotFoundError(f"Directory '{directory}' does not exist.") # Unzip with progress bar - files_to_zip = [f for f in directory.rglob('*') if f.is_file() and all(x not in f.name for x in exclude)] - zip_file = directory.with_suffix('.zip') + files_to_zip = [f for f in directory.rglob("*") if f.is_file() and all(x not in f.name for x in exclude)] + zip_file = directory.with_suffix(".zip") compression = ZIP_DEFLATED if compress else ZIP_STORED - with ZipFile(zip_file, 'w', compression) as f: - for file in TQDM(files_to_zip, desc=f'Zipping {directory} to {zip_file}...', unit='file', disable=not progress): + with ZipFile(zip_file, "w", compression) as f: + for file in TQDM(files_to_zip, desc=f"Zipping {directory} to {zip_file}...", unit="file", disable=not progress): f.write(file, file.relative_to(directory)) return zip_file # return path to zip file -def unzip_file(file, path=None, exclude=('.DS_Store', '__MACOSX'), exist_ok=False, progress=True): +def unzip_file(file, path=None, exclude=(".DS_Store", "__MACOSX"), exist_ok=False, progress=True): """ Unzips a *.zip file to the specified path, excluding files containing strings in the exclude list. @@ -146,51 +167,62 @@ def unzip_file(file, path=None, exclude=('.DS_Store', '__MACOSX'), exist_ok=Fals files = [f for f in zipObj.namelist() if all(x not in f for x in exclude)] top_level_dirs = {Path(f).parts[0] for f in files} - if len(top_level_dirs) > 1 or not files[0].endswith('/'): # zip has multiple files at top level + if len(top_level_dirs) > 1 or (len(files) > 1 and not files[0].endswith("/")): + # Zip has multiple files at top level path = extract_path = Path(path) / Path(file).stem # i.e. ../datasets/coco8 - else: # zip has 1 top-level directory + else: + # Zip has 1 top-level directory extract_path = path # i.e. ../datasets path = Path(path) / list(top_level_dirs)[0] # i.e. 
../datasets/coco8
 
     # Check if destination directory already exists and contains files
     if path.exists() and any(path.iterdir()) and not exist_ok:
         # If it exists and is not empty, return the path without unzipping
-        LOGGER.warning(f'WARNING ⚠️ Skipping {file} unzip as destination directory {path} is not empty.')
+        LOGGER.warning(f"WARNING ⚠️ Skipping {file} unzip as destination directory {path} is not empty.")
         return path
 
-    for f in TQDM(files, desc=f'Unzipping {file} to {Path(path).resolve()}...', unit='file', disable=not progress):
-        zipObj.extract(f, path=extract_path)
+    for f in TQDM(files, desc=f"Unzipping {file} to {Path(path).resolve()}...", unit="file", disable=not progress):
+        # Ensure the file is within the extract_path to avoid path traversal security vulnerability
+        if ".." in Path(f).parts:
+            LOGGER.warning(f"Potentially insecure file path: {f}, skipping extraction.")
+            continue
+        zipObj.extract(f, extract_path)
 
     return path  # return unzip dir
 
 
-def check_disk_space(url='https://ultralytics.com/assets/coco128.zip', sf=1.5, hard=True):
+def check_disk_space(url="https://ultralytics.com/assets/coco128.zip", path=Path.cwd(), sf=1.5, hard=True):
     """
     Check if there is sufficient disk space to download and store a file.
 
     Args:
         url (str, optional): The URL to the file. Defaults to 'https://ultralytics.com/assets/coco128.zip'.
+        path (str | Path, optional): The path or drive to check the available free space on.
         sf (float, optional): Safety factor, the multiplier for the required free space. Defaults to 1.5.
         hard (bool, optional): Whether to throw an error or not on insufficient disk space. Defaults to True.
 
     Returns:
         (bool): True if there is sufficient disk space, False otherwise.
     """
-    r = requests.head(url)  # response
-
-    # Check response
-    assert r.status_code < 400, f'URL error for {url}: {r.status_code} {r.reason}'
+    try:
+        r = requests.head(url)  # response
+        assert r.status_code < 400, f"URL error for {url}: {r.status_code} {r.reason}"  # check response
+    except Exception:
+        return True  # requests issue, default to True
 
     # Check file size
     gib = 1 << 30  # bytes per GiB
-    data = int(r.headers.get('Content-Length', 0)) / gib  # file size (GB)
-    total, used, free = (x / gib for x in shutil.disk_usage('/'))  # bytes
+    data = int(r.headers.get("Content-Length", 0)) / gib  # file size (GiB)
+    total, used, free = (x / gib for x in shutil.disk_usage(path))  # GiB
+
     if data * sf < free:
         return True  # sufficient space
 
     # Insufficient space
-    text = (f'WARNING ⚠️ Insufficient free disk space {free:.1f} GB < {data * sf:.3f} GB required, '
-            f'Please free {data * sf - free:.1f} GB additional disk space and try again.')
+    text = (
+        f"WARNING ⚠️ Insufficient free disk space {free:.1f} GB < {data * sf:.3f} GB required, "
+        f"Please free {data * sf - free:.1f} GB additional disk space and try again."
+ ) if hard: raise MemoryError(text) LOGGER.warning(text) @@ -216,35 +248,41 @@ def get_google_drive_file_info(link): url, filename = get_google_drive_file_info(link) ``` """ - file_id = link.split('/d/')[1].split('/view')[0] - drive_url = f'https://drive.google.com/uc?export=download&id={file_id}' + file_id = link.split("/d/")[1].split("/view")[0] + drive_url = f"https://drive.google.com/uc?export=download&id={file_id}" filename = None # Start session with requests.Session() as session: response = session.get(drive_url, stream=True) - if 'quota exceeded' in str(response.content.lower()): + if "quota exceeded" in str(response.content.lower()): raise ConnectionError( - emojis(f'❌ Google Drive file download quota exceeded. ' - f'Please try again later or download this file manually at {link}.')) + emojis( + f"❌ Google Drive file download quota exceeded. " + f"Please try again later or download this file manually at {link}." + ) + ) for k, v in response.cookies.items(): - if k.startswith('download_warning'): - drive_url += f'&confirm={v}' # v is token - cd = response.headers.get('content-disposition') + if k.startswith("download_warning"): + drive_url += f"&confirm={v}" # v is token + cd = response.headers.get("content-disposition") if cd: filename = re.findall('filename="(.+)"', cd)[0] return drive_url, filename -def safe_download(url, - file=None, - dir=None, - unzip=True, - delete=False, - curl=False, - retry=3, - min_bytes=1E0, - progress=True): +def safe_download( + url, + file=None, + dir=None, + unzip=True, + delete=False, + curl=False, + retry=3, + min_bytes=1e0, + exist_ok=False, + progress=True, +): """ Downloads files from a URL, with options for retrying, unzipping, and deleting the downloaded file. @@ -260,41 +298,49 @@ def safe_download(url, retry (int, optional): The number of times to retry the download in case of failure. Default: 3. min_bytes (float, optional): The minimum number of bytes that the downloaded file should have, to be considered a successful download. Default: 1E0. + exist_ok (bool, optional): Whether to overwrite existing contents during unzipping. Defaults to False. progress (bool, optional): Whether to display a progress bar during the download. Default: True. 
- """ - # Check if the URL is a Google Drive link - gdrive = url.startswith('https://drive.google.com/') + Example: + ```python + from ultralytics.utils.downloads import safe_download + + link = "https://ultralytics.com/assets/bus.jpg" + path = safe_download(link) + ``` + """ + gdrive = url.startswith("https://drive.google.com/") # check if the URL is a Google Drive link if gdrive: url, file = get_google_drive_file_info(url) - f = dir / (file if gdrive else url2file(url)) if dir else Path(file) # URL converted to filename - if '://' not in str(url) and Path(url).is_file(): # URL exists ('://' check required in Windows Python<3.10) + f = Path(dir or ".") / (file or url2file(url)) # URL converted to filename + if "://" not in str(url) and Path(url).is_file(): # URL exists ('://' check required in Windows Python<3.10) f = Path(url) # filename elif not f.is_file(): # URL and file do not exist - assert dir or file, 'dir or file required for download' desc = f"Downloading {url if gdrive else clean_url(url)} to '{f}'" - LOGGER.info(f'{desc}...') + LOGGER.info(f"{desc}...") f.parent.mkdir(parents=True, exist_ok=True) # make directory if missing - check_disk_space(url) + check_disk_space(url, path=f.parent) for i in range(retry + 1): try: if curl or i > 0: # curl download with retry, continue - s = 'sS' * (not progress) # silent - r = subprocess.run(['curl', '-#', f'-{s}L', url, '-o', f, '--retry', '3', '-C', '-']).returncode - assert r == 0, f'Curl return value {r}' + s = "sS" * (not progress) # silent + r = subprocess.run(["curl", "-#", f"-{s}L", url, "-o", f, "--retry", "3", "-C", "-"]).returncode + assert r == 0, f"Curl return value {r}" else: # urllib download - method = 'torch' - if method == 'torch': + method = "torch" + if method == "torch": torch.hub.download_url_to_file(url, f, progress=progress) else: - with request.urlopen(url) as response, TQDM(total=int(response.getheader('Content-Length', 0)), - desc=desc, - disable=not progress, - unit='B', - unit_scale=True, - unit_divisor=1024) as pbar: - with open(f, 'wb') as f_opened: + with request.urlopen(url) as response, TQDM( + total=int(response.getheader("Content-Length", 0)), + desc=desc, + disable=not progress, + unit="B", + unit_scale=True, + unit_divisor=1024, + ) as pbar: + with open(f, "wb") as f_opened: for data in response: f_opened.write(data) pbar.update(len(data)) @@ -305,88 +351,150 @@ def safe_download(url, f.unlink() # remove partial downloads except Exception as e: if i == 0 and not is_online(): - raise ConnectionError(emojis(f'❌ Download failure for {url}. Environment is not online.')) from e + raise ConnectionError(emojis(f"❌ Download failure for {url}. Environment is not online.")) from e elif i >= retry: - raise ConnectionError(emojis(f'❌ Download failure for {url}. Retry limit reached.')) from e - LOGGER.warning(f'⚠️ Download failure, retrying {i + 1}/{retry} {url}...') + raise ConnectionError(emojis(f"❌ Download failure for {url}. 
Retry limit reached.")) from e + LOGGER.warning(f"⚠️ Download failure, retrying {i + 1}/{retry} {url}...") - if unzip and f.exists() and f.suffix in ('', '.zip', '.tar', '.gz'): + if unzip and f.exists() and f.suffix in ("", ".zip", ".tar", ".gz"): from zipfile import is_zipfile - unzip_dir = dir or f.parent # unzip to dir if provided else unzip in place + unzip_dir = (dir or f.parent).resolve() # unzip to dir if provided else unzip in place if is_zipfile(f): - unzip_dir = unzip_file(file=f, path=unzip_dir, progress=progress) # unzip - elif f.suffix in ('.tar', '.gz'): - LOGGER.info(f'Unzipping {f} to {unzip_dir.resolve()}...') - subprocess.run(['tar', 'xf' if f.suffix == '.tar' else 'xfz', f, '--directory', unzip_dir], check=True) + unzip_dir = unzip_file(file=f, path=unzip_dir, exist_ok=exist_ok, progress=progress) # unzip + elif f.suffix in (".tar", ".gz"): + LOGGER.info(f"Unzipping {f} to {unzip_dir}...") + subprocess.run(["tar", "xf" if f.suffix == ".tar" else "xfz", f, "--directory", unzip_dir], check=True) if delete: f.unlink() # remove zip return unzip_dir -def get_github_assets(repo='ultralytics/assets', version='latest', retry=False): - """Return GitHub repo tag and assets (i.e. ['yolov8n.pt', 'yolov8s.pt', ...]).""" - if version != 'latest': - version = f'tags/{version}' # i.e. tags/v6.2 - url = f'https://api.github.com/repos/{repo}/releases/{version}' +def get_github_assets(repo="ultralytics/assets", version="latest", retry=False): + """ + Retrieve the specified version's tag and assets from a GitHub repository. If the version is not specified, the + function fetches the latest release assets. + + Args: + repo (str, optional): The GitHub repository in the format 'owner/repo'. Defaults to 'ultralytics/assets'. + version (str, optional): The release version to fetch assets from. Defaults to 'latest'. + retry (bool, optional): Flag to retry the request in case of a failure. Defaults to False. + + Returns: + (tuple): A tuple containing the release tag and a list of asset names. + + Example: + ```python + tag, assets = get_github_assets(repo='ultralytics/assets', version='latest') + ``` + """ + + if version != "latest": + version = f"tags/{version}" # i.e. tags/v6.2 + url = f"https://api.github.com/repos/{repo}/releases/{version}" r = requests.get(url) # github api - if r.status_code != 200 and r.reason != 'rate limit exceeded' and retry: # failed and not 403 rate limit exceeded + if r.status_code != 200 and r.reason != "rate limit exceeded" and retry: # failed and not 403 rate limit exceeded r = requests.get(url) # try again if r.status_code != 200: - LOGGER.warning(f'⚠️ GitHub assets check failure for {url}: {r.status_code} {r.reason}') - return '', [] + LOGGER.warning(f"⚠️ GitHub assets check failure for {url}: {r.status_code} {r.reason}") + return "", [] data = r.json() - return data['tag_name'], [x['name'] for x in data['assets']] # tag, assets + return data["tag_name"], [x["name"] for x in data["assets"]] # tag, assets i.e. ['yolov8n.pt', 'yolov8s.pt', ...] -def attempt_download_asset(file, repo='ultralytics/assets', release='v0.0.0'): - """Attempt file download from GitHub release assets if not found locally. release = 'latest', 'v6.2', etc.""" +def attempt_download_asset(file, repo="ultralytics/assets", release="v8.1.0", **kwargs): + """ + Attempt to download a file from GitHub release assets if it is not found locally. The function checks for the file + locally first, then tries to download it from the specified GitHub repository release. 
+ + Args: + file (str | Path): The filename or file path to be downloaded. + repo (str, optional): The GitHub repository in the format 'owner/repo'. Defaults to 'ultralytics/assets'. + release (str, optional): The specific release version to be downloaded. Defaults to 'v8.1.0'. + **kwargs (any): Additional keyword arguments for the download process. + + Returns: + (str): The path to the downloaded file. + + Example: + ```python + file_path = attempt_download_asset('yolov5s.pt', repo='ultralytics/assets', release='latest') + ``` + """ from ultralytics.utils import SETTINGS # scoped for circular import # YOLOv3/5u updates file = str(file) file = checks.check_yolov5u_filename(file) - file = Path(file.strip().replace("'", '')) + file = Path(file.strip().replace("'", "")) if file.exists(): return str(file) - elif (SETTINGS['weights_dir'] / file).exists(): - return str(SETTINGS['weights_dir'] / file) + elif (SETTINGS["weights_dir"] / file).exists(): + return str(SETTINGS["weights_dir"] / file) else: # URL specified name = Path(parse.unquote(str(file))).name # decode '%2F' to '/' etc. - if str(file).startswith(('http:/', 'https:/')): # download - url = str(file).replace(':/', '://') # Pathlib turns :// -> :/ + download_url = f"https://github.com/{repo}/releases/download" + if str(file).startswith(("http:/", "https:/")): # download + url = str(file).replace(":/", "://") # Pathlib turns :// -> :/ file = url2file(name) # parse authentication https://url.com/file.txt?auth... if Path(file).is_file(): - LOGGER.info(f'Found {clean_url(url)} locally at {file}') # file already exists + LOGGER.info(f"Found {clean_url(url)} locally at {file}") # file already exists else: - safe_download(url=url, file=file, min_bytes=1E5) + safe_download(url=url, file=file, min_bytes=1e5, **kwargs) elif repo == GITHUB_ASSETS_REPO and name in GITHUB_ASSETS_NAMES: - safe_download(url=f'https://github.com/{repo}/releases/download/{release}/{name}', file=file, min_bytes=1E5) + safe_download(url=f"{download_url}/{release}/{name}", file=file, min_bytes=1e5, **kwargs) else: tag, assets = get_github_assets(repo, release) if not assets: tag, assets = get_github_assets(repo) # latest release if name in assets: - safe_download(url=f'https://github.com/{repo}/releases/download/{tag}/{name}', file=file, min_bytes=1E5) + safe_download(url=f"{download_url}/{tag}/{name}", file=file, min_bytes=1e5, **kwargs) return str(file) -def download(url, dir=Path.cwd(), unzip=True, delete=False, curl=False, threads=1, retry=3): - """Downloads and unzips files concurrently if threads > 1, else sequentially.""" +def download(url, dir=Path.cwd(), unzip=True, delete=False, curl=False, threads=1, retry=3, exist_ok=False): + """ + Downloads files from specified URLs to a given directory. Supports concurrent downloads if multiple threads are + specified. + + Args: + url (str | list): The URL or list of URLs of the files to be downloaded. + dir (Path, optional): The directory where the files will be saved. Defaults to the current working directory. + unzip (bool, optional): Flag to unzip the files after downloading. Defaults to True. + delete (bool, optional): Flag to delete the zip files after extraction. Defaults to False. + curl (bool, optional): Flag to use curl for downloading. Defaults to False. + threads (int, optional): Number of threads to use for concurrent downloads. Defaults to 1. + retry (int, optional): Number of retries in case of download failure. Defaults to 3. 
+ exist_ok (bool, optional): Whether to overwrite existing contents during unzipping. Defaults to False. + + Example: + ```python + download('https://ultralytics.com/assets/example.zip', dir='path/to/dir', unzip=True) + ``` + """ dir = Path(dir) dir.mkdir(parents=True, exist_ok=True) # make directory if threads > 1: with ThreadPool(threads) as pool: pool.map( lambda x: safe_download( - url=x[0], dir=x[1], unzip=unzip, delete=delete, curl=curl, retry=retry, progress=threads <= 1), - zip(url, repeat(dir))) + url=x[0], + dir=x[1], + unzip=unzip, + delete=delete, + curl=curl, + retry=retry, + exist_ok=exist_ok, + progress=threads <= 1, + ), + zip(url, repeat(dir)), + ) pool.close() pool.join() else: for u in [url] if isinstance(url, (str, Path)) else url: - safe_download(url=u, dir=dir, unzip=unzip, delete=delete, curl=curl, retry=retry) + safe_download(url=u, dir=dir, unzip=unzip, delete=delete, curl=curl, retry=retry, exist_ok=exist_ok) diff --git a/ultralytics/utils/errors.py b/ultralytics/utils/errors.py index 5a76431..86aee1d 100644 --- a/ultralytics/utils/errors.py +++ b/ultralytics/utils/errors.py @@ -4,7 +4,19 @@ from ultralytics.utils import emojis class HUBModelError(Exception): + """ + Custom exception class for handling errors related to model fetching in Ultralytics YOLO. - def __init__(self, message='Model not found. Please check model URL and try again.'): + This exception is raised when a requested model is not found or cannot be retrieved. + The message is also processed to include emojis for better user experience. + + Attributes: + message (str): The error message displayed when the exception is raised. + + Note: + The message is automatically processed through the 'emojis' function from the 'ultralytics.utils' package. + """ + + def __init__(self, message="Model not found. Please check model URL and try again."): """Create an exception for when a model is not found.""" super().__init__(emojis(message)) diff --git a/ultralytics/utils/files.py b/ultralytics/utils/files.py index 0102c4b..719caca 100644 --- a/ultralytics/utils/files.py +++ b/ultralytics/utils/files.py @@ -30,9 +30,9 @@ class WorkingDirectory(contextlib.ContextDecorator): @contextmanager def spaces_in_path(path): """ - Context manager to handle paths with spaces in their names. - If a path contains spaces, it replaces them with underscores, copies the file/directory to the new path, - executes the context code block, then copies the file/directory back to its original location. + Context manager to handle paths with spaces in their names. If a path contains spaces, it replaces them with + underscores, copies the file/directory to the new path, executes the context code block, then copies the + file/directory back to its original location. Args: path (str | Path): The original path. 
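A short sketch of the threaded path through the updated `download()` above; the URLs reuse assets referenced elsewhere in this patch, and the target directory is arbitrary:

```python
from pathlib import Path

from ultralytics.utils.downloads import download

urls = [
    "https://ultralytics.com/assets/coco128.zip",
    "https://ultralytics.com/assets/bus.jpg",
]
# threads > 1 fans the URLs out to a ThreadPool of safe_download() calls;
# exist_ok=True (newly threaded through) overwrites previously unzipped contents
download(urls, dir=Path("datasets"), threads=2, exist_ok=True)
```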
@@ -45,18 +45,18 @@ def spaces_in_path(path): with ultralytics.utils.files import spaces_in_path with spaces_in_path('/path/with spaces') as new_path: - # your code here + # Your code here ``` """ # If path has spaces, replace them with underscores - if ' ' in str(path): + if " " in str(path): string = isinstance(path, str) # input type path = Path(path) # Create a temporary directory and construct the new path with tempfile.TemporaryDirectory() as tmp_dir: - tmp_path = Path(tmp_dir) / path.name.replace(' ', '_') + tmp_path = Path(tmp_dir) / path.name.replace(" ", "_") # Copy file/directory if path.is_dir(): @@ -82,7 +82,7 @@ def spaces_in_path(path): yield path -def increment_path(path, exist_ok=False, sep='', mkdir=False): +def increment_path(path, exist_ok=False, sep="", mkdir=False): """ Increments a file or directory path, i.e. runs/exp --> runs/exp{sep}2, runs/exp{sep}3, ... etc. @@ -102,12 +102,12 @@ def increment_path(path, exist_ok=False, sep='', mkdir=False): """ path = Path(path) # os-agnostic if path.exists() and not exist_ok: - path, suffix = (path.with_suffix(''), path.suffix) if path.is_file() else (path, '') + path, suffix = (path.with_suffix(""), path.suffix) if path.is_file() else (path, "") # Method 1 for n in range(2, 9999): - p = f'{path}{sep}{n}{suffix}' # increment path - if not os.path.exists(p): # + p = f"{path}{sep}{n}{suffix}" # increment path + if not os.path.exists(p): break path = Path(p) @@ -119,14 +119,14 @@ def increment_path(path, exist_ok=False, sep='', mkdir=False): def file_age(path=__file__): """Return days since last file update.""" - dt = (datetime.now() - datetime.fromtimestamp(Path(path).stat().st_mtime)) # delta + dt = datetime.now() - datetime.fromtimestamp(Path(path).stat().st_mtime) # delta return dt.days # + dt.seconds / 86400 # fractional days def file_date(path=__file__): """Return human-readable file modification date, i.e. '2021-3-26'.""" t = datetime.fromtimestamp(Path(path).stat().st_mtime) - return f'{t.year}-{t.month}-{t.day}' + return f"{t.year}-{t.month}-{t.day}" def file_size(path): @@ -137,11 +137,52 @@ def file_size(path): if path.is_file(): return path.stat().st_size / mb elif path.is_dir(): - return sum(f.stat().st_size for f in path.glob('**/*') if f.is_file()) / mb + return sum(f.stat().st_size for f in path.glob("**/*") if f.is_file()) / mb return 0.0 -def get_latest_run(search_dir='.'): +def get_latest_run(search_dir="."): """Return path to most recent 'last.pt' in /runs (i.e. to --resume from).""" - last_list = glob.glob(f'{search_dir}/**/last*.pt', recursive=True) - return max(last_list, key=os.path.getctime) if last_list else '' + last_list = glob.glob(f"{search_dir}/**/last*.pt", recursive=True) + return max(last_list, key=os.path.getctime) if last_list else "" + + +def update_models(model_names=("yolov8n.pt",), source_dir=Path("."), update_names=False): + """ + Updates and re-saves specified YOLO models in an 'updated_models' subdirectory. + + Args: + model_names (tuple, optional): Model filenames to update, defaults to ("yolov8n.pt"). + source_dir (Path, optional): Directory containing models and target subdirectory, defaults to current directory. + update_names (bool, optional): Update model names from a data YAML. 
+ + Example: + ```python + from ultralytics.utils.files import update_models + + model_names = (f"rtdetr-{size}.pt" for size in "lx") + update_models(model_names) + ``` + """ + from ultralytics import YOLO + from ultralytics.nn.autobackend import default_class_names + + target_dir = source_dir / "updated_models" + target_dir.mkdir(parents=True, exist_ok=True) # Ensure target directory exists + + for model_name in model_names: + model_path = source_dir / model_name + print(f"Loading model from {model_path}") + + # Load model + model = YOLO(model_path) + model.half() + if update_names: # update model names from a dataset YAML + model.model.names = default_class_names("coco8.yaml") + + # Define new save path + save_path = target_dir / model_name + + # Save model using model.save() + print(f"Re-saving {model_name} model to {save_path}") + model.save(save_path, use_dill=False) diff --git a/ultralytics/utils/instance.py b/ultralytics/utils/instance.py index 4e2e438..4e9ef2c 100644 --- a/ultralytics/utils/instance.py +++ b/ultralytics/utils/instance.py @@ -7,7 +7,7 @@ from typing import List import numpy as np -from .ops import ltwh2xywh, ltwh2xyxy, resample_segments, xywh2ltwh, xywh2xyxy, xyxy2ltwh, xyxy2xywh +from .ops import ltwh2xywh, ltwh2xyxy, xywh2ltwh, xywh2xyxy, xyxy2ltwh, xyxy2xywh def _ntuple(n): @@ -26,16 +26,29 @@ to_4tuple = _ntuple(4) # `xyxy` means left top and right bottom # `xywh` means center x, center y and width, height(YOLO format) # `ltwh` means left top and width, height(COCO format) -_formats = ['xyxy', 'xywh', 'ltwh'] +_formats = ["xyxy", "xywh", "ltwh"] -__all__ = 'Bboxes', # tuple or list +__all__ = ("Bboxes",) # tuple or list class Bboxes: - """Bounding Boxes class. Only numpy variables are supported.""" + """ + A class for handling bounding boxes. - def __init__(self, bboxes, format='xyxy') -> None: - assert format in _formats, f'Invalid bounding box format: {format}, format must be one of {_formats}' + The class supports various bounding box formats like 'xyxy', 'xywh', and 'ltwh'. + Bounding box data should be provided in numpy arrays. + + Attributes: + bboxes (numpy.ndarray): The bounding boxes stored in a 2D numpy array. + format (str): The format of the bounding boxes ('xyxy', 'xywh', or 'ltwh'). + + Note: + This class does not handle normalization or denormalization of bounding boxes. 
+ """ + + def __init__(self, bboxes, format="xyxy") -> None: + """Initializes the Bboxes class with bounding box data in a specified format.""" + assert format in _formats, f"Invalid bounding box format: {format}, format must be one of {_formats}" bboxes = bboxes[None, :] if bboxes.ndim == 1 else bboxes assert bboxes.ndim == 2 assert bboxes.shape[1] == 4 @@ -45,21 +58,21 @@ class Bboxes: def convert(self, format): """Converts bounding box format from one type to another.""" - assert format in _formats, f'Invalid bounding box format: {format}, format must be one of {_formats}' + assert format in _formats, f"Invalid bounding box format: {format}, format must be one of {_formats}" if self.format == format: return - elif self.format == 'xyxy': - func = xyxy2xywh if format == 'xywh' else xyxy2ltwh - elif self.format == 'xywh': - func = xywh2xyxy if format == 'xyxy' else xywh2ltwh + elif self.format == "xyxy": + func = xyxy2xywh if format == "xywh" else xyxy2ltwh + elif self.format == "xywh": + func = xywh2xyxy if format == "xyxy" else xywh2ltwh else: - func = ltwh2xyxy if format == 'xyxy' else ltwh2xywh + func = ltwh2xyxy if format == "xyxy" else ltwh2xywh self.bboxes = func(self.bboxes) self.format = format def areas(self): """Return box areas.""" - self.convert('xyxy') + self.convert("xyxy") return (self.bboxes[:, 2] - self.bboxes[:, 0]) * (self.bboxes[:, 3] - self.bboxes[:, 1]) # def denormalize(self, w, h): @@ -111,7 +124,7 @@ class Bboxes: return len(self.bboxes) @classmethod - def concatenate(cls, boxes_list: List['Bboxes'], axis=0) -> 'Bboxes': + def concatenate(cls, boxes_list: List["Bboxes"], axis=0) -> "Bboxes": """ Concatenate a list of Bboxes objects into a single Bboxes object. @@ -135,7 +148,7 @@ class Bboxes: return boxes_list[0] return cls(np.concatenate([b.bboxes for b in boxes_list], axis=axis)) - def __getitem__(self, index) -> 'Bboxes': + def __getitem__(self, index) -> "Bboxes": """ Retrieve a specific bounding box or a set of bounding boxes using indexing. @@ -156,32 +169,52 @@ class Bboxes: if isinstance(index, int): return Bboxes(self.bboxes[index].view(1, -1)) b = self.bboxes[index] - assert b.ndim == 2, f'Indexing on Bboxes with {index} failed to return a matrix!' + assert b.ndim == 2, f"Indexing on Bboxes with {index} failed to return a matrix!" return Bboxes(b) class Instances: + """ + Container for bounding boxes, segments, and keypoints of detected objects in an image. - def __init__(self, bboxes, segments=None, keypoints=None, bbox_format='xywh', normalized=True) -> None: + Attributes: + _bboxes (Bboxes): Internal object for handling bounding box operations. + keypoints (ndarray): keypoints(x, y, visible) with shape [N, 17, 3]. Default is None. + normalized (bool): Flag indicating whether the bounding box coordinates are normalized. + segments (ndarray): Segments array with shape [N, 1000, 2] after resampling. + + Args: + bboxes (ndarray): An array of bounding boxes with shape [N, 4]. + segments (list | ndarray, optional): A list or array of object segments. Default is None. + keypoints (ndarray, optional): An array of keypoints with shape [N, 17, 3]. Default is None. + bbox_format (str, optional): The format of bounding boxes ('xywh' or 'xyxy'). Default is 'xywh'. + normalized (bool, optional): Whether the bounding box coordinates are normalized. Default is True. 
+ + Examples: + ```python + # Create an Instances object + instances = Instances( + bboxes=np.array([[10, 10, 30, 30], [20, 20, 40, 40]]), + segments=[np.array([[5, 5], [10, 10]]), np.array([[15, 15], [20, 20]])], + keypoints=np.array([[[5, 5, 1], [10, 10, 1]], [[15, 15, 1], [20, 20, 1]]]) + ) + ``` + + Note: + The bounding box format is either 'xywh' or 'xyxy', and is determined by the `bbox_format` argument. + This class does not perform input validation, and it assumes the inputs are well-formed. + """ + + def __init__(self, bboxes, segments=None, keypoints=None, bbox_format="xywh", normalized=True) -> None: """ Args: bboxes (ndarray): bboxes with shape [N, 4]. segments (list | ndarray): segments. keypoints (ndarray): keypoints(x, y, visible) with shape [N, 17, 3]. """ - if segments is None: - segments = [] self._bboxes = Bboxes(bboxes=bboxes, format=bbox_format) self.keypoints = keypoints self.normalized = normalized - - if len(segments) > 0: - # list[np.array(1000, 2)] * num_samples - segments = resample_segments(segments) - # (N, 1000, 2) - segments = np.stack(segments, axis=0) - else: - segments = np.zeros((0, 1000, 2), dtype=np.float32) self.segments = segments def convert_bbox(self, format): @@ -194,7 +227,7 @@ class Instances: return self._bboxes.areas() def scale(self, scale_w, scale_h, bbox_only=False): - """this might be similar with denormalize func but without normalized sign.""" + """This might be similar with denormalize func but without normalized sign.""" self._bboxes.mul(scale=(scale_w, scale_h, scale_w, scale_h)) if bbox_only: return @@ -230,7 +263,7 @@ class Instances: def add_padding(self, padw, padh): """Handle rect and mosaic situation.""" - assert not self.normalized, 'you should add padding with absolute coordinates.' + assert not self.normalized, "you should add padding with absolute coordinates." self._bboxes.add(offset=(padw, padh, padw, padh)) self.segments[..., 0] += padw self.segments[..., 1] += padh @@ -238,7 +271,7 @@ class Instances: self.keypoints[..., 0] += padw self.keypoints[..., 1] += padh - def __getitem__(self, index) -> 'Instances': + def __getitem__(self, index) -> "Instances": """ Retrieve a specific instance or a set of instances using indexing. 
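Reviewer note: since the `Bboxes` refactor above mostly documents existing behavior, a minimal usage sketch may help (values are synthetic, not from the patch):

```python
import numpy as np
from ultralytics.utils.instance import Bboxes

boxes = Bboxes(np.array([[10.0, 10.0, 50.0, 80.0]]), format="xyxy")
boxes.convert("xywh")  # in-place: (x1, y1, x2, y2) -> (cx, cy, w, h)
print(boxes.bboxes)    # [[30. 45. 40. 70.]]
print(boxes.areas())   # [2800.] -- areas() converts back to "xyxy" internally
```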
@@ -268,7 +301,7 @@ class Instances:
 def flipud(self, h):
 """Flips the coordinates of bounding boxes, segments, and keypoints vertically."""
- if self._bboxes.format == 'xyxy':
+ if self._bboxes.format == "xyxy":
 y1 = self.bboxes[:, 1].copy()
 y2 = self.bboxes[:, 3].copy()
 self.bboxes[:, 1] = h - y2
@@ -281,7 +314,7 @@ class Instances:
 def fliplr(self, w):
 """Reverses the order of the bounding boxes and segments horizontally."""
- if self._bboxes.format == 'xyxy':
+ if self._bboxes.format == "xyxy":
 x1 = self.bboxes[:, 0].copy()
 x2 = self.bboxes[:, 2].copy()
 self.bboxes[:, 0] = w - x2
@@ -295,10 +328,10 @@ class Instances:
 def clip(self, w, h):
 """Clips bounding boxes, segments, and keypoints values to stay within image boundaries."""
 ori_format = self._bboxes.format
- self.convert_bbox(format='xyxy')
+ self.convert_bbox(format="xyxy")
 self.bboxes[:, [0, 2]] = self.bboxes[:, [0, 2]].clip(0, w)
 self.bboxes[:, [1, 3]] = self.bboxes[:, [1, 3]].clip(0, h)
- if ori_format != 'xyxy':
+ if ori_format != "xyxy":
 self.convert_bbox(format=ori_format)
 self.segments[..., 0] = self.segments[..., 0].clip(0, w)
 self.segments[..., 1] = self.segments[..., 1].clip(0, h)
@@ -307,7 +340,11 @@ class Instances:
 self.keypoints[..., 1] = self.keypoints[..., 1].clip(0, h)

 def remove_zero_area_boxes(self):
- """Remove zero-area boxes, i.e. after clipping some boxes may have zero width or height. This removes them."""
+ """
+ Remove zero-area boxes, i.e. after clipping some boxes may have zero width or height.
+
+ This removes them.
+ """
 good = self.bbox_areas > 0
 if not all(good):
 self._bboxes = self._bboxes[good]
@@ -330,7 +367,7 @@ class Instances:
 return len(self.bboxes)

 @classmethod
- def concatenate(cls, instances_list: List['Instances'], axis=0) -> 'Instances':
+ def concatenate(cls, instances_list: List["Instances"], axis=0) -> "Instances":
 """
 Concatenates a list of Instances objects into a single Instances object.
diff --git a/ultralytics/utils/loss.py b/ultralytics/utils/loss.py
index 69f08db..d0ca9c3 100644
--- a/ultralytics/utils/loss.py
+++ b/ultralytics/utils/loss.py
@@ -6,14 +6,17 @@ import torch.nn.functional as F

 from ultralytics.utils.metrics import OKS_SIGMA
 from ultralytics.utils.ops import crop_mask, xywh2xyxy, xyxy2xywh
-from ultralytics.utils.tal import TaskAlignedAssigner, dist2bbox, make_anchors
-
-from .metrics import bbox_iou
+from ultralytics.utils.tal import RotatedTaskAlignedAssigner, TaskAlignedAssigner, dist2bbox, dist2rbox, make_anchors
+from .metrics import bbox_iou, probiou
 from .tal import bbox2dist

class VarifocalLoss(nn.Module):
- """Varifocal loss by Zhang et al. https://arxiv.org/abs/2008.13367."""
+ """
+ Varifocal loss by Zhang et al.
+
+ https://arxiv.org/abs/2008.13367.
+ """

 def __init__(self):
 """Initialize the VarifocalLoss class."""
@@ -24,21 +27,25 @@ class VarifocalLoss(nn.Module):
 """Computes varifocal loss."""
 weight = alpha * pred_score.sigmoid().pow(gamma) * (1 - label) + gt_score * label
 with torch.cuda.amp.autocast(enabled=False):
- loss = (F.binary_cross_entropy_with_logits(pred_score.float(), gt_score.float(), reduction='none') *
- weight).mean(1).sum()
+ loss = (
+ (F.binary_cross_entropy_with_logits(pred_score.float(), gt_score.float(), reduction="none") * weight)
+ .mean(1)
+ .sum()
+ )
 return loss

class FocalLoss(nn.Module):
 """Wraps focal loss around existing loss_fcn(), i.e. criteria = FocalLoss(nn.BCEWithLogitsLoss(), gamma=1.5)."""

- def __init__(self, ):
+ def __init__(self):
+ """Initializer for FocalLoss class with no parameters."""
 super().__init__()

 @staticmethod
 def forward(pred, label, gamma=1.5, alpha=0.25):
 """Calculates focal loss between predictions and ground truth labels."""
- loss = F.binary_cross_entropy_with_logits(pred, label, reduction='none')
+ loss = F.binary_cross_entropy_with_logits(pred, label, reduction="none")
 # p_t = torch.exp(-loss)
 # loss *= self.alpha * (1.000001 - p_t) ** self.gamma # non-zero power for gradient stability
@@ -54,6 +61,7 @@ class FocalLoss(nn.Module):

class BboxLoss(nn.Module):
+ """Criterion class for computing training losses during training."""

 def __init__(self, reg_max, use_dfl=False):
 """Initialize the BboxLoss module with regularization maximum and DFL settings."""
@@ -79,42 +87,73 @@ class BboxLoss(nn.Module):

 @staticmethod
 def _df_loss(pred_dist, target):
- """Return sum of left and right DFL losses."""
- # Distribution Focal Loss (DFL) proposed in Generalized Focal Loss https://ieeexplore.ieee.org/document/9792391
+ """
+ Return sum of left and right DFL losses.
+
+ Distribution Focal Loss (DFL) proposed in Generalized Focal Loss
+ https://ieeexplore.ieee.org/document/9792391
+ """
 tl = target.long() # target left
 tr = tl + 1 # target right
 wl = tr - target # weight left
 wr = 1 - wl # weight right
- return (F.cross_entropy(pred_dist, tl.view(-1), reduction='none').view(tl.shape) * wl +
- F.cross_entropy(pred_dist, tr.view(-1), reduction='none').view(tl.shape) * wr).mean(-1, keepdim=True)
+ return (
+ F.cross_entropy(pred_dist, tl.view(-1), reduction="none").view(tl.shape) * wl
+ + F.cross_entropy(pred_dist, tr.view(-1), reduction="none").view(tl.shape) * wr
+ ).mean(-1, keepdim=True)
+
+
+class RotatedBboxLoss(BboxLoss):
+ """Criterion class for computing training losses during training."""
+
+ def __init__(self, reg_max, use_dfl=False):
+ """Initialize the BboxLoss module with regularization maximum and DFL settings."""
+ super().__init__(reg_max, use_dfl)
+
+ def forward(self, pred_dist, pred_bboxes, anchor_points, target_bboxes, target_scores, target_scores_sum, fg_mask):
+ """IoU loss."""
+ weight = target_scores.sum(-1)[fg_mask].unsqueeze(-1)
+ iou = probiou(pred_bboxes[fg_mask], target_bboxes[fg_mask])
+ loss_iou = ((1.0 - iou) * weight).sum() / target_scores_sum
+
+ # DFL loss
+ if self.use_dfl:
+ target_ltrb = bbox2dist(anchor_points, xywh2xyxy(target_bboxes[..., :4]), self.reg_max)
+ loss_dfl = self._df_loss(pred_dist[fg_mask].view(-1, self.reg_max + 1), target_ltrb[fg_mask]) * weight
+ loss_dfl = loss_dfl.sum() / target_scores_sum
+ else:
+ loss_dfl = torch.tensor(0.0).to(pred_dist.device)
+
+ return loss_iou, loss_dfl

class KeypointLoss(nn.Module):
 """Criterion class for computing training losses."""

 def __init__(self, sigmas) -> None:
+ """Initialize the KeypointLoss class."""
 super().__init__()
 self.sigmas = sigmas

 def forward(self, pred_kpts, gt_kpts, kpt_mask, area):
 """Calculates keypoint loss factor and Euclidean distance loss for predicted and actual keypoints."""
- d = (pred_kpts[..., 0] - gt_kpts[..., 0]) ** 2 + (pred_kpts[..., 1] - gt_kpts[..., 1]) ** 2
- kpt_loss_factor = (torch.sum(kpt_mask != 0) + torch.sum(kpt_mask == 0)) / (torch.sum(kpt_mask != 0) + 1e-9)
+ d = (pred_kpts[..., 0] - gt_kpts[..., 0]).pow(2) + (pred_kpts[..., 1] - gt_kpts[..., 1]).pow(2)
+ kpt_loss_factor = kpt_mask.shape[1] / (torch.sum(kpt_mask != 0, dim=1) + 1e-9)
 # e = d / (2 * (area *
self.sigmas) ** 2 + 1e-9) # from formula - e = d / (2 * self.sigmas) ** 2 / (area + 1e-9) / 2 # from cocoeval - return kpt_loss_factor * ((1 - torch.exp(-e)) * kpt_mask).mean() + e = d / ((2 * self.sigmas).pow(2) * (area + 1e-9) * 2) # from cocoeval + return (kpt_loss_factor.view(-1, 1) * ((1 - torch.exp(-e)) * kpt_mask)).mean() class v8DetectionLoss: """Criterion class for computing training losses.""" - def __init__(self, model): # model must be de-paralleled - + def __init__(self, model, tal_topk=10): # model must be de-paralleled + """Initializes v8DetectionLoss with the model, defining model-related properties and BCE loss function.""" device = next(model.parameters()).device # get model device h = model.args # hyperparameters m = model.model[-1] # Detect() module - self.bce = nn.BCEWithLogitsLoss(reduction='none') + self.bce = nn.BCEWithLogitsLoss(reduction="none") self.hyp = h self.stride = m.stride # model strides self.nc = m.nc # number of classes @@ -124,7 +163,7 @@ class v8DetectionLoss: self.use_dfl = m.reg_max > 1 - self.assigner = TaskAlignedAssigner(topk=10, num_classes=self.nc, alpha=0.5, beta=6.0) + self.assigner = TaskAlignedAssigner(topk=tal_topk, num_classes=self.nc, alpha=0.5, beta=6.0) self.bbox_loss = BboxLoss(m.reg_max - 1, use_dfl=self.use_dfl).to(device) self.proj = torch.arange(m.reg_max, dtype=torch.float, device=device) @@ -159,7 +198,8 @@ class v8DetectionLoss: loss = torch.zeros(3, device=self.device) # box, cls, dfl feats = preds[1] if isinstance(preds, tuple) else preds pred_distri, pred_scores = torch.cat([xi.view(feats[0].shape[0], self.no, -1) for xi in feats], 2).split( - (self.reg_max * 4, self.nc), 1) + (self.reg_max * 4, self.nc), 1 + ) pred_scores = pred_scores.permute(0, 2, 1).contiguous() pred_distri = pred_distri.permute(0, 2, 1).contiguous() @@ -169,30 +209,36 @@ class v8DetectionLoss: imgsz = torch.tensor(feats[0].shape[2:], device=self.device, dtype=dtype) * self.stride[0] # image size (h,w) anchor_points, stride_tensor = make_anchors(feats, self.stride, 0.5) - # targets - targets = torch.cat((batch['batch_idx'].view(-1, 1), batch['cls'].view(-1, 1), batch['bboxes']), 1) + # Targets + targets = torch.cat((batch["batch_idx"].view(-1, 1), batch["cls"].view(-1, 1), batch["bboxes"]), 1) targets = self.preprocess(targets.to(self.device), batch_size, scale_tensor=imgsz[[1, 0, 1, 0]]) gt_labels, gt_bboxes = targets.split((1, 4), 2) # cls, xyxy mask_gt = gt_bboxes.sum(2, keepdim=True).gt_(0) - # pboxes + # Pboxes pred_bboxes = self.bbox_decode(anchor_points, pred_distri) # xyxy, (b, h*w, 4) _, target_bboxes, target_scores, fg_mask, _ = self.assigner( - pred_scores.detach().sigmoid(), (pred_bboxes.detach() * stride_tensor).type(gt_bboxes.dtype), - anchor_points * stride_tensor, gt_labels, gt_bboxes, mask_gt) + pred_scores.detach().sigmoid(), + (pred_bboxes.detach() * stride_tensor).type(gt_bboxes.dtype), + anchor_points * stride_tensor, + gt_labels, + gt_bboxes, + mask_gt, + ) target_scores_sum = max(target_scores.sum(), 1) - # cls loss + # Cls loss # loss[1] = self.varifocal_loss(pred_scores, target_scores, target_labels) / target_scores_sum # VFL way loss[1] = self.bce(pred_scores, target_scores.to(dtype)).sum() / target_scores_sum # BCE - # bbox loss + # Bbox loss if fg_mask.sum(): target_bboxes /= stride_tensor - loss[0], loss[2] = self.bbox_loss(pred_distri, pred_bboxes, anchor_points, target_bboxes, target_scores, - target_scores_sum, fg_mask) + loss[0], loss[2] = self.bbox_loss( + pred_distri, pred_bboxes, anchor_points, target_bboxes, 
target_scores, target_scores_sum, fg_mask + ) loss[0] *= self.hyp.box # box gain loss[1] *= self.hyp.cls # cls gain @@ -205,8 +251,8 @@ class v8SegmentationLoss(v8DetectionLoss): """Criterion class for computing training losses.""" def __init__(self, model): # model must be de-paralleled + """Initializes the v8SegmentationLoss class, taking a de-paralleled model as argument.""" super().__init__(model) - self.nm = model.model[-1].nm # number of masks self.overlap = model.args.overlap_mask def __call__(self, preds, batch): @@ -215,9 +261,10 @@ class v8SegmentationLoss(v8DetectionLoss): feats, pred_masks, proto = preds if len(preds) == 3 else preds[1] batch_size, _, mask_h, mask_w = proto.shape # batch size, number of masks, mask height, mask width pred_distri, pred_scores = torch.cat([xi.view(feats[0].shape[0], self.no, -1) for xi in feats], 2).split( - (self.reg_max * 4, self.nc), 1) + (self.reg_max * 4, self.nc), 1 + ) - # b, grids, .. + # B, grids, .. pred_scores = pred_scores.permute(0, 2, 1).contiguous() pred_distri = pred_distri.permute(0, 2, 1).contiguous() pred_masks = pred_masks.permute(0, 2, 1).contiguous() @@ -226,80 +273,168 @@ class v8SegmentationLoss(v8DetectionLoss): imgsz = torch.tensor(feats[0].shape[2:], device=self.device, dtype=dtype) * self.stride[0] # image size (h,w) anchor_points, stride_tensor = make_anchors(feats, self.stride, 0.5) - # targets + # Targets try: - batch_idx = batch['batch_idx'].view(-1, 1) - targets = torch.cat((batch_idx, batch['cls'].view(-1, 1), batch['bboxes']), 1) + batch_idx = batch["batch_idx"].view(-1, 1) + targets = torch.cat((batch_idx, batch["cls"].view(-1, 1), batch["bboxes"]), 1) targets = self.preprocess(targets.to(self.device), batch_size, scale_tensor=imgsz[[1, 0, 1, 0]]) gt_labels, gt_bboxes = targets.split((1, 4), 2) # cls, xyxy mask_gt = gt_bboxes.sum(2, keepdim=True).gt_(0) except RuntimeError as e: - raise TypeError('ERROR ❌ segment dataset incorrectly formatted or not a segment dataset.\n' - "This error can occur when incorrectly training a 'segment' model on a 'detect' dataset, " - "i.e. 'yolo train model=yolov8n-seg.pt data=coco128.yaml'.\nVerify your dataset is a " - "correctly formatted 'segment' dataset using 'data=coco128-seg.yaml' " - 'as an example.\nSee https://docs.ultralytics.com/tasks/segment/ for help.') from e + raise TypeError( + "ERROR ❌ segment dataset incorrectly formatted or not a segment dataset.\n" + "This error can occur when incorrectly training a 'segment' model on a 'detect' dataset, " + "i.e. 'yolo train model=yolov8n-seg.pt data=coco8.yaml'.\nVerify your dataset is a " + "correctly formatted 'segment' dataset using 'data=coco8-seg.yaml' " + "as an example.\nSee https://docs.ultralytics.com/datasets/segment/ for help." 
+ ) from e - # pboxes + # Pboxes pred_bboxes = self.bbox_decode(anchor_points, pred_distri) # xyxy, (b, h*w, 4) _, target_bboxes, target_scores, fg_mask, target_gt_idx = self.assigner( - pred_scores.detach().sigmoid(), (pred_bboxes.detach() * stride_tensor).type(gt_bboxes.dtype), - anchor_points * stride_tensor, gt_labels, gt_bboxes, mask_gt) + pred_scores.detach().sigmoid(), + (pred_bboxes.detach() * stride_tensor).type(gt_bboxes.dtype), + anchor_points * stride_tensor, + gt_labels, + gt_bboxes, + mask_gt, + ) target_scores_sum = max(target_scores.sum(), 1) - # cls loss + # Cls loss # loss[1] = self.varifocal_loss(pred_scores, target_scores, target_labels) / target_scores_sum # VFL way loss[2] = self.bce(pred_scores, target_scores.to(dtype)).sum() / target_scores_sum # BCE if fg_mask.sum(): - # bbox loss - loss[0], loss[3] = self.bbox_loss(pred_distri, pred_bboxes, anchor_points, target_bboxes / stride_tensor, - target_scores, target_scores_sum, fg_mask) - # masks loss - masks = batch['masks'].to(self.device).float() + # Bbox loss + loss[0], loss[3] = self.bbox_loss( + pred_distri, + pred_bboxes, + anchor_points, + target_bboxes / stride_tensor, + target_scores, + target_scores_sum, + fg_mask, + ) + # Masks loss + masks = batch["masks"].to(self.device).float() if tuple(masks.shape[-2:]) != (mask_h, mask_w): # downsample - masks = F.interpolate(masks[None], (mask_h, mask_w), mode='nearest')[0] + masks = F.interpolate(masks[None], (mask_h, mask_w), mode="nearest")[0] - for i in range(batch_size): - if fg_mask[i].sum(): - mask_idx = target_gt_idx[i][fg_mask[i]] - if self.overlap: - gt_mask = torch.where(masks[[i]] == (mask_idx + 1).view(-1, 1, 1), 1.0, 0.0) - else: - gt_mask = masks[batch_idx.view(-1) == i][mask_idx] - xyxyn = target_bboxes[i][fg_mask[i]] / imgsz[[1, 0, 1, 0]] - marea = xyxy2xywh(xyxyn)[:, 2:].prod(1) - mxyxy = xyxyn * torch.tensor([mask_w, mask_h, mask_w, mask_h], device=self.device) - loss[1] += self.single_mask_loss(gt_mask, pred_masks[i][fg_mask[i]], proto[i], mxyxy, marea) # seg - - # WARNING: lines below prevents Multi-GPU DDP 'unused gradient' PyTorch errors, do not remove - else: - loss[1] += (proto * 0).sum() + (pred_masks * 0).sum() # inf sums may lead to nan loss + loss[1] = self.calculate_segmentation_loss( + fg_mask, masks, target_gt_idx, target_bboxes, batch_idx, proto, pred_masks, imgsz, self.overlap + ) # WARNING: lines below prevent Multi-GPU DDP 'unused gradient' PyTorch errors, do not remove else: loss[1] += (proto * 0).sum() + (pred_masks * 0).sum() # inf sums may lead to nan loss loss[0] *= self.hyp.box # box gain - loss[1] *= self.hyp.box / batch_size # seg gain + loss[1] *= self.hyp.box # seg gain loss[2] *= self.hyp.cls # cls gain loss[3] *= self.hyp.dfl # dfl gain return loss.sum() * batch_size, loss.detach() # loss(box, cls, dfl) - def single_mask_loss(self, gt_mask, pred, proto, xyxy, area): - """Mask loss for one image.""" - pred_mask = (pred @ proto.view(self.nm, -1)).view(-1, *proto.shape[1:]) # (n, 32) @ (32,80,80) -> (n,80,80) - loss = F.binary_cross_entropy_with_logits(pred_mask, gt_mask, reduction='none') - return (crop_mask(loss, xyxy).mean(dim=(1, 2)) / area).mean() + @staticmethod + def single_mask_loss( + gt_mask: torch.Tensor, pred: torch.Tensor, proto: torch.Tensor, xyxy: torch.Tensor, area: torch.Tensor + ) -> torch.Tensor: + """ + Compute the instance segmentation loss for a single image. + + Args: + gt_mask (torch.Tensor): Ground truth mask of shape (n, H, W), where n is the number of objects. 
+ pred (torch.Tensor): Predicted mask coefficients of shape (n, 32). + proto (torch.Tensor): Prototype masks of shape (32, H, W). + xyxy (torch.Tensor): Ground truth bounding boxes in xyxy format, normalized to [0, 1], of shape (n, 4). + area (torch.Tensor): Area of each ground truth bounding box of shape (n,). + + Returns: + (torch.Tensor): The calculated mask loss for a single image. + + Notes: + The function uses the equation pred_mask = torch.einsum('in,nhw->ihw', pred, proto) to produce the + predicted masks from the prototype masks and predicted mask coefficients. + """ + pred_mask = torch.einsum("in,nhw->ihw", pred, proto) # (n, 32) @ (32, 80, 80) -> (n, 80, 80) + loss = F.binary_cross_entropy_with_logits(pred_mask, gt_mask, reduction="none") + return (crop_mask(loss, xyxy).mean(dim=(1, 2)) / area).sum() + + def calculate_segmentation_loss( + self, + fg_mask: torch.Tensor, + masks: torch.Tensor, + target_gt_idx: torch.Tensor, + target_bboxes: torch.Tensor, + batch_idx: torch.Tensor, + proto: torch.Tensor, + pred_masks: torch.Tensor, + imgsz: torch.Tensor, + overlap: bool, + ) -> torch.Tensor: + """ + Calculate the loss for instance segmentation. + + Args: + fg_mask (torch.Tensor): A binary tensor of shape (BS, N_anchors) indicating which anchors are positive. + masks (torch.Tensor): Ground truth masks of shape (BS, H, W) if `overlap` is False, otherwise (BS, ?, H, W). + target_gt_idx (torch.Tensor): Indexes of ground truth objects for each anchor of shape (BS, N_anchors). + target_bboxes (torch.Tensor): Ground truth bounding boxes for each anchor of shape (BS, N_anchors, 4). + batch_idx (torch.Tensor): Batch indices of shape (N_labels_in_batch, 1). + proto (torch.Tensor): Prototype masks of shape (BS, 32, H, W). + pred_masks (torch.Tensor): Predicted masks for each anchor of shape (BS, N_anchors, 32). + imgsz (torch.Tensor): Size of the input image as a tensor of shape (2), i.e., (H, W). + overlap (bool): Whether the masks in `masks` tensor overlap. + + Returns: + (torch.Tensor): The calculated loss for instance segmentation. + + Notes: + The batch loss can be computed for improved speed at higher memory usage. 
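Reviewer note: the einsum used by `single_mask_loss` is the batched form of the old matmul-and-reshape it replaces; a tiny self-contained equivalence check (shapes are synthetic, not from the patch):

```python
import torch

n, nm, h, w = 3, 32, 8, 8      # illustrative sizes
pred = torch.randn(n, nm)      # mask coefficients
proto = torch.randn(nm, h, w)  # prototype masks

a = torch.einsum("in,nhw->ihw", pred, proto)
b = (pred @ proto.view(nm, -1)).view(-1, h, w)  # the old formulation
assert torch.allclose(a, b, atol=1e-6)
```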
+ For example, pred_mask can be computed as follows:
+ pred_mask = torch.einsum('in,nhw->ihw', pred, proto) # (i, 32) @ (32, 160, 160) -> (i, 160, 160)
+ """
+ _, _, mask_h, mask_w = proto.shape
+ loss = 0
+
+ # Normalize to 0-1
+ target_bboxes_normalized = target_bboxes / imgsz[[1, 0, 1, 0]]
+
+ # Areas of target bboxes
+ marea = xyxy2xywh(target_bboxes_normalized)[..., 2:].prod(2)
+
+ # Normalize to mask size
+ mxyxy = target_bboxes_normalized * torch.tensor([mask_w, mask_h, mask_w, mask_h], device=proto.device)
+
+ for i, single_i in enumerate(zip(fg_mask, target_gt_idx, pred_masks, proto, mxyxy, marea, masks)):
+ fg_mask_i, target_gt_idx_i, pred_masks_i, proto_i, mxyxy_i, marea_i, masks_i = single_i
+ if fg_mask_i.any():
+ mask_idx = target_gt_idx_i[fg_mask_i]
+ if overlap:
+ gt_mask = masks_i == (mask_idx + 1).view(-1, 1, 1)
+ gt_mask = gt_mask.float()
+ else:
+ gt_mask = masks[batch_idx.view(-1) == i][mask_idx]
+
+ loss += self.single_mask_loss(
+ gt_mask, pred_masks_i[fg_mask_i], proto_i, mxyxy_i[fg_mask_i], marea_i[fg_mask_i]
+ )
+
+ # WARNING: lines below prevent Multi-GPU DDP 'unused gradient' PyTorch errors, do not remove
+ else:
+ loss += (proto * 0).sum() + (pred_masks * 0).sum() # inf sums may lead to nan loss
+
+ return loss / fg_mask.sum()

class v8PoseLoss(v8DetectionLoss):
 """Criterion class for computing training losses."""

 def __init__(self, model): # model must be de-paralleled
+ """Initializes v8PoseLoss with model, sets keypoint variables and declares a keypoint loss instance."""
 super().__init__(model)
 self.kpt_shape = model.model[-1].kpt_shape
 self.bce_pose = nn.BCEWithLogitsLoss()
@@ -313,9 +448,10 @@ class v8PoseLoss(v8DetectionLoss):
 loss = torch.zeros(5, device=self.device) # box, cls, dfl, kpt_location, kpt_visibility
 feats, pred_kpts = preds if isinstance(preds[0], list) else preds[1]
 pred_distri, pred_scores = torch.cat([xi.view(feats[0].shape[0], self.no, -1) for xi in feats], 2).split(
- (self.reg_max * 4, self.nc), 1)
+ (self.reg_max * 4, self.nc), 1
+ )

- # b, grids, ..
+ # B, grids, ..
pred_scores = pred_scores.permute(0, 2, 1).contiguous() pred_distri = pred_distri.permute(0, 2, 1).contiguous() pred_kpts = pred_kpts.permute(0, 2, 1).contiguous() @@ -324,53 +460,50 @@ class v8PoseLoss(v8DetectionLoss): imgsz = torch.tensor(feats[0].shape[2:], device=self.device, dtype=dtype) * self.stride[0] # image size (h,w) anchor_points, stride_tensor = make_anchors(feats, self.stride, 0.5) - # targets + # Targets batch_size = pred_scores.shape[0] - batch_idx = batch['batch_idx'].view(-1, 1) - targets = torch.cat((batch_idx, batch['cls'].view(-1, 1), batch['bboxes']), 1) + batch_idx = batch["batch_idx"].view(-1, 1) + targets = torch.cat((batch_idx, batch["cls"].view(-1, 1), batch["bboxes"]), 1) targets = self.preprocess(targets.to(self.device), batch_size, scale_tensor=imgsz[[1, 0, 1, 0]]) gt_labels, gt_bboxes = targets.split((1, 4), 2) # cls, xyxy mask_gt = gt_bboxes.sum(2, keepdim=True).gt_(0) - # pboxes + # Pboxes pred_bboxes = self.bbox_decode(anchor_points, pred_distri) # xyxy, (b, h*w, 4) pred_kpts = self.kpts_decode(anchor_points, pred_kpts.view(batch_size, -1, *self.kpt_shape)) # (b, h*w, 17, 3) _, target_bboxes, target_scores, fg_mask, target_gt_idx = self.assigner( - pred_scores.detach().sigmoid(), (pred_bboxes.detach() * stride_tensor).type(gt_bboxes.dtype), - anchor_points * stride_tensor, gt_labels, gt_bboxes, mask_gt) + pred_scores.detach().sigmoid(), + (pred_bboxes.detach() * stride_tensor).type(gt_bboxes.dtype), + anchor_points * stride_tensor, + gt_labels, + gt_bboxes, + mask_gt, + ) target_scores_sum = max(target_scores.sum(), 1) - # cls loss + # Cls loss # loss[1] = self.varifocal_loss(pred_scores, target_scores, target_labels) / target_scores_sum # VFL way loss[3] = self.bce(pred_scores, target_scores.to(dtype)).sum() / target_scores_sum # BCE - # bbox loss + # Bbox loss if fg_mask.sum(): target_bboxes /= stride_tensor - loss[0], loss[4] = self.bbox_loss(pred_distri, pred_bboxes, anchor_points, target_bboxes, target_scores, - target_scores_sum, fg_mask) - keypoints = batch['keypoints'].to(self.device).float().clone() + loss[0], loss[4] = self.bbox_loss( + pred_distri, pred_bboxes, anchor_points, target_bboxes, target_scores, target_scores_sum, fg_mask + ) + keypoints = batch["keypoints"].to(self.device).float().clone() keypoints[..., 0] *= imgsz[1] keypoints[..., 1] *= imgsz[0] - for i in range(batch_size): - if fg_mask[i].sum(): - idx = target_gt_idx[i][fg_mask[i]] - gt_kpt = keypoints[batch_idx.view(-1) == i][idx] # (n, 51) - gt_kpt[..., 0] /= stride_tensor[fg_mask[i]] - gt_kpt[..., 1] /= stride_tensor[fg_mask[i]] - area = xyxy2xywh(target_bboxes[i][fg_mask[i]])[:, 2:].prod(1, keepdim=True) - pred_kpt = pred_kpts[i][fg_mask[i]] - kpt_mask = gt_kpt[..., 2] != 0 - loss[1] += self.keypoint_loss(pred_kpt, gt_kpt, kpt_mask, area) # pose loss - # kpt_score loss - if pred_kpt.shape[-1] == 3: - loss[2] += self.bce_pose(pred_kpt[..., 2], kpt_mask.float()) # keypoint obj loss + + loss[1], loss[2] = self.calculate_keypoints_loss( + fg_mask, target_gt_idx, keypoints, batch_idx, stride_tensor, target_bboxes, pred_kpts + ) loss[0] *= self.hyp.box # box gain - loss[1] *= self.hyp.pose / batch_size # pose gain - loss[2] *= self.hyp.kobj / batch_size # kobj gain + loss[1] *= self.hyp.pose # pose gain + loss[2] *= self.hyp.kobj # kobj gain loss[3] *= self.hyp.cls # cls gain loss[4] *= self.hyp.dfl # dfl gain @@ -385,12 +518,210 @@ class v8PoseLoss(v8DetectionLoss): y[..., 1] += anchor_points[:, [1]] - 0.5 return y + def calculate_keypoints_loss( + self, masks, target_gt_idx, 
keypoints, batch_idx, stride_tensor, target_bboxes, pred_kpts + ): + """ + Calculate the keypoints loss for the model. + + This function calculates the keypoints loss and keypoints object loss for a given batch. The keypoints loss is + based on the difference between the predicted keypoints and ground truth keypoints. The keypoints object loss is + a binary classification loss that classifies whether a keypoint is present or not. + + Args: + masks (torch.Tensor): Binary mask tensor indicating object presence, shape (BS, N_anchors). + target_gt_idx (torch.Tensor): Index tensor mapping anchors to ground truth objects, shape (BS, N_anchors). + keypoints (torch.Tensor): Ground truth keypoints, shape (N_kpts_in_batch, N_kpts_per_object, kpts_dim). + batch_idx (torch.Tensor): Batch index tensor for keypoints, shape (N_kpts_in_batch, 1). + stride_tensor (torch.Tensor): Stride tensor for anchors, shape (N_anchors, 1). + target_bboxes (torch.Tensor): Ground truth boxes in (x1, y1, x2, y2) format, shape (BS, N_anchors, 4). + pred_kpts (torch.Tensor): Predicted keypoints, shape (BS, N_anchors, N_kpts_per_object, kpts_dim). + + Returns: + (tuple): Returns a tuple containing: + - kpts_loss (torch.Tensor): The keypoints loss. + - kpts_obj_loss (torch.Tensor): The keypoints object loss. + """ + batch_idx = batch_idx.flatten() + batch_size = len(masks) + + # Find the maximum number of keypoints in a single image + max_kpts = torch.unique(batch_idx, return_counts=True)[1].max() + + # Create a tensor to hold batched keypoints + batched_keypoints = torch.zeros( + (batch_size, max_kpts, keypoints.shape[1], keypoints.shape[2]), device=keypoints.device + ) + + # TODO: any idea how to vectorize this? + # Fill batched_keypoints with keypoints based on batch_idx + for i in range(batch_size): + keypoints_i = keypoints[batch_idx == i] + batched_keypoints[i, : keypoints_i.shape[0]] = keypoints_i + + # Expand dimensions of target_gt_idx to match the shape of batched_keypoints + target_gt_idx_expanded = target_gt_idx.unsqueeze(-1).unsqueeze(-1) + + # Use target_gt_idx_expanded to select keypoints from batched_keypoints + selected_keypoints = batched_keypoints.gather( + 1, target_gt_idx_expanded.expand(-1, -1, keypoints.shape[1], keypoints.shape[2]) + ) + + # Divide coordinates by stride + selected_keypoints /= stride_tensor.view(1, -1, 1, 1) + + kpts_loss = 0 + kpts_obj_loss = 0 + + if masks.any(): + gt_kpt = selected_keypoints[masks] + area = xyxy2xywh(target_bboxes[masks])[:, 2:].prod(1, keepdim=True) + pred_kpt = pred_kpts[masks] + kpt_mask = gt_kpt[..., 2] != 0 if gt_kpt.shape[-1] == 3 else torch.full_like(gt_kpt[..., 0], True) + kpts_loss = self.keypoint_loss(pred_kpt, gt_kpt, kpt_mask, area) # pose loss + + if pred_kpt.shape[-1] == 3: + kpts_obj_loss = self.bce_pose(pred_kpt[..., 2], kpt_mask.float()) # keypoint obj loss + + return kpts_loss, kpts_obj_loss + class v8ClassificationLoss: """Criterion class for computing training losses.""" def __call__(self, preds, batch): """Compute the classification loss between predictions and true labels.""" - loss = torch.nn.functional.cross_entropy(preds, batch['cls'], reduction='sum') / 64 + loss = torch.nn.functional.cross_entropy(preds, batch["cls"], reduction="mean") loss_items = loss.detach() return loss, loss_items + + +class v8OBBLoss(v8DetectionLoss): + def __init__(self, model): + """ + Initializes v8OBBLoss with model, assigner, and rotated bbox loss. + + Note model must be de-paralleled. 
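Reviewer note: `calculate_keypoints_loss` above replaces the per-image Python loop with a pad-and-gather; a minimal sketch of that pattern with toy shapes (all tensor values are illustrative):

```python
import torch

batch_idx = torch.tensor([0, 0, 1])                  # image index of each GT object
keypoints = torch.rand(3, 17, 3)                     # (N_kpts_in_batch, 17, 3)
target_gt_idx = torch.zeros(2, 4, dtype=torch.long)  # (BS, N_anchors)

# Pad per-image keypoints to a common length, then gather by assigned GT index
max_kpts = torch.unique(batch_idx, return_counts=True)[1].max()
batched = torch.zeros(2, max_kpts, 17, 3)
for i in range(2):
    kpts_i = keypoints[batch_idx == i]
    batched[i, : kpts_i.shape[0]] = kpts_i

idx = target_gt_idx.unsqueeze(-1).unsqueeze(-1).expand(-1, -1, 17, 3)
selected = batched.gather(1, idx)  # (BS, N_anchors, 17, 3)
print(selected.shape)
```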
+ """ + super().__init__(model) + self.assigner = RotatedTaskAlignedAssigner(topk=10, num_classes=self.nc, alpha=0.5, beta=6.0) + self.bbox_loss = RotatedBboxLoss(self.reg_max - 1, use_dfl=self.use_dfl).to(self.device) + + def preprocess(self, targets, batch_size, scale_tensor): + """Preprocesses the target counts and matches with the input batch size to output a tensor.""" + if targets.shape[0] == 0: + out = torch.zeros(batch_size, 0, 6, device=self.device) + else: + i = targets[:, 0] # image index + _, counts = i.unique(return_counts=True) + counts = counts.to(dtype=torch.int32) + out = torch.zeros(batch_size, counts.max(), 6, device=self.device) + for j in range(batch_size): + matches = i == j + n = matches.sum() + if n: + bboxes = targets[matches, 2:] + bboxes[..., :4].mul_(scale_tensor) + out[j, :n] = torch.cat([targets[matches, 1:2], bboxes], dim=-1) + return out + + def __call__(self, preds, batch): + """Calculate and return the loss for the YOLO model.""" + loss = torch.zeros(3, device=self.device) # box, cls, dfl + feats, pred_angle = preds if isinstance(preds[0], list) else preds[1] + batch_size = pred_angle.shape[0] # batch size, number of masks, mask height, mask width + pred_distri, pred_scores = torch.cat([xi.view(feats[0].shape[0], self.no, -1) for xi in feats], 2).split( + (self.reg_max * 4, self.nc), 1 + ) + + # b, grids, .. + pred_scores = pred_scores.permute(0, 2, 1).contiguous() + pred_distri = pred_distri.permute(0, 2, 1).contiguous() + pred_angle = pred_angle.permute(0, 2, 1).contiguous() + + dtype = pred_scores.dtype + imgsz = torch.tensor(feats[0].shape[2:], device=self.device, dtype=dtype) * self.stride[0] # image size (h,w) + anchor_points, stride_tensor = make_anchors(feats, self.stride, 0.5) + + # targets + try: + batch_idx = batch["batch_idx"].view(-1, 1) + targets = torch.cat((batch_idx, batch["cls"].view(-1, 1), batch["bboxes"].view(-1, 5)), 1) + rw, rh = targets[:, 4] * imgsz[0].item(), targets[:, 5] * imgsz[1].item() + targets = targets[(rw >= 2) & (rh >= 2)] # filter rboxes of tiny size to stabilize training + targets = self.preprocess(targets.to(self.device), batch_size, scale_tensor=imgsz[[1, 0, 1, 0]]) + gt_labels, gt_bboxes = targets.split((1, 5), 2) # cls, xywhr + mask_gt = gt_bboxes.sum(2, keepdim=True).gt_(0) + except RuntimeError as e: + raise TypeError( + "ERROR ❌ OBB dataset incorrectly formatted or not a OBB dataset.\n" + "This error can occur when incorrectly training a 'OBB' model on a 'detect' dataset, " + "i.e. 'yolo train model=yolov8n-obb.pt data=dota8.yaml'.\nVerify your dataset is a " + "correctly formatted 'OBB' dataset using 'data=dota8.yaml' " + "as an example.\nSee https://docs.ultralytics.com/datasets/obb/ for help." 
+ ) from e
+
+ # Pboxes
+ pred_bboxes = self.bbox_decode(anchor_points, pred_distri, pred_angle) # xyxy, (b, h*w, 4)
+
+ bboxes_for_assigner = pred_bboxes.clone().detach()
+ # Only the first four elements need to be scaled
+ bboxes_for_assigner[..., :4] *= stride_tensor
+ _, target_bboxes, target_scores, fg_mask, _ = self.assigner(
+ pred_scores.detach().sigmoid(),
+ bboxes_for_assigner.type(gt_bboxes.dtype),
+ anchor_points * stride_tensor,
+ gt_labels,
+ gt_bboxes,
+ mask_gt,
+ )
+
+ target_scores_sum = max(target_scores.sum(), 1)
+
+ # Cls loss
+ # loss[1] = self.varifocal_loss(pred_scores, target_scores, target_labels) / target_scores_sum # VFL way
+ loss[1] = self.bce(pred_scores, target_scores.to(dtype)).sum() / target_scores_sum # BCE
+
+ # Bbox loss
+ if fg_mask.sum():
+ target_bboxes[..., :4] /= stride_tensor
+ loss[0], loss[2] = self.bbox_loss(
+ pred_distri, pred_bboxes, anchor_points, target_bboxes, target_scores, target_scores_sum, fg_mask
+ )
+ else:
+ loss[0] += (pred_angle * 0).sum()
+
+ loss[0] *= self.hyp.box # box gain
+ loss[1] *= self.hyp.cls # cls gain
+ loss[2] *= self.hyp.dfl # dfl gain
+
+ return loss.sum() * batch_size, loss.detach() # loss(box, cls, dfl)
+
+ def bbox_decode(self, anchor_points, pred_dist, pred_angle):
+ """
+ Decode predicted object bounding box coordinates from anchor points and distribution.
+
+ Args:
+ anchor_points (torch.Tensor): Anchor points, (h*w, 2).
+ pred_dist (torch.Tensor): Predicted rotated distance, (bs, h*w, 4).
+ pred_angle (torch.Tensor): Predicted angle, (bs, h*w, 1).
+
+ Returns:
+ (torch.Tensor): Predicted rotated bounding boxes with angles, (bs, h*w, 5).
+ """
+ if self.use_dfl:
+ b, a, c = pred_dist.shape # batch, anchors, channels
+ pred_dist = pred_dist.view(b, a, 4, c // 4).softmax(3).matmul(self.proj.type(pred_dist.dtype))
+ return torch.cat((dist2rbox(pred_dist, pred_angle, anchor_points), pred_angle), dim=-1)
+
+
+class v10DetectLoss:
+ """Criterion class combining one-to-many and one-to-one detection losses for YOLOv10 training."""
+
+ def __init__(self, model):
+ """Initializes v10DetectLoss with one-to-many (topk=10) and one-to-one (topk=1) v8DetectionLoss criteria."""
+ self.one2many = v8DetectionLoss(model, tal_topk=10)
+ self.one2one = v8DetectionLoss(model, tal_topk=1)
+
+ def __call__(self, preds, batch):
+ """Returns the summed one-to-many and one-to-one losses and their concatenated detached loss items."""
+ one2many = preds["one2many"]
+ loss_one2many = self.one2many(one2many, batch)
+ one2one = preds["one2one"]
+ loss_one2one = self.one2one(one2one, batch)
+ return loss_one2many[0] + loss_one2one[0], torch.cat((loss_one2many[1], loss_one2one[1]))
diff --git a/ultralytics/utils/metrics.py b/ultralytics/utils/metrics.py
index 731b55a..b598811 100644
--- a/ultralytics/utils/metrics.py
+++ b/ultralytics/utils/metrics.py
@@ -1,7 +1,6 @@
 # Ultralytics YOLO 🚀, AGPL-3.0 license
-"""
-Model validation metrics
-"""
+"""Model validation metrics."""
+
 import math
 import warnings
 from pathlib import Path
@@ -12,7 +11,10 @@ import torch

 from ultralytics.utils import LOGGER, SimpleClass, TryExcept, plt_settings

-OKS_SIGMA = np.array([.26, .25, .25, .35, .35, .79, .79, .72, .72, .62, .62, 1.07, 1.07, .87, .87, .89, .89]) / 10.0
+OKS_SIGMA = (
+ np.array([0.26, 0.25, 0.25, 0.35, 0.35, 0.79, 0.79, 0.72, 0.72, 0.62, 0.62, 1.07, 1.07, 0.87, 0.87, 0.89, 0.89])
+ / 10.0
+)

def bbox_ioa(box1, box2, iou=False, eps=1e-7):
@@ -20,13 +22,13 @@ def bbox_ioa(box1, box2, iou=False, eps=1e-7):
 Calculate the intersection over box2 area given box1 and box2. Boxes are in x1y1x2y2 format.

 Args:
- box1 (np.array): A numpy array of shape (n, 4) representing n bounding boxes.
- box2 (np.array): A numpy array of shape (m, 4) representing m bounding boxes.
- iou (bool): Calculate the standard iou if True else return inter_area/box2_area.
+ box1 (np.ndarray): A numpy array of shape (n, 4) representing n bounding boxes. + box2 (np.ndarray): A numpy array of shape (m, 4) representing m bounding boxes. + iou (bool): Calculate the standard IoU if True else return inter_area/box2_area. eps (float, optional): A small value to avoid division by zero. Defaults to 1e-7. Returns: - (np.array): A numpy array of shape (n, m) representing the intersection over box2 area. + (np.ndarray): A numpy array of shape (n, m) representing the intersection over box2 area. """ # Get the coordinates of bounding boxes @@ -34,10 +36,11 @@ def bbox_ioa(box1, box2, iou=False, eps=1e-7): b2_x1, b2_y1, b2_x2, b2_y2 = box2.T # Intersection area - inter_area = (np.minimum(b1_x2[:, None], b2_x2) - np.maximum(b1_x1[:, None], b2_x1)).clip(0) * \ - (np.minimum(b1_y2[:, None], b2_y2) - np.maximum(b1_y1[:, None], b2_y1)).clip(0) + inter_area = (np.minimum(b1_x2[:, None], b2_x2) - np.maximum(b1_x1[:, None], b2_x1)).clip(0) * ( + np.minimum(b1_y2[:, None], b2_y2) - np.maximum(b1_y1[:, None], b2_y1) + ).clip(0) - # box2 area + # Box2 area area = (b2_x2 - b2_x1) * (b2_y2 - b2_y1) if iou: box1_area = (b1_x2 - b1_x1) * (b1_y2 - b1_y1) @@ -49,8 +52,7 @@ def bbox_ioa(box1, box2, iou=False, eps=1e-7): def box_iou(box1, box2, eps=1e-7): """ - Calculate intersection-over-union (IoU) of boxes. - Both sets of boxes are expected to be in (x1, y1, x2, y2) format. + Calculate intersection-over-union (IoU) of boxes. Both sets of boxes are expected to be in (x1, y1, x2, y2) format. Based on https://github.com/pytorch/vision/blob/master/torchvision/ops/boxes.py Args: @@ -62,6 +64,9 @@ def box_iou(box1, box2, eps=1e-7): (torch.Tensor): An NxM tensor containing the pairwise IoU values for every element in box1 and box2. """ + # NOTE: need float32 to get accurate iou values + box1 = torch.as_tensor(box1, dtype=torch.float32) + box2 = torch.as_tensor(box2, dtype=torch.float32) # inter(N,M) = (rb(N,M,2) - lt(N,M,2)).clamp(0).prod(2) (a1, a2), (b1, b2) = box1.unsqueeze(1).chunk(2, 2), box2.unsqueeze(0).chunk(2, 2) inter = (torch.min(a2, b2) - torch.max(a1, b1)).clamp_(0).prod(2) @@ -101,8 +106,9 @@ def bbox_iou(box1, box2, xywh=True, GIoU=False, DIoU=False, CIoU=False, eps=1e-7 w2, h2 = b2_x2 - b2_x1, b2_y2 - b2_y1 + eps # Intersection area - inter = (b1_x2.minimum(b2_x2) - b1_x1.maximum(b2_x1)).clamp_(0) * \ - (b1_y2.minimum(b2_y2) - b1_y1.maximum(b2_y1)).clamp_(0) + inter = (b1_x2.minimum(b2_x2) - b1_x1.maximum(b2_x1)).clamp_(0) * ( + b1_y2.minimum(b2_y2) - b1_y1.maximum(b2_y1) + ).clamp_(0) # Union Area union = w1 * h1 + w2 * h2 - inter + eps @@ -113,10 +119,12 @@ def bbox_iou(box1, box2, xywh=True, GIoU=False, DIoU=False, CIoU=False, eps=1e-7 cw = b1_x2.maximum(b2_x2) - b1_x1.minimum(b2_x1) # convex (smallest enclosing box) width ch = b1_y2.maximum(b2_y2) - b1_y1.minimum(b2_y1) # convex height if CIoU or DIoU: # Distance or Complete IoU https://arxiv.org/abs/1911.08287v1 - c2 = cw ** 2 + ch ** 2 + eps # convex diagonal squared - rho2 = ((b2_x1 + b2_x2 - b1_x1 - b1_x2) ** 2 + (b2_y1 + b2_y2 - b1_y1 - b1_y2) ** 2) / 4 # center dist ** 2 + c2 = cw.pow(2) + ch.pow(2) + eps # convex diagonal squared + rho2 = ( + (b2_x1 + b2_x2 - b1_x1 - b1_x2).pow(2) + (b2_y1 + b2_y2 - b1_y1 - b1_y2).pow(2) + ) / 4 # center dist**2 if CIoU: # https://github.com/Zzh-tju/DIoU-SSD-pytorch/blob/master/utils/box/box_utils.py#L47 - v = (4 / math.pi ** 2) * (torch.atan(w2 / h2) - torch.atan(w1 / h1)).pow(2) + v = (4 / math.pi**2) * ((w2 / h2).atan() - (w1 / h1).atan()).pow(2) with torch.no_grad(): alpha = v / (v 
- iou + (1 + eps))
 return iou - (rho2 / c2 + v * alpha) # CIoU
@@ -159,16 +167,120 @@ def kpt_iou(kpt1, kpt2, area, sigma, eps=1e-7):
 Returns:
 (torch.Tensor): A tensor of shape (N, M) representing keypoint similarities.
 """
- d = (kpt1[:, None, :, 0] - kpt2[..., 0]) ** 2 + (kpt1[:, None, :, 1] - kpt2[..., 1]) ** 2 # (N, M, 17)
+ d = (kpt1[:, None, :, 0] - kpt2[..., 0]).pow(2) + (kpt1[:, None, :, 1] - kpt2[..., 1]).pow(2) # (N, M, 17)
 sigma = torch.tensor(sigma, device=kpt1.device, dtype=kpt1.dtype) # (17, )
 kpt_mask = kpt1[..., 2] != 0 # (N, 17)
- e = d / (2 * sigma) ** 2 / (area[:, None, None] + eps) / 2 # from cocoeval
+ e = d / (2 * sigma).pow(2) / (area[:, None, None] + eps) / 2 # from cocoeval
 # e = d / ((area[None, :, None] + eps) * sigma) ** 2 / 2 # from formula
- return (torch.exp(-e) * kpt_mask[:, None]).sum(-1) / (kpt_mask.sum(-1)[:, None] + eps)
+ return ((-e).exp() * kpt_mask[:, None]).sum(-1) / (kpt_mask.sum(-1)[:, None] + eps)

-def smooth_BCE(eps=0.1): # https://github.com/ultralytics/yolov3/issues/238#issuecomment-598028441
- # return positive, negative label smoothing BCE targets
+def _get_covariance_matrix(boxes):
+ """
+ Generates covariance matrices from oriented bounding boxes (OBBs).
+
+ Args:
+ boxes (torch.Tensor): A tensor of shape (N, 5) representing rotated bounding boxes, with xywhr format.
+
+ Returns:
+ (torch.Tensor): Covariance matrices corresponding to the original rotated bounding boxes.
+ """
+ # Gaussian bounding boxes, ignore the center points (the first two columns) because they are not needed here.
+ gbbs = torch.cat((boxes[:, 2:4].pow(2) / 12, boxes[:, 4:]), dim=-1)
+ a, b, c = gbbs.split(1, dim=-1)
+ cos = c.cos()
+ sin = c.sin()
+ cos2 = cos.pow(2)
+ sin2 = sin.pow(2)
+ return a * cos2 + b * sin2, a * sin2 + b * cos2, (a - b) * cos * sin
+
+
+def probiou(obb1, obb2, CIoU=False, eps=1e-7):
+ """
+ Calculate the prob IoU between oriented bounding boxes, https://arxiv.org/pdf/2106.06072v1.pdf.
+
+ Args:
+ obb1 (torch.Tensor): A tensor of shape (N, 5) representing ground truth obbs, with xywhr format.
+ obb2 (torch.Tensor): A tensor of shape (N, 5) representing predicted obbs, with xywhr format.
+ CIoU (bool, optional): If True, also apply the aspect-ratio (CIoU) penalty term. Defaults to False.
+ eps (float, optional): A small value to avoid division by zero. Defaults to 1e-7.
+
+ Returns:
+ (torch.Tensor): A tensor of shape (N, ) representing obb similarities.
+ """
+ x1, y1 = obb1[..., :2].split(1, dim=-1)
+ x2, y2 = obb2[..., :2].split(1, dim=-1)
+ a1, b1, c1 = _get_covariance_matrix(obb1)
+ a2, b2, c2 = _get_covariance_matrix(obb2)
+
+ t1 = (
+ ((a1 + a2) * (y1 - y2).pow(2) + (b1 + b2) * (x1 - x2).pow(2)) / ((a1 + a2) * (b1 + b2) - (c1 + c2).pow(2) + eps)
+ ) * 0.25
+ t2 = (((c1 + c2) * (x2 - x1) * (y1 - y2)) / ((a1 + a2) * (b1 + b2) - (c1 + c2).pow(2) + eps)) * 0.5
+ t3 = (
+ ((a1 + a2) * (b1 + b2) - (c1 + c2).pow(2))
+ / (4 * ((a1 * b1 - c1.pow(2)).clamp_(0) * (a2 * b2 - c2.pow(2)).clamp_(0)).sqrt() + eps)
+ + eps
+ ).log() * 0.5
+ bd = (t1 + t2 + t3).clamp(eps, 100.0)
+ hd = (1.0 - (-bd).exp() + eps).sqrt()
+ iou = 1 - hd
+ if CIoU: # only include the wh aspect ratio part
+ w1, h1 = obb1[..., 2:4].split(1, dim=-1)
+ w2, h2 = obb2[..., 2:4].split(1, dim=-1)
+ v = (4 / math.pi**2) * ((w2 / h2).atan() - (w1 / h1).atan()).pow(2)
+ with torch.no_grad():
+ alpha = v / (v - iou + (1 + eps))
+ return iou - v * alpha # CIoU
+ return iou
+
+
+def batch_probiou(obb1, obb2, eps=1e-7):
+ """
+ Calculate the prob IoU between oriented bounding boxes, https://arxiv.org/pdf/2106.06072v1.pdf.
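Reviewer note: a quick sanity sketch for the new `probiou` (the box values are invented; the import path follows the `from .metrics import bbox_iou, probiou` line in the loss.py hunk earlier in this patch):

```python
import torch
from ultralytics.utils.metrics import probiou

obb = torch.tensor([[50.0, 50.0, 20.0, 10.0, 0.3]])      # xywhr
shifted = torch.tensor([[54.0, 50.0, 20.0, 10.0, 0.3]])  # same box, center moved 4 px

print(probiou(obb, obb))      # ~1.0 for identical boxes
print(probiou(obb, shifted))  # < 1.0, decreasing with center distance
```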
+ + Args: + obb1 (torch.Tensor | np.ndarray): A tensor of shape (N, 5) representing ground truth obbs, with xywhr format. + obb2 (torch.Tensor | np.ndarray): A tensor of shape (M, 5) representing predicted obbs, with xywhr format. + eps (float, optional): A small value to avoid division by zero. Defaults to 1e-7. + + Returns: + (torch.Tensor): A tensor of shape (N, M) representing obb similarities. + """ + obb1 = torch.from_numpy(obb1) if isinstance(obb1, np.ndarray) else obb1 + obb2 = torch.from_numpy(obb2) if isinstance(obb2, np.ndarray) else obb2 + + x1, y1 = obb1[..., :2].split(1, dim=-1) + x2, y2 = (x.squeeze(-1)[None] for x in obb2[..., :2].split(1, dim=-1)) + a1, b1, c1 = _get_covariance_matrix(obb1) + a2, b2, c2 = (x.squeeze(-1)[None] for x in _get_covariance_matrix(obb2)) + + t1 = ( + ((a1 + a2) * (y1 - y2).pow(2) + (b1 + b2) * (x1 - x2).pow(2)) / ((a1 + a2) * (b1 + b2) - (c1 + c2).pow(2) + eps) + ) * 0.25 + t2 = (((c1 + c2) * (x2 - x1) * (y1 - y2)) / ((a1 + a2) * (b1 + b2) - (c1 + c2).pow(2) + eps)) * 0.5 + t3 = ( + ((a1 + a2) * (b1 + b2) - (c1 + c2).pow(2)) + / (4 * ((a1 * b1 - c1.pow(2)).clamp_(0) * (a2 * b2 - c2.pow(2)).clamp_(0)).sqrt() + eps) + + eps + ).log() * 0.5 + bd = (t1 + t2 + t3).clamp(eps, 100.0) + hd = (1.0 - (-bd).exp() + eps).sqrt() + return 1 - hd + + +def smooth_BCE(eps=0.1): + """ + Computes smoothed positive and negative Binary Cross-Entropy targets. + + This function calculates positive and negative label smoothing BCE targets based on a given epsilon value. + For implementation details, refer to https://github.com/ultralytics/yolov3/issues/238#issuecomment-598028441. + + Args: + eps (float, optional): The epsilon value for label smoothing. Defaults to 0.1. + + Returns: + (tuple): A tuple containing the positive and negative label smoothing BCE targets. + """ return 1.0 - 0.5 * eps, 0.5 * eps @@ -178,23 +290,23 @@ class ConfusionMatrix: Attributes: task (str): The type of task, either 'detect' or 'classify'. - matrix (np.array): The confusion matrix, with dimensions depending on the task. + matrix (np.ndarray): The confusion matrix, with dimensions depending on the task. nc (int): The number of classes. conf (float): The confidence threshold for detections. iou_thres (float): The Intersection over Union threshold. """ - def __init__(self, nc, conf=0.25, iou_thres=0.45, task='detect'): + def __init__(self, nc, conf=0.25, iou_thres=0.45, task="detect"): """Initialize attributes for the YOLO model.""" self.task = task - self.matrix = np.zeros((nc + 1, nc + 1)) if self.task == 'detect' else np.zeros((nc, nc)) + self.matrix = np.zeros((nc + 1, nc + 1)) if self.task == "detect" else np.zeros((nc, nc)) self.nc = nc # number of classes - self.conf = 0.25 if conf is None else conf # argument may be None from default cfg + self.conf = 0.25 if conf in (None, 0.001) else conf # apply 0.25 if default val conf is passed self.iou_thres = iou_thres def process_cls_preds(self, preds, targets): """ - Update confusion matrix for classification task + Update confusion matrix for classification task. Args: preds (Array[N, min(nc,5)]): Predicted class labels. @@ -204,26 +316,39 @@ class ConfusionMatrix: for p, t in zip(preds.cpu().numpy(), targets.cpu().numpy()): self.matrix[p][t] += 1 - def process_batch(self, detections, labels): + def process_batch(self, detections, gt_bboxes, gt_cls): """ Update confusion matrix for object detection task. Args: - detections (Array[N, 6]): Detected bounding boxes and their associated information. 
- Each row should contain (x1, y1, x2, y2, conf, class).
- labels (Array[M, 5]): Ground truth bounding boxes and their associated class labels.
- Each row should contain (class, x1, y1, x2, y2).
+ detections (Array[N, 6] | Array[N, 7]): Detected bounding boxes and their associated information.
+ Each row should contain (x1, y1, x2, y2, conf, class)
+ or with an additional `angle` element when it is OBB.
+ gt_bboxes (Array[M, 4] | Array[M, 5]): Ground truth bounding boxes in xyxy/xyxyr format.
+ gt_cls (Array[M]): The class labels.
 """
+ if gt_cls.shape[0] == 0: # Check if labels is empty
+ if detections is not None:
+ detections = detections[detections[:, 4] > self.conf]
+ detection_classes = detections[:, 5].int()
+ for dc in detection_classes:
+ self.matrix[dc, self.nc] += 1 # false positives
+ return
 if detections is None:
- gt_classes = labels.int()
+ gt_classes = gt_cls.int()
 for gc in gt_classes:
 self.matrix[self.nc, gc] += 1 # background FN
 return

 detections = detections[detections[:, 4] > self.conf]
- gt_classes = labels[:, 0].int()
+ gt_classes = gt_cls.int()
 detection_classes = detections[:, 5].int()
- iou = box_iou(labels[:, 1:], detections[:, :4])
+ is_obb = detections.shape[1] == 7 and gt_bboxes.shape[1] == 5 # with additional `angle` dimension
+ iou = (
+ batch_probiou(gt_bboxes, torch.cat([detections[:, :4], detections[:, -1:]], dim=-1))
+ if is_obb
+ else box_iou(gt_bboxes, detections[:, :4])
+ )

 x = torch.where(iou > self.iou_thres)
 if x[0].shape[0]:
@@ -259,11 +384,11 @@
 tp = self.matrix.diagonal() # true positives
 fp = self.matrix.sum(1) - tp # false positives
 # fn = self.matrix.sum(0) - tp # false negatives (missed detections)
- return (tp[:-1], fp[:-1]) if self.task == 'detect' else (tp, fp) # remove background class if task=detect
+ return (tp[:-1], fp[:-1]) if self.task == "detect" else (tp, fp) # remove background class if task=detect

- @TryExcept('WARNING ⚠️ ConfusionMatrix plot failure')
+ @TryExcept("WARNING ⚠️ ConfusionMatrix plot failure")
 @plt_settings()
- def plot(self, normalize=True, save_dir='', names=(), on_plot=None):
+ def plot(self, normalize=True, save_dir="", names=(), on_plot=None):
 """
 Plot the confusion matrix using seaborn and save it to a file.
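Reviewer note: the reworked `process_batch` now takes ground-truth boxes and classes separately; a minimal sketch with synthetic tensors (axis-aligned case, so `box_iou` is used rather than `batch_probiou`):

```python
import torch
from ultralytics.utils.metrics import ConfusionMatrix

cm = ConfusionMatrix(nc=2, conf=0.25, iou_thres=0.45, task="detect")
detections = torch.tensor([[10.0, 10.0, 50.0, 50.0, 0.90, 0.0]])  # x1, y1, x2, y2, conf, cls
gt_bboxes = torch.tensor([[12.0, 12.0, 48.0, 48.0]])              # xyxy
gt_cls = torch.tensor([0])

cm.process_batch(detections, gt_bboxes, gt_cls)
print(cm.matrix[0, 0])  # 1.0 -- the IoU ~0.81 match is counted as a true positive
```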
@@ -275,30 +400,31 @@ class ConfusionMatrix: """ import seaborn as sn - array = self.matrix / ((self.matrix.sum(0).reshape(1, -1) + 1E-9) if normalize else 1) # normalize columns + array = self.matrix / ((self.matrix.sum(0).reshape(1, -1) + 1e-9) if normalize else 1) # normalize columns array[array < 0.005] = np.nan # don't annotate (would appear as 0.00) fig, ax = plt.subplots(1, 1, figsize=(12, 9), tight_layout=True) nc, nn = self.nc, len(names) # number of classes, names sn.set(font_scale=1.0 if nc < 50 else 0.8) # for label size labels = (0 < nn < 99) and (nn == nc) # apply names to ticklabels - ticklabels = (list(names) + ['background']) if labels else 'auto' + ticklabels = (list(names) + ["background"]) if labels else "auto" with warnings.catch_warnings(): - warnings.simplefilter('ignore') # suppress empty matrix RuntimeWarning: All-NaN slice encountered - sn.heatmap(array, - ax=ax, - annot=nc < 30, - annot_kws={ - 'size': 8}, - cmap='Blues', - fmt='.2f' if normalize else '.0f', - square=True, - vmin=0.0, - xticklabels=ticklabels, - yticklabels=ticklabels).set_facecolor((1, 1, 1)) - title = 'Confusion Matrix' + ' Normalized' * normalize - ax.set_xlabel('True') - ax.set_ylabel('Predicted') + warnings.simplefilter("ignore") # suppress empty matrix RuntimeWarning: All-NaN slice encountered + sn.heatmap( + array, + ax=ax, + annot=nc < 30, + annot_kws={"size": 8}, + cmap="Blues", + fmt=".2f" if normalize else ".0f", + square=True, + vmin=0.0, + xticklabels=ticklabels, + yticklabels=ticklabels, + ).set_facecolor((1, 1, 1)) + title = "Confusion Matrix" + " Normalized" * normalize + ax.set_xlabel("True") + ax.set_ylabel("Predicted") ax.set_title(title) plot_fname = Path(save_dir) / f'{title.lower().replace(" ", "_")}.png' fig.savefig(plot_fname, dpi=250) @@ -307,11 +433,9 @@ class ConfusionMatrix: on_plot(plot_fname) def print(self): - """ - Print the confusion matrix to the console. 
- """ + """Print the confusion matrix to the console.""" for i in range(self.nc + 1): - LOGGER.info(' '.join(map(str, self.matrix[i]))) + LOGGER.info(" ".join(map(str, self.matrix[i]))) def smooth(y, f=0.05): @@ -319,28 +443,28 @@ def smooth(y, f=0.05): nf = round(len(y) * f * 2) // 2 + 1 # number of filter elements (must be odd) p = np.ones(nf // 2) # ones padding yp = np.concatenate((p * y[0], y, p * y[-1]), 0) # y padded - return np.convolve(yp, np.ones(nf) / nf, mode='valid') # y-smoothed + return np.convolve(yp, np.ones(nf) / nf, mode="valid") # y-smoothed @plt_settings() -def plot_pr_curve(px, py, ap, save_dir=Path('pr_curve.png'), names=(), on_plot=None): +def plot_pr_curve(px, py, ap, save_dir=Path("pr_curve.png"), names=(), on_plot=None): """Plots a precision-recall curve.""" fig, ax = plt.subplots(1, 1, figsize=(9, 6), tight_layout=True) py = np.stack(py, axis=1) if 0 < len(names) < 21: # display per-class legend if < 21 classes for i, y in enumerate(py.T): - ax.plot(px, y, linewidth=1, label=f'{names[i]} {ap[i, 0]:.3f}') # plot(recall, precision) + ax.plot(px, y, linewidth=1, label=f"{names[i]} {ap[i, 0]:.3f}") # plot(recall, precision) else: - ax.plot(px, py, linewidth=1, color='grey') # plot(recall, precision) + ax.plot(px, py, linewidth=1, color="grey") # plot(recall, precision) - ax.plot(px, py.mean(1), linewidth=3, color='blue', label='all classes %.3f mAP@0.5' % ap[:, 0].mean()) - ax.set_xlabel('Recall') - ax.set_ylabel('Precision') + ax.plot(px, py.mean(1), linewidth=3, color="blue", label="all classes %.3f mAP@0.5" % ap[:, 0].mean()) + ax.set_xlabel("Recall") + ax.set_ylabel("Precision") ax.set_xlim(0, 1) ax.set_ylim(0, 1) - ax.legend(bbox_to_anchor=(1.04, 1), loc='upper left') - ax.set_title('Precision-Recall Curve') + ax.legend(bbox_to_anchor=(1.04, 1), loc="upper left") + ax.set_title("Precision-Recall Curve") fig.savefig(save_dir, dpi=250) plt.close(fig) if on_plot: @@ -348,24 +472,24 @@ def plot_pr_curve(px, py, ap, save_dir=Path('pr_curve.png'), names=(), on_plot=N @plt_settings() -def plot_mc_curve(px, py, save_dir=Path('mc_curve.png'), names=(), xlabel='Confidence', ylabel='Metric', on_plot=None): +def plot_mc_curve(px, py, save_dir=Path("mc_curve.png"), names=(), xlabel="Confidence", ylabel="Metric", on_plot=None): """Plots a metric-confidence curve.""" fig, ax = plt.subplots(1, 1, figsize=(9, 6), tight_layout=True) if 0 < len(names) < 21: # display per-class legend if < 21 classes for i, y in enumerate(py): - ax.plot(px, y, linewidth=1, label=f'{names[i]}') # plot(confidence, metric) + ax.plot(px, y, linewidth=1, label=f"{names[i]}") # plot(confidence, metric) else: - ax.plot(px, py.T, linewidth=1, color='grey') # plot(confidence, metric) + ax.plot(px, py.T, linewidth=1, color="grey") # plot(confidence, metric) y = smooth(py.mean(0), 0.05) - ax.plot(px, y, linewidth=3, color='blue', label=f'all classes {y.max():.2f} at {px[y.argmax()]:.3f}') + ax.plot(px, y, linewidth=3, color="blue", label=f"all classes {y.max():.2f} at {px[y.argmax()]:.3f}") ax.set_xlabel(xlabel) ax.set_ylabel(ylabel) ax.set_xlim(0, 1) ax.set_ylim(0, 1) - ax.legend(bbox_to_anchor=(1.04, 1), loc='upper left') - ax.set_title(f'{ylabel}-Confidence Curve') + ax.legend(bbox_to_anchor=(1.04, 1), loc="upper left") + ax.set_title(f"{ylabel}-Confidence Curve") fig.savefig(save_dir, dpi=250) plt.close(fig) if on_plot: @@ -394,8 +518,8 @@ def compute_ap(recall, precision): mpre = np.flip(np.maximum.accumulate(np.flip(mpre))) # Integrate area under curve - method = 'interp' # methods: 'continuous', 
'interp' - if method == 'interp': + method = "interp" # methods: 'continuous', 'interp' + if method == "interp": x = np.linspace(0, 1, 101) # 101-point interp (COCO) ap = np.trapz(np.interp(x, mrec, mpre), x) # integrate else: # 'continuous' @@ -405,16 +529,9 @@ def compute_ap(recall, precision): return ap, mpre, mrec -def ap_per_class(tp, - conf, - pred_cls, - target_cls, - plot=False, - on_plot=None, - save_dir=Path(), - names=(), - eps=1e-16, - prefix=''): +def ap_per_class( + tp, conf, pred_cls, target_cls, plot=False, on_plot=None, save_dir=Path(), names=(), eps=1e-16, prefix="" +): """ Computes the average precision per class for object detection evaluation. @@ -432,14 +549,18 @@ def ap_per_class(tp, Returns: (tuple): A tuple of six arrays and one array of unique classes, where: - tp (np.ndarray): True positive counts for each class. - fp (np.ndarray): False positive counts for each class. - p (np.ndarray): Precision values at each confidence threshold. - r (np.ndarray): Recall values at each confidence threshold. - f1 (np.ndarray): F1-score values at each confidence threshold. - ap (np.ndarray): Average precision for each class at different IoU thresholds. - unique_classes (np.ndarray): An array of unique classes that have data. - + tp (np.ndarray): True positive counts at threshold given by max F1 metric for each class.Shape: (nc,). + fp (np.ndarray): False positive counts at threshold given by max F1 metric for each class. Shape: (nc,). + p (np.ndarray): Precision values at threshold given by max F1 metric for each class. Shape: (nc,). + r (np.ndarray): Recall values at threshold given by max F1 metric for each class. Shape: (nc,). + f1 (np.ndarray): F1-score values at threshold given by max F1 metric for each class. Shape: (nc,). + ap (np.ndarray): Average precision for each class at different IoU thresholds. Shape: (nc, 10). + unique_classes (np.ndarray): An array of unique classes that have data. Shape: (nc,). + p_curve (np.ndarray): Precision curves for each class. Shape: (nc, 1000). + r_curve (np.ndarray): Recall curves for each class. Shape: (nc, 1000). + f1_curve (np.ndarray): F1-score curves for each class. Shape: (nc, 1000). + x (np.ndarray): X-axis values for the curves. Shape: (1000,). + prec_values: Precision values at mAP@0.5 for each class. Shape: (nc, 1000). 
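# [Editor's note] Behavior check for the 'interp' branch of compute_ap above:
# the monotone precision envelope is sampled on a 101-point recall grid (the
# COCO convention) and integrated with np.trapz. Illustrative values; a
# perfect detector should score an AP close to 1.
import numpy as np

recall = np.array([0.1, 0.5, 1.0])
precision = np.array([1.0, 1.0, 1.0])
ap, mpre, mrec = compute_ap(recall, precision)  # function defined above
print(f"AP = {ap:.2f}")                         # close to 1.0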
""" # Sort by objectness @@ -451,8 +572,10 @@ def ap_per_class(tp, nc = unique_classes.shape[0] # number of classes, number of detections # Create Precision-Recall curve and compute AP for each class - px, py = np.linspace(0, 1, 1000), [] # for plotting - ap, p, r = np.zeros((nc, tp.shape[1])), np.zeros((nc, 1000)), np.zeros((nc, 1000)) + x, prec_values = np.linspace(0, 1, 1000), [] + + # Average precision, precision and recall curves + ap, p_curve, r_curve = np.zeros((nc, tp.shape[1])), np.zeros((nc, 1000)), np.zeros((nc, 1000)) for ci, c in enumerate(unique_classes): i = pred_cls == c n_l = nt[ci] # number of labels @@ -466,63 +589,66 @@ def ap_per_class(tp, # Recall recall = tpc / (n_l + eps) # recall curve - r[ci] = np.interp(-px, -conf[i], recall[:, 0], left=0) # negative x, xp because xp decreases + r_curve[ci] = np.interp(-x, -conf[i], recall[:, 0], left=0) # negative x, xp because xp decreases # Precision precision = tpc / (tpc + fpc) # precision curve - p[ci] = np.interp(-px, -conf[i], precision[:, 0], left=1) # p at pr_score + p_curve[ci] = np.interp(-x, -conf[i], precision[:, 0], left=1) # p at pr_score # AP from recall-precision curve for j in range(tp.shape[1]): ap[ci, j], mpre, mrec = compute_ap(recall[:, j], precision[:, j]) if plot and j == 0: - py.append(np.interp(px, mrec, mpre)) # precision at mAP@0.5 + prec_values.append(np.interp(x, mrec, mpre)) # precision at mAP@0.5 + + prec_values = np.array(prec_values) # (nc, 1000) # Compute F1 (harmonic mean of precision and recall) - f1 = 2 * p * r / (p + r + eps) + f1_curve = 2 * p_curve * r_curve / (p_curve + r_curve + eps) names = [v for k, v in names.items() if k in unique_classes] # list: only classes that have data names = dict(enumerate(names)) # to dict if plot: - plot_pr_curve(px, py, ap, save_dir / f'{prefix}PR_curve.png', names, on_plot=on_plot) - plot_mc_curve(px, f1, save_dir / f'{prefix}F1_curve.png', names, ylabel='F1', on_plot=on_plot) - plot_mc_curve(px, p, save_dir / f'{prefix}P_curve.png', names, ylabel='Precision', on_plot=on_plot) - plot_mc_curve(px, r, save_dir / f'{prefix}R_curve.png', names, ylabel='Recall', on_plot=on_plot) + plot_pr_curve(x, prec_values, ap, save_dir / f"{prefix}PR_curve.png", names, on_plot=on_plot) + plot_mc_curve(x, f1_curve, save_dir / f"{prefix}F1_curve.png", names, ylabel="F1", on_plot=on_plot) + plot_mc_curve(x, p_curve, save_dir / f"{prefix}P_curve.png", names, ylabel="Precision", on_plot=on_plot) + plot_mc_curve(x, r_curve, save_dir / f"{prefix}R_curve.png", names, ylabel="Recall", on_plot=on_plot) - i = smooth(f1.mean(0), 0.1).argmax() # max F1 index - p, r, f1 = p[:, i], r[:, i], f1[:, i] + i = smooth(f1_curve.mean(0), 0.1).argmax() # max F1 index + p, r, f1 = p_curve[:, i], r_curve[:, i], f1_curve[:, i] # max-F1 precision, recall, F1 values tp = (r * nt).round() # true positives fp = (tp / (p + eps) - tp).round() # false positives - return tp, fp, p, r, f1, ap, unique_classes.astype(int) + return tp, fp, p, r, f1, ap, unique_classes.astype(int), p_curve, r_curve, f1_curve, x, prec_values class Metric(SimpleClass): """ - Class for computing evaluation metrics for YOLOv8 model. + Class for computing evaluation metrics for YOLOv8 model. - Attributes: - p (list): Precision for each class. Shape: (nc,). - r (list): Recall for each class. Shape: (nc,). - f1 (list): F1 score for each class. Shape: (nc,). - all_ap (list): AP scores for all classes and all IoU thresholds. Shape: (nc, 10). - ap_class_index (list): Index of class for each AP score. Shape: (nc,). 
- nc (int): Number of classes. + Attributes: + p (list): Precision for each class. Shape: (nc,). + r (list): Recall for each class. Shape: (nc,). + f1 (list): F1 score for each class. Shape: (nc,). + all_ap (list): AP scores for all classes and all IoU thresholds. Shape: (nc, 10). + ap_class_index (list): Index of class for each AP score. Shape: (nc,). + nc (int): Number of classes. - Methods: - ap50(): AP at IoU threshold of 0.5 for all classes. Returns: List of AP scores. Shape: (nc,) or []. - ap(): AP at IoU thresholds from 0.5 to 0.95 for all classes. Returns: List of AP scores. Shape: (nc,) or []. - mp(): Mean precision of all classes. Returns: Float. - mr(): Mean recall of all classes. Returns: Float. - map50(): Mean AP at IoU threshold of 0.5 for all classes. Returns: Float. - map75(): Mean AP at IoU threshold of 0.75 for all classes. Returns: Float. - map(): Mean AP at IoU thresholds from 0.5 to 0.95 for all classes. Returns: Float. - mean_results(): Mean of results, returns mp, mr, map50, map. - class_result(i): Class-aware result, returns p[i], r[i], ap50[i], ap[i]. - maps(): mAP of each class. Returns: Array of mAP scores, shape: (nc,). - fitness(): Model fitness as a weighted combination of metrics. Returns: Float. - update(results): Update metric attributes with new evaluation results. - """ + Methods: + ap50(): AP at IoU threshold of 0.5 for all classes. Returns: List of AP scores. Shape: (nc,) or []. + ap(): AP at IoU thresholds from 0.5 to 0.95 for all classes. Returns: List of AP scores. Shape: (nc,) or []. + mp(): Mean precision of all classes. Returns: Float. + mr(): Mean recall of all classes. Returns: Float. + map50(): Mean AP at IoU threshold of 0.5 for all classes. Returns: Float. + map75(): Mean AP at IoU threshold of 0.75 for all classes. Returns: Float. + map(): Mean AP at IoU thresholds from 0.5 to 0.95 for all classes. Returns: Float. + mean_results(): Mean of results, returns mp, mr, map50, map. + class_result(i): Class-aware result, returns p[i], r[i], ap50[i], ap[i]. + maps(): mAP of each class. Returns: Array of mAP scores, shape: (nc,). + fitness(): Model fitness as a weighted combination of metrics. Returns: Float. + update(results): Update metric attributes with new evaluation results. + """ def __init__(self) -> None: + """Initializes a Metric instance for computing evaluation metrics for the YOLOv8 model.""" self.p = [] # (nc, ) self.r = [] # (nc, ) self.f1 = [] # (nc, ) @@ -576,7 +702,7 @@ class Metric(SimpleClass): Returns the mean Average Precision (mAP) at an IoU threshold of 0.5. Returns: - (float): The mAP50 at an IoU threshold of 0.5. + (float): The mAP at an IoU threshold of 0.5. """ return self.all_ap[:, 0].mean() if len(self.all_ap) else 0.0 @@ -586,7 +712,7 @@ class Metric(SimpleClass): Returns the mean Average Precision (mAP) at an IoU threshold of 0.75. Returns: - (float): The mAP50 at an IoU threshold of 0.75. + (float): The mAP at an IoU threshold of 0.75. 
""" return self.all_ap[:, 5].mean() if len(self.all_ap) else 0.0 @@ -605,12 +731,12 @@ class Metric(SimpleClass): return [self.mp, self.mr, self.map50, self.map] def class_result(self, i): - """class-aware result, return p[i], r[i], ap50[i], ap[i].""" + """Class-aware result, return p[i], r[i], ap50[i], ap[i].""" return self.p[i], self.r[i], self.ap50[i], self.ap[i] @property def maps(self): - """mAP of each class.""" + """MAP of each class.""" maps = np.zeros(self.nc) + self.map for i, c in enumerate(self.ap_class_index): maps[c] = self.ap[i] @@ -623,10 +749,47 @@ class Metric(SimpleClass): def update(self, results): """ + Updates the evaluation metrics of the model with a new set of results. + Args: - results (tuple): A tuple of (p, r, ap, f1, ap_class) + results (tuple): A tuple containing the following evaluation metrics: + - p (list): Precision for each class. Shape: (nc,). + - r (list): Recall for each class. Shape: (nc,). + - f1 (list): F1 score for each class. Shape: (nc,). + - all_ap (list): AP scores for all classes and all IoU thresholds. Shape: (nc, 10). + - ap_class_index (list): Index of class for each AP score. Shape: (nc,). + + Side Effects: + Updates the class attributes `self.p`, `self.r`, `self.f1`, `self.all_ap`, and `self.ap_class_index` based + on the values provided in the `results` tuple. """ - self.p, self.r, self.f1, self.all_ap, self.ap_class_index = results + ( + self.p, + self.r, + self.f1, + self.all_ap, + self.ap_class_index, + self.p_curve, + self.r_curve, + self.f1_curve, + self.px, + self.prec_values, + ) = results + + @property + def curves(self): + """Returns a list of curves for accessing specific metrics curves.""" + return [] + + @property + def curves_results(self): + """Returns a list of curves for accessing specific metrics curves.""" + return [ + [self.px, self.prec_values, "Recall", "Precision"], + [self.px, self.f1_curve, "Confidence", "F1"], + [self.px, self.p_curve, "Confidence", "Precision"], + [self.px, self.r_curve, "Confidence", "Recall"], + ] class DetMetrics(SimpleClass): @@ -657,33 +820,39 @@ class DetMetrics(SimpleClass): fitness: Computes the fitness score based on the computed detection metrics. ap_class_index: Returns a list of class indices sorted by their average precision (AP) values. results_dict: Returns a dictionary that maps detection metric keys to their computed values. 
+ curves: TODO + curves_results: TODO """ - def __init__(self, save_dir=Path('.'), plot=False, on_plot=None, names=()) -> None: + def __init__(self, save_dir=Path("."), plot=False, on_plot=None, names=()) -> None: + """Initialize a DetMetrics instance with a save directory, plot flag, callback function, and class names.""" self.save_dir = save_dir self.plot = plot self.on_plot = on_plot self.names = names self.box = Metric() - self.speed = {'preprocess': 0.0, 'inference': 0.0, 'loss': 0.0, 'postprocess': 0.0} + self.speed = {"preprocess": 0.0, "inference": 0.0, "loss": 0.0, "postprocess": 0.0} + self.task = "detect" def process(self, tp, conf, pred_cls, target_cls): """Process predicted results for object detection and update metrics.""" - results = ap_per_class(tp, - conf, - pred_cls, - target_cls, - plot=self.plot, - save_dir=self.save_dir, - names=self.names, - on_plot=self.on_plot)[2:] + results = ap_per_class( + tp, + conf, + pred_cls, + target_cls, + plot=self.plot, + save_dir=self.save_dir, + names=self.names, + on_plot=self.on_plot, + )[2:] self.box.nc = len(self.names) self.box.update(results) @property def keys(self): """Returns a list of keys for accessing specific metrics.""" - return ['metrics/precision(B)', 'metrics/recall(B)', 'metrics/mAP50(B)', 'metrics/mAP50-95(B)'] + return ["metrics/precision(B)", "metrics/recall(B)", "metrics/mAP50(B)", "metrics/mAP50-95(B)"] def mean_results(self): """Calculate mean of detected objects & return precision, recall, mAP50, and mAP50-95.""" @@ -711,7 +880,17 @@ class DetMetrics(SimpleClass): @property def results_dict(self): """Returns dictionary of computed performance metrics and statistics.""" - return dict(zip(self.keys + ['fitness'], self.mean_results() + [self.fitness])) + return dict(zip(self.keys + ["fitness"], self.mean_results() + [self.fitness])) + + @property + def curves(self): + """Returns a list of curves for accessing specific metrics curves.""" + return ["Precision-Recall(B)", "F1-Confidence(B)", "Precision-Confidence(B)", "Recall-Confidence(B)"] + + @property + def curves_results(self): + """Returns dictionary of computed performance metrics and statistics.""" + return self.box.curves_results class SegmentMetrics(SimpleClass): @@ -743,47 +922,53 @@ class SegmentMetrics(SimpleClass): results_dict: Returns the dictionary containing all the detection and segmentation metrics and fitness score. """ - def __init__(self, save_dir=Path('.'), plot=False, on_plot=None, names=()) -> None: + def __init__(self, save_dir=Path("."), plot=False, on_plot=None, names=()) -> None: + """Initialize a SegmentMetrics instance with a save directory, plot flag, callback function, and class names.""" self.save_dir = save_dir self.plot = plot self.on_plot = on_plot self.names = names self.box = Metric() self.seg = Metric() - self.speed = {'preprocess': 0.0, 'inference': 0.0, 'loss': 0.0, 'postprocess': 0.0} + self.speed = {"preprocess": 0.0, "inference": 0.0, "loss": 0.0, "postprocess": 0.0} + self.task = "segment" - def process(self, tp_b, tp_m, conf, pred_cls, target_cls): + def process(self, tp, tp_m, conf, pred_cls, target_cls): """ Processes the detection and segmentation metrics over the given set of predictions. Args: - tp_b (list): List of True Positive boxes. + tp (list): List of True Positive boxes. tp_m (list): List of True Positive masks. conf (list): List of confidence scores. pred_cls (list): List of predicted classes. target_cls (list): List of target classes. 
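# [Editor's note] Hedged end-to-end sketch of the DetMetrics flow above with
# fabricated matches: tp holds per-prediction TP flags at the 10 IoU thresholds
# 0.50:0.95, aligned row-for-row with conf and pred_cls.
import numpy as np

rng = np.random.default_rng(0)
det = DetMetrics(names={0: "person", 1: "car"})  # illustrative class map
tp = rng.random((200, 10)) > 0.5                 # fabricated TP flags
conf = rng.random(200)
pred_cls = rng.integers(0, 2, 200)
target_cls = rng.integers(0, 2, 200)
det.process(tp, conf, pred_cls, target_cls)
print(det.results_dict)  # precision/recall/mAP50/mAP50-95 plus fitness
print(det.curves)        # names of the four curve plots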
""" - results_mask = ap_per_class(tp_m, - conf, - pred_cls, - target_cls, - plot=self.plot, - on_plot=self.on_plot, - save_dir=self.save_dir, - names=self.names, - prefix='Mask')[2:] + results_mask = ap_per_class( + tp_m, + conf, + pred_cls, + target_cls, + plot=self.plot, + on_plot=self.on_plot, + save_dir=self.save_dir, + names=self.names, + prefix="Mask", + )[2:] self.seg.nc = len(self.names) self.seg.update(results_mask) - results_box = ap_per_class(tp_b, - conf, - pred_cls, - target_cls, - plot=self.plot, - on_plot=self.on_plot, - save_dir=self.save_dir, - names=self.names, - prefix='Box')[2:] + results_box = ap_per_class( + tp, + conf, + pred_cls, + target_cls, + plot=self.plot, + on_plot=self.on_plot, + save_dir=self.save_dir, + names=self.names, + prefix="Box", + )[2:] self.box.nc = len(self.names) self.box.update(results_box) @@ -791,8 +976,15 @@ class SegmentMetrics(SimpleClass): def keys(self): """Returns a list of keys for accessing metrics.""" return [ - 'metrics/precision(B)', 'metrics/recall(B)', 'metrics/mAP50(B)', 'metrics/mAP50-95(B)', - 'metrics/precision(M)', 'metrics/recall(M)', 'metrics/mAP50(M)', 'metrics/mAP50-95(M)'] + "metrics/precision(B)", + "metrics/recall(B)", + "metrics/mAP50(B)", + "metrics/mAP50-95(B)", + "metrics/precision(M)", + "metrics/recall(M)", + "metrics/mAP50(M)", + "metrics/mAP50-95(M)", + ] def mean_results(self): """Return the mean metrics for bounding box and segmentation results.""" @@ -820,7 +1012,26 @@ class SegmentMetrics(SimpleClass): @property def results_dict(self): """Returns results of object detection model for evaluation.""" - return dict(zip(self.keys + ['fitness'], self.mean_results() + [self.fitness])) + return dict(zip(self.keys + ["fitness"], self.mean_results() + [self.fitness])) + + @property + def curves(self): + """Returns a list of curves for accessing specific metrics curves.""" + return [ + "Precision-Recall(B)", + "F1-Confidence(B)", + "Precision-Confidence(B)", + "Recall-Confidence(B)", + "Precision-Recall(M)", + "F1-Confidence(M)", + "Precision-Confidence(M)", + "Recall-Confidence(M)", + ] + + @property + def curves_results(self): + """Returns dictionary of computed performance metrics and statistics.""" + return self.box.curves_results + self.seg.curves_results class PoseMetrics(SegmentMetrics): @@ -852,7 +1063,8 @@ class PoseMetrics(SegmentMetrics): results_dict: Returns the dictionary containing all the detection and segmentation metrics and fitness score. """ - def __init__(self, save_dir=Path('.'), plot=False, on_plot=None, names=()) -> None: + def __init__(self, save_dir=Path("."), plot=False, on_plot=None, names=()) -> None: + """Initialize the PoseMetrics class with directory path, class names, and plotting options.""" super().__init__(save_dir, plot, names) self.save_dir = save_dir self.plot = plot @@ -860,40 +1072,45 @@ class PoseMetrics(SegmentMetrics): self.names = names self.box = Metric() self.pose = Metric() - self.speed = {'preprocess': 0.0, 'inference': 0.0, 'loss': 0.0, 'postprocess': 0.0} + self.speed = {"preprocess": 0.0, "inference": 0.0, "loss": 0.0, "postprocess": 0.0} + self.task = "pose" - def process(self, tp_b, tp_p, conf, pred_cls, target_cls): + def process(self, tp, tp_p, conf, pred_cls, target_cls): """ Processes the detection and pose metrics over the given set of predictions. Args: - tp_b (list): List of True Positive boxes. + tp (list): List of True Positive boxes. tp_p (list): List of True Positive keypoints. conf (list): List of confidence scores. 
pred_cls (list): List of predicted classes. target_cls (list): List of target classes. """ - results_pose = ap_per_class(tp_p, - conf, - pred_cls, - target_cls, - plot=self.plot, - on_plot=self.on_plot, - save_dir=self.save_dir, - names=self.names, - prefix='Pose')[2:] + results_pose = ap_per_class( + tp_p, + conf, + pred_cls, + target_cls, + plot=self.plot, + on_plot=self.on_plot, + save_dir=self.save_dir, + names=self.names, + prefix="Pose", + )[2:] self.pose.nc = len(self.names) self.pose.update(results_pose) - results_box = ap_per_class(tp_b, - conf, - pred_cls, - target_cls, - plot=self.plot, - on_plot=self.on_plot, - save_dir=self.save_dir, - names=self.names, - prefix='Box')[2:] + results_box = ap_per_class( + tp, + conf, + pred_cls, + target_cls, + plot=self.plot, + on_plot=self.on_plot, + save_dir=self.save_dir, + names=self.names, + prefix="Box", + )[2:] self.box.nc = len(self.names) self.box.update(results_box) @@ -901,8 +1118,15 @@ class PoseMetrics(SegmentMetrics): def keys(self): """Returns list of evaluation metric keys.""" return [ - 'metrics/precision(B)', 'metrics/recall(B)', 'metrics/mAP50(B)', 'metrics/mAP50-95(B)', - 'metrics/precision(P)', 'metrics/recall(P)', 'metrics/mAP50(P)', 'metrics/mAP50-95(P)'] + "metrics/precision(B)", + "metrics/recall(B)", + "metrics/mAP50(B)", + "metrics/mAP50-95(B)", + "metrics/precision(P)", + "metrics/recall(P)", + "metrics/mAP50(P)", + "metrics/mAP50-95(P)", + ] def mean_results(self): """Return the mean results of box and pose.""" @@ -922,6 +1146,25 @@ class PoseMetrics(SegmentMetrics): """Computes classification metrics and speed using the `targets` and `pred` inputs.""" return self.pose.fitness() + self.box.fitness() + @property + def curves(self): + """Returns a list of curves for accessing specific metrics curves.""" + return [ + "Precision-Recall(B)", + "F1-Confidence(B)", + "Precision-Confidence(B)", + "Recall-Confidence(B)", + "Precision-Recall(P)", + "F1-Confidence(P)", + "Precision-Confidence(P)", + "Recall-Confidence(P)", + ] + + @property + def curves_results(self): + """Returns dictionary of computed performance metrics and statistics.""" + return self.box.curves_results + self.pose.curves_results + class ClassifyMetrics(SimpleClass): """ @@ -942,9 +1185,11 @@ class ClassifyMetrics(SimpleClass): """ def __init__(self) -> None: + """Initialize a ClassifyMetrics instance.""" self.top1 = 0 self.top5 = 0 - self.speed = {'preprocess': 0.0, 'inference': 0.0, 'loss': 0.0, 'postprocess': 0.0} + self.speed = {"preprocess": 0.0, "inference": 0.0, "loss": 0.0, "postprocess": 0.0} + self.task = "classify" def process(self, targets, pred): """Target classes and predicted classes.""" @@ -961,9 +1206,87 @@ class ClassifyMetrics(SimpleClass): @property def results_dict(self): """Returns a dictionary with model's performance metrics and fitness score.""" - return dict(zip(self.keys + ['fitness'], [self.top1, self.top5, self.fitness])) + return dict(zip(self.keys + ["fitness"], [self.top1, self.top5, self.fitness])) @property def keys(self): """Returns a list of keys for the results_dict property.""" - return ['metrics/accuracy_top1', 'metrics/accuracy_top5'] + return ["metrics/accuracy_top1", "metrics/accuracy_top5"] + + @property + def curves(self): + """Returns a list of curves for accessing specific metrics curves.""" + return [] + + @property + def curves_results(self): + """Returns a list of curves for accessing specific metrics curves.""" + return [] + + +class OBBMetrics(SimpleClass): + def __init__(self, save_dir=Path("."), 
plot=False, on_plot=None, names=()) -> None: + self.save_dir = save_dir + self.plot = plot + self.on_plot = on_plot + self.names = names + self.box = Metric() + self.speed = {"preprocess": 0.0, "inference": 0.0, "loss": 0.0, "postprocess": 0.0} + + def process(self, tp, conf, pred_cls, target_cls): + """Process predicted results for object detection and update metrics.""" + results = ap_per_class( + tp, + conf, + pred_cls, + target_cls, + plot=self.plot, + save_dir=self.save_dir, + names=self.names, + on_plot=self.on_plot, + )[2:] + self.box.nc = len(self.names) + self.box.update(results) + + @property + def keys(self): + """Returns a list of keys for accessing specific metrics.""" + return ["metrics/precision(B)", "metrics/recall(B)", "metrics/mAP50(B)", "metrics/mAP50-95(B)"] + + def mean_results(self): + """Calculate mean of detected objects & return precision, recall, mAP50, and mAP50-95.""" + return self.box.mean_results() + + def class_result(self, i): + """Return the result of evaluating the performance of an object detection model on a specific class.""" + return self.box.class_result(i) + + @property + def maps(self): + """Returns mean Average Precision (mAP) scores per class.""" + return self.box.maps + + @property + def fitness(self): + """Returns the fitness of box object.""" + return self.box.fitness() + + @property + def ap_class_index(self): + """Returns the average precision index per class.""" + return self.box.ap_class_index + + @property + def results_dict(self): + """Returns dictionary of computed performance metrics and statistics.""" + return dict(zip(self.keys + ["fitness"], self.mean_results() + [self.fitness])) + + @property + def curves(self): + """Returns a list of curves for accessing specific metrics curves.""" + return [] + + @property + def curves_results(self): + """Returns a list of curves for accessing specific metrics curves.""" + return [] diff --git a/ultralytics/utils/ops.py b/ultralytics/utils/ops.py index 9089d0f..edbb103 100644 --- a/ultralytics/utils/ops.py +++ b/ultralytics/utils/ops.py @@ -12,6 +12,7 @@ import torch.nn.functional as F import torchvision from ultralytics.utils import LOGGER +from ultralytics.utils.metrics import batch_probiou class Profile(contextlib.ContextDecorator): @@ -22,22 +23,24 @@ class Profile(contextlib.ContextDecorator): ```python from ultralytics.utils.ops import Profile - with Profile() as dt: + with Profile(device=device) as dt: pass # slow operation here print(dt) # prints "Elapsed time is 9.5367431640625e-07 s" ``` """ - def __init__(self, t=0.0): + def __init__(self, t=0.0, device: torch.device = None): """ Initialize the Profile class. Args: t (float): Initial time. Defaults to 0.0. + device (torch.device): Devices used for model inference. Defaults to None (cpu). """ self.t = t - self.cuda = torch.cuda.is_available() + self.device = device + self.cuda = bool(device and str(device).startswith("cuda")) def __enter__(self): """Start timing.""" @@ -50,12 +53,13 @@ class Profile(contextlib.ContextDecorator): self.t += self.dt # accumulate dt def __str__(self): - return f'Elapsed time is {self.t} s' + """Returns a human-readable string representing the accumulated elapsed time in the profiler.""" + return f"Elapsed time is {self.t} s" def time(self): """Get current time.""" if self.cuda: - torch.cuda.synchronize() + torch.cuda.synchronize(self.device) return time.time() @@ -71,18 +75,21 @@ def segment2box(segment, width=640, height=640): Returns: (np.ndarray): the minimum and maximum x and y values of the segment. 
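# [Editor's note] Usage sketch for the device-aware Profile above: passing the
# inference device makes time() call torch.cuda.synchronize(device) only when
# that device is CUDA, so CPU (and MPS) runs skip the sync instead of relying
# on the old unconditional torch.cuda.is_available() check.
import torch

device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
with Profile(device=device) as dt:  # class defined above
    _ = torch.ones(1024, 1024, device=device) @ torch.ones(1024, 1024, device=device)
print(dt)  # "Elapsed time is ... s"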
""" - # Convert 1 segment label to 1 box label, applying inside-image constraint, i.e. (xy1, xy2, ...) to (xyxy) x, y = segment.T # segment xy inside = (x >= 0) & (y >= 0) & (x <= width) & (y <= height) - x, y, = x[inside], y[inside] - return np.array([x.min(), y.min(), x.max(), y.max()], dtype=segment.dtype) if any(x) else np.zeros( - 4, dtype=segment.dtype) # xyxy + x = x[inside] + y = y[inside] + return ( + np.array([x.min(), y.min(), x.max(), y.max()], dtype=segment.dtype) + if any(x) + else np.zeros(4, dtype=segment.dtype) + ) # xyxy -def scale_boxes(img1_shape, boxes, img0_shape, ratio_pad=None, padding=True): +def scale_boxes(img1_shape, boxes, img0_shape, ratio_pad=None, padding=True, xywh=False): """ - Rescales bounding boxes (in the format of xyxy) from the shape of the image they were originally specified in - (img1_shape) to the shape of a different image (img0_shape). + Rescales bounding boxes (in the format of xyxy by default) from the shape of the image they were originally + specified in (img1_shape) to the shape of a different image (img0_shape). Args: img1_shape (tuple): The shape of the image that the bounding boxes are for, in the format of (height, width). @@ -92,24 +99,29 @@ def scale_boxes(img1_shape, boxes, img0_shape, ratio_pad=None, padding=True): calculated based on the size difference between the two images. padding (bool): If True, assuming the boxes is based on image augmented by yolo style. If False then do regular rescaling. + xywh (bool): The box format is xywh or not, default=False. Returns: boxes (torch.Tensor): The scaled bounding boxes, in the format of (x1, y1, x2, y2) """ if ratio_pad is None: # calculate from img0_shape gain = min(img1_shape[0] / img0_shape[0], img1_shape[1] / img0_shape[1]) # gain = old / new - pad = round((img1_shape[1] - img0_shape[1] * gain) / 2 - 0.1), round( - (img1_shape[0] - img0_shape[0] * gain) / 2 - 0.1) # wh padding + pad = ( + round((img1_shape[1] - img0_shape[1] * gain) / 2 - 0.1), + round((img1_shape[0] - img0_shape[0] * gain) / 2 - 0.1), + ) # wh padding else: gain = ratio_pad[0][0] pad = ratio_pad[1] if padding: - boxes[..., [0, 2]] -= pad[0] # x padding - boxes[..., [1, 3]] -= pad[1] # y padding + boxes[..., 0] -= pad[0] # x padding + boxes[..., 1] -= pad[1] # y padding + if not xywh: + boxes[..., 2] -= pad[0] # x padding + boxes[..., 3] -= pad[1] # y padding boxes[..., :4] /= gain - clip_boxes(boxes, img0_shape) - return boxes + return clip_boxes(boxes, img0_shape) def make_divisible(x, divisor): @@ -128,19 +140,41 @@ def make_divisible(x, divisor): return math.ceil(x / divisor) * divisor +def nms_rotated(boxes, scores, threshold=0.45): + """ + NMS for obbs, powered by probiou and fast-nms. + + Args: + boxes (torch.Tensor): (N, 5), xywhr. + scores (torch.Tensor): (N, ). + threshold (float): IoU threshold. 
+ + Returns: + """ + if len(boxes) == 0: + return np.empty((0,), dtype=np.int8) + sorted_idx = torch.argsort(scores, descending=True) + boxes = boxes[sorted_idx] + ious = batch_probiou(boxes, boxes).triu_(diagonal=1) + pick = torch.nonzero(ious.max(dim=0)[0] < threshold).squeeze_(-1) + return sorted_idx[pick] + + def non_max_suppression( - prediction, - conf_thres=0.25, - iou_thres=0.45, - classes=None, - agnostic=False, - multi_label=False, - labels=(), - max_det=300, - nc=0, # number of classes (optional) - max_time_img=0.05, - max_nms=30000, - max_wh=7680, + prediction, + conf_thres=0.25, + iou_thres=0.45, + classes=None, + agnostic=False, + multi_label=False, + labels=(), + max_det=300, + nc=0, # number of classes (optional) + max_time_img=0.05, + max_nms=30000, + max_wh=7680, + in_place=True, + rotated=False, ): """ Perform non-maximum suppression (NMS) on a set of boxes, with support for masks and multiple labels per box. @@ -164,7 +198,8 @@ def non_max_suppression( nc (int, optional): The number of classes output by the model. Any indices after this will be considered masks. max_time_img (float): The maximum time (seconds) for processing one image. max_nms (int): The maximum number of boxes into torchvision.ops.nms(). - max_wh (int): The maximum box width and height in pixels + max_wh (int): The maximum box width and height in pixels. + in_place (bool): If True, the input prediction tensor will be modified in place. Returns: (List[torch.Tensor]): A list of length batch_size, where each element is a tensor of @@ -173,15 +208,11 @@ def non_max_suppression( """ # Checks - assert 0 <= conf_thres <= 1, f'Invalid Confidence threshold {conf_thres}, valid values are between 0.0 and 1.0' - assert 0 <= iou_thres <= 1, f'Invalid IoU {iou_thres}, valid values are between 0.0 and 1.0' + assert 0 <= conf_thres <= 1, f"Invalid Confidence threshold {conf_thres}, valid values are between 0.0 and 1.0" + assert 0 <= iou_thres <= 1, f"Invalid IoU {iou_thres}, valid values are between 0.0 and 1.0" if isinstance(prediction, (list, tuple)): # YOLOv8 model in validation model, output = (inference_out, loss_out) prediction = prediction[0] # select only inference output - device = prediction.device - mps = 'mps' in device.type # Apple MPS - if mps: # MPS not fully supported yet, convert tensors to CPU before NMS - prediction = prediction.cpu() bs = prediction.shape[0] # batch size nc = nc or (prediction.shape[1] - 4) # number of classes nm = prediction.shape[1] - nc - 4 @@ -190,11 +221,15 @@ def non_max_suppression( # Settings # min_wh = 2 # (pixels) minimum box width and height - time_limit = 0.5 + max_time_img * bs # seconds to quit after + time_limit = 2.0 + max_time_img * bs # seconds to quit after multi_label &= nc > 1 # multiple labels per box (adds 0.5ms/img) prediction = prediction.transpose(-1, -2) # shape(1,84,6300) to shape(1,6300,84) - prediction[..., :4] = xywh2xyxy(prediction[..., :4]) # xywh to xyxy + if not rotated: + if in_place: + prediction[..., :4] = xywh2xyxy(prediction[..., :4]) # xywh to xyxy + else: + prediction = torch.cat((xywh2xyxy(prediction[..., :4]), prediction[..., 4:]), dim=-1) # xywh to xyxy t = time.time() output = [torch.zeros((0, 6 + nm), device=prediction.device)] * bs @@ -204,7 +239,7 @@ def non_max_suppression( x = x[xc[xi]] # confidence # Cat apriori labels if autolabelling - if labels and len(labels[xi]): + if labels and len(labels[xi]) and not rotated: lb = labels[xi] v = torch.zeros((len(lb), nc + nm + 4), device=x.device) v[:, :4] = xywh2xyxy(lb[:, 1:5]) # box @@ 
-238,8 +273,13 @@ def non_max_suppression( # Batched NMS c = x[:, 5:6] * (0 if agnostic else max_wh) # classes - boxes, scores = x[:, :4] + c, x[:, 4] # boxes (offset by class), scores - i = torchvision.ops.nms(boxes, scores, iou_thres) # NMS + scores = x[:, 4] # scores + if rotated: + boxes = torch.cat((x[:, :2] + c, x[:, 2:4], x[:, -1:]), dim=-1) # xywhr + i = nms_rotated(boxes, scores, iou_thres) + else: + boxes = x[:, :4] + c # boxes (offset by class) + i = torchvision.ops.nms(boxes, scores, iou_thres) # NMS i = i[:max_det] # limit detections # # Experimental @@ -247,7 +287,7 @@ def non_max_suppression( # if merge and (1 < n < 3E3): # Merge NMS (boxes merged using weighted mean) # # Update boxes as boxes(i,4) = weights(i,n) * boxes(n,4) # from .metrics import box_iou - # iou = box_iou(boxes[i], boxes) > iou_thres # iou matrix + # iou = box_iou(boxes[i], boxes) > iou_thres # IoU matrix # weights = iou * scores[None] # box weights # x[i, :4] = torch.mm(weights, x[:, :4]).float() / weights.sum(1, keepdim=True) # merged boxes # redundant = True # require redundant detections @@ -255,10 +295,8 @@ def non_max_suppression( # i = i[iou.sum(1) > 1] # require redundancy output[xi] = x[i] - if mps: - output[xi] = output[xi].to(device) if (time.time() - t) > time_limit: - LOGGER.warning(f'WARNING ⚠️ NMS time limit {time_limit:.3f}s exceeded') + LOGGER.warning(f"WARNING ⚠️ NMS time limit {time_limit:.3f}s exceeded") break # time limit exceeded return output @@ -269,17 +307,21 @@ def clip_boxes(boxes, shape): Takes a list of bounding boxes and a shape (height, width) and clips the bounding boxes to the shape. Args: - boxes (torch.Tensor): the bounding boxes to clip - shape (tuple): the shape of the image + boxes (torch.Tensor): the bounding boxes to clip + shape (tuple): the shape of the image + + Returns: + (torch.Tensor | numpy.ndarray): Clipped boxes """ - if isinstance(boxes, torch.Tensor): # faster individually - boxes[..., 0].clamp_(0, shape[1]) # x1 - boxes[..., 1].clamp_(0, shape[0]) # y1 - boxes[..., 2].clamp_(0, shape[1]) # x2 - boxes[..., 3].clamp_(0, shape[0]) # y2 + if isinstance(boxes, torch.Tensor): # faster individually (WARNING: inplace .clamp_() Apple MPS bug) + boxes[..., 0] = boxes[..., 0].clamp(0, shape[1]) # x1 + boxes[..., 1] = boxes[..., 1].clamp(0, shape[0]) # y1 + boxes[..., 2] = boxes[..., 2].clamp(0, shape[1]) # x2 + boxes[..., 3] = boxes[..., 3].clamp(0, shape[0]) # y2 else: # np.array (faster grouped) boxes[..., [0, 2]] = boxes[..., [0, 2]].clip(0, shape[1]) # x1, x2 boxes[..., [1, 3]] = boxes[..., [1, 3]].clip(0, shape[0]) # y1, y2 + return boxes def clip_coords(coords, shape): @@ -291,19 +333,20 @@ def clip_coords(coords, shape): shape (tuple): A tuple of integers representing the size of the image in the format (height, width). Returns: - (None): The function modifies the input `coordinates` in place, by clipping each coordinate to the image boundaries. 
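# [Editor's note] The batched-NMS trick above in isolation: adding
# class_index * max_wh to every coordinate shifts each class into its own
# disjoint region of the plane, so one torchvision.ops.nms call never
# suppresses across classes.
import torch
import torchvision

boxes = torch.tensor([[0.0, 0.0, 10.0, 10.0],
                      [1.0, 1.0, 11.0, 11.0]])  # heavy overlap...
scores = torch.tensor([0.9, 0.8])
cls = torch.tensor([[0.0], [1.0]])              # ...but different classes
keep = torchvision.ops.nms(boxes + cls * 7680, scores, 0.45)
print(keep)  # tensor([0, 1]): both survive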
+ (torch.Tensor | numpy.ndarray): Clipped coordinates """ - if isinstance(coords, torch.Tensor): # faster individually - coords[..., 0].clamp_(0, shape[1]) # x - coords[..., 1].clamp_(0, shape[0]) # y + if isinstance(coords, torch.Tensor): # faster individually (WARNING: inplace .clamp_() Apple MPS bug) + coords[..., 0] = coords[..., 0].clamp(0, shape[1]) # x + coords[..., 1] = coords[..., 1].clamp(0, shape[0]) # y else: # np.array (faster grouped) coords[..., 0] = coords[..., 0].clip(0, shape[1]) # x coords[..., 1] = coords[..., 1].clip(0, shape[0]) # y + return coords def scale_image(masks, im0_shape, ratio_pad=None): """ - Takes a mask, and resizes it to the original image size + Takes a mask, and resizes it to the original image size. Args: masks (np.ndarray): resized and padded masks/images, [h, w, num]/[h, w, 3]. @@ -321,7 +364,7 @@ def scale_image(masks, im0_shape, ratio_pad=None): gain = min(im1_shape[0] / im0_shape[0], im1_shape[1] / im0_shape[1]) # gain = old / new pad = (im1_shape[1] - im0_shape[1] * gain) / 2, (im1_shape[0] - im0_shape[0] * gain) / 2 # wh padding else: - gain = ratio_pad[0][0] + # gain = ratio_pad[0][0] pad = ratio_pad[1] top, left = int(pad[1]), int(pad[0]) # y, x bottom, right = int(im1_shape[0] - pad[1]), int(im1_shape[1] - pad[0]) @@ -347,7 +390,7 @@ def xyxy2xywh(x): Returns: y (np.ndarray | torch.Tensor): The bounding box coordinates in (x, y, width, height) format. """ - assert x.shape[-1] == 4, f'input shape last dimension expected 4 but input shape is {x.shape}' + assert x.shape[-1] == 4, f"input shape last dimension expected 4 but input shape is {x.shape}" y = torch.empty_like(x) if isinstance(x, torch.Tensor) else np.empty_like(x) # faster than clone/copy y[..., 0] = (x[..., 0] + x[..., 2]) / 2 # x center y[..., 1] = (x[..., 1] + x[..., 3]) / 2 # y center @@ -367,7 +410,7 @@ def xywh2xyxy(x): Returns: y (np.ndarray | torch.Tensor): The bounding box coordinates in (x1, y1, x2, y2) format. """ - assert x.shape[-1] == 4, f'input shape last dimension expected 4 but input shape is {x.shape}' + assert x.shape[-1] == 4, f"input shape last dimension expected 4 but input shape is {x.shape}" y = torch.empty_like(x) if isinstance(x, torch.Tensor) else np.empty_like(x) # faster than clone/copy dw = x[..., 2] / 2 # half-width dh = x[..., 3] / 2 # half-height @@ -392,7 +435,7 @@ def xywhn2xyxy(x, w=640, h=640, padw=0, padh=0): y (np.ndarray | torch.Tensor): The coordinates of the bounding box in the format [x1, y1, x2, y2] where x1,y1 is the top-left corner, x2,y2 is the bottom-right corner of the bounding box. """ - assert x.shape[-1] == 4, f'input shape last dimension expected 4 but input shape is {x.shape}' + assert x.shape[-1] == 4, f"input shape last dimension expected 4 but input shape is {x.shape}" y = torch.empty_like(x) if isinstance(x, torch.Tensor) else np.empty_like(x) # faster than clone/copy y[..., 0] = w * (x[..., 0] - x[..., 2] / 2) + padw # top left x y[..., 1] = h * (x[..., 1] - x[..., 3] / 2) + padh # top left y @@ -403,8 +446,8 @@ def xywhn2xyxy(x, w=640, h=640, padw=0, padh=0): def xyxy2xywhn(x, w=640, h=640, clip=False, eps=0.0): """ - Convert bounding box coordinates from (x1, y1, x2, y2) format to (x, y, width, height, normalized) format. - x, y, width and height are normalized to image dimensions + Convert bounding box coordinates from (x1, y1, x2, y2) format to (x, y, width, height, normalized) format. x, y, + width and height are normalized to image dimensions. 
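# [Editor's note] Quick round-trip check of the converters above: xyxy2xywh and
# xywh2xyxy are exact inverses, which the empty_like-based rewrites preserve.
import torch

xyxy = torch.tensor([[10.0, 20.0, 50.0, 80.0]])
assert torch.allclose(xywh2xyxy(xyxy2xywh(xyxy)), xyxy)  # functions above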
Args: x (np.ndarray | torch.Tensor): The input bounding box coordinates in (x1, y1, x2, y2) format. @@ -417,8 +460,8 @@ def xyxy2xywhn(x, w=640, h=640, clip=False, eps=0.0): y (np.ndarray | torch.Tensor): The bounding box coordinates in (x, y, width, height, normalized) format """ if clip: - clip_boxes(x, (h - eps, w - eps)) # warning: inplace clip - assert x.shape[-1] == 4, f'input shape last dimension expected 4 but input shape is {x.shape}' + x = clip_boxes(x, (h - eps, w - eps)) + assert x.shape[-1] == 4, f"input shape last dimension expected 4 but input shape is {x.shape}" y = torch.empty_like(x) if isinstance(x, torch.Tensor) else np.empty_like(x) # faster than clone/copy y[..., 0] = ((x[..., 0] + x[..., 2]) / 2) / w # x center y[..., 1] = ((x[..., 1] + x[..., 3]) / 2) / h # y center @@ -445,7 +488,7 @@ def xywh2ltwh(x): def xyxy2ltwh(x): """ - Convert nx4 bounding boxes from [x1, y1, x2, y2] to [x1, y1, w, h], where xy1=top-left, xy2=bottom-right + Convert nx4 bounding boxes from [x1, y1, x2, y2] to [x1, y1, w, h], where xy1=top-left, xy2=bottom-right. Args: x (np.ndarray | torch.Tensor): The input tensor with the bounding boxes coordinates in the xyxy format @@ -461,7 +504,7 @@ def xyxy2ltwh(x): def ltwh2xywh(x): """ - Convert nx4 boxes from [x1, y1, w, h] to [x, y, w, h] where xy1=top-left, xy=center + Convert nx4 boxes from [x1, y1, w, h] to [x, y, w, h] where xy1=top-left, xy=center. Args: x (torch.Tensor): the input tensor @@ -477,7 +520,8 @@ def ltwh2xywh(x): def xyxyxyxy2xywhr(corners): """ - Convert batched Oriented Bounding Boxes (OBB) from [xy1, xy2, xy3, xy4] to [xywh, rotation]. + Convert batched Oriented Bounding Boxes (OBB) from [xy1, xy2, xy3, xy4] to [xywh, rotation]. Rotation values are + expected in degrees from 0 to 90. Args: corners (numpy.ndarray | torch.Tensor): Input corners of shape (n, 8). @@ -485,66 +529,53 @@ def xyxyxyxy2xywhr(corners): Returns: (numpy.ndarray | torch.Tensor): Converted data in [cx, cy, w, h, rotation] format of shape (n, 5). """ - is_numpy = isinstance(corners, np.ndarray) - atan2, sqrt = (np.arctan2, np.sqrt) if is_numpy else (torch.atan2, torch.sqrt) - - x1, y1, x2, y2, x3, y3, x4, y4 = corners.T - cx = (x1 + x3) / 2 - cy = (y1 + y3) / 2 - dx21 = x2 - x1 - dy21 = y2 - y1 - - w = sqrt(dx21 ** 2 + dy21 ** 2) - h = sqrt((x2 - x3) ** 2 + (y2 - y3) ** 2) - - rotation = atan2(-dy21, dx21) - rotation *= 180.0 / math.pi # radians to degrees - - return np.vstack((cx, cy, w, h, rotation)).T if is_numpy else torch.stack((cx, cy, w, h, rotation), dim=1) + is_torch = isinstance(corners, torch.Tensor) + points = corners.cpu().numpy() if is_torch else corners + points = points.reshape(len(corners), -1, 2) + rboxes = [] + for pts in points: + # NOTE: Use cv2.minAreaRect to get accurate xywhr, + # especially some objects are cut off by augmentations in dataloader. + (x, y), (w, h), angle = cv2.minAreaRect(pts) + rboxes.append([x, y, w, h, angle / 180 * np.pi]) + return ( + torch.tensor(rboxes, device=corners.device, dtype=corners.dtype) + if is_torch + else np.asarray(rboxes, dtype=points.dtype) + ) # rboxes -def xywhr2xyxyxyxy(center): +def xywhr2xyxyxyxy(rboxes): """ - Convert batched Oriented Bounding Boxes (OBB) from [xywh, rotation] to [xy1, xy2, xy3, xy4]. + Convert batched Oriented Bounding Boxes (OBB) from [xywh, rotation] to [xy1, xy2, xy3, xy4]. Rotation values should + be in degrees from 0 to 90. Args: - center (numpy.ndarray | torch.Tensor): Input data in [cx, cy, w, h, rotation] format of shape (n, 5). 
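# [Editor's note] The rewrite above delegates corner-to-rbox conversion to
# cv2.minAreaRect, which stays correct when augmentation clips a polygon so
# its four points no longer form a perfect rectangle. Minimal sketch; note the
# returned angle is in degrees (the function above converts it to radians) and
# OpenCV's (w, h, angle) convention varies between versions:
import cv2
import numpy as np

pts = np.array([[0, 0], [4, 0], [4, 2], [0, 2]], dtype=np.float32)
(x, y), (w, h), angle = cv2.minAreaRect(pts)
print(x, y, w, h, angle * np.pi / 180)  # center (2, 1); 4x2 extents, radians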
+ rboxes (numpy.ndarray | torch.Tensor): Boxes in [cx, cy, w, h, rotation] format of shape (n, 5) or (b, n, 5). Returns: - (numpy.ndarray | torch.Tensor): Converted corner points of shape (n, 8). + (numpy.ndarray | torch.Tensor): Converted corner points of shape (n, 4, 2) or (b, n, 4, 2). """ - is_numpy = isinstance(center, np.ndarray) + is_numpy = isinstance(rboxes, np.ndarray) cos, sin = (np.cos, np.sin) if is_numpy else (torch.cos, torch.sin) - cx, cy, w, h, rotation = center.T - rotation *= math.pi / 180.0 # degrees to radians - - dx = w / 2 - dy = h / 2 - - cos_rot = cos(rotation) - sin_rot = sin(rotation) - dx_cos_rot = dx * cos_rot - dx_sin_rot = dx * sin_rot - dy_cos_rot = dy * cos_rot - dy_sin_rot = dy * sin_rot - - x1 = cx - dx_cos_rot - dy_sin_rot - y1 = cy + dx_sin_rot - dy_cos_rot - x2 = cx + dx_cos_rot - dy_sin_rot - y2 = cy - dx_sin_rot - dy_cos_rot - x3 = cx + dx_cos_rot + dy_sin_rot - y3 = cy - dx_sin_rot + dy_cos_rot - x4 = cx - dx_cos_rot + dy_sin_rot - y4 = cy + dx_sin_rot + dy_cos_rot - - return np.vstack((x1, y1, x2, y2, x3, y3, x4, y4)).T if is_numpy else torch.stack( - (x1, y1, x2, y2, x3, y3, x4, y4), dim=1) + ctr = rboxes[..., :2] + w, h, angle = (rboxes[..., i : i + 1] for i in range(2, 5)) + cos_value, sin_value = cos(angle), sin(angle) + vec1 = [w / 2 * cos_value, w / 2 * sin_value] + vec2 = [-h / 2 * sin_value, h / 2 * cos_value] + vec1 = np.concatenate(vec1, axis=-1) if is_numpy else torch.cat(vec1, dim=-1) + vec2 = np.concatenate(vec2, axis=-1) if is_numpy else torch.cat(vec2, dim=-1) + pt1 = ctr + vec1 + vec2 + pt2 = ctr + vec1 - vec2 + pt3 = ctr - vec1 - vec2 + pt4 = ctr - vec1 + vec2 + return np.stack([pt1, pt2, pt3, pt4], axis=-2) if is_numpy else torch.stack([pt1, pt2, pt3, pt4], dim=-2) def ltwh2xyxy(x): """ - It converts the bounding box from [x1, y1, w, h] to [x1, y1, x2, y2] where xy1=top-left, xy2=bottom-right + It converts the bounding box from [x1, y1, w, h] to [x1, y1, x2, y2] where xy1=top-left, xy2=bottom-right. Args: x (np.ndarray | torch.Tensor): the input image @@ -590,8 +621,9 @@ def resample_segments(segments, n=1000): s = np.concatenate((s, s[0:1, :]), axis=0) x = np.linspace(0, len(s) - 1, n) xp = np.arange(len(s)) - segments[i] = np.concatenate([np.interp(x, xp, s[:, i]) for i in range(2)], - dtype=np.float32).reshape(2, -1).T # segment xy + segments[i] = ( + np.concatenate([np.interp(x, xp, s[:, i]) for i in range(2)], dtype=np.float32).reshape(2, -1).T + ) # segment xy return segments @@ -606,7 +638,7 @@ def crop_mask(masks, boxes): Returns: (torch.Tensor): The masks are being cropped to the bounding box. """ - n, h, w = masks.shape + _, h, w = masks.shape x1, y1, x2, y2 = torch.chunk(boxes[:, :, None], 4, 1) # x1 shape(n,1,1) r = torch.arange(w, device=masks.device, dtype=x1.dtype)[None, None, :] # rows shape(1,1,w) c = torch.arange(h, device=masks.device, dtype=x1.dtype)[None, :, None] # cols shape(1,h,1) @@ -616,8 +648,8 @@ def crop_mask(masks, boxes): def process_mask_upsample(protos, masks_in, bboxes, shape): """ - Takes the output of the mask head, and applies the mask to the bounding boxes. This produces masks of higher - quality but is slower. + Takes the output of the mask head, and applies the mask to the bounding boxes. This produces masks of higher quality + but is slower. 
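# [Editor's note] Geometry behind the vectorized rewrite above: vec1 is half
# the width rotated by the box angle, vec2 is half the height rotated a
# further 90 degrees, and the corners are ctr +/- vec1 +/- vec2. Scalar sketch:
import math

cx, cy, w, h, angle = 10.0, 10.0, 4.0, 2.0, math.pi / 6
vec1 = (w / 2 * math.cos(angle), w / 2 * math.sin(angle))
vec2 = (-h / 2 * math.sin(angle), h / 2 * math.cos(angle))
pt1 = (cx + vec1[0] + vec2[0], cy + vec1[1] + vec2[1])
pt3 = (cx - vec1[0] - vec2[0], cy - vec1[1] - vec2[1])
print(pt1, pt3)  # diagonally opposite corners, symmetric about the center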
Args: protos (torch.Tensor): [mask_dim, mask_h, mask_w] @@ -630,7 +662,7 @@ def process_mask_upsample(protos, masks_in, bboxes, shape): """ c, mh, mw = protos.shape # CHW masks = (masks_in @ protos.float().view(c, -1)).sigmoid().view(-1, mh, mw) - masks = F.interpolate(masks[None], shape, mode='bilinear', align_corners=False)[0] # CHW + masks = F.interpolate(masks[None], shape, mode="bilinear", align_corners=False)[0] # CHW masks = crop_mask(masks, bboxes) # CHW return masks.gt_(0.5) @@ -654,16 +686,18 @@ def process_mask(protos, masks_in, bboxes, shape, upsample=False): c, mh, mw = protos.shape # CHW ih, iw = shape masks = (masks_in @ protos.float().view(c, -1)).sigmoid().view(-1, mh, mw) # CHW + width_ratio = mw / iw + height_ratio = mh / ih downsampled_bboxes = bboxes.clone() - downsampled_bboxes[:, 0] *= mw / iw - downsampled_bboxes[:, 2] *= mw / iw - downsampled_bboxes[:, 3] *= mh / ih - downsampled_bboxes[:, 1] *= mh / ih + downsampled_bboxes[:, 0] *= width_ratio + downsampled_bboxes[:, 2] *= width_ratio + downsampled_bboxes[:, 3] *= height_ratio + downsampled_bboxes[:, 1] *= height_ratio masks = crop_mask(masks, downsampled_bboxes) # CHW if upsample: - masks = F.interpolate(masks[None], shape, mode='bilinear', align_corners=False)[0] # CHW + masks = F.interpolate(masks[None], shape, mode="bilinear", align_corners=False)[0] # CHW return masks.gt_(0.5) @@ -707,13 +741,13 @@ def scale_masks(masks, shape, padding=True): bottom, right = (int(mh - pad[1]), int(mw - pad[0])) masks = masks[..., top:bottom, left:right] - masks = F.interpolate(masks, shape, mode='bilinear', align_corners=False) # NCHW + masks = F.interpolate(masks, shape, mode="bilinear", align_corners=False) # NCHW return masks def scale_coords(img1_shape, coords, img0_shape, ratio_pad=None, normalize=False, padding=True): """ - Rescale segment coordinates (xy) from img1_shape to img0_shape + Rescale segment coordinates (xy) from img1_shape to img0_shape. Args: img1_shape (tuple): The shape of the image that the coords are from. @@ -739,14 +773,32 @@ def scale_coords(img1_shape, coords, img0_shape, ratio_pad=None, normalize=False coords[..., 1] -= pad[1] # y padding coords[..., 0] /= gain coords[..., 1] /= gain - clip_coords(coords, img0_shape) + coords = clip_coords(coords, img0_shape) if normalize: coords[..., 0] /= img0_shape[1] # width coords[..., 1] /= img0_shape[0] # height return coords -def masks2segments(masks, strategy='largest'): +def regularize_rboxes(rboxes): + """ + Regularize rotated boxes in range [0, pi/2]. + + Args: + rboxes (torch.Tensor): (N, 5), xywhr. + + Returns: + (torch.Tensor): The regularized boxes. 
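# [Editor's note] Shape flow of process_mask above, with fabricated tensors:
# mask coefficients (n, c) times flattened prototypes (c, mh*mw) give one
# low-resolution mask per detection, which is then cropped to its downsampled
# box and optionally upsampled back to the network input size.
import torch

c, mh, mw, n = 32, 160, 160, 5
protos = torch.randn(c, mh, mw)
masks_in = torch.randn(n, c)
masks = (masks_in @ protos.float().view(c, -1)).sigmoid().view(-1, mh, mw)
print(masks.shape)  # torch.Size([5, 160, 160]), before crop_mask / upsample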
+ """ + x, y, w, h, t = rboxes.unbind(dim=-1) + # Swap edge and angle if h >= w + w_ = torch.where(w > h, w, h) + h_ = torch.where(w > h, h, w) + t = torch.where(w > h, t, t + math.pi / 2) % math.pi + return torch.stack([x, y, w_, h_, t], dim=-1) # regularized boxes + + +def masks2segments(masks, strategy="largest"): """ It takes a list of masks(n,h,w) and returns a list of segments(n,xy) @@ -758,16 +810,16 @@ def masks2segments(masks, strategy='largest'): segments (List): list of segment masks """ segments = [] - for x in masks.int().cpu().numpy().astype('uint8'): + for x in masks.int().cpu().numpy().astype("uint8"): c = cv2.findContours(x, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[0] if c: - if strategy == 'concat': # concatenate all segments + if strategy == "concat": # concatenate all segments c = np.concatenate([x.reshape(-1, 2) for x in c]) - elif strategy == 'largest': # select largest segment + elif strategy == "largest": # select largest segment c = np.array(c[np.array([len(x) for x in c]).argmax()]).reshape(-1, 2) else: c = np.zeros((0, 2)) # no segments found - segments.append(c.astype('float32')) + segments.append(c.astype("float32")) return segments @@ -794,4 +846,19 @@ def clean_str(s): Returns: (str): a string with special characters replaced by an underscore _ """ - return re.sub(pattern='[|@#!¡·$€%&()=?¿^*;:,¨´><+]', repl='_', string=s) + return re.sub(pattern="[|@#!¡·$€%&()=?¿^*;:,¨´><+]", repl="_", string=s) + +def v10postprocess(preds, max_det, nc=80): + assert(4 + nc == preds.shape[-1]) + boxes, scores = preds.split([4, nc], dim=-1) + max_scores = scores.amax(dim=-1) + max_scores, index = torch.topk(max_scores, max_det, dim=-1) + index = index.unsqueeze(-1) + boxes = torch.gather(boxes, dim=1, index=index.repeat(1, 1, boxes.shape[-1])) + scores = torch.gather(scores, dim=1, index=index.repeat(1, 1, scores.shape[-1])) + + scores, index = torch.topk(scores.flatten(1), max_det, dim=-1) + labels = index % nc + index = index // nc + boxes = boxes.gather(dim=1, index=index.unsqueeze(-1).repeat(1, 1, boxes.shape[-1])) + return boxes, scores, labels \ No newline at end of file diff --git a/ultralytics/utils/patches.py b/ultralytics/utils/patches.py index a145763..d438407 100644 --- a/ultralytics/utils/patches.py +++ b/ultralytics/utils/patches.py @@ -1,8 +1,7 @@ # Ultralytics YOLO 🚀, AGPL-3.0 license -""" -Monkey patches to update/extend functionality of existing functions -""" +"""Monkey patches to update/extend functionality of existing functions.""" +import time from pathlib import Path import cv2 @@ -14,7 +13,8 @@ _imshow = cv2.imshow # copy to avoid recursion errors def imread(filename: str, flags: int = cv2.IMREAD_COLOR): - """Read an image from a file. + """ + Read an image from a file. Args: filename (str): Path to the file to read. @@ -27,7 +27,8 @@ def imread(filename: str, flags: int = cv2.IMREAD_COLOR): def imwrite(filename: str, img: np.ndarray, params=None): - """Write an image to a file. + """ + Write an image to a file. Args: filename (str): Path to the file to write. @@ -45,31 +46,43 @@ def imwrite(filename: str, img: np.ndarray, params=None): def imshow(winname: str, mat: np.ndarray): - """Displays an image in the specified window. + """ + Displays an image in the specified window. Args: winname (str): Name of the window. mat (np.ndarray): Image to be shown. 
""" - _imshow(winname.encode('unicode_escape').decode(), mat) + _imshow(winname.encode("unicode_escape").decode(), mat) # PyTorch functions ---------------------------------------------------------------------------------------------------- _torch_save = torch.save # copy to avoid recursion errors -def torch_save(*args, **kwargs): - """Use dill (if exists) to serialize the lambda functions where pickle does not do this. +def torch_save(*args, use_dill=True, **kwargs): + """ + Optionally use dill to serialize lambda functions where pickle does not, adding robustness with 3 retries and + exponential standoff in case of save failure. Args: *args (tuple): Positional arguments to pass to torch.save. - **kwargs (dict): Keyword arguments to pass to torch.save. + use_dill (bool): Whether to try using dill for serialization if available. Defaults to True. + **kwargs (any): Keyword arguments to pass to torch.save. """ try: - import dill as pickle # noqa - except ImportError: + assert use_dill + import dill as pickle + except (AssertionError, ImportError): import pickle - if 'pickle_module' not in kwargs: - kwargs['pickle_module'] = pickle # noqa - return _torch_save(*args, **kwargs) + if "pickle_module" not in kwargs: + kwargs["pickle_module"] = pickle + + for i in range(4): # 3 retries + try: + return _torch_save(*args, **kwargs) + except RuntimeError as e: # unable to save, possibly waiting for device to flush or antivirus scan + if i == 3: + raise e + time.sleep((2**i) / 2) # exponential standoff: 0.5s, 1.0s, 2.0s diff --git a/ultralytics/utils/plotting.py b/ultralytics/utils/plotting.py index 6237f13..d0215ba 100644 --- a/ultralytics/utils/plotting.py +++ b/ultralytics/utils/plotting.py @@ -13,7 +13,6 @@ from PIL import Image, ImageDraw, ImageFont from PIL import __version__ as pil_version from ultralytics.utils import LOGGER, TryExcept, ops, plt_settings, threaded - from .checks import check_font, check_version, is_ascii from .files import increment_path @@ -28,20 +27,60 @@ class Colors: Attributes: palette (list of tuple): List of RGB color values. n (int): The number of colors in the palette. - pose_palette (np.array): A specific color palette array with dtype np.uint8. + pose_palette (np.ndarray): A specific color palette array with dtype np.uint8. 
""" def __init__(self): """Initialize colors as hex = matplotlib.colors.TABLEAU_COLORS.values().""" - hexs = ('FF3838', 'FF9D97', 'FF701F', 'FFB21D', 'CFD231', '48F90A', '92CC17', '3DDB86', '1A9334', '00D4BB', - '2C99A8', '00C2FF', '344593', '6473FF', '0018EC', '8438FF', '520085', 'CB38FF', 'FF95C8', 'FF37C7') - self.palette = [self.hex2rgb(f'#{c}') for c in hexs] + hexs = ( + "FF3838", + "FF9D97", + "FF701F", + "FFB21D", + "CFD231", + "48F90A", + "92CC17", + "3DDB86", + "1A9334", + "00D4BB", + "2C99A8", + "00C2FF", + "344593", + "6473FF", + "0018EC", + "8438FF", + "520085", + "CB38FF", + "FF95C8", + "FF37C7", + ) + self.palette = [self.hex2rgb(f"#{c}") for c in hexs] self.n = len(self.palette) - self.pose_palette = np.array([[255, 128, 0], [255, 153, 51], [255, 178, 102], [230, 230, 0], [255, 153, 255], - [153, 204, 255], [255, 102, 255], [255, 51, 255], [102, 178, 255], [51, 153, 255], - [255, 153, 153], [255, 102, 102], [255, 51, 51], [153, 255, 153], [102, 255, 102], - [51, 255, 51], [0, 255, 0], [0, 0, 255], [255, 0, 0], [255, 255, 255]], - dtype=np.uint8) + self.pose_palette = np.array( + [ + [255, 128, 0], + [255, 153, 51], + [255, 178, 102], + [230, 230, 0], + [255, 153, 255], + [153, 204, 255], + [255, 102, 255], + [255, 51, 255], + [102, 178, 255], + [51, 153, 255], + [255, 153, 153], + [255, 102, 102], + [255, 51, 51], + [153, 255, 153], + [102, 255, 102], + [51, 255, 51], + [0, 255, 0], + [0, 0, 255], + [255, 0, 0], + [255, 255, 255], + ], + dtype=np.uint8, + ) def __call__(self, i, bgr=False): """Converts hex color codes to RGB values.""" @@ -51,7 +90,7 @@ class Colors: @staticmethod def hex2rgb(h): """Converts hex color codes to RGB values (i.e. default PIL order).""" - return tuple(int(h[1 + i:1 + i + 2], 16) for i in (0, 2, 4)) + return tuple(int(h[1 + i : 1 + i + 2], 16) for i in (0, 2, 4)) colors = Colors() # create instance for 'from utils.plots import colors' @@ -71,65 +110,99 @@ class Annotator: kpt_color (List[int]): Color palette for keypoints. """ - def __init__(self, im, line_width=None, font_size=None, font='Arial.ttf', pil=False, example='abc'): + def __init__(self, im, line_width=None, font_size=None, font="Arial.ttf", pil=False, example="abc"): """Initialize the Annotator class with image and line width along with color palette for keypoints and limbs.""" - assert im.data.contiguous, 'Image not contiguous. Apply np.ascontiguousarray(im) to Annotator() input images.' non_ascii = not is_ascii(example) # non-latin labels, i.e. 
asian, arabic, cyrillic - self.pil = pil or non_ascii + input_is_pil = isinstance(im, Image.Image) + self.pil = pil or non_ascii or input_is_pil + self.lw = line_width or max(round(sum(im.size if input_is_pil else im.shape) / 2 * 0.003), 2) if self.pil: # use PIL - self.im = im if isinstance(im, Image.Image) else Image.fromarray(im) + self.im = im if input_is_pil else Image.fromarray(im) self.draw = ImageDraw.Draw(self.im) try: - font = check_font('Arial.Unicode.ttf' if non_ascii else font) + font = check_font("Arial.Unicode.ttf" if non_ascii else font) size = font_size or max(round(sum(self.im.size) / 2 * 0.035), 12) self.font = ImageFont.truetype(str(font), size) except Exception: self.font = ImageFont.load_default() # Deprecation fix for w, h = getsize(string) -> _, _, w, h = getbox(string) - if check_version(pil_version, '9.2.0'): + if check_version(pil_version, "9.2.0"): self.font.getsize = lambda x: self.font.getbbox(x)[2:4] # text width, height else: # use cv2 - self.im = im - self.lw = line_width or max(round(sum(im.shape) / 2 * 0.003), 2) # line width + assert im.data.contiguous, "Image not contiguous. Apply np.ascontiguousarray(im) to Annotator input images." + self.im = im if im.flags.writeable else im.copy() + self.tf = max(self.lw - 1, 1) # font thickness + self.sf = self.lw / 3 # font scale # Pose - self.skeleton = [[16, 14], [14, 12], [17, 15], [15, 13], [12, 13], [6, 12], [7, 13], [6, 7], [6, 8], [7, 9], - [8, 10], [9, 11], [2, 3], [1, 2], [1, 3], [2, 4], [3, 5], [4, 6], [5, 7]] + self.skeleton = [ + [16, 14], + [14, 12], + [17, 15], + [15, 13], + [12, 13], + [6, 12], + [7, 13], + [6, 7], + [6, 8], + [7, 9], + [8, 10], + [9, 11], + [2, 3], + [1, 2], + [1, 3], + [2, 4], + [3, 5], + [4, 6], + [5, 7], + ] self.limb_color = colors.pose_palette[[9, 9, 9, 9, 7, 7, 7, 0, 0, 0, 0, 0, 16, 16, 16, 16, 16, 16, 16]] self.kpt_color = colors.pose_palette[[16, 16, 16, 16, 16, 0, 0, 0, 0, 0, 0, 9, 9, 9, 9, 9, 9]] - def box_label(self, box, label='', color=(128, 128, 128), txt_color=(255, 255, 255)): + def box_label(self, box, label="", color=(128, 128, 128), txt_color=(255, 255, 255), rotated=False): """Add one xyxy box to image with label.""" if isinstance(box, torch.Tensor): box = box.tolist() if self.pil or not is_ascii(label): - self.draw.rectangle(box, width=self.lw, outline=color) # box + if rotated: + p1 = box[0] + # NOTE: PIL-version polygon needs tuple type. + self.draw.polygon([tuple(b) for b in box], width=self.lw, outline=color) + else: + p1 = (box[0], box[1]) + self.draw.rectangle(box, width=self.lw, outline=color) # box if label: w, h = self.font.getsize(label) # text width, height - outside = box[1] - h >= 0 # label fits outside box + outside = p1[1] - h >= 0 # label fits outside box self.draw.rectangle( - (box[0], box[1] - h if outside else box[1], box[0] + w + 1, - box[1] + 1 if outside else box[1] + h + 1), + (p1[0], p1[1] - h if outside else p1[1], p1[0] + w + 1, p1[1] + 1 if outside else p1[1] + h + 1), fill=color, ) # self.draw.text((box[0], box[1]), label, fill=txt_color, font=self.font, anchor='ls') # for PIL>8.0 - self.draw.text((box[0], box[1] - h if outside else box[1]), label, fill=txt_color, font=self.font) + self.draw.text((p1[0], p1[1] - h if outside else p1[1]), label, fill=txt_color, font=self.font) else: # cv2 - p1, p2 = (int(box[0]), int(box[1])), (int(box[2]), int(box[3])) - cv2.rectangle(self.im, p1, p2, color, thickness=self.lw, lineType=cv2.LINE_AA) + if rotated: + p1 = [int(b) for b in box[0]] + # NOTE: cv2-version polylines needs np.asarray type. 
+ cv2.polylines(self.im, [np.asarray(box, dtype=int)], True, color, self.lw) + else: + p1, p2 = (int(box[0]), int(box[1])), (int(box[2]), int(box[3])) + cv2.rectangle(self.im, p1, p2, color, thickness=self.lw, lineType=cv2.LINE_AA) if label: - tf = max(self.lw - 1, 1) # font thickness - w, h = cv2.getTextSize(label, 0, fontScale=self.lw / 3, thickness=tf)[0] # text width, height + w, h = cv2.getTextSize(label, 0, fontScale=self.sf, thickness=self.tf)[0] # text width, height outside = p1[1] - h >= 3 p2 = p1[0] + w, p1[1] - h - 3 if outside else p1[1] + h + 3 cv2.rectangle(self.im, p1, p2, color, -1, cv2.LINE_AA) # filled - cv2.putText(self.im, - label, (p1[0], p1[1] - 2 if outside else p1[1] + h + 2), - 0, - self.lw / 3, - txt_color, - thickness=tf, - lineType=cv2.LINE_AA) + cv2.putText( + self.im, + label, + (p1[0], p1[1] - 2 if outside else p1[1] + h + 2), + 0, + self.sf, + txt_color, + thickness=self.tf, + lineType=cv2.LINE_AA, + ) def masks(self, masks, colors, im_gpu, alpha=0.5, retina_masks=False): """ @@ -154,13 +227,13 @@ class Annotator: masks = masks.unsqueeze(3) # shape(n,h,w,1) masks_color = masks * (colors * alpha) # shape(n,h,w,3) - inv_alph_masks = (1 - masks * alpha).cumprod(0) # shape(n,h,w,1) + inv_alpha_masks = (1 - masks * alpha).cumprod(0) # shape(n,h,w,1) mcs = masks_color.max(dim=0).values # shape(n,h,w,3) im_gpu = im_gpu.flip(dims=[0]) # flip channel im_gpu = im_gpu.permute(1, 2, 0).contiguous() # shape(h,w,3) - im_gpu = im_gpu * inv_alph_masks[-1] + mcs - im_mask = (im_gpu * 255) + im_gpu = im_gpu * inv_alpha_masks[-1] + mcs + im_mask = im_gpu * 255 im_mask_np = im_mask.byte().cpu().numpy() self.im[:] = im_mask_np if retina_masks else ops.scale_image(im_mask_np, self.im.shape) if self.pil: @@ -178,13 +251,14 @@ class Annotator: kpt_line (bool, optional): If True, the function will draw lines connecting keypoints for human pose. Default is True. - Note: `kpt_line=True` currently only supports human pose plotting. + Note: + `kpt_line=True` currently only supports human pose plotting. 
""" if self.pil: # Convert to numpy first self.im = np.asarray(self.im).copy() nkpt, ndim = kpts.shape - is_pose = nkpt == 17 and ndim == 3 + is_pose = nkpt == 17 and ndim in {2, 3} kpt_line &= is_pose # `kpt_line=True` for now only supports human pose plotting for i, k in enumerate(kpts): color_k = [int(x) for x in self.kpt_color[i]] if is_pose else colors(i) @@ -219,9 +293,9 @@ class Annotator: """Add rectangle to image (PIL-only).""" self.draw.rectangle(xy, fill, outline, width) - def text(self, xy, text, txt_color=(255, 255, 255), anchor='top', box_style=False): + def text(self, xy, text, txt_color=(255, 255, 255), anchor="top", box_style=False): """Adds text to an image using PIL or cv2.""" - if anchor == 'bottom': # start y from font bottom + if anchor == "bottom": # start y from font bottom w, h = self.font.getsize(text) # text width, height xy[1] += 1 - h if self.pil: @@ -230,8 +304,8 @@ class Annotator: self.draw.rectangle((xy[0], xy[1], xy[0] + w + 1, xy[1] + h + 1), fill=txt_color) # Using `txt_color` for background and draw fg with white color txt_color = (255, 255, 255) - if '\n' in text: - lines = text.split('\n') + if "\n" in text: + lines = text.split("\n") _, h = self.font.getsize(text) for line in lines: self.draw.text(xy, line, fill=txt_color, font=self.font) @@ -240,15 +314,13 @@ class Annotator: self.draw.text(xy, text, fill=txt_color, font=self.font) else: if box_style: - tf = max(self.lw - 1, 1) # font thickness - w, h = cv2.getTextSize(text, 0, fontScale=self.lw / 3, thickness=tf)[0] # text width, height + w, h = cv2.getTextSize(text, 0, fontScale=self.sf, thickness=self.tf)[0] # text width, height outside = xy[1] - h >= 3 p2 = xy[0] + w, xy[1] - h - 3 if outside else xy[1] + h + 3 cv2.rectangle(self.im, xy, p2, txt_color, -1, cv2.LINE_AA) # filled # Using `txt_color` for background and draw fg with white color txt_color = (255, 255, 255) - tf = max(self.lw - 1, 1) # font thickness - cv2.putText(self.im, text, xy, 0, self.lw / 3, txt_color, thickness=tf, lineType=cv2.LINE_AA) + cv2.putText(self.im, text, xy, 0, self.sf, txt_color, thickness=self.tf, lineType=cv2.LINE_AA) def fromarray(self, im): """Update self.im from a numpy array.""" @@ -259,27 +331,289 @@ class Annotator: """Return annotated image as array.""" return np.asarray(self.im) + def show(self, title=None): + """Show the annotated image.""" + Image.fromarray(np.asarray(self.im)[..., ::-1]).show(title) + + def save(self, filename="image.jpg"): + """Save the annotated image to 'filename'.""" + cv2.imwrite(filename, np.asarray(self.im)) + + def draw_region(self, reg_pts=None, color=(0, 255, 0), thickness=5): + """ + Draw region line. + + Args: + reg_pts (list): Region Points (for line 2 points, for region 4 points) + color (tuple): Region Color value + thickness (int): Region area thickness value + """ + cv2.polylines(self.im, [np.array(reg_pts, dtype=np.int32)], isClosed=True, color=color, thickness=thickness) + + def draw_centroid_and_tracks(self, track, color=(255, 0, 255), track_thickness=2): + """ + Draw centroid point and track trails. 
+ + Args: + track (list): object tracking points for trails display + color (tuple): tracks line color + track_thickness (int): track line thickness value + """ + points = np.hstack(track).astype(np.int32).reshape((-1, 1, 2)) + cv2.polylines(self.im, [points], isClosed=False, color=color, thickness=track_thickness) + cv2.circle(self.im, (int(track[-1][0]), int(track[-1][1])), track_thickness * 2, color, -1) + + def count_labels(self, counts=0, count_txt_size=2, color=(255, 255, 255), txt_color=(0, 0, 0)): + """ + Plot counts for object counter. + + Args: + counts (int): objects counts value + count_txt_size (int): text size for counts display + color (tuple): background color of counts display + txt_color (tuple): text color of counts display + """ + self.tf = count_txt_size + tl = self.tf or round(0.002 * (self.im.shape[0] + self.im.shape[1]) / 2) + 1 + tf = max(tl - 1, 1) + + # Get text size for the counts label + t_size_in = cv2.getTextSize(str(counts), 0, fontScale=tl / 2, thickness=tf)[0] + + # Calculate positions for counts label + text_width = t_size_in[0] + text_x = (self.im.shape[1] - text_width) // 2 # Center x-coordinate + text_y = t_size_in[1] + + # Draw a filled background rectangle for the counts label + cv2.rectangle( + self.im, (text_x - 5, text_y - 5), (text_x + text_width + 7, text_y + t_size_in[1] + 7), color, -1 + ) + cv2.putText( + self.im, str(counts), (text_x, text_y + t_size_in[1]), 0, tl / 2, txt_color, self.tf, lineType=cv2.LINE_AA + ) + + @staticmethod + def estimate_pose_angle(a, b, c): + """ + Calculate the pose angle for an object. + + Args: + a (float): The value of pose point a + b (float): The value of pose point b + c (float): The value of pose point c + + Returns: + angle (degree): Degree value of angle between three points + """ + a, b, c = np.array(a), np.array(b), np.array(c) + radians = np.arctan2(c[1] - b[1], c[0] - b[0]) - np.arctan2(a[1] - b[1], a[0] - b[0]) + angle = np.abs(radians * 180.0 / np.pi) + if angle > 180.0: + angle = 360 - angle + return angle + + def draw_specific_points(self, keypoints, indices=[2, 5, 7], shape=(640, 640), radius=2): + """ + Draw specific keypoints for gym steps counting. + + Args: + keypoints (list): list of keypoints data to be plotted + indices (list): keypoints ids list to be plotted + shape (tuple): imgsz for model inference + radius (int): Keypoint radius value + """ + for i, k in enumerate(keypoints): + if i in indices: + x_coord, y_coord = k[0], k[1] + if x_coord % shape[1] != 0 and y_coord % shape[0] != 0: + if len(k) == 3: + conf = k[2] + if conf < 0.5: + continue + cv2.circle(self.im, (int(x_coord), int(y_coord)), radius, (0, 255, 0), -1, lineType=cv2.LINE_AA) + return self.im + + def plot_angle_and_count_and_stage(self, angle_text, count_text, stage_text, center_kpt, line_thickness=2): + """ + Plot the pose angle, count value and step stage.
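+ Each value is drawn over a filled white background rectangle so the text stays readable on any frame.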
+ + Args: + angle_text (float): angle value for workout monitoring + count_text (str): counts value for workout monitoring + stage_text (str): stage decision for workout monitoring + center_kpt (tuple): center keypoint (x, y) position for workout monitoring + line_thickness (int): thickness for text display + """ + angle_text, count_text, stage_text = (f" {angle_text:.2f}", f"Steps : {count_text}", f" {stage_text}") + font_scale = 0.6 + (line_thickness / 10.0) + + # Draw angle + (angle_text_width, angle_text_height), _ = cv2.getTextSize(angle_text, 0, font_scale, line_thickness) + angle_text_position = (int(center_kpt[0]), int(center_kpt[1])) + angle_background_position = (angle_text_position[0], angle_text_position[1] - angle_text_height - 5) + angle_background_size = (angle_text_width + 2 * 5, angle_text_height + 2 * 5 + (line_thickness * 2)) + cv2.rectangle( + self.im, + angle_background_position, + ( + angle_background_position[0] + angle_background_size[0], + angle_background_position[1] + angle_background_size[1], + ), + (255, 255, 255), + -1, + ) + cv2.putText(self.im, angle_text, angle_text_position, 0, font_scale, (0, 0, 0), line_thickness) + + # Draw Counts + (count_text_width, count_text_height), _ = cv2.getTextSize(count_text, 0, font_scale, line_thickness) + count_text_position = (angle_text_position[0], angle_text_position[1] + angle_text_height + 20) + count_background_position = ( + angle_background_position[0], + angle_background_position[1] + angle_background_size[1] + 5, + ) + count_background_size = (count_text_width + 10, count_text_height + 10 + (line_thickness * 2)) + + cv2.rectangle( + self.im, + count_background_position, + ( + count_background_position[0] + count_background_size[0], + count_background_position[1] + count_background_size[1], + ), + (255, 255, 255), + -1, + ) + cv2.putText(self.im, count_text, count_text_position, 0, font_scale, (0, 0, 0), line_thickness) + + # Draw Stage + (stage_text_width, stage_text_height), _ = cv2.getTextSize(stage_text, 0, font_scale, line_thickness) + stage_text_position = (int(center_kpt[0]), int(center_kpt[1]) + angle_text_height + count_text_height + 40) + stage_background_position = (stage_text_position[0], stage_text_position[1] - stage_text_height - 5) + stage_background_size = (stage_text_width + 10, stage_text_height + 10) + + cv2.rectangle( + self.im, + stage_background_position, + ( + stage_background_position[0] + stage_background_size[0], + stage_background_position[1] + stage_background_size[1], + ), + (255, 255, 255), + -1, + ) + cv2.putText(self.im, stage_text, stage_text_position, 0, font_scale, (0, 0, 0), line_thickness) + + def seg_bbox(self, mask, mask_color=(255, 0, 255), det_label=None, track_label=None): + """ + Function for drawing a segmented object in bounding box shape.
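+ The mask outline is drawn as a closed polyline, and the label is placed above the first mask point.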
+ + Args: + mask (list): masks data list for instance segmentation area plotting + mask_color (tuple): mask foreground color + det_label (str): Detection label text + track_label (str): Tracking label text + """ + cv2.polylines(self.im, [np.int32([mask])], isClosed=True, color=mask_color, thickness=2) + + label = f"Track ID: {track_label}" if track_label else det_label + text_size, _ = cv2.getTextSize(label, 0, 0.7, 1) + + cv2.rectangle( + self.im, + (int(mask[0][0]) - text_size[0] // 2 - 10, int(mask[0][1]) - text_size[1] - 10), + (int(mask[0][0]) + text_size[0] // 2 + 5, int(mask[0][1] + 5)), + mask_color, + -1, + ) + + cv2.putText( + self.im, label, (int(mask[0][0]) - text_size[0] // 2, int(mask[0][1]) - 5), 0, 0.7, (255, 255, 255), 2 + ) + + def plot_distance_and_line(self, distance_m, distance_mm, centroids, line_color, centroid_color): + """ + Plot the distance and line on frame. + + Args: + distance_m (float): Distance between two bbox centroids in meters. + distance_mm (float): Distance between two bbox centroids in millimeters. + centroids (list): Bounding box centroids data. + line_color (RGB): Distance line color. + centroid_color (RGB): Bounding box centroid color. + """ + (text_width_m, text_height_m), _ = cv2.getTextSize( + f"Distance M: {distance_m:.2f}m", cv2.FONT_HERSHEY_SIMPLEX, 0.8, 2 + ) + cv2.rectangle(self.im, (15, 25), (15 + text_width_m + 10, 25 + text_height_m + 20), (255, 255, 255), -1) + cv2.putText( + self.im, + f"Distance M: {distance_m:.2f}m", + (20, 50), + cv2.FONT_HERSHEY_SIMPLEX, + 0.8, + (0, 0, 0), + 2, + cv2.LINE_AA, + ) + + (text_width_mm, text_height_mm), _ = cv2.getTextSize( + f"Distance MM: {distance_mm:.2f}mm", cv2.FONT_HERSHEY_SIMPLEX, 0.8, 2 + ) + cv2.rectangle(self.im, (15, 75), (15 + text_width_mm + 10, 75 + text_height_mm + 20), (255, 255, 255), -1) + cv2.putText( + self.im, + f"Distance MM: {distance_mm:.2f}mm", + (20, 100), + cv2.FONT_HERSHEY_SIMPLEX, + 0.8, + (0, 0, 0), + 2, + cv2.LINE_AA, + ) + + cv2.line(self.im, centroids[0], centroids[1], line_color, 3) + cv2.circle(self.im, centroids[0], 6, centroid_color, -1) + cv2.circle(self.im, centroids[1], 6, centroid_color, -1) + + def visioneye(self, box, center_point, color=(235, 219, 11), pin_color=(255, 0, 255), thickness=2, pins_radius=10): + """ + Function for pinpointing human vision-eye mapping and plotting.
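+ A line is drawn from the viewing point to the object's bounding-box centroid, with a circle at each end.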
+ + Args: + box (list): Bounding box coordinates + center_point (tuple): center point for vision eye view + color (tuple): object centroid and line color value + pin_color (tuple): visioneye point color value + thickness (int): int value for line thickness + pins_radius (int): visioneye point radius value + """ + center_bbox = int((box[0] + box[2]) / 2), int((box[1] + box[3]) / 2) + cv2.circle(self.im, center_point, pins_radius, pin_color, -1) + cv2.circle(self.im, center_bbox, pins_radius, color, -1) + cv2.line(self.im, center_point, center_bbox, color, thickness) + @TryExcept() # known issue https://github.com/ultralytics/yolov5/issues/5395 @plt_settings() -def plot_labels(boxes, cls, names=(), save_dir=Path(''), on_plot=None): +def plot_labels(boxes, cls, names=(), save_dir=Path(""), on_plot=None): """Plot training labels including class histograms and box statistics.""" import pandas as pd import seaborn as sn # Filter matplotlib>=3.7.2 warning and Seaborn use_inf and is_categorical FutureWarnings - warnings.filterwarnings('ignore', category=UserWarning, message='The figure layout has changed to tight') - warnings.filterwarnings('ignore', category=FutureWarning) + warnings.filterwarnings("ignore", category=UserWarning, message="The figure layout has changed to tight") + warnings.filterwarnings("ignore", category=FutureWarning) # Plot dataset labels LOGGER.info(f"Plotting labels to {save_dir / 'labels.jpg'}... ") nc = int(cls.max() + 1) # number of classes boxes = boxes[:1000000] # limit to 1M boxes - x = pd.DataFrame(boxes, columns=['x', 'y', 'width', 'height']) + x = pd.DataFrame(boxes, columns=["x", "y", "width", "height"]) # Seaborn correlogram - sn.pairplot(x, corner=True, diag_kind='auto', kind='hist', diag_kws=dict(bins=50), plot_kws=dict(pmax=0.9)) - plt.savefig(save_dir / 'labels_correlogram.jpg', dpi=200) + sn.pairplot(x, corner=True, diag_kind="auto", kind="hist", diag_kws=dict(bins=50), plot_kws=dict(pmax=0.9)) + plt.savefig(save_dir / "labels_correlogram.jpg", dpi=200) plt.close() # Matplotlib labels @@ -287,14 +621,14 @@ def plot_labels(boxes, cls, names=(), save_dir=Path(''), on_plot=None): y = ax[0].hist(cls, bins=np.linspace(0, nc, nc + 1) - 0.5, rwidth=0.8) for i in range(nc): y[2].patches[i].set_color([x / 255 for x in colors(i)]) - ax[0].set_ylabel('instances') + ax[0].set_ylabel("instances") if 0 < len(names) < 30: ax[0].set_xticks(range(len(names))) ax[0].set_xticklabels(list(names.values()), rotation=90, fontsize=10) else: - ax[0].set_xlabel('classes') - sn.histplot(x, x='x', y='y', ax=ax[2], bins=50, pmax=0.9) - sn.histplot(x, x='width', y='height', ax=ax[3], bins=50, pmax=0.9) + ax[0].set_xlabel("classes") + sn.histplot(x, x="x", y="y", ax=ax[2], bins=50, pmax=0.9) + sn.histplot(x, x="width", y="height", ax=ax[3], bins=50, pmax=0.9) # Rectangles boxes[:, 0:2] = 0.5 # center @@ -303,21 +637,22 @@ def plot_labels(boxes, cls, names=(), save_dir=Path(''), on_plot=None): for cls, box in zip(cls[:500], boxes[:500]): ImageDraw.Draw(img).rectangle(box, width=1, outline=colors(cls)) # plot ax[1].imshow(img) - ax[1].axis('off') + ax[1].axis("off") for a in [0, 1, 2, 3]: - for s in ['top', 'right', 'left', 'bottom']: + for s in ["top", "right", "left", "bottom"]: ax[a].spines[s].set_visible(False) - fname = save_dir / 'labels.jpg' + fname = save_dir / "labels.jpg" plt.savefig(fname, dpi=200) plt.close() if on_plot: on_plot(fname) -def save_one_box(xyxy, im, file=Path('im.jpg'), gain=1.02, pad=10, square=False, BGR=False, save=True): - """Save image crop as {file} with crop 
size multiple {gain} and {pad} pixels. Save and/or return crop. +def save_one_box(xyxy, im, file=Path("im.jpg"), gain=1.02, pad=10, square=False, BGR=False, save=True): + """ + Save image crop as {file} with crop size multiple {gain} and {pad} pixels. Save and/or return crop. This function takes a bounding box and an image, and then saves a cropped portion of the image according to the bounding box. Optionally, the crop can be squared, and the function allows for gain and padding @@ -353,27 +688,33 @@ def save_one_box(xyxy, im, file=Path('im.jpg'), gain=1.02, pad=10, square=False, b[:, 2:] = b[:, 2:].max(1)[0].unsqueeze(1) # attempt rectangle to square b[:, 2:] = b[:, 2:] * gain + pad # box wh * gain + pad xyxy = ops.xywh2xyxy(b).long() - ops.clip_boxes(xyxy, im.shape) - crop = im[int(xyxy[0, 1]):int(xyxy[0, 3]), int(xyxy[0, 0]):int(xyxy[0, 2]), ::(1 if BGR else -1)] + xyxy = ops.clip_boxes(xyxy, im.shape) + crop = im[int(xyxy[0, 1]) : int(xyxy[0, 3]), int(xyxy[0, 0]) : int(xyxy[0, 2]), :: (1 if BGR else -1)] if save: file.parent.mkdir(parents=True, exist_ok=True) # make directory - f = str(increment_path(file).with_suffix('.jpg')) + f = str(increment_path(file).with_suffix(".jpg")) # cv2.imwrite(f, crop) # save BGR, https://github.com/ultralytics/yolov5/issues/7007 chroma subsampling issue Image.fromarray(crop[..., ::-1]).save(f, quality=95, subsampling=0) # save RGB return crop @threaded -def plot_images(images, - batch_idx, - cls, - bboxes=np.zeros(0, dtype=np.float32), - masks=np.zeros(0, dtype=np.uint8), - kpts=np.zeros((0, 51), dtype=np.float32), - paths=None, - fname='images.jpg', - names=None, - on_plot=None): +def plot_images( + images, + batch_idx, + cls, + bboxes=np.zeros(0, dtype=np.float32), + confs=None, + masks=np.zeros(0, dtype=np.uint8), + kpts=np.zeros((0, 51), dtype=np.float32), + paths=None, + fname="images.jpg", + names=None, + on_plot=None, + max_subplots=16, + save=True, + conf_thres=0.25, +): """Plot image grid with labels.""" if isinstance(images, torch.Tensor): images = images.cpu().float().numpy() @@ -389,21 +730,17 @@ def plot_images(images, batch_idx = batch_idx.cpu().numpy() max_size = 1920 # max image size - max_subplots = 16 # max image subplots, i.e. 
4x4 bs, _, h, w = images.shape # batch size, _, height, width bs = min(bs, max_subplots) # limit plot images - ns = np.ceil(bs ** 0.5) # number of subplots (square) + ns = np.ceil(bs**0.5) # number of subplots (square) if np.max(images[0]) <= 1: images *= 255 # de-normalise (optional) # Build Image mosaic = np.full((int(ns * h), int(ns * w), 3), 255, dtype=np.uint8) # init - for i, im in enumerate(images): - if i == max_subplots: # if last batch has fewer images than we expect - break + for i in range(bs): x, y = int(w * (i // ns)), int(h * (i % ns)) # block origin - im = im.transpose(1, 2, 0) - mosaic[y:y + h, x:x + w, :] = im + mosaic[y : y + h, x : x + w, :] = images[i].transpose(1, 2, 0) # Resize (optional) scale = max_size / ns / max(h, w) @@ -415,40 +752,42 @@ def plot_images(images, # Annotate fs = int((h + w) * ns * 0.01) # font size annotator = Annotator(mosaic, line_width=round(fs / 10), font_size=fs, pil=True, example=names) - for i in range(i + 1): + for i in range(bs): x, y = int(w * (i // ns)), int(h * (i % ns)) # block origin annotator.rectangle([x, y, x + w, y + h], None, (255, 255, 255), width=2) # borders if paths: annotator.text((x + 5, y + 5), text=Path(paths[i]).name[:40], txt_color=(220, 220, 220)) # filenames if len(cls) > 0: idx = batch_idx == i - classes = cls[idx].astype('int') + classes = cls[idx].astype("int") + labels = confs is None if len(bboxes): - boxes = ops.xywh2xyxy(bboxes[idx, :4]).T - labels = bboxes.shape[1] == 4 # labels if no conf column - conf = None if labels else bboxes[idx, 4] # check for confidence presence (label vs pred) - - if boxes.shape[1]: - if boxes.max() <= 1.01: # if normalized with tolerance 0.01 - boxes[[0, 2]] *= w # scale to pixels - boxes[[1, 3]] *= h + boxes = bboxes[idx] + conf = confs[idx] if confs is not None else None # check for confidence presence (label vs pred) + is_obb = boxes.shape[-1] == 5 # xywhr + boxes = ops.xywhr2xyxyxyxy(boxes) if is_obb else ops.xywh2xyxy(boxes) + if len(boxes): + if boxes[:, :4].max() <= 1.1: # if normalized with tolerance 0.1 + boxes[..., 0::2] *= w # scale to pixels + boxes[..., 1::2] *= h elif scale < 1: # absolute coords need scale if image scales - boxes *= scale - boxes[[0, 2]] += x - boxes[[1, 3]] += y - for j, box in enumerate(boxes.T.tolist()): + boxes[..., :4] *= scale + boxes[..., 0::2] += x + boxes[..., 1::2] += y + for j, box in enumerate(boxes.astype(np.int64).tolist()): c = classes[j] color = colors(c) c = names.get(c, c) if names else c - if labels or conf[j] > 0.25: # 0.25 conf thresh - label = f'{c}' if labels else f'{c} {conf[j]:.1f}' - annotator.box_label(box, label, color=color) + if labels or conf[j] > conf_thres: + label = f"{c}" if labels else f"{c} {conf[j]:.1f}" + annotator.box_label(box, label, color=color, rotated=is_obb) + elif len(classes): for c in classes: color = colors(c) c = names.get(c, c) if names else c - annotator.text((x, y), f'{c}', txt_color=color, box_style=True) + annotator.text((x, y), f"{c}", txt_color=color, box_style=True) # Plot keypoints if len(kpts): @@ -462,7 +801,7 @@ def plot_images(images, kpts_[..., 0] += x kpts_[..., 1] += y for j in range(len(kpts_)): - if labels or conf[j] > 0.25: # 0.25 conf thresh + if labels or conf[j] > conf_thres: annotator.kpts(kpts_[j]) # Plot masks @@ -477,8 +816,8 @@ def plot_images(images, image_masks = np.where(image_masks == index, 1.0, 0.0) im = np.asarray(annotator.im).copy() - for j, box in enumerate(boxes.T.tolist()): - if labels or conf[j] > 0.25: # 0.25 conf thresh + for j in range(len(image_masks)): + 
if labels or conf[j] > conf_thres: color = colors(classes[j]) mh, mw = image_masks[j].shape if mh != h or mw != w: @@ -488,27 +827,42 @@ def plot_images(images, else: mask = image_masks[j].astype(bool) with contextlib.suppress(Exception): - im[y:y + h, x:x + w, :][mask] = im[y:y + h, x:x + w, :][mask] * 0.4 + np.array(color) * 0.6 + im[y : y + h, x : x + w, :][mask] = ( + im[y : y + h, x : x + w, :][mask] * 0.4 + np.array(color) * 0.6 + ) annotator.fromarray(im) + if not save: + return np.asarray(annotator.im) annotator.im.save(fname) # save if on_plot: on_plot(fname) @plt_settings() -def plot_results(file='path/to/results.csv', dir='', segment=False, pose=False, classify=False, on_plot=None): +def plot_results(file="path/to/results.csv", dir="", segment=False, pose=False, classify=False, on_plot=None): """ - Plot training results from results CSV file. + Plot training results from a results CSV file. The function supports various types of data including segmentation, + pose estimation, and classification. Plots are saved as 'results.png' in the directory where the CSV is located. + + Args: + file (str, optional): Path to the CSV file containing the training results. Defaults to 'path/to/results.csv'. + dir (str, optional): Directory where the CSV file is located if 'file' is not provided. Defaults to ''. + segment (bool, optional): Flag to indicate if the data is for segmentation. Defaults to False. + pose (bool, optional): Flag to indicate if the data is for pose estimation. Defaults to False. + classify (bool, optional): Flag to indicate if the data is for classification. Defaults to False. + on_plot (callable, optional): Callback function to be executed after plotting. Takes filename as an argument. + Defaults to None. Example: ```python from ultralytics.utils.plotting import plot_results - plot_results('path/to/results.csv') + plot_results('path/to/results.csv', segment=True) ``` """ import pandas as pd from scipy.ndimage import gaussian_filter1d + save_dir = Path(file).parent if file else Path(dir) if classify: fig, ax = plt.subplots(2, 2, figsize=(6, 6), tight_layout=True) @@ -523,31 +877,121 @@ def plot_results(file='path/to/results.csv', dir='', segment=False, pose=False, fig, ax = plt.subplots(2, 5, figsize=(12, 6), tight_layout=True) index = [1, 2, 3, 4, 5, 8, 9, 10, 6, 7] ax = ax.ravel() - files = list(save_dir.glob('results*.csv')) - assert len(files), f'No results.csv files found in {save_dir.resolve()}, nothing to plot.' + files = list(save_dir.glob("results*.csv")) + assert len(files), f"No results.csv files found in {save_dir.resolve()}, nothing to plot." 
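Each metric column below is plotted twice: the raw values and a `gaussian_filter1d` smoothed trend. A minimal, self-contained sketch of that smoothing step (the metric values here are made up for illustration):

```python
import numpy as np
from scipy.ndimage import gaussian_filter1d

y = np.array([0.20, 0.35, 0.33, 0.50, 0.48, 0.62, 0.60, 0.71])  # hypothetical per-epoch metric
y_smooth = gaussian_filter1d(y, sigma=3)  # same sigma the plotting loop below uses
print(y_smooth.round(3))
```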
for f in files: try: data = pd.read_csv(f) s = [x.strip() for x in data.columns] x = data.values[:, 0] for i, j in enumerate(index): - y = data.values[:, j].astype('float') + y = data.values[:, j].astype("float") # y[y == 0] = np.nan # don't show zero values - ax[i].plot(x, y, marker='.', label=f.stem, linewidth=2, markersize=8) # actual results - ax[i].plot(x, gaussian_filter1d(y, sigma=3), ':', label='smooth', linewidth=2) # smoothing line + ax[i].plot(x, y, marker=".", label=f.stem, linewidth=2, markersize=8) # actual results + ax[i].plot(x, gaussian_filter1d(y, sigma=3), ":", label="smooth", linewidth=2) # smoothing line ax[i].set_title(s[j], fontsize=12) # if j in [8, 9, 10]: # share train and val loss y axes # ax[i].get_shared_y_axes().join(ax[i], ax[i - 5]) except Exception as e: - LOGGER.warning(f'WARNING: Plotting error for {f}: {e}') + LOGGER.warning(f"WARNING: Plotting error for {f}: {e}") ax[1].legend() - fname = save_dir / 'results.png' + fname = save_dir / "results.png" fig.savefig(fname, dpi=200) plt.close() if on_plot: on_plot(fname) + +def plt_color_scatter(v, f, bins=20, cmap="viridis", alpha=0.8, edgecolors="none"): + """ + Plots a scatter plot with points colored based on a 2D histogram. + + Args: + v (array-like): Values for the x-axis. + f (array-like): Values for the y-axis. + bins (int, optional): Number of bins for the histogram. Defaults to 20. + cmap (str, optional): Colormap for the scatter plot. Defaults to 'viridis'. + alpha (float, optional): Alpha for the scatter plot. Defaults to 0.8. + edgecolors (str, optional): Edge colors for the scatter plot. Defaults to 'none'. + + Examples: + >>> v = np.random.rand(100) + >>> f = np.random.rand(100) + >>> plt_color_scatter(v, f) + """ + + # Calculate 2D histogram and corresponding colors + hist, xedges, yedges = np.histogram2d(v, f, bins=bins) + colors = [ + hist[ + min(np.digitize(v[i], xedges, right=True) - 1, hist.shape[0] - 1), + min(np.digitize(f[i], yedges, right=True) - 1, hist.shape[1] - 1), + ] + for i in range(len(v)) + ] + + # Scatter plot + plt.scatter(v, f, c=colors, cmap=cmap, alpha=alpha, edgecolors=edgecolors) + + +def plot_tune_results(csv_file="tune_results.csv"): + """ + Plot the evolution results stored in a 'tune_results.csv' file. The function generates a scatter plot for each key + in the CSV, color-coded based on fitness scores. The best-performing configurations are highlighted on the plots. + + Args: + csv_file (str, optional): Path to the CSV file containing the tuning results. Defaults to 'tune_results.csv'.
+ + Examples: + >>> plot_tune_results('path/to/tune_results.csv') + """ + + import pandas as pd + from scipy.ndimage import gaussian_filter1d + + # Scatter plots for each hyperparameter + csv_file = Path(csv_file) + data = pd.read_csv(csv_file) + num_metrics_columns = 1 + keys = [x.strip() for x in data.columns][num_metrics_columns:] + x = data.values + fitness = x[:, 0] # fitness + j = np.argmax(fitness) # max fitness index + n = math.ceil(len(keys) ** 0.5) # columns and rows in plot + plt.figure(figsize=(10, 10), tight_layout=True) + for i, k in enumerate(keys): + v = x[:, i + num_metrics_columns] + mu = v[j] # best single result + plt.subplot(n, n, i + 1) + plt_color_scatter(v, fitness, cmap="viridis", alpha=0.8, edgecolors="none") + plt.plot(mu, fitness.max(), "k+", markersize=15) + plt.title(f"{k} = {mu:.3g}", fontdict={"size": 9}) # limit to 40 characters + plt.tick_params(axis="both", labelsize=8) # Set axis label size to 8 + if i % n != 0: + plt.yticks([]) + + file = csv_file.with_name("tune_scatter_plots.png") # filename + plt.savefig(file, dpi=200) + plt.close() + LOGGER.info(f"Saved {file}") + + # Fitness vs iteration + x = range(1, len(fitness) + 1) + plt.figure(figsize=(10, 6), tight_layout=True) + plt.plot(x, fitness, marker="o", linestyle="none", label="fitness") + plt.plot(x, gaussian_filter1d(fitness, sigma=3), ":", label="smoothed", linewidth=2) # smoothing line + plt.title("Fitness vs Iteration") + plt.xlabel("Iteration") + plt.ylabel("Fitness") + plt.grid(True) + plt.legend() + + file = csv_file.with_name("tune_fitness.png") # filename + plt.savefig(file, dpi=200) + plt.close() + LOGGER.info(f"Saved {file}") + + def output_to_target(output, max_det=300): """Convert model output to target format [batch_id, class_id, x, y, w, h, conf] for plotting.""" targets = [] @@ -556,10 +1000,21 @@ def output_to_target(output, max_det=300): j = torch.full((conf.shape[0], 1), i) targets.append(torch.cat((j, cls, ops.xyxy2xywh(box), conf), 1)) targets = torch.cat(targets, 0).numpy() - return targets[:, 0], targets[:, 1], targets[:, 2:] + return targets[:, 0], targets[:, 1], targets[:, 2:-1], targets[:, -1] -def feature_visualization(x, module_type, stage, n=32, save_dir=Path('runs/detect/exp')): +def output_to_rotated_target(output, max_det=300): + """Convert model output to target format [batch_id, class_id, x, y, w, h, conf] for plotting.""" + targets = [] + for i, o in enumerate(output): + box, conf, cls, angle = o[:max_det].cpu().split((4, 1, 1, 1), 1) + j = torch.full((conf.shape[0], 1), i) + targets.append(torch.cat((j, cls, box, angle, conf), 1)) + targets = torch.cat(targets, 0).numpy() + return targets[:, 0], targets[:, 1], targets[:, 2:-1], targets[:, -1] + + +def feature_visualization(x, module_type, stage, n=32, save_dir=Path("runs/detect/exp")): """ Visualize feature maps of a given model module during inference. @@ -570,23 +1025,23 @@ def feature_visualization(x, module_type, stage, n=32, save_dir=Path('runs/detec n (int, optional): Maximum number of feature maps to plot. Defaults to 32. save_dir (Path, optional): Directory to save results. Defaults to Path('runs/detect/exp'). 
""" - for m in ['Detect', 'Pose', 'Segment']: + for m in ["Detect", "Pose", "Segment"]: if m in module_type: return - batch, channels, height, width = x.shape # batch, channels, height, width + _, channels, height, width = x.shape # batch, channels, height, width if height > 1 and width > 1: f = save_dir / f"stage{stage}_{module_type.split('.')[-1]}_features.png" # filename blocks = torch.chunk(x[0].cpu(), channels, dim=0) # select batch index 0, block by channels n = min(n, channels) # number of plots - fig, ax = plt.subplots(math.ceil(n / 8), 8, tight_layout=True) # 8 rows x n/8 cols + _, ax = plt.subplots(math.ceil(n / 8), 8, tight_layout=True) # 8 rows x n/8 cols ax = ax.ravel() plt.subplots_adjust(wspace=0.05, hspace=0.05) for i in range(n): ax[i].imshow(blocks[i].squeeze()) # cmap='gray' - ax[i].axis('off') + ax[i].axis("off") - LOGGER.info(f'Saving {f}... ({n}/{channels})') - plt.savefig(f, dpi=300, bbox_inches='tight') + LOGGER.info(f"Saving {f}... ({n}/{channels})") + plt.savefig(f, dpi=300, bbox_inches="tight") plt.close() - np.save(str(f.with_suffix('.npy')), x[0].cpu().numpy()) # npy save + np.save(str(f.with_suffix(".npy")), x[0].cpu().numpy()) # npy save diff --git a/ultralytics/utils/tal.py b/ultralytics/utils/tal.py index 432e7a7..b11c2b2 100644 --- a/ultralytics/utils/tal.py +++ b/ultralytics/utils/tal.py @@ -4,65 +4,18 @@ import torch import torch.nn as nn from .checks import check_version -from .metrics import bbox_iou +from .metrics import bbox_iou, probiou +from .ops import xywhr2xyxyxyxy -TORCH_1_10 = check_version(torch.__version__, '1.10.0') - - -def select_candidates_in_gts(xy_centers, gt_bboxes, eps=1e-9): - """ - Select the positive anchor center in gt. - - Args: - xy_centers (Tensor): shape(h*w, 2) - gt_bboxes (Tensor): shape(b, n_boxes, 4) - - Returns: - (Tensor): shape(b, n_boxes, h*w) - """ - n_anchors = xy_centers.shape[0] - bs, n_boxes, _ = gt_bboxes.shape - lt, rb = gt_bboxes.view(-1, 1, 4).chunk(2, 2) # left-top, right-bottom - bbox_deltas = torch.cat((xy_centers[None] - lt, rb - xy_centers[None]), dim=2).view(bs, n_boxes, n_anchors, -1) - # return (bbox_deltas.min(3)[0] > eps).to(gt_bboxes.dtype) - return bbox_deltas.amin(3).gt_(eps) - - -def select_highest_overlaps(mask_pos, overlaps, n_max_boxes): - """ - If an anchor box is assigned to multiple gts, the one with the highest IoI will be selected. - - Args: - mask_pos (Tensor): shape(b, n_max_boxes, h*w) - overlaps (Tensor): shape(b, n_max_boxes, h*w) - - Returns: - target_gt_idx (Tensor): shape(b, h*w) - fg_mask (Tensor): shape(b, h*w) - mask_pos (Tensor): shape(b, n_max_boxes, h*w) - """ - # (b, n_max_boxes, h*w) -> (b, h*w) - fg_mask = mask_pos.sum(-2) - if fg_mask.max() > 1: # one anchor is assigned to multiple gt_bboxes - mask_multi_gts = (fg_mask.unsqueeze(1) > 1).expand(-1, n_max_boxes, -1) # (b, n_max_boxes, h*w) - max_overlaps_idx = overlaps.argmax(1) # (b, h*w) - - is_max_overlaps = torch.zeros(mask_pos.shape, dtype=mask_pos.dtype, device=mask_pos.device) - is_max_overlaps.scatter_(1, max_overlaps_idx.unsqueeze(1), 1) - - mask_pos = torch.where(mask_multi_gts, is_max_overlaps, mask_pos).float() # (b, n_max_boxes, h*w) - fg_mask = mask_pos.sum(-2) - # Find each grid serve which gt(index) - target_gt_idx = mask_pos.argmax(-2) # (b, h*w) - return target_gt_idx, fg_mask, mask_pos +TORCH_1_10 = check_version(torch.__version__, "1.10.0") class TaskAlignedAssigner(nn.Module): """ A task-aligned assigner for object detection. 
- This class assigns ground-truth (gt) objects to anchors based on the task-aligned metric, - which combines both classification and localization information. + This class assigns ground-truth (gt) objects to anchors based on the task-aligned metric, which combines both + classification and localization information. Attributes: topk (int): The number of top candidates to consider. @@ -85,8 +38,8 @@ class TaskAlignedAssigner(nn.Module): @torch.no_grad() def forward(self, pd_scores, pd_bboxes, anc_points, gt_labels, gt_bboxes, mask_gt): """ - Compute the task-aligned assignment. - Reference https://github.com/Nioolek/PPYOLOE_pytorch/blob/master/ppyoloe/assigner/tal_assigner.py + Compute the task-aligned assignment. Reference code is available at + https://github.com/Nioolek/PPYOLOE_pytorch/blob/master/ppyoloe/assigner/tal_assigner.py. Args: pd_scores (Tensor): shape(bs, num_total_anchors, num_classes) @@ -103,19 +56,24 @@ class TaskAlignedAssigner(nn.Module): fg_mask (Tensor): shape(bs, num_total_anchors) target_gt_idx (Tensor): shape(bs, num_total_anchors) """ - self.bs = pd_scores.size(0) - self.n_max_boxes = gt_bboxes.size(1) + self.bs = pd_scores.shape[0] + self.n_max_boxes = gt_bboxes.shape[1] if self.n_max_boxes == 0: device = gt_bboxes.device - return (torch.full_like(pd_scores[..., 0], self.bg_idx).to(device), torch.zeros_like(pd_bboxes).to(device), - torch.zeros_like(pd_scores).to(device), torch.zeros_like(pd_scores[..., 0]).to(device), - torch.zeros_like(pd_scores[..., 0]).to(device)) + return ( + torch.full_like(pd_scores[..., 0], self.bg_idx).to(device), + torch.zeros_like(pd_bboxes).to(device), + torch.zeros_like(pd_scores).to(device), + torch.zeros_like(pd_scores[..., 0]).to(device), + torch.zeros_like(pd_scores[..., 0]).to(device), + ) - mask_pos, align_metric, overlaps = self.get_pos_mask(pd_scores, pd_bboxes, gt_labels, gt_bboxes, anc_points, - mask_gt) + mask_pos, align_metric, overlaps = self.get_pos_mask( + pd_scores, pd_bboxes, gt_labels, gt_bboxes, anc_points, mask_gt + ) - target_gt_idx, fg_mask, mask_pos = select_highest_overlaps(mask_pos, overlaps, self.n_max_boxes) + target_gt_idx, fg_mask, mask_pos = self.select_highest_overlaps(mask_pos, overlaps, self.n_max_boxes) # Assigned target target_labels, target_bboxes, target_scores = self.get_targets(gt_labels, gt_bboxes, target_gt_idx, fg_mask) @@ -131,7 +89,7 @@ class TaskAlignedAssigner(nn.Module): def get_pos_mask(self, pd_scores, pd_bboxes, gt_labels, gt_bboxes, anc_points, mask_gt): """Get in_gts mask, (b, max_num_obj, h*w).""" - mask_in_gts = select_candidates_in_gts(anc_points, gt_bboxes) + mask_in_gts = self.select_candidates_in_gts(anc_points, gt_bboxes) # Get anchor_align metric, (b, max_num_obj, h*w) align_metric, overlaps = self.get_box_metrics(pd_scores, pd_bboxes, gt_labels, gt_bboxes, mask_in_gts * mask_gt) # Get topk_metric mask, (b, max_num_obj, h*w) @@ -157,11 +115,15 @@ class TaskAlignedAssigner(nn.Module): # (b, max_num_obj, 1, 4), (b, 1, h*w, 4) pd_boxes = pd_bboxes.unsqueeze(1).expand(-1, self.n_max_boxes, -1, -1)[mask_gt] gt_boxes = gt_bboxes.unsqueeze(2).expand(-1, -1, na, -1)[mask_gt] - overlaps[mask_gt] = bbox_iou(gt_boxes, pd_boxes, xywh=False, CIoU=True).squeeze(-1).clamp_(0) + overlaps[mask_gt] = self.iou_calculation(gt_boxes, pd_boxes) align_metric = bbox_scores.pow(self.alpha) * overlaps.pow(self.beta) return align_metric, overlaps + def iou_calculation(self, gt_bboxes, pd_bboxes): + """IoU calculation for horizontal bounding boxes.""" + return bbox_iou(gt_bboxes, pd_bboxes, xywh=False, 
CIoU=True).squeeze(-1).clamp_(0) + def select_topk_candidates(self, metrics, largest=True, topk_mask=None): """ Select the top-k candidates based on the given metrics. @@ -191,9 +153,9 @@ class TaskAlignedAssigner(nn.Module): ones = torch.ones_like(topk_idxs[:, :, :1], dtype=torch.int8, device=topk_idxs.device) for k in range(self.topk): # Expand topk_idxs for each value of k and add 1 at the specified positions - count_tensor.scatter_add_(-1, topk_idxs[:, :, k:k + 1], ones) + count_tensor.scatter_add_(-1, topk_idxs[:, :, k : k + 1], ones) # count_tensor.scatter_add_(-1, topk_idxs, torch.ones_like(topk_idxs, dtype=torch.int8, device=topk_idxs.device)) - # filter invalid bboxes + # Filter invalid bboxes count_tensor.masked_fill_(count_tensor > 1, 0) return count_tensor.to(metrics.dtype) @@ -229,15 +191,17 @@ class TaskAlignedAssigner(nn.Module): target_labels = gt_labels.long().flatten()[target_gt_idx] # (b, h*w) # Assigned target boxes, (b, max_num_obj, 4) -> (b, h*w, 4) - target_bboxes = gt_bboxes.view(-1, 4)[target_gt_idx] + target_bboxes = gt_bboxes.view(-1, gt_bboxes.shape[-1])[target_gt_idx] # Assigned target scores target_labels.clamp_(0) # 10x faster than F.one_hot() - target_scores = torch.zeros((target_labels.shape[0], target_labels.shape[1], self.num_classes), - dtype=torch.int64, - device=target_labels.device) # (b, h*w, 80) + target_scores = torch.zeros( + (target_labels.shape[0], target_labels.shape[1], self.num_classes), + dtype=torch.int64, + device=target_labels.device, + ) # (b, h*w, 80) target_scores.scatter_(2, target_labels.unsqueeze(-1), 1) fg_scores_mask = fg_mask[:, :, None].repeat(1, 1, self.num_classes) # (b, h*w, 80) @@ -245,6 +209,87 @@ class TaskAlignedAssigner(nn.Module): return target_labels, target_bboxes, target_scores + @staticmethod + def select_candidates_in_gts(xy_centers, gt_bboxes, eps=1e-9): + """ + Select the positive anchor center in gt. + + Args: + xy_centers (Tensor): shape(h*w, 2) + gt_bboxes (Tensor): shape(b, n_boxes, 4) + + Returns: + (Tensor): shape(b, n_boxes, h*w) + """ + n_anchors = xy_centers.shape[0] + bs, n_boxes, _ = gt_bboxes.shape + lt, rb = gt_bboxes.view(-1, 1, 4).chunk(2, 2) # left-top, right-bottom + bbox_deltas = torch.cat((xy_centers[None] - lt, rb - xy_centers[None]), dim=2).view(bs, n_boxes, n_anchors, -1) + # return (bbox_deltas.min(3)[0] > eps).to(gt_bboxes.dtype) + return bbox_deltas.amin(3).gt_(eps) + + @staticmethod + def select_highest_overlaps(mask_pos, overlaps, n_max_boxes): + """ + If an anchor box is assigned to multiple gts, the one with the highest IoU will be selected. 
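+ Ties are broken with `overlaps.argmax(1)`, so each anchor ends up assigned to at most one gt.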
+ + Args: + mask_pos (Tensor): shape(b, n_max_boxes, h*w) + overlaps (Tensor): shape(b, n_max_boxes, h*w) + + Returns: + target_gt_idx (Tensor): shape(b, h*w) + fg_mask (Tensor): shape(b, h*w) + mask_pos (Tensor): shape(b, n_max_boxes, h*w) + """ + # (b, n_max_boxes, h*w) -> (b, h*w) + fg_mask = mask_pos.sum(-2) + if fg_mask.max() > 1: # one anchor is assigned to multiple gt_bboxes + mask_multi_gts = (fg_mask.unsqueeze(1) > 1).expand(-1, n_max_boxes, -1) # (b, n_max_boxes, h*w) + max_overlaps_idx = overlaps.argmax(1) # (b, h*w) + + is_max_overlaps = torch.zeros(mask_pos.shape, dtype=mask_pos.dtype, device=mask_pos.device) + is_max_overlaps.scatter_(1, max_overlaps_idx.unsqueeze(1), 1) + + mask_pos = torch.where(mask_multi_gts, is_max_overlaps, mask_pos).float() # (b, n_max_boxes, h*w) + fg_mask = mask_pos.sum(-2) + # Find each grid serve which gt(index) + target_gt_idx = mask_pos.argmax(-2) # (b, h*w) + return target_gt_idx, fg_mask, mask_pos + + +class RotatedTaskAlignedAssigner(TaskAlignedAssigner): + def iou_calculation(self, gt_bboxes, pd_bboxes): + """IoU calculation for rotated bounding boxes.""" + return probiou(gt_bboxes, pd_bboxes).squeeze(-1).clamp_(0) + + @staticmethod + def select_candidates_in_gts(xy_centers, gt_bboxes): + """ + Select the positive anchor center in gt for rotated bounding boxes. + + Args: + xy_centers (Tensor): shape(h*w, 2) + gt_bboxes (Tensor): shape(b, n_boxes, 5) + + Returns: + (Tensor): shape(b, n_boxes, h*w) + """ + # (b, n_boxes, 5) --> (b, n_boxes, 4, 2) + corners = xywhr2xyxyxyxy(gt_bboxes) + # (b, n_boxes, 1, 2) + a, b, _, d = corners.split(1, dim=-2) + ab = b - a + ad = d - a + + # (b, n_boxes, h*w, 2) + ap = xy_centers - a + norm_ab = (ab * ab).sum(dim=-1) + norm_ad = (ad * ad).sum(dim=-1) + ap_dot_ab = (ap * ab).sum(dim=-1) + ap_dot_ad = (ap * ad).sum(dim=-1) + return (ap_dot_ab >= 0) & (ap_dot_ab <= norm_ab) & (ap_dot_ad >= 0) & (ap_dot_ad <= norm_ad) # is_in_box + def make_anchors(feats, strides, grid_cell_offset=0.5): """Generate anchors from features.""" @@ -255,7 +300,7 @@ def make_anchors(feats, strides, grid_cell_offset=0.5): _, _, h, w = feats[i].shape sx = torch.arange(end=w, device=device, dtype=dtype) + grid_cell_offset # shift x sy = torch.arange(end=h, device=device, dtype=dtype) + grid_cell_offset # shift y - sy, sx = torch.meshgrid(sy, sx, indexing='ij') if TORCH_1_10 else torch.meshgrid(sy, sx) + sy, sx = torch.meshgrid(sy, sx, indexing="ij") if TORCH_1_10 else torch.meshgrid(sy, sx) anchor_points.append(torch.stack((sx, sy), -1).view(-1, 2)) stride_tensor.append(torch.full((h * w, 1), stride, dtype=dtype, device=device)) return torch.cat(anchor_points), torch.cat(stride_tensor) @@ -263,7 +308,8 @@ def make_anchors(feats, strides, grid_cell_offset=0.5): def dist2bbox(distance, anchor_points, xywh=True, dim=-1): """Transform distance(ltrb) to box(xywh or xyxy).""" - lt, rb = distance.chunk(2, dim) + assert(distance.shape[dim] == 4) + lt, rb = distance.split([2, 2], dim) x1y1 = anchor_points - lt x2y2 = anchor_points + rb if xywh: @@ -277,3 +323,23 @@ def bbox2dist(anchor_points, bbox, reg_max): """Transform bbox(xyxy) to dist(ltrb).""" x1y1, x2y2 = bbox.chunk(2, -1) return torch.cat((anchor_points - x1y1, x2y2 - anchor_points), -1).clamp_(0, reg_max - 0.01) # dist (lt, rb) + + +def dist2rbox(pred_dist, pred_angle, anchor_points, dim=-1): + """ + Decode predicted object bounding box coordinates from anchor points and distribution. + + Args: + pred_dist (torch.Tensor): Predicted rotated distance, (bs, h*w, 4). 
+ pred_angle (torch.Tensor): Predicted angle, (bs, h*w, 1). + anchor_points (torch.Tensor): Anchor points, (h*w, 2). + Returns: + (torch.Tensor): Predicted rotated bounding boxes, (bs, h*w, 4). + """ + lt, rb = pred_dist.split(2, dim=dim) + cos, sin = torch.cos(pred_angle), torch.sin(pred_angle) + # (bs, h*w, 1) + xf, yf = ((rb - lt) / 2).split(1, dim=dim) + x, y = xf * cos - yf * sin, xf * sin + yf * cos + xy = torch.cat([x, y], dim=dim) + anchor_points + return torch.cat([xy, lt + rb], dim=dim) diff --git a/ultralytics/utils/torch_utils.py b/ultralytics/utils/torch_utils.py index def7442..d476e1f 100644 --- a/ultralytics/utils/torch_utils.py +++ b/ultralytics/utils/torch_utils.py @@ -2,7 +2,6 @@ import math import os -import platform import random import time from contextlib import contextmanager @@ -15,17 +14,23 @@ import torch import torch.distributed as dist import torch.nn as nn import torch.nn.functional as F +import torchvision -from ultralytics.utils import DEFAULT_CFG_DICT, DEFAULT_CFG_KEYS, LOGGER, RANK, __version__ -from ultralytics.utils.checks import check_version +from ultralytics.utils import DEFAULT_CFG_DICT, DEFAULT_CFG_KEYS, LOGGER, __version__ +from ultralytics.utils.checks import PYTHON_VERSION, check_version try: import thop except ImportError: thop = None -TORCH_1_9 = check_version(torch.__version__, '1.9.0') -TORCH_2_0 = check_version(torch.__version__, '2.0.0') +# Version checks (all default to version>=min_version) +TORCH_1_9 = check_version(torch.__version__, "1.9.0") +TORCH_1_13 = check_version(torch.__version__, "1.13.0") +TORCH_2_0 = check_version(torch.__version__, "2.0.0") +TORCHVISION_0_10 = check_version(torchvision.__version__, "0.10.0") +TORCHVISION_0_11 = check_version(torchvision.__version__, "0.11.0") +TORCHVISION_0_13 = check_version(torchvision.__version__, "0.13.0") @contextmanager @@ -44,7 +49,10 @@ def smart_inference_mode(): def decorate(fn): """Applies appropriate torch decorator for inference mode based on torch version.""" - return (torch.inference_mode if TORCH_1_9 else torch.no_grad)()(fn) + if TORCH_1_9 and torch.is_inference_mode_enabled(): + return fn # already in inference_mode, act as a pass-through + else: + return (torch.inference_mode if TORCH_1_9 else torch.no_grad)()(fn) return decorate @@ -53,59 +61,102 @@ def get_cpu_info(): """Return a string with system CPU information, i.e. 'Apple M2'.""" import cpuinfo # pip install py-cpuinfo - k = 'brand_raw', 'hardware_raw', 'arch_string_raw' # info keys sorted by preference (not all keys always available) + k = "brand_raw", "hardware_raw", "arch_string_raw" # info keys sorted by preference (not all keys always available) info = cpuinfo.get_cpu_info() # info dict - string = info.get(k[0] if k[0] in info else k[1] if k[1] in info else k[2], 'unknown') - return string.replace('(R)', '').replace('CPU ', '').replace('@ ', '') + string = info.get(k[0] if k[0] in info else k[1] if k[1] in info else k[2], "unknown") + return string.replace("(R)", "").replace("CPU ", "").replace("@ ", "") -def select_device(device='', batch=0, newline=False, verbose=True): - """Selects PyTorch Device. Options are device = None or 'cpu' or 0 or '0' or '0,1,2,3'.""" - s = f'Ultralytics YOLOv{__version__} 🚀 Python-{platform.python_version()} torch-{torch.__version__} ' +def select_device(device="", batch=0, newline=False, verbose=True): + """ + Selects the appropriate PyTorch device based on the provided arguments. 
+ + The function takes a string specifying the device or a torch.device object and returns a torch.device object + representing the selected device. The function also validates the number of available devices and raises an + exception if the requested device(s) are not available. + + Args: + device (str | torch.device, optional): Device string or torch.device object. + Options are 'None', 'cpu', or 'cuda', or '0' or '0,1,2,3'. Defaults to an empty string, which auto-selects + the first available GPU, or CPU if no GPU is available. + batch (int, optional): Batch size being used in your model. Defaults to 0. + newline (bool, optional): If True, adds a newline at the end of the log string. Defaults to False. + verbose (bool, optional): If True, logs the device information. Defaults to True. + + Returns: + (torch.device): Selected device. + + Raises: + ValueError: If the specified device is not available or if the batch size is not a multiple of the number of + devices when using multiple GPUs. + + Examples: + >>> select_device('cuda:0') + device(type='cuda', index=0) + + >>> select_device('cpu') + device(type='cpu') + + Note: + Sets the 'CUDA_VISIBLE_DEVICES' environment variable for specifying which GPUs to use. + """ + + if isinstance(device, torch.device): + return device + + s = f"Ultralytics YOLOv{__version__} 🚀 Python-{PYTHON_VERSION} torch-{torch.__version__} " device = str(device).lower() - for remove in 'cuda:', 'none', '(', ')', '[', ']', "'", ' ': - device = device.replace(remove, '') # to string, 'cuda:0' -> '0' and '(0, 1)' -> '0,1' - cpu = device == 'cpu' - mps = device == 'mps' # Apple Metal Performance Shaders (MPS) + for remove in "cuda:", "none", "(", ")", "[", "]", "'", " ": + device = device.replace(remove, "") # to string, 'cuda:0' -> '0' and '(0, 1)' -> '0,1' + cpu = device == "cpu" + mps = device in ("mps", "mps:0") # Apple Metal Performance Shaders (MPS) if cpu or mps: - os.environ['CUDA_VISIBLE_DEVICES'] = '-1' # force torch.cuda.is_available() = False + os.environ["CUDA_VISIBLE_DEVICES"] = "-1" # force torch.cuda.is_available() = False elif device: # non-cpu device requested - if device == 'cuda': - device = '0' - visible = os.environ.get('CUDA_VISIBLE_DEVICES', None) - os.environ['CUDA_VISIBLE_DEVICES'] = device # set environment variable - must be before assert is_available() - if not (torch.cuda.is_available() and torch.cuda.device_count() >= len(device.replace(',', ''))): + if device == "cuda": + device = "0" + visible = os.environ.get("CUDA_VISIBLE_DEVICES", None) + os.environ["CUDA_VISIBLE_DEVICES"] = device # set environment variable - must be before assert is_available() + if not (torch.cuda.is_available() and torch.cuda.device_count() >= len(device.split(","))): LOGGER.info(s) - install = 'See https://pytorch.org/get-started/locally/ for up-to-date torch install instructions if no ' \ - 'CUDA devices are seen by torch.\n' if torch.cuda.device_count() == 0 else '' - raise ValueError(f"Invalid CUDA 'device={device}' requested." - f" Use 'device=cpu' or pass valid CUDA device(s) if available," - f" i.e. 
'device=0' or 'device=0,1,2,3' for Multi-GPU.\n" - f'\ntorch.cuda.is_available(): {torch.cuda.is_available()}' - f'\ntorch.cuda.device_count(): {torch.cuda.device_count()}' - f"\nos.environ['CUDA_VISIBLE_DEVICES']: {visible}\n" - f'{install}') + install = ( + "See https://pytorch.org/get-started/locally/ for up-to-date torch install instructions if no " + "CUDA devices are seen by torch.\n" + if torch.cuda.device_count() == 0 + else "" + ) + raise ValueError( + f"Invalid CUDA 'device={device}' requested." + f" Use 'device=cpu' or pass valid CUDA device(s) if available," + f" i.e. 'device=0' or 'device=0,1,2,3' for Multi-GPU.\n" + f"\ntorch.cuda.is_available(): {torch.cuda.is_available()}" + f"\ntorch.cuda.device_count(): {torch.cuda.device_count()}" + f"\nos.environ['CUDA_VISIBLE_DEVICES']: {visible}\n" + f"{install}" + ) if not cpu and not mps and torch.cuda.is_available(): # prefer GPU if available - devices = device.split(',') if device else '0' # range(torch.cuda.device_count()) # i.e. 0,1,6,7 + devices = device.split(",") if device else "0" # range(torch.cuda.device_count()) # i.e. 0,1,6,7 n = len(devices) # device count if n > 1 and batch > 0 and batch % n != 0: # check batch_size is divisible by device_count - raise ValueError(f"'batch={batch}' must be a multiple of GPU count {n}. Try 'batch={batch // n * n}' or " - f"'batch={batch // n * n + n}', the nearest batch sizes evenly divisible by {n}.") - space = ' ' * (len(s) + 1) + raise ValueError( + f"'batch={batch}' must be a multiple of GPU count {n}. Try 'batch={batch // n * n}' or " + f"'batch={batch // n * n + n}', the nearest batch sizes evenly divisible by {n}." + ) + space = " " * (len(s) + 1) for i, d in enumerate(devices): p = torch.cuda.get_device_properties(i) s += f"{'' if i == 0 else space}CUDA:{d} ({p.name}, {p.total_memory / (1 << 20):.0f}MiB)\n" # bytes to MB - arg = 'cuda:0' - elif mps and getattr(torch, 'has_mps', False) and torch.backends.mps.is_available() and TORCH_2_0: + arg = "cuda:0" + elif mps and TORCH_2_0 and torch.backends.mps.is_available(): # Prefer MPS if available - s += f'MPS ({get_cpu_info()})\n' - arg = 'mps' + s += f"MPS ({get_cpu_info()})\n" + arg = "mps" else: # revert to CPU - s += f'CPU ({get_cpu_info()})\n' - arg = 'cpu' + s += f"CPU ({get_cpu_info()})\n" + arg = "cpu" - if verbose and RANK == -1: + if verbose: LOGGER.info(s if newline else s.rstrip()) return torch.device(arg) @@ -119,14 +170,20 @@ def time_sync(): def fuse_conv_and_bn(conv, bn): """Fuse Conv2d() and BatchNorm2d() layers https://tehnokv.com/posts/fusing-batchnorm-and-conv/.""" - fusedconv = nn.Conv2d(conv.in_channels, - conv.out_channels, - kernel_size=conv.kernel_size, - stride=conv.stride, - padding=conv.padding, - dilation=conv.dilation, - groups=conv.groups, - bias=True).requires_grad_(False).to(conv.weight.device) + fusedconv = ( + nn.Conv2d( + conv.in_channels, + conv.out_channels, + kernel_size=conv.kernel_size, + stride=conv.stride, + padding=conv.padding, + dilation=conv.dilation, + groups=conv.groups, + bias=True, + ) + .requires_grad_(False) + .to(conv.weight.device) + ) # Prepare filters w_conv = conv.weight.clone().view(conv.out_channels, -1) @@ -134,7 +191,7 @@ def fuse_conv_and_bn(conv, bn): fusedconv.weight.copy_(torch.mm(w_bn, w_conv).view(fusedconv.weight.shape)) # Prepare spatial bias - b_conv = torch.zeros(conv.weight.size(0), device=conv.weight.device) if conv.bias is None else conv.bias + b_conv = torch.zeros(conv.weight.shape[0], device=conv.weight.device) if conv.bias is None else conv.bias b_bn = 
bn.bias - bn.weight.mul(bn.running_mean).div(torch.sqrt(bn.running_var + bn.eps)) fusedconv.bias.copy_(torch.mm(w_bn, b_conv.reshape(-1, 1)).reshape(-1) + b_bn) @@ -143,15 +200,21 @@ def fuse_conv_and_bn(conv, bn): def fuse_deconv_and_bn(deconv, bn): """Fuse ConvTranspose2d() and BatchNorm2d() layers.""" - fuseddconv = nn.ConvTranspose2d(deconv.in_channels, - deconv.out_channels, - kernel_size=deconv.kernel_size, - stride=deconv.stride, - padding=deconv.padding, - output_padding=deconv.output_padding, - dilation=deconv.dilation, - groups=deconv.groups, - bias=True).requires_grad_(False).to(deconv.weight.device) + fuseddconv = ( + nn.ConvTranspose2d( + deconv.in_channels, + deconv.out_channels, + kernel_size=deconv.kernel_size, + stride=deconv.stride, + padding=deconv.padding, + output_padding=deconv.output_padding, + dilation=deconv.dilation, + groups=deconv.groups, + bias=True, + ) + .requires_grad_(False) + .to(deconv.weight.device) + ) # Prepare filters w_deconv = deconv.weight.clone().view(deconv.out_channels, -1) @@ -159,7 +222,7 @@ def fuse_deconv_and_bn(deconv, bn): fuseddconv.weight.copy_(torch.mm(w_bn, w_deconv).view(fuseddconv.weight.shape)) # Prepare spatial bias - b_conv = torch.zeros(deconv.weight.size(1), device=deconv.weight.device) if deconv.bias is None else deconv.bias + b_conv = torch.zeros(deconv.weight.shape[1], device=deconv.weight.device) if deconv.bias is None else deconv.bias b_bn = bn.bias - bn.weight.mul(bn.running_mean).div(torch.sqrt(bn.running_var + bn.eps)) fuseddconv.bias.copy_(torch.mm(w_bn, b_conv.reshape(-1, 1)).reshape(-1) + b_bn) @@ -167,7 +230,11 @@ def fuse_deconv_and_bn(deconv, bn): def model_info(model, detailed=False, verbose=True, imgsz=640): - """Model information. imgsz may be int or list, i.e. imgsz=640 or imgsz=[640, 320].""" + """ + Model information. + + imgsz may be int or list, i.e. imgsz=640 or imgsz=[640, 320]. 
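+
+ Example (illustrative; assumes `model` is an instantiated torch model):
+ ```python
+ n_layers, n_params, n_gradients, flops = model_info(model, imgsz=640)
+ ```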
+ """ if not verbose: return n_p = get_num_params(model) # number of parameters @@ -175,18 +242,21 @@ def model_info(model, detailed=False, verbose=True, imgsz=640): n_l = len(list(model.modules())) # number of layers if detailed: LOGGER.info( - f"{'layer':>5} {'name':>40} {'gradient':>9} {'parameters':>12} {'shape':>20} {'mu':>10} {'sigma':>10}") + f"{'layer':>5} {'name':>40} {'gradient':>9} {'parameters':>12} {'shape':>20} {'mu':>10} {'sigma':>10}" + ) for i, (name, p) in enumerate(model.named_parameters()): - name = name.replace('module_list.', '') - LOGGER.info('%5g %40s %9s %12g %20s %10.3g %10.3g %10s' % - (i, name, p.requires_grad, p.numel(), list(p.shape), p.mean(), p.std(), p.dtype)) + name = name.replace("module_list.", "") + LOGGER.info( + "%5g %40s %9s %12g %20s %10.3g %10.3g %10s" + % (i, name, p.requires_grad, p.numel(), list(p.shape), p.mean(), p.std(), p.dtype) + ) flops = get_flops(model, imgsz) - fused = ' (fused)' if getattr(model, 'is_fused', lambda: False)() else '' - fs = f', {flops:.1f} GFLOPs' if flops else '' - yaml_file = getattr(model, 'yaml_file', '') or getattr(model, 'yaml', {}).get('yaml_file', '') - model_name = Path(yaml_file).stem.replace('yolo', 'YOLO') or 'Model' - LOGGER.info(f'{model_name} summary{fused}: {n_l} layers, {n_p} parameters, {n_g} gradients{fs}') + fused = " (fused)" if getattr(model, "is_fused", lambda: False)() else "" + fs = f", {flops:.1f} GFLOPs" if flops else "" + yaml_file = getattr(model, "yaml_file", "") or getattr(model, "yaml", {}).get("yaml_file", "") + model_name = Path(yaml_file).stem.replace("yolo", "YOLO") or "Model" + LOGGER.info(f"{model_name} summary{fused}: {n_l} layers, {n_p} parameters, {n_g} gradients{fs}") return n_l, n_p, n_g, flops @@ -204,37 +274,53 @@ def model_info_for_loggers(trainer): """ Return model info dict with useful model information. 
-    Example for YOLOv8n:
-        {'model/parameters': 3151904,
-         'model/GFLOPs': 8.746,
-         'model/speed_ONNX(ms)': 41.244,
-         'model/speed_TensorRT(ms)': 3.211,
-         'model/speed_PyTorch(ms)': 18.755}
+    Example:
+        YOLOv8n info for loggers
+        ```python
+        results = {'model/parameters': 3151904,
+                   'model/GFLOPs': 8.746,
+                   'model/speed_ONNX(ms)': 41.244,
+                   'model/speed_TensorRT(ms)': 3.211,
+                   'model/speed_PyTorch(ms)': 18.755}
+        ```
     """
     if trainer.args.profile:  # profile ONNX and TensorRT times
         from ultralytics.utils.benchmarks import ProfileModels
+
         results = ProfileModels([trainer.last], device=trainer.device).profile()[0]
-        results.pop('model/name')
+        results.pop("model/name")
     else:  # only return PyTorch times from most recent validation
         results = {
-            'model/parameters': get_num_params(trainer.model),
-            'model/GFLOPs': round(get_flops(trainer.model), 3)}
-        results['model/speed_PyTorch(ms)'] = round(trainer.validator.speed['inference'], 3)
+            "model/parameters": get_num_params(trainer.model),
+            "model/GFLOPs": round(get_flops(trainer.model), 3),
+        }
+        results["model/speed_PyTorch(ms)"] = round(trainer.validator.speed["inference"], 3)
     return results
 
 
 def get_flops(model, imgsz=640):
     """Return a YOLO model's FLOPs."""
+    if not thop:
+        return 0.0  # if not installed return 0.0 GFLOPs
+
     try:
         model = de_parallel(model)
         p = next(model.parameters())
-        stride = max(int(model.stride.max()), 32) if hasattr(model, 'stride') else 32  # max stride
-        im = torch.empty((1, p.shape[1], stride, stride), device=p.device)  # input image in BCHW format
-        flops = thop.profile(deepcopy(model), inputs=[im], verbose=False)[0] / 1E9 * 2 if thop else 0  # stride GFLOPs
-        imgsz = imgsz if isinstance(imgsz, list) else [imgsz, imgsz]  # expand if int/float
-        return flops * imgsz[0] / stride * imgsz[1] / stride  # 640x640 GFLOPs
+        if not isinstance(imgsz, list):
+            imgsz = [imgsz, imgsz]  # expand if int/float
+        # Profile at the actual image size; stride-sized inputs are not valid for all
+        # architectures (e.g. RTDETR), so the stride-based estimate is kept only as a comment:
+        # stride = max(int(model.stride.max()), 32) if hasattr(model, "stride") else 32  # max stride
+        # im = torch.empty((1, p.shape[1], stride, stride), device=p.device)  # input image in BCHW format
+        # flops = thop.profile(deepcopy(model), inputs=[im], verbose=False)[0] / 1e9 * 2  # stride GFLOPs
+        # return flops * imgsz[0] / stride * imgsz[1] / stride  # imgsz GFLOPs
+        im = torch.empty((1, p.shape[1], *imgsz), device=p.device)  # input image in BCHW format
+        return thop.profile(deepcopy(model), inputs=[im], verbose=False)[0] / 1e9 * 2  # imgsz GFLOPs
     except Exception:
-        return 0
+        return 0.0
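With the stride-based path disabled, the reported GFLOPs scale quadratically with the requested image size, so halving imgsz should roughly quarter the number. A quick check, assuming thop is installed and a local yolov8n.pt:

```python
from ultralytics import YOLO
from ultralytics.utils.torch_utils import get_flops

model = YOLO("yolov8n.pt").model  # underlying nn.Module
print(get_flops(model, imgsz=640))  # ~8.7 GFLOPs for YOLOv8n, matching the example above
print(get_flops(model, imgsz=320))  # roughly a quarter of the 640 value
```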
 
 
 def get_flops_with_torch_profiler(model, imgsz=640):
@@ -242,11 +328,11 @@ def get_flops_with_torch_profiler(model, imgsz=640):
     if TORCH_2_0:
         model = de_parallel(model)
         p = next(model.parameters())
-        stride = (max(int(model.stride.max()), 32) if hasattr(model, 'stride') else 32) * 2  # max stride
+        stride = (max(int(model.stride.max()), 32) if hasattr(model, "stride") else 32) * 2  # max stride
         im = torch.zeros((1, p.shape[1], stride, stride), device=p.device)  # input image in BCHW format
         with torch.profiler.profile(with_flops=True) as prof:
             model(im)
-        flops = sum(x.flops for x in prof.key_averages()) / 1E9
+        flops = sum(x.flops for x in prof.key_averages()) / 1e9
         imgsz = imgsz if isinstance(imgsz, list) else [imgsz, imgsz]  # expand if int/float
         flops = flops * imgsz[0] / stride * imgsz[1] / stride  # 640x640 GFLOPs
         return flops
@@ -266,13 +352,15 @@ def initialize_weights(model):
             m.inplace = True
 
 
-def scale_img(img, ratio=1.0, same_shape=False, gs=32):  # img(16,3,256,416)
-    # Scales img(bs,3,y,x) by ratio constrained to gs-multiple
+def scale_img(img, ratio=1.0, same_shape=False, gs=32):
+    """Scales and pads an image tensor of shape img(bs,3,y,x) based on the given ratio and grid size gs, optionally
+    retaining the original shape.
+    """
     if ratio == 1.0:
         return img
     h, w = img.shape[2:]
     s = (int(h * ratio), int(w * ratio))  # new size
-    img = F.interpolate(img, size=s, mode='bilinear', align_corners=False)  # resize
+    img = F.interpolate(img, size=s, mode="bilinear", align_corners=False)  # resize
     if not same_shape:  # pad/crop img
         h, w = (math.ceil(x * ratio / gs) * gs for x in (h, w))
     return F.pad(img, [0, w - s[1], 0, h - s[0]], value=0.447)  # value = imagenet mean
@@ -288,7 +376,7 @@ def make_divisible(x, divisor):
 
 def copy_attr(a, b, include=(), exclude=()):
     """Copies attributes from object 'b' to object 'a', with options to include/exclude certain attributes."""
     for k, v in b.__dict__.items():
-        if (len(include) and k not in include) or k.startswith('_') or k in exclude:
+        if (len(include) and k not in include) or k.startswith("_") or k in exclude:
             continue
         else:
             setattr(a, k, v)
@@ -296,7 +384,7 @@ def copy_attr(a, b, include=(), exclude=()):
 
 def get_latest_opset():
     """Return second-most (for maturity) recently supported ONNX opset by this version of torch."""
-    return max(int(k[14:]) for k in vars(torch.onnx) if 'symbolic_opset' in k) - 1  # opset
+    return max(int(k[14:]) for k in vars(torch.onnx) if "symbolic_opset" in k) - 1  # opset
 
 
 def intersect_dicts(da, db, exclude=()):
@@ -316,7 +404,7 @@ def de_parallel(model):
 
 def one_cycle(y1=0.0, y2=1.0, steps=100):
     """Returns a lambda function for sinusoidal ramp from y1 to y2 https://arxiv.org/pdf/1812.01187.pdf."""
-    return lambda x: ((1 - math.cos(x * math.pi / steps)) / 2) * (y2 - y1) + y1
+    return lambda x: max((1 - math.cos(x * math.pi / steps)) / 2, 0) * (y2 - y1) + y1
 
 
 def init_seeds(seed=0, deterministic=False):
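The added max(…, 0) clamps the cosine factor so it can never dip below zero; over x in [0, steps] the returned lambda still traces a half-cosine from y1 down to y2. A small numeric check of the schedule shape:

```python
from ultralytics.utils.torch_utils import one_cycle

lf = one_cycle(1.0, 0.01, steps=100)  # e.g. lr0 -> lr0 * lrf over 100 epochs
print([round(lf(x), 3) for x in (0, 25, 50, 75, 100)])
# [1.0, 0.855, 0.505, 0.155, 0.01]
```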
os.environ["PYTHONHASHSEED"] = str(seed) else: - LOGGER.warning('WARNING ⚠️ Upgrade to torch>=2.0.0 for deterministic training.') + LOGGER.warning("WARNING ⚠️ Upgrade to torch>=2.0.0 for deterministic training.") else: torch.use_deterministic_algorithms(False) torch.backends.cudnn.deterministic = False @@ -369,13 +457,13 @@ class ModelEMA: v += (1 - d) * msd[k].detach() # assert v.dtype == msd[k].dtype == torch.float32, f'{k}: EMA {v.dtype}, model {msd[k].dtype}' - def update_attr(self, model, include=(), exclude=('process_group', 'reducer')): + def update_attr(self, model, include=(), exclude=("process_group", "reducer")): """Updates attributes and saves stripped model with optimizer removed.""" if self.enabled: copy_attr(self.ema, model, include, exclude) -def strip_optimizer(f: Union[str, Path] = 'best.pt', s: str = '') -> None: +def strip_optimizer(f: Union[str, Path] = "best.pt", s: str = "") -> None: """ Strip optimizer from 'f' to finalize training, optionally save as 's'. @@ -395,32 +483,26 @@ def strip_optimizer(f: Union[str, Path] = 'best.pt', s: str = '') -> None: strip_optimizer(f) ``` """ - # Use dill (if exists) to serialize the lambda functions where pickle does not do this - try: - import dill as pickle - except ImportError: - import pickle - - x = torch.load(f, map_location=torch.device('cpu')) - if 'model' not in x: - LOGGER.info(f'Skipping {f}, not a valid Ultralytics model.') + x = torch.load(f, map_location=torch.device("cpu")) + if "model" not in x: + LOGGER.info(f"Skipping {f}, not a valid Ultralytics model.") return - if hasattr(x['model'], 'args'): - x['model'].args = dict(x['model'].args) # convert from IterableSimpleNamespace to dict - args = {**DEFAULT_CFG_DICT, **x['train_args']} if 'train_args' in x else None # combine args - if x.get('ema'): - x['model'] = x['ema'] # replace model with ema - for k in 'optimizer', 'best_fitness', 'ema', 'updates': # keys + if hasattr(x["model"], "args"): + x["model"].args = dict(x["model"].args) # convert from IterableSimpleNamespace to dict + args = {**DEFAULT_CFG_DICT, **x["train_args"]} if "train_args" in x else None # combine args + if x.get("ema"): + x["model"] = x["ema"] # replace model with ema + for k in "optimizer", "best_fitness", "ema", "updates": # keys x[k] = None - x['epoch'] = -1 - x['model'].half() # to FP16 - for p in x['model'].parameters(): + x["epoch"] = -1 + x["model"].half() # to FP16 + for p in x["model"].parameters(): p.requires_grad = False - x['train_args'] = {k: v for k, v in args.items() if k in DEFAULT_CFG_KEYS} # strip non-default keys + x["train_args"] = {k: v for k, v in args.items() if k in DEFAULT_CFG_KEYS} # strip non-default keys # x['model'].args = x['train_args'] - torch.save(x, s or f, pickle_module=pickle) - mb = os.path.getsize(s or f) / 1E6 # filesize + torch.save(x, s or f) + mb = os.path.getsize(s or f) / 1e6 # file size LOGGER.info(f"Optimizer stripped from {f},{f' saved as {s},' if s else ''} {mb:.1f}MB") @@ -441,18 +523,20 @@ def profile(input, ops, n=10, device=None): results = [] if not isinstance(device, torch.device): device = select_device(device) - LOGGER.info(f"{'Params':>12s}{'GFLOPs':>12s}{'GPU_mem (GB)':>14s}{'forward (ms)':>14s}{'backward (ms)':>14s}" - f"{'input':>24s}{'output':>24s}") + LOGGER.info( + f"{'Params':>12s}{'GFLOPs':>12s}{'GPU_mem (GB)':>14s}{'forward (ms)':>14s}{'backward (ms)':>14s}" + f"{'input':>24s}{'output':>24s}" + ) for x in input if isinstance(input, list) else [input]: x = x.to(device) x.requires_grad = True for m in ops if isinstance(ops, list) 
@@ -441,18 +523,20 @@ def profile(input, ops, n=10, device=None):
     results = []
     if not isinstance(device, torch.device):
         device = select_device(device)
-    LOGGER.info(f"{'Params':>12s}{'GFLOPs':>12s}{'GPU_mem (GB)':>14s}{'forward (ms)':>14s}{'backward (ms)':>14s}"
-                f"{'input':>24s}{'output':>24s}")
+    LOGGER.info(
+        f"{'Params':>12s}{'GFLOPs':>12s}{'GPU_mem (GB)':>14s}{'forward (ms)':>14s}{'backward (ms)':>14s}"
+        f"{'input':>24s}{'output':>24s}"
+    )
 
     for x in input if isinstance(input, list) else [input]:
         x = x.to(device)
         x.requires_grad = True
         for m in ops if isinstance(ops, list) else [ops]:
-            m = m.to(device) if hasattr(m, 'to') else m  # device
-            m = m.half() if hasattr(m, 'half') and isinstance(x, torch.Tensor) and x.dtype is torch.float16 else m
+            m = m.to(device) if hasattr(m, "to") else m  # device
+            m = m.half() if hasattr(m, "half") and isinstance(x, torch.Tensor) and x.dtype is torch.float16 else m
             tf, tb, t = 0, 0, [0, 0, 0]  # dt forward, backward
             try:
-                flops = thop.profile(m, inputs=[x], verbose=False)[0] / 1E9 * 2 if thop else 0  # GFLOPs
+                flops = thop.profile(m, inputs=[x], verbose=False)[0] / 1e9 * 2 if thop else 0  # GFLOPs
             except Exception:
                 flops = 0
 
@@ -466,13 +550,13 @@ def profile(input, ops, n=10, device=None):
                     t[2] = time_sync()
                 except Exception:  # no backward method
                     # print(e)  # for debug
-                    t[2] = float('nan')
+                    t[2] = float("nan")
                 tf += (t[1] - t[0]) * 1000 / n  # ms per op forward
                 tb += (t[2] - t[1]) * 1000 / n  # ms per op backward
-                mem = torch.cuda.memory_reserved() / 1E9 if torch.cuda.is_available() else 0  # (GB)
-                s_in, s_out = (tuple(x.shape) if isinstance(x, torch.Tensor) else 'list' for x in (x, y))  # shapes
+                mem = torch.cuda.memory_reserved() / 1e9 if torch.cuda.is_available() else 0  # (GB)
+                s_in, s_out = (tuple(x.shape) if isinstance(x, torch.Tensor) else "list" for x in (x, y))  # shapes
                 p = sum(x.numel() for x in m.parameters()) if isinstance(m, nn.Module) else 0  # parameters
-                LOGGER.info(f'{p:12}{flops:12.4g}{mem:>14.3f}{tf:14.4g}{tb:14.4g}{str(s_in):>24s}{str(s_out):>24s}')
+                LOGGER.info(f"{p:12}{flops:12.4g}{mem:>14.3f}{tf:14.4g}{tb:14.4g}{str(s_in):>24s}{str(s_out):>24s}")
                 results.append([p, flops, mem, tf, tb, s_in, s_out])
             except Exception as e:
                 LOGGER.info(e)
@@ -482,25 +566,23 @@ def profile(input, ops, n=10, device=None):
 
 class EarlyStopping:
-    """
-    Early stopping class that stops training when a specified number of epochs have passed without improvement.
-    """
+    """Early stopping class that stops training when a specified number of epochs have passed without improvement."""
 
     def __init__(self, patience=50):
         """
-        Initialize early stopping object
+        Initialize early stopping object.
 
         Args:
             patience (int, optional): Number of epochs to wait after fitness stops improving before stopping.
         """
         self.best_fitness = 0.0  # i.e. mAP
         self.best_epoch = 0
-        self.patience = patience or float('inf')  # epochs to wait after fitness stops improving to stop
+        self.patience = patience or float("inf")  # epochs to wait after fitness stops improving to stop
         self.possible_stop = False  # possible stop may occur next epoch
 
     def __call__(self, epoch, fitness):
         """
-        Check whether to stop training
+        Check whether to stop training.
 
         Args:
             epoch (int): Current epoch of training
@@ -519,8 +601,10 @@ class EarlyStopping:
             self.possible_stop = delta >= (self.patience - 1)  # possible stop may occur next epoch
             stop = delta >= self.patience  # stop training if patience exceeded
             if stop:
-                LOGGER.info(f'Stopping training early as no improvement observed in last {self.patience} epochs. '
-                            f'Best results observed at epoch {self.best_epoch}, best model saved as best.pt.\n'
-                            f'To update EarlyStopping(patience={self.patience}) pass a new patience value, '
-                            f'i.e. `patience=300` or use `patience=0` to disable EarlyStopping.')
+                LOGGER.info(
+                    f"Stopping training early as no improvement observed in last {self.patience} epochs. "
+                    f"Best results observed at epoch {self.best_epoch}, best model saved as best.pt.\n"
+                    f"To update EarlyStopping(patience={self.patience}) pass a new patience value, "
+                    f"i.e. `patience=300` or use `patience=0` to disable EarlyStopping."
+                )
         return stop
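In practice the trainer drives this object once per epoch. A self-contained sketch of the stop behavior with a synthetic fitness curve:

```python
from ultralytics.utils.torch_utils import EarlyStopping

stopper = EarlyStopping(patience=3)
fitnesses = [0.50, 0.58, 0.61, 0.60, 0.59, 0.58]  # best mAP at epoch 2, then a plateau

for epoch, fitness in enumerate(fitnesses):
    if stopper(epoch, fitness):  # True once 3 epochs pass without a new best
        print(f"stopped at epoch {epoch}, best was epoch {stopper.best_epoch}")  # 5 and 2
        break
```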
+ """ + infer_inputs = [] + input_format = inputs[0].dtype + for i, x in enumerate(inputs): + if x.dtype != self.np_input_formats[i]: + x = x.astype(self.np_input_formats[i]) + infer_input = self.InferInput(self.input_names[i], [*x.shape], self.input_formats[i].replace("TYPE_", "")) + infer_input.set_data_from_numpy(x) + infer_inputs.append(infer_input) + + infer_outputs = [self.InferRequestedOutput(output_name) for output_name in self.output_names] + outputs = self.triton_client.infer(model_name=self.endpoint, inputs=infer_inputs, outputs=infer_outputs) + + return [outputs.as_numpy(output_name).astype(input_format) for output_name in self.output_names] diff --git a/ultralytics/utils/tuner.py b/ultralytics/utils/tuner.py index 015e596..305c60a 100644 --- a/ultralytics/utils/tuner.py +++ b/ultralytics/utils/tuner.py @@ -2,16 +2,13 @@ import subprocess -from ultralytics.cfg import TASK2DATA, TASK2METRIC -from ultralytics.utils import DEFAULT_CFG_DICT, LOGGER, NUM_THREADS +from ultralytics.cfg import TASK2DATA, TASK2METRIC, get_save_dir +from ultralytics.utils import DEFAULT_CFG, DEFAULT_CFG_DICT, LOGGER, NUM_THREADS, checks -def run_ray_tune(model, - space: dict = None, - grace_period: int = 10, - gpu_per_trial: int = None, - max_samples: int = 10, - **train_args): +def run_ray_tune( + model, space: dict = None, grace_period: int = 10, gpu_per_trial: int = None, max_samples: int = 10, **train_args +): """ Runs hyperparameter tuning using Ray Tune. @@ -37,49 +34,59 @@ def run_ray_tune(model, result_grid = model.tune(data='coco8.yaml', use_ray=True) ``` """ + + LOGGER.info("💡 Learn about RayTune at https://docs.ultralytics.com/integrations/ray-tune") if train_args is None: train_args = {} try: - subprocess.run('pip install ray[tune]'.split(), check=True) + subprocess.run("pip install ray[tune]<=2.9.3".split(), check=True) # do not add single quotes here + import ray from ray import tune from ray.air import RunConfig from ray.air.integrations.wandb import WandbLoggerCallback from ray.tune.schedulers import ASHAScheduler except ImportError: - raise ModuleNotFoundError('Tuning hyperparameters requires Ray Tune. Install with: pip install "ray[tune]"') + raise ModuleNotFoundError('Ray Tune required but not found. 
diff --git a/ultralytics/utils/tuner.py b/ultralytics/utils/tuner.py
index 015e596..305c60a 100644
--- a/ultralytics/utils/tuner.py
+++ b/ultralytics/utils/tuner.py
@@ -2,16 +2,13 @@
 
 import subprocess
 
-from ultralytics.cfg import TASK2DATA, TASK2METRIC
-from ultralytics.utils import DEFAULT_CFG_DICT, LOGGER, NUM_THREADS
+from ultralytics.cfg import TASK2DATA, TASK2METRIC, get_save_dir
+from ultralytics.utils import DEFAULT_CFG, DEFAULT_CFG_DICT, LOGGER, NUM_THREADS, checks
 
 
-def run_ray_tune(model,
-                 space: dict = None,
-                 grace_period: int = 10,
-                 gpu_per_trial: int = None,
-                 max_samples: int = 10,
-                 **train_args):
+def run_ray_tune(
+    model, space: dict = None, grace_period: int = 10, gpu_per_trial: int = None, max_samples: int = 10, **train_args
+):
     """
     Runs hyperparameter tuning using Ray Tune.
 
@@ -37,49 +34,59 @@ def run_ray_tune(model,
         result_grid = model.tune(data='coco8.yaml', use_ray=True)
         ```
     """
+
+    LOGGER.info("💡 Learn about RayTune at https://docs.ultralytics.com/integrations/ray-tune")
     if train_args is None:
         train_args = {}
 
     try:
-        subprocess.run('pip install ray[tune]'.split(), check=True)
+        subprocess.run("pip install ray[tune]<=2.9.3".split(), check=True)  # do not add single quotes here
+
+        import ray
         from ray import tune
         from ray.air import RunConfig
         from ray.air.integrations.wandb import WandbLoggerCallback
         from ray.tune.schedulers import ASHAScheduler
     except ImportError:
-        raise ModuleNotFoundError('Tuning hyperparameters requires Ray Tune. Install with: pip install "ray[tune]"')
+        raise ModuleNotFoundError('Ray Tune required but not found. To install run: pip install "ray[tune]<=2.9.3"')
 
     try:
         import wandb
 
-        assert hasattr(wandb, '__version__')
+        assert hasattr(wandb, "__version__")
     except (ImportError, AssertionError):
         wandb = False
 
+    checks.check_version(ray.__version__, "<=2.9.3", "ray")
     default_space = {
         # 'optimizer': tune.choice(['SGD', 'Adam', 'AdamW', 'NAdam', 'RAdam', 'RMSProp']),
-        'lr0': tune.uniform(1e-5, 1e-1),
-        'lrf': tune.uniform(0.01, 1.0),  # final OneCycleLR learning rate (lr0 * lrf)
-        'momentum': tune.uniform(0.6, 0.98),  # SGD momentum/Adam beta1
-        'weight_decay': tune.uniform(0.0, 0.001),  # optimizer weight decay 5e-4
-        'warmup_epochs': tune.uniform(0.0, 5.0),  # warmup epochs (fractions ok)
-        'warmup_momentum': tune.uniform(0.0, 0.95),  # warmup initial momentum
-        'box': tune.uniform(0.02, 0.2),  # box loss gain
-        'cls': tune.uniform(0.2, 4.0),  # cls loss gain (scale with pixels)
-        'hsv_h': tune.uniform(0.0, 0.1),  # image HSV-Hue augmentation (fraction)
-        'hsv_s': tune.uniform(0.0, 0.9),  # image HSV-Saturation augmentation (fraction)
-        'hsv_v': tune.uniform(0.0, 0.9),  # image HSV-Value augmentation (fraction)
-        'degrees': tune.uniform(0.0, 45.0),  # image rotation (+/- deg)
-        'translate': tune.uniform(0.0, 0.9),  # image translation (+/- fraction)
-        'scale': tune.uniform(0.0, 0.9),  # image scale (+/- gain)
-        'shear': tune.uniform(0.0, 10.0),  # image shear (+/- deg)
-        'perspective': tune.uniform(0.0, 0.001),  # image perspective (+/- fraction), range 0-0.001
-        'flipud': tune.uniform(0.0, 1.0),  # image flip up-down (probability)
-        'fliplr': tune.uniform(0.0, 1.0),  # image flip left-right (probability)
-        'mosaic': tune.uniform(0.0, 1.0),  # image mixup (probability)
-        'mixup': tune.uniform(0.0, 1.0),  # image mixup (probability)
-        'copy_paste': tune.uniform(0.0, 1.0)}  # segment copy-paste (probability)
+        "lr0": tune.uniform(1e-5, 1e-1),
+        "lrf": tune.uniform(0.01, 1.0),  # final OneCycleLR learning rate (lr0 * lrf)
+        "momentum": tune.uniform(0.6, 0.98),  # SGD momentum/Adam beta1
+        "weight_decay": tune.uniform(0.0, 0.001),  # optimizer weight decay 5e-4
+        "warmup_epochs": tune.uniform(0.0, 5.0),  # warmup epochs (fractions ok)
+        "warmup_momentum": tune.uniform(0.0, 0.95),  # warmup initial momentum
+        "box": tune.uniform(0.02, 0.2),  # box loss gain
+        "cls": tune.uniform(0.2, 4.0),  # cls loss gain (scale with pixels)
+        "hsv_h": tune.uniform(0.0, 0.1),  # image HSV-Hue augmentation (fraction)
+        "hsv_s": tune.uniform(0.0, 0.9),  # image HSV-Saturation augmentation (fraction)
+        "hsv_v": tune.uniform(0.0, 0.9),  # image HSV-Value augmentation (fraction)
+        "degrees": tune.uniform(0.0, 45.0),  # image rotation (+/- deg)
+        "translate": tune.uniform(0.0, 0.9),  # image translation (+/- fraction)
+        "scale": tune.uniform(0.0, 0.9),  # image scale (+/- gain)
+        "shear": tune.uniform(0.0, 10.0),  # image shear (+/- deg)
+        "perspective": tune.uniform(0.0, 0.001),  # image perspective (+/- fraction), range 0-0.001
+        "flipud": tune.uniform(0.0, 1.0),  # image flip up-down (probability)
+        "fliplr": tune.uniform(0.0, 1.0),  # image flip left-right (probability)
+        "bgr": tune.uniform(0.0, 1.0),  # image channel BGR (probability)
+        "mosaic": tune.uniform(0.0, 1.0),  # image mosaic (probability)
+        "mixup": tune.uniform(0.0, 1.0),  # image mixup (probability)
+        "copy_paste": tune.uniform(0.0, 1.0),  # segment copy-paste (probability)
+    }
+
+    # Put the model in ray store
+    task = model.task
+    model_in_store = ray.put(model)
 
     def _tune(config):
         """
@@ -89,42 +96,50 @@ def run_ray_tune(model,
             config (dict): A dictionary of hyperparameters to use for training.
 
         Returns:
-            None.
+            None
         """
-        model._reset_callbacks()
+        model_to_train = ray.get(model_in_store)  # get the model from ray store for tuning
+        model_to_train.reset_callbacks()
         config.update(train_args)
-        model.train(**config)
+        results = model_to_train.train(**config)
+        return results.results_dict
 
     # Get search space
     if not space:
         space = default_space
-        LOGGER.warning('WARNING ⚠️ search space not provided, using default search space.')
+        LOGGER.warning("WARNING ⚠️ search space not provided, using default search space.")
 
     # Get dataset
-    data = train_args.get('data', TASK2DATA[model.task])
-    space['data'] = data
-    if 'data' not in train_args:
+    data = train_args.get("data", TASK2DATA[task])
+    space["data"] = data
+    if "data" not in train_args:
         LOGGER.warning(f'WARNING ⚠️ data not provided, using default "data={data}".')
 
     # Define the trainable function with allocated resources
-    trainable_with_resources = tune.with_resources(_tune, {'cpu': NUM_THREADS, 'gpu': gpu_per_trial or 0})
+    trainable_with_resources = tune.with_resources(_tune, {"cpu": NUM_THREADS, "gpu": gpu_per_trial or 0})
 
     # Define the ASHA scheduler for hyperparameter search
-    asha_scheduler = ASHAScheduler(time_attr='epoch',
-                                   metric=TASK2METRIC[model.task],
-                                   mode='max',
-                                   max_t=train_args.get('epochs') or DEFAULT_CFG_DICT['epochs'] or 100,
-                                   grace_period=grace_period,
-                                   reduction_factor=3)
+    asha_scheduler = ASHAScheduler(
+        time_attr="epoch",
+        metric=TASK2METRIC[task],
+        mode="max",
+        max_t=train_args.get("epochs") or DEFAULT_CFG_DICT["epochs"] or 100,
+        grace_period=grace_period,
+        reduction_factor=3,
+    )
 
     # Define the callbacks for the hyperparameter search
-    tuner_callbacks = [WandbLoggerCallback(project='YOLOv8-tune')] if wandb else []
+    tuner_callbacks = [WandbLoggerCallback(project="YOLOv8-tune")] if wandb else []
 
     # Create the Ray Tune hyperparameter search tuner
-    tuner = tune.Tuner(trainable_with_resources,
-                       param_space=space,
-                       tune_config=tune.TuneConfig(scheduler=asha_scheduler, num_samples=max_samples),
-                       run_config=RunConfig(callbacks=tuner_callbacks, storage_path='./runs/tune'))
+    tune_dir = get_save_dir(DEFAULT_CFG, name="tune").resolve()  # must be absolute dir
+    tune_dir.mkdir(parents=True, exist_ok=True)
+    tuner = tune.Tuner(
+        trainable_with_resources,
+        param_space=space,
+        tune_config=tune.TuneConfig(scheduler=asha_scheduler, num_samples=max_samples),
+        run_config=RunConfig(callbacks=tuner_callbacks, storage_path=tune_dir),
+    )
 
     # Run the hyperparameter search
     tuner.fit()
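End to end, the helper is usually reached via model.tune(use_ray=True), as in the docstring example above; invoked directly it looks like this (dataset and GPU allocation are illustrative):

```python
from ultralytics import YOLO
from ultralytics.utils.tuner import run_ray_tune

model = YOLO("yolov8n.pt")
run_ray_tune(model, grace_period=10, gpu_per_trial=1, max_samples=10, data="coco8.yaml")
```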
diff --git a/ultralytics/yolo/__init__.py b/ultralytics/yolo/__init__.py
deleted file mode 100644
index d1fa558..0000000
--- a/ultralytics/yolo/__init__.py
+++ /dev/null
@@ -1,5 +0,0 @@
-# Ultralytics YOLO 🚀, AGPL-3.0 license
-
-from . import v8
-
-__all__ = 'v8',  # tuple or list
diff --git a/ultralytics/yolo/__pycache__/__init__.cpython-39.pyc b/ultralytics/yolo/__pycache__/__init__.cpython-39.pyc
deleted file mode 100644
index e1746fdb60160318d4b8c61318d2b5598991b4a0..0000000000000000000000000000000000000000
Binary files differ
diff --git a/ultralytics/yolo/cfg/__init__.py b/ultralytics/yolo/cfg/__init__.py
deleted file mode 100644
index 5ea5519..0000000
--- a/ultralytics/yolo/cfg/__init__.py
+++ /dev/null
@@ -1,10 +0,0 @@
-import importlib
-import sys
-
-from ultralytics.utils import LOGGER
-
-# Set modules in sys.modules under their old name
-sys.modules['ultralytics.yolo.cfg'] = importlib.import_module('ultralytics.cfg')
-
-LOGGER.warning("WARNING ⚠️ 'ultralytics.yolo.cfg' is deprecated since '8.0.136' and will be removed in '8.1.0'. "
-               "Please use 'ultralytics.cfg' instead.")
diff --git a/ultralytics/yolo/data/__init__.py b/ultralytics/yolo/data/__init__.py
deleted file mode 100644
index f68391e..0000000
--- a/ultralytics/yolo/data/__init__.py
+++ /dev/null
@@ -1,17 +0,0 @@
-import importlib
-import sys
-
-from ultralytics.utils import LOGGER
-
-# Set modules in sys.modules under their old name
-sys.modules['ultralytics.yolo.data'] = importlib.import_module('ultralytics.data')
-# This is for updating old cls models, or the way in following warning won't work.
-sys.modules['ultralytics.yolo.data.augment'] = importlib.import_module('ultralytics.data.augment')
-
-DATA_WARNING = """WARNING ⚠️ 'ultralytics.yolo.data' is deprecated since '8.0.136' and will be removed in '8.1.0'. Please use 'ultralytics.data' instead.
-Note this warning may be related to loading older models. You can update your model to current structure with:
-    import torch
-    ckpt = torch.load("model.pt")  # applies to both official and custom models
-    torch.save(ckpt, "updated-model.pt")
-"""
-LOGGER.warning(DATA_WARNING)
diff --git a/ultralytics/yolo/engine/__init__.py b/ultralytics/yolo/engine/__init__.py
deleted file mode 100644
index 794efcd..0000000
--- a/ultralytics/yolo/engine/__init__.py
+++ /dev/null
@@ -1,10 +0,0 @@
-import importlib
-import sys
-
-from ultralytics.utils import LOGGER
-
-# Set modules in sys.modules under their old name
-sys.modules['ultralytics.yolo.engine'] = importlib.import_module('ultralytics.engine')
-
-LOGGER.warning("WARNING ⚠️ 'ultralytics.yolo.engine' is deprecated since '8.0.136' and will be removed in '8.1.0'. "
-               "Please use 'ultralytics.engine' instead.")
diff --git a/ultralytics/yolo/engine/__pycache__/__init__.cpython-39.pyc b/ultralytics/yolo/engine/__pycache__/__init__.cpython-39.pyc
deleted file mode 100644
index 33d188c03121d5a4f446871d15e7b552afc1ec12..0000000000000000000000000000000000000000
Binary files differ
diff --git a/ultralytics/yolo/utils/__init__.py b/ultralytics/yolo/utils/__init__.py
deleted file mode 100644
index 71557b0..0000000
--- a/ultralytics/yolo/utils/__init__.py
+++ /dev/null
@@ -1,15 +0,0 @@
-import importlib
-import sys
-
-from ultralytics.utils import LOGGER
-
-# Set modules in sys.modules under their old name
-sys.modules['ultralytics.yolo.utils'] = importlib.import_module('ultralytics.utils')
-
-UTILS_WARNING = """WARNING ⚠️ 'ultralytics.yolo.utils' is deprecated since '8.0.136' and will be removed in '8.1.0'. Please use 'ultralytics.utils' instead.
-Note this warning may be related to loading older models. You can update your model to current structure with:
-    import torch
-    ckpt = torch.load("model.pt")  # applies to both official and custom models
-    torch.save(ckpt, "updated-model.pt")
-"""
-LOGGER.warning(UTILS_WARNING)
diff --git a/ultralytics/yolo/utils/__pycache__/__init__.cpython-39.pyc b/ultralytics/yolo/utils/__pycache__/__init__.cpython-39.pyc
deleted file mode 100644
index 4aa537063915e76bf52eb05edca12531260271e8..0000000000000000000000000000000000000000
Binary files differ
diff --git a/ultralytics/yolo/v8/__init__.py b/ultralytics/yolo/v8/__init__.py
deleted file mode 100644
index 51adf81..0000000
--- a/ultralytics/yolo/v8/__init__.py
+++ /dev/null
@@ -1,10 +0,0 @@
-import importlib
-import sys
-
-from ultralytics.utils import LOGGER
-
-# Set modules in sys.modules under their old name
-sys.modules['ultralytics.yolo.v8'] = importlib.import_module('ultralytics.models.yolo')
-
-LOGGER.warning("WARNING ⚠️ 'ultralytics.yolo.v8' is deprecated since '8.0.136' and will be removed in '8.1.0'. "
-               "Please use 'ultralytics.models.yolo' instead.")
diff --git a/ultralytics/yolo/v8/__pycache__/__init__.cpython-39.pyc b/ultralytics/yolo/v8/__pycache__/__init__.cpython-39.pyc
deleted file mode 100644
index 063c3ba011c80562b8c83a829a40dd1c22ba5a65..0000000000000000000000000000000000000000
Binary files differ
diff --git a/utils/__pycache__/dataloaders.cpython-39.pyc b/utils/__pycache__/dataloaders.cpython-39.pyc
index c77703a2a567690df93ad96b03849d7c5af821f1..8da62af4153631e4490bf3227f52b6b68f2530d3 100644
Binary files differ
diff --git a/utils/__pycache__/getsource.cpython-39.pyc b/utils/__pycache__/getsource.cpython-39.pyc
index 2253c09642764b26383502212488666406d76a78..51e647147f6caadcb72c69fd5006877d6f2bb871 100644
Binary files differ
diff --git a/utils/dataloaders.py b/utils/dataloaders.py
index 406009c..eeceb2b 100644
--- a/utils/dataloaders.py
+++ b/utils/dataloaders.py
@@ -300,7 +300,9 @@ class LoadImages:
                 ret_val, im0 = self.cap.read()
 
             self.frame += 1
-            im0 = self._cv2_rotate(im0)  # for use if cv2 autorotation is False
+
+            # if self.orientation == 270:
+            #     im0 = cv2.rotate(im0, cv2.ROTATE_90_COUNTERCLOCKWISE)  # for use if cv2 autorotation is False
             s = f'video {self.count + 1}/{self.nf} ({self.frame}/{self.frames}) {path}: '
 
         else:
@@ -329,14 +331,14 @@ def _cv2_rotate(self, im):
         # Rotate a cv2 video manually
-        # if self.orientation == 0:
-        #     return cv2.rotate(im, cv2.ROTATE_90_CLOCKWISE)
-        # elif self.orientation == 180:
-        #     return cv2.rotate(im, cv2.ROTATE_90_COUNTERCLOCKWISE)
-        # elif self.orientation == 90:
-        #     return cv2.rotate(im, cv2.ROTATE_180)
-        if self.orientation == 270:
+        if self.orientation == 0:
+            return cv2.rotate(im, cv2.ROTATE_90_CLOCKWISE)
+        elif self.orientation == 180:
+            return cv2.rotate(im, cv2.ROTATE_90_COUNTERCLOCKWISE)
+        elif self.orientation == 90:
+            return cv2.rotate(im, cv2.ROTATE_180)
+        # if self.orientation == 270:
+        #     return cv2.rotate(im, cv2.ROTATE_90_COUNTERCLOCKWISE)
         return im
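The orientation values compared above come from the video stream's rotation metadata; the re-enabled branches map them onto OpenCV's fixed rotation constants. A standalone sketch of the same mapping, assuming OpenCV >= 4.5 for the orientation properties:

```python
import cv2

# Degrees of stream rotation metadata -> OpenCV rotate codes, mirroring _cv2_rotate above
ROTATIONS = {0: cv2.ROTATE_90_CLOCKWISE, 180: cv2.ROTATE_90_COUNTERCLOCKWISE, 90: cv2.ROTATE_180}

cap = cv2.VideoCapture("video.mp4")
cap.set(cv2.CAP_PROP_ORIENTATION_AUTO, 0)  # decode frames without autorotation
orientation = int(cap.get(cv2.CAP_PROP_ORIENTATION_META))

ok, frame = cap.read()
if ok and orientation in ROTATIONS:
    frame = cv2.rotate(frame, ROTATIONS[orientation])
```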