From a95db2a8fba893e11bfab4dcfd61589d72904c0c Mon Sep 17 00:00:00 2001
From: 李晨
Date: Thu, 17 Oct 2024 19:14:27 +0800
Subject: [PATCH] 3588 adaptation
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 Dockerfile          |  6 +++++-
 anchors.py          |  4 ++--
 detect.py           | 15 +++++++++------
 models/yolov5l.yaml |  6 +++---
 models/yolov5m.yaml |  6 +++---
 requirements.txt    | 25 ++++++++++++-------------
 spilt_train_val.py  | 14 +++++++++++---
 train_zhanting.py   | 29 ++++++++++++++++++-----------
 voc_label.py        | 28 +++++++++++++++-------------
 9 files changed, 78 insertions(+), 55 deletions(-)
 mode change 100755 => 100644 models/yolov5m.yaml
 mode change 100755 => 100644 train_zhanting.py

diff --git a/Dockerfile b/Dockerfile
index c0484e5..2ae6196 100755
--- a/Dockerfile
+++ b/Dockerfile
@@ -1,5 +1,5 @@
 # Start FROM Nvidia PyTorch image https://ngc.nvidia.com/catalog/containers/nvidia:pytorch
-FROM nvcr.io/nvidia/pytorch:21.03-py3
+FROM nvcr.io/nvidia/pytorch:23.06-py3
 
 # Install linux packages
 RUN apt update && apt install -y zip htop screen libgl1-mesa-glx
@@ -20,6 +20,10 @@ COPY . /usr/src/app
 # Set environment variables
 ENV HOME=/usr/src/app
 
+EXPOSE 8000
+
+CMD ["python", "ieemoo-ai-zhanting.py"]
+
 
 # --------------------------------------------------- Extras Below ---------------------------------------------------

diff --git a/anchors.py b/anchors.py
index 2df94db..7de9868 100644
--- a/anchors.py
+++ b/anchors.py
@@ -5,8 +5,8 @@ import numpy as np
 import xml.etree.cElementTree as et
 from kmeans import kmeans, avg_iou
 
-FILE_ROOT = "/home/nxy/nxy_project/python_project/Data/zhanting_add/"  # root path
-ANNOTATION_ROOT = "xmls"  # dataset annotation folder
+FILE_ROOT = "paper_data/"  # root path
+ANNOTATION_ROOT = "Annotations"  # dataset annotation folder
 ANNOTATION_PATH = FILE_ROOT + ANNOTATION_ROOT
 
 ANCHORS_TXT_PATH = "data/anchors.txt"

diff --git a/detect.py b/detect.py
index 1da7335..d567b87 100755
--- a/detect.py
+++ b/detect.py
@@ -15,7 +15,7 @@ from utils.plots import plot_one_box
 from utils.torch_utils import select_device, load_classifier, time_synchronized
 
 
-def detect(opt, save_img=False):
+def detect(opt, model, stride, save_img=False):
     source, weights, view_img, save_txt, imgsz = opt.source, opt.weights, opt.view_img, opt.save_txt, opt.img_size
     save_img = not opt.nosave and not source.endswith('.txt')  # save inference images
     webcam = source.isnumeric() or source.endswith('.txt') or source.lower().startswith(
@@ -31,12 +31,12 @@ def detect(opt, model, stride, save_img=False):
     half = device.type != 'cpu'  # half precision only supported on CUDA
     # Load model
-    model = attempt_load(weights, map_location=device)  # load FP32 model
-    stride = int(model.stride.max())  # model stride
+# model = attempt_load(weights, map_location=device)  # load FP32 model
+# stride = int(model.stride.max())  # model stride
     imgsz = check_img_size(imgsz, s=stride)  # check img_size
-    if half:
-        model.half()  # to FP16
-    model.eval()
+# if half:
+#     model.half()  # to FP16
+# model.eval()
 
     # Second-stage classifier
     classify = False
     if classify:

diff --git a/models/yolov5l.yaml b/models/yolov5l.yaml
index c0eab64..420daca 100755
--- a/models/yolov5l.yaml
+++ b/models/yolov5l.yaml
@@ -8,9 +8,9 @@ anchors:
   #- [10,13, 16,30, 33,23]  # P3/8
   #- [30,61, 62,45, 59,119]  # P4/16
   #- [116,90, 156,198, 373,326]  # P5/32
-  - [87,51, 80,84, 142,66]  # P3/8
-  - [98,156, 139,112, 238,72]  # P4/16
-  - [238,120, 177,180, 277,198]  # P5/32
+  - [109,52, 78,81, 96,152]  # P3/8
+  - [139,106, 230,70, 160,172]  # P4/16
+  - [241,126, 217,202, 307,201]  # P5/32
 
 # YOLOv5 backbone
 backbone:
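
Note on the detect.py hunk above: detect() no longer loads the weights itself; the caller is expected to load the model once, derive its stride, and pass both in. That lets the service the Dockerfile now launches (CMD ["python", "ieemoo-ai-zhanting.py"], listening on the EXPOSEd port 8000) keep a single model in memory across requests instead of re-reading the checkpoint per call. A minimal sketch of such a caller, using the repo's own attempt_load/select_device helpers; the argument list below is trimmed and illustrative, not the actual entrypoint, and a real caller must supply every opt.* field detect() reads:

    # Illustrative caller for the refactored detect(opt, model, stride).
    import argparse

    import torch

    from detect import detect
    from models.experimental import attempt_load
    from utils.torch_utils import select_device

    parser = argparse.ArgumentParser()
    parser.add_argument('--weights', type=str, default='weights/yolov5l.pt')
    parser.add_argument('--source', type=str, default='data/images')
    parser.add_argument('--img-size', type=int, default=640)
    parser.add_argument('--device', default='')
    # ...conf-thres, iou-thres, save-txt, nosave, etc. omitted for brevity...
    opt = parser.parse_args()

    device = select_device(opt.device)
    model = attempt_load(opt.weights, map_location=device)  # FP32 model, loaded once
    stride = int(model.stride.max())                        # model stride
    if device.type != 'cpu':
        model.half()  # FP16 on CUDA, mirroring the lines commented out in detect()
    model.eval()

    with torch.no_grad():
        detect(opt, model, stride)  # the same model object can serve repeated calls
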
diff --git a/models/yolov5m.yaml b/models/yolov5m.yaml
old mode 100755
new mode 100644
index bb7c7bf..2df20e9
--- a/models/yolov5m.yaml
+++ b/models/yolov5m.yaml
@@ -8,9 +8,9 @@ anchors:
   #- [10,13, 16,30, 33,23]  # P3/8
   #- [30,61, 62,45, 59,119]  # P4/16
   #- [116,90, 156,198, 373,326]  # P5/32
-  - [87,51, 80,84, 142,66]  # P3/8
-  - [98,156, 139,112, 238,72]  # P4/16
-  - [238,120, 177,180, 277,198]  # P5/32
+  - [109,52, 78,81, 96,152]  # P3/8
+  - [139,106, 230,70, 160,172]  # P4/16
+  - [241,126, 217,202, 307,201]  # P5/32
 
 # YOLOv5 backbone
 backbone:

diff --git a/requirements.txt b/requirements.txt
index b3b7c3f..766103a 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,21 +1,20 @@
-esdk_obs_python==3.21.8
+apache_skywalking==0.7.0
+coremltools==5.2.0
 Flask==2.0.0
 gevent==21.1.2
 matplotlib==3.4.1
-numpy==1.20.2
-esdk-obs-python --trusted-host pypi.org
-opencv_python==4.5.1.48
-opencv-contrib-python==4.5.5.64
-Pillow==9.1.0
-scipy==1.6.2
-setuptools==49.6.0
-coremltools==5.2.0
+numpy==1.22.4
 onnx==1.7.0
+opencv_contrib_python==4.5.5.64
 pandas==1.2.4
-pycocotools==2.0.2
-PyYAML==6.0
-requests==2.25.1
+Pillow==10.0.0
+pycocotools==2.0
+PyYAML==6.0.1
+requests==2.19.1
+scipy==1.5.3
 seaborn==0.11.1
+setuptools==67.7.2
 thop==0.0.31.post2005241907
+torch==1.8.2+cu111
+torchvision==0.9.2+cu111
 tqdm==4.60.0
-ml-collections==0.1.1

diff --git a/spilt_train_val.py b/spilt_train_val.py
index c205366..bf2bc3f 100644
--- a/spilt_train_val.py
+++ b/spilt_train_val.py
@@ -5,9 +5,9 @@ import argparse
 
 parser = argparse.ArgumentParser()
 # Path to the xml files; adjust for your own data (xml files usually live under Annotations)
-parser.add_argument('--xml_path', default='/home/nxy/nxy_project/python_project/Data/paper_data/Annotations', type=str, help='input xml label path')
+parser.add_argument('--xml_path', default='paper_data/Annotations', type=str, help='input xml label path')
 # Dataset split; point this at ImageSets/Main under your own data
-parser.add_argument('--txt_path', default='/home/nxy/nxy_project/python_project/Data/paper_data/ImageSets/Main', type=str, help='output txt label path')
+parser.add_argument('--txt_path', default='paper_data/ImageSets/Main', type=str, help='output txt label path')
 opt = parser.parse_args()
 
 trainval_percent = 1.0
@@ -30,14 +30,21 @@
 file_test = open(txtsavepath + '/test.txt', 'w')
 file_train = open(txtsavepath + '/train.txt', 'w')
 file_val = open(txtsavepath + '/val.txt', 'w')
 
+addtrain_path = r"D:\PycharmProjects\Zhanting\yolov5_1\img_data\getimg_6.30"
 for i in list_index:
     name = total_xml[i][:-4] + '\n'
+    addimg_name = name.strip() + ".jpg"
+    # print(addimg_name, type(addimg_name), len(addimg_name))
     if i in trainval:
         file_trainval.write(name)
         if i in train:
             file_train.write(name)
         else:
-            file_val.write(name)
+            if addimg_name in os.listdir(addtrain_path):  # force these images into the training set
+                print("addimg_name:", addimg_name)
+                file_train.write(name)
+            else:
+                file_val.write(name)
     else:
         file_test.write(name)
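
Note on requirements.txt: the torch==1.8.2+cu111 / torchvision==0.9.2+cu111 pins are CUDA builds that are not hosted on PyPI; pip needs PyTorch's own wheel index (the 1.8 LTS channel) to resolve them.

Note on the spilt_train_val.py hunk above: the new override promotes any sample whose jpg exists in addtrain_path from the val split into the train split, but it calls os.listdir(addtrain_path) inside the loop, rescanning the directory once per xml. A behavior-preserving sketch with the listing hoisted into a set; this is a drop-in for the loop in the script (list_index, total_xml, and the file handles are defined earlier in spilt_train_val.py):

    import os

    addtrain_path = r"D:\PycharmProjects\Zhanting\yolov5_1\img_data\getimg_6.30"
    addtrain_imgs = set(os.listdir(addtrain_path))  # scan the folder once; O(1) lookups

    for i in list_index:
        name = total_xml[i][:-4] + '\n'
        addimg_name = name.strip() + ".jpg"
        if i in trainval:
            file_trainval.write(name)
            if i in train:
                file_train.write(name)
            elif addimg_name in addtrain_imgs:  # force these images into the training set
                file_train.write(name)
            else:
                file_val.write(name)
        else:
            file_test.write(name)
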
diff --git a/train_zhanting.py b/train_zhanting.py
old mode 100755
new mode 100644
index 0730a64..e8dbab0
--- a/train_zhanting.py
+++ b/train_zhanting.py
@@ -1,7 +1,13 @@
 import argparse
 import logging
 import math
-import os
+import os, sys
+sys.path.append(os.path.dirname(os.path.abspath(__file__)))
+#print(os.path.dirname(os.path.abspath(__file__)))
+#wandb
+import wandb
+#wandb.init(project="ieemoo-ai-zhanting", entity="wb_ht")
+
 import random
 import time
 from copy import deepcopy
@@ -35,10 +41,11 @@ from utils.plots import plot_images, plot_labels, plot_results, plot_evolution
 from utils.torch_utils import ModelEMA, select_device, intersect_dicts, torch_distributed_zero_first, is_parallel
 from utils.wandb_logging.wandb_utils import WandbLogger, check_wandb_resume
 
+
 logger = logging.getLogger(__name__)
 os.environ["CUDA_VISIBLE_DEVICES"] = "0"
-os.environ["CUDA_VISIBLE_DEVICES"] ="1"
+os.environ["CUDA_VISIBLE_DEVICES"] = "1"
 
 def train(hyp, opt, device, tb_writer=None):
     #logger.info(colorstr('hyperparameters: ') + ', '.join(f'{k}={v}' for k, v in hyp.items()))
     save_dir, epochs, batch_size, total_batch_size, weights, rank = \
@@ -459,16 +466,16 @@
 if __name__ == '__main__':
     parser = argparse.ArgumentParser()
     #parser.add_argument('--weights', type=str, default='runs/zhanting/yolov5s_finetune/exp9/weights/best.pt', help='initial weights path')
-    #parser.add_argument('--weights', type=str, default='runs/zhanting/yolov5m_finetune/exp/weights/best.pt', help='initial weights path')
-    parser.add_argument('--weights', type=str, default='weights/yolov5m.pt', help='initial weights path')
+    #parser.add_argument('--weights', type=str, default='runs/zhanting/yolov5m_finetune/exp10/weights/best.pt', help='initial weights path')
+    parser.add_argument('--weights', type=str, default='weights/yolov5l.pt', help='initial weights path')
     #parser.add_argument('--weights', type=str, default='runs/zhanting/yolov5l_finetune/exp7/weights/best.pt', help='initial weights path')
-    parser.add_argument('--cfg', type=str, default='models/yolov5m.yaml', help='model.yaml path')
+    parser.add_argument('--cfg', type=str, default='models/yolov5l.yaml', help='model.yaml path')
     parser.add_argument('--data', type=str, default='data/zhanting.yaml', help='data.yaml path')
     parser.add_argument('--hyp', type=str, default='data/hyp.finetune.yaml', help='hyperparameters path')
     #parser.add_argument('--hyp', type=str, default='data/hyp.scratch.yaml', help='hyperparameters path')
-    parser.add_argument('--epochs', type=int, default=600)
-    parser.add_argument('--batch-size', type=int, default=32, help='total batch size for all GPUs')
-    parser.add_argument('--img-size', nargs='+', type=int, default=[640, 640], help='[train, test] image sizes')
+    parser.add_argument('--epochs', type=int, default=200)
+    parser.add_argument('--batch-size', type=int, default=72, help='total batch size for all GPUs')
+    parser.add_argument('--img-size', nargs='+', type=int, default=[640,640], help='[train, test] image sizes')
     parser.add_argument('--rect', action='store_true', help='rectangular training')
     parser.add_argument('--resume', nargs='?', const=True, default=False, help='resume most recent training')
     parser.add_argument('--nosave', action='store_true', help='only save final checkpoint')
@@ -485,10 +492,10 @@
     parser.add_argument('--sync-bn', action='store_true', help='use SyncBatchNorm, only available in DDP mode')
     parser.add_argument('--local_rank', type=int, default=-1, help='DDP parameter, do not modify')
     parser.add_argument('--workers', type=int, default=8, help='maximum number of dataloader workers')
-    #parser.add_argument('--project', default='runs/zhanting/yolov5s_finetune', help='save to project/name')
-    parser.add_argument('--project', default='runs/zhanting/yolov5m_finetune', help='save to project/name')
+    parser.add_argument('--project', default='runs/zhanting/yolov5l_finetune', help='save to project/name')
+    #parser.add_argument('--project', default='runs/zhanting/yolov5m_finetune', help='save to project/name')
     #parser.add_argument('--project', default='runs/zhanting/yolov5_scratch', help='save to project/name')
-    parser.add_argument('--entity', default=None, help='W&B entity')
+    parser.add_argument('--entity', default="wb_ht", help='W&B entity')
     parser.add_argument('--name', default='exp', help='save to project/name')
     parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment')
     parser.add_argument('--quad', action='store_true', help='quad dataloader')
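
Note on the train_zhanting.py hunks: the duplicate CUDA_VISIBLE_DEVICES assignment is tidied up (consecutive assignments simply overwrite each other, and the variable must be set before CUDA is first initialized to have any effect), and --entity now defaults to "wb_ht", which the W&B logging path passes along when the run is created. The commented-out wandb.init at the top of the file would do the same thing explicitly; a minimal standalone sketch (project and entity names come from this patch; the config keys merely mirror the new argparse defaults):

    import wandb

    # Explicit version of what the W&B logger derives from opt.project / opt.entity.
    run = wandb.init(
        project="ieemoo-ai-zhanting",
        entity="wb_ht",
        config={"epochs": 200, "batch_size": 72, "img_size": 640},
    )
    run.log({"epoch": 0})  # metrics then appear under wb_ht/ieemoo-ai-zhanting
    run.finish()
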
diff --git a/voc_label.py b/voc_label.py
index db1172c..a91f964 100644
--- a/voc_label.py
+++ b/voc_label.py
@@ -1,15 +1,14 @@
-#convert label xml to txt
+# convert label xml to txt
 import xml.etree.ElementTree as ET
 import os
 from os import getcwd
 
 sets = ['train', 'val', 'test']
-classes = ["6925303773908", "6924743915848", "6920152471616", "6920005772716", "6902227018162",
-           "6920459905012", "6972194461407", "6935284412918", "6921489033706", "6904012526494",
-           "6923644272159", "6924882486100", "6956511907458"]
+classes = ['6925303773908', '6924743915848', '6920152471616', '6920005772716', '6902227018162', '6920459905012', '6972194461407', '6935284412918', '6921489033706', '6904012526494', '6923644272159', '6924882486100', '6956511907458']
 abs_path = os.getcwd()
 print(abs_path)
 
+
 def convert(size, box):
     dw = 1. / (size[0])
     dh = 1. / (size[1])
@@ -23,9 +22,10 @@
     h = h * dh
     return x, y, w, h
 
+
 def convert_annotation(image_id):
-    in_file = open(r'/home/nxy/nxy_project/python_project/Data/paper_data/Annotations/%s.xml' % (image_id), encoding='UTF-8')
-    out_file = open(r'/home/nxy/nxy_project/python_project/Data/paper_data/labels/%s.txt' % (image_id), 'w')
+    in_file = open('paper_data/Annotations/%s.xml' % (image_id), encoding='UTF-8')
+    out_file = open('paper_data/labels/%s.txt' % (image_id), 'w')
     tree = ET.parse(in_file)
     root = tree.getroot()
     size = root.find('size')
@@ -35,6 +35,8 @@
         # difficult = obj.find('difficult').text
         # difficult = obj.find('Difficult').text
         cls = obj.find('name').text
+        cls = cls[:13]
+        print("cls:", cls, len(cls))
         # if cls not in classes or int(difficult) == 1:
         #     continue
         cls_id = classes.index(cls)
@@ -51,16 +53,16 @@
         bb = convert((w, h), b)
         out_file.write(str(cls_id) + " " + " ".join([str(a) for a in bb]) + '\n')
 
+
 wd = getcwd()
 for image_set in sets:
-    if not os.path.exists(r'/home/nxy/nxy_project/python_project/Data/paper_data/labels'):
-        os.makedirs(r'/home/nxy/nxy_project/python_project/Data/paper_data/labels')
-    image_ids = open(r'/home/nxy/nxy_project/python_project/Data/paper_data/ImageSets/Main/%s.txt' % (image_set)).read().strip().split()
-    list_file = open('/home/nxy/nxy_project/python_project/Data/paper_data/%s.txt' % (image_set), 'w')
+    if not os.path.exists('paper_data/labels'):
+        os.makedirs('paper_data/labels')
+    image_ids = open('paper_data/ImageSets/Main/%s.txt' % (image_set)).read().strip().split()
+    list_file = open('paper_data/%s.txt' % (image_set), 'w')
     for image_id in image_ids:
         list_file.write(abs_path + '/paper_data/images/%s.jpg\n' % (image_id))
+        print(image_id, "Converting...")
         convert_annotation(image_id)
-        print("Converting...")
-
-    list_file.close()
-#(yv5_1) pfc@ps:/home/nxy/nxy_project/python_project/Data/paper_data/labels$ mv ./* /home/nxy/nxy_project/python_project/Data/zhanting_add/labels/train/
\ No newline at end of file
+    list_file.close()
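
Note on voc_label.py: convert() maps a VOC box given in pixels as (xmin, xmax, ymin, ymax) to YOLO's normalized (x_center, y_center, width, height). A self-contained check of the function; its middle lines, which fall outside the hunks above, are reconstructed from the standard VOC-to-YOLO conversion script this file follows, and the sample box and image size are made up:

    def convert(size, box):
        # size = (img_w, img_h); box = (xmin, xmax, ymin, ymax), as parsed from the xml
        dw = 1. / (size[0])
        dh = 1. / (size[1])
        x = (box[0] + box[1]) / 2.0 - 1  # the -1 offsets VOC's 1-based pixel coordinates
        y = (box[2] + box[3]) / 2.0 - 1
        w = box[1] - box[0]
        h = box[3] - box[2]
        return x * dw, y * dh, w * dw, h * dh

    # A 200x100-pixel box centered at (300, 250) in a 640x480 image:
    print(convert((640, 480), (200, 400, 200, 300)))
    # -> approximately (0.4672, 0.5188, 0.3125, 0.2083)

Note also that the new cls = cls[:13] truncation assumes every <name> in the xml starts with one of the 13-digit barcodes in classes; any other label will still raise ValueError at classes.index(cls).
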