3588 adaptation

2024-10-17 19:14:27 +08:00
parent 8475980895
commit a95db2a8fb
9 changed files with 78 additions and 55 deletions

View File

@@ -1,5 +1,5 @@
# Start FROM Nvidia PyTorch image https://ngc.nvidia.com/catalog/containers/nvidia:pytorch
-FROM nvcr.io/nvidia/pytorch:21.03-py3
+FROM nvcr.io/nvidia/pytorch:23.06-py3
# Install linux packages
RUN apt update && apt install -y zip htop screen libgl1-mesa-glx
@@ -20,6 +20,10 @@ COPY . /usr/src/app
# Set environment variables
ENV HOME=/usr/src/app
EXPOSE 8000
CMD ["python", "ieemoo-ai-zhanting.py"]
# --------------------------------------------------- Extras Below ---------------------------------------------------
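
The base image jump from 21.03 to 23.06 pulls in a far newer PyTorch/CUDA stack than the torch==1.8.2+cu111 the old requirements pinned. A minimal sanity check, assuming you run it once inside the rebuilt container, is:

# Sanity-check the runtime inside nvcr.io/nvidia/pytorch:23.06-py3
# (a sketch; run after `docker build` to confirm what the service will see).
import torch

print(torch.__version__)          # PyTorch version bundled with the 23.06 image
print(torch.version.cuda)         # CUDA toolkit version
print(torch.cuda.is_available())  # True when the GPU is visible to the container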

View File

@@ -5,8 +5,8 @@ import numpy as np
import xml.etree.cElementTree as et
from kmeans import kmeans, avg_iou
-FILE_ROOT = "/home/nxy/nxy_project/python_project/Data/zhanting_add/"  # root path
-ANNOTATION_ROOT = "xmls"  # annotation folder of the dataset
+FILE_ROOT = "paper_data/"  # root path
+ANNOTATION_ROOT = "Annotations"  # annotation folder of the dataset
ANNOTATION_PATH = FILE_ROOT + ANNOTATION_ROOT
ANCHORS_TXT_PATH = "data/anchors.txt"
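
For context, this script reads the ground-truth box sizes from the VOC-style Annotations folder, clusters them with kmeans()/avg_iou, and writes the result to data/anchors.txt; the new anchor rows in the model yamls below presumably come from a run over paper_data. A sketch of the box-size extraction it performs (load_box_sizes is an illustrative name, not a function from the repo):

# Sketch: collect (width, height) pairs from VOC-style xml annotations,
# the input that k-means clusters into anchor boxes. Illustrative only.
import os
import xml.etree.cElementTree as et

def load_box_sizes(annotation_path):
    sizes = []
    for fname in os.listdir(annotation_path):
        if not fname.endswith('.xml'):
            continue
        root = et.parse(os.path.join(annotation_path, fname)).getroot()
        for obj in root.iter('object'):
            box = obj.find('bndbox')
            w = float(box.find('xmax').text) - float(box.find('xmin').text)
            h = float(box.find('ymax').text) - float(box.find('ymin').text)
            sizes.append((w, h))
    return sizes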

View File

@@ -15,7 +15,7 @@ from utils.plots import plot_one_box
from utils.torch_utils import select_device, load_classifier, time_synchronized
-def detect(opt, save_img=False):
+def detect(opt, model, stride, save_img=False):
    source, weights, view_img, save_txt, imgsz = opt.source, opt.weights, opt.view_img, opt.save_txt, opt.img_size
    save_img = not opt.nosave and not source.endswith('.txt')  # save inference images
    webcam = source.isnumeric() or source.endswith('.txt') or source.lower().startswith(
@@ -31,12 +31,15 @@ def detect(opt, save_img=False):
    half = device.type != 'cpu'  # half precision only supported on CUDA
    # Load model
-    model = attempt_load(weights, map_location=device)  # load FP32 model
-    stride = int(model.stride.max())  # model stride
+    # model = attempt_load(weights, map_location=device)  # load FP32 model
+    # stride = int(model.stride.max())  # model stride
    imgsz = check_img_size(imgsz, s=stride)  # check img_size
-    if half:
-        model.half()  # to FP16
-    model.eval()
+    # if half:
+    #     model.half()  # to FP16
+    # model.eval()
+    model = model
    # Second-stage classifier
    classify = False
    if classify:
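
The refactor moves model loading out of detect(): the caller now loads the network once and passes model and stride in, so the service no longer reloads the weights on every request. A sketch of the new calling convention (the weights path and device id are placeholders):

# Load once at service startup, then reuse for every detect() call.
# attempt_load/select_device are the helpers this file already uses.
from models.experimental import attempt_load
from utils.torch_utils import select_device

device = select_device('0')
model = attempt_load('weights/best.pt', map_location=device)  # FP32 model
stride = int(model.stride.max())                              # model stride
model.eval()

# per request:
# detect(opt, model, stride)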

View File

@@ -8,9 +8,9 @@ anchors:
  #- [10,13, 16,30, 33,23] # P3/8
  #- [30,61, 62,45, 59,119] # P4/16
  #- [116,90, 156,198, 373,326] # P5/32
-  - [87,51, 80 ,84, 142,66] # P3/8
-  - [98,156, 139,112, 238,72] # P4/16
-  - [238,120, 177,180, 277,198] # P5/32
+  - [109,52, 78,81, 96,152] # P3/8
+  - [139,106, 230,70, 160,172] # P4/16
+  - [241,126, 217,202, 307,201] # P5/32
# YOLOv5 backbone
backbone:

models/yolov5m.yaml Executable file → Normal file
View File

@@ -8,9 +8,9 @@ anchors:
  #- [10,13, 16,30, 33,23] # P3/8
  #- [30,61, 62,45, 59,119] # P4/16
  #- [116,90, 156,198, 373,326] # P5/32
-  - [87,51, 80 ,84, 142,66] # P3/8
-  - [98,156, 139,112, 238,72] # P4/16
-  - [238,120, 177,180, 277,198] # P5/32
+  - [109,52, 78,81, 96,152] # P3/8
+  - [139,106, 230,70, 160,172] # P4/16
+  - [241,126, 217,202, 307,201] # P5/32
# YOLOv5 backbone
backbone:
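
Each anchor row is a flat width,height list for one detection scale (P3/8, P4/16, P5/32) at the 640x640 training resolution. Unpacking a row into pairs, as a quick illustration:

# How a yaml anchor row reads: three (w, h) anchor boxes per scale.
row = [109, 52, 78, 81, 96, 152]           # the P3/8 row above
anchors = list(zip(row[0::2], row[1::2]))  # [(109, 52), (78, 81), (96, 152)]
print(anchors)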

View File

@@ -1,21 +1,20 @@
esdk_obs_python==3.21.8
apache_skywalking==0.7.0
coremltools==5.2.0
Flask==2.0.0
gevent==21.1.2
matplotlib==3.4.1
numpy==1.20.2
esdk-obs-python --trusted-host pypi.org
opencv_python==4.5.1.48
opencv-contrib-python==4.5.5.64
Pillow==9.1.0
scipy==1.6.2
setuptools==49.6.0
coremltools==5.2.0
numpy==1.22.4
onnx==1.7.0
opencv_contrib_python==4.5.5.64
pandas==1.2.4
pycocotools==2.0.2
PyYAML==6.0
requests==2.25.1
Pillow==10.0.0
pycocotools==2.0
PyYAML==6.0.1
requests==2.19.1
scipy==1.5.3
seaborn==0.11.1
setuptools==67.7.2
thop==0.0.31.post2005241907
torch==1.8.2+cu111
torchvision==0.9.2+cu111
tqdm==4.60.0
ml-collections==0.1.1

View File

@@ -5,9 +5,9 @@ import argparse
parser = argparse.ArgumentParser()
# Path to the xml label files; adjust for your own data (xml files usually live under Annotations)
-parser.add_argument('--xml_path', default='/home/nxy/nxy_project/python_project/Data/paper_data/Annotations', type=str, help='input xml label path')
+parser.add_argument('--xml_path', default='paper_data/Annotations', type=str, help='input xml label path')
# Output path for the dataset split; use ImageSets/Main under your own data
-parser.add_argument('--txt_path', default='/home/nxy/nxy_project/python_project/Data/paper_data/ImageSets/Main', type=str, help='output txt label path')
+parser.add_argument('--txt_path', default='paper_data/ImageSets/Main', type=str, help='output txt label path')
opt = parser.parse_args()
trainval_percent = 1.0
@@ -30,14 +30,22 @@ file_test = open(txtsavepath + '/test.txt', 'w')
file_train = open(txtsavepath + '/train.txt', 'w')
file_val = open(txtsavepath + '/val.txt', 'w')
+addtrain_path = r"D:\PycharmProjects\Zhanting\yolov5_1\img_data\getimg_6.30"
for i in list_index:
    name = total_xml[i][:-4] + '\n'
+    addimg_name = name.strip() + ".jpg"
+    # print(addimg_name, type(addimg_name), len(addimg_name))
    if i in trainval:
        file_trainval.write(name)
        if i in train:
            file_train.write(name)
        else:
+            if addimg_name in os.listdir(addtrain_path):  # move selected samples into the training set
+                print("addimg_name:", addimg_name)
+                file_train.write(name)
+            else:
                file_val.write(name)
+            # file_val.write(name)
    else:
        file_test.write(name)
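
One note on the new branch: os.listdir(addtrain_path) is re-evaluated for every xml file, so the directory is rescanned each iteration. A cheaper equivalent (a sketch, same behavior) snapshots the directory once before the loop:

# Build the membership set once instead of calling os.listdir() per item.
add_imgs = set(os.listdir(addtrain_path))

# ... inside the loop, the check becomes:
# if addimg_name in add_imgs:
#     file_train.write(name)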

train_zhanting.py Executable file → Normal file
View File

@@ -1,7 +1,13 @@
import argparse
import logging
import math
-import os
+import os, sys
+sys.path.append(os.path.dirname(os.path.abspath(__file__)))
+# print(os.path.dirname(os.path.abspath(__file__)))
+# wandb
+import wandb
+# wandb.init(project="ieemoo-ai-zhanting", entity="wb_ht")
import random
import time
from copy import deepcopy
@@ -35,10 +41,11 @@ from utils.plots import plot_images, plot_labels, plot_results, plot_evolution
from utils.torch_utils import ModelEMA, select_device, intersect_dicts, torch_distributed_zero_first, is_parallel
from utils.wandb_logging.wandb_utils import WandbLogger, check_wandb_resume
logger = logging.getLogger(__name__)
-os.environ["CUDA_VISIBLE_DEVICES"] = "0"
-os.environ["CUDA_VISIBLE_DEVICES"] ="1"
+os.environ["CUDA_VISIBLE_DEVICES"] = "1"
def train(hyp, opt, device, tb_writer=None):
    # logger.info(colorstr('hyperparameters: ') + ', '.join(f'{k}={v}' for k, v in hyp.items()))
    save_dir, epochs, batch_size, total_batch_size, weights, rank = \
@@ -459,16 +466,16 @@ def train(hyp, opt, device, tb_writer=None):
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    # parser.add_argument('--weights', type=str, default='runs/zhanting/yolov5s_finetune/exp9/weights/best.pt', help='initial weights path')
-    # parser.add_argument('--weights', type=str, default='runs/zhanting/yolov5m_finetune/exp/weights/best.pt', help='initial weights path')
-    parser.add_argument('--weights', type=str, default='weights/yolov5m.pt', help='initial weights path')
+    # parser.add_argument('--weights', type=str, default='runs/zhanting/yolov5m_finetune/exp10/weights/best.pt', help='initial weights path')
+    parser.add_argument('--weights', type=str, default='weights/yolov5l.pt', help='initial weights path')
+    # parser.add_argument('--weights', type=str, default='runs/zhanting/yolov5l_finetune/exp7/weights/best.pt', help='initial weights path')
-    parser.add_argument('--cfg', type=str, default='models/yolov5m.yaml', help='model.yaml path')
+    parser.add_argument('--cfg', type=str, default='models/yolov5l.yaml', help='model.yaml path')
    parser.add_argument('--data', type=str, default='data/zhanting.yaml', help='data.yaml path')
    parser.add_argument('--hyp', type=str, default='data/hyp.finetune.yaml', help='hyperparameters path')
    # parser.add_argument('--hyp', type=str, default='data/hyp.scratch.yaml', help='hyperparameters path')
-    parser.add_argument('--epochs', type=int, default=600)
-    parser.add_argument('--batch-size', type=int, default=32, help='total batch size for all GPUs')
-    parser.add_argument('--img-size', nargs='+', type=int, default=[640, 640], help='[train, test] image sizes')
+    parser.add_argument('--epochs', type=int, default=200)
+    parser.add_argument('--batch-size', type=int, default=72, help='total batch size for all GPUs')
+    parser.add_argument('--img-size', nargs='+', type=int, default=[640,640], help='[train, test] image sizes')
    parser.add_argument('--rect', action='store_true', help='rectangular training')
    parser.add_argument('--resume', nargs='?', const=True, default=False, help='resume most recent training')
    parser.add_argument('--nosave', action='store_true', help='only save final checkpoint')
@@ -485,10 +492,10 @@ if __name__ == '__main__':
    parser.add_argument('--sync-bn', action='store_true', help='use SyncBatchNorm, only available in DDP mode')
    parser.add_argument('--local_rank', type=int, default=-1, help='DDP parameter, do not modify')
    parser.add_argument('--workers', type=int, default=8, help='maximum number of dataloader workers')
    # parser.add_argument('--project', default='runs/zhanting/yolov5s_finetune', help='save to project/name')
-    parser.add_argument('--project', default='runs/zhanting/yolov5m_finetune', help='save to project/name')
+    parser.add_argument('--project', default='runs/zhanting/yolov5l_finetune', help='save to project/name')
+    # parser.add_argument('--project', default='runs/zhanting/yolov5m_finetune', help='save to project/name')
    # parser.add_argument('--project', default='runs/zhanting/yolov5_scratch', help='save to project/name')
-    parser.add_argument('--entity', default=None, help='W&B entity')
+    parser.add_argument('--entity', default="wb_ht", help='W&B entity')
    parser.add_argument('--name', default='exp', help='save to project/name')
    parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment')
    parser.add_argument('--quad', action='store_true', help='quad dataloader')
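
Taken together, the defaults now fine-tune yolov5l instead of yolov5m, for 200 epochs at batch 72, logging to the wb_ht W&B entity. A sketch of the effective configuration `python train_zhanting.py` runs with (an illustrative summary, names mirror the parser flags above, not repo code):

defaults = dict(
    weights='weights/yolov5l.pt',
    cfg='models/yolov5l.yaml',
    data='data/zhanting.yaml',
    hyp='data/hyp.finetune.yaml',
    epochs=200,
    batch_size=72,                            # total across all GPUs
    img_size=[640, 640],                      # [train, test]
    project='runs/zhanting/yolov5l_finetune',
    entity='wb_ht',                           # W&B entity
)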

View File

@@ -1,15 +1,14 @@
-#Convert xml labels to txt
+# Convert xml labels to txt
import xml.etree.ElementTree as ET
import os
from os import getcwd
sets = ['train', 'val', 'test']
-classes = ["6925303773908", "6924743915848", "6920152471616", "6920005772716", "6902227018162",
-           "6920459905012", "6972194461407", "6935284412918", "6921489033706", "6904012526494",
-           "6923644272159", "6924882486100", "6956511907458"]
+classes = ['6925303773908','6924743915848','6920152471616','6920005772716','6902227018162','6920459905012','6972194461407','6935284412918','6921489033706','6904012526494','6923644272159','6924882486100','6956511907458']
abs_path = os.getcwd()
print(abs_path)
def convert(size, box):
    dw = 1. / (size[0])
    dh = 1. / (size[1])
@@ -23,9 +22,10 @@ def convert(size, box):
    h = h * dh
    return x, y, w, h
def convert_annotation(image_id):
-    in_file = open(r'/home/nxy/nxy_project/python_project/Data/paper_data/Annotations/%s.xml' % (image_id), encoding='UTF-8')
-    out_file = open(r'/home/nxy/nxy_project/python_project/Data/paper_data/labels/%s.txt' % (image_id), 'w')
+    in_file = open('paper_data/Annotations/%s.xml' % (image_id), encoding='UTF-8')
+    out_file = open('paper_data/labels/%s.txt' % (image_id), 'w')
    tree = ET.parse(in_file)
    root = tree.getroot()
    size = root.find('size')
@@ -35,6 +35,8 @@ def convert_annotation(image_id):
        # difficult = obj.find('difficult').text
        # difficult = obj.find('Difficult').text
        cls = obj.find('name').text
+        cls = cls[:13]
+        print("cls:", cls, len(cls))
        # if cls not in classes or int(difficult) == 1:
        #     continue
        cls_id = classes.index(cls)
@@ -51,16 +53,16 @@ def convert_annotation(image_id):
        bb = convert((w, h), b)
        out_file.write(str(cls_id) + " " + " ".join([str(a) for a in bb]) + '\n')
wd = getcwd()
for image_set in sets:
-    if not os.path.exists(r'/home/nxy/nxy_project/python_project/Data/paper_data/labels'):
-        os.makedirs(r'/home/nxy/nxy_project/python_project/Data/paper_data/labels')
-    image_ids = open(r'/home/nxy/nxy_project/python_project/Data/paper_data/ImageSets/Main/%s.txt' % (image_set)).read().strip().split()
-    list_file = open('/home/nxy/nxy_project/python_project/Data/paper_data/%s.txt' % (image_set), 'w')
+    if not os.path.exists('paper_data/labels'):
+        os.makedirs('paper_data/labels')
+    image_ids = open('paper_data/ImageSets/Main/%s.txt' % (image_set)).read().strip().split()
+    list_file = open('paper_data/%s.txt' % (image_set), 'w')
    for image_id in image_ids:
        list_file.write(abs_path + '/paper_data/images/%s.jpg\n' % (image_id))
        print(image_id, "Converting...")
        convert_annotation(image_id)
    print("Converting...")
-list_file.close()
-#(yv5_1) pfc@ps:/home/nxy/nxy_project/python_project/Data/paper_data/labels$ mv ./* /home/nxy/nxy_project/python_project/Data/zhanting_add/labels/train/
+    list_file.close()
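
convert() is the usual VOC-to-YOLO normalization: pixel box corners become a center point plus width and height, each divided by the image size. A worked example, assuming the usual (xmin, xmax, ymin, ymax) box order this script family uses:

# Worked example of the normalization convert() applies (a sketch).
# Image 1920x1080, box corners xmin=100, xmax=300, ymin=200, ymax=400:
x = (100 + 300) / 2.0 / 1920   # 0.1042  box center x, normalized
y = (200 + 400) / 2.0 / 1080   # 0.2778  box center y, normalized
w = (300 - 100) / 1920         # 0.1042  box width, normalized
h = (400 - 200) / 1080         # 0.1852  box height, normalized
# Written to the label file as: "<cls_id> 0.1042 0.2778 0.1042 0.1852"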