diff --git a/__pycache__/event_time_specify.cpython-39.pyc b/__pycache__/event_time_specify.cpython-39.pyc
index f810d64..dd44679 100644
Binary files a/__pycache__/event_time_specify.cpython-39.pyc and b/__pycache__/event_time_specify.cpython-39.pyc differ
diff --git a/__pycache__/imgs_inference.cpython-39.pyc b/__pycache__/imgs_inference.cpython-39.pyc
index b9f30b5..981322a 100644
Binary files a/__pycache__/imgs_inference.cpython-39.pyc and b/__pycache__/imgs_inference.cpython-39.pyc differ
diff --git a/__pycache__/move_detect.cpython-39.pyc b/__pycache__/move_detect.cpython-39.pyc
index 2565888..02e5739 100644
Binary files a/__pycache__/move_detect.cpython-39.pyc and b/__pycache__/move_detect.cpython-39.pyc differ
diff --git a/__pycache__/track_reid.cpython-39.pyc b/__pycache__/track_reid.cpython-39.pyc
index c565e61..47bda46 100644
Binary files a/__pycache__/track_reid.cpython-39.pyc and b/__pycache__/track_reid.cpython-39.pyc differ
diff --git a/bclass.py b/bclass.py
new file mode 100644
index 0000000..24cb443
--- /dev/null
+++ b/bclass.py
@@ -0,0 +1,50 @@
+# -*- coding: utf-8 -*-
+"""
+Created on Fri Nov 15 16:23:03 2024
+
+@author: ym
+"""
+
+class CamEvent:
+    def __init__(self, datapath):
+        self.data_path = datapath
+        self.bboxes = None
+        self.bfeats = None
+        self.tboxes = None
+        self.tfeats = None
+
+
+class ShopEvent:
+    def __init__(self, eventpath, stdpath):
+        self.barcode = ""
+        self.event_path = eventpath
+        self.event_type = self.get_event_type(eventpath)
+
+        self.FrontEvent = ""
+        self.BackEvent = ""
+        self.fusion_boxes = None
+        self.fusion_feats = None
+        self.stdfeats = self.get_stdfeats(stdpath)
+        self.weight = None
+        self.imu = None
+
+    def get_event_type(self, eventpath):
+        pass
+
+    def get_stdfeats(self, stdpath):
+        pass
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/contrast/__pycache__/config.cpython-39.pyc b/contrast/__pycache__/config.cpython-39.pyc
index e81b06c..79cd9dd 100644
Binary files a/contrast/__pycache__/config.cpython-39.pyc and b/contrast/__pycache__/config.cpython-39.pyc differ
diff --git a/contrast/__pycache__/feat_inference.cpython-39.pyc b/contrast/__pycache__/feat_inference.cpython-39.pyc
index 725550e..07eb78f 100644
Binary files a/contrast/__pycache__/feat_inference.cpython-39.pyc and b/contrast/__pycache__/feat_inference.cpython-39.pyc differ
diff --git a/contrast/__pycache__/one2n_contrast.cpython-39.pyc b/contrast/__pycache__/one2n_contrast.cpython-39.pyc
index 7e47383..2f9f7ce 100644
Binary files a/contrast/__pycache__/one2n_contrast.cpython-39.pyc and b/contrast/__pycache__/one2n_contrast.cpython-39.pyc differ
diff --git a/contrast/feat_analysisi.py b/contrast/feat_analysisi.py
new file mode 100644
index 0000000..fc3ebe5
--- /dev/null
+++ b/contrast/feat_analysisi.py
@@ -0,0 +1,160 @@
+# -*- coding: utf-8 -*-
+"""
+Created on Wed Nov 20 11:17:29 2024
+
+@author: ym
+"""
+
+import os
+import cv2
+import pickle
+import numpy as np
+from scipy.spatial.distance import cdist
+import matplotlib.pyplot as plt
+
+def save_imgpairs(barcode, imgpaths, matrix, savepath, thresh=(0.4, 0.6), ctype="intra"):
+    if ctype == "intra":
+        rows, cols = np.triu_indices(matrix.shape[0], k=1)  # k=1 excludes the diagonal
+        mask = matrix[rows, cols] < thresh[1]
+        indices = list(zip(rows[mask], cols[mask]))
+    else:
+        rows, cols = np.where(matrix > thresh[0])
+        indices = list(zip(rows, cols))
+
+    if len(indices):
+        savepath = os.path.join(savepath, barcode)
+        if not os.path.exists(savepath):
+            os.makedirs(savepath)
+
+        for idx1, idx2 in indices:
+            if len(imgpaths) == 1:
+                img1 = cv2.imread(imgpaths[0][idx1])
+                img2 = cv2.imread(imgpaths[0][idx2])
+            elif len(imgpaths) == 2:
+                img1 = cv2.imread(imgpaths[0][idx1])
+                img2 = cv2.imread(imgpaths[1][idx2])
+
+            simi = matrix[idx1, idx2]
+
+            H1, W1 = img1.shape[:2]
+            H2, W2 = img2.shape[:2]
+            H, W = max((H1, H2)), max((W1, W2))
+            img = (np.ones((H, 2*W, 3)) * np.array([255, 128, 128])).astype(np.uint8)
+
+            img[0:H1, 0:W1, :] = img1
+            img[0:H2, (2*W-W2):, :] = img2
+
+            text = f"sim: {simi:.2f}"
+            org = (10, H-10)
+            cv2.putText(img, text, org, fontFace=cv2.FONT_HERSHEY_SIMPLEX, fontScale=0.75,
+                        color=(0, 0, 255), thickness=2, lineType=cv2.LINE_AA)
+            imgpath = os.path.join(savepath, f"{simi:.2f}_{barcode}_{idx1}_{idx2}.png")
+            cv2.imwrite(imgpath, img)
+
+
+def feat_analysis(featpath):
+    savepath = r"D:\exhibition\result\stdfeat"
+
+    InterThresh = (0.4, 0.6)
+
+    featDict, features = [], []
+    for filename in os.listdir(featpath):
+        file, ext = os.path.splitext(filename)
+        if ext != ".pickle": continue
+
+        filepath = os.path.join(featpath, filename)
+        with open(filepath, 'rb') as f:
+            bpDict = pickle.load(f)
+
+        feat = bpDict["feats_ft32"]
+
+        featDict.append(bpDict)
+        features.append(feat)
+    N = len(features)
+
+    simMatrix = []
+    intra_simi = np.empty(0)
+    low_simi_index = {}
+    for i, feats in enumerate(features):
+        matrix = 1 - cdist(feats, feats, 'cosine')
+
+        simMatrix.append(matrix)
+
+        '''extract the upper-triangular entries of the similarity matrix'''
+        rows, cols = np.triu_indices(matrix.shape[0], k=1)  # k=1 excludes the diagonal
+        upper_tri = matrix[rows, cols]
+        intra_simi = np.concatenate((intra_simi, upper_tri))
+
+        '''save image pairs whose similarity falls below the threshold'''
+        barcode = featDict[i]["barcode"]
+        imgpaths = featDict[i]["imgpaths"]
+        # save_imgpairs(barcode, [imgpaths], matrix, savepath, InterThresh, "intra")
+        print(f"{barcode} has been processed!")
+
+    Matrix = np.zeros((N, N))
+    inter_bcds = []
+    inter_simi = np.empty(0)
+    for i, feati in enumerate(features):
+        bcdi = featDict[i]["barcode"]
+        imgpathi = featDict[i]["imgpaths"]
+        for j, featj in enumerate(features):
+            bcdj = featDict[j]["barcode"]
+            imgpathj = featDict[j]["imgpaths"]
+
+            matrix = 1 - cdist(feati, featj, 'cosine')
+
+            inter_bcds.append((i, j, bcdi, bcdj))
+            Matrix[i, j] = np.mean(matrix)
+            if j > i:
+                bcd_ij = bcdi + '_' + bcdj
+                # save_imgpairs(bcd_ij, [imgpathi, imgpathj], matrix, savepath, InterThresh, "inter")
+                inter_simi = np.concatenate((inter_simi, matrix.ravel()))
+
+                print(f"{bcd_ij} has been processed!")
+
+    fig, axs = plt.subplots(2, 1)
+    axs[0].hist(intra_simi, bins=100, color='blue', edgecolor='black', alpha=0.7)
+    axs[0].set_xlim(0, 1)
+    axs[0].set_xlabel('Similarity')
+    axs[0].set_title("intra similarity")
+
+    axs[1].hist(inter_simi, bins=100, color='green', edgecolor='black', alpha=0.7)
+    axs[1].set_xlim(0, 1)
+    axs[1].set_xlabel('Similarity')
+    axs[1].set_title("inter similarity")
+
+    print("Done!")
+
+
+def main():
+    stdpath = r"D:\exhibition\dataset\feats"
+
+    feat_analysis(stdpath)
+
+
+if __name__ == '__main__':
+    main()
+
+
+
+
\ No newline at end of file
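Reviewer note: the intra/inter statistics in feat_analysisi.py above boil down to a cosine-similarity matrix plus an upper-triangle mask. A minimal, self-contained sketch of that core step, on toy random features (all names and shapes here are illustrative, not from the repo):

    import numpy as np
    from scipy.spatial.distance import cdist

    feats = np.random.rand(5, 256)                       # toy features for one barcode
    feats /= np.linalg.norm(feats, axis=1)[:, None]      # L2-normalize, as genfeats.py does

    matrix = 1 - cdist(feats, feats, 'cosine')           # pairwise cosine similarity
    rows, cols = np.triu_indices(matrix.shape[0], k=1)   # k=1 excludes the diagonal
    intra = matrix[rows, cols]                           # each unordered pair counted once
    print(intra.mean(), intra.min())

The upper-triangle trick is what keeps each pair from being counted twice in the intra histogram; the inter branch uses the full matrix because feati and featj come from different barcodes.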
diff --git a/contrast/feat_extract/__pycache__/config.cpython-39.pyc b/contrast/feat_extract/__pycache__/config.cpython-39.pyc
new file mode 100644
index 0000000..dd9fad2
Binary files /dev/null and b/contrast/feat_extract/__pycache__/config.cpython-39.pyc differ
diff --git a/contrast/feat_extract/__pycache__/inference.cpython-39.pyc b/contrast/feat_extract/__pycache__/inference.cpython-39.pyc
new file mode 100644
index 0000000..1f5c72f
Binary files /dev/null and b/contrast/feat_extract/__pycache__/inference.cpython-39.pyc differ
diff --git a/contrast/checkpoints/resnet18_0515/best.rknn b/contrast/feat_extract/checkpoints/resnet18_0515/best.rknn
similarity index 100%
rename from contrast/checkpoints/resnet18_0515/best.rknn
rename to contrast/feat_extract/checkpoints/resnet18_0515/best.rknn
diff --git a/contrast/config.py b/contrast/feat_extract/config.py
similarity index 94%
rename from contrast/config.py
rename to contrast/feat_extract/config.py
index 4864cdb..3bb15c9 100644
--- a/contrast/config.py
+++ b/contrast/feat_extract/config.py
@@ -10,6 +10,8 @@ class Config:
     embedding_size = 256
     drop_ratio = 0.5
     img_size = 224
+
+    batch_size = 8

     # data preprocess
     # input_shape = [1, 128, 128]
@@ -58,7 +60,11 @@ class Config:
     testbackbone = 'resnet18'  # [resnet18, mobilevit_s, mobilenet_v2, mobilenetv3_small, mobilenetv3_large, mobilenet_v1, PPLCNET_x1_0, PPLCNET_x0_5]

     test_val = "D:/比对/cl"
     # test_val = "./data/test_data_100"
-    test_model = "checkpoints/resnet18_0515/best.pth"
+
+    # test_model = "checkpoints/best_resnet18_v11.pth"
+    test_model = "checkpoints/zhanting_cls22_v11.pth"
+

     train_batch_size = 512  # 256
     test_batch_size = 256  # 256
diff --git a/contrast/feat_inference.py b/contrast/feat_extract/inference.py
similarity index 71%
rename from contrast/feat_inference.py
rename to contrast/feat_extract/inference.py
index 613a620..70ff84a 100644
--- a/contrast/feat_inference.py
+++ b/contrast/feat_extract/inference.py
@@ -3,30 +3,140 @@
 @author: LiChen
 """
-
-
+# import pdb
+# import shutil
+import torch.nn as nn
+# import statistics
 import os
-import os.path as osp
-import pdb
 import numpy as np
-import shutil
 from scipy.spatial.distance import cdist
 import torch
-import torch.nn as nn
+import os.path as osp
 from PIL import Image
 import json
-from config import config as conf
-from model import resnet18
+import matplotlib.pyplot as plt
+from pathlib import Path
+# import sys
+# sys.path.append(r"D:\DetectTracking")
+# from contrast.config import config as conf
+# from contrast.model import resnet18
+
+from .config import config as conf
+from .model import resnet18
 # from model import (mobilevit_s, resnet14, resnet18, resnet34, resnet50, mobilenet_v2,
 #                    MobileNetV3_Small, mobilenet_v1, PPLCNET_x1_0, PPLCNET_x0_5, PPLCNET_x2_5)

+curpath = Path(__file__).resolve().parents[0]
+
+class FeatsInterface:
+    def __init__(self, conf):
+        self.device = conf.device
+
+        # if conf.backbone == 'resnet18':
+        #     model = resnet18().to(conf.device)
+
+        model = resnet18().to(conf.device)
+        self.transform = conf.test_transform
+        self.batch_size = conf.batch_size
+        self.embedding_size = conf.embedding_size
+
+        if conf.test_model.find("zhanting") == -1:
+            model = nn.DataParallel(model).to(conf.device)
+        self.model = model
+
+        modpath = os.path.join(curpath, conf.test_model)
+        self.model.load_state_dict(torch.load(modpath, map_location=conf.device))
+        self.model.eval()
+        print('load model {} '.format(conf.testbackbone))
+
+    def inference(self, images, detections=None):
+        '''
+        Inputs must be RGB; BGR images need to be converted to RGB before calling.
+        '''
+        if isinstance(images, np.ndarray):
+            imgs, features = self.inference_image(images, detections)
+            return imgs, features
+
+        batch_patches = []
+        patches = []
+        for i, img in enumerate(images):
+            img = img.copy()
+            patch = self.transform(img)
+            if str(self.device) != "cpu":
+                patch = patch.to(device=self.device).half()
+            else:
+                patch = patch.to(device=self.device)
+
+            patches.append(patch)
+            if (i + 1) % self.batch_size == 0:
+                patches = torch.stack(patches, dim=0)
+                batch_patches.append(patches)
+                patches = []
+
+        if len(patches):
patches = torch.stack(patches, dim=0) + batch_patches.append(patches) + + features = np.zeros((0, self.embedding_size)) + for patches in batch_patches: + pred=self.model(patches) + pred[torch.isinf(pred)] = 1.0 + feat = pred.cpu().data.numpy() + features = np.vstack((features, feat)) + return features + + def inference_image(self, image, detections): + H, W, _ = np.shape(image) + + batch_patches = [] + patches = [] + imgs = [] + for d in range(np.size(detections, 0)): + tlbr = detections[d, :4].astype(np.int_) + tlbr[0] = max(0, tlbr[0]) + tlbr[1] = max(0, tlbr[1]) + tlbr[2] = min(W - 1, tlbr[2]) + tlbr[3] = min(H - 1, tlbr[3]) + img = image[tlbr[1]:tlbr[3], tlbr[0]:tlbr[2], :] + + imgs.append(img) + + + img1 = img[:, :, ::-1].copy() # the model expects RGB inputs + patch = self.transform(img1) + + # patch = patch.to(device=self.device).half() + if str(self.device) != "cpu": + patch = patch.to(device=self.device).half() + else: + patch = patch.to(device=self.device) + + patches.append(patch) + if (d + 1) % self.batch_size == 0: + patches = torch.stack(patches, dim=0) + batch_patches.append(patches) + patches = [] + + if len(patches): + patches = torch.stack(patches, dim=0) + batch_patches.append(patches) + + features = np.zeros((0, self.embedding_size)) + for patches in batch_patches: + pred = self.model(patches) + pred[torch.isinf(pred)] = 1.0 + feat = pred.cpu().data.numpy() + features = np.vstack((features, feat)) + + return imgs, features + + + + + + -import matplotlib.pyplot as plt -import statistics -embedding_size = conf.embedding_size -img_size = conf.img_size -device = conf.device def unique_image(pair_list) -> set: @@ -102,38 +212,38 @@ def featurize(images: list, transform, net, device, train=False) -> dict: res = {img: feature for (img, feature) in zip(images, features)} return res -def inference_image(images: list, transform, net, device, bs=16, embedding_size=256) -> dict: - batch_patches = [] - patches = [] - for d, img in enumerate(images): - img = Image.open(img) - patch = transform(img) +# def inference_image(images: list, transform, net, device, bs=16, embedding_size=256) -> dict: +# batch_patches = [] +# patches = [] +# for d, img in enumerate(images): +# img = Image.open(img) +# patch = transform(img) - if str(device) != "cpu": - patch = patch.to(device).half() - else: - patch = patch.to(device) +# if str(device) != "cpu": +# patch = patch.to(device).half() +# else: +# patch = patch.to(device) - patches.append(patch) - if (d + 1) % bs == 0: - patches = torch.stack(patches, dim=0) - batch_patches.append(patches) - patches = [] +# patches.append(patch) +# if (d + 1) % bs == 0: +# patches = torch.stack(patches, dim=0) +# batch_patches.append(patches) +# patches = [] - if len(patches): - patches = torch.stack(patches, dim=0) - batch_patches.append(patches) +# if len(patches): +# patches = torch.stack(patches, dim=0) +# batch_patches.append(patches) - features = np.zeros((0, embedding_size), dtype=np.float32) - for patches in batch_patches: - pred = net(patches) - pred[torch.isinf(pred)] = 1.0 - feat = pred.cpu().data.numpy() - features = np.vstack((features, feat)) +# features = np.zeros((0, embedding_size), dtype=np.float32) +# for patches in batch_patches: +# pred = net(patches) +# pred[torch.isinf(pred)] = 1.0 +# feat = pred.cpu().data.numpy() +# features = np.vstack((features, feat)) - return features +# return features @@ -283,6 +393,7 @@ def compute_contrast_accuracy(content_list_read): npairs = min((len(same_folder_pairs), len(cross_folder_pairs))) + Encoder = 
FeatsInterface(conf) same_pairs = same_folder_pairs[:npairs] cross_pairs = cross_folder_pairs[:npairs] @@ -292,8 +403,8 @@ def compute_contrast_accuracy(content_list_read): images_a = [osp.join(conf.test_val, img) for img in same_pairs[i][0]] images_b = [osp.join(conf.test_val, img) for img in same_pairs[i][1]] - feats_a = inference_image(images_a, conf.test_transform, model, conf.device) - feats_b = inference_image(images_b, conf.test_transform, model, conf.device) + feats_a = Encoder.inference(images_a) + feats_b = Encoder.inference(images_b) # matrix = 1- np.maximum(0.0, cdist(feats_a, feats_b, 'cosine')) matrix = 1 - cdist(feats_a, feats_b, 'cosine') @@ -324,8 +435,8 @@ def compute_contrast_accuracy(content_list_read): images_a = [osp.join(conf.test_val, img) for img in cross_pairs[i][0]] images_b = [osp.join(conf.test_val, img) for img in cross_pairs[i][1]] - feats_a = inference_image(images_a, conf.test_transform, model, conf.device) - feats_b = inference_image(images_b, conf.test_transform, model, conf.device) + feats_a = Encoder.inference(images_a) + feats_b = Encoder.inference(images_b) # matrix = 1- np.maximum(0.0, cdist(feats_a, feats_b, 'cosine')) matrix = 1 - cdist(feats_a, feats_b, 'cosine') @@ -407,28 +518,28 @@ if __name__ == '__main__': # Network Setup if conf.testbackbone == 'resnet18': - # model = ResIRSE(img_size, embedding_size, conf.drop_ratio).to(device) - model = resnet18().to(device) + # model = ResIRSE(conf.img_size, conf.embedding_size, conf.drop_ratio).to(conf.device) + model = resnet18().to(conf.device) # elif conf.testbackbone == 'resnet34': - # model = resnet34().to(device) + # model = resnet34().to(conf.device) # elif conf.testbackbone == 'resnet50': - # model = resnet50().to(device) + # model = resnet50().to(conf.device) # elif conf.testbackbone == 'mobilevit_s': - # model = mobilevit_s().to(device) + # model = mobilevit_s().to(conf.device) # elif conf.testbackbone == 'mobilenetv3': - # model = MobileNetV3_Small().to(device) + # model = MobileNetV3_Small().to(conf.device) # elif conf.testbackbone == 'mobilenet_v1': - # model = mobilenet_v1().to(device) + # model = mobilenet_v1().to(conf.device) # elif conf.testbackbone == 'PPLCNET_x1_0': - # model = PPLCNET_x1_0().to(device) + # model = PPLCNET_x1_0().to(conf.device) # elif conf.testbackbone == 'PPLCNET_x0_5': - # model = PPLCNET_x0_5().to(device) + # model = PPLCNET_x0_5().to(conf.device) # elif conf.backbone == 'PPLCNET_x2_5': - # model = PPLCNET_x2_5().to(device) + # model = PPLCNET_x2_5().to(conf.device) # elif conf.testbackbone == 'mobilenet_v2': - # model = mobilenet_v2().to(device) + # model = mobilenet_v2().to(conf.device) # elif conf.testbackbone == 'resnet14': - # model = resnet14().to(device) + # model = resnet14().to(conf.device) else: raise ValueError('Have not model {}'.format(conf.backbone)) diff --git a/contrast/model/BAM.py b/contrast/feat_extract/model/BAM.py similarity index 100% rename from contrast/model/BAM.py rename to contrast/feat_extract/model/BAM.py diff --git a/contrast/model/CBAM.py b/contrast/feat_extract/model/CBAM.py similarity index 100% rename from contrast/model/CBAM.py rename to contrast/feat_extract/model/CBAM.py diff --git a/contrast/model/Tool.py b/contrast/feat_extract/model/Tool.py similarity index 100% rename from contrast/model/Tool.py rename to contrast/feat_extract/model/Tool.py diff --git a/contrast/model/__init__.py b/contrast/feat_extract/model/__init__.py similarity index 100% rename from contrast/model/__init__.py rename to 
contrast/feat_extract/model/__init__.py diff --git a/contrast/model/__pycache__/BAM.cpython-38.pyc b/contrast/feat_extract/model/__pycache__/BAM.cpython-38.pyc similarity index 100% rename from contrast/model/__pycache__/BAM.cpython-38.pyc rename to contrast/feat_extract/model/__pycache__/BAM.cpython-38.pyc diff --git a/contrast/model/__pycache__/CBAM.cpython-38.pyc b/contrast/feat_extract/model/__pycache__/CBAM.cpython-38.pyc similarity index 100% rename from contrast/model/__pycache__/CBAM.cpython-38.pyc rename to contrast/feat_extract/model/__pycache__/CBAM.cpython-38.pyc diff --git a/contrast/model/__pycache__/CBAM.cpython-39.pyc b/contrast/feat_extract/model/__pycache__/CBAM.cpython-39.pyc similarity index 100% rename from contrast/model/__pycache__/CBAM.cpython-39.pyc rename to contrast/feat_extract/model/__pycache__/CBAM.cpython-39.pyc diff --git a/contrast/model/__pycache__/Tool.cpython-38.pyc b/contrast/feat_extract/model/__pycache__/Tool.cpython-38.pyc similarity index 100% rename from contrast/model/__pycache__/Tool.cpython-38.pyc rename to contrast/feat_extract/model/__pycache__/Tool.cpython-38.pyc diff --git a/contrast/model/__pycache__/Tool.cpython-39.pyc b/contrast/feat_extract/model/__pycache__/Tool.cpython-39.pyc similarity index 100% rename from contrast/model/__pycache__/Tool.cpython-39.pyc rename to contrast/feat_extract/model/__pycache__/Tool.cpython-39.pyc diff --git a/contrast/model/__pycache__/__init__.cpython-310.pyc b/contrast/feat_extract/model/__pycache__/__init__.cpython-310.pyc similarity index 100% rename from contrast/model/__pycache__/__init__.cpython-310.pyc rename to contrast/feat_extract/model/__pycache__/__init__.cpython-310.pyc diff --git a/contrast/model/__pycache__/__init__.cpython-38.pyc b/contrast/feat_extract/model/__pycache__/__init__.cpython-38.pyc similarity index 100% rename from contrast/model/__pycache__/__init__.cpython-38.pyc rename to contrast/feat_extract/model/__pycache__/__init__.cpython-38.pyc diff --git a/contrast/model/__pycache__/__init__.cpython-39.pyc b/contrast/feat_extract/model/__pycache__/__init__.cpython-39.pyc similarity index 100% rename from contrast/model/__pycache__/__init__.cpython-39.pyc rename to contrast/feat_extract/model/__pycache__/__init__.cpython-39.pyc diff --git a/contrast/model/__pycache__/fmobilenet.cpython-310.pyc b/contrast/feat_extract/model/__pycache__/fmobilenet.cpython-310.pyc similarity index 100% rename from contrast/model/__pycache__/fmobilenet.cpython-310.pyc rename to contrast/feat_extract/model/__pycache__/fmobilenet.cpython-310.pyc diff --git a/contrast/model/__pycache__/fmobilenet.cpython-38.pyc b/contrast/feat_extract/model/__pycache__/fmobilenet.cpython-38.pyc similarity index 100% rename from contrast/model/__pycache__/fmobilenet.cpython-38.pyc rename to contrast/feat_extract/model/__pycache__/fmobilenet.cpython-38.pyc diff --git a/contrast/model/__pycache__/fmobilenet.cpython-39.pyc b/contrast/feat_extract/model/__pycache__/fmobilenet.cpython-39.pyc similarity index 100% rename from contrast/model/__pycache__/fmobilenet.cpython-39.pyc rename to contrast/feat_extract/model/__pycache__/fmobilenet.cpython-39.pyc diff --git a/contrast/model/__pycache__/lcnet.cpython-38.pyc b/contrast/feat_extract/model/__pycache__/lcnet.cpython-38.pyc similarity index 100% rename from contrast/model/__pycache__/lcnet.cpython-38.pyc rename to contrast/feat_extract/model/__pycache__/lcnet.cpython-38.pyc diff --git a/contrast/model/__pycache__/lcnet.cpython-39.pyc 
b/contrast/feat_extract/model/__pycache__/lcnet.cpython-39.pyc similarity index 100% rename from contrast/model/__pycache__/lcnet.cpython-39.pyc rename to contrast/feat_extract/model/__pycache__/lcnet.cpython-39.pyc diff --git a/contrast/model/__pycache__/loss.cpython-38.pyc b/contrast/feat_extract/model/__pycache__/loss.cpython-38.pyc similarity index 100% rename from contrast/model/__pycache__/loss.cpython-38.pyc rename to contrast/feat_extract/model/__pycache__/loss.cpython-38.pyc diff --git a/contrast/model/__pycache__/loss.cpython-39.pyc b/contrast/feat_extract/model/__pycache__/loss.cpython-39.pyc similarity index 100% rename from contrast/model/__pycache__/loss.cpython-39.pyc rename to contrast/feat_extract/model/__pycache__/loss.cpython-39.pyc diff --git a/contrast/model/__pycache__/metric.cpython-38.pyc b/contrast/feat_extract/model/__pycache__/metric.cpython-38.pyc similarity index 100% rename from contrast/model/__pycache__/metric.cpython-38.pyc rename to contrast/feat_extract/model/__pycache__/metric.cpython-38.pyc diff --git a/contrast/model/__pycache__/metric.cpython-39.pyc b/contrast/feat_extract/model/__pycache__/metric.cpython-39.pyc similarity index 100% rename from contrast/model/__pycache__/metric.cpython-39.pyc rename to contrast/feat_extract/model/__pycache__/metric.cpython-39.pyc diff --git a/contrast/model/__pycache__/mobilenet_v1.cpython-38.pyc b/contrast/feat_extract/model/__pycache__/mobilenet_v1.cpython-38.pyc similarity index 100% rename from contrast/model/__pycache__/mobilenet_v1.cpython-38.pyc rename to contrast/feat_extract/model/__pycache__/mobilenet_v1.cpython-38.pyc diff --git a/contrast/model/__pycache__/mobilenet_v1.cpython-39.pyc b/contrast/feat_extract/model/__pycache__/mobilenet_v1.cpython-39.pyc similarity index 100% rename from contrast/model/__pycache__/mobilenet_v1.cpython-39.pyc rename to contrast/feat_extract/model/__pycache__/mobilenet_v1.cpython-39.pyc diff --git a/contrast/model/__pycache__/mobilenet_v2.cpython-38.pyc b/contrast/feat_extract/model/__pycache__/mobilenet_v2.cpython-38.pyc similarity index 100% rename from contrast/model/__pycache__/mobilenet_v2.cpython-38.pyc rename to contrast/feat_extract/model/__pycache__/mobilenet_v2.cpython-38.pyc diff --git a/contrast/model/__pycache__/mobilenet_v2.cpython-39.pyc b/contrast/feat_extract/model/__pycache__/mobilenet_v2.cpython-39.pyc similarity index 74% rename from contrast/model/__pycache__/mobilenet_v2.cpython-39.pyc rename to contrast/feat_extract/model/__pycache__/mobilenet_v2.cpython-39.pyc index d782cd0..1680304 100644 Binary files a/contrast/model/__pycache__/mobilenet_v2.cpython-39.pyc and b/contrast/feat_extract/model/__pycache__/mobilenet_v2.cpython-39.pyc differ diff --git a/contrast/model/__pycache__/mobilenet_v3.cpython-38.pyc b/contrast/feat_extract/model/__pycache__/mobilenet_v3.cpython-38.pyc similarity index 100% rename from contrast/model/__pycache__/mobilenet_v3.cpython-38.pyc rename to contrast/feat_extract/model/__pycache__/mobilenet_v3.cpython-38.pyc diff --git a/contrast/feat_extract/model/__pycache__/mobilenet_v3.cpython-39.pyc b/contrast/feat_extract/model/__pycache__/mobilenet_v3.cpython-39.pyc new file mode 100644 index 0000000..30ed788 Binary files /dev/null and b/contrast/feat_extract/model/__pycache__/mobilenet_v3.cpython-39.pyc differ diff --git a/contrast/model/__pycache__/mobilevit.cpython-310.pyc b/contrast/feat_extract/model/__pycache__/mobilevit.cpython-310.pyc similarity index 100% rename from contrast/model/__pycache__/mobilevit.cpython-310.pyc 
rename to contrast/feat_extract/model/__pycache__/mobilevit.cpython-310.pyc diff --git a/contrast/model/__pycache__/mobilevit.cpython-38.pyc b/contrast/feat_extract/model/__pycache__/mobilevit.cpython-38.pyc similarity index 100% rename from contrast/model/__pycache__/mobilevit.cpython-38.pyc rename to contrast/feat_extract/model/__pycache__/mobilevit.cpython-38.pyc diff --git a/contrast/feat_extract/model/__pycache__/mobilevit.cpython-39.pyc b/contrast/feat_extract/model/__pycache__/mobilevit.cpython-39.pyc new file mode 100644 index 0000000..6dc8172 Binary files /dev/null and b/contrast/feat_extract/model/__pycache__/mobilevit.cpython-39.pyc differ diff --git a/contrast/model/__pycache__/resbam.cpython-38.pyc b/contrast/feat_extract/model/__pycache__/resbam.cpython-38.pyc similarity index 100% rename from contrast/model/__pycache__/resbam.cpython-38.pyc rename to contrast/feat_extract/model/__pycache__/resbam.cpython-38.pyc diff --git a/contrast/feat_extract/model/__pycache__/resbam.cpython-39.pyc b/contrast/feat_extract/model/__pycache__/resbam.cpython-39.pyc new file mode 100644 index 0000000..55f08d7 Binary files /dev/null and b/contrast/feat_extract/model/__pycache__/resbam.cpython-39.pyc differ diff --git a/contrast/model/__pycache__/resnet.cpython-310.pyc b/contrast/feat_extract/model/__pycache__/resnet.cpython-310.pyc similarity index 100% rename from contrast/model/__pycache__/resnet.cpython-310.pyc rename to contrast/feat_extract/model/__pycache__/resnet.cpython-310.pyc diff --git a/contrast/model/__pycache__/resnet.cpython-38.pyc b/contrast/feat_extract/model/__pycache__/resnet.cpython-38.pyc similarity index 100% rename from contrast/model/__pycache__/resnet.cpython-38.pyc rename to contrast/feat_extract/model/__pycache__/resnet.cpython-38.pyc diff --git a/contrast/model/__pycache__/resnet_face.cpython-38.pyc b/contrast/feat_extract/model/__pycache__/resnet_face.cpython-38.pyc similarity index 100% rename from contrast/model/__pycache__/resnet_face.cpython-38.pyc rename to contrast/feat_extract/model/__pycache__/resnet_face.cpython-38.pyc diff --git a/contrast/model/__pycache__/resnet_face.cpython-39.pyc b/contrast/feat_extract/model/__pycache__/resnet_face.cpython-39.pyc similarity index 100% rename from contrast/model/__pycache__/resnet_face.cpython-39.pyc rename to contrast/feat_extract/model/__pycache__/resnet_face.cpython-39.pyc diff --git a/contrast/model/__pycache__/resnet_pre.cpython-38.pyc b/contrast/feat_extract/model/__pycache__/resnet_pre.cpython-38.pyc similarity index 100% rename from contrast/model/__pycache__/resnet_pre.cpython-38.pyc rename to contrast/feat_extract/model/__pycache__/resnet_pre.cpython-38.pyc diff --git a/contrast/model/__pycache__/resnet_pre.cpython-39.pyc b/contrast/feat_extract/model/__pycache__/resnet_pre.cpython-39.pyc similarity index 53% rename from contrast/model/__pycache__/resnet_pre.cpython-39.pyc rename to contrast/feat_extract/model/__pycache__/resnet_pre.cpython-39.pyc index d99c1c8..e2b7242 100644 Binary files a/contrast/model/__pycache__/resnet_pre.cpython-39.pyc and b/contrast/feat_extract/model/__pycache__/resnet_pre.cpython-39.pyc differ diff --git a/contrast/model/__pycache__/utils.cpython-38.pyc b/contrast/feat_extract/model/__pycache__/utils.cpython-38.pyc similarity index 100% rename from contrast/model/__pycache__/utils.cpython-38.pyc rename to contrast/feat_extract/model/__pycache__/utils.cpython-38.pyc diff --git a/contrast/model/__pycache__/utils.cpython-39.pyc 
b/contrast/feat_extract/model/__pycache__/utils.cpython-39.pyc similarity index 100% rename from contrast/model/__pycache__/utils.cpython-39.pyc rename to contrast/feat_extract/model/__pycache__/utils.cpython-39.pyc diff --git a/contrast/model/fmobilenet.py b/contrast/feat_extract/model/fmobilenet.py similarity index 100% rename from contrast/model/fmobilenet.py rename to contrast/feat_extract/model/fmobilenet.py diff --git a/contrast/model/lcnet.py b/contrast/feat_extract/model/lcnet.py similarity index 100% rename from contrast/model/lcnet.py rename to contrast/feat_extract/model/lcnet.py diff --git a/contrast/model/loss.py b/contrast/feat_extract/model/loss.py similarity index 100% rename from contrast/model/loss.py rename to contrast/feat_extract/model/loss.py diff --git a/contrast/model/metric.py b/contrast/feat_extract/model/metric.py similarity index 100% rename from contrast/model/metric.py rename to contrast/feat_extract/model/metric.py diff --git a/contrast/model/mobilenet_v1.py b/contrast/feat_extract/model/mobilenet_v1.py similarity index 100% rename from contrast/model/mobilenet_v1.py rename to contrast/feat_extract/model/mobilenet_v1.py diff --git a/contrast/model/mobilenet_v2.py b/contrast/feat_extract/model/mobilenet_v2.py similarity index 99% rename from contrast/model/mobilenet_v2.py rename to contrast/feat_extract/model/mobilenet_v2.py index d62f0cd..eec44d4 100644 --- a/contrast/model/mobilenet_v2.py +++ b/contrast/feat_extract/model/mobilenet_v2.py @@ -1,6 +1,6 @@ from torch import nn from .utils import load_state_dict_from_url -from config import config as conf +from ..config import config as conf __all__ = ['MobileNetV2', 'mobilenet_v2'] diff --git a/contrast/model/mobilenet_v3.py b/contrast/feat_extract/model/mobilenet_v3.py similarity index 99% rename from contrast/model/mobilenet_v3.py rename to contrast/feat_extract/model/mobilenet_v3.py index d69a5a0..953db51 100644 --- a/contrast/model/mobilenet_v3.py +++ b/contrast/feat_extract/model/mobilenet_v3.py @@ -7,7 +7,7 @@ import torch import torch.nn as nn import torch.nn.functional as F from torch.nn import init -from config import config as conf +from ..config import config as conf class hswish(nn.Module): diff --git a/contrast/model/mobilevit.py b/contrast/feat_extract/model/mobilevit.py similarity index 98% rename from contrast/model/mobilevit.py rename to contrast/feat_extract/model/mobilevit.py index f371ee9..c4a0824 100644 --- a/contrast/model/mobilevit.py +++ b/contrast/feat_extract/model/mobilevit.py @@ -2,7 +2,10 @@ import torch import torch.nn as nn from einops import rearrange -from config import config as conf + +# import sys +# sys.path.append(r"D:\DetectTracking") +from ..config import config as conf def conv_1x1_bn(inp, oup): diff --git a/contrast/model/resbam.py b/contrast/feat_extract/model/resbam.py similarity index 97% rename from contrast/model/resbam.py rename to contrast/feat_extract/model/resbam.py index 21395c3..15a88e6 100644 --- a/contrast/model/resbam.py +++ b/contrast/feat_extract/model/resbam.py @@ -1,7 +1,10 @@ -from model.CBAM import CBAM import torch import torch.nn as nn -from model.Tool import GeM as gem + +from .CBAM import CBAM +from .Tool import GeM as gem +# from model.CBAM import CBAM +# from model.Tool import GeM as gem class Bottleneck(nn.Module): diff --git a/contrast/model/resnet.py b/contrast/feat_extract/model/resnet.py similarity index 100% rename from contrast/model/resnet.py rename to contrast/feat_extract/model/resnet.py diff --git a/contrast/model/resnet_face.py 
b/contrast/feat_extract/model/resnet_face.py similarity index 100% rename from contrast/model/resnet_face.py rename to contrast/feat_extract/model/resnet_face.py diff --git a/contrast/resnet_vit/model/resnet_pre.py b/contrast/feat_extract/model/resnet_pre.py similarity index 99% rename from contrast/resnet_vit/model/resnet_pre.py rename to contrast/feat_extract/model/resnet_pre.py index 5e52ad9..3ea6441 100644 --- a/contrast/resnet_vit/model/resnet_pre.py +++ b/contrast/feat_extract/model/resnet_pre.py @@ -1,6 +1,6 @@ import torch import torch.nn as nn -from config import config as conf +from ..config import config as conf try: from torch.hub import load_state_dict_from_url diff --git a/contrast/model/utils.py b/contrast/feat_extract/model/utils.py similarity index 100% rename from contrast/model/utils.py rename to contrast/feat_extract/model/utils.py diff --git a/contrast/model/vit.py b/contrast/feat_extract/model/vit.py similarity index 100% rename from contrast/model/vit.py rename to contrast/feat_extract/model/vit.py diff --git a/contrast/resnet_vit/.idea/.gitignore b/contrast/feat_extract/resnet_vit/.idea/.gitignore similarity index 100% rename from contrast/resnet_vit/.idea/.gitignore rename to contrast/feat_extract/resnet_vit/.idea/.gitignore diff --git a/contrast/resnet_vit/.idea/contrastInference.iml b/contrast/feat_extract/resnet_vit/.idea/contrastInference.iml similarity index 100% rename from contrast/resnet_vit/.idea/contrastInference.iml rename to contrast/feat_extract/resnet_vit/.idea/contrastInference.iml diff --git a/contrast/resnet_vit/.idea/deployment.xml b/contrast/feat_extract/resnet_vit/.idea/deployment.xml similarity index 100% rename from contrast/resnet_vit/.idea/deployment.xml rename to contrast/feat_extract/resnet_vit/.idea/deployment.xml diff --git a/contrast/resnet_vit/.idea/inspectionProfiles/Project_Default.xml b/contrast/feat_extract/resnet_vit/.idea/inspectionProfiles/Project_Default.xml similarity index 100% rename from contrast/resnet_vit/.idea/inspectionProfiles/Project_Default.xml rename to contrast/feat_extract/resnet_vit/.idea/inspectionProfiles/Project_Default.xml diff --git a/contrast/resnet_vit/.idea/inspectionProfiles/profiles_settings.xml b/contrast/feat_extract/resnet_vit/.idea/inspectionProfiles/profiles_settings.xml similarity index 100% rename from contrast/resnet_vit/.idea/inspectionProfiles/profiles_settings.xml rename to contrast/feat_extract/resnet_vit/.idea/inspectionProfiles/profiles_settings.xml diff --git a/contrast/resnet_vit/.idea/misc.xml b/contrast/feat_extract/resnet_vit/.idea/misc.xml similarity index 100% rename from contrast/resnet_vit/.idea/misc.xml rename to contrast/feat_extract/resnet_vit/.idea/misc.xml diff --git a/contrast/resnet_vit/.idea/modules.xml b/contrast/feat_extract/resnet_vit/.idea/modules.xml similarity index 100% rename from contrast/resnet_vit/.idea/modules.xml rename to contrast/feat_extract/resnet_vit/.idea/modules.xml diff --git a/contrast/resnet_vit/__init__.py b/contrast/feat_extract/resnet_vit/__init__.py similarity index 100% rename from contrast/resnet_vit/__init__.py rename to contrast/feat_extract/resnet_vit/__init__.py diff --git a/contrast/resnet_vit/__pycache__/__init__.cpython-39.pyc b/contrast/feat_extract/resnet_vit/__pycache__/__init__.cpython-39.pyc similarity index 100% rename from contrast/resnet_vit/__pycache__/__init__.cpython-39.pyc rename to contrast/feat_extract/resnet_vit/__pycache__/__init__.cpython-39.pyc diff --git a/contrast/resnet_vit/__pycache__/config.cpython-38.pyc 
b/contrast/feat_extract/resnet_vit/__pycache__/config.cpython-38.pyc similarity index 100% rename from contrast/resnet_vit/__pycache__/config.cpython-38.pyc rename to contrast/feat_extract/resnet_vit/__pycache__/config.cpython-38.pyc diff --git a/contrast/resnet_vit/__pycache__/config.cpython-39.pyc b/contrast/feat_extract/resnet_vit/__pycache__/config.cpython-39.pyc similarity index 100% rename from contrast/resnet_vit/__pycache__/config.cpython-39.pyc rename to contrast/feat_extract/resnet_vit/__pycache__/config.cpython-39.pyc diff --git a/contrast/resnet_vit/__pycache__/inference.cpython-39.pyc b/contrast/feat_extract/resnet_vit/__pycache__/inference.cpython-39.pyc similarity index 100% rename from contrast/resnet_vit/__pycache__/inference.cpython-39.pyc rename to contrast/feat_extract/resnet_vit/__pycache__/inference.cpython-39.pyc diff --git a/contrast/resnet_vit/config.py b/contrast/feat_extract/resnet_vit/config.py similarity index 100% rename from contrast/resnet_vit/config.py rename to contrast/feat_extract/resnet_vit/config.py diff --git a/contrast/resnet_vit/inference.py b/contrast/feat_extract/resnet_vit/inference.py similarity index 100% rename from contrast/resnet_vit/inference.py rename to contrast/feat_extract/resnet_vit/inference.py diff --git a/contrast/resnet_vit/model/__init__.py b/contrast/feat_extract/resnet_vit/model/__init__.py similarity index 100% rename from contrast/resnet_vit/model/__init__.py rename to contrast/feat_extract/resnet_vit/model/__init__.py diff --git a/contrast/resnet_vit/model/__pycache__/__init__.cpython-38.pyc b/contrast/feat_extract/resnet_vit/model/__pycache__/__init__.cpython-38.pyc similarity index 100% rename from contrast/resnet_vit/model/__pycache__/__init__.cpython-38.pyc rename to contrast/feat_extract/resnet_vit/model/__pycache__/__init__.cpython-38.pyc diff --git a/contrast/resnet_vit/model/__pycache__/__init__.cpython-39.pyc b/contrast/feat_extract/resnet_vit/model/__pycache__/__init__.cpython-39.pyc similarity index 100% rename from contrast/resnet_vit/model/__pycache__/__init__.cpython-39.pyc rename to contrast/feat_extract/resnet_vit/model/__pycache__/__init__.cpython-39.pyc diff --git a/contrast/resnet_vit/model/__pycache__/resnet_pre.cpython-38.pyc b/contrast/feat_extract/resnet_vit/model/__pycache__/resnet_pre.cpython-38.pyc similarity index 100% rename from contrast/resnet_vit/model/__pycache__/resnet_pre.cpython-38.pyc rename to contrast/feat_extract/resnet_vit/model/__pycache__/resnet_pre.cpython-38.pyc diff --git a/contrast/resnet_vit/model/__pycache__/resnet_pre.cpython-39.pyc b/contrast/feat_extract/resnet_vit/model/__pycache__/resnet_pre.cpython-39.pyc similarity index 100% rename from contrast/resnet_vit/model/__pycache__/resnet_pre.cpython-39.pyc rename to contrast/feat_extract/resnet_vit/model/__pycache__/resnet_pre.cpython-39.pyc diff --git a/contrast/model/resnet_pre.py b/contrast/feat_extract/resnet_vit/model/resnet_pre.py similarity index 100% rename from contrast/model/resnet_pre.py rename to contrast/feat_extract/resnet_vit/model/resnet_pre.py diff --git a/contrast/genfeats.py b/contrast/genfeats.py index 2e889ee..ddaa509 100644 --- a/contrast/genfeats.py +++ b/contrast/genfeats.py @@ -6,26 +6,33 @@ Created on Sun Nov 3 12:05:19 2024 """ import os import time -import torch +# import torch import pickle +# import json import numpy as np -from config import config as conf -from model import resnet18 as resnet18 -from feat_inference import inference_image +from PIL import Image +from feat_extract.config import 
config as conf
+# from model import resnet18 as resnet18
+from feat_extract.inference import FeatsInterface  #, inference_image

 IMG_FORMAT = ['.bmp', '.jpg', '.jpeg', '.png']

-'''======= 0. 配置特征提取模型地址 ======='''
-model_path = conf.test_model
-model_path = r"D:\exhibition\ckpt\zhanting.pth"
+# def model_init(conf, mpath=None):
+#     '''======= 0. configure the feature-extraction model path ======='''
+#     if mpath is None:
+#         model_path = conf.test_model
+#     else:
+#         model_path = mpath

-##============ load resnet mdoel
-model = resnet18().to(conf.device)
-# model = nn.DataParallel(model).to(conf.device)
-model.load_state_dict(torch.load(model_path, map_location=conf.device))
-model.eval()
-print('load model {} '.format(conf.testbackbone))
+#     ##============ load resnet model
+#     model = resnet18().to(conf.device)
+#     # model = nn.DataParallel(model).to(conf.device)
+#     model.load_state_dict(torch.load(model_path, map_location=conf.device))
+#     model.eval()
+#     print('load model {} '.format(conf.testbackbone))
+
+#     return model

 def get_std_barcodeDict(bcdpath, savepath, bcdSet):
     '''
@@ -42,9 +49,9 @@ def get_std_barcodeDict(bcdpath, savepath, bcdSet):
     '''读取数据集中 barcode 列表'''
     stdBarcodeList = []
     for filename in os.listdir(bcdpath):
-        # filepath = os.path.join(bcdpath, filename)
-        # if not os.path.isdir(filepath) or not filename.isdigit() or len(filename)<8:
-        #     continue
+        filepath = os.path.join(bcdpath, filename)
+        if not os.path.isdir(filepath) or not filename.isdigit() or len(filename)<8:
+            continue
         if bcdSet is None:
             stdBarcodeList.append(filename)
         elif filename in bcdSet:
@@ -59,7 +66,7 @@ def get_std_barcodeDict(bcdpath, savepath, bcdSet):
     for barcode, bpath in bcdPaths:
         pickpath = os.path.join(savepath, f"{barcode}.pickle")
         if os.path.isfile(pickpath):
-            continue
+            continue

         stdBarcodeDict = {}
         stdBarcodeDict[barcode] = []
@@ -89,6 +96,7 @@ def get_std_barcodeDict(bcdpath, savepath, bcdSet):
         pickpath = os.path.join(savepath, f"{barcode}.pickle")
         with open(pickpath, 'wb') as f:
             pickle.dump(stdBarcodeDict, f)
+        print(f"Barcode: {barcode}")

         # k += 1
@@ -115,32 +123,37 @@ def stdfeat_infer(imgPath, featPath, bcdSet=None):

     stdBarcodeDict = {}
     stdBarcodeDict_ft16 = {}
+    Encoder = FeatsInterface(conf)

-    '''4处同名: (1)barcode原始图像文件夹; (2)imgPath中的 .pickle 文件名、该pickle文件中字典的key值'''
-
+    '''The same name appears in four places: (1) the folder of raw barcode images;
+       (2) the .pickle file name under imgPath; (3) the dict key inside that pickle;
+       (4) a key of the feature dict.'''
     k = 0
     for filename in os.listdir(imgPath):
         bcd, ext = os.path.splitext(filename)
-        pkpath = os.path.join(featPath, f"{bcd}.pickle")
-
-        if os.path.isfile(pkpath): continue
+        filepath = os.path.join(imgPath, filename)
+        if ext != ".pickle": continue
         if bcdSet is not None and bcd not in bcdSet:
            continue
-        filepath = os.path.join(imgPath, filename)
+        featpath = os.path.join(featPath, f"{bcd}.pickle")

         stdbDict = {}
-        stdbDict_ft16 = {}
-        stdbDict_uint8 = {}
-
         t1 = time.time()
-
         try:
             with open(filepath, 'rb') as f:
-                bpDict = pickle.load(f)
+                bpDict = pickle.load(f)
+
             for barcode, imgpaths in bpDict.items():
                 # feature = batch_inference(imgpaths, 8)  #from vit distilled model of LiChen
-                feature = inference_image(imgpaths, conf.test_transform, model, conf.device)
+                # feature = inference_image(imgpaths, conf.test_transform, model, conf.device)
+
+                imgs = []
+                for d, imgpath in enumerate(imgpaths):
+                    img = Image.open(imgpath)
+                    imgs.append(img)
+
+                feature = Encoder.inference(imgs)
+
                 feature /= np.linalg.norm(feature, axis=1)[:, None]

                 # float16
@@ -162,7 +175,7 @@ def stdfeat_infer(imgPath, featPath, bcdSet=None):
                 stdbDict["feats_ft16"] = feature_ft16
                 stdbDict["feats_uint8"] = feature_uint8
-                with open(pkpath, 'wb') as f:
+                with open(featpath, 'wb') as f:
                     pickle.dump(stdbDict, f)

                 stdBarcodeDict[barcode] = feature
@@ -174,21 +187,10 @@ def stdfeat_infer(imgPath, featPath, bcdSet=None):
         # if k == 10:
         #     break

-    ##================== float32
-    # pickpath = os.path.join(featPath, f"barcode_features_{k}.pickle")
-    # with open(pickpath, 'wb') as f:
-    #     pickle.dump(stdBarcodeDict, f)
-
-    ##================== float16
-    # pickpath_ft16 = os.path.join(featPath, f"barcode_features_ft16_{k}.pickle")
-    # with open(pickpath_ft16, 'wb') as f:
-    #     pickle.dump(stdBarcodeDict_ft16, f)
-
     return

-def genfeatures(imgpath, bcdpath, featpath, bcdSet=None):
+def gen_bcd_features(imgpath, bcdpath, featpath, bcdSet=None):
     ''' 生成标准特征集 '''
     '''1. 提取 imgpath 中样本地址,生成字典{barcode: [imgpath1, imgpath1, ...]}
           并存储于: bcdpath, 格式为 barcode.pickle'''
@@ -198,11 +200,12 @@ def gen_bcd_features(imgpath, bcdpath, featpath, bcdSet=None):
     stdfeat_infer(bcdpath, featpath, bcdSet)

 def main():
-    imgpath = r"\\192.168.1.28\share\展厅barcode数据\整理\zhantingBase"
+    imgpath = r"\\192.168.1.28\share\数据\已完成数据\展厅数据\v1.0\比对数据\整理\zhantingBase"
     bcdpath = r"D:\exhibition\dataset\bcdpath"
     featpath = r"D:\exhibition\dataset\feats"
-
-    genfeatures(imgpath, bcdpath, featpath)
+
+
+    gen_bcd_features(imgpath, bcdpath, featpath)
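Reviewer note: genfeats.py now drives extraction through the new FeatsInterface wrapper instead of a module-level model. A minimal usage sketch, assuming contrast/ is on sys.path so feat_extract imports as a package, and that conf provides device, test_transform, batch_size, embedding_size and test_model as in feat_extract/config.py (the image paths are hypothetical):

    from PIL import Image
    from feat_extract.config import config as conf
    from feat_extract.inference import FeatsInterface

    encoder = FeatsInterface(conf)          # loads resnet18 weights from conf.test_model
    imgs = [Image.open(p) for p in ["a.jpg", "b.jpg"]]   # toy image paths
    feats = encoder.inference(imgs)         # (N, embedding_size) numpy array
    print(feats.shape)

Note the asymmetric return: for a list of PIL images inference returns only the feature array, while for an ndarray frame plus detections it returns (crops, features); callers such as compute_contrast_accuracy rely on the first form.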
diff --git a/contrast/model/__pycache__/mobilenet_v3.cpython-39.pyc b/contrast/model/__pycache__/mobilenet_v3.cpython-39.pyc
deleted file mode 100644
index e1fc34a..0000000
Binary files a/contrast/model/__pycache__/mobilenet_v3.cpython-39.pyc and /dev/null differ
diff --git a/contrast/model/__pycache__/mobilevit.cpython-39.pyc b/contrast/model/__pycache__/mobilevit.cpython-39.pyc
deleted file mode 100644
index aff5d93..0000000
Binary files a/contrast/model/__pycache__/mobilevit.cpython-39.pyc and /dev/null differ
diff --git a/contrast/model/__pycache__/resbam.cpython-39.pyc b/contrast/model/__pycache__/resbam.cpython-39.pyc
deleted file mode 100644
index 65a91de..0000000
Binary files a/contrast/model/__pycache__/resbam.cpython-39.pyc and /dev/null differ
diff --git a/contrast/one2n_contrast.py b/contrast/one2n_contrast.py
index 57feea7..7ad3cb3 100644
--- a/contrast/one2n_contrast.py
+++ b/contrast/one2n_contrast.py
@@ -325,7 +325,7 @@ def one2n_deleted(all_list):

-def one2n_return(all_list, basepath):
+def one2n_return(all_list):
     corrpairs, corr_similarity, errpairs, err_similarity = [], [], [], []
     for s_list in all_list:
@@ -410,7 +410,7 @@ def test_rpath_return():
     savepath = r'D:\DetectTracking\contrast\result'

     all_list = read_returnGoods_file(return_bfile)
-    corrpairs, errpairs, _, _ = one2n_return(all_list, basepath)
+    corrpairs, errpairs, _, _ = one2n_return(all_list)
     for corrpair in corrpairs:
         GetoutPath = os.path.join(basepath, corrpair[0])
         InputPath = os.path.join(basepath, corrpair[1])
@@ -435,7 +435,7 @@ def test_one2n():
        savepath: pr曲线保存路径
     '''
     # fpath = r'\\192.168.1.28\share\测试_202406\deletedBarcode\other'  # deletedBarcode.txt
-    fpath = r'\\192.168.1.28\share\测试_202406\returnGoods\all'  # returnGoods.txt
+    fpath = r'\\192.168.1.28\share\测试_202406\1108_展厅模型v800测试'  # returnGoods.txt
     savepath = r'\\192.168.1.28\share\测试_202406\deletedBarcode\illustration'

     if os.path.isdir(fpath):
@@ -476,9 +476,9 @@ def test_one2n():
             plt1.savefig(os.path.join(savepath, file+'_pr.png'))
             # plt1.close()

-            # plt2 = showHist(err_similarity, correct_similarity)
-            # plt2.show()
-            # plt2.savefig(os.path.join(savepath, file+'_hist.png'))
+            plt2 = showHist(err_similarity, correct_similarity)
+            plt2.show()
+            plt2.savefig(os.path.join(savepath, file+'_hist.png'))
             # plt.close()

@@ -486,7 +486,7 @@ def test_one2n():
 if __name__ == '__main__':
     # test_one2n()
     test_rpath_return()   # returnGoods.txt
-    test_rpath_deleted()  # deleteBarcode.txt
+    # test_rpath_deleted() # deleteBarcode.txt

     # try:
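Reviewer note: dropping the unused basepath parameter from one2n_return is consistent with the 1:n decision being purely similarity-driven. The rule, as also applied in one2one_onsite.py further down, reduces to an argmax over gallery similarities; a toy restatement under that assumption (names hypothetical):

    def one2n_decision(barcodes, similars, scanned):
        # pick the gallery barcode with the highest similarity (the 1:n rule)
        i = max(range(len(similars)), key=lambda k: similars[k])
        return barcodes[i] == scanned, similars[i]

    ok, sim = one2n_decision(["6901", "6902"], [0.52, 0.71], "6902")  # toy data -> (True, 0.71)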
diff --git a/contrast/one2one_contrast.py b/contrast/one2one_contrast.py
index 289d4fd..1d61aed 100644
--- a/contrast/one2one_contrast.py
+++ b/contrast/one2one_contrast.py
@@ -49,7 +49,8 @@ from datetime import datetime
 sys.path.append(r"D:\DetectTracking")
 from tracking.utils.read_data import extract_data, read_tracking_output, read_one2one_simi, read_deletedBarcode_file
-from genfeats import genfeatures, stdfeat_infer
+from config import config as conf
+from genfeats import model_init, genfeatures, stdfeat_infer

 IMG_FORMAT = ['.bmp', '.jpg', '.jpeg', '.png']
@@ -546,10 +547,10 @@ def test_one2one():

-
+    model = model_init(conf)
     '''==== 1. 生成标准特征集, 只需运行一次 ==============='''
-    genfeatures(stdSamplePath, stdBarcodePath, stdFeaturePath, bcdSet)
+    genfeatures(model, stdSamplePath, stdBarcodePath, stdFeaturePath, bcdSet)
     print("stdFeats have generated and saved!")
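Reviewer note: this hunk imports model_init and genfeatures from genfeats, but the earlier genfeats.py hunks comment out model_init and rename genfeatures to gen_bcd_features, so test_one2one will fail at import until it is updated. Separately, one2one_pr below parses process.data via read_similar; a toy record illustrating only the fields the code actually indexes (keys taken from the code, all values made up):

    SimiDict = {
        "one2one": [{"barcode": "6924743915848", "similar": 0.83}],
        "one2n":   [{"event": "evt_001", "barcode": "6924743915848",
                     "similar": 0.83, "type": "return"}],
    }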
diff --git a/contrast/one2one_onsite.py b/contrast/one2one_onsite.py
index a11769e..94c8c3e 100644
--- a/contrast/one2one_onsite.py
+++ b/contrast/one2one_onsite.py
@@ -7,7 +7,12 @@ Created on Wed Sep 11 11:57:30 2024
 """
 import os
 import numpy as np
+from pathlib import Path
 import matplotlib.pyplot as plt
+import sys
+
+sys.path.append(r"D:\DetectTracking")
+from tracking.utils.read_data import read_similar

 def read_one2one_data(filepath):
     simiList = []
@@ -85,7 +90,7 @@ def plot_pr_curve(matrix):

     pass

-def main():
+def test_compare():
     filepaths = [r"\\192.168.1.28\share\测试_202406\0913_扫A放B\0913_1\OneToOneCompare.txt",
                  r"\\192.168.1.28\share\测试_202406\0913_扫A放B\0913_2\OneToOneCompare.txt",
                  r"\\192.168.1.28\share\测试_202406\0914_扫A放B\0914_1\OneToOneCompare.txt",
@@ -99,10 +104,251 @@ def test_compare():
     plot_pr_curve(simiList)

+def one2one_pr(paths):
+    paths = Path(paths)
+    evtpaths = [p for p in paths.iterdir() if p.is_dir() and len(p.name.split('_'))>=2]
+
+    events, similars = [], []
+
+    ##===================================== scan-A-put-A and scan-A-put-B scenarios
+    one2oneAA, one2oneAB = [], []
+
+    ##===================================== exhibition-hall 1:N
+    tp_events, fn_events, fp_events, tn_events = [], [], [], []
+    tp_simi, fn_simi, tn_simi, fp_simi = [], [], [], []
+
+    ##===================================== 1:n
+    tpevents, fnevents, fpevents, tnevents = [], [], [], []
+    tpsimi, fnsimi, tnsimi, fpsimi = [], [], [], []
+
+    for path in evtpaths:
+        barcode = path.stem.split('_')[-1]
+        datapath = path.joinpath('process.data')
+
+        if not barcode.isdigit() or len(barcode)<10: continue
+        if not datapath.is_file(): continue
+
+        try:
+            SimiDict = read_similar(datapath)
+        except Exception as e:
+            print(f"{path.stem}, Error: {e}")
+
+        one2one = SimiDict['one2one']
+        one2n = SimiDict['one2n']
+
+        barcodes, similars = [], []
+        for dt in one2one:
+            barcodes.append(dt['barcode'])
+            similars.append(dt['similar'])
+
+        if len(barcodes)!=len(similars) or len(barcodes)==0:
+            continue
+
+        ##===================================== scan-A-put-A and scan-A-put-B scenarios
+        simAA = [similars[i] for i in range(len(barcodes)) if barcodes[i]==barcode]
+        simAB = [similars[i] for i in range(len(barcodes)) if barcodes[i]!=barcode]
+
+        one2oneAA.extend(simAA)
+        one2oneAB.extend(simAB)
+
+        ##===================================== exhibition-hall 1:N
+        max_idx = similars.index(max(similars))
+        max_sim = similars[max_idx]
+        # max_bcd = barcodes[max_idx]
+
+        for i in range(len(one2one)):
+            bcd, simi = barcodes[i], similars[i]
+            if bcd==barcode and simi==max_sim:
+                tp_simi.append(simi)
+                tp_events.append(path.stem)
+            elif bcd==barcode and simi!=max_sim:
+                fn_simi.append(simi)
+                fn_events.append(path.stem)
+            elif bcd!=barcode and simi!=max_sim:
+                tn_simi.append(simi)
+                tn_events.append(path.stem)
+            else:
+                fp_simi.append(simi)
+                fp_events.append(path.stem)
+
+        ##===================================== 1:n
+        events, evt_barcodes, evt_similars, evt_types = [], [], [], []
+        for dt in one2n:
+            events.append(dt["event"])
+            evt_barcodes.append(dt["barcode"])
+            evt_similars.append(dt["similar"])
+            evt_types.append(dt["type"])
+
+        if len(events)!=len(evt_barcodes) or len(evt_barcodes)!=len(evt_similars) \
+            or len(evt_similars)!=len(evt_types) or len(events)==0: continue
+
+        maxsim = evt_similars[evt_similars.index(max(evt_similars))]
+        for i in range(len(one2n)):
+            bcd, simi = evt_barcodes[i], evt_similars[i]
+
+            if bcd==barcode and simi==maxsim:
+                tpsimi.append(simi)
+                tpevents.append(path.stem)
+            elif bcd==barcode and simi!=maxsim:
+                fnsimi.append(simi)
+                fnevents.append(path.stem)
+            elif bcd!=barcode and simi!=maxsim:
+                tnsimi.append(simi)
+                tnevents.append(path.stem)
+            else:
+                fpsimi.append(simi)
+                fpevents.append(path.stem)
+
+    '''Naming convention:
+        1:1          1:n        1:N
+        TP_          TP         TPX
+        PPrecise_    PPrecise   PPreciseX
+        tpsimi       tp_simi
+    '''
+
+    ''' 1:1 storage'''
+    PPrecise_, PRecall_ = [], []
+    NPrecise_, NRecall_ = [], []
+
+    ''' 1:n storage'''
+    PPrecise, PRecall = [], []
+    NPrecise, NRecall = [], []
+
+    ''' exhibition-hall 1:N storage'''
+    PPreciseX, PRecallX = [], []
+    NPreciseX, NRecallX = [], []
+
+    Thresh = np.linspace(-0.2, 1, 100)
+    for th in Thresh:
+        '''============================= 1:1'''
+        TP_ = sum(np.array(one2oneAA) >= th)
+        FP_ = sum(np.array(one2oneAB) >= th)
+        FN_ = sum(np.array(one2oneAA) < th)
+        TN_ = sum(np.array(one2oneAB) < th)
+        PPrecise_.append(TP_/(TP_+FP_+1e-6))
+        PRecall_.append(TP_/(TP_+FN_+1e-6))
+        NPrecise_.append(TN_/(TN_+FN_+1e-6))
+        NRecall_.append(TN_/(TN_+FP_+1e-6))
+
+        '''============================= 1:n'''
+        TP = sum(np.array(tpsimi) >= th)
+        FP = sum(np.array(fpsimi) >= th)
+        FN = sum(np.array(fnsimi) < th)
+        TN = sum(np.array(tnsimi) < th)
+        PPrecise.append(TP/(TP+FP+1e-6))
+        PRecall.append(TP/(TP+FN+1e-6))
+        NPrecise.append(TN/(TN+FN+1e-6))
+        NRecall.append(TN/(TN+FP+1e-6))
+
+        '''============================= exhibition-hall 1:N'''
+        TPX = sum(np.array(tp_simi) >= th)
+        FPX = sum(np.array(fp_simi) >= th)
+        FNX = sum(np.array(fn_simi) < th)
+        TNX = sum(np.array(tn_simi) < th)
+        PPreciseX.append(TPX/(TPX+FPX+1e-6))
+        PRecallX.append(TPX/(TPX+FNX+1e-6))
+        NPreciseX.append(TNX/(TNX+FNX+1e-6))
+        NRecallX.append(TNX/(TNX+FPX+1e-6))
+
+    '''============================= 1:1 curves'''
+    fig, ax = plt.subplots()
+    ax.plot(Thresh, PPrecise_, 'r', label='Precise_Pos: TP/TPFP')
+    ax.plot(Thresh, PRecall_, 'b', label='Recall_Pos: TP/TPFN')
+    ax.plot(Thresh, NPrecise_, 'g', label='Precise_Neg: TN/TNFP')
+    ax.plot(Thresh, NRecall_, 'c', label='Recall_Neg: TN/TNFN')
+    ax.set_xlim([0, 1])
+    ax.set_ylim([0, 1])
+    ax.grid(True)
+    ax.set_title('Precise & Recall')
+    ax.set_xlabel(f"Num: {len(evtpaths)}")
+    ax.legend()
+    plt.show()
+
+    '''============================= 1:1 histograms'''
+    fig, axes = plt.subplots(2, 1)
+    axes[0].hist(np.array(one2oneAA), bins=60, edgecolor='black')
+    axes[0].set_xlim([-0.2, 1])
+    axes[0].set_title('AA')
+    axes[1].hist(np.array(one2oneAB), bins=60, edgecolor='black')
+    axes[1].set_xlim([-0.2, 1])
+    axes[1].set_title('AB')
+    plt.show()
+
+    '''============================= 1:n curves'''
+    fig, ax = plt.subplots()
+    ax.plot(Thresh, PPrecise, 'r', label='Precise_Pos: TP/TPFP')
+    ax.plot(Thresh, PRecall, 'b', label='Recall_Pos: TP/TPFN')
+    ax.plot(Thresh, NPrecise, 'g', label='Precise_Neg: TN/TNFP')
+    ax.plot(Thresh, NRecall, 'c', label='Recall_Neg: TN/TNFN')
+    ax.set_xlim([0, 1])
+    ax.set_ylim([0, 1])
+    ax.grid(True)
+    ax.set_title('Precise & Recall')
+    ax.set_xlabel(f"Num: {len(evtpaths)}")
+    ax.legend()
+    plt.show()
+
+    '''============================= 1:n histograms'''
+    fig, axes = plt.subplots(2, 2)
+    axes[0, 0].hist(tpsimi, bins=60, edgecolor='black')
+    axes[0, 0].set_xlim([-0.2, 1])
+    axes[0, 0].set_title('TP')
+    axes[0, 1].hist(fpsimi, bins=60, edgecolor='black')
+    axes[0, 1].set_xlim([-0.2, 1])
+    axes[0, 1].set_title('FP')
+    axes[1, 0].hist(tnsimi, bins=60, edgecolor='black')
+    axes[1, 0].set_xlim([-0.2, 1])
+    axes[1, 0].set_title('TN')
+    axes[1, 1].hist(fnsimi, bins=60, edgecolor='black')
+    axes[1, 1].set_xlim([-0.2, 1])
+    axes[1, 1].set_title('FN')
+    plt.show()
+
+    '''============================= exhibition-hall 1:N curves'''
+    fig, ax = plt.subplots()
+    ax.plot(Thresh, PPreciseX, 'r', label='Precise_Pos: TP/TPFP')
+    ax.plot(Thresh, PRecallX, 'b', label='Recall_Pos: TP/TPFN')
+    ax.plot(Thresh, NPreciseX, 'g', label='Precise_Neg: TN/TNFP')
+    ax.plot(Thresh, NRecallX, 'c', label='Recall_Neg: TN/TNFN')
+    ax.set_xlim([0, 1])
+    ax.set_ylim([0, 1])
+    ax.grid(True)
+    ax.set_title('Precise & Recall')
+    ax.set_xlabel(f"Num: {len(evtpaths)}")
+    ax.legend()
+    plt.show()
+
+    '''============================= exhibition-hall 1:N histograms'''
+    fig, axes = plt.subplots(2, 2)
+    axes[0, 0].hist(tp_simi, bins=60, edgecolor='black')
+    axes[0, 0].set_xlim([-0.2, 1])
+    axes[0, 0].set_title('TP')
+    axes[0, 1].hist(fp_simi, bins=60, edgecolor='black')
+    axes[0, 1].set_xlim([-0.2, 1])
+    axes[0, 1].set_title('FP')
+    axes[1, 0].hist(tn_simi, bins=60, edgecolor='black')
+    axes[1, 0].set_xlim([-0.2, 1])
+    axes[1, 0].set_title('TN')
+    axes[1, 1].hist(fn_simi, bins=60, edgecolor='black')
+    axes[1, 1].set_xlim([-0.2, 1])
+    axes[1, 1].set_title('FN')
+    plt.show()
+
+    print('Done!')
+
+
 if __name__ == "__main__":
-    main()
+    evtpaths = r"\\192.168.1.28\share\测试视频数据以及日志\各模块测试记录\展厅测试\1120_展厅模型v801测试\扫A放A"
+    one2one_pr(evtpaths)
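Reviewer note: the threshold sweep in one2one_pr repeats one pattern three times (1:1, 1:n, 1:N). A condensed, runnable restatement of the positive-class arm, on toy similarity lists, to make the formula easy to check in isolation:

    import numpy as np

    def pr_at(pos, neg, th):
        # positive-class precision/recall at one threshold, as in one2one_pr
        tp = np.sum(np.array(pos) >= th); fp = np.sum(np.array(neg) >= th)
        fn = np.sum(np.array(pos) < th)
        return tp / (tp + fp + 1e-6), tp / (tp + fn + 1e-6)

    for th in np.linspace(-0.2, 1, 5):          # coarse toy sweep
        print(round(th, 2), pr_at([0.8, 0.7], [0.3, 0.6], th))

The 1e-6 term mirrors the code's guard against empty bins at extreme thresholds.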
diff --git a/contrast/说明文档.txt b/contrast/说明文档.txt
deleted file mode 100644
index e69de29..0000000
diff --git a/event_time_specify.py b/event_time_specify.py
index 8a421da..b0f1568 100644
--- a/event_time_specify.py
+++ b/event_time_specify.py
@@ -11,13 +11,10 @@ import numpy as np
 import matplotlib.pyplot as plt
 from move_detect import MoveDetect

-
 import sys
 sys.path.append(r"D:\DetectTracking")
 # from tracking.utils.read_data import extract_data, read_deletedBarcode_file, read_tracking_output, read_weight_timeConsuming
-
-
 from tracking.utils.read_data import read_weight_timeConsuming

 def str_to_float_arr(s):
@@ -118,7 +115,14 @@ def extract_data_1(datapath):

 def devide_motion_state(tboxes, width):
-    '''frameTstamp: 用于标记当前相机视野内用购物车运动状态变化'''
+    '''frameTstamp: marks shopping-cart motion-state changes within the current camera view
+
+       Hand states:
+           0: no hand present
+           1: hand present
+           2: hand present and in a given state (static)
+    '''

     periods = []
     if len(tboxes) < width:
@@ -131,28 +135,22 @@ def devide_motion_state(tboxes, width):

     state = np.zeros((fnum, 2), dtype=np.int64)
     frameState = np.concatenate((frameTstamp, state), axis = 1).astype(np.int64)
+    handState = np.concatenate((frameTstamp, state), axis = 1).astype(np.int64)

     mtrackFid = {}
+    handFid = {}
     '''frameState 标记由图像判断的购物车状态:0: 静止,1: 运动'''
     for idx in range(width, fnum+1):
-        lboxes = np.concatenate(fboxes[idx-width:idx], axis = 0)
-
+        idx0 = idx-width
+
+        lboxes = np.concatenate(fboxes[idx0:idx], axis = 0)
         md = MoveDetect(lboxes)
         md.classify()

-        # if idx==60:
-        #     print('a')
-
         ## track.during 二元素组, 表征在该时间片段内,轨迹 track 的起止时间,数值用 boxes[:, 7]
         for track in md.track_motion:
-            if track.cls == 0: continue
-
-            f1, f2 = track.during
-
+            # if track.cls == 0: continue
+            f1, f2 = track.during
             idx1 = set(np.where(frameState[:,0] >= f1)[0])
             idx2 = set(np.where(frameState[:,0] <= f2)[0])
             idx3 = list(idx1.intersection(idx2))
@@ -164,7 +162,25 @@ def devide_motion_state(tboxes, width):
             frameState[idx-1, 3] = 1
             frameState[idx3, 2] = 1

+        for track in md.hand_tracks:
+            f11, f22 = track.during
+            idx11 = set(np.where(handState[:,0] >= f11)[0])
+            idx22 = set(np.where(handState[:,0] <= f22)[0])
+            idx33 = list(idx11.intersection(idx22))
+
+            '''mark hand presence'''
+            handState[idx33, 2] = 1
+
+            '''future improvement: is_static could be replaced by a dedicated hand-state classifier'''
+            if track.is_static(70) and len(idx33)>1:
+                '''mark a static hand'''
+                handState[idx33, 2] = 2
+

     '''状态变化输出'''
     for tid, fid in mtrackFid.items():
@@ -172,16 +188,11 @@ def devide_motion_state(tboxes, width):
         fstate[list(fid), 0] = tid

         frameState = np.concatenate((frameState, fstate), axis = 1).astype(np.int64)
-
-
-
-    return frameState
+
+    return frameState, handState


-def state_measure(periods, weights, spath=None):
+def state_measure(periods, weights, hands, spath=None):
     '''两种状态:static、motion, (t0, t1)
             t0: static ----> motion
@@ -269,15 +280,16 @@ def main():

     '''====================图像运动分析===================='''
     win_width = 12
-    periods = []
+    periods, hands = [], []
     for ctype, tboxes, _ in tracker_boxes:
-        period = devide_motion_state(tboxes, win_width)
+        period, handState = devide_motion_state(tboxes, win_width)
         periods.append((ctype, period))
+        hands.append((ctype, handState))
     print('done!')

     '''===============重力、图像信息融合==================='''
-    state_measure(periods, weights)
+    state_measure(periods, weights, hands)


 if __name__ == "__main__":
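Reviewer note: devide_motion_state now returns a parallel handState array whose third column encodes 0/1/2 (hand absent / present / present and static). A toy sketch of the interval-marking pattern it uses, with made-up frame ids, to show how a track's during span is projected onto frame rows:

    import numpy as np

    handState = np.zeros((10, 4), dtype=np.int64)
    handState[:, 0] = np.arange(10)            # column 0: frame id
    f11, f22 = 3, 6                            # toy hand-track span (track.during)
    idx11 = set(np.where(handState[:, 0] >= f11)[0])
    idx22 = set(np.where(handState[:, 0] <= f22)[0])
    idx33 = list(idx11.intersection(idx22))
    handState[idx33, 2] = 1                    # hand present on frames 3..6
    # handState[idx33, 2] = 2 would mark a static hand, per is_static(70)
    print(handState[:, 2])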
diff --git a/imgs_inference.py b/imgs_inference.py
index ed314f3..69d903f 100644
--- a/imgs_inference.py
+++ b/imgs_inference.py
@@ -39,13 +39,20 @@ from tracking.trackers import BOTSORT, BYTETracker
 from tracking.utils.showtrack import drawtracks
 from hands.hand_inference import hand_pose
 
-from tracking.trackers.reid.reid_interface import ReIDInterface
-from tracking.trackers.reid.config import config as ReIDConfig
+# from tracking.trackers.reid.reid_interface import ReIDInterface
+# from tracking.trackers.reid.config import config as ReIDConfig
+# ReIDEncoder = ReIDInterface(ReIDConfig)
+
+from contrast.feat_extract.config import config as conf
+from contrast.feat_extract.inference import FeatsInterface
+ReIDEncoder = FeatsInterface(conf)
 
-ReIDEncoder = ReIDInterface(ReIDConfig)
 
 IMG_FORMATS = 'bmp', 'dng', 'jpeg', 'jpg', 'mpo', 'png', 'tif', 'tiff', 'webp', 'pfm'  # include image suffixes
 VID_FORMATS = 'asf', 'avi', 'gif', 'm4v', 'mkv', 'mov', 'mp4', 'mpeg', 'mpg', 'ts', 'wmv'  # include video suffixes
 
+
+'''================== rotate input images =================='''
 class LoadImages:
     # YOLOv5 image/video dataloader, i.e. `python detect.py --source image.jpg/vid.mp4`
     def __init__(self, files, img_size=640, stride=32, auto=True, transforms=None, vid_stride=1):
diff --git a/move_detect.py b/move_detect.py
index 4701fdb..7103ed6 100644
--- a/move_detect.py
+++ b/move_detect.py
@@ -138,7 +138,7 @@ class TrackFrag:
         return False
 
-    def is_static(self):
+    def is_static(self, THRESH=50):
         box1 = self.boxes[0, :4]
         box2 = self.boxes[-1, :4]
 
@@ -148,7 +148,7 @@ class TrackFrag:
         ptd2 = np.linalg.norm((ptd[2], ptd[1]))
         ptd3 = np.linalg.norm((ptd[0], ptd[3]))
         ptd4 = np.linalg.norm((ptd[2], ptd[3]))
-        condt1 = ptd1<50 and ptd2<50 and ptd3<50 and ptd4<50
+        condt1 = ptd1<THRESH and ptd2<THRESH and ptd3<THRESH and ptd4<THRESH
 
@@ ... @@ class MoveDetect:
+        self.hand_tracks = [t for t in self.tracks if t.cls==0 and len(t.boxes)>=3]
+
     def draw(self):
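The newly parameterized is_static compares a track fragment's first and last boxes: all four corner displacements must stay below THRESH pixels. Restated as a standalone function so the effect of the new THRESH argument is visible (assuming box rows are laid out as [x1, y1, x2, y2, ...], as in the surrounding code):

import numpy as np

def is_static(boxes, THRESH=50):
    """True if all four corner displacements between the first and last
    box of a track stay below THRESH pixels."""
    box1, box2 = boxes[0, :4], boxes[-1, :4]
    ptd = box2 - box1                          # (dx1, dy1, dx2, dy2)
    ptd1 = np.linalg.norm((ptd[0], ptd[1]))    # top-left corner displacement
    ptd2 = np.linalg.norm((ptd[2], ptd[1]))    # top-right
    ptd3 = np.linalg.norm((ptd[0], ptd[3]))    # bottom-left
    ptd4 = np.linalg.norm((ptd[2], ptd[3]))    # bottom-right
    return all(d < THRESH for d in (ptd1, ptd2, ptd3, ptd4))

The call site in event_time_specify.py passes is_static(70), loosening the default 50-pixel bound for hand tracks.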
diff --git a/pipeline.py b/pipeline.py
index e013fd6..354563c 100644
--- a/pipeline.py
+++ b/pipeline.py
@@ -16,12 +16,6 @@ from tracking.dotrack.dotracks_front import doFrontTracks
 from tracking.utils.drawtracks import plot_frameID_y2, draw_all_trajectories
 from utils.getsource import get_image_pairs, get_video_pairs
 
-
-std_feature_path = r"\\192.168.1.28\share\测试_202406\contrast\std_features_2192_ft32vsft16"
-
 def get_interbcd_inputenents():
     bcdpath = r"\\192.168.1.28\share\测试_202406\contrast\std_barcodes_2192"
     eventpath = r"\\192.168.1.28\share\测试_202406\0918"
@@ -49,8 +43,7 @@ def pipeline(eventpath, stdfeat_path=None, SourceType = "image"):
     outputs:
     '''
-    SourceType = "image" # image
-
+    # SourceType = "image" # image
     # eventpath = r"\\192.168.1.28\share\测试_202406\0918\images1\20240918-110822-1bc3902e-5a8e-4e23-8eca-fb3f02738551_6938314601726"
 
     savepath = r"D:\contrast\detect"
@@ -71,16 +64,14 @@ def pipeline(eventpath, stdfeat_path=None, SourceType = "image"):
 
     event_tracks = []
-    for vpath in vpaths:
-
+    for vpath in vpaths:
         '''事件结果文件夹'''
         save_dir_event = Path(savepath) / Path(eventname)
         if isinstance(vpath, list):
             save_dir_video = save_dir_event / Path("images")
         else:
             save_dir_video = save_dir_event / Path(str(Path(vpath).stem))
-
-
+
         if not save_dir_video.exists():
             save_dir_video.mkdir(parents=True, exist_ok=True)
@@ -112,6 +103,11 @@ def pipeline(eventpath, stdfeat_path=None, SourceType = "image"):
     for CamerType, vts in event_tracks:
         if CamerType == 'front':
             edgeline = cv2.imread("./tracking/shopcart/cart_tempt/board_ftmp_line.png")
+
+            h, w = edgeline.shape[:2]
+            nh, nw = h//2, w//2
+            edgeline = cv2.resize(edgeline, (nw, nh), interpolation=cv2.INTER_AREA)
+
             img_tracking = draw_all_trajectories(vts, edgeline, save_dir_event, CamerType, draw5p=True)
             illus[0] = img_tracking
@@ -121,6 +117,11 @@ def pipeline(eventpath, stdfeat_path=None, SourceType = "image"):
         if CamerType == 'back':
             edgeline = cv2.imread("./tracking/shopcart/cart_tempt/edgeline.png")
+
+            h, w = edgeline.shape[:2]
+            nh, nw = h//2, w//2
+            edgeline = cv2.resize(edgeline, (nw, nh), interpolation=cv2.INTER_AREA)
+
             img_tracking = draw_all_trajectories(vts, edgeline, save_dir_event, CamerType, draw5p=True)
             illus[1] = img_tracking
@@ -177,18 +178,14 @@ def main_loop():
 
 def main():
     eventpath = r"D:\datasets\ym\exhibition\175836"
+
+    eventpath = r"\\192.168.1.28\share\测试视频数据以及日志\各模块测试记录\展厅测试\1120_展厅模型v801测试\扫A放A\20241121-144855-dce94b09-1100-43f1-92e8-33a1b538b159_6924743915848_6924743915848"
+
     SourceType = 'image'
     stdfeat_path = None
 
     pipeline(eventpath, stdfeat_path, SourceType)
-
-
-
+
 
 if __name__ == "__main__":
diff --git a/time_devide.py b/time_devide.py
index 28d4d65..c035bd6 100644
--- a/time_devide.py
+++ b/time_devide.py
@@ -14,18 +14,14 @@ import glob
 import numpy as np
 import copy
-
 import matplotlib.pyplot as plt
 from imgs_inference import run_yolo
 from event_time_specify import devide_motion_state#, state_measure
 from tracking.utils.read_data import read_seneor
-
-
 # IMG_FORMATS = 'bmp', 'dng', 'jpeg', 'jpg', 'mpo', 'png', 'tif', 'tiff', 'webp', 'pfm'  # include image suffixes
 # VID_FORMATS = 'asf', 'avi', 'gif', 'm4v', 'mkv', 'mov', 'mp4', 'mpeg', 'mpg', 'ts', 'wmv'  # include video suffixes
-
 def filesort(p):
     '''
     需将图像文件名标准化
@@ -104,7 +100,7 @@ def rerename(filePath=None):
         os.rename(os.path.join(filePath, file), os.path.join(filePath, newname))
 
-def state_measure(periods, weights, spath=None):
+def state_measure(periods, weights, hands, spath=None):
     '''
     数据类型 后摄: 0, 前摄: 1, CV综合: 2, 重力: 9
@@ -120,7 +116,7 @@ def state_measure(periods, weights, spath=None):
       0    1    2    3    4    5    6    7
     单摄状态1:基于运动轨迹的起止点确定的运动区间
-    单摄状态2: 基于滑动窗口的起止点确定的运动区间
+    单摄状态2: motion interval determined by the sliding window's start/end points (the window endpoint)
     重力(t0, t1): 重力波动的精确时间区间,基于重力波动的起止点,而不是仅依赖重力稳定时间
     重力(t0', t1'): 根据加退购对重力波动窗口进行扩展,扩展应该涵盖购物事件的发生过程
     方案:
@@ -131,6 +127,7 @@ def state_measure(periods, weights, spath=None):
     # BackType = 0            # 后摄数据类型
     # FrontType = 1           # 前摄数据类型
     CameraType = 2            # CV数据综合类型
+    HandType = 3              # hand-data type
     WeightType = 9            # 重力数据类型
     WeightStableThresh = 7.5  # 单位:g,重力稳定状态下的最大波动范围
     WeightWinWidth = 10       # 单位:重力数据点数,该值和采样间隔关联,重力稳定时间设定为500ms = WeightWinWidth * 采样间隔
@@ -166,7 +163,8 @@ def state_measure(periods, weights, spath=None):
         '''对重力波动区间进行标记,并标记最新一次重力稳定值的索引和相应重力值'''
         if wmax - wmin > WeightStableThresh:
             weights[i2, 4] = w_max
-        elif i2==0:
+
+        if i2==0:
             i0=0
             wi0 = weights[i0, 3]
         elif i2>0 and weights[i2-1, 4]==0:
@@ -207,6 +205,10 @@ def state_measure(periods, weights, spath=None):
     state1 = frstate_1[:,2][:, None]
     state11 = frstate_1[:,3][:, None]
+
+
+
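The constants above encode the gravity-stability rule: a window of WeightWinWidth samples counts as fluctuating when its max-min spread exceeds WeightStableThresh grams. A standalone sketch of that trailing-window test (illustrative helper name; the real logic sits inline in state_measure):

import numpy as np

def mark_weight_fluctuation(wvalues, WeightWinWidth=10, WeightStableThresh=7.5):
    """Flag each sample whose trailing window of WeightWinWidth values
    fluctuates by more than WeightStableThresh grams."""
    w = np.asarray(wvalues, dtype=float)
    flags = np.zeros(len(w), dtype=np.int64)
    for i in range(WeightWinWidth - 1, len(w)):
        win = w[i - WeightWinWidth + 1 : i + 1]
        if win.max() - win.min() > WeightStableThresh:
            flags[i] = 1   # still fluctuating: not a stable-weight point
    return flags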
@@ -268,10 +270,7 @@ def state_measure(periods, weights, spath=None):
             if ctype != ctype0 and state !=0 and state0 !=0:
                 time_stream[i, 7] = 1
 
-    MotionSlice = []
-
-    motion_slice = []
-
+    MotionSlice, motion_slice = [], []
     t0 = time_stream[0, 7]
     for i in range(1, len(time_stream)):
         f0 = time_stream[i-1, 7]
@@ -285,14 +284,65 @@ def state_measure(periods, weights, spath=None):
             motion_slice.append((t0, t1))
         else:
             print(f"T0: {t0}, T1: {t1}")
+
+    '''===================== 4. merge and sort hand data ====================='''
+    BackType, hdstate_0 = hands[0]
+    FrontType, hdstate_1 = hands[1]
+    n0, n1 = len(hdstate_0), len(hdstate_1)
+    idx0 = np.array([i for i in range(0, n0)], dtype=np.int64)[:, None]
+    idx1 = np.array([i for i in range(0, n1)], dtype=np.int64)[:, None]
+    ctype0 = BackType * np.ones((n0, 1), dtype=np.int64)
+    ctype1 = FrontType * np.ones((n1, 1), dtype=np.int64)
+    hstamp0 = hdstate_0[:,1][:, None]
+    hstamp1 = hdstate_1[:,1][:, None]
+    state0 = hdstate_0[:,2][:, None]
+    state1 = hdstate_1[:,2][:, None]
+
+    '''columns: sequence index, camera type, timestamp, per-camera hand state,
+       fused hand state, 2 reserved slots, fused data type, fused state
+          0            1            2            3        4    5/6?   6    7
+    '''
+    hstream0 = np.concatenate((idx0, ctype0, hstamp0, state0), axis=1)
+    hstream1 = np.concatenate((idx1, ctype1, hstamp1, state1), axis=1)
+    hstream = np.concatenate((hstream0, hstream1), axis=0)
+    hstream = np.concatenate((hstream, np.zeros((len(hstream), 4), dtype=np.int64)), axis=1)
+    hstream[:, 6] = HandType
+    hstream = hstream[np.argsort(hstream[:, 2]), :]
+
+    for i in range(0, len(hstream)):
+        idx, ctype, stamp, state = hstream[i, :4]
+        if i==0:
+            hstream[i, 4] = state
+        if i>0:
+            j = i-1
+            idx0, ctype0, stamp0, state0 = hstream[j, :4]
+            while stamp-stamp0 < CameraTimeInterval and ctype == ctype0 and j>0:
+                j -= 1
+                idx0, ctype0, stamp0, state0 = hstream[j, :4]
+
+            '''OR-combination of the two cameras' states. Since front and back
+               frames are not captured simultaneously, the key is choosing the
+               alignment point: at time i the camera (ctype) has state `state`;
+               the other camera (ctype0 != ctype) contributes state0 from its
+               frame j closest to i.
+            '''
+            if ctype != ctype0 and state0==2:
+                hstream[i, 4] = state0
+            elif ctype != ctype0 and state0==1:
+                hstream[i, 4] = state0
+            else:
+                hstream[i, 4] = state
+
 
-    '''========================== 4 结果显示 ================================'''
+    '''========================== 5. display results ================================'''
     frstate_0[:, 1] = frstate_0[:, 1]-tmin
     frstate_1[:, 1] = frstate_1[:, 1]-tmin
     tstream[:, 2] = tstream[:, 2]-tmin
 
-    fig, (ax1, ax2, ax3, ax4, ax5) = plt.subplots(5, 1)
+    fig, (ax1, ax2, ax3, ax4, ax5, ax6) = plt.subplots(6, 1)
     during = np.max(time_stream[:, 2])
 
     ax1.plot(weights[:, 2]-tmin, weights[:, 3], 'bo-', linewidth=1, markersize=4)
@@ -318,7 +368,12 @@ def state_measure(periods, weights, spath=None):
     ax5.plot(time_stream[:, 2], time_stream[:, 7], 'gx-', linewidth=1, markersize=4)
     ax5.set_xlim([0, during])
     ax5.set_title('Cart State')
-
+
+    ax6.plot(hstream[:, 2]-tmin, hstream[:, 4], 'gx-', linewidth=1, markersize=4)
+    ax6.set_xlim([0, during])
+    ax6.set_title('Hand State')
+
     plt.show()
     if spath:
         plt.savefig(spath)
@@ -352,12 +407,18 @@ def splitevent(imgpath, MotionSlice):
 
 def runyolo():
     eventdirs = r"\\192.168.1.28\share\realtime\eventdata"
     savedir = r"\\192.168.1.28\share\realtime\result"
-
+
+    k = 0
     for edir in os.listdir(eventdirs):
+        edir = "1731316835560"
         source = os.path.join(eventdirs, edir)
         files = filesort(source)
         for flist in files:
            run_yolo(flist, savedir)
+
+        k += 1
+        if k==1:
+            break
 
 def run_tracking(trackboxes, MotionSlice):
     pass
@@ -367,8 +428,8 @@ def run_tracking(trackboxes, MotionSlice):
 
 def show_seri():
-    datapath = r"\\192.168.1.28\share\realtime\eventdata\1728978106733"
-    savedir = r"\\192.168.1.28\share\realtime\result"
+    datapath = r"\\192.168.1.28\share\realtime\eventdata\1731316835560"
+    savedir = r"D:\DetectTracking\realtime"
 
     imgdir = datapath.split('\\')[-2] + "_" + datapath.split('\\')[-1]
@@ -378,7 +439,7 @@ def show_seri():
 
     datafiles = sorted(glob.glob(os.path.join(datapath, '*.npy')))
 
-    periods, trackboxes = [], []
+    periods, trackboxes, hands = [], [], []
     win_width = 12
     for npypath in datafiles:
         CameraType = Path(npypath).stem.split('_')[-1]
@@ -386,8 +447,9 @@ def show_seri():
         trackboxes.append((CameraType, tkboxes))
 
-        period = devide_motion_state(tkboxes, win_width)
+        period, handState = devide_motion_state(tkboxes, win_width)
         periods.append((int(CameraType), period))
+        hands.append((int(CameraType), handState))
 
@@ -401,23 +463,19 @@ def show_seri():
 
     '''===============重力、图像信息融合==================='''
     spath = os.path.join(savedir, f"{eventname}.png" )
-    tmin, MotionSlice = state_measure(periods, weights, spath)
-
-
+    tmin, MotionSlice = state_measure(periods, weights, hands, spath)
+
     # 第一次运行时用于更改图像文件名
     # rerename(imgpath)
     # rename(imgpath, tmin)
-
     # splitevent(imgpath, MotionSlice)
 
 def main():
-    # runyolo()
+    runyolo()
     show_seri()
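The fusion loop above walks back from each hand-state sample to the closest earlier sample and lets the other camera's observation (state 1 or 2) take precedence. Extracted into a hypothetical helper for clarity; CameraTimeInterval is assumed to be defined among state_measure's constants (it is not shown in this excerpt):

import numpy as np

def fuse_hand_states(hstream, CameraTimeInterval=100):
    """hstream columns: 0 idx, 1 camera type, 2 timestamp, 3 per-camera state.
    Writes the fused hand state into column 4, mirroring the loop above."""
    for i in range(len(hstream)):
        idx, ctype, stamp, state = hstream[i, :4]
        if i == 0:
            hstream[i, 4] = state
            continue
        j = i - 1
        idx0, ctype0, stamp0, state0 = hstream[j, :4]
        # walk back past same-camera samples that are too close in time
        while stamp - stamp0 < CameraTimeInterval and ctype == ctype0 and j > 0:
            j -= 1
            idx0, ctype0, stamp0, state0 = hstream[j, :4]
        # the other camera's hand observation, when present, takes precedence
        if ctype != ctype0 and state0 in (1, 2):
            hstream[i, 4] = state0
        else:
            hstream[i, 4] = state
    return hstream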
diff --git a/track_reid.py b/track_reid.py
index 3e687be..9ccf665 100644
--- a/track_reid.py
+++ b/track_reid.py
@@ -60,53 +60,57 @@ from tracking.trackers import BOTSORT, BYTETracker
 from tracking.utils.showtrack import drawtracks
 from hands.hand_inference import hand_pose
 
-from tracking.trackers.reid.reid_interface import ReIDInterface
-from tracking.trackers.reid.config import config as ReIDConfig
-ReIDEncoder = ReIDInterface(ReIDConfig)
+from contrast.feat_extract.config import config as conf
+from contrast.feat_extract.inference import FeatsInterface
+ReIDEncoder = FeatsInterface(conf)
+
+# from tracking.trackers.reid.reid_interface import ReIDInterface
+# from tracking.trackers.reid.config import config as ReIDConfig
+# ReIDEncoder = ReIDInterface(ReIDConfig)
 
 # tracker_yaml = r"./tracking/trackers/cfg/botsort.yaml"
 
-def inference_image(image, detections):
-    H, W, _ = np.shape(image)
-    imgs = []
-    batch_patches = []
-    patches = []
-    for d in range(np.size(detections, 0)):
-        tlbr = detections[d, :4].astype(np.int_)
-        tlbr[0] = max(0, tlbr[0])
-        tlbr[1] = max(0, tlbr[1])
-        tlbr[2] = min(W - 1, tlbr[2])
-        tlbr[3] = min(H - 1, tlbr[3])
-        img1 = image[tlbr[1]:tlbr[3], tlbr[0]:tlbr[2], :]
+# def inference_image(image, detections):
+#     H, W, _ = np.shape(image)
+#     imgs = []
+#     batch_patches = []
+#     patches = []
+#     for d in range(np.size(detections, 0)):
+#         tlbr = detections[d, :4].astype(np.int_)
+#         tlbr[0] = max(0, tlbr[0])
+#         tlbr[1] = max(0, tlbr[1])
+#         tlbr[2] = min(W - 1, tlbr[2])
+#         tlbr[3] = min(H - 1, tlbr[3])
+#         img1 = image[tlbr[1]:tlbr[3], tlbr[0]:tlbr[2], :]
 
-        img = img1[:, :, ::-1].copy()  # the model expects RGB inputs
-        patch = ReIDEncoder.transform(img)
+#         img = img1[:, :, ::-1].copy()  # the model expects RGB inputs
+#         patch = ReIDEncoder.transform(img)
 
-        imgs.append(img1)
-        # patch = patch.to(device=self.device).half()
-        if str(ReIDEncoder.device) != "cpu":
-            patch = patch.to(device=ReIDEncoder.device).half()
-        else:
-            patch = patch.to(device=ReIDEncoder.device)
+#         imgs.append(img1)
+#         # patch = patch.to(device=self.device).half()
+#         if str(ReIDEncoder.device) != "cpu":
+#             patch = patch.to(device=ReIDEncoder.device).half()
+#         else:
+#             patch = patch.to(device=ReIDEncoder.device)
 
-        patches.append(patch)
-        if (d + 1) % ReIDEncoder.batch_size == 0:
-            patches = torch.stack(patches, dim=0)
-            batch_patches.append(patches)
-            patches = []
+#         patches.append(patch)
+#         if (d + 1) % ReIDEncoder.batch_size == 0:
+#             patches = torch.stack(patches, dim=0)
+#             batch_patches.append(patches)
+#             patches = []
 
-    if len(patches):
-        patches = torch.stack(patches, dim=0)
-        batch_patches.append(patches)
+#     if len(patches):
+#         patches = torch.stack(patches, dim=0)
+#         batch_patches.append(patches)
 
-    features = np.zeros((0, ReIDEncoder.embedding_size))
-    for patches in batch_patches:
-        pred = ReIDEncoder.model(patches)
-        pred[torch.isinf(pred)] = 1.0
-        feat = pred.cpu().data.numpy()
-        features = np.vstack((features, feat))
+#     features = np.zeros((0, ReIDEncoder.embedding_size))
+#     for patches in batch_patches:
+#         pred = ReIDEncoder.model(patches)
+#         pred[torch.isinf(pred)] = 1.0
+#         feat = pred.cpu().data.numpy()
+#         features = np.vstack((features, feat))
 
-    return imgs, features
+#     return imgs, features
@@ -127,6 +131,7 @@ def init_trackers(tracker_yaml = None, bs=1):
     return trackers
 
+'''=============== used in pipeline.py =================='''
 @smart_inference_mode()
 def yolo_resnet_tracker(
         weights=ROOT / 'yolov5s.pt',  # model path or triton URL
@@ -237,7 +242,9 @@ def yolo_resnet_tracker(
 
                 '''================== 1. 存储 dets/subimgs/features Dict ============='''
-                imgs, features = inference_image(im0, tracks)
+                imgs, features = ReIDEncoder.inference(im0, tracks)
+
+                # imgs, features = inference_image(im0, tracks)
                 # TrackerFeats = np.concatenate([TrackerFeats, features], axis=0)
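The call-site change from inference_image(im0, tracks) to ReIDEncoder.inference(im0, tracks) assumes FeatsInterface exposes an inference(image, detections) method returning a (crops, features) pair, which is exactly the contract the retired inference_image fulfilled. A minimal stand-in with that contract (illustrative only, with a placeholder where the real class runs its embedding model):

import numpy as np

class FeatsInterfaceSketch:
    """Illustrative stand-in for contrast.feat_extract.inference.FeatsInterface:
    crops each detection from the frame and returns (crops, embeddings)."""
    def __init__(self, embedding_size=256):
        self.embedding_size = embedding_size

    def inference(self, image, detections):
        H, W = image.shape[:2]
        crops, feats = [], []
        for det in np.asarray(detections):
            x1, y1, x2, y2 = det[:4].astype(np.int_)
            x1, y1 = max(0, x1), max(0, y1)
            x2, y2 = min(W - 1, x2), min(H - 1, y2)
            crops.append(image[y1:y2, x1:x2, :])
            f = np.random.rand(self.embedding_size)   # placeholder for the model forward pass
            feats.append(f / np.linalg.norm(f))       # L2-normalized embedding
        if not feats:
            return crops, np.zeros((0, self.embedding_size))
        return crops, np.vstack(feats)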
@@ -499,7 +506,8 @@ def run(
                 tracks[:, 7] = frameId
 
                 '''================== 1. 存储 dets/subimgs/features Dict ============='''
-                imgs, features = inference_image(im0, tracks)
+                # imgs, features = inference_image(im0, tracks)
+                imgs, features = ReIDEncoder.inference(im0, tracks)
 
                 TrackerFeats = np.concatenate([TrackerFeats, features], axis=0)
@@ -681,32 +689,17 @@ def main(opt):
     optdict = vars(opt)
 
     p = r"D:\datasets\ym"
-    p = r"D:\datasets\ym\exhibition\153112511_0_seek_105.mp4"
+    p = r"D:\exhibition\images\153112511_0_seek_105.mp4"
+
+    optdict["project"] = r"D:\exhibition\result"
 
     files = []
-    k = 0
     if os.path.isdir(p):
         files.extend(sorted(glob.glob(os.path.join(p, '*.*'))))
-        for file in files:
-
-            optdict["source"] = file
-            run(**optdict)
-
-            k += 1
-            if k == 1:
-                break
+        optdict["source"] = files
     elif os.path.isfile(p):
         optdict["source"] = p
-        run(**vars(opt))
-
-def main_imgdir(opt):
-    check_requirements(ROOT / 'requirements.txt', exclude=('tensorboard', 'thop'))
-    optdict = vars(opt)
-
-    optdict["project"] = r"\\192.168.1.28\share\realtime"
-
-    optdict["source"] = r"\\192.168.1.28\share\realtime\addReturn\add\1728978052624"
     run(**optdict)
@@ -745,7 +738,7 @@ def main_loop(opt):
             # break
     elif os.path.isfile(p):
         optdict["source"] = p
-        run(**vars(opt))
+        run(**optdict)
 
@@ -754,7 +747,6 @@ if __name__ == '__main__':
     opt = parse_opt()
     main(opt)
-    # main_imgdir(opt)
     # main_loop(opt)
 
diff --git a/tracking/module_analysis.py b/tracking/module_analysis.py
index 03a5579..4c653f2 100644
--- a/tracking/module_analysis.py
+++ b/tracking/module_analysis.py
@@ -24,7 +24,7 @@ from tracking.utils.drawtracks import plot_frameID_y2, draw_all_trajectories
 from tracking.utils.read_data import extract_data, read_deletedBarcode_file, read_tracking_output, read_returnGoods_file
 
-from contrast.one2n_contrast import get_relative_paths, one2n_new, read_returnGoods_file
+from contrast.one2n_contrast import get_contrast_paths, one2n_return
 from tracking.utils.annotator import TrackAnnotator
 
 W, H = 1024, 1280
@@ -362,10 +362,10 @@ def main_loop():
         saveimgs = True
 
         if os.path.basename(del_barcode_file).find('deletedBarcode'):
-            relative_paths = get_relative_paths(del_barcode_file, basepath, SavePath, saveimgs)
+            relative_paths = get_contrast_paths(del_barcode_file, basepath, SavePath, saveimgs)
         elif os.path.basename(del_barcode_file).find('returnGoods'):
             blist = read_returnGoods_file(del_barcode_file)
-            errpairs, corrpairs, err_similarity, correct_similarity = one2n_new(blist)
+            errpairs, corrpairs, err_similarity, correct_similarity = one2n_return(blist)
             relative_paths = []
             for getoutevent, inputevent, errevent in errpairs:
                 relative_paths.append(os.path.join(basepath, getoutevent))
@@ -412,13 +412,13 @@ def main():
     SavePath: 包含二级目录,一级目录为轨迹图像;二级目录为与data文件对应的序列图像存储地址。
     '''
     # eventPaths = r'\\192.168.1.28\share\测试_202406\0723\0723_3'
-    eventPaths = r'D:\datasets\ym\exhibition\识别错'
-    savePath = r'D:\contrast\dataset\result'
+    eventPaths = r'\\192.168.1.28\share\测试视频数据以及日志\各模块测试记录\展厅测试\1120_展厅模型v801测试\扫A放A'
+    savePath = r'D:\exhibition\result'
 
     k=0
     for pathname in os.listdir(eventPaths):
-        pathname = "放入薯片识别为辣条"
-
+        pathname = "20241121-144901-fdba61c6-aefa-4b50-876d-5e05998befdc_6920459905012_6920459905012"
+
         eventpath = os.path.join(eventPaths, pathname)
         savepath = os.path.join(savePath, pathname)
         if not os.path.exists(savepath):
diff --git a/tracking/trackers/__pycache__/bot_sort.cpython-39.pyc b/tracking/trackers/__pycache__/bot_sort.cpython-39.pyc
index 0c57999..2078695 100644
Binary files a/tracking/trackers/__pycache__/bot_sort.cpython-39.pyc and b/tracking/trackers/__pycache__/bot_sort.cpython-39.pyc differ
diff --git a/tracking/trackers/bot_sort.py b/tracking/trackers/bot_sort.py
index 7a96d90..40ad90d 100644
--- a/tracking/trackers/bot_sort.py
+++ b/tracking/trackers/bot_sort.py
@@ -10,8 +10,15 @@ from .utils import matching
 # from .utils.gmc import GMC
 from .utils.kalman_filter import KalmanFilterXYWH
 
-from .reid.reid_interface import ReIDInterface
-from .reid.config import config
+# from .reid.reid_interface import ReIDInterface
+# from .reid.config import config
+
+from contrast.feat_extract.inference import FeatsInterface
+from contrast.feat_extract.config import config as conf
+
 
 class BOTrack(STrack):
     shared_kalman = KalmanFilterXYWH()
@@ -111,7 +118,9 @@ class BOTSORT(BYTETracker):
 
         if args.with_reid:
             # Haven't supported BoT-SORT(reid) yet
-            self.encoder = ReIDInterface(config)
+            # self.encoder = ReIDInterface(config)
+
+            self.encoder = FeatsInterface(conf)
 
         # self.gmc = GMC(method=args.gmc_method)  # commented by WQG
@@ -119,13 +128,13 @@ class BOTSORT(BYTETracker):
         """Returns an instance of KalmanFilterXYWH for object tracking."""
         return KalmanFilterXYWH()
 
-    def init_track(self, dets, scores, cls, imgs, features_keep):
+    def init_track(self, dets, scores, cls, image, features_keep):
         """Initialize track with detections, scores, and classes."""
         if len(dets) == 0:
             return []
         if self.args.with_reid and self.encoder is not None:
             if features_keep is None:
-                features_keep = self.encoder.inference(imgs, dets)
+                imgs, features_keep = self.encoder.inference(image, dets)
             return [BOTrack(xyxy, s, c, f) for (xyxy, s, c, f) in zip(dets, scores, cls, features_keep)]  # detections
         else:
diff --git a/tracking/utils/__pycache__/plotting.cpython-39.pyc b/tracking/utils/__pycache__/plotting.cpython-39.pyc
index a0008c1..e0fd814 100644
Binary files a/tracking/utils/__pycache__/plotting.cpython-39.pyc and b/tracking/utils/__pycache__/plotting.cpython-39.pyc differ
diff --git a/tracking/utils/__pycache__/read_data.cpython-39.pyc b/tracking/utils/__pycache__/read_data.cpython-39.pyc
index 3fc17b0..5f04eaa 100644
Binary files a/tracking/utils/__pycache__/read_data.cpython-39.pyc and b/tracking/utils/__pycache__/read_data.cpython-39.pyc differ
diff --git a/tracking/utils/read_data.py b/tracking/utils/read_data.py
index a733fe6..1a12b73 100644
--- a/tracking/utils/read_data.py
+++ b/tracking/utils/read_data.py
@@ -1,9 +1,11 @@
 # -*- coding: utf-8 -*-
 """
 Created on Fri Jul  5 13:59:21 2024
-func: extract_data()
-    读取 Pipeline 各模块的数据,在 read_pipeline_data.py(马晓慧)的基础上完成接口改造
-    
+    function                    file read
+    extract_data()              0/1_track.data
+    read_tracking_output()      0/1_tracking_output.data
+    read_similar()              process.data
+
 @author: ym
 """
 import numpy as np
@@ -275,6 +277,7 @@ def read_returnGoods_file(filePath):
             continue
         if split_flag:
             bcd = label.split('_')[-1]
+            if len(bcd)<8: continue
             # event_list.append(label + '_' + bcd)
             event_list.append(label)
             barcode_list.append(bcd)
@@ -294,47 +297,78 @@
 
-# =============================================================================
-# def read_seneor(filepath):
-#     WeightDict = OrderedDict()
-#     with open(filepath, 'r', encoding='utf-8') as f:
-#         lines = f.readlines()
-#         for i, line in enumerate(lines):
-#             line = line.strip()
-# 
-#             keyword = line.split(':')[0]
-#             value = line.split(':')[1]
-# 
-#             vdata = [float(s) for s in value.split(',') if len(s)]
-# 
-#             WeightDict[keyword] = vdata[-1]
-# 
-#     return WeightDict
-# =============================================================================
-
-def read_one2one_simi(filePath):
+def read_seneor(filepath):
+    WeightDict = OrderedDict()
+    with open(filepath, 'r', encoding='utf-8') as f:
+        lines = f.readlines()
+        clean_lines = [line.strip().replace("'", '').replace('"', '') for line in lines]
+        for i, line in enumerate(clean_lines):
+            line = line.strip()
+
+            keyword = line.split(':')[0]
+            value = line.split(':')[1]
+
+            vdata = [float(s) for s in value.split(',') if len(s)]
+
+            WeightDict[keyword] = vdata[-1]
+    return WeightDict
+
+def read_similar(filePath):
     SimiDict = {}
+    SimiDict['one2one'] = []
+    SimiDict['one2n'] = []
+
     with open(filePath, 'r', encoding='utf-8') as f:
         lines = f.readlines()
-        flag = False
-        for i, line in enumerate(lines):
+        clean_lines = [line.strip().replace("'", '').replace('"', '') for line in lines]
+        one2one_list, one2n_list = [], []
+
+        Flag_1to1, Flag_1ton = False, False
+        for i, line in enumerate(clean_lines):
             line = line.strip()
-            if line.find('barcode:')<0 and not flag:
+            if line.endswith(','):
+                line = line[:-1]
+            Dict = {}
+
+            if not line:
+                if len(one2one_list): SimiDict['one2one'] = one2one_list
+                if len(one2n_list): SimiDict['one2n'] = one2n_list
+                one2one_list, one2n_list = [], []
+                Flag_1to1, Flag_1ton = False, False
                 continue
-            if line.find('barcode:')==0 :
-                flag = True
+
+            if line.find('oneToOne')>=0:
+                Flag_1to1, Flag_1ton = True, False
+                continue
+            if line.find('oneTon')>=0:
+                Flag_1to1, Flag_1ton = False, True
                 continue
 
-            # if line.endswith(','):
-            #     line = line[:-1]
-            if flag:
+            if Flag_1to1:
                 barcode = line.split(',')[0].strip()
                 value = line.split(',')[1].split(':')[1].strip()
-                SimiDict[barcode] = float(value)
+                Dict['barcode'] = barcode
+                Dict['similar'] = float(value)
+                one2one_list.append(Dict)
+                continue
 
-            if flag and not line:
-                flag = False
+            if Flag_1ton:
+                label = line.split(':')[0].strip()
+                value = line.split(':')[1].strip()
+
+                bcd = label.split('_')[-1]
+                if len(bcd)<8: continue
+
+                Dict['event'] = label
+                Dict['barcode'] = bcd
+                Dict['similar'] = float(value.split(',')[0])
+                Dict['type'] = value.split('=')[-1]
+                one2n_list.append(Dict)
+
+    if len(one2one_list): SimiDict['one2one'] = one2one_list
+    if len(one2n_list): SimiDict['one2n'] = one2n_list
 
     return SimiDict
diff --git a/tracking/说明文档.txt b/tracking/说明文档.txt
deleted file mode 100644
index e69de29..0000000
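read_similar expects process.data to contain blank-line-separated blocks headed by oneToOne / oneTon markers. The exact on-disk format is not shown in this patch, so the fragment below is a hypothetical input whose field layout is inferred from the parser; it exists only to make the parsing contract concrete:

sample = """oneToOne
6924743915848, similar: 0.82
6920459905012, similar: 0.37

oneTon
20241121-144855_6924743915848: 0.79, type=add
"""

with open("process.data", "w", encoding="utf-8") as f:
    f.write(sample)

from tracking.utils.read_data import read_similar
simi = read_similar("process.data")
# simi['one2one'] -> [{'barcode': '6924743915848', 'similar': 0.82},
#                     {'barcode': '6920459905012', 'similar': 0.37}]
# simi['one2n']   -> [{'event': '20241121-144855_6924743915848',
#                      'barcode': '6924743915848', 'similar': 0.79, 'type': 'add'}]

One2n entries whose event label ends in a barcode shorter than 8 characters are skipped, matching the same guard added to read_returnGoods_file above.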