Compare commits

10 commits:
e044c85a04
798c596acc
183299c06b
0ccfd0151f
f14faa323e
9b5b135fa3
0efe8892f3
b657be729b
64248b1557
bfe7bc0fd5
10  .gitignore  (vendored)

@@ -1,11 +1,14 @@
 # Repo-specific GitIgnore ----------------------------------------------------------------------------------------------
+*.jpg
 *.jpeg
+*.png
 *.bmp
 *.tif
 *.tiff
 *.heic
 *.JPG
 *.JPEG
+*.PNG
 *.BMP
 *.TIF
 *.TIFF
@@ -23,7 +26,7 @@
 *.pickle
 *.npy
 *.csv
-*.pyc
+

 # for tracking ---------------------------------------------------------------
@@ -36,10 +39,6 @@ tracking/data/boxes_imgs/*
 tracking/data/trackfeats/*
 tracking/data/tracks/*
 tracking/data/handlocal/*
-contrast/feat_extract/model/__pycache__/*
-std_img*
-.gitignore
-*/__pycache__/*
 ckpts/*
 doc

@@ -55,6 +54,7 @@ VOC/

 # Neural Network weights -----------------------------------------------------------------------------------------------
 *.weights
+*.pt
 *.pth
 *.pb
 *.onnx
176  Qwen_agent.py  (deleted)

@@ -1,176 +0,0 @@
from transformers import Qwen2VLForConditionalGeneration, AutoTokenizer, AutoProcessor
from accelerate import init_empty_weights, load_checkpoint_in_model
from stream_pipeline import stream_pipeline
from PIL import Image
from io import BytesIO
import torch
import ast
import requests
import random

# default: Load the model on the available device(s)
model = Qwen2VLForConditionalGeneration.from_pretrained(
    "Qwen/Qwen2-VL-7B-Instruct",
    torch_dtype=torch.bfloat16,
    attn_implementation="flash_attention_2",
    device_map="auto"
)

# default processer
processor = AutoProcessor.from_pretrained("Qwen/Qwen2-VL-7B-Instruct", attn_implementation="flash_attention_2")

def qwen_prompt(img_list, messages):
    # Preparation for inference
    text = processor.apply_chat_template(
        messages, tokenize=False, add_generation_prompt=True
    )
    inputs = processor(
        text=[text],
        images=img_list,
        padding=True,
        return_tensors="pt",
    )
    inputs = inputs.to("cuda")

    # Inference: Generation of the output
    with torch.no_grad():
        generated_ids = model.generate(**inputs, max_new_tokens=256)
    generated_ids_trimmed = [
        out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids)
    ]
    output_text = processor.batch_decode(
        generated_ids_trimmed, add_special_tokens=False, skip_special_tokens=True, clean_up_tokenization_spaces=False
    )
    del inputs
    del generated_ids
    del generated_ids_trimmed
    torch.cuda.empty_cache()

    return output_text[0]


def get_best_image(track_imgs):
    if len(track_imgs) >= 5:
        track_imgs = random.sample(track_imgs, 5)
    img_frames = []
    for i in range(len(track_imgs)):
        content = {}
        content['type'] = 'image'
        content['min_pixels'] = 224 * 224
        content['max_pixels'] = 800 * 800
        img_frames.append(content)

    messages = [
        {
            "role": "system",
            "content": "你是一个在超市工作的chatbot,你现在需要帮助顾客找到一张质量最好的商品图像。一个好的商品图像需要满足以下条件: \
                1. 文字清晰且连贯。\
                2. 商品图案清晰可识别。\
                3. 商品可提取的描述信息丰富。\
                基于以上条件,从多张图像中筛选出最好的图像,然后以dict输出该图像的索引信息,key为'index'。"
        },
        {
            "role": "system",
            "content": img_frames,
        },
    ]

    output_text = qwen_prompt(track_imgs, messages)
    output_dict = ast.literal_eval(output_text.strip('```python\n'))
    if output_dict['index'] > len(track_imgs):
        output_dict['index'] = len(track_imgs)
    best_img = track_imgs[output_dict['index'] - 1]

    return best_img

def get_product_description(std_img, track_imgs):
    messages = [
        {
            "role": "system",
            "content": "你是一个在超市工作的chatbot,你现在需要提取图像中商品的信息,信息需要按照以下python dict的格式输出,如果 \
                信息模糊不清则输出'未知': \
                { \
                'item1': {\
                    'Text': 第一张图像中商品中提取出的文字信息, \
                    'Color': 第一张图像中商品的颜色, \
                    'Shape': 第一张图像中商品的形状, \
                    'Material': 第一张图像中商品的材质, \
                    'Category': 第一张图像中商品的类别, \
                } \
                'item2': {\
                    'Text': 第二张图像中商品中提取出的文字信息, \
                    'Color': 第二张图像中商品的颜色, \
                    'Shape': 第二张图像中商品的形状, \
                    'Material': 第二张图像中商品的材质, \
                    'Category': 第二张图像中商品的类别, \
                } \
                'is_Same': 首先判断'Color'是否一致,如果不一致则返回False,如果一致则判断是否以上两个dict的['Text', 'Shape', 'Material', 'Category']key中至少有3个相同则输出True,\
                否则输出False。 \
                } \
                "
        },
        {
            "role": "user",
            "content": [
                {
                    "type": "image",
                    "min_pixels": 224 * 224,
                    "max_pixels": 800 * 800,
                },
                {
                    "type": "image",
                    "min_pixels": 224 * 224,
                    "max_pixels": 800 * 800,
                },
            ],
        },
        # {
        #     "role": "user",
        #     "content": "以python dict的形式输出第二张图像的比对信息。"
        #     "content": "输出一个list,list的内容包含两张图像提取出的dict信息。"
        # }
    ]
    best_img = get_best_image(track_imgs)
    if std_img is not None:
        img_list = [std_img, best_img]
    else:
        img_list = [best_img, best_img]

    output_text = qwen_prompt(img_list, messages)
    contrast_pair = ast.literal_eval(output_text.strip('```python\n'))

    return contrast_pair

def item_analysis(stream_dict):
    track_imgs = stream_pipeline(stream_dict)
    if len(track_imgs) == 0:
        return {}
    std_img = None
    if stream_dict['goodsPic'] is not None:
        # response = requests.get(stream_dict['goodsPic'])
        # std_img = Image.open(BytesIO(response.content))
        std_img = Image.open(stream_dict['goodsPic']).convert("RGB")
    description_dict = get_product_description(std_img, track_imgs)

    return description_dict

def main():
    # sample input dict
    stream_dict = {
        "goodsName" : "优诺优丝黄桃果粒风味发酵乳",
        "measureProperty" : 0,
        "qty" : 1,
        "price" : 25.9,
        "weight": 560,  # 单位克
        "barcode": "6931806801024",
        "video" : "https://ieemoo-ai.obs.cn-east-3.myhuaweicloud.com/videos/20231009/04/04_20231009-082149_21f2ca35-f2c2-4386-8497-3e7a3b407f03_4901872831197.mp4",
        "goodsPic" : "https://ieemoo-storage.obs.cn-east-3.myhuaweicloud.com/lhpic/6931806801024.jpg",
        "measureUnit" : "组",
        "goodsSpec" : "405g"
    }

    result = item_analysis(stream_dict)
    print(result)

if __name__ == "__main__":
    main()
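A side note on the deleted agent: it parsed the VLM reply with str.strip over the fence characters, but str.strip removes any of the listed characters from both ends rather than the fence as a unit, so replies that start or end with braces or newlines can be mangled. A minimal, hedged alternative; the helper name parse_fenced_dict is illustrative, not part of the repo:

```python
import ast
import re

FENCE = "`" * 3  # the literal triple-backtick fence

def parse_fenced_dict(reply: str) -> dict:
    """Extract a Python dict literal from a model reply, tolerating code fences."""
    # Prefer the fenced body if present; fall back to the raw reply.
    pattern = FENCE + r"(?:python)?\s*(.*?)\s*" + FENCE
    m = re.search(pattern, reply, flags=re.S)
    body = m.group(1) if m else reply.strip()
    return ast.literal_eval(body)

print(parse_fenced_dict(FENCE + "python\n{'index': 2}\n" + FENCE))  # {'index': 2}
```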
BIN  __pycache__/export.cpython-312.pyc  (new file)
BIN  __pycache__/move_detect.cpython-312.pyc  (new file)
BIN  __pycache__/pipeline_01.cpython-312.pyc  (new file)
BIN  __pycache__/pipeline_01.cpython-39.pyc  (new file)
BIN  __pycache__/track_reid.cpython-312.pyc  (new file)
359  bakeup/pipeline.py  (new file)

@@ -0,0 +1,359 @@
# -*- coding: utf-8 -*-
"""
Created on Sun Sep 29 08:59:21 2024

@author: ym
"""
import os
# import sys
import cv2
import pickle
import numpy as np
from pathlib import Path
from scipy.spatial.distance import cdist
from track_reid import yolo_resnet_tracker, yolov10_resnet_tracker

from tracking.dotrack.dotracks_back import doBackTracks
from tracking.dotrack.dotracks_front import doFrontTracks
from tracking.utils.drawtracks import plot_frameID_y2, draw_all_trajectories
from utils.getsource import get_image_pairs, get_video_pairs
from tracking.utils.read_data import read_similar


def save_subimgs(imgdict, boxes, spath, ctype, featdict = None):
    '''
    当前 box 特征和该轨迹前一个 box 特征的相似度,可用于和跟踪序列中的相似度进行比较
    '''
    boxes = boxes[np.argsort(boxes[:, 7])]
    for i in range(len(boxes)):
        simi = None
        tid, fid, bid = int(boxes[i, 4]), int(boxes[i, 7]), int(boxes[i, 8])

        if i>0:
            _, fid0, bid0 = int(boxes[i-1, 4]), int(boxes[i-1, 7]), int(boxes[i-1, 8])
            if f"{fid0}_{bid0}" in featdict.keys() and f"{fid}_{bid}" in featdict.keys():
                feat0 = featdict[f"{fid0}_{bid0}"]
                feat1 = featdict[f"{fid}_{bid}"]
                simi = 1 - np.maximum(0.0, cdist(feat0[None, :], feat1[None, :], "cosine"))[0][0]

        img = imgdict[f"{fid}_{bid}"]
        imgpath = spath / f"{ctype}_tid{tid}-{fid}-{bid}.png"
        if simi is not None:
            imgpath = spath / f"{ctype}_tid{tid}-{fid}-{bid}_sim{simi:.2f}.png"

        cv2.imwrite(imgpath, img)


def save_subimgs_1(imgdict, boxes, spath, ctype, simidict = None):
    '''
    当前 box 特征和该轨迹 smooth_feat 特征的相似度, yolo_resnet_tracker 函数中,
    采用该方式记录特征相似度
    '''
    for i in range(len(boxes)):
        tid, fid, bid = int(boxes[i, 4]), int(boxes[i, 7]), int(boxes[i, 8])

        key = f"{fid}_{bid}"
        img = imgdict[key]
        imgpath = spath / f"{ctype}_tid{tid}-{fid}-{bid}.png"
        if simidict is not None and key in simidict.keys():
            imgpath = spath / f"{ctype}_tid{tid}-{fid}-{bid}_sim{simidict[key]:.2f}.png"

        cv2.imwrite(imgpath, img)


def pipeline(
        eventpath,
        savepath,
        SourceType,
        weights,
        YoloVersion="V5"
    ):
    '''
    eventpath: 单个事件的存储路径
    '''
    optdict = {}
    optdict["weights"] = weights

    if SourceType == "video":
        vpaths = get_video_pairs(eventpath)
    elif SourceType == "image":
        vpaths = get_image_pairs(eventpath)
    event_tracks = []

    ## 构造购物事件字典
    evtname = Path(eventpath).stem
    barcode = evtname.split('_')[-1] if len(evtname.split('_'))>=2 \
                                    and len(evtname.split('_')[-1])>=8 \
                                    and evtname.split('_')[-1].isdigit() else ''
    '''事件结果存储文件夹'''
    if not savepath:
        savepath = Path(__file__).resolve().parents[0] / "events_result"

    savepath_pipeline = Path(savepath) / Path("Yolos_Tracking") / evtname


    """ShoppingDict pickle 文件保存地址 """
    savepath_spdict = Path(savepath) / "ShoppingDict_pkfile"
    if not savepath_spdict.exists():
        savepath_spdict.mkdir(parents=True, exist_ok=True)
    pf_path = Path(savepath_spdict) / Path(str(evtname)+".pickle")

    # if pf_path.exists():
    #     print(f"Pickle file have saved: {evtname}.pickle")
    #     return

    '''====================== 构造 ShoppingDict 模块 ======================='''
    ShoppingDict = {"eventPath": eventpath,
                    "eventName": evtname,
                    "barcode": barcode,
                    "eventType": '',  # "input", "output", "other"
                    "frontCamera": {},
                    "backCamera": {},
                    "one2n": []  #
                    }
    yrtDict = {}


    procpath = Path(eventpath).joinpath('process.data')
    if procpath.is_file():
        SimiDict = read_similar(procpath)
        ShoppingDict["one2n"] = SimiDict['one2n']


    for vpath in vpaths:
        '''================= 1. 构造相机事件字典 ================='''
        CameraEvent = {"cameraType": '',  # "front", "back"
                       "videoPath": '',
                       "imagePaths": [],
                       "yoloResnetTracker": [],
                       "tracking": [],
                       }

        if isinstance(vpath, list):
            CameraEvent["imagePaths"] = vpath
            bname = os.path.basename(vpath[0])
        if not isinstance(vpath, list):
            CameraEvent["videoPath"] = vpath
            bname = os.path.basename(vpath).split('.')[0]
        if bname.split('_')[0] == "0" or bname.find('back')>=0:
            CameraEvent["cameraType"] = "back"
        if bname.split('_')[0] == "1" or bname.find('front')>=0:
            CameraEvent["cameraType"] = "front"

        '''================= 2. 事件结果存储文件夹 ================='''
        if isinstance(vpath, list):
            savepath_pipeline_imgs = savepath_pipeline / Path("images")
        else:
            savepath_pipeline_imgs = savepath_pipeline / Path(str(Path(vpath).stem))

        if not savepath_pipeline_imgs.exists():
            savepath_pipeline_imgs.mkdir(parents=True, exist_ok=True)

        savepath_pipeline_subimgs = savepath_pipeline / Path("subimgs")
        if not savepath_pipeline_subimgs.exists():
            savepath_pipeline_subimgs.mkdir(parents=True, exist_ok=True)


        '''================= 3. Yolo + Resnet + Tracker ================='''
        optdict["source"] = vpath
        optdict["save_dir"] = savepath_pipeline_imgs
        optdict["is_save_img"] = True
        optdict["is_save_video"] = True


        if YoloVersion == "V5":
            yrtOut = yolo_resnet_tracker(**optdict)
        elif YoloVersion == "V10":
            yrtOut = yolov10_resnet_tracker(**optdict)


        yrtOut_save = []
        for frdict in yrtOut:
            fr_dict = {}
            for k, v in frdict.items():
                if k != "imgs":
                    fr_dict[k]=v
            yrtOut_save.append(fr_dict)
        CameraEvent["yoloResnetTracker"] = yrtOut_save

        # CameraEvent["yoloResnetTracker"] = yrtOut

        '''================= 4. tracking ================='''
        '''(1) 生成用于 tracking 模块的 boxes、feats'''
        bboxes = np.empty((0, 6), dtype=np.float64)
        trackerboxes = np.empty((0, 9), dtype=np.float64)
        trackefeats = {}
        for frameDict in yrtOut:
            tboxes = frameDict["tboxes"]
            ffeats = frameDict["feats"]

            boxes = frameDict["bboxes"]
            bboxes = np.concatenate((bboxes, np.array(boxes)), axis=0)
            trackerboxes = np.concatenate((trackerboxes, np.array(tboxes)), axis=0)
            for i in range(len(tboxes)):
                fid, bid = int(tboxes[i, 7]), int(tboxes[i, 8])
                trackefeats.update({f"{fid}_{bid}": ffeats[f"{fid}_{bid}"]})


        '''(2) tracking, 后摄'''
        if CameraEvent["cameraType"] == "back":
            vts = doBackTracks(trackerboxes, trackefeats)
            vts.classify()
            event_tracks.append(("back", vts))

            CameraEvent["tracking"] = vts
            ShoppingDict["backCamera"] = CameraEvent

            yrtDict["backyrt"] = yrtOut

        '''(2) tracking, 前摄'''
        if CameraEvent["cameraType"] == "front":
            vts = doFrontTracks(trackerboxes, trackefeats)
            vts.classify()
            event_tracks.append(("front", vts))

            CameraEvent["tracking"] = vts
            ShoppingDict["frontCamera"] = CameraEvent

            yrtDict["frontyrt"] = yrtOut

    '''========================== 保存模块 ================================='''
    '''(1) 保存 ShoppingDict 事件'''
    with open(str(pf_path), 'wb') as f:
        pickle.dump(ShoppingDict, f)

    '''(2) 保存 Tracking 输出的运动轨迹子图,并记录相似度'''
    for CamerType, vts in event_tracks:
        if len(vts.tracks)==0: continue
        if CamerType == 'front':
            # yolos = ShoppingDict["frontCamera"]["yoloResnetTracker"]

            yolos = yrtDict["frontyrt"]
            ctype = 1
        if CamerType == 'back':
            # yolos = ShoppingDict["backCamera"]["yoloResnetTracker"]

            yolos = yrtDict["backyrt"]
            ctype = 0

        imgdict, featdict, simidict = {}, {}, {}
        for y in yolos:
            imgdict.update(y["imgs"])
            featdict.update(y["feats"])
            simidict.update(y["featsimi"])

        for track in vts.Residual:
            if isinstance(track, np.ndarray):
                save_subimgs(imgdict, track, savepath_pipeline_subimgs, ctype, featdict)
            else:
                save_subimgs(imgdict, track.slt_boxes, savepath_pipeline_subimgs, ctype, featdict)

    '''(3) 轨迹显示与保存'''
    illus = [None, None]
    for CamerType, vts in event_tracks:
        if len(vts.tracks)==0: continue

        if CamerType == 'front':
            edgeline = cv2.imread("./tracking/shopcart/cart_tempt/board_ftmp_line.png")

            h, w = edgeline.shape[:2]
            # nh, nw = h//2, w//2
            # edgeline = cv2.resize(edgeline, (nw, nh), interpolation=cv2.INTER_AREA)

            img_tracking = draw_all_trajectories(vts, edgeline, savepath_pipeline, CamerType, draw5p=True)
            illus[0] = img_tracking

            plt = plot_frameID_y2(vts)
            plt.savefig(os.path.join(savepath_pipeline, "front_y2.png"))

        if CamerType == 'back':
            edgeline = cv2.imread("./tracking/shopcart/cart_tempt/edgeline.png")

            h, w = edgeline.shape[:2]
            # nh, nw = h//2, w//2
            # edgeline = cv2.resize(edgeline, (nw, nh), interpolation=cv2.INTER_AREA)

            img_tracking = draw_all_trajectories(vts, edgeline, savepath_pipeline, CamerType, draw5p=True)
            illus[1] = img_tracking

    illus = [im for im in illus if im is not None]
    if len(illus):
        img_cat = np.concatenate(illus, axis = 1)
        if len(illus)==2:
            H, W = img_cat.shape[:2]
            cv2.line(img_cat, (int(W/2), 0), (int(W/2), int(H)), (128, 128, 255), 3)

        trajpath = os.path.join(savepath_pipeline, "trajectory.png")
        cv2.imwrite(trajpath, img_cat)


def execute_pipeline(evtdir = r"D:\datasets\ym\后台数据\unzip",
                     source_type = "video",  # video, image,
                     save_path = r"D:\work\result_pipeline",
                     yolo_ver = "V10",  # V10, V5

                     weight_yolo_v5 = r'./ckpts/best_cls10_0906.pt',
                     weight_yolo_v10 = r'./ckpts/best_v10s_width0375_1205.pt',
                     k=0
                     ):
    '''
    运行函数 pipeline(),遍历事件文件夹,每个文件夹是一个事件
    '''
    parmDict = {}
    parmDict["SourceType"] = source_type
    parmDict["savepath"] = save_path
    parmDict["YoloVersion"] = yolo_ver
    if parmDict["YoloVersion"] == "V5":
        parmDict["weights"] = weight_yolo_v5
    elif parmDict["YoloVersion"] == "V10":
        parmDict["weights"] = weight_yolo_v10

    evtdir = Path(evtdir)
    errEvents = []
    for item in evtdir.iterdir():
        if item.is_dir():
            item = evtdir/Path("20250310-175352-741")
            parmDict["eventpath"] = item
            pipeline(**parmDict)
            # try:
            #     pipeline(**parmDict)
            # except Exception as e:
            #     errEvents.append(str(item))
            k+=1
            if k==1:
                break

    errfile = os.path.join(parmDict["savepath"], 'error_events.txt')
    with open(errfile, 'w', encoding='utf-8') as f:
        for line in errEvents:
            f.write(line + '\n')


if __name__ == "__main__":
    execute_pipeline()

    # spath_v10 = r"D:\work\result_pipeline_v10"
    # spath_v5 = r"D:\work\result_pipeline_v5"
    # execute_pipeline(save_path=spath_v10, yolo_ver="V10")
    # execute_pipeline(save_path=spath_v5, yolo_ver="V5")

    datapath = r'/home/wqg/dataset/test_dataset/base_dataset/single_event/source/'
    savepath = r'/home/wqg/dataset/pipeline/contrast/single_event_V5'

    execute_pipeline(evtdir = datapath,
                     DataType = "raw",  # raw, pkl
                     kk=1,
                     source_type = "video",  # video, image,
                     save_path = savepath,
                     yolo_ver = "V10",  # V10, V5
                     weight_yolo_v5 = r'./ckpts/best_cls10_0906.pt',
                     weight_yolo_v10 = r'./ckpts/best_v10s_width0375_1205.pt',
                     saveimages = False
                     )
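Throughout this file, box-to-box similarity is computed as 1 minus the cosine distance between 256-d ReID feature vectors, clamped so it never exceeds 1. A minimal standalone sketch of the expression save_subimgs uses; the feature values and dimension here are illustrative only:

```python
import numpy as np
from scipy.spatial.distance import cdist

rng = np.random.default_rng(0)
feat0 = rng.normal(size=256)   # stand-in for the previous box's feature in a track
feat1 = rng.normal(size=256)   # stand-in for the current box's feature

# Same expression as save_subimgs: clamp the distance at 0, then invert.
# cdist needs 2-D inputs, hence the [None, :] row reshape.
simi = 1 - np.maximum(0.0, cdist(feat0[None, :], feat1[None, :], "cosine"))[0][0]
print(f"sim={simi:.2f}")        # near 0 for unrelated random vectors, up to 1.0 for identical ones
```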
BIN  contrast/__pycache__/__init__.cpython-312.pyc  (new file)
BIN  contrast/__pycache__/event_test.cpython-312.pyc  (new file)
BIN  contrast/__pycache__/genfeats.cpython-312.pyc  (new file)
BIN  contrast/__pycache__/one2n_contrast.cpython-312.pyc  (new file)
@@ -9,17 +9,19 @@ import cv2
 import json
 import numpy as np
 import matplotlib.pyplot as plt
+from pathlib import Path

 from matplotlib import rcParams
 from matplotlib.font_manager import FontProperties
 from scipy.spatial.distance import cdist
 from utils.event import ShoppingEvent, save_data
+from utils.calsimi import calsimi_vs_stdfeat_new, get_topk_percent, cluster
+from utils.tools import get_evtList
+import pickle

 rcParams['font.sans-serif'] = ['SimHei']  # 用黑体显示中文
 rcParams['axes.unicode_minus'] = False  # 正确显示负号


 '''*********** USearch ***********'''
 def read_usearch():
     stdFeaturePath = r"D:\contrast\stdlib\v11_test.json"
@@ -35,13 +37,12 @@ def read_usearch():

     return stdlib

-def get_eventlist():
+def get_eventlist_errortxt(evtpaths):
     '''
     读取一次测试中的错误事件
     '''
-    evtpaths = r"\\192.168.1.28\share\测试视频数据以及日志\算法全流程测试\202412\images"
-    text1 = "one2n_Error.txt"
-    text2 = "one2SN_Error.txt"
+    text1 = "one_2_Small_n_Error.txt"
+    text2 = "one_2_Big_N_Error.txt"
+
     events = []
     text = (text1, text2)
     for txt in text:
@@ -54,15 +55,15 @@ def get_eventlist_errortxt(evtpaths):
             fpath=os.path.join(evtpaths, line)
             events.append(fpath)

     events = list(set(events))

     return events

-def single_event():
-    events = get_eventlist()
+def save_eventdata():
+    evtpaths = r"/home/wqg/dataset/test_dataset/performence_dataset/"
+    events = get_eventlist_errortxt(evtpaths)

     '''定义当前事件存储地址及生成相应文件件'''
     resultPath = r"\\192.168.1.28\share\测试视频数据以及日志\算法全流程测试\202412\result\single_event"
@@ -74,120 +75,148 @@ def single_event():
-def get_topk_percent(data, k):
-    """
-    获取数据中最大的 k% 的元素
-    """
-    # 将数据转换为 NumPy 数组
-    if isinstance(data, list):
-        data = np.array(data)
-
-    percentile = np.percentile(data, 100-k)
-    top_k_percent = data[data >= percentile]
-
-    return top_k_percent
-def cluster(data, thresh=0.15):
-    # data = np.array([0.1, 0.13, 0.7, 0.2, 0.8, 0.52, 0.3, 0.7, 0.85, 0.58])
-    # data = np.array([0.1, 0.13, 0.2, 0.3])
-    # data = np.array([0.1])
-
-    if isinstance(data, list):
-        data = np.array(data)
-
-    data1 = np.sort(data)
-    cluter, Cluters, = [data1[0]], []
-    for i in range(1, len(data1)):
-        if data1[i] - data1[i-1] < thresh:
-            cluter.append(data1[i])
-        else:
-            Cluters.append(cluter)
-            cluter = [data1[i]]
-    Cluters.append(cluter)
-
-    clt_center = []
-    for clt in Cluters:
-        ## 是否应该在此处限制一个聚类中的最小轨迹样本数,应该将该因素放在轨迹分析中
-        # if len(clt)>=3:
-        #     clt_center.append(np.mean(clt))
-        clt_center.append(np.mean(clt))
-
-    # print(clt_center)
-
-    return clt_center
-
-def calc_simil(event, stdfeat):
-
-    def calsiml(feat1, feat2):
-        '''轨迹样本和标准特征集样本相似度的选择策略'''
-        matrix = 1 - cdist(feat1, feat2, 'cosine')
-        simi_max = []
-        for i in range(len(matrix)):
-            sim = np.mean(get_topk_percent(matrix[i, :], 75))
-            simi_max.append(sim)
-        cltc_max = cluster(simi_max)
-        Simi = max(cltc_max)
-
-        ## cltc_max为空属于编程考虑不周,应予以排查解决
-        # if len(cltc_max):
-        #     Simi = max(cltc_max)
-        # else:
-        #     Simi = 0 #不应该走到该处
-
-        return Simi
-
-    front_boxes = np.empty((0, 9), dtype=np.float64)    ##和类doTracks兼容
-    front_feats = np.empty((0, 256), dtype=np.float64)  ##和类doTracks兼容
-    for i in range(len(event.front_boxes)):
-        front_boxes = np.concatenate((front_boxes, event.front_boxes[i]), axis=0)
-        front_feats = np.concatenate((front_feats, event.front_feats[i]), axis=0)
-
-    back_boxes = np.empty((0, 9), dtype=np.float64)    ##和类doTracks兼容
-    back_feats = np.empty((0, 256), dtype=np.float64)  ##和类doTracks兼容
-    for i in range(len(event.back_boxes)):
-        back_boxes = np.concatenate((back_boxes, event.back_boxes[i]), axis=0)
-        back_feats = np.concatenate((back_feats, event.back_feats[i]), axis=0)
-
-    if len(front_feats):
-        front_simi = calsiml(front_feats, stdfeat)
-    if len(back_feats):
-        back_simi = calsiml(back_feats, stdfeat)
-
-    '''前后摄相似度融合策略'''
-    if len(front_feats) and len(back_feats):
-        diff_simi = abs(front_simi - back_simi)
-        if diff_simi>0.15:
-            Similar = max([front_simi, back_simi])
-        else:
-            Similar = (front_simi+back_simi)/2
-    elif len(front_feats) and len(back_feats)==0:
-        Similar = front_simi
-    elif len(front_feats)==0 and len(back_feats):
-        Similar = back_simi
-    else:
-        Similar = None  # 在event.front_feats和event.back_feats同时为空时
-
-    return Similar
+# def get_topk_percent(data, k):
+#     """
+#     获取数据中最大的 k% 的元素
+#     """
+#     # 将数据转换为 NumPy 数组
+#     if isinstance(data, list):
+#         data = np.array(data)
+#
+#     percentile = np.percentile(data, 100-k)
+#     top_k_percent = data[data >= percentile]
+#
+#     return top_k_percent
+# def cluster(data, thresh=0.15):
+#     # data = np.array([0.1, 0.13, 0.7, 0.2, 0.8, 0.52, 0.3, 0.7, 0.85, 0.58])
+#     # data = np.array([0.1, 0.13, 0.2, 0.3])
+#     # data = np.array([0.1])
+#
+#     if isinstance(data, list):
+#         data = np.array(data)
+#
+#     data1 = np.sort(data)
+#     cluter, Cluters, = [data1[0]], []
+#     for i in range(1, len(data1)):
+#         if data1[i] - data1[i-1] < thresh:
+#             cluter.append(data1[i])
+#         else:
+#             Cluters.append(cluter)
+#             cluter = [data1[i]]
+#     Cluters.append(cluter)
+#
+#     clt_center = []
+#     for clt in Cluters:
+#         ## 是否应该在此处限制一个聚类中的最小轨迹样本数,应该将该因素放在轨迹分析中
+#         # if len(clt)>=3:
+#         #     clt_center.append(np.mean(clt))
+#         clt_center.append(np.mean(clt))
+#
+#     # print(clt_center)
+#
+#     return clt_center
+
+# def calsimi_vs_stdfeat_new(event, stdfeat):
+#     '''事件与标准库的对比策略
+#     该比对策略是否可以拓展到事件与事件的比对?
+#     '''
+
+#     def calsiml(feat1, feat2, topkp=75, cluth=0.15):
+#         '''轨迹样本和标准特征集样本相似度的选择策略'''
+#         matrix = 1 - cdist(feat1, feat2, 'cosine')
+#         simi_max = []
+#         for i in range(len(matrix)):
+#             sim = np.mean(get_topk_percent(matrix[i, :], topkp))
+#             simi_max.append(sim)
+#         cltc_max = cluster(simi_max, cluth)
+#         Simi = max(cltc_max)
+#
+#         ## cltc_max为空属于编程考虑不周,应予以排查解决
+#         # if len(cltc_max):
+#         #     Simi = max(cltc_max)
+#         # else:
+#         #     Simi = 0 #不应该走到该处
+#
+#         return Simi
+
+#     front_boxes = np.empty((0, 9), dtype=np.float64)    ##和类doTracks兼容
+#     front_feats = np.empty((0, 256), dtype=np.float64)  ##和类doTracks兼容
+#     for i in range(len(event.front_boxes)):
+#         front_boxes = np.concatenate((front_boxes, event.front_boxes[i]), axis=0)
+#         front_feats = np.concatenate((front_feats, event.front_feats[i]), axis=0)

+#     back_boxes = np.empty((0, 9), dtype=np.float64)    ##和类doTracks兼容
+#     back_feats = np.empty((0, 256), dtype=np.float64)  ##和类doTracks兼容
+#     for i in range(len(event.back_boxes)):
+#         back_boxes = np.concatenate((back_boxes, event.back_boxes[i]), axis=0)
+#         back_feats = np.concatenate((back_feats, event.back_feats[i]), axis=0)

+#     if len(front_feats):
+#         front_simi = calsiml(front_feats, stdfeat)
+#     if len(back_feats):
+#         back_simi = calsiml(back_feats, stdfeat)
+
+#     '''前后摄相似度融合策略'''
+#     if len(front_feats) and len(back_feats):
+#         diff_simi = abs(front_simi - back_simi)
+#         if diff_simi>0.15:
+#             Similar = max([front_simi, back_simi])
+#         else:
+#             Similar = (front_simi+back_simi)/2
+#     elif len(front_feats) and len(back_feats)==0:
+#         Similar = front_simi
+#     elif len(front_feats)==0 and len(back_feats):
+#         Similar = back_simi
+#     else:
+#         Similar = None  # 在event.front_feats和event.back_feats同时为空时
+
+#     return Similar

 def simi_matrix():
-    resultPath = r"\\192.168.1.28\share\测试视频数据以及日志\算法全流程测试\202412\result\single_event"
-
-    stdlib = read_usearch()
-    events = get_eventlist()
-    for evtpath in events:
-        evtname = os.path.basename(evtpath)
-        _, barcode = evtname.split("_")
-
-        # 生成事件与相应标准特征集
-        event = ShoppingEvent(evtpath)
-        stdfeat = stdlib[barcode]
-
-        Similar = calc_simil(event, stdfeat)
+    evtpaths = r"/home/wqg/dataset/pipeline/contrast/single_event_V10/evtobjs/"
+    stdfeatPath = r"/home/wqg/dataset/test_dataset/total_barcode/features_json/v11_barcode_0304/"
+    resultPath = r"/home/wqg/dataset/performence_dataset/result/"
+
+    evt_paths, bcdSet = get_evtList(evtpaths)
+
+    ## read std features
+    stdDict={}
+    evtDict = {}
+    for barcode in bcdSet:
+        stdpath = os.path.join(stdfeatPath, f"{barcode}.json")
+        if not os.path.isfile(stdpath):
+            continue
+
+        with open(stdpath, 'r', encoding='utf-8') as f:
+            stddata = json.load(f)
+        feat = np.array(stddata["value"])
+        stdDict[barcode] = feat
+
+    for evtpath in evt_paths:
+        barcode = Path(evtpath).stem.split("_")[-1]
+
+        if barcode not in stdDict.keys():
+            continue
+
+        # try:
+        #     with open(evtpath, 'rb') as f:
+        #         evtdata = pickle.load(f)
+        # except Exception as e:
+        #     print(evtname)
+
+        with open(evtpath, 'rb') as f:
+            event = pickle.load(f)
+
+        stdfeat = stdDict[barcode]
+
+        Similar = calsimi_vs_stdfeat_new(event, stdfeat)

        # 构造 boxes 子图存储路径
        subimgpath = os.path.join(resultPath, f"{event.evtname}", "subimg")
@@ -197,8 +226,6 @@ def simi_matrix():
         if not os.path.exists(histpath):
             os.makedirs(histpath)

-
-
         mean_values, max_values = [], []
         cameras = ('front', 'back')
         fig, ax = plt.subplots(2, 3, figsize=(16, 9), dpi=100)
@@ -218,9 +245,9 @@ def simi_matrix():
                 evtfeat = np.concatenate((evtfeat, event.back_feats[i]), axis=0)
                 imgpaths = event.back_imgpaths

-        assert len(boxes)==len(evtfeat), f"Please check the Event: {evtname}"
+        assert len(boxes)==len(evtfeat), f"Please check the Event: {event.evtname}"
         if len(boxes)==0: continue
-        print(evtname)
+        print(event.evtname)

         matrix = 1 - cdist(evtfeat, stdfeat, 'cosine')
         simi_1d = matrix.flatten()
@@ -310,8 +337,8 @@ def simi_matrix():
             mean_diff = abs(mean_values[1]-mean_values[0])
             ax[0, 1].set_title(f"mean diff: {mean_diff:.3f}")
         if len(max_values)==2:
-            max_values = abs(max_values[1]-max_values[0])
-            ax[0, 2].set_title(f"max diff: {max_values:.3f}")
+            max_diff = abs(max_values[1]-max_values[0])
+            ax[0, 2].set_title(f"max diff: {max_diff:.3f}")
         try:
             fig.suptitle(f"Similar: {Similar:.3f}", fontsize=16)
         except Exception as e:
@@ -320,19 +347,14 @@ def simi_matrix():
         pltpath = os.path.join(subimgpath, f"hist_max_{kpercent}%_.png")
         plt.savefig(pltpath)

-        pltpath1 = os.path.join(histpath, f"{evtname}_.png")
+        pltpath1 = os.path.join(histpath, f"{event.evtname}_.png")
         plt.savefig(pltpath1)

         plt.close()

-
-
-
-
 def main():
     simi_matrix()
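The helpers commented out above (now imported from utils.calsimi, per the new imports at the top of this diff) implement a "top-k percent, then 1-D cluster" selection of track-to-library similarity. A condensed, hedged sketch of the same strategy; the array shapes and random inputs are illustrative only:

```python
import numpy as np
from scipy.spatial.distance import cdist

def get_topk_percent(data, k):
    # keep the largest k% of the values
    data = np.asarray(data)
    return data[data >= np.percentile(data, 100 - k)]

def cluster(data, thresh=0.15):
    # greedy 1-D clustering of sorted values; returns the cluster means
    data = np.sort(np.asarray(data))
    groups, cur = [], [data[0]]
    for a, b in zip(data[:-1], data[1:]):
        if b - a < thresh:
            cur.append(b)
        else:
            groups.append(cur)
            cur = [b]
    groups.append(cur)
    return [np.mean(g) for g in groups]

def calsiml(feat1, feat2, topkp=75, cluth=0.15):
    # per-track-sample score: mean of its top-75% similarities to the std set,
    # then the largest cluster center across samples
    matrix = 1 - cdist(feat1, feat2, "cosine")
    simi_max = [np.mean(get_topk_percent(row, topkp)) for row in matrix]
    return max(cluster(simi_max, cluth))

rng = np.random.default_rng(1)
print(f"{calsiml(rng.normal(size=(8, 256)), rng.normal(size=(40, 256))):.3f}")
```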
BIN  contrast/feat_extract/__pycache__/config.cpython-312.pyc  (new file)
BIN  contrast/feat_extract/__pycache__/inference.cpython-312.pyc  (new file)
@@ -61,8 +61,9 @@ class Config:
     test_val = "D:/比对/cl"
     # test_val = "./data/test_data_100"

-    test_model = "checkpoints/zhanting_res_801.pth"
+    test_model = "checkpoints/best_20250228.pth"
     # test_model = "checkpoints/zhanting_res_801.pth"
+    # test_model = "checkpoints/zhanting_res_abroad_8021.pth"

@@ -48,7 +48,7 @@ class FeatsInterface:
         modpath = os.path.join(curpath, conf.test_model)
         self.model.load_state_dict(torch.load(modpath, map_location=conf.device))
         self.model.eval()
-        print('load model {} '.format(conf.testbackbone))
+        # print('load model {} '.format(conf.testbackbone))

     def inference(self, images, detections=None):
         '''
@@ -62,11 +62,21 @@ class FeatsInterface:
         patches = []
         for i, img in enumerate(images):
             img = img.copy()
-            patch = self.transform(img)
-            if str(self.device) != "cpu":
-                patch = patch.to(device=self.device).half()
-            else:
-                patch = patch.to(device=self.device)
+
+            ## 对 img 进行补黑边,生成新的图像new_img
+            width, height = img.size
+            new_size = max(width, height)
+            new_img = Image.new("RGB", (new_size, new_size), (0, 0, 0))
+            paste_x = (new_size - width) // 2
+            paste_y = (new_size - height) // 2
+            new_img.paste(img, (paste_x, paste_y))
+
+            patch = self.transform(new_img)
+            patch = patch.to(device=self.device)
+            # if str(self.device) != "cpu":
+            #     patch = patch.to(device=self.device).half()
+            # else:
+            #     patch = patch.to(device=self.device)
             patches.append(patch)
             if (i + 1) % self.batch_size == 0:
@@ -107,10 +117,12 @@ class FeatsInterface:
             patch = self.transform(img1)

             # patch = patch.to(device=self.device).half()
-            if str(self.device) != "cpu":
-                patch = patch.to(device=self.device)
-            else:
-                patch = patch.to(device=self.device)
+            # if str(self.device) != "cpu":
+            #     patch = patch.to(device=self.device).half()
+            #     patch = patch.to(device=self.device)
+            # else:
+            #     patch = patch.to(device=self.device)
+            patch = patch.to(device=self.device)

             patches.append(patch)
             if (d + 1) % self.batch_size == 0:
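The new preprocessing in inference pads each crop to a square with black borders before self.transform, so the aspect ratio survives the later resize instead of being stretched. A standalone sketch of that letterboxing step; the helper name pad_to_square and the 300x120 test size are illustrative:

```python
from PIL import Image

def pad_to_square(img: Image.Image, fill=(0, 0, 0)) -> Image.Image:
    """Paste the image centered on a square canvas, as the new inference path does."""
    width, height = img.size
    new_size = max(width, height)
    canvas = Image.new("RGB", (new_size, new_size), fill)
    canvas.paste(img, ((new_size - width) // 2, (new_size - height) // 2))
    return canvas

square = pad_to_square(Image.new("RGB", (300, 120)))
print(square.size)  # (300, 300)
```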
BIN  contrast/feat_extract/model/__pycache__/CBAM.cpython-312.pyc  (new file)
BIN  contrast/feat_extract/model/__pycache__/Tool.cpython-312.pyc  (new file)
BIN  contrast/feat_extract/model/__pycache__/__init__.cpython-312.pyc  (new file)
BIN  contrast/feat_extract/model/__pycache__/lcnet.cpython-312.pyc  (new file)
BIN  contrast/feat_extract/model/__pycache__/loss.cpython-312.pyc  (new file)
BIN  contrast/feat_extract/model/__pycache__/metric.cpython-312.pyc  (new file)
BIN  contrast/feat_extract/model/__pycache__/resbam.cpython-312.pyc  (new file)
BIN  contrast/feat_extract/model/__pycache__/utils.cpython-312.pyc  (new file)
53  contrast/feat_infer.py  (new file)

@@ -0,0 +1,53 @@
# -*- coding: utf-8 -*-
"""
Created on Fri Feb 28 16:27:17 2025

@author: ym
"""

import os
import time
import pickle
import numpy as np
from PIL import Image
from scipy.spatial.distance import cdist
from feat_extract.config import config as conf
from feat_extract.inference import FeatsInterface  #, inference_image

Encoder = FeatsInterface(conf)

def main():
    imgpaths = r"D:\全实时\202502\result\Yolos_Tracking\20250228-160049-188_6921168558018_6921168558018\a"
    featDict = {}
    imgs, imgfiles = [], []
    for filename in os.listdir(imgpaths):
        file, ext = os.path.splitext(filename)

        imgpath = os.path.join(imgpaths, filename)
        img = Image.open(imgpath)

        imgs.append(img)
        imgfiles.append(filename)

        feature = Encoder.inference([img])
        feature /= np.linalg.norm(feature, axis=1)[:, None]
        feature_ft32 = feature.astype(np.float32)

        featDict[file] = feature_ft32

    feature = Encoder.inference(imgs)
    feature /= np.linalg.norm(feature, axis=1)[:, None]
    feature_ft32 = feature.astype(np.float32)

    matrix = 1 - cdist(feature, feature, 'cosine')

    print("do")

if __name__ == '__main__':
    main()
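feat_infer.py L2-normalizes each feature row before comparing; on unit-norm vectors, 1 minus cosine distance reduces to a plain dot product. A small check of that equivalence, with random vectors standing in for real embeddings:

```python
import numpy as np
from scipy.spatial.distance import cdist

feats = np.random.default_rng(2).normal(size=(4, 256))
feats /= np.linalg.norm(feats, axis=1)[:, None]   # same row-wise L2 norm as feat_infer.py

sim_cdist = 1 - cdist(feats, feats, "cosine")
sim_dot = feats @ feats.T                         # equivalent for unit-norm rows
print(np.allclose(sim_cdist, sim_dot))            # True
```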
@@ -38,13 +38,13 @@ def get_std_barcodeDict(bcdpath, savepath, bcdSet):
     '''
     inputs:
         bcdpath: 已清洗的barcode样本图像,如果barcode下有'base'文件夹,只选用该文件夹下图像
-                (default = r'\\192.168.1.28\share\已标注数据备份\对比数据\barcode\barcode_1771')
+                (default = r'\\\\192.168.1.28\\share\\已标注数据备份\\对比数据\\barcode\\barcode_1771')
     功能:
         生成并保存只有一个key值的字典 {barcode: [imgpath1, imgpath1, ...]},
         savepath: 字典存储地址,文件名格式:barcode.pickle
     '''

-    # savepath = r'\\192.168.1.28\share\测试_202406\contrast\std_barcodes'
+    # savepath = r'\\\\192.168.1.28\\share\\测试_202406\\contrast\\std_barcodes'

     '''读取数据集中 barcode 列表'''
     stdBarcodeList = []
@@ -120,8 +120,7 @@ def stdfeat_infer(imgPath, featPath, bcdSet=None):

     # imgPath = r"\\192.168.1.28\share\测试_202406\contrast\std_barcodes"
     # featPath = r"\\192.168.1.28\share\测试_202406\contrast\std_features"
-    stdBarcodeDict = {}
-    stdBarcodeDict_ft16 = {}
+
     Encoder = FeatsInterface(conf)

@@ -158,6 +157,8 @@ def stdfeat_infer(imgPath, featPath, bcdSet=None):

             feature /= np.linalg.norm(feature, axis=1)[:, None]

+            feature_ft32 = feature.astype(np.float32)
+
             # float16
             feature_ft16 = feature.astype(np.float16)
             feature_ft16 /= np.linalg.norm(feature_ft16, axis=1)[:, None]
@@ -166,22 +167,20 @@ def stdfeat_infer(imgPath, featPath, bcdSet=None):
             # feature_uint8, _ = ft16_to_uint8(feature_ft16)
             feature_uint8 = (feature_ft16*128).astype(np.int8)

+            '''================ 保存单个barcode特征 ================'''
+            ##================== float32
+            stdbDict["barcode"] = barcode
+            stdbDict["imgpaths"] = imgpaths
+            stdbDict["feats_ft32"] = feature_ft32
+            stdbDict["feats_ft16"] = feature_ft16
+            stdbDict["feats_uint8"] = feature_uint8
+
+            with open(featpath, 'wb') as f:
+                pickle.dump(stdbDict, f)
+
         except Exception as e:
             print(f"Error accured at: {filename}, with Exception is: {e}")

-        '''================ 保存单个barcode特征 ================'''
-        ##================== float32
-        stdbDict["barcode"] = barcode
-        stdbDict["imgpaths"] = imgpaths
-        stdbDict["feats_ft32"] = feature
-        stdbDict["feats_ft16"] = feature_ft16
-        stdbDict["feats_uint8"] = feature_uint8
-
-        with open(featpath, 'wb') as f:
-            pickle.dump(stdbDict, f)
-
-        stdBarcodeDict[barcode] = feature
-        stdBarcodeDict_ft16[barcode] = feature_ft16
-
         t2 = time.time()
         print(f"Barcode: {barcode}, need time: {t2-t1:.1f} secs")
@@ -192,19 +191,32 @@ def stdfeat_infer(imgPath, featPath, bcdSet=None):
     return


-def gen_bcd_features(imgpath, bcdpath, featpath, bcdSet=None):
+def gen_bcd_features(imgpath, bcdpath, featpath, eventSourcePath):
     ''' 生成标准特征集 '''
     '''1. 提取 imgpath 中样本地址,生成字典{barcode: [imgpath1, imgpath1, ...]}
        并存储于: bcdpath, 格式为 barcode.pickle'''
+
+    bcdList = []
+    for evtname in os.listdir(eventSourcePath):
+        bname, ext = os.path.splitext(evtname)
+        evt = bname.split('_')
+        if len(evt)>=2 and evt[-1].isdigit() and len(evt[-1])>=10:
+            bcdList.append(evt[-1])
+
+    bcdSet = set(bcdList)
     get_std_barcodeDict(imgpath, bcdpath, bcdSet)

     '''2. 特征提取,并保存至文件夹 featpath 中,也根据 bcdSet 交集执行'''
     stdfeat_infer(bcdpath, featpath, bcdSet)

 def main():
-    imgpath = r"\\192.168.1.28\share\数据\已完成数据\展厅数据\v1.0\比对数据\整理\zhantingBase"
-    bcdpath = r"D:\exhibition\dataset\bcdpath"
-    featpath = r"D:\exhibition\dataset\feats"
+    imgpath = r"\\192.168.1.28\share\数据\已完成数据\展厅数据\v2.0_abroad\比对数据\all_base_二筛"
+    bcdpath = r"D:\exhibition\dataset\bcdpath_abroad"
+    featpath = r"D:\exhibition\dataset\feats_abroad"
+    if not os.path.exists(bcdpath):
+        os.makedirs(bcdpath)
+    if not os.path.exists(featpath):
+        os.makedirs(featpath)

     gen_bcd_features(imgpath, bcdpath, featpath)
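gen_bcd_features now derives the barcode set from event file names: the last underscore-separated token counts as a barcode when it is all digits and at least 10 characters long. A quick sketch of that filter on made-up names; the helper name barcodes_from_events is illustrative:

```python
import os

def barcodes_from_events(event_names):
    # keep the trailing token when it looks like a barcode (digits, >= 10 chars)
    bcds = []
    for name in event_names:
        token = os.path.splitext(name)[0].split('_')[-1]
        if len(name.split('_')) >= 2 and token.isdigit() and len(token) >= 10:
            bcds.append(token)
    return set(bcds)

print(barcodes_from_events([
    "20250310-175352-741_6931806801024.pickle",  # kept
    "20250310-175400-002_notabarcode.pickle",    # dropped
]))
```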
@ -10,46 +10,7 @@ import numpy as np
|
|||||||
from pathlib import Path
|
from pathlib import Path
|
||||||
import matplotlib.pyplot as plt
|
import matplotlib.pyplot as plt
|
||||||
from scipy.spatial.distance import cdist
|
from scipy.spatial.distance import cdist
|
||||||
from utils.event import ShoppingEvent
|
from utils.tools import init_eventDict
|
||||||
|
|
||||||
|
|
||||||
def init_eventdict(sourcePath, stype="data"):
|
|
||||||
'''stype: str,
|
|
||||||
'source': 由 videos 或 images 生成的 pickle 文件
|
|
||||||
'data': 从 data 文件中读取的现场运行数据
|
|
||||||
'''
|
|
||||||
|
|
||||||
k, errEvents = 0, []
|
|
||||||
for bname in os.listdir(sourcePath):
|
|
||||||
# bname = r"20241126-135911-bdf91cf9-3e9a-426d-94e8-ddf92238e175_6923555210479"
|
|
||||||
|
|
||||||
source_path = os.path.join(sourcePath, bname)
|
|
||||||
if stype=="data":
|
|
||||||
pickpath = os.path.join(eventDataPath, f"{bname}.pickle")
|
|
||||||
if not os.path.isdir(source_path) or os.path.isfile(pickpath):
|
|
||||||
continue
|
|
||||||
if stype=="source":
|
|
||||||
pickpath = os.path.join(eventDataPath, bname)
|
|
||||||
if not os.path.isfile(source_path) or os.path.isfile(pickpath):
|
|
||||||
continue
|
|
||||||
|
|
||||||
try:
|
|
||||||
event = ShoppingEvent(source_path, stype)
|
|
||||||
|
|
||||||
with open(pickpath, 'wb') as f:
|
|
||||||
pickle.dump(event, f)
|
|
||||||
print(bname)
|
|
||||||
except Exception as e:
|
|
||||||
errEvents.append(source_path)
|
|
||||||
print(e)
|
|
||||||
# k += 1
|
|
||||||
# if k==1:
|
|
||||||
# break
|
|
||||||
|
|
||||||
errfile = os.path.join(resultPath, 'error_events.txt')
|
|
||||||
with open(errfile, 'a', encoding='utf-8') as f:
|
|
||||||
for line in errEvents:
|
|
||||||
f.write(line + '\n')
|
|
||||||
|
|
||||||
def read_eventdict(eventDataPath):
|
def read_eventdict(eventDataPath):
|
||||||
evtDict = {}
|
evtDict = {}
|
||||||
@ -65,37 +26,37 @@ def read_eventdict(eventDataPath):
|
|||||||
|
|
||||||
return evtDict
|
return evtDict
|
||||||
|
|
||||||
def simi_calc(event, o2nevt, typee=None):
|
def simi_calc(event, o2nevt, pattern, typee=None):
|
||||||
if typee == "11":
|
if pattern==1 or pattern==2:
|
||||||
boxes1 = event.front_boxes
|
if typee == "11":
|
||||||
boxes2 = o2nevt.front_boxes
|
boxes1 = event.front_boxes
|
||||||
|
boxes2 = o2nevt.front_boxes
|
||||||
|
|
||||||
feat1 = event.front_feats
|
feat1 = event.front_feats
|
||||||
feat2 = o2nevt.front_feats
|
feat2 = o2nevt.front_feats
|
||||||
if typee == "10":
|
if typee == "10":
|
||||||
boxes1 = event.front_boxes
|
boxes1 = event.front_boxes
|
||||||
boxes2 = o2nevt.back_boxes
|
boxes2 = o2nevt.back_boxes
|
||||||
|
|
||||||
feat1 = event.front_feats
|
feat1 = event.front_feats
|
||||||
feat2 = o2nevt.back_feats
|
feat2 = o2nevt.back_feats
|
||||||
if typee == "00":
|
if typee == "00":
|
||||||
boxes1 = event.back_boxes
|
boxes1 = event.back_boxes
|
||||||
boxes2 = o2nevt.back_boxes
|
boxes2 = o2nevt.back_boxes
|
||||||
|
|
||||||
feat1 = event.back_feats
|
feat1 = event.back_feats
|
||||||
feat2 = o2nevt.back_feats
|
feat2 = o2nevt.back_feats
|
||||||
if typee == "01":
|
if typee == "01":
|
||||||
boxes1 = event.back_boxes
|
boxes1 = event.back_boxes
|
||||||
boxes2 = o2nevt.front_boxes
|
boxes2 = o2nevt.front_boxes
|
||||||
|
|
||||||
feat1 = event.back_feats
|
feat1 = event.back_feats
|
||||||
feat2 = o2nevt.front_feats
|
feat2 = o2nevt.front_feats
|
||||||
|
|
||||||
'''自定义事件特征选择'''
|
'''自定义事件特征选择'''
|
||||||
if typee==3:
|
if pattern==3 and len(event.feats_compose) and len(o2nevt.feats_compose):
|
||||||
feat1 = event.feats_compose
|
feat1 = [event.feats_compose]
|
||||||
feat2 = o2nevt.feats_compose
|
feat2 = [o2nevt.feats_compose]
|
||||||
|
|
||||||
|
|
||||||
if len(feat1) and len(feat2):
|
if len(feat1) and len(feat2):
|
||||||
matrix = 1 - cdist(feat1[0], feat2[0], 'cosine')
|
matrix = 1 - cdist(feat1[0], feat2[0], 'cosine')
|
||||||
@ -109,54 +70,64 @@ def one2n_pr(evtDicts, pattern=1):
|
|||||||
'''
|
'''
|
||||||
pattern:
|
pattern:
|
||||||
1: process.data 中记录的相似度
|
1: process.data 中记录的相似度
|
||||||
2: 根据 process.data 中标记的 type 选择特征计算
|
2: 根据 process.data 中标记的 type 选择特征组合方式计算相似度
|
||||||
3: 以其它方式选择特征计算
|
3: 利用 process.data 中的轨迹特征,以其它方式计算相似度
|
||||||
'''
|
'''
|
||||||
|
|
||||||
tpevents, fnevents, fpevents, tnevents = [], [], [], []
|
tpevents, fnevents, fpevents, tnevents = [], [], [], []
|
||||||
tpsimi, fnsimi, tnsimi, fpsimi = [], [], [], []
|
tpsimi, fnsimi, tnsimi, fpsimi = [], [], [], []
|
||||||
errorFile_one2n = []
|
one2nFile, errorFile_one2n = [], []
|
||||||
|
errorFile_one2n_ = []
|
||||||
|
evts_output = []
|
||||||
for evtname, event in evtDicts.items():
|
for evtname, event in evtDicts.items():
|
||||||
evt_names, evt_barcodes, evt_similars, evt_types = [], [], [], []
|
evt_names, evt_barcodes, evt_similars, evt_types = [], [], [], []
|
||||||
|
|
||||||
|
if len(event.one2n)==0 or len(event.barcode)==0:
|
||||||
|
continue
|
||||||
|
|
||||||
|
evts_output.append(evtname)
|
||||||
|
|
||||||
for ndict in event.one2n:
|
for ndict in event.one2n:
|
||||||
nname = ndict["event"]
|
nname = ndict["event"]
|
||||||
barcode = ndict["barcode"]
|
barcode = ndict["barcode"]
|
||||||
similar = ndict["similar"]
|
similar = ndict["similar"]
|
||||||
typee = ndict["type"].strip()
|
typee = ndict["type"].strip()
|
||||||
|
|
||||||
evt_names.append(nname)
|
if len(barcode)==0:
|
||||||
evt_barcodes.append(barcode)
|
continue
|
||||||
evt_types.append(typee)
|
if typee.find(",") >=0:
|
||||||
|
typee = typee.split(",")[-1]
|
||||||
|
|
||||||
if pattern==1:
|
if pattern==1:
|
||||||
evt_similars.append(similar)
|
evt_similars.append(similar)
|
||||||
|
|
||||||
if pattern==2 or pattern==3:
|
if pattern==2 or pattern==3:
|
||||||
o2n_evt = [evt for name, evt in evtDicts.items() if name.find(nname[:15])==0]
|
o2n_evt = [evt for name, evt in evtDicts.items() if name.find(nname[:15])==0]
|
||||||
if len(o2n_evt)==1:
|
if len(o2n_evt)!=1:
|
||||||
o2nevt = o2n_evt[0]
|
|
||||||
else:
|
|
||||||
continue
|
continue
|
||||||
|
|
||||||
if pattern==2:
|
simival = simi_calc(event, o2n_evt[0], pattern, typee)
|
||||||
simival = simi_calc(event, o2nevt, typee)
|
|
||||||
|
|
||||||
if pattern==3:
|
|
||||||
simival = simi_calc(event, o2nevt, typee=pattern)
|
|
||||||
|
|
||||||
if simival==None:
|
if simival==None:
|
||||||
continue
|
continue
|
||||||
evt_similars.append(simival)
|
evt_similars.append(simival)
|
||||||
|
|
||||||
if len(evt_names)==len(evt_barcodes) and len(evt_barcodes)==len(evt_similars) \
|
evt_names.append(nname)
|
||||||
and len(evt_similars)==len(evt_types) and len(evt_names)>0:
|
evt_barcodes.append(barcode)
|
||||||
|
evt_types.append(typee)
|
||||||
|
|
||||||
|
# if evtname == "20250226-170321-327_6903244678377":
|
||||||
|
# print("evtname")
|
||||||
|
|
||||||
|
## process.data的oneTon的各项中,均不包括当前事件的barcode
|
||||||
|
if event.barcode not in evt_barcodes:
|
||||||
|
errorFile_one2n.append(evtname)
|
||||||
|
continue
|
||||||
|
else:
|
||||||
|
one2nFile.append(evtname)
|
||||||
|
|
||||||
|
if len(evt_names)==len(evt_barcodes)==len(evt_similars)==len(evt_types) and len(evt_names)>0:
|
||||||
# maxsim = evt_similars[evt_similars.index(max(evt_similars))]
|
# maxsim = evt_similars[evt_similars.index(max(evt_similars))]
|
||||||
maxsim = max(evt_similars)
|
maxsim = max(evt_similars)
|
||||||
for i in range(len(evt_names)):
|
for i in range(len(evt_names)):
|
||||||
bcd, simi = evt_barcodes[i], evt_similars[i]
|
bcd, simi = evt_barcodes[i], evt_similars[i]
|
||||||
|
|
||||||
if bcd==event.barcode and simi==maxsim:
|
if bcd==event.barcode and simi==maxsim:
|
||||||
tpsimi.append(simi)
|
tpsimi.append(simi)
|
||||||
tpevents.append(evtname)
|
tpevents.append(evtname)
|
||||||
@ -170,14 +141,11 @@ def one2n_pr(evtDicts, pattern=1):
|
|||||||
fpsimi.append(simi)
|
fpsimi.append(simi)
|
||||||
fpevents.append(evtname)
|
fpevents.append(evtname)
|
||||||
else:
|
else:
|
||||||
errorFile_one2n.append(evtname)
|
errorFile_one2n_.append(evtname)
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
''' 1:n 数据存储,需根据相似度排序'''
|
''' 1:n 数据存储,需根据相似度排序'''
|
||||||
PPrecise, PRecall = [], []
|
PPrecise, PRecall = [], []
|
||||||
NPrecise, NRecall = [], []
|
NPrecise, NRecall = [], []
|
||||||
|
|
||||||
Thresh = np.linspace(-0.2, 1, 100)
|
Thresh = np.linspace(-0.2, 1, 100)
|
||||||
for th in Thresh:
|
for th in Thresh:
|
||||||
'''============================= 1:n 计算'''
|
'''============================= 1:n 计算'''
|
||||||
@ -187,9 +155,9 @@ def one2n_pr(evtDicts, pattern=1):
|
|||||||
TN = sum(np.array(tnsimi) < th)
|
TN = sum(np.array(tnsimi) < th)
|
||||||
|
|
||||||
PPrecise.append(TP/(TP+FP+1e-6))
|
PPrecise.append(TP/(TP+FP+1e-6))
|
||||||
PRecall.append(TP/(len(tpsimi)+len(fnsimi)+1e-6))
|
PRecall.append(TP/(TP+FN+1e-6))
|
||||||
NPrecise.append(TN/(TN+FN+1e-6))
|
NPrecise.append(TN/(TN+FN+1e-6))
|
||||||
NRecall.append(TN/(len(tnsimi)+len(fpsimi)+1e-6))
|
NRecall.append(TN/(TN+FP+1e-6))
|
||||||
|
|
||||||
|
|
||||||
        '''4. ============================= 1:n curves'''
@@ -200,40 +168,49 @@ def one2n_pr(evtDicts, pattern=1):
    ax.plot(Thresh, NRecall, 'c', label='Recall_Neg: TN/TNFN')
    ax.set_xlim([0, 1])
    ax.set_ylim([0, 1])
-   ax.grid(True)
+   ax.set_xticks(np.arange(0, 1, 0.1))
+   ax.set_yticks(np.arange(0, 1, 0.1))
+   ax.grid(True, linestyle='--')
    ax.set_title('1:n Precise & Recall')
-   ax.set_xlabel(f"Event Num: {len(tpsimi)+len(fnsimi)}")
+   ax.set_xlabel(f"Event Num: {len(one2nFile)}")
    ax.legend()
    plt.show()
    ## ============================= 1:n histograms'''
    fig, axes = plt.subplots(2, 2)
    axes[0, 0].hist(tpsimi, bins=60, range=(-0.2, 1), edgecolor='black')
    axes[0, 0].set_xlim([-0.2, 1])
-   axes[0, 0].set_title('TP')
+   axes[0, 0].set_title(f'TP: {len(tpsimi)}')
    axes[0, 1].hist(fpsimi, bins=60, range=(-0.2, 1), edgecolor='black')
    axes[0, 1].set_xlim([-0.2, 1])
-   axes[0, 1].set_title('FP')
+   axes[0, 1].set_title(f'FP: {len(fpsimi)}')
    axes[1, 0].hist(tnsimi, bins=60, range=(-0.2, 1), edgecolor='black')
    axes[1, 0].set_xlim([-0.2, 1])
-   axes[1, 0].set_title('TN')
+   axes[1, 0].set_title(f'TN: {len(tnsimi)}')
    axes[1, 1].hist(fnsimi, bins=60, range=(-0.2, 1), edgecolor='black')
    axes[1, 1].set_xlim([-0.2, 1])
-   axes[1, 1].set_title('FN')
+   axes[1, 1].set_title(f'FN: {len(fnsimi)}')
    plt.show()

    return fpevents
def main():
    '''1. Build the event dict and save it to eventDataPath; only needs to run once '''
-   init_eventdict(eventSourcePath, stype="source")
+   init_eventDict(eventSourcePath, eventDataPath, stype="realtime")  # 'source', 'data', 'realtime'

+   # for pfile in os.listdir(eventDataPath):
+   #     evt = os.path.splitext(pfile)[0].split('_')
+   #     cont = len(evt)>=2 and evt[-1].isdigit() and len(evt[-1])>=10
+   #     if not cont:
+   #         continue

    '''2. Read the event dict '''
    evtDicts = read_eventdict(eventDataPath)

    '''3. 1:n comparison evaluation '''
-   fpevents = one2n_pr(evtDicts, pattern=3)
+   fpevents = one2n_pr(evtDicts, pattern=1)

    fpErrFile = str(Path(resultPath).joinpath("one2n_fp_Error.txt"))
    with open(fpErrFile, "w") as file:
@@ -243,15 +220,16 @@ def main():

if __name__ == '__main__':
-   eventSourcePath = r"\\192.168.1.28\share\测试视频数据以及日志\算法全流程测试\202412\result\ShoppingDict_pkfile"
-   resultPath = r"\\192.168.1.28\share\测试视频数据以及日志\算法全流程测试\202412\result\contrast"
+   eventSourcePath = r"\\192.168.1.28\share\测试视频数据以及日志\全实时测试\V12\2025-3-4_2"
+   resultPath = r"\\192.168.1.28\share\测试视频数据以及日志\全实时测试\testing"

-   eventDataPath = os.path.join(resultPath, "evtobjs")
-   similPath = os.path.join(resultPath, "simidata")
+   eventDataPath = os.path.join(resultPath, "evtobjs_wang")
    if not os.path.exists(eventDataPath):
        os.makedirs(eventDataPath)
-   if not os.path.exists(similPath):
-       os.makedirs(similPath)
+   # similPath = os.path.join(resultPath, "simidata")
+   # if not os.path.exists(similPath):
+   #     os.makedirs(similPath)

    main()
@@ -27,187 +27,24 @@ Created on Fri Aug 30 17:53:03 2024

 """
 import numpy as np
-import cv2
 import os
 import sys
 import random
 import pickle
 import json
-import random
-import copy
-import sys
-# import torch
-import time
-# import json
 from pathlib import Path
-from scipy.spatial.distance import cdist
 import matplotlib.pyplot as plt
-import shutil
-from datetime import datetime
-# from openpyxl import load_workbook, Workbook

-# from config import config as conf
-# from model import resnet18 as resnet18
-# from feat_inference import inference_image
+FILE = Path(__file__).resolve()
+ROOT = FILE.parents[1]  # YOLOv5 root directory
+if str(ROOT) not in sys.path:
+    sys.path.append(str(ROOT))

-sys.path.append(r"D:\DetectTracking")
-from tracking.utils.read_data import extract_data, read_tracking_output, read_similar, read_deletedBarcode_file
-from tracking.utils.plotting import Annotator, colors
-from feat_extract.config import config as conf
-from feat_extract.inference import FeatsInterface
-from utils.event import ShoppingEvent, save_data
+from utils.calsimi import calsimi_vs_stdfeat, calsimi_vs_stdfeat_new
+from utils.tools import get_evtList, init_eventDict
+from utils.databits import data_precision_compare
 from genfeats import gen_bcd_features
-from event_test import calc_simil
-def int8_to_ft16(arr_uint8, amin, amax):
-    arr_ft16 = (arr_uint8 / 255 * (amax-amin) + amin).astype(np.float16)
-    return arr_ft16
-
-def ft16_to_uint8(arr_ft16):
-    # pickpath = r"\\192.168.1.28\share\测试_202406\contrast\std_features_ft32vsft16\6902265587712_ft16.pickle"
-    # with open(pickpath, 'rb') as f:
-    #     edict = pickle.load(f)
-    # arr_ft16 = edict['feats']
-
-    amin = np.min(arr_ft16)
-    amax = np.max(arr_ft16)
-    arr_ft255 = (arr_ft16 - amin) * 255 / (amax-amin)
-    arr_uint8 = arr_ft255.astype(np.uint8)
-
-    arr_ft16_ = int8_to_ft16(arr_uint8, amin, amax)
-    arrDistNorm = np.linalg.norm(arr_ft16_ - arr_ft16) / arr_ft16_.size
-    return arr_uint8, arr_ft16_
-
-def data_precision_compare(stdfeat, evtfeat, evtMessage, save=True):
-    evt, stdbcd, label = evtMessage
-    rltdata, rltdata_ft16, rltdata_ft16_ = [], [], []
-
-    matrix = 1 - cdist(stdfeat, evtfeat, 'cosine')
-    simi_mean = np.mean(matrix)
-    simi_max = np.max(matrix)
-    stdfeatm = np.mean(stdfeat, axis=0, keepdims=True)
-    evtfeatm = np.mean(evtfeat, axis=0, keepdims=True)
-    simi_mfeat = 1- np.maximum(0.0, cdist(stdfeatm, evtfeatm, 'cosine'))
-    rltdata = [label, stdbcd, evt, simi_mean, simi_max, simi_mfeat[0,0]]
-
-    ##================================================================= float16
-    stdfeat_ft16 = stdfeat.astype(np.float16)
-    evtfeat_ft16 = evtfeat.astype(np.float16)
-    stdfeat_ft16 /= np.linalg.norm(stdfeat_ft16, axis=1)[:, None]
-    evtfeat_ft16 /= np.linalg.norm(evtfeat_ft16, axis=1)[:, None]
-
-    matrix_ft16 = 1 - cdist(stdfeat_ft16, evtfeat_ft16, 'cosine')
-    simi_mean_ft16 = np.mean(matrix_ft16)
-    simi_max_ft16 = np.max(matrix_ft16)
-    stdfeatm_ft16 = np.mean(stdfeat_ft16, axis=0, keepdims=True)
-    evtfeatm_ft16 = np.mean(evtfeat_ft16, axis=0, keepdims=True)
-    simi_mfeat_ft16 = 1- np.maximum(0.0, cdist(stdfeatm_ft16, evtfeatm_ft16, 'cosine'))
-    rltdata_ft16 = [label, stdbcd, evt, simi_mean_ft16, simi_max_ft16, simi_mfeat_ft16[0,0]]
-
-    '''****************** uint8 is ok!!!!!! ******************'''
-    ##=================================================================== uint8
-    # stdfeat_uint8, stdfeat_ft16_ = ft16_to_uint8(stdfeat_ft16)
-    # evtfeat_uint8, evtfeat_ft16_ = ft16_to_uint8(evtfeat_ft16)
-    stdfeat_uint8 = (stdfeat_ft16*128).astype(np.int8)
-    evtfeat_uint8 = (evtfeat_ft16*128).astype(np.int8)
-    stdfeat_ft16_ = stdfeat_uint8.astype(np.float16)/128
-    evtfeat_ft16_ = evtfeat_uint8.astype(np.float16)/128
-
-    absdiff = np.linalg.norm(stdfeat_ft16_ - stdfeat) / stdfeat.size
-
-    matrix_ft16_ = 1 - cdist(stdfeat_ft16_, evtfeat_ft16_, 'cosine')
-    simi_mean_ft16_ = np.mean(matrix_ft16_)
-    simi_max_ft16_ = np.max(matrix_ft16_)
-    stdfeatm_ft16_ = np.mean(stdfeat_ft16_, axis=0, keepdims=True)
-    evtfeatm_ft16_ = np.mean(evtfeat_ft16_, axis=0, keepdims=True)
-    simi_mfeat_ft16_ = 1- np.maximum(0.0, cdist(stdfeatm_ft16_, evtfeatm_ft16_, 'cosine'))
-    rltdata_ft16_ = [label, stdbcd, evt, simi_mean_ft16_, simi_max_ft16_, simi_mfeat_ft16_[0,0]]
-
-    if not save:
-        return
-
-    ##========================================================= save as float32
-    rppath = os.path.join(similPath, f'{evt}_ft32.pickle')
-    with open(rppath, 'wb') as f:
-        pickle.dump(rltdata, f)
-
-    rtpath = os.path.join(similPath, f'{evt}_ft32.txt')
-    with open(rtpath, 'w', encoding='utf-8') as f:
-        for result in rltdata:
-            part = [f"{x:.3f}" if isinstance(x, float) else str(x) for x in result]
-            line = ', '.join(part)
-            f.write(line + '\n')
-
-    ##========================================================= save as float16
-    rppath_ft16 = os.path.join(similPath, f'{evt}_ft16.pickle')
-    with open(rppath_ft16, 'wb') as f:
-        pickle.dump(rltdata_ft16, f)
-
-    rtpath_ft16 = os.path.join(similPath, f'{evt}_ft16.txt')
-    with open(rtpath_ft16, 'w', encoding='utf-8') as f:
-        for result in rltdata_ft16:
-            part = [f"{x:.3f}" if isinstance(x, float) else str(x) for x in result]
-            line = ', '.join(part)
-            f.write(line + '\n')
-
-    ##=========================================================== save as uint8
-    rppath_uint8 = os.path.join(similPath, f'{evt}_uint8.pickle')
-    with open(rppath_uint8, 'wb') as f:
-        pickle.dump(rltdata_ft16_, f)
-
-    rtpath_uint8 = os.path.join(similPath, f'{evt}_uint8.txt')
-    with open(rtpath_uint8, 'w', encoding='utf-8') as f:
-        for result in rltdata_ft16_:
-            part = [f"{x:.3f}" if isinstance(x, float) else str(x) for x in result]
-            line = ', '.join(part)
-            f.write(line + '\n')
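The removed helpers above explored storing features at lower precision. A minimal hedged sketch of the symmetric int8 round-trip they settle on (scale by 128, cast, divide back), assuming the features are L2-normalized so values lie in [-1, 1]; the function name is illustrative:

    import numpy as np

    def quantize_roundtrip(feat_f32):
        # feat_f32: (N, D) L2-normalized float32 features, values in [-1, 1]
        feat_i8 = (feat_f32 * 128).astype(np.int8)       # quantize
        feat_back = feat_i8.astype(np.float16) / 128     # dequantize
        # mean per-element reconstruction error, as in the removed code
        err = np.linalg.norm(feat_back - feat_f32) / feat_f32.size
        return feat_i8, feat_back, err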
-def simi_calc(event, stdfeat):
-    evtfeat = event.feats_compose
-    if isinstance(event.feats_select, list):
-        if len(event.feats_select) and len(event.feats_select[0]):
-            evtfeat = event.feats_select[0]
-        else:
-            return None, None, None
-    else:
-        evtfeat = event.feats_select
-
-    if len(evtfeat)==0 or len(stdfeat)==0:
-        return None, None, None
-
-    evtfeat /= np.linalg.norm(evtfeat, axis=1)[:, None]
-    stdfeat /= np.linalg.norm(stdfeat, axis=1)[:, None]
-
-    matrix = 1 - cdist(evtfeat, stdfeat, 'cosine')
-    matrix[matrix < 0] = 0
-
-    simi_mean = np.mean(matrix)
-    simi_max = np.max(matrix)
-    stdfeatm = np.mean(stdfeat, axis=0, keepdims=True)
-    evtfeatm = np.mean(evtfeat, axis=0, keepdims=True)
-    simi_mfeat = 1- np.maximum(0.0, cdist(stdfeatm, evtfeatm, 'cosine'))
-    return simi_mean, simi_max, simi_mfeat[0,0]
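The removed simi_calc reduces an event-versus-standard comparison to three cosine statistics. A self-contained sketch of the same idea, with random toy inputs standing in for real features:

    import numpy as np
    from scipy.spatial.distance import cdist

    evtfeat = np.random.rand(5, 256).astype(np.float32)   # event features (toy)
    stdfeat = np.random.rand(8, 256).astype(np.float32)   # standard features (toy)
    evtfeat /= np.linalg.norm(evtfeat, axis=1, keepdims=True)
    stdfeat /= np.linalg.norm(stdfeat, axis=1, keepdims=True)

    matrix = 1 - cdist(evtfeat, stdfeat, 'cosine')        # pairwise cosine similarity
    matrix[matrix < 0] = 0
    simi_mean, simi_max = matrix.mean(), matrix.max()
    # similarity of the two mean feature vectors
    m1 = evtfeat.mean(axis=0, keepdims=True)
    m2 = stdfeat.mean(axis=0, keepdims=True)
    simi_mfeat = 1 - np.maximum(0.0, cdist(m1, m2, 'cosine'))[0, 0]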
def build_std_evt_dict():
@@ -218,18 +55,6 @@ def build_std_evt_dict():

    stdBarcode = [p.stem for p in Path(stdFeaturePath).iterdir() if p.is_file() and (p.suffix=='.json' or p.suffix=='.pickle')]

-   '''*********** USearch ***********'''
-   # stdFeaturePath = r"D:\contrast\stdlib\v11_test.json"
-   # stdBarcode = []
-   # stdlib = {}
-   # with open(stdFeaturePath, 'r', encoding='utf-8') as f:
-   #     data = json.load(f)
-   # for dic in data['total']:
-   #     barcode = dic['key']
-   #     feature = np.array(dic['value'])
-   #     stdBarcode.append(barcode)
-   #     stdlib[barcode] = feature

    '''======1. Shopping-event list; every barcode in it exists in stdBarcode ==='''
    evtList = [(p.stem, p.stem.split('_')[-1]) for p in Path(eventDataPath).iterdir()
               if p.is_file()
@@ -259,9 +84,6 @@ def build_std_evt_dict():
            feat = stddata["feats_ft32"]
        stdDict[barcode] = feat

    '''*********** USearch ***********'''
    # stdDict = {}
    # for barcode in barcodes:
@@ -271,13 +93,17 @@ def build_std_evt_dict():
    evtDict = {}
    for evtname, barcode in evtList:
        evtpath = os.path.join(eventDataPath, evtname+'.pickle')
-       with open(evtpath, 'rb') as f:
-           evtdata = pickle.load(f)
+       try:
+           with open(evtpath, 'rb') as f:
+               evtdata = pickle.load(f)
+       except Exception as e:
+           print(evtname)

        evtDict[evtname] = evtdata

    return evtList, evtDict, stdDict
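One caveat on the guarded load introduced above: if pickle.load raises, evtdata keeps the value from the previous loop iteration (or is unbound on the first event), yet it is still stored in evtDict. A safer hedged variant simply skips unreadable events:

    try:
        with open(evtpath, 'rb') as f:
            evtdata = pickle.load(f)
    except Exception as e:
        print(f"{evtname}: {e}")
        continue   # skip this event instead of reusing stale data
    evtDict[evtname] = evtdata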
-def one2SN_pr(evtList, evtDict, stdDict):
+def one2SN_pr(evtList, evtDict, stdDict, simType="simple"):

    std_barcodes = set([bcd for _, bcd in evtList])

@@ -300,14 +126,21 @@ def one2SN_pr(evtList, evtDict, stdDict):
        event = evtDict[evtname]
        ## No-trajectory check
        if len(event.front_feats)+len(event.back_feats)==0:
-           print(evtname)
+           errorFile_one2SN.append(evtname)
+           print(f"No trajectory: {evtname}")
            continue

        barcodes, similars = [], []
        for stdbcd in bcd_selected:
            stdfeat = stdDict[stdbcd]
-           simi_mean, simi_max, simi_mfeat = simi_calc(event, stdfeat)
-           # simi_mean = calc_simil(event, stdfeat)
+           if simType=="typea":
+               simi_mean, simi_max, simi_mfeat = calsimi_vs_stdfeat(event, stdfeat)
+           elif simType=="typeb":
+               pass
+           else:
+               simi_mean, simi_1, simi_2 = calsimi_vs_stdfeat_new(event, stdfeat)

        ## No guard needed here when event.front_feats and event.back_feats are both empty
        # if simi_mean==None:
@@ -351,10 +184,10 @@ def one2SN_pr(evtList, evtDict, stdDict):
        FNX = sum(np.array(fn_simi) < th)
        TNX = sum(np.array(tn_simi) < th)
        PPreciseX.append(TPX/(TPX+FPX+1e-6))
-       PRecallX.append(TPX/(len(tp_simi)+len(fn_simi)+1e-6))
+       PRecallX.append(TPX/(TPX+FNX+1e-6))
        NPreciseX.append(TNX/(TNX+FNX+1e-6))
-       NRecallX.append(TNX/(len(tn_simi)+len(fp_simi)+1e-6))
+       NRecallX.append(TNX/(TNX+FPX+1e-6))

    fig, ax = plt.subplots()
    ax.plot(Thresh, PPreciseX, 'r', label='Precise_Pos: TP/TPFP')
@@ -363,11 +196,17 @@ def one2SN_pr(evtList, evtDict, stdDict):
    ax.plot(Thresh, NRecallX, 'c', label='Recall_Neg: TN/TNFN')
    ax.set_xlim([0, 1])
    ax.set_ylim([0, 1])
-   ax.grid(True)
+   ax.set_xticks(np.arange(0, 1, 0.1))
+   ax.set_yticks(np.arange(0, 1, 0.1))
+   ax.grid(True, linestyle='--')
    ax.set_title('1:SN Precise & Recall')
-   ax.set_xlabel(f"Event Num: {len(evtList)}")
+   ax.set_xlabel(f"Event Num: {len(tp_events) + len(fn_events)}")
    ax.legend()
    plt.show()
+   rltpath = os.path.join(similPath, f'pr_1toSN_{simType}.png')
+   plt.savefig(rltpath)

    ## ============================= 1:N exhibition-hall histograms'''
    fig, axes = plt.subplots(2, 2)
    axes[0, 0].hist(tp_simi, bins=60, range=(-0.2, 1), edgecolor='black')
@@ -384,10 +223,13 @@ def one2SN_pr(evtList, evtDict, stdDict):
    axes[1, 1].set_title(f'FN({len(fn_simi)})')
    plt.show()
+   rltpath = os.path.join(similPath, f'hist_1toSN_{simType}.png')
+   plt.savefig(rltpath)
-def one2one_simi(evtList, evtDict, stdDict):
+def one2one_simi(evtList, evtDict, stdDict, simType):

    barcodes = set([bcd for _, bcd in evtList])
    '''======1 Build 3 event-pair sets: scan A put A, scan A put B, and their merge ===================='''
@@ -403,34 +245,61 @@ def one2one_simi(evtList, evtDict, stdDict):

    '''======2 Compute event vs. standard-feature-set similarity =================='''
    rltdata = []
+   errorFile_one2one = []
    for i in range(len(mergePairs)):
        evtname, stdbcd, label = mergePairs[i]
        event = evtDict[evtname]
-       if len(event.feats_compose)==0: continue
+       if len(event.feats_compose)==0:
+           errorFile_one2one.append(evtname)
+           continue

        stdfeat = stdDict[stdbcd]    # float32

-       simi_mean, simi_max, simi_mfeat = simi_calc(event, stdfeat)
+       if simType=="typea":
+           simi_mean, simi_1, simi_2 = calsimi_vs_stdfeat_new(event, stdfeat)
+       elif simType=="typeb":
+           pass
+       else:
+           simi_mean, simi_1, simi_2 = calsimi_vs_stdfeat(event, stdfeat)

        if simi_mean is None:
            continue
-       rltdata.append((label, stdbcd, evtname, simi_mean, simi_max, simi_mfeat))
+       rltdata.append((label, stdbcd, evtname, simi_mean, simi_1, simi_2))

        '''================ float32/float16/int8 precision comparison and storage ============='''
-       # data_precision_compare(stdfeat, evtfeat, mergePairs[i], save=True)
+       # data_precision_compare(stdfeat, evtfeat, mergePairs[i], similPath, save=True)

+   errorFile_one2one = list(set(errorFile_one2one))
+   return rltdata, errorFile_one2one

-   return rltdata
+def one2one_pr(evtList, evtDict, stdDict, simType="simple"):
+   rltdata, errorFile_one2one = one2one_simi(evtList, evtDict, stdDict, simType)

-def one2one_pr(rltdata):
    Same, Cross = [], []
    for label, stdbcd, evtname, simi_mean, simi_max, simi_mft in rltdata:
-       if label == "same":
+       if simType=="simple" and label == "same":
            Same.append(simi_max)
-       if label == "diff":
+       if simType=="simple" and label == "diff":
            Cross.append(simi_max)
+       if simType=="typea" and label == "same":
+           Same.append(simi_mean)
+       if simType=="typea" and label == "diff":
+           Cross.append(simi_mean)

+   # for label, stdbcd, evtname, simi_mean, simi_max, simi_mft in rltdata:
+   #     if label == "same":
+   #         Same.append(simi_mean)
+   #     if label == "diff":
+   #         Cross.append(simi_mean)

    Same = np.array(Same)
    Cross = np.array(Cross)
    TPFN = len(Same)
@@ -452,33 +321,47 @@ def one2one_pr(rltdata):
    Correct = []
    Thresh = np.linspace(-0.2, 1, 100)
    for th in Thresh:
-       TP = np.sum(Same > th)
-       FN = TPFN - TP
-       TN = np.sum(Cross < th)
-       FP = TNFP - TN
+       TP = np.sum(Same >= th)
+       FN = np.sum(Same < th)
+       # FN = TPFN - TP
+
+       TN = np.sum(Cross < th)
+       FP = np.sum(Cross >= th)
+       # FP = TNFP - TN

-       Recall_Pos.append(TP/TPFN)
-       Recall_Neg.append(TN/TNFP)
        Precision_Pos.append(TP/(TP+FP+1e-6))
        Precision_Neg.append(TN/(TN+FN+1e-6))
+       Recall_Pos.append(TP/(TP+FN+1e-6))
+       Recall_Neg.append(TN/(TN+FP+1e-6))
+
+       # Recall_Pos.append(TP/TPFN)
+       # Recall_Neg.append(TN/TNFP)

        Correct.append((TN+TP)/(TPFN+TNFP))

    fig, ax = plt.subplots()
-   ax.plot(Thresh, Correct, 'r', label='Correct: (TN+TP)/(TPFN+TNFP)')
+   ax.plot(Thresh, Precision_Pos, 'r', label='Precision_Pos: TP/(TP+FP)')
    ax.plot(Thresh, Recall_Pos, 'b', label='Recall_Pos: TP/TPFN')
    ax.plot(Thresh, Recall_Neg, 'g', label='Recall_Neg: TN/TNFP')
-   ax.plot(Thresh, Precision_Pos, 'c', label='Precision_Pos: TP/(TP+FP)')
+   ax.plot(Thresh, Correct, 'c', label='Correct: (TN+TP)/(TPFN+TNFP)')
    ax.plot(Thresh, Precision_Neg, 'm', label='Precision_Neg: TN/(TN+FN)')

    ax.set_xlim([0, 1])
    ax.set_ylim([0, 1])
-   ax.grid(True)
+   ax.set_xticks(np.arange(0, 1, 0.1))
+   ax.set_yticks(np.arange(0, 1, 0.1))
+   ax.grid(True, linestyle='--')

    ax.set_title('PrecisePos & PreciseNeg')
    ax.set_xlabel(f"Same Num: {TPFN}, Cross Num: {TNFP}")
    ax.legend()
    plt.show()

-   rltpath = os.path.join(similPath, 'pr.png')
+   rltpath = os.path.join(similPath, f'pr_1to1_{simType}.png')
    plt.savefig(rltpath)  # svg, png, pdf
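A natural use of the Correct curve above is to pick the operating threshold that maximizes overall accuracy. A small hedged sketch, with Same/Cross as in the function:

    import numpy as np

    def best_threshold(Same, Cross, n=100):
        # returns the threshold maximizing (TP + TN) / total
        thresh = np.linspace(-0.2, 1, n)
        correct = [(np.sum(Same >= th) + np.sum(Cross < th)) / (len(Same) + len(Cross))
                   for th in thresh]
        return thresh[int(np.argmax(correct))]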
@@ -491,7 +374,7 @@ def one2one_pr(rltdata):
    axes[1].set_xlim([-0.2, 1])
    axes[1].set_title(f'TN({len(Cross)})')

-   rltpath = os.path.join(similPath, 'hist.png')
+   rltpath = os.path.join(similPath, f'hist_1to1_{simType}.png')
    plt.savefig(rltpath)
@@ -499,112 +382,25 @@ def one2one_pr(rltdata):

-def gen_eventdict(sourcePath, saveimg=True):
-    k, errEvents = 0, []
-    for source_path in sourcePath:
-        evtpath, bname = os.path.split(source_path)
-
-        ## Two supported event layouts: a folder, or Yolo-Resnet-Tracker output
-        if os.path.isfile(source_path):
-            bname, ext = os.path.splitext(bname)
-            evt = bname.split("_")
-
-        evt = bname.split('_')
-        condt = len(evt)>=2 and evt[-1].isdigit() and len(evt[-1])>=10
-        if not condt: continue
-
-        # bname = r"20241126-135911-bdf91cf9-3e9a-426d-94e8-ddf92238e175_6923555210479"
-        # source_path = os.path.join(evtpath, bname)
-
-        # Skip events that have already been generated
-        pickpath = os.path.join(eventDataPath, f"{bname}.pickle")
-        if os.path.isfile(pickpath): continue
-
-        # event = ShoppingEvent(source_path, stype="data")
-        # with open(pickpath, 'wb') as f:
-        #     pickle.dump(event, f)
-
-        try:
-            event = ShoppingEvent(source_path, stype="source")
-            # save_data(event, resultPath)
-            with open(pickpath, 'wb') as f:
-                pickle.dump(event, f)
-            print(bname)
-        except Exception as e:
-            errEvents.append(source_path)
-            print(e)
-
-        # k += 1
-        # if k==1:
-        #     break
-
-    errfile = os.path.join(resultPath, 'error_events.txt')
-    with open(errfile, 'w', encoding='utf-8') as f:
-        for line in errEvents:
-            f.write(line + '\n')
-
-def init_std_evt_dict():
-    '''==== 0. Build the event list and the matching barcode list ==========='''
-    bcdList, event_spath = [], []
-    for evtpath in eventSourcePath:
-        for evtname in os.listdir(evtpath):
-            bname, ext = os.path.splitext(evtname)
-
-            ## Two supported event layouts: a folder, or Yolo-Resnet-Tracker output
-            fpath = os.path.join(evtpath, evtname)
-            if os.path.isfile(fpath) and (ext==".pkl" or ext==".pickle"):
-                evt = bname.split('_')
-            elif os.path.isdir(fpath):
-                evt = evtname.split('_')
-            else:
-                continue
-
-            if len(evt)>=2 and evt[-1].isdigit() and len(evt[-1])>=10:
-                bcdList.append(evt[-1])
-                event_spath.append(os.path.join(evtpath, evtname))
-
-    '''==== 1. Build the standard feature set; run once; implemented in genfeats.py ==========='''
-    bcdSet = set(bcdList)
-    gen_bcd_features(stdSamplePath, stdBarcodePath, stdFeaturePath, bcdSet)
-    print("stdFeats have generated and saved!")
-
-    '''==== 2. Build the event dict; only needs to run once ==============='''
-    gen_eventdict(event_spath)
-    print("eventList have generated and saved!")
-
-def test_one2one():
+def test_one2one_one2SN(simType):
    '''1:1 performance evaluation'''
-   # 1. Run once: build the event dict and the matching standard-feature dict
-   init_std_evt_dict()
+   # evtpaths, bcdSet = get_evtList(eventSourcePath)

-   # 2. Build the event set from the intersection of event barcodes and standard-library barcodes
+   '''=== 1. Run once: build the standard-feature dict for the events; skip if already generated ===='''
+   # gen_bcd_features(stdSamplePath, stdBarcodePath, stdFeaturePath, eventSourcePath)
+
+   '''==== 2. Build the event dict; only needs to run once ===================='''
+   # init_eventDict(eventSourcePath, eventDataPath, source_type)
+
+   '''==== 3. Build the event set from the intersection of event barcodes and standard-library barcodes ========='''
    evtList, evtDict, stdDict = build_std_evt_dict()

-   rltdata = one2one_simi(evtList, evtDict, stdDict)
-   one2one_pr(rltdata)
+   one2one_pr(evtList, evtDict, stdDict, simType)

-def test_one2SN():
-   '''1:SN performance evaluation'''
-
-   # 1. Run once: build the event dict and the matching standard-feature dict
-   init_std_evt_dict()
-
-   # 2. Intersect event barcodes with standard-library barcodes
-   evtList, evtDict, stdDict = build_std_evt_dict()
-
-   one2SN_pr(evtList, evtDict, stdDict)
+   one2SN_pr(evtList, evtDict, stdDict, simType)
if __name__ == '__main__':
    '''
@@ -612,26 +408,36 @@ if __name__ == '__main__':
    (1) stdSamplePath: source images used to build the standard comparison feature set
    (2) stdBarcodePath: pickle storage of those source-image paths, {barcode: [imgpath1, imgpath1, ...]}
    (3) stdFeaturePath: storage path of the standard comparison features
-   (4) eventSourcePath: event path
+   (4) eventSourcePath: event path, either a folder containing data files or the parent folder of Yolo-Resnet-Tracker pickle output
    (5) resultPath: result storage path
    (6) eventDataPath: storage path of the shopping events used for 1:1 comparison, under resultPath
    (7) similPath: storage path of 1:1 comparison results (event level), under resultPath
    '''

-   stdSamplePath = r"\\192.168.1.28\share\数据\已完成数据\比对数据\barcode\all_totalBarocde\totalBarcode"
-   stdBarcodePath = r"D:\全实时\source_data\bcdpath"
-   stdFeaturePath = r"D:\全实时\source_data\stdfeats"
+   # stdSamplePath = r"\\192.168.1.28\share\数据\已完成数据\展厅数据\v1.0\比对数据\整理\zhantingBase"
+   # stdBarcodePath = r"D:\exhibition\dataset\bcdpath"
+   # stdFeaturePath = r"\\192.168.1.28\share\数据\已完成数据\比对数据\barcode\all_totalBarocde\features_json\v11_barcode_11592"
+   stdSamplePath = "/home/wqg/dataset/total_barcode/totalBarcode"
+   stdBarcodePath = "/home/wqg/dataset/total_barcode/bcdpath"
+   stdFeaturePath = "/home/wqg/dataset/test_dataset/total_barcode/features_json/v11_barcode_0304/"
+
+   if not os.path.exists(stdBarcodePath):
+       os.makedirs(stdBarcodePath)
+   if not os.path.exists(stdFeaturePath):
+       os.makedirs(stdFeaturePath)
+
+   '''source_type:
+       "source": eventSourcePath is the pickle output of Yolo-Resnet-Tracker
+       "data": the original data files, split per event
+       "realtime": data files generated by the fully real-time pipeline
+   '''
+   source_type = 'source'    # 'source', 'data', 'realtime'
+   simType = "typea"         # "simple", "typea", "typeb"

-   # eventSourcePath = [r'D:\exhibition\images\20241202']
-   # eventSourcePath = [r"\\192.168.1.28\share\测试视频数据以及日志\各模块测试记录\展厅测试\1129_展厅模型v801测试组测试"]
-   eventSourcePath = [r"\\192.168.1.28\share\测试视频数据以及日志\算法全流程测试\202412\result\ShoppingDict_pkfile"]
-   resultPath = r"\\192.168.1.28\share\测试视频数据以及日志\算法全流程测试\202412\result\contrast"
+   evttype = "single_event_V10"
+   # evttype = "single_event_V5"
+   # evttype = "performence_V10"
+   # evttype = "performence_V5"
+   eventSourcePath = "/home/wqg/dataset/pipeline/yrt/{}/shopping_pkl".format(evttype)
+   resultPath = "/home/wqg/dataset/pipeline/contrast/{}".format(evttype)

    eventDataPath = os.path.join(resultPath, "evtobjs")
    similPath = os.path.join(resultPath, "simidata")
    if not os.path.exists(eventDataPath):
@@ -639,9 +445,7 @@ if __name__ == '__main__':
    if not os.path.exists(similPath):
        os.makedirs(similPath)

-   # test_one2one()
-   test_one2SN()
+   test_one2one_one2SN(simType)
@@ -1,8 +1,13 @@

 # -*- coding: utf-8 -*-
 """
 Created on Wed Sep 11 11:57:30 2024
-1:1 performance evaluation of the Yonghui field-trial output data
-for data saved before 2024-10; requires OneToOneCompare.txt
+contrast_pr:
+    1:1, 1:SN and 1:n performance evaluation, run directly on the data files in the test output
+
+test_compare:
+    1:1 performance evaluation of the Yonghui field-trial output data
+    for data saved before 2024-10; requires OneToOneCompare.txt
 @author: ym
 """
 import os
@@ -11,7 +16,11 @@ from pathlib import Path
 import matplotlib.pyplot as plt
 import sys

-sys.path.append(r"D:\DetectTracking")
+FILE = Path(__file__).resolve()
+ROOT = FILE.parents[1]  # YOLOv5 root directory
+if str(ROOT) not in sys.path:
+    sys.path.append(str(ROOT))

 from tracking.utils.read_data import read_similar

 def read_one2one_data(filepath):
@@ -104,24 +113,31 @@ def test_compare():

    plot_pr_curve(simiList)
-def contrast_pr(paths):
+def contrast_pr(evtPaths):
    '''
    1:1
    '''
-   paths = Path(paths)

    evtpaths = []
-   for p in paths.iterdir():
+   # date_ = ['2025-3-4_1', '2025-3-5_1', '2025-3-5_2']
+   # for dt in date_:
+   #     paths = Path(evtPaths) / dt
+   abc = []
+   for p in Path(evtPaths).iterdir():
        condt1 = p.is_dir()
        condt2 = len(p.name.split('_'))>=2
-       condt3 = len(p.name.split('_')[-1])>8
+       condt3 = len(p.name.split('_')[-1])>=8
        condt4 = p.name.split('_')[-1].isdigit()
        if condt1 and condt2 and condt3 and condt4:
            evtpaths.append(p)
+       elif p.is_dir():
+           abc.append(p.stem)

    # evtpaths = [p for p in paths.iterdir() if p.is_dir() and len(p.name.split('_'))>=2 and len(p.name.split('_')[-1])>8]
    # evtpaths = [p for p in paths.iterdir() if p.is_dir()]

+   alg_times = []

    events, similars = [], []
    ##===================================== scan A put A / scan A put B scenarios
    one2oneAA, one2oneAB = [], []
@@ -147,11 +163,12 @@ def contrast_pr(paths):

    errorFile_one2one, errorFile_one2SN, errorFile_one2n = [], [], []
+   errorFile = []
    for path in evtpaths:
        barcode = path.stem.split('_')[-1]
        datapath = path.joinpath('process.data')

-       if not barcode.isdigit() or len(barcode)<10: continue
+       if not barcode.isdigit() or len(barcode)<8: continue
        if not datapath.is_file(): continue

        bcdList.append(barcode)
@@ -167,8 +184,17 @@ def contrast_pr(paths):
        one2SN = SimiDict['one2SN']
        one2n = SimiDict['one2n']

+       if len(one2one)+len(one2SN)+len(one2n) == 0:
+           errorFile.append(path.stem)
+
+       dtime = SimiDict["algroStartToEnd"]
+       if dtime >= 0:
+           alg_times.append((dtime, path.stem))
|
'''================== 0. 1:1 ==================='''
|
||||||
barcodes, similars = [], []
|
barcodes, similars = [], []
|
||||||
|
barcodes_ = []
|
||||||
for dt in one2one:
|
for dt in one2one:
|
||||||
one2onePath.append((path.stem))
|
one2onePath.append((path.stem))
|
||||||
if dt['similar']==0:
|
if dt['similar']==0:
|
||||||
@ -176,6 +202,14 @@ def contrast_pr(paths):
|
|||||||
continue
|
continue
|
||||||
barcodes.append(dt['barcode'])
|
barcodes.append(dt['barcode'])
|
||||||
similars.append(dt['similar'])
|
similars.append(dt['similar'])
|
||||||
|
|
||||||
|
|
||||||
|
barcodes_.append(path.stem)
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
        if len(barcodes)==len(similars) and len(barcodes)!=0:
            ## scan A put A / scan A put B scenarios
            simAA = [similars[i] for i in range(len(barcodes)) if barcodes[i]==barcode]
@@ -204,6 +238,8 @@ def contrast_pr(paths):
                _fp_events.append(path.stem)
            else:
                errorFile_one2one.append(path.stem)
+       elif len(one2SN)+len(one2n) == 0:
+           errorFile_one2one.append(path.stem)

        '''================== 2. 1 : Small N for this scenario ==================='''
@@ -212,6 +248,7 @@ def contrast_pr(paths):
            barcodes.append(dt['barcode'])
            similars.append(dt['similar'])

        if len(barcodes)==len(similars) and len(barcodes)!=0:
            ## scan A put A / scan A put B scenarios
            simAA = [similars[i] for i in range(len(barcodes)) if barcodes[i]==barcode]
@@ -219,10 +256,10 @@ def contrast_pr(paths):

            one2SNAA.extend(simAA)
            one2SNAB.extend(simAB)
            one2SNPath.append(path.stem)
            if len(simAA)==0:
-               one2SNPath1.append(path.stem)
+               errorFile_one2SN.append(path.stem)

            ## Sort by similarity; it is a TP when the barcode matches and ranks first; suited to comparing many barcodes
            max_idx = similars.index(max(similars))
@@ -247,6 +284,7 @@ def contrast_pr(paths):

        '''===================== 3. 1:n for this scenario ========================'''
        events, evt_barcodes, evt_similars, evt_types = [], [], [], []
        for dt in one2n:
@@ -255,8 +293,15 @@ def contrast_pr(paths):
            evt_similars.append(dt["similar"])
            evt_types.append(dt["type"])

-       if len(events)==len(evt_barcodes) and len(evt_barcodes)==len(evt_similars) \
-           and len(evt_similars)==len(evt_types) and len(events)>0:
+       if len(events)==len(evt_barcodes)==len(evt_similars)==len(evt_types) and len(events)>0:
+           if not barcode in evt_barcodes:
+               errorFile_one2n.append(path.stem)
+               continue
+
+           if len(barcodes_):
+               print("do")

            one2nPath.append(path.stem)
            maxsim = evt_similars[evt_similars.index(max(evt_similars))]
@@ -312,9 +357,9 @@ def contrast_pr(paths):
        _TN = sum(np.array(one2oneAB) < th)

        _PPrecise.append(_TP/(_TP+_FP+1e-6))
-       _PRecall.append(_TP/(len(one2oneAA)+1e-6))
+       _PRecall.append(_TP/(_TP+_FN+1e-6))
        _NPrecise.append(_TN/(_TN+_FN+1e-6))
-       _NRecall.append(_TN/(len(one2oneAB)+1e-6))
+       _NRecall.append(_TN/(_TN+_FP+1e-6))

        '''===================================== 1:SN mean'''
        TP_ = sum(np.array(one2SNAA) >= th)
@@ -334,10 +379,10 @@ def contrast_pr(paths):
        FNX = sum(np.array(fn_simi) < th)
        TNX = sum(np.array(tn_simi) < th)
        PPreciseX.append(TPX/(TPX+FPX+1e-6))
-       PRecallX.append(TPX/(len(tp_simi)+len(fn_simi)+1e-6))
+       PRecallX.append(TPX/(TPX+FNX+1e-6))
        NPreciseX.append(TNX/(TNX+FNX+1e-6))
-       NRecallX.append(TNX/(len(tn_simi)+len(fp_simi)+1e-6))
+       NRecallX.append(TNX/(TNX+FPX+1e-6))

        '''===================================== 1:n'''
@@ -347,12 +392,18 @@ def contrast_pr(paths):
        TN = sum(np.array(tnsimi) < th)

        PPrecise.append(TP/(TP+FP+1e-6))
-       PRecall.append(TP/(len(tpsimi)+len(fnsimi)+1e-6))
+       PRecall.append(TP/(TP+FN+1e-6))
        NPrecise.append(TN/(TN+FN+1e-6))
-       NRecall.append(TN/(len(tnsimi)+len(fpsimi)+1e-6))
+       NRecall.append(TN/(TN+FP+1e-6))

+   algtime = []
+   for tm, _ in alg_times:
+       algtime.append(tm)
+   fig, ax = plt.subplots()
+   ax.hist(np.array(algtime), bins=100, edgecolor='black')
+   ax.set_title('Algorthm Spend Time')
+   ax.set_xlabel(f"Event Num: {len(alg_times)}")
+   plt.show()
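Several hunks above swap recall denominators of the form len(pos) for TP+FN. The two forms agree whenever FN is defined as the positives falling below the threshold, since then TP+FN equals len(pos). A one-line check on toy data, purely illustrative:

    import numpy as np
    pos = np.array([0.9, 0.4, 0.7]); th = 0.5
    TP, FN = np.sum(pos >= th), np.sum(pos < th)
    assert TP + FN == len(pos)   # so TP/len(pos) == TP/(TP+FN)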
    '''1. ============================= 1:1 max-value scheme, curves'''
    fig, ax = plt.subplots()
@@ -362,7 +413,9 @@ def contrast_pr(paths):
    ax.plot(Thresh, _NRecall, 'c', label='Recall_Neg: TN/TNFN')
    ax.set_xlim([0, 1])
    ax.set_ylim([0, 1])
-   ax.grid(True)
+   ax.set_xticks(np.arange(0, 1, 0.1))
+   ax.set_yticks(np.arange(0, 1, 0.1))
+   ax.grid(True, linestyle='--')
    ax.set_title('1:1 Precise & Recall')
    ax.set_xlabel(f"Event Num: {len(one2oneAA)+len(one2oneAB)}")
    ax.legend()
@@ -381,30 +434,30 @@ def contrast_pr(paths):

    '''2. ============================= 1:1 mean-value scheme, curves'''
-   fig, ax = plt.subplots()
-   ax.plot(Thresh, PPrecise_, 'r', label='Precise_Pos: TP/TPFP')
-   ax.plot(Thresh, PRecall_, 'b', label='Recall_Pos: TP/TPFN')
-   ax.plot(Thresh, NPrecise_, 'g', label='Precise_Neg: TN/TNFP')
-   ax.plot(Thresh, NRecall_, 'c', label='Recall_Neg: TN/TNFN')
-   ax.set_xlim([0, 1])
-   ax.set_ylim([0, 1])
-   ax.grid(True)
-   ax.set_title('1:1 Precise & Recall')
-   ax.set_xlabel(f"Event Num: {len(one2SNAA)}")
-   ax.legend()
-   plt.show()
-   ## ============================= 1:1 mean-value scheme, histograms'''
-   fig, axes = plt.subplots(2, 1)
-   axes[0].hist(np.array(one2SNAA), bins=60, edgecolor='black')
-   axes[0].set_xlim([-0.2, 1])
-   axes[0].set_title('AA')
-   axes[0].set_xlabel(f"Event Num: {len(one2SNAA)}")
-   axes[1].hist(np.array(one2SNAB), bins=60, edgecolor='black')
-   axes[1].set_xlim([-0.2, 1])
-   axes[1].set_title('BB')
-   axes[1].set_xlabel(f"Event Num: {len(one2SNAB)}")
-   plt.show()
+   # fig, ax = plt.subplots()
+   # ax.plot(Thresh, PPrecise_, 'r', label='Precise_Pos: TP/TPFP')
+   # ax.plot(Thresh, PRecall_, 'b', label='Recall_Pos: TP/TPFN')
+   # ax.plot(Thresh, NPrecise_, 'g', label='Precise_Neg: TN/TNFP')
+   # ax.plot(Thresh, NRecall_, 'c', label='Recall_Neg: TN/TNFN')
+   # ax.set_xlim([0, 1])
+   # ax.set_ylim([0, 1])
+   # ax.grid(True)
+   # ax.set_title('1:1 Precise & Recall')
+   # ax.set_xlabel(f"Event Num: {len(one2SNAA)}")
+   # ax.legend()
+   # plt.show()
+   # ## ============================= 1:1 mean-value scheme, histograms'''
+   # fig, axes = plt.subplots(2, 1)
+   # axes[0].hist(np.array(one2SNAA), bins=60, edgecolor='black')
+   # axes[0].set_xlim([-0.2, 1])
+   # axes[0].set_title('AA')
+   # axes[0].set_xlabel(f"Event Num: {len(one2SNAA)}")
+   # axes[1].hist(np.array(one2SNAB), bins=60, edgecolor='black')
+   # axes[1].set_xlim([-0.2, 1])
+   # axes[1].set_title('BB')
+   # axes[1].set_xlabel(f"Event Num: {len(one2SNAB)}")
+   # plt.show()
    ''''3. ============================= 1:SN curves'''
    fig, ax = plt.subplots()
@@ -414,7 +467,9 @@ def contrast_pr(paths):
    ax.plot(Thresh, NRecallX, 'c', label='Recall_Neg: TN/TNFN')
    ax.set_xlim([0, 1])
    ax.set_ylim([0, 1])
-   ax.grid(True)
+   ax.set_xticks(np.arange(0, 1, 0.1))
+   ax.set_yticks(np.arange(0, 1, 0.1))
+   ax.grid(True, linestyle='--')
    ax.set_title('1:SN Precise & Recall')
    ax.set_xlabel(f"Event Num: {len(one2SNAA)}")
    ax.legend()
@@ -444,7 +499,9 @@ def contrast_pr(paths):
    ax.plot(Thresh, NRecall, 'c', label='Recall_Neg: TN/TNFN')
    ax.set_xlim([0, 1])
    ax.set_ylim([0, 1])
-   ax.grid(True)
+   ax.set_xticks(np.arange(0, 1, 0.1))
+   ax.set_yticks(np.arange(0, 1, 0.1))
+   ax.grid(True, linestyle='--')
    ax.set_title('1:n Precise & Recall')
    ax.set_xlabel(f"Event Num: {len(tpsimi)+len(fnsimi)}")
    ax.legend()
@@ -461,31 +518,31 @@ def contrast_pr(paths):
    axes[1, 0].set_xlim([-0.2, 1])
    axes[1, 0].set_title(f'TN({len(tnsimi)})')
    axes[1, 1].hist(fnsimi, bins=60, edgecolor='black')
    axes[1, 1].set_xlim([-0.2, 1])
    axes[1, 1].set_title(f'FN({len(fnsimi)})')
    plt.show()

-   fpsnErrFile = str(paths.joinpath("one2SN_Error.txt"))
-   with open(fpsnErrFile, "w") as file:
-       for item in fp_events:
-           file.write(item + "\n")
-
-   fpErrFile = str(paths.joinpath("one2n_Error.txt"))
-   with open(fpErrFile, "w") as file:
-       for item in fpevents:
-           file.write(item + "\n")
+   # fpsnErrFile = str(paths.joinpath("one2SN_Error.txt"))
+   # with open(fpsnErrFile, "w") as file:
+   #     for item in fp_events:
+   #         file.write(item + "\n")
+
+   # fpErrFile = str(paths.joinpath("one2n_Error.txt"))
+   # with open(fpErrFile, "w") as file:
+   #     for item in fpevents:
+   #         file.write(item + "\n")

    # bcdSet = set(bcdList)
-   # one2nErrFile = str(paths.joinpath("one_2_Small_n_Error.txt"))
+   # one2nErrFile = os.path.join(evtPaths, "one_2_Small_n_Error.txt")
    # with open(one2nErrFile, "w") as file:
    #     for item in fnevents:
    #         file.write(item + "\n")

-   # one2NErrFile = str(paths.joinpath("one_2_Big_N_Error.txt"))
+   # one2NErrFile = os.path.join(evtPaths, "one_2_Big_N_Error.txt")
    # with open(one2NErrFile, "w") as file:
    #     for item in fn_events:
    #         file.write(item + "\n")
@@ -493,9 +550,8 @@ def contrast_pr(paths):
    print('Done!')

if __name__ == "__main__":
-   evtpaths = r"D:\全实时\source_data\2024122416"
+   evtpaths = r"/home/wqg/dataset/test_base_dataset/single_event/source"
    contrast_pr(evtpaths)
@@ -58,6 +58,12 @@ def save_imgpairs(barcode, imgpaths, matrix, savepath, thresh=(0.4, 0.6), ctype=

 def feat_analysis(featpath):
+    '''
+    Intra-class and inter-class similarity distributions of the samples in the standard feature set
+    '''

    savepath = r"D:\exhibition\result\stdfeat"

    InterThresh = (0.4, 0.6)
172
contrast/trail2trail.py
Normal file
@ -0,0 +1,172 @@
|
|||||||
|
#!/usr/bin/env python3
|
||||||
|
# -*- coding: utf-8 -*-
|
||||||
|
"""
|
||||||
|
取出再放回场景下商品轨迹特征比对方式与性能分析
|
||||||
|
|
||||||
|
Created on Tue Apr 1 17:17:47 2025
|
||||||
|
@author: wqg
|
||||||
|
"""
|
||||||
|
import os
|
||||||
|
import pickle
|
||||||
|
import random
|
||||||
|
import numpy as np
|
||||||
|
from pathlib import Path
|
||||||
|
import matplotlib.pyplot as plt
|
||||||
|
from scipy.spatial.distance import cdist
|
||||||
|
from utils.calsimi import calsiml, calsimi_vs_evts
|
||||||
|
|
||||||
|
def read_eventdict(evtpaths):
|
||||||
|
evtDict = {}
|
||||||
|
for filename in os.listdir(evtpaths):
|
||||||
|
evtname, ext = os.path.splitext(filename)
|
||||||
|
if ext != ".pickle": continue
|
||||||
|
|
||||||
|
evtpath = os.path.join(evtpaths, filename)
|
||||||
|
with open(evtpath, 'rb') as f:
|
||||||
|
evtdata = pickle.load(f)
|
||||||
|
evtDict[evtname] = evtdata
|
||||||
|
|
||||||
|
|
||||||
|
return evtDict
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
def compute_show_pr(Same, Cross):
|
||||||
|
TPFN = len(Same)
|
||||||
|
TNFP = len(Cross)
|
||||||
|
|
||||||
|
Recall_Pos, Recall_Neg = [], []
|
||||||
|
Precision_Pos, Precision_Neg = [], []
|
||||||
|
Correct = []
|
||||||
|
Thresh = np.linspace(-0.2, 1, 100)
|
||||||
|
for th in Thresh:
|
||||||
|
TP = np.sum(Same >= th)
|
||||||
|
FN = np.sum(Same < th)
|
||||||
|
# FN = TPFN - TP
|
||||||
|
|
||||||
|
TN = np.sum(Cross < th)
|
||||||
|
FP = np.sum(Cross >= th)
|
||||||
|
# FP = TNFP - TN
|
||||||
|
|
||||||
|
|
||||||
|
Precision_Pos.append(TP/(TP+FP+1e-6))
|
||||||
|
Precision_Neg.append(TN/(TN+FN+1e-6))
|
||||||
|
Recall_Pos.append(TP/(TP+FN+1e-6))
|
||||||
|
Recall_Neg.append(TN/(TN+FP+1e-6))
|
||||||
|
|
||||||
|
# Recall_Pos.append(TP/TPFN)
|
||||||
|
# Recall_Neg.append(TN/TNFP)
|
||||||
|
|
||||||
|
|
||||||
|
Correct.append((TN+TP)/(TPFN+TNFP))
|
||||||
|
|
||||||
|
fig, ax = plt.subplots()
|
||||||
|
|
||||||
|
ax.plot(Thresh, Precision_Pos, 'r', label='Precision_Pos: TP/(TP+FP)')
|
||||||
|
ax.plot(Thresh, Recall_Pos, 'b', label='Recall_Pos: TP/TPFN')
|
||||||
|
ax.plot(Thresh, Recall_Neg, 'g', label='Recall_Neg: TN/TNFP')
|
||||||
|
ax.plot(Thresh, Correct, 'c', label='Correct: (TN+TP)/(TPFN+TNFP)')
|
||||||
|
ax.plot(Thresh, Precision_Neg, 'm', label='Precision_Neg: TN/(TN+FN)')
|
||||||
|
|
||||||
|
ax.set_xlim([0, 1])
|
||||||
|
ax.set_ylim([0, 1])
|
||||||
|
|
||||||
|
ax.set_xticks(np.arange(0, 1, 0.1))
|
||||||
|
ax.set_yticks(np.arange(0, 1, 0.1))
|
||||||
|
ax.grid(True, linestyle='--')
|
||||||
|
|
||||||
|
ax.set_title('PrecisePos & PreciseNeg')
|
||||||
|
ax.set_xlabel(f"Same Num: {TPFN}, Cross Num: {TNFP}")
|
||||||
|
ax.legend()
|
||||||
|
plt.show()
|
||||||
|
|
||||||
|
# rltpath = os.path.join(similPath, f'pr_1to1_{simType}.png')
|
||||||
|
# plt.savefig(rltpath) # svg, png, pdf
|
||||||
|
|
||||||
|
|
||||||
|
fig, axes = plt.subplots(2,1)
|
||||||
|
axes[0].hist(Same, bins=60, range=(-0.2, 1), edgecolor='black')
|
||||||
|
axes[0].set_xlim([-0.2, 1])
|
||||||
|
axes[0].set_title(f'TP({len(Same)})')
|
||||||
|
|
||||||
|
axes[1].hist(Cross, bins=60, range=(-0.2, 1), edgecolor='black')
|
||||||
|
axes[1].set_xlim([-0.2, 1])
|
||||||
|
axes[1].set_title(f'TN({len(Cross)})')
|
||||||
|
|
||||||
|
# rltpath = os.path.join(similPath, f'hist_1to1_{simType}.png')
|
||||||
|
# plt.savefig(rltpath)
|
||||||
|
|
||||||
|
|
||||||
|
plt.show()
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
def trail_to_trail(evtpaths, rltpaths):
|
||||||
|
# select the method type of how to calculate the feat similarity of trail
|
||||||
|
simType = 2
|
||||||
|
|
||||||
|
##1. read all the ShoppingEvent object in the dir 'evtpaths'
|
||||||
|
evtDicts = read_eventdict(evtpaths)
|
||||||
|
|
||||||
|
##2. Combine event object with the same barcode
|
||||||
|
barcodes, evtpairDict = [], {}
|
||||||
|
for k in evtDicts.keys():
|
||||||
|
evt = k.split('_')
|
||||||
|
condt = len(evt)>=2 and evt[-1].isdigit() and len(evt[-1])>=10
|
||||||
|
if not condt: continue
|
||||||
|
|
||||||
|
barcode = evt[-1]
|
||||||
|
if barcode not in evtpairDict.keys():
|
||||||
|
evtpairDict[barcode] = []
|
||||||
|
barcodes.append(barcode)
|
||||||
|
|
||||||
|
evtpairDict[barcode].append(evtDicts[k])
|
||||||
|
barcodes = set(barcodes)
|
||||||
|
|
||||||
|
AA_list, AB_list = [], []
|
||||||
|
for barcode in evtpairDict.keys():
|
||||||
|
events = evtpairDict[barcode]
|
||||||
|
if len(events)>1:
|
||||||
|
evta, evtb = random.sample(events, 2)
|
||||||
|
AA_list.append((evta, evtb, "same"))
|
||||||
|
|
||||||
|
evtc = random.sample(events, 1)[0]
|
||||||
|
|
||||||
|
dset = list(barcodes.symmetric_difference(set([barcode])))
|
||||||
|
bcd = random.sample(dset, 1)[0]
|
||||||
|
evtd = random.sample(evtpairDict[bcd], 1)[0]
|
||||||
|
AB_list.append((evtc, evtd, "diff"))
|
||||||
|
|
||||||
|
mergePairs = AA_list + AB_list
|
||||||
|
|
||||||
|
##3. calculate the similar of two event: evta, evtb
|
||||||
|
new_pirs = []
|
||||||
|
for evta, evtb, label in mergePairs:
|
||||||
|
similar = calsimi_vs_evts(evta, evtb, simType)
|
||||||
|
if similar is None:
|
||||||
|
continue
|
||||||
|
new_pirs.append((label, round(similar, 3), evta.evtname[:15], evtb.evtname[:15]))
|
||||||
|
|
||||||
|
##4. compute PR and showing
|
||||||
|
Same = np.array([s for label, s, _, _ in new_pirs if label=="same"])
|
||||||
|
Cross = np.array([s for label, s, _, _ in new_pirs if label=="diff"])
|
||||||
|
compute_show_pr(Same, Cross)
|
||||||
|
|
||||||
|
|
||||||
|
def main():
    # note: "performence" matches the on-disk directory names
    evttypes = ["single_event_V10", "single_event_V5", "performence_V10", "performence_V5"]
    # evttypes = ["single_event_V10"]

    for evttype in evttypes:
        evtpaths = "/home/wqg/dataset/pipeline/contrast/{}/evtobjs/".format(evttype)
        rltpaths = "/home/wqg/dataset/pipeline/yrt/{}/yolos_tracking".format(evttype)

        trail_to_trail(evtpaths, rltpaths)


if __name__ == '__main__':
    main()
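For intuition about what compute_show_pr receives: a minimal sketch with synthetic scores (compute_show_pr is assumed to be the PR-plotting helper whose tail appears above; the score distributions are made up for illustration):

    import numpy as np

    # synthetic 1:1 similarity scores: same-barcode pairs should score high,
    # cross-barcode pairs low
    rng = np.random.default_rng(0)
    Same = np.clip(rng.normal(0.75, 0.10, 500), -0.2, 1.0)
    Cross = np.clip(rng.normal(0.35, 0.10, 500), -0.2, 1.0)

    compute_show_pr(Same, Cross)  # sweeps thresholds and plots precision/recall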
BIN
contrast/utils/__pycache__/__init__.cpython-312.pyc
Normal file
BIN
contrast/utils/__pycache__/calsimi.cpython-312.pyc
Normal file
BIN
contrast/utils/__pycache__/databits.cpython-312.pyc
Normal file
BIN
contrast/utils/__pycache__/event.cpython-312.pyc
Normal file
BIN
contrast/utils/__pycache__/tools.cpython-312.pyc
Normal file
216
contrast/utils/calsimi.py
Normal file
@ -0,0 +1,216 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Mar 31 16:25:43 2025

@author: wqg
"""
import numpy as np
from scipy.spatial.distance import cdist


def get_topk_percent(data, k):
    """
    Return the largest k% of the elements in data.
    """
    # convert the input to a NumPy array
    if isinstance(data, list):
        data = np.array(data)

    percentile = np.percentile(data, 100-k)
    top_k_percent = data[data >= percentile]

    return top_k_percent


def cluster(data, thresh=0.15):
    # data = np.array([0.1, 0.13, 0.7, 0.2, 0.8, 0.52, 0.3, 0.7, 0.85, 0.58])
    # data = np.array([0.1, 0.13, 0.2, 0.3])
    # data = np.array([0.1])

    if isinstance(data, list):
        data = np.array(data)

    data1 = np.sort(data)
    cluter, Cluters = [data1[0]], []
    for i in range(1, len(data1)):
        if data1[i] - data1[i-1] < thresh:
            cluter.append(data1[i])
        else:
            Cluters.append(cluter)
            cluter = [data1[i]]
    Cluters.append(cluter)

    clt_center = []
    for clt in Cluters:
        ## Should a minimum number of track samples per cluster be enforced here?
        ## That constraint belongs in the trajectory analysis instead.
        # if len(clt)>=3:
        #     clt_center.append(np.mean(clt))
        clt_center.append(np.mean(clt))

    # print(clt_center)

    return clt_center


def calsiml(feat1, feat2, topkp=75, cluth=0.15):
    '''Selection strategy for the similarity between track samples and the standard feature set.'''
    matrix = 1 - cdist(feat1, feat2, 'cosine')
    simi_max = []
    for i in range(len(matrix)):
        sim = np.mean(get_topk_percent(matrix[i, :], topkp))
        simi_max.append(sim)
    cltc_max = cluster(simi_max, cluth)
    Simi = max(cltc_max)

    ## An empty cltc_max would be a programming oversight and should be tracked down.
    # if len(cltc_max):
    #     Simi = max(cltc_max)
    # else:
    #     Simi = 0  # should never be reached

    return Simi


def calsimi_vs_stdfeat_new(event, stdfeat):
    '''Comparison strategy between an event and the standard library.
       Could this strategy be extended to event-vs-event comparison?
    '''
    front_boxes = np.empty((0, 9), dtype=np.float64)    ## compatible with class doTracks
    front_feats = np.empty((0, 256), dtype=np.float64)  ## compatible with class doTracks
    for i in range(len(event.front_boxes)):
        front_boxes = np.concatenate((front_boxes, event.front_boxes[i]), axis=0)
        front_feats = np.concatenate((front_feats, event.front_feats[i]), axis=0)

    back_boxes = np.empty((0, 9), dtype=np.float64)    ## compatible with class doTracks
    back_feats = np.empty((0, 256), dtype=np.float64)  ## compatible with class doTracks
    for i in range(len(event.back_boxes)):
        back_boxes = np.concatenate((back_boxes, event.back_boxes[i]), axis=0)
        back_feats = np.concatenate((back_feats, event.back_feats[i]), axis=0)

    front_simi, back_simi = None, None
    if len(front_feats):
        front_simi = calsiml(front_feats, stdfeat)
    if len(back_feats):
        back_simi = calsiml(back_feats, stdfeat)

    '''Fusion strategy for the front- and back-camera similarities'''
    if len(front_feats) and len(back_feats):
        diff_simi = abs(front_simi - back_simi)
        if diff_simi > 0.15:
            Similar = max([front_simi, back_simi])
        else:
            Similar = (front_simi + back_simi) / 2
    elif len(front_feats) and len(back_feats)==0:
        Similar = front_simi
    elif len(front_feats)==0 and len(back_feats):
        Similar = back_simi
    else:
        Similar = None  # when event.front_feats and event.back_feats are both empty

    return Similar, front_simi, back_simi


def calsimi_vs_stdfeat(event, stdfeat):
    evtfeat = event.feats_compose
    if isinstance(event.feats_select, list):
        if len(event.feats_select) and len(event.feats_select[0]):
            evtfeat = event.feats_select[0]
        else:
            return None, None, None
    else:
        evtfeat = event.feats_select

    if len(evtfeat)==0 or len(stdfeat)==0:
        return None, None, None

    evtfeat /= np.linalg.norm(evtfeat, axis=1)[:, None]
    stdfeat /= np.linalg.norm(stdfeat, axis=1)[:, None]

    matrix = 1 - cdist(evtfeat, stdfeat, 'cosine')
    matrix[matrix < 0] = 0

    simi_mean = np.mean(matrix)
    simi_max = np.max(matrix)
    stdfeatm = np.mean(stdfeat, axis=0, keepdims=True)
    evtfeatm = np.mean(evtfeat, axis=0, keepdims=True)
    simi_mfeat = 1 - np.maximum(0.0, cdist(stdfeatm, evtfeatm, 'cosine'))

    return simi_mean, simi_max, simi_mfeat[0, 0]


def calsimi_vs_evts(evta, evtb, simType=1):
    if simType==1:
        if len(evta.feats_compose) and len(evtb.feats_compose):
            feata = evta.feats_compose
            featb = evtb.feats_compose
            matrix = 1 - cdist(feata, featb, 'cosine')
            similar = np.mean(matrix)
        else:
            similar = None
        return similar

    if simType==2:
        if len(evta.feats_compose) and len(evtb.feats_compose):
            feata = evta.feats_compose
            featb = evtb.feats_compose
            matrix = 1 - cdist(feata, featb, 'cosine')
            similar = np.max(matrix)
        else:
            similar = None
        return similar

    if simType==3:
        if len(evta.feats_compose) and len(evtb.feats_compose):
            feata = evta.feats_compose
            featb = evtb.feats_compose
            similar = calsiml(feata, featb)
        else:
            similar = None
        return similar

    ##1. the front feats of evta, evtb
    fr_feata = np.empty((0, 256), dtype=np.float64)  ## compatible with class doTracks
    for i in range(len(evta.front_feats)):
        fr_feata = np.concatenate((fr_feata, evta.front_feats[i]), axis=0)

    fr_featb = np.empty((0, 256), dtype=np.float64)  ## compatible with class doTracks
    for i in range(len(evtb.front_feats)):
        fr_featb = np.concatenate((fr_featb, evtb.front_feats[i]), axis=0)

    ##2. the back feats of evta, evtb
    bk_feata = np.empty((0, 256), dtype=np.float64)  ## compatible with class doTracks
    for i in range(len(evta.back_feats)):
        bk_feata = np.concatenate((bk_feata, evta.back_feats[i]), axis=0)

    bk_featb = np.empty((0, 256), dtype=np.float64)  ## compatible with class doTracks
    for i in range(len(evtb.back_feats)):
        bk_featb = np.concatenate((bk_featb, evtb.back_feats[i]), axis=0)

    front_simi, back_simi = None, None
    if len(fr_feata) and len(fr_featb):
        front_simi = calsiml(fr_feata, fr_featb)

    if len(bk_feata) and len(bk_featb):
        back_simi = calsiml(bk_feata, bk_featb)

    '''Fusion strategy for the front- and back-camera similarities'''
    if front_simi is not None and back_simi is not None:
        diff_simi = abs(front_simi - back_simi)
        if diff_simi > 0.15:
            similar = max([front_simi, back_simi])
        else:
            similar = (front_simi + back_simi) / 2
    elif front_simi is not None and back_simi is None:
        similar = front_simi
    elif front_simi is None and back_simi is not None:
        similar = back_simi
    else:
        similar = None  # when event.front_feats and event.back_feats are both empty

    return similar
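A quick sanity-check sketch of the helpers above (synthetic unit-norm features; the import path assumes the repo root is on sys.path):

    import numpy as np
    from contrast.utils.calsimi import get_topk_percent, cluster, calsiml

    rng = np.random.default_rng(0)
    feat1 = rng.normal(size=(8, 256))
    feat1 /= np.linalg.norm(feat1, axis=1, keepdims=True)   # e.g. track features of one event
    feat2 = rng.normal(size=(12, 256))
    feat2 /= np.linalg.norm(feat2, axis=1, keepdims=True)   # e.g. a standard feature set

    print(get_topk_percent([0.1, 0.5, 0.9, 0.7], 50))      # -> [0.9 0.7], the top 50%
    print(cluster([0.10, 0.13, 0.70, 0.72], thresh=0.15))  # -> [0.115, 0.71], two gap-split cluster centers
    print(calsiml(feat1, feat2))                           # one fused similarity score for the pair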
127
contrast/utils/databits.py
Normal file
@ -0,0 +1,127 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Apr  1 16:43:04 2025

@author: wqg
"""
import os
import pickle
import numpy as np
from scipy.spatial.distance import cdist


def int8_to_ft16(arr_uint8, amin, amax):
    arr_ft16 = (arr_uint8 / 255 * (amax-amin) + amin).astype(np.float16)

    return arr_ft16


def ft16_to_uint8(arr_ft16):
    # pickpath = r"\\192.168.1.28\share\测试_202406\contrast\std_features_ft32vsft16\6902265587712_ft16.pickle"

    # with open(pickpath, 'rb') as f:
    #     edict = pickle.load(f)

    # arr_ft16 = edict['feats']

    amin = np.min(arr_ft16)
    amax = np.max(arr_ft16)
    arr_ft255 = (arr_ft16 - amin) * 255 / (amax-amin)
    arr_uint8 = arr_ft255.astype(np.uint8)

    arr_ft16_ = int8_to_ft16(arr_uint8, amin, amax)

    arrDistNorm = np.linalg.norm(arr_ft16_ - arr_ft16) / arr_ft16_.size  # diagnostic: mean round-trip error

    return arr_uint8, arr_ft16_


def data_precision_compare(stdfeat, evtfeat, evtMessage, similPath='', save=True):
    evt, stdbcd, label = evtMessage
    rltdata, rltdata_ft16, rltdata_ft16_ = [], [], []

    matrix = 1 - cdist(stdfeat, evtfeat, 'cosine')
    simi_mean = np.mean(matrix)
    simi_max = np.max(matrix)
    stdfeatm = np.mean(stdfeat, axis=0, keepdims=True)
    evtfeatm = np.mean(evtfeat, axis=0, keepdims=True)
    simi_mfeat = 1 - np.maximum(0.0, cdist(stdfeatm, evtfeatm, 'cosine'))
    rltdata = [label, stdbcd, evt, simi_mean, simi_max, simi_mfeat[0,0]]

    ##================================================================= float16
    stdfeat_ft16 = stdfeat.astype(np.float16)
    evtfeat_ft16 = evtfeat.astype(np.float16)
    stdfeat_ft16 /= np.linalg.norm(stdfeat_ft16, axis=1)[:, None]
    evtfeat_ft16 /= np.linalg.norm(evtfeat_ft16, axis=1)[:, None]

    matrix_ft16 = 1 - cdist(stdfeat_ft16, evtfeat_ft16, 'cosine')
    simi_mean_ft16 = np.mean(matrix_ft16)
    simi_max_ft16 = np.max(matrix_ft16)
    stdfeatm_ft16 = np.mean(stdfeat_ft16, axis=0, keepdims=True)
    evtfeatm_ft16 = np.mean(evtfeat_ft16, axis=0, keepdims=True)
    simi_mfeat_ft16 = 1 - np.maximum(0.0, cdist(stdfeatm_ft16, evtfeatm_ft16, 'cosine'))
    rltdata_ft16 = [label, stdbcd, evt, simi_mean_ft16, simi_max_ft16, simi_mfeat_ft16[0,0]]

    '''****************** uint8 is ok!!!!!! ******************'''
    ##=================================================================== uint8
    # stdfeat_uint8, stdfeat_ft16_ = ft16_to_uint8(stdfeat_ft16)
    # evtfeat_uint8, evtfeat_ft16_ = ft16_to_uint8(evtfeat_ft16)

    stdfeat_uint8 = (stdfeat_ft16*128).astype(np.int8)
    evtfeat_uint8 = (evtfeat_ft16*128).astype(np.int8)
    stdfeat_ft16_ = stdfeat_uint8.astype(np.float16)/128
    evtfeat_ft16_ = evtfeat_uint8.astype(np.float16)/128

    absdiff = np.linalg.norm(stdfeat_ft16_ - stdfeat) / stdfeat.size

    matrix_ft16_ = 1 - cdist(stdfeat_ft16_, evtfeat_ft16_, 'cosine')
    simi_mean_ft16_ = np.mean(matrix_ft16_)
    simi_max_ft16_ = np.max(matrix_ft16_)
    stdfeatm_ft16_ = np.mean(stdfeat_ft16_, axis=0, keepdims=True)
    evtfeatm_ft16_ = np.mean(evtfeat_ft16_, axis=0, keepdims=True)
    simi_mfeat_ft16_ = 1 - np.maximum(0.0, cdist(stdfeatm_ft16_, evtfeatm_ft16_, 'cosine'))
    rltdata_ft16_ = [label, stdbcd, evt, simi_mean_ft16_, simi_max_ft16_, simi_mfeat_ft16_[0,0]]

    if not save:
        return

    ##========================================================= save as float32
    rppath = os.path.join(similPath, f'{evt}_ft32.pickle')
    with open(rppath, 'wb') as f:
        pickle.dump(rltdata, f)

    rtpath = os.path.join(similPath, f'{evt}_ft32.txt')
    with open(rtpath, 'w', encoding='utf-8') as f:
        for result in [rltdata]:  # rltdata is a single result row, not a list of rows
            part = [f"{x:.3f}" if isinstance(x, float) else str(x) for x in result]
            line = ', '.join(part)
            f.write(line + '\n')

    ##========================================================= save as float16
    rppath_ft16 = os.path.join(similPath, f'{evt}_ft16.pickle')
    with open(rppath_ft16, 'wb') as f:
        pickle.dump(rltdata_ft16, f)

    rtpath_ft16 = os.path.join(similPath, f'{evt}_ft16.txt')
    with open(rtpath_ft16, 'w', encoding='utf-8') as f:
        for result in [rltdata_ft16]:  # single row, see above
            part = [f"{x:.3f}" if isinstance(x, float) else str(x) for x in result]
            line = ', '.join(part)
            f.write(line + '\n')

    ##=========================================================== save as uint8
    rppath_uint8 = os.path.join(similPath, f'{evt}_uint8.pickle')
    with open(rppath_uint8, 'wb') as f:
        pickle.dump(rltdata_ft16_, f)

    rtpath_uint8 = os.path.join(similPath, f'{evt}_uint8.txt')
    with open(rtpath_uint8, 'w', encoding='utf-8') as f:
        for result in [rltdata_ft16_]:  # single row, see above
            part = [f"{x:.3f}" if isinstance(x, float) else str(x) for x in result]
            line = ', '.join(part)
            f.write(line + '\n')
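The int8 round-trip used in data_precision_compare, as a self-contained sketch (synthetic unit-norm float32 features; names are illustrative):

    import numpy as np

    rng = np.random.default_rng(0)
    feats = rng.normal(size=(16, 256)).astype(np.float32)
    feats /= np.linalg.norm(feats, axis=1, keepdims=True)  # row values fall in [-1, 1]

    q = (feats * 128).astype(np.int8)      # quantize (step size 1/128)
    feats_ = q.astype(np.float16) / 128    # dequantize

    err = np.linalg.norm(feats_ - feats) / feats.size
    print(f"mean reconstruction error: {err:.2e}")  # small: bounded by the 1/128 quantization step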
@ -5,18 +5,30 @@ Created on Tue Nov 26 17:35:05 2024
 @author: ym
 """
 import os
+import sys
 import cv2
 import pickle
 import numpy as np
 from pathlib import Path
 
-import sys
-sys.path.append(r"D:\DetectTracking")
+FILE = Path(__file__).resolve()
+ROOT = FILE.parents[2]  # YOLOv5 root directory
+if str(ROOT) not in sys.path:
+    sys.path.append(str(ROOT))
+
 from tracking.utils.plotting import Annotator, colors
 from tracking.utils.drawtracks import drawTrack
 from tracking.utils.read_data import extract_data, read_tracking_output, read_similar
 from tracking.utils.read_data import extract_data_realtime, read_tracking_output_realtime
 
+# import platform
+# import pathlib
+# plt = platform.system()
+
 IMG_FORMAT = ['.bmp', '.jpg', '.jpeg', '.png']
 VID_FORMAT = ['.mp4', '.avi']
@ -76,7 +88,11 @@ def array2list(bboxes):
 
 class ShoppingEvent:
     def __init__(self, eventpath, stype="data"):
-        '''stype: str, one of three types: 'source', 'data', 'realtime' '''
+        '''stype: str, one of three types: 'source', 'data', 'realtime'
+           source: files generated by the pipeline from the front/back-camera videos
+           data: the original event-split version of the data files
+           realtime: data files generated fully in real time
+        '''
         self.eventpath = eventpath
         self.evtname = str(Path(eventpath).stem)
@ -167,6 +183,8 @@ class ShoppingEvent:
 
     def from_source_pkl(self, eventpath):
+        # if plt == 'Windows':
+        #     pathlib.PosixPath = pathlib.WindowsPath
         with open(eventpath, 'rb') as f:
            ShoppingDict = pickle.load(f)
@ -296,13 +314,13 @@ class ShoppingEvent:
         self.front_feats = tracking_output_feats
 
     def from_realtime_datafile(self, eventpath):
-        # evtList = self.evtname.split('_')
-        # if len(evtList)>=2 and len(evtList[-1])>=10 and evtList[-1].isdigit():
-        #     self.barcode = evtList[-1]
-        # if len(evtList)==3 and evtList[-1]== evtList[-2]:
-        #     self.evtType = 'input'
-        # else:
-        #     self.evtType = 'other'
+        evtList = self.evtname.split('_')
+        if len(evtList)>=2 and len(evtList[-1])>=10 and evtList[-1].isdigit():
+            self.barcode = evtList[-1]
+        if len(evtList)==3 and evtList[-1]== evtList[-2]:
+            self.evtType = 'input'
+        else:
+            self.evtType = 'other'
 
         '''================ path of video ============='''
         for vidname in os.listdir(eventpath):
@ -330,7 +348,7 @@ class ShoppingEvent:
             if not os.path.isfile(datapath): continue
             CamerType = dataname.split('_')[0]
             '''========== 0/1_track.data =========='''
-            if dataname.find("_track.data")>0:
+            if dataname.find("_tracker.data")>0:
                 trackerboxes, trackerfeats = extract_data_realtime(datapath)
                 if CamerType == '0':
                     self.back_trackerboxes = trackerboxes
@ -4,8 +4,81 @@ Created on Thu Oct 31 15:17:01 2024
 
 @author: ym
 """
+import os
 import numpy as np
+import pickle
+from pathlib import Path
 import matplotlib.pyplot as plt
+from .event import ShoppingEvent
+
+
+def init_eventDict(sourcePath, eventDataPath, stype="data"):
+    '''
+    stype: str,
+        'source': pickle files generated from videos or images
+        'data': on-site run data read from the data files
+        "realtime": fully real-time data, read from the data files
+
+    sourcePath: event folder; two kinds of events are supported:
+        (1) pickle files generated by the pipeline
+        (2) directly captured event folders
+    '''
+    k, errEvents = 0, []
+    for evtname in os.listdir(sourcePath):
+        bname, ext = os.path.splitext(evtname)
+        source_path = os.path.join(sourcePath, evtname)
+
+        if stype=="source" and ext not in ['.pkl', '.pickle']: continue
+        if stype=="data" and os.path.isfile(source_path): continue
+        if stype=="realtime" and os.path.isfile(source_path): continue
+
+        evt = bname.split('_')
+        condt = len(evt)>=2 and evt[-1].isdigit() and len(evt[-1])>=10
+        if not condt: continue
+
+        pickpath = os.path.join(eventDataPath, f"{bname}.pickle")
+        if os.path.isfile(pickpath): continue
+
+        # event = ShoppingEvent(source_path, stype)
+        try:
+            event = ShoppingEvent(source_path, stype)
+            with open(pickpath, 'wb') as f:
+                pickle.dump(event, f)
+            print(evtname)
+        except Exception as e:
+            errEvents.append(source_path)
+            print(f"Error: {evtname}, {e}")
+        # k += 1
+        # if k==1:
+        #     break
+
+    errfile = Path(eventDataPath).parent / 'error_events.txt'
+    with open(str(errfile), 'a', encoding='utf-8') as f:
+        for line in errEvents:
+            f.write(line + '\n')
+
+
+def get_evtList(evtpath):
+    '''==== 0. build the event list and the corresponding set of barcodes ==========='''
+    bcdList, evtpaths = [], []
+    for evtname in os.listdir(evtpath):
+        bname, ext = os.path.splitext(evtname)
+
+        ## two kinds of events: folders, and the output of Yolo-Resnet-Tracker
+        fpath = os.path.join(evtpath, evtname)
+        if os.path.isfile(fpath) and (ext==".pkl" or ext==".pickle"):
+            evt = bname.split('_')
+        elif os.path.isdir(fpath):
+            evt = evtname.split('_')
+        else:
+            continue
+
+        if len(evt)>=2 and evt[-1].isdigit() and len(evt[-1])>=10:
+            bcdList.append(evt[-1])
+            evtpaths.append(fpath)
+
+    bcdSet = set(bcdList)
+
+    return evtpaths, bcdSet
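A quick usage sketch for the helper above (the directory is one of the evtobjs paths used elsewhere in this changeset; the printed counts are illustrative):

    evtpaths, bcdSet = get_evtList("/home/wqg/dataset/pipeline/contrast/single_event_V10/evtobjs/")
    print(f"{len(evtpaths)} events, {len(bcdSet)} distinct barcodes")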
23239
dataPair_test.ipynb
72
dataset/multi-trajs.py
Normal file
@ -0,0 +1,72 @@
# -*- coding: utf-8 -*-
"""
Created on Mon Mar 10 09:33:35 2025
Benchmark dataset filtering: select the events for which tracking outputs multiple trajectories.

@author: ym
"""
import os
import numpy as np

import sys
sys.path.append(r"D:\DetectTracking")
from tracking.utils.read_data import extract_data, read_tracking_output_realtime


def get_multitraj_file(spath, pattern):
    multi_traj_events = []
    n = 0
    for evtname in os.listdir(spath):
        name, ext = os.path.splitext(evtname)
        eventpath = os.path.join(spath, evtname)

        evt = name.split('_')
        condt = len(evt)>=2 and evt[-1].isdigit() and len(evt[-1])>=8
        if not condt: continue
        if not os.path.isdir(eventpath): continue

        trackingboxes = []
        for dataname in os.listdir(eventpath):
            if os.path.splitext(dataname)[-1] in [".jpg", ".png"]:
                continue

            datapath = os.path.join(eventpath, dataname)
            if not os.path.isfile(datapath): continue
            CamerType = dataname.split('_')[0]

            if pattern=="realtime" and dataname.find("_tracking_output.data")>0:
                trackingboxes, trackingfeats, tracking_outboxes, tracking_outfeats = read_tracking_output_realtime(datapath)
            if pattern=="evtsplit" and dataname.find("_track.data")>0:
                bboxes, ffeats, trackerboxes, trackerfeats, trackingboxes, trackingfeats = extract_data(datapath)

            if len(trackingboxes)>=2:
                multi_traj_events.append(evtname)
                n += 1
                print(f"{n}: {evtname}")
                break

    multi_traj_file = os.path.join(spath, "multi_traj_file.txt")
    with open(multi_traj_file, "w") as file:
        for item in multi_traj_events:
            file.write(item + "\n")


def main():
    spaths = [r"\\192.168.1.28\share\测试视频数据以及日志\各模块测试记录\比对测试\1212",
              r"\\192.168.1.28\share\测试视频数据以及日志\各模块测试记录\比对测试\1216",
              r"\\192.168.1.28\share\测试视频数据以及日志\各模块测试记录\比对测试\1218",
              r"\\192.168.1.28\share\测试视频数据以及日志\各模块测试记录\比对测试\202412"
              ]

    pattern = "evtsplit"  # or "realtime": event-split vs. fully real-time data layout
    for spath in spaths:
        get_multitraj_file(spath, pattern)


if __name__ == '__main__':
    main()
41
execute_pipeline.py
Normal file
@ -0,0 +1,41 @@
# -*- coding: utf-8 -*-
"""
Created on Fri Mar 28 11:35:28 2025

@author: ym
"""

from pipeline import execute_pipeline


def execute(datapath, savepath_v5, savepath_v10):
    execute_pipeline(evtdir=datapath,
                     DataType="raw",        # raw, pkl
                     kk=None,
                     source_type="video",   # video, image
                     save_path=savepath_v5,
                     yolo_ver="V5",         # V10, V5
                     weight_yolo_v5=r'./ckpts/best_cls10_0906.pt',
                     weight_yolo_v10=r'./ckpts/best_v10s_width0375_1205.pt',
                     saveimages=False
                     )
    execute_pipeline(evtdir=datapath,
                     DataType="raw",        # raw, pkl
                     kk=None,
                     source_type="video",   # video, image
                     save_path=savepath_v10,
                     yolo_ver="V10",        # V10, V5
                     weight_yolo_v5=r'./ckpts/best_cls10_0906.pt',
                     weight_yolo_v10=r'./ckpts/best_v10s_width0375_1205.pt',
                     saveimages=False
                     )


datapath = r'/home/wqg/dataset/test_dataset/base_dataset/single_event/source/'
savepath_v5 = r'/home/wqg/dataset/pipeline/contrast/single_event_V5'
savepath_v10 = r'/home/wqg/dataset/pipeline/contrast/single_event_V10'
execute(datapath, savepath_v5, savepath_v10)

datapath = r'/home/wqg/dataset/test_performence_dataset/'
savepath_v5 = r'/home/wqg/dataset/pipeline/contrast/performence_V5'
savepath_v10 = r'/home/wqg/dataset/pipeline/contrast/performence_V10'
execute(datapath, savepath_v5, savepath_v10)
BIN
hands/__pycache__/hand_inference.cpython-312.pyc
Normal file
BIN
images/34414.png
127
imgs_to_video.py
Normal file
@ -0,0 +1,127 @@
# -*- coding: utf-8 -*-
"""
Created on Tue Jan 30 19:15:05 2024

@author: ym
"""
import cv2
import os
import glob

IMG_FORMATS = "bmp", "dng", "jpeg", "jpg", "mpo", "png", "tif", "tiff", "webp", "pfm"  # include image suffixes
VID_FORMATS = "asf", "avi", "gif", "m4v", "mkv", "mov", "mp4", "mpeg", "mpg", "ts", "wmv"  # include video suffixes


def for_test():
    # NOTE: relies on module-level `video_path`, `img_path` and `path` being set by the caller
    save_path = video_path + img_path

    fps, w, h = 10, 1024, 1280
    cap = cv2.VideoWriter(save_path, cv2.VideoWriter_fourcc(*'mp4v'), fps, (w, h))

    pathx = path + img_path
    imgfiles = [f for f in os.listdir(pathx) if not f.find("_cut") != -1]  # keep only files without "_cut"

    imgfiles.sort(key=lambda x: int(x[:-5]))
    imgpaths = []
    for imgfile in imgfiles:
        imgpaths.append(os.path.join(pathx, imgfile))

    center = (1280/2, 1024/2)
    rotate_matrix = cv2.getRotationMatrix2D(center=center, angle=-90, scale=1)
    k = 0
    for ipath in imgpaths:
        img = cv2.imread(ipath)
        rotated_image = cv2.warpAffine(src=img, M=rotate_matrix, dsize=(w, h))
        cap.write(rotated_image)
    cap.release()  # finalize the video file
    print("Have imgs")


def test_1():
    # NOTE: relies on module-level `video_path`, `img_path` and `path` being set by the caller
    # name = os.path.split(img_path)[-1]
    # save_path = video_path + name + '.mp4'
    save_path = video_path + img_path

    fps, w, h = 10, 1024, 1280
    cap = cv2.VideoWriter(save_path, cv2.VideoWriter_fourcc(*'mp4v'), fps, (w, h))

    pathx = path + img_path
    imgfiles = [f for f in os.listdir(pathx) if not f.find("_cut") != -1]  # keep only files without "_cut"

    imgfiles.sort(key=lambda x: int(x[:-5]))
    imgpaths = []
    for imgfile in imgfiles:
        imgpaths.append(os.path.join(pathx, imgfile))

    # ipaths = [os.path.join(pathx, f) for f in os.listdir(pathx) if not f.find("_cut") != -1]
    # ipaths = []
    # for f in os.listdir(pathx):
    #     if not f.find('_cut'):
    #         ipaths.append(os.path.join(pathx, f))
    # ipaths.sort(key=lambda x: int(x.split('_')[-2]))

    k = 0
    for ipath in imgpaths:
        img = cv2.imread(ipath)
        cap.write(img)

        k += 1

    cap.release()

    print(img_path + f" have imgs: {k}")


def img2video(imgpath):
    if not os.path.isdir(imgpath):
        return

    files = []
    files.extend(sorted(glob.glob(os.path.join(imgpath, "*.*"))))
    images = [x for x in files if x.split(".")[-1].lower() in IMG_FORMATS]

    h, w = cv2.imread(images[0]).shape[:2]
    fps = 25

    vidpath = imgpath + '.mp4'
    cap = cv2.VideoWriter(vidpath, cv2.VideoWriter_fourcc(*'mp4v'), fps, (w, h))
    for p in images:
        img = cv2.imread(p)
        cap.write(img)
    cap.release()


def main():
    imgpath = r"D:\work\result\202503251112_v10s_result"

    img2video(imgpath)


if __name__ == "__main__":
    main()
16326
minicpm.ipynb
25
minicpm.py
@ -1,25 +0,0 @@
# Load model directly
from transformers import AutoModel, AutoTokenizer
import torch
from PIL import Image

model = AutoModel.from_pretrained(
    "openbmb/MiniCPM-o-2_6",
    trust_remote_code=True,
    attn_implementation='flash_attention_2',
    torch_dtype=torch.bfloat16,
    # device_map="auto"
)
model = model.eval().cuda()
tokenizer = AutoTokenizer.from_pretrained('openbmb/MiniCPM-o-2_6', use_fast=True, trust_remote_code=True)

img1 = Image.open('/home/ieemoo0337/projects/datasets/constrast_pair/8850813311020/8850813311020.jpg')
img2 = Image.open('/home/ieemoo0337/projects/datasets/constrast_pair/8850511321499/8850511321499.jpg')

question = '描述第一张图像的1。'
msgs = [{'role': 'user', 'content': [img1, img2, question]}]
answer = model.chat(
    msgs=msgs,
    tokenizer=tokenizer
)
print(answer)
BIN
models/__pycache__/__init__.cpython-312.pyc
Normal file
BIN
models/__pycache__/common.cpython-312.pyc
Normal file
BIN
models/__pycache__/experimental.cpython-312.pyc
Normal file
BIN
models/__pycache__/yolo.cpython-312.pyc
Normal file
@ -76,7 +76,11 @@ def attempt_load(weights, device=None, inplace=True, fuse=True):
 
     model = Ensemble()
     for w in weights if isinstance(weights, list) else [weights]:
-        ckpt = torch.load(attempt_download(w), map_location=device)  # load
+        if torch.__version__ >= '2.6':
+            ckpt = torch.load(attempt_download(w), map_location=device, weights_only=False)  # load
+        else:
+            ckpt = torch.load(attempt_download(w), map_location=device)
         ckpt = (ckpt.get('ema') or ckpt['model']).to(device).float()  # FP32 model
 
         # Model compatibility updates
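Background for the branch above: PyTorch 2.6 changed the default of torch.load to weights_only=True, which rejects checkpoints that pickle whole model objects, so full-model YOLO checkpoints need an explicit opt-out. Note also that `torch.__version__ >= '2.6'` is a lexicographic string comparison and would misclassify a hypothetical '2.10'; a more robust gate is sketched below (assuming the packaging module, which ships with most torch environments; the checkpoint path is illustrative):

    import torch
    from packaging import version

    # version-safe gate for the torch 2.6 weights_only default change
    ckpt_path = "best_cls10_0906.pt"  # illustrative checkpoint path
    if version.parse(torch.__version__) >= version.parse("2.6"):
        ckpt = torch.load(ckpt_path, map_location="cpu", weights_only=False)
    else:
        ckpt = torch.load(ckpt_path, map_location="cpu")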