Compare commits

14 Commits

Author | SHA1 | Date
---|---|---
 | 9400ae904a |
 | ad850221c5 |
 | 31db54fa37 |
 | 38d962cd59 |
 | f797aea5d0 |
 | a3b65be6b1 |
 | 6ede9580cb |
 | 2feedd622d |
 | 544eac9cec |
 | 415a804c9b |
 | a64dcb5a10 |
 | 47a384131e |
 | c7859a7792 |
 | a16235a593 |
10  .gitignore  vendored
@ -1,14 +1,11 @@
# Repo-specific GitIgnore ----------------------------------------------------------------------------------------------
*.jpg
*.jpeg
*.png
*.bmp
*.tif
*.tiff
*.heic
*.JPG
*.JPEG
*.PNG
*.BMP
*.TIF
*.TIFF
@ -26,7 +23,7 @@
*.pickle
*.npy
*.csv

*.pyc

# for tracking ---------------------------------------------------------------
@ -39,6 +36,10 @@ tracking/data/boxes_imgs/*
tracking/data/trackfeats/*
tracking/data/tracks/*
tracking/data/handlocal/*
contrast/feat_extract/model/__pycache__/*
std_img*
.gitignore
*/__pycache__/*
ckpts/*
doc

@ -54,7 +55,6 @@ VOC/

# Neural Network weights -----------------------------------------------------------------------------------------------
*.weights
*.pt
*.pth
*.pb
*.onnx
176  Qwen_agent.py  Normal file
@ -0,0 +1,176 @@
from transformers import Qwen2VLForConditionalGeneration, AutoTokenizer, AutoProcessor
from accelerate import init_empty_weights, load_checkpoint_in_model
from stream_pipeline import stream_pipeline
from PIL import Image
from io import BytesIO
import torch
import ast
import requests
import random

# default: Load the model on the available device(s)
model = Qwen2VLForConditionalGeneration.from_pretrained(
    "Qwen/Qwen2-VL-7B-Instruct",
    torch_dtype=torch.bfloat16,
    attn_implementation="flash_attention_2",
    device_map="auto"
)

# default processor
processor = AutoProcessor.from_pretrained("Qwen/Qwen2-VL-7B-Instruct", attn_implementation="flash_attention_2")

def qwen_prompt(img_list, messages):
    # Preparation for inference
    text = processor.apply_chat_template(
        messages, tokenize=False, add_generation_prompt=True
    )
    inputs = processor(
        text=[text],
        images=img_list,
        padding=True,
        return_tensors="pt",
    )
    inputs = inputs.to("cuda")

    # Inference: Generation of the output
    with torch.no_grad():
        generated_ids = model.generate(**inputs, max_new_tokens=256)
    generated_ids_trimmed = [
        out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids)
    ]
    output_text = processor.batch_decode(
        generated_ids_trimmed, add_special_tokens=False, skip_special_tokens=True, clean_up_tokenization_spaces=False
    )
    del inputs
    del generated_ids
    del generated_ids_trimmed
    torch.cuda.empty_cache()

    return output_text[0]


def get_best_image(track_imgs):
    if len(track_imgs) >= 5:
        track_imgs = random.sample(track_imgs, 5)
    img_frames = []
    for i in range(len(track_imgs)):
        content = {}
        content['type'] = 'image'
        content['min_pixels'] = 224 * 224
        content['max_pixels'] = 800 * 800
        img_frames.append(content)

    messages = [
        {
            "role": "system",
            "content": "你是一个在超市工作的chatbot,你现在需要帮助顾客找到一张质量最好的商品图像。一个好的商品图像需要满足以下条件: \
                1. 文字清晰且连贯。\
                2. 商品图案清晰可识别。\
                3. 商品可提取的描述信息丰富。\
                基于以上条件,从多张图像中筛选出最好的图像,然后以dict输出该图像的索引信息,key为'index'。"
        },
        {
            "role": "system",
            "content": img_frames,
        },
    ]

    output_text = qwen_prompt(track_imgs, messages)
    output_dict = ast.literal_eval(output_text.strip('```python\n'))
    if output_dict['index'] > len(track_imgs):
        output_dict['index'] = len(track_imgs)
    best_img = track_imgs[output_dict['index'] - 1]

    return best_img


def get_product_description(std_img, track_imgs):
    messages = [
        {
            "role": "system",
            "content": "你是一个在超市工作的chatbot,你现在需要提取图像中商品的信息,信息需要按照以下python dict的格式输出,如果 \
                信息模糊不清则输出'未知': \
                { \
                'item1': {\
                    'Text': 第一张图像中商品中提取出的文字信息, \
                    'Color': 第一张图像中商品的颜色, \
                    'Shape': 第一张图像中商品的形状, \
                    'Material': 第一张图像中商品的材质, \
                    'Category': 第一张图像中商品的类别, \
                } \
                'item2': {\
                    'Text': 第二张图像中商品中提取出的文字信息, \
                    'Color': 第二张图像中商品的颜色, \
                    'Shape': 第二张图像中商品的形状, \
                    'Material': 第二张图像中商品的材质, \
                    'Category': 第二张图像中商品的类别, \
                } \
                'is_Same': 首先判断'Color'是否一致,如果不一致则返回False,如果一致则判断是否以上两个dict的['Text', 'Shape', 'Material', 'Category']key中至少有3个相同则输出True,\
                否则输出False。 \
                } \
                "
        },
        {
            "role": "user",
            "content": [
                {
                    "type": "image",
                    "min_pixels": 224 * 224,
                    "max_pixels": 800 * 800,
                },
                {
                    "type": "image",
                    "min_pixels": 224 * 224,
                    "max_pixels": 800 * 800,
                },
            ],
        },
        # {
        #     "role": "user",
        #     "content": "以python dict的形式输出第二张图像的比对信息。"
        #     "content": "输出一个list,list的内容包含两张图像提取出的dict信息。"
        # }
    ]
    best_img = get_best_image(track_imgs)
    if std_img is not None:
        img_list = [std_img, best_img]
    else:
        img_list = [best_img, best_img]

    output_text = qwen_prompt(img_list, messages)
    contrast_pair = ast.literal_eval(output_text.strip('```python\n'))

    return contrast_pair


def item_analysis(stream_dict):
    track_imgs = stream_pipeline(stream_dict)
    if len(track_imgs) == 0:
        return {}
    std_img = None
    if stream_dict['goodsPic'] is not None:
        # response = requests.get(stream_dict['goodsPic'])
        # std_img = Image.open(BytesIO(response.content))
        std_img = Image.open(stream_dict['goodsPic']).convert("RGB")
    description_dict = get_product_description(std_img, track_imgs)

    return description_dict


def main():
    # sample input dict
    stream_dict = {
        "goodsName": "优诺优丝黄桃果粒风味发酵乳",
        "measureProperty": 0,
        "qty": 1,
        "price": 25.9,
        "weight": 560,  # unit: grams
        "barcode": "6931806801024",
        "video": "https://ieemoo-ai.obs.cn-east-3.myhuaweicloud.com/videos/20231009/04/04_20231009-082149_21f2ca35-f2c2-4386-8497-3e7a3b407f03_4901872831197.mp4",
        "goodsPic": "https://ieemoo-storage.obs.cn-east-3.myhuaweicloud.com/lhpic/6931806801024.jpg",
        "measureUnit": "组",
        "goodsSpec": "405g"
    }

    result = item_analysis(stream_dict)
    print(result)


if __name__ == "__main__":
    main()
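Qwen_agent.py above parses the model reply with `ast.literal_eval(output_text.strip('```python\n'))`; note that `str.strip` removes any of the listed characters from both ends rather than the literal prefix, so replies that are not exactly a fenced dict can break the parse. A minimal defensive sketch (the helper name `parse_model_dict` is hypothetical and not part of this repository):

```python
import ast
import re

def parse_model_dict(output_text):
    """Best-effort extraction of a Python dict literal from an LLM reply.

    Hypothetical helper for illustration only: it grabs the outermost {...}
    span with a regex and falls back to an empty dict on parse errors.
    """
    match = re.search(r"\{.*\}", output_text, flags=re.S)
    if match is None:
        return {}
    try:
        return ast.literal_eval(match.group(0))
    except (ValueError, SyntaxError):
        return {}

# e.g. parse_model_dict("```python\n{'index': 2}\n```") -> {'index': 2}
```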
@ -1,359 +0,0 @@
# -*- coding: utf-8 -*-
"""
Created on Sun Sep 29 08:59:21 2024

@author: ym
"""
import os
# import sys
import cv2
import pickle
import numpy as np
from pathlib import Path
from scipy.spatial.distance import cdist
from track_reid import yolo_resnet_tracker, yolov10_resnet_tracker

from tracking.dotrack.dotracks_back import doBackTracks
from tracking.dotrack.dotracks_front import doFrontTracks
from tracking.utils.drawtracks import plot_frameID_y2, draw_all_trajectories
from utils.getsource import get_image_pairs, get_video_pairs
from tracking.utils.read_data import read_similar


def save_subimgs(imgdict, boxes, spath, ctype, featdict=None):
    '''
    Similarity between the current box feature and the previous box feature of the same track;
    can be compared against the similarities recorded in the tracking sequence.
    '''
    boxes = boxes[np.argsort(boxes[:, 7])]
    for i in range(len(boxes)):
        simi = None
        tid, fid, bid = int(boxes[i, 4]), int(boxes[i, 7]), int(boxes[i, 8])

        if i > 0:
            _, fid0, bid0 = int(boxes[i-1, 4]), int(boxes[i-1, 7]), int(boxes[i-1, 8])
            if f"{fid0}_{bid0}" in featdict.keys() and f"{fid}_{bid}" in featdict.keys():
                feat0 = featdict[f"{fid0}_{bid0}"]
                feat1 = featdict[f"{fid}_{bid}"]
                simi = 1 - np.maximum(0.0, cdist(feat0[None, :], feat1[None, :], "cosine"))[0][0]

        img = imgdict[f"{fid}_{bid}"]
        imgpath = spath / f"{ctype}_tid{tid}-{fid}-{bid}.png"
        if simi is not None:
            imgpath = spath / f"{ctype}_tid{tid}-{fid}-{bid}_sim{simi:.2f}.png"

        cv2.imwrite(imgpath, img)


def save_subimgs_1(imgdict, boxes, spath, ctype, simidict=None):
    '''
    Similarity between the current box feature and the track's smooth_feat; this is how
    feature similarity is recorded inside yolo_resnet_tracker.
    '''
    for i in range(len(boxes)):
        tid, fid, bid = int(boxes[i, 4]), int(boxes[i, 7]), int(boxes[i, 8])

        key = f"{fid}_{bid}"
        img = imgdict[key]
        imgpath = spath / f"{ctype}_tid{tid}-{fid}-{bid}.png"
        if simidict is not None and key in simidict.keys():
            imgpath = spath / f"{ctype}_tid{tid}-{fid}-{bid}_sim{simidict[key]:.2f}.png"

        cv2.imwrite(imgpath, img)


def pipeline(
        eventpath,
        savepath,
        SourceType,
        weights,
        YoloVersion="V5"
    ):
    '''
    eventpath: storage path of a single event
    '''
    optdict = {}
    optdict["weights"] = weights

    if SourceType == "video":
        vpaths = get_video_pairs(eventpath)
    elif SourceType == "image":
        vpaths = get_image_pairs(eventpath)
    event_tracks = []

    ## Build the shopping-event dict
    evtname = Path(eventpath).stem
    barcode = evtname.split('_')[-1] if len(evtname.split('_')) >= 2 \
                and len(evtname.split('_')[-1]) >= 8 \
                and evtname.split('_')[-1].isdigit() else ''
    '''Folder for the event results'''
    if not savepath:
        savepath = Path(__file__).resolve().parents[0] / "events_result"

    savepath_pipeline = Path(savepath) / Path("Yolos_Tracking") / evtname

    """Save path for the ShoppingDict pickle file"""
    savepath_spdict = Path(savepath) / "ShoppingDict_pkfile"
    if not savepath_spdict.exists():
        savepath_spdict.mkdir(parents=True, exist_ok=True)
    pf_path = Path(savepath_spdict) / Path(str(evtname) + ".pickle")

    # if pf_path.exists():
    #     print(f"Pickle file have saved: {evtname}.pickle")
    #     return

    '''====================== Build the ShoppingDict ======================='''
    ShoppingDict = {"eventPath": eventpath,
                    "eventName": evtname,
                    "barcode": barcode,
                    "eventType": '',  # "input", "output", "other"
                    "frontCamera": {},
                    "backCamera": {},
                    "one2n": []
                    }
    yrtDict = {}

    procpath = Path(eventpath).joinpath('process.data')
    if procpath.is_file():
        SimiDict = read_similar(procpath)
        ShoppingDict["one2n"] = SimiDict['one2n']

    for vpath in vpaths:
        '''================= 1. Build the camera-event dict ================='''
        CameraEvent = {"cameraType": '',  # "front", "back"
                       "videoPath": '',
                       "imagePaths": [],
                       "yoloResnetTracker": [],
                       "tracking": [],
                       }

        if isinstance(vpath, list):
            CameraEvent["imagePaths"] = vpath
            bname = os.path.basename(vpath[0])
        if not isinstance(vpath, list):
            CameraEvent["videoPath"] = vpath
            bname = os.path.basename(vpath).split('.')[0]
        if bname.split('_')[0] == "0" or bname.find('back') >= 0:
            CameraEvent["cameraType"] = "back"
        if bname.split('_')[0] == "1" or bname.find('front') >= 0:
            CameraEvent["cameraType"] = "front"

        '''================= 2. Folder for the event results ================='''
        if isinstance(vpath, list):
            savepath_pipeline_imgs = savepath_pipeline / Path("images")
        else:
            savepath_pipeline_imgs = savepath_pipeline / Path(str(Path(vpath).stem))

        if not savepath_pipeline_imgs.exists():
            savepath_pipeline_imgs.mkdir(parents=True, exist_ok=True)

        savepath_pipeline_subimgs = savepath_pipeline / Path("subimgs")
        if not savepath_pipeline_subimgs.exists():
            savepath_pipeline_subimgs.mkdir(parents=True, exist_ok=True)

        '''================= 3. Yolo + Resnet + Tracker ================='''
        optdict["source"] = vpath
        optdict["save_dir"] = savepath_pipeline_imgs
        optdict["is_save_img"] = True
        optdict["is_save_video"] = True

        if YoloVersion == "V5":
            yrtOut = yolo_resnet_tracker(**optdict)
        elif YoloVersion == "V10":
            yrtOut = yolov10_resnet_tracker(**optdict)

        yrtOut_save = []
        for frdict in yrtOut:
            fr_dict = {}
            for k, v in frdict.items():
                if k != "imgs":
                    fr_dict[k] = v
            yrtOut_save.append(fr_dict)
        CameraEvent["yoloResnetTracker"] = yrtOut_save

        # CameraEvent["yoloResnetTracker"] = yrtOut

        '''================= 4. tracking ================='''
        '''(1) Build the boxes and feats used by the tracking module'''
        bboxes = np.empty((0, 6), dtype=np.float64)
        trackerboxes = np.empty((0, 9), dtype=np.float64)
        trackefeats = {}
        for frameDict in yrtOut:
            tboxes = frameDict["tboxes"]
            ffeats = frameDict["feats"]

            boxes = frameDict["bboxes"]
            bboxes = np.concatenate((bboxes, np.array(boxes)), axis=0)
            trackerboxes = np.concatenate((trackerboxes, np.array(tboxes)), axis=0)
            for i in range(len(tboxes)):
                fid, bid = int(tboxes[i, 7]), int(tboxes[i, 8])
                trackefeats.update({f"{fid}_{bid}": ffeats[f"{fid}_{bid}"]})

        '''(2) tracking, back camera'''
        if CameraEvent["cameraType"] == "back":
            vts = doBackTracks(trackerboxes, trackefeats)
            vts.classify()
            event_tracks.append(("back", vts))

            CameraEvent["tracking"] = vts
            ShoppingDict["backCamera"] = CameraEvent

            yrtDict["backyrt"] = yrtOut

        '''(2) tracking, front camera'''
        if CameraEvent["cameraType"] == "front":
            vts = doFrontTracks(trackerboxes, trackefeats)
            vts.classify()
            event_tracks.append(("front", vts))

            CameraEvent["tracking"] = vts
            ShoppingDict["frontCamera"] = CameraEvent

            yrtDict["frontyrt"] = yrtOut

    '''========================== Saving ================================='''
    '''(1) Save the ShoppingDict event'''
    with open(str(pf_path), 'wb') as f:
        pickle.dump(ShoppingDict, f)

    '''(2) Save the track sub-images output by tracking, together with their similarities'''
    for CamerType, vts in event_tracks:
        if len(vts.tracks) == 0: continue
        if CamerType == 'front':
            # yolos = ShoppingDict["frontCamera"]["yoloResnetTracker"]
            yolos = yrtDict["frontyrt"]
            ctype = 1
        if CamerType == 'back':
            # yolos = ShoppingDict["backCamera"]["yoloResnetTracker"]
            yolos = yrtDict["backyrt"]
            ctype = 0

        imgdict, featdict, simidict = {}, {}, {}
        for y in yolos:
            imgdict.update(y["imgs"])
            featdict.update(y["feats"])
            simidict.update(y["featsimi"])

        for track in vts.Residual:
            if isinstance(track, np.ndarray):
                save_subimgs(imgdict, track, savepath_pipeline_subimgs, ctype, featdict)
            else:
                save_subimgs(imgdict, track.slt_boxes, savepath_pipeline_subimgs, ctype, featdict)

    '''(3) Draw and save the trajectories'''
    illus = [None, None]
    for CamerType, vts in event_tracks:
        if len(vts.tracks) == 0: continue

        if CamerType == 'front':
            edgeline = cv2.imread("./tracking/shopcart/cart_tempt/board_ftmp_line.png")

            h, w = edgeline.shape[:2]
            # nh, nw = h//2, w//2
            # edgeline = cv2.resize(edgeline, (nw, nh), interpolation=cv2.INTER_AREA)

            img_tracking = draw_all_trajectories(vts, edgeline, savepath_pipeline, CamerType, draw5p=True)
            illus[0] = img_tracking

            plt = plot_frameID_y2(vts)
            plt.savefig(os.path.join(savepath_pipeline, "front_y2.png"))

        if CamerType == 'back':
            edgeline = cv2.imread("./tracking/shopcart/cart_tempt/edgeline.png")

            h, w = edgeline.shape[:2]
            # nh, nw = h//2, w//2
            # edgeline = cv2.resize(edgeline, (nw, nh), interpolation=cv2.INTER_AREA)

            img_tracking = draw_all_trajectories(vts, edgeline, savepath_pipeline, CamerType, draw5p=True)
            illus[1] = img_tracking

    illus = [im for im in illus if im is not None]
    if len(illus):
        img_cat = np.concatenate(illus, axis=1)
        if len(illus) == 2:
            H, W = img_cat.shape[:2]
            cv2.line(img_cat, (int(W/2), 0), (int(W/2), int(H)), (128, 128, 255), 3)

        trajpath = os.path.join(savepath_pipeline, "trajectory.png")
        cv2.imwrite(trajpath, img_cat)


def execute_pipeline(evtdir=r"D:\datasets\ym\后台数据\unzip",
                     source_type="video",   # video, image
                     save_path=r"D:\work\result_pipeline",
                     yolo_ver="V10",        # V10, V5
                     weight_yolo_v5=r'./ckpts/best_cls10_0906.pt',
                     weight_yolo_v10=r'./ckpts/best_v10s_width0375_1205.pt',
                     k=0
                     ):
    '''
    Run pipeline() over the event directory; each sub-folder is one event.
    '''
    parmDict = {}
    parmDict["SourceType"] = source_type
    parmDict["savepath"] = save_path
    parmDict["YoloVersion"] = yolo_ver
    if parmDict["YoloVersion"] == "V5":
        parmDict["weights"] = weight_yolo_v5
    elif parmDict["YoloVersion"] == "V10":
        parmDict["weights"] = weight_yolo_v10

    evtdir = Path(evtdir)
    errEvents = []
    for item in evtdir.iterdir():
        if item.is_dir():
            item = evtdir / Path("20250310-175352-741")
            parmDict["eventpath"] = item
            pipeline(**parmDict)
            # try:
            #     pipeline(**parmDict)
            # except Exception as e:
            #     errEvents.append(str(item))
        k += 1
        if k == 1:
            break

    errfile = os.path.join(parmDict["savepath"], 'error_events.txt')
    with open(errfile, 'w', encoding='utf-8') as f:
        for line in errEvents:
            f.write(line + '\n')


if __name__ == "__main__":
    execute_pipeline()

    # spath_v10 = r"D:\work\result_pipeline_v10"
    # spath_v5 = r"D:\work\result_pipeline_v5"
    # execute_pipeline(save_path=spath_v10, yolo_ver="V10")
    # execute_pipeline(save_path=spath_v5, yolo_ver="V5")

    datapath = r'/home/wqg/dataset/test_dataset/base_dataset/single_event/source/'
    savepath = r'/home/wqg/dataset/pipeline/contrast/single_event_V5'

    execute_pipeline(evtdir=datapath,
                     DataType="raw",       # raw, pkl
                     kk=1,
                     source_type="video",  # video, image
                     save_path=savepath,
                     yolo_ver="V10",       # V10, V5
                     weight_yolo_v5=r'./ckpts/best_cls10_0906.pt',
                     weight_yolo_v10=r'./ckpts/best_v10s_width0375_1205.pt',
                     saveimages=False
                     )
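The deleted pipeline above serialises each event as a ShoppingDict pickle under `ShoppingDict_pkfile`. A minimal sketch of reading one back, assuming only the keys defined in that file; the path and event name below are hypothetical:

```python
import pickle

# hypothetical location: pipeline() writes "<savepath>/ShoppingDict_pkfile/<eventName>.pickle"
with open("events_result/ShoppingDict_pkfile/20250310-175352-741.pickle", "rb") as f:
    shopping_dict = pickle.load(f)

print(shopping_dict["eventName"], shopping_dict["barcode"])
# "frontCamera" / "backCamera" hold the per-camera dicts built in pipeline()
for cam in ("frontCamera", "backCamera"):
    cam_evt = shopping_dict.get(cam) or {}
    print(cam, cam_evt.get("cameraType"), len(cam_evt.get("yoloResnetTracker", [])))
```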
@ -9,19 +9,17 @@ import cv2
import json
import numpy as np
import matplotlib.pyplot as plt
from pathlib import Path

from matplotlib import rcParams
from matplotlib.font_manager import FontProperties
from scipy.spatial.distance import cdist
from utils.event import ShoppingEvent, save_data
from utils.calsimi import calsimi_vs_stdfeat_new, get_topk_percent, cluster
from utils.tools import get_evtList
import pickle

rcParams['font.sans-serif'] = ['SimHei']  # display Chinese text with the SimHei font
rcParams['axes.unicode_minus'] = False    # render minus signs correctly

'''*********** USearch ***********'''
def read_usearch():
    stdFeaturePath = r"D:\contrast\stdlib\v11_test.json"
@ -37,12 +35,13 @@ def read_usearch():

    return stdlib

def get_eventlist_errortxt(evtpaths):
def get_eventlist():
    '''
    Read the error events of one test run.
    '''
    text1 = "one_2_Small_n_Error.txt"
    text2 = "one_2_Big_N_Error.txt"
    '''
    evtpaths = r"\\192.168.1.28\share\测试视频数据以及日志\算法全流程测试\202412\images"
    text1 = "one2n_Error.txt"
    text2 = "one2SN_Error.txt"
    events = []
    text = (text1, text2)
    for txt in text:
@ -54,16 +53,16 @@ def get_eventlist_errortxt(evtpaths):
            if line:
                fpath = os.path.join(evtpaths, line)
                events.append(fpath)

    events = list(set(events))

    return events

def save_eventdata():
    evtpaths = r"/home/wqg/dataset/test_dataset/performence_dataset/"
    events = get_eventlist_errortxt(evtpaths)
def single_event():

    events = get_eventlist()

    '''Define the storage path for the current events and create the corresponding folders'''
    resultPath = r"\\192.168.1.28\share\测试视频数据以及日志\算法全流程测试\202412\result\single_event"
@ -75,148 +74,120 @@ def save_eventdata():

# def get_topk_percent(data, k):
#     """
#     Get the largest k% of elements in the data.
#     """
#     # convert the data to a NumPy array
#     if isinstance(data, list):
#         data = np.array(data)
def get_topk_percent(data, k):
    """
    Get the largest k% of elements in the data.
    """
    # convert the data to a NumPy array
    if isinstance(data, list):
        data = np.array(data)

#     percentile = np.percentile(data, 100-k)
#     top_k_percent = data[data >= percentile]
    percentile = np.percentile(data, 100-k)
    top_k_percent = data[data >= percentile]

#     return top_k_percent
# def cluster(data, thresh=0.15):
#     # data = np.array([0.1, 0.13, 0.7, 0.2, 0.8, 0.52, 0.3, 0.7, 0.85, 0.58])
#     # data = np.array([0.1, 0.13, 0.2, 0.3])
#     # data = np.array([0.1])
    return top_k_percent
def cluster(data, thresh=0.15):
    # data = np.array([0.1, 0.13, 0.7, 0.2, 0.8, 0.52, 0.3, 0.7, 0.85, 0.58])
    # data = np.array([0.1, 0.13, 0.2, 0.3])
    # data = np.array([0.1])

#     if isinstance(data, list):
#         data = np.array(data)
    if isinstance(data, list):
        data = np.array(data)

#     data1 = np.sort(data)
#     cluter, Cluters, = [data1[0]], []
#     for i in range(1, len(data1)):
#         if data1[i] - data1[i-1] < thresh:
#             cluter.append(data1[i])
#         else:
#             Cluters.append(cluter)
#             cluter = [data1[i]]
#     Cluters.append(cluter)
    data1 = np.sort(data)
    cluter, Cluters, = [data1[0]], []
    for i in range(1, len(data1)):
        if data1[i] - data1[i-1] < thresh:
            cluter.append(data1[i])
        else:
            Cluters.append(cluter)
            cluter = [data1[i]]
    Cluters.append(cluter)

#     clt_center = []
#     for clt in Cluters:
#         ## Should the minimum number of track samples per cluster be limited here? That factor belongs in the trajectory analysis.
#         # if len(clt)>=3:
#         #     clt_center.append(np.mean(clt))
#         clt_center.append(np.mean(clt))
    clt_center = []
    for clt in Cluters:
        ## Should the minimum number of track samples per cluster be limited here? That factor belongs in the trajectory analysis.
        # if len(clt)>=3:
        #     clt_center.append(np.mean(clt))
        clt_center.append(np.mean(clt))

#     # print(clt_center)
    # print(clt_center)

#     return clt_center
    return clt_center

# def calsimi_vs_stdfeat_new(event, stdfeat):
#     '''Comparison strategy between an event and the standard library.
#     Could this strategy be extended to event-vs-event comparison?
#     '''
def calc_simil(event, stdfeat):

    # def calsiml(feat1, feat2, topkp=75, cluth=0.15):
    #     '''Selection strategy for the similarity between track samples and the standard feature set'''
    #     matrix = 1 - cdist(feat1, feat2, 'cosine')
    #     simi_max = []
    #     for i in range(len(matrix)):
    #         sim = np.mean(get_topk_percent(matrix[i, :], topkp))
    #         simi_max.append(sim)
    #     cltc_max = cluster(simi_max, cluth)
    #     Simi = max(cltc_max)
    def calsiml(feat1, feat2):
        '''Selection strategy for the similarity between track samples and the standard feature set'''
        matrix = 1 - cdist(feat1, feat2, 'cosine')
        simi_max = []
        for i in range(len(matrix)):
            sim = np.mean(get_topk_percent(matrix[i, :], 75))
            simi_max.append(sim)
        cltc_max = cluster(simi_max)
        Simi = max(cltc_max)

        # ## An empty cltc_max is a programming oversight and should be investigated and fixed
        # # if len(cltc_max):
        # #     Simi = max(cltc_max)
        # # else:
        # #     Simi = 0  # should never be reached
        ## An empty cltc_max is a programming oversight and should be investigated and fixed
        # if len(cltc_max):
        #     Simi = max(cltc_max)
        # else:
        #     Simi = 0  # should never be reached

        # return Simi
        return Simi

    # front_boxes = np.empty((0, 9), dtype=np.float64)   ## compatible with the doTracks class
    # front_feats = np.empty((0, 256), dtype=np.float64) ## compatible with the doTracks class
    # for i in range(len(event.front_boxes)):
    #     front_boxes = np.concatenate((front_boxes, event.front_boxes[i]), axis=0)
    #     front_feats = np.concatenate((front_feats, event.front_feats[i]), axis=0)
    front_boxes = np.empty((0, 9), dtype=np.float64)   ## compatible with the doTracks class
    front_feats = np.empty((0, 256), dtype=np.float64) ## compatible with the doTracks class
    for i in range(len(event.front_boxes)):
        front_boxes = np.concatenate((front_boxes, event.front_boxes[i]), axis=0)
        front_feats = np.concatenate((front_feats, event.front_feats[i]), axis=0)

    # back_boxes = np.empty((0, 9), dtype=np.float64)   ## compatible with the doTracks class
    # back_feats = np.empty((0, 256), dtype=np.float64) ## compatible with the doTracks class
    # for i in range(len(event.back_boxes)):
    #     back_boxes = np.concatenate((back_boxes, event.back_boxes[i]), axis=0)
    #     back_feats = np.concatenate((back_feats, event.back_feats[i]), axis=0)
    back_boxes = np.empty((0, 9), dtype=np.float64)   ## compatible with the doTracks class
    back_feats = np.empty((0, 256), dtype=np.float64) ## compatible with the doTracks class
    for i in range(len(event.back_boxes)):
        back_boxes = np.concatenate((back_boxes, event.back_boxes[i]), axis=0)
        back_feats = np.concatenate((back_feats, event.back_feats[i]), axis=0)

    # if len(front_feats):
    #     front_simi = calsiml(front_feats, stdfeat)
    # if len(back_feats):
    #     back_simi = calsiml(back_feats, stdfeat)
    if len(front_feats):
        front_simi = calsiml(front_feats, stdfeat)
    if len(back_feats):
        back_simi = calsiml(back_feats, stdfeat)

    # '''Fusion strategy for the front/back camera similarities'''
    # if len(front_feats) and len(back_feats):
    #     diff_simi = abs(front_simi - back_simi)
    #     if diff_simi > 0.15:
    #         Similar = max([front_simi, back_simi])
    #     else:
    #         Similar = (front_simi+back_simi)/2
    # elif len(front_feats) and len(back_feats)==0:
    #     Similar = front_simi
    # elif len(front_feats)==0 and len(back_feats):
    #     Similar = back_simi
    # else:
    #     Similar = None  # when event.front_feats and event.back_feats are both empty

    # return Similar
    '''Fusion strategy for the front/back camera similarities'''
    if len(front_feats) and len(back_feats):
        diff_simi = abs(front_simi - back_simi)
        if diff_simi > 0.15:
            Similar = max([front_simi, back_simi])
        else:
            Similar = (front_simi+back_simi)/2
    elif len(front_feats) and len(back_feats)==0:
        Similar = front_simi
    elif len(front_feats)==0 and len(back_feats):
        Similar = back_simi
    else:
        Similar = None  # when event.front_feats and event.back_feats are both empty

    return Similar


def simi_matrix():
    evtpaths = r"/home/wqg/dataset/pipeline/contrast/single_event_V10/evtobjs/"
    resultPath = r"\\192.168.1.28\share\测试视频数据以及日志\算法全流程测试\202412\result\single_event"

    stdfeatPath = r"/home/wqg/dataset/test_dataset/total_barcode/features_json/v11_barcode_0304/"
    resultPath = r"/home/wqg/dataset/performence_dataset/result/"

    evt_paths, bcdSet = get_evtList(evtpaths)

    ## read std features
    stdDict = {}
    evtDict = {}
    for barcode in bcdSet:
        stdpath = os.path.join(stdfeatPath, f"{barcode}.json")
        if not os.path.isfile(stdpath):
            continue
    stdlib = read_usearch()
    events = get_eventlist()
    for evtpath in events:
        evtname = os.path.basename(evtpath)
        _, barcode = evtname.split("_")

        with open(stdpath, 'r', encoding='utf-8') as f:
            stddata = json.load(f)
        feat = np.array(stddata["value"])
        stdDict[barcode] = feat

    for evtpath in evt_paths:
        barcode = Path(evtpath).stem.split("_")[-1]

        if barcode not in stdDict.keys():
            continue
        # build the event and its corresponding standard feature set
        event = ShoppingEvent(evtpath)
        stdfeat = stdlib[barcode]

        # try:
        #     with open(evtpath, 'rb') as f:
        #         evtdata = pickle.load(f)
        # except Exception as e:
        #     print(evtname)

        with open(evtpath, 'rb') as f:
            event = pickle.load(f)

        stdfeat = stdDict[barcode]

        Similar = calsimi_vs_stdfeat_new(event, stdfeat)
        Similar = calc_simil(event, stdfeat)

        # build the save path for the box sub-images
        subimgpath = os.path.join(resultPath, f"{event.evtname}", "subimg")
@ -225,6 +196,8 @@ def simi_matrix():
        histpath = os.path.join(resultPath, "simi_hist")
        if not os.path.exists(histpath):
            os.makedirs(histpath)

        mean_values, max_values = [], []
        cameras = ('front', 'back')
@ -245,9 +218,9 @@ def simi_matrix():
                evtfeat = np.concatenate((evtfeat, event.back_feats[i]), axis=0)
                imgpaths = event.back_imgpaths

            assert len(boxes)==len(evtfeat), f"Please check the Event: {event.evtname}"
            assert len(boxes)==len(evtfeat), f"Please check the Event: {evtname}"
            if len(boxes)==0: continue
            print(event.evtname)
            print(evtname)

            matrix = 1 - cdist(evtfeat, stdfeat, 'cosine')
            simi_1d = matrix.flatten()
@ -337,8 +310,8 @@ def simi_matrix():
            mean_diff = abs(mean_values[1]-mean_values[0])
            ax[0, 1].set_title(f"mean diff: {mean_diff:.3f}")
        if len(max_values)==2:
            max_diff = abs(max_values[1]-max_values[0])
            ax[0, 2].set_title(f"max diff: {max_diff:.3f}")
            max_values = abs(max_values[1]-max_values[0])
            ax[0, 2].set_title(f"max diff: {max_values:.3f}")
        try:
            fig.suptitle(f"Similar: {Similar:.3f}", fontsize=16)
        except Exception as e:
@ -347,14 +320,19 @@ def simi_matrix():
        pltpath = os.path.join(subimgpath, f"hist_max_{kpercent}%_.png")
        plt.savefig(pltpath)

        pltpath1 = os.path.join(histpath, f"{event.evtname}_.png")
        pltpath1 = os.path.join(histpath, f"{evtname}_.png")
        plt.savefig(pltpath1)

        plt.close()


def main():

    simi_matrix()
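The calsiml strategy above keeps, for each track sample, the mean of its top-75% similarities against the standard feature set, then clusters those per-sample values with a gap threshold of 0.15 and reports the highest cluster centre. A self-contained sketch of that selection on synthetic features; this is a standalone re-implementation for illustration, not the repository's utils.calsimi module:

```python
import numpy as np
from scipy.spatial.distance import cdist

def top_k_percent(values, k=75):
    # keep the largest k% of the values
    values = np.asarray(values)
    return values[values >= np.percentile(values, 100 - k)]

def cluster_1d(values, thresh=0.15):
    # greedy 1-D clustering: a gap larger than thresh starts a new cluster
    values = np.sort(np.asarray(values))
    clusters, current = [], [values[0]]
    for prev, cur in zip(values[:-1], values[1:]):
        if cur - prev < thresh:
            current.append(cur)
        else:
            clusters.append(current)
            current = [cur]
    clusters.append(current)
    return [float(np.mean(c)) for c in clusters]

rng = np.random.default_rng(0)
track_feats = rng.normal(size=(8, 256))
std_feats = rng.normal(size=(20, 256))
track_feats /= np.linalg.norm(track_feats, axis=1, keepdims=True)
std_feats /= np.linalg.norm(std_feats, axis=1, keepdims=True)

matrix = 1 - cdist(track_feats, std_feats, "cosine")            # per-sample cosine similarities
per_sample = [np.mean(top_k_percent(row, 75)) for row in matrix]
print(max(cluster_1d(per_sample, 0.15)))                        # the similarity that would be reported
```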
@ -61,9 +61,8 @@ class Config:
    test_val = "D:/比对/cl"
    # test_val = "./data/test_data_100"

    test_model = "checkpoints/best_20250228.pth"
    test_model = "checkpoints/zhanting_res_801.pth"
    # test_model = "checkpoints/zhanting_res_801.pth"
    # test_model = "checkpoints/zhanting_res_abroad_8021.pth"

@ -48,7 +48,7 @@ class FeatsInterface:
        modpath = os.path.join(curpath, conf.test_model)
        self.model.load_state_dict(torch.load(modpath, map_location=conf.device))
        self.model.eval()
        # print('load model {} '.format(conf.testbackbone))
        print('load model {} '.format(conf.testbackbone))

    def inference(self, images, detections=None):
        '''
@ -61,22 +61,12 @@ class FeatsInterface:
        batch_patches = []
        patches = []
        for i, img in enumerate(images):
            img = img.copy()

            ## pad img with black borders to produce a square image new_img
            width, height = img.size
            new_size = max(width, height)
            new_img = Image.new("RGB", (new_size, new_size), (0, 0, 0))
            paste_x = (new_size - width) // 2
            paste_y = (new_size - height) // 2
            new_img.paste(img, (paste_x, paste_y))

            patch = self.transform(new_img)
            patch = patch.to(device=self.device)
            # if str(self.device) != "cpu":
            #     patch = patch.to(device=self.device).half()
            # else:
            #     patch = patch.to(device=self.device)
            img = img.copy()
            patch = self.transform(img)
            if str(self.device) != "cpu":
                patch = patch.to(device=self.device).half()
            else:
                patch = patch.to(device=self.device)

            patches.append(patch)
            if (i + 1) % self.batch_size == 0:
@ -117,12 +107,10 @@ class FeatsInterface:
            patch = self.transform(img1)

            # patch = patch.to(device=self.device).half()
            # if str(self.device) != "cpu":
            #     patch = patch.to(device=self.device).half()
            #     patch = patch.to(device=self.device)
            # else:
            #     patch = patch.to(device=self.device)
            patch = patch.to(device=self.device)
            if str(self.device) != "cpu":
                patch = patch.to(device=self.device)
            else:
                patch = patch.to(device=self.device)

            patches.append(patch)
            if (d + 1) % self.batch_size == 0:
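The hunk above removes the letterbox step that pasted each crop onto a black square canvas before `self.transform`. A minimal PIL sketch of that padding, kept here for reference; it mirrors the removed lines rather than the current FeatsInterface code:

```python
from PIL import Image

def pad_to_square(img, fill=(0, 0, 0)):
    # paste the image centered on a square canvas so the aspect ratio survives a later resize
    width, height = img.size
    side = max(width, height)
    canvas = Image.new("RGB", (side, side), fill)
    canvas.paste(img, ((side - width) // 2, (side - height) // 2))
    return canvas

# e.g. pad_to_square(Image.new("RGB", (200, 80))).size == (200, 200)
```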
@ -1,53 +0,0 @@
# -*- coding: utf-8 -*-
"""
Created on Fri Feb 28 16:27:17 2025

@author: ym
"""

import os
import time
import pickle
import numpy as np
from PIL import Image
from scipy.spatial.distance import cdist
from feat_extract.config import config as conf
from feat_extract.inference import FeatsInterface  #, inference_image

Encoder = FeatsInterface(conf)

def main():
    imgpaths = r"D:\全实时\202502\result\Yolos_Tracking\20250228-160049-188_6921168558018_6921168558018\a"
    featDict = {}
    imgs, imgfiles = [], []
    for filename in os.listdir(imgpaths):
        file, ext = os.path.splitext(filename)

        imgpath = os.path.join(imgpaths, filename)
        img = Image.open(imgpath)

        imgs.append(img)
        imgfiles.append(filename)

        feature = Encoder.inference([img])
        feature /= np.linalg.norm(feature, axis=1)[:, None]
        feature_ft32 = feature.astype(np.float32)

        featDict[file] = feature_ft32

    feature = Encoder.inference(imgs)
    feature /= np.linalg.norm(feature, axis=1)[:, None]
    feature_ft32 = feature.astype(np.float32)

    matrix = 1 - cdist(feature, feature, 'cosine')

    print("do")


if __name__ == '__main__':
    main()
@ -38,13 +38,13 @@ def get_std_barcodeDict(bcdpath, savepath, bcdSet):
    '''
    inputs:
        bcdpath: cleaned barcode sample images; if a barcode folder contains a 'base' sub-folder, only images in that sub-folder are used
            (default = r'\\\\192.168.1.28\\share\\已标注数据备份\\对比数据\\barcode\\barcode_1771')
            (default = r'\\192.168.1.28\share\已标注数据备份\对比数据\barcode\barcode_1771')
    function:
        generate and save a single-key dict {barcode: [imgpath1, imgpath2, ...]},
        savepath: where the dict is stored; file name format: barcode.pickle
    '''

    # savepath = r'\\\\192.168.1.28\\share\\测试_202406\\contrast\\std_barcodes'
    # savepath = r'\\192.168.1.28\share\测试_202406\contrast\std_barcodes'

    '''Read the barcode list of the dataset'''
    stdBarcodeList = []
@ -120,7 +120,8 @@ def stdfeat_infer(imgPath, featPath, bcdSet=None):

    # imgPath = r"\\192.168.1.28\share\测试_202406\contrast\std_barcodes"
    # featPath = r"\\192.168.1.28\share\测试_202406\contrast\std_features"

    stdBarcodeDict = {}
    stdBarcodeDict_ft16 = {}

    Encoder = FeatsInterface(conf)

@ -157,8 +158,6 @@ def stdfeat_infer(imgPath, featPath, bcdSet=None):

            feature /= np.linalg.norm(feature, axis=1)[:, None]

            feature_ft32 = feature.astype(np.float32)

            # float16
            feature_ft16 = feature.astype(np.float16)
            feature_ft16 /= np.linalg.norm(feature_ft16, axis=1)[:, None]
@ -166,21 +165,23 @@ def stdfeat_infer(imgPath, featPath, bcdSet=None):
            # uint8; two strategies: 1) small precision loss, 2) low computational cost
            # feature_uint8, _ = ft16_to_uint8(feature_ft16)
            feature_uint8 = (feature_ft16*128).astype(np.int8)

            '''================ Save the features of a single barcode ================'''
            ##================== float32
            stdbDict["barcode"] = barcode
            stdbDict["imgpaths"] = imgpaths
            stdbDict["feats_ft32"] = feature_ft32
            stdbDict["feats_ft16"] = feature_ft16
            stdbDict["feats_uint8"] = feature_uint8

            with open(featpath, 'wb') as f:
                pickle.dump(stdbDict, f)

        except Exception as e:
            print(f"Error occurred at: {filename}, with Exception: {e}")

        '''================ Save the features of a single barcode ================'''
        ##================== float32
        stdbDict["barcode"] = barcode
        stdbDict["imgpaths"] = imgpaths
        stdbDict["feats_ft32"] = feature
        stdbDict["feats_ft16"] = feature_ft16
        stdbDict["feats_uint8"] = feature_uint8

        with open(featpath, 'wb') as f:
            pickle.dump(stdbDict, f)

        stdBarcodeDict[barcode] = feature
        stdBarcodeDict_ft16[barcode] = feature_ft16

        t2 = time.time()
        print(f"Barcode: {barcode}, need time: {t2-t1:.1f} secs")
@ -191,32 +192,19 @@ def stdfeat_infer(imgPath, featPath, bcdSet=None):
    return


def gen_bcd_features(imgpath, bcdpath, featpath, eventSourcePath):
def gen_bcd_features(imgpath, bcdpath, featpath, bcdSet=None):
    ''' Generate the standard feature set '''
    '''1. Collect the sample paths under imgpath into a dict {barcode: [imgpath1, imgpath2, ...]}
        and store it under bcdpath as barcode.pickle'''

    bcdList = []
    for evtname in os.listdir(eventSourcePath):
        bname, ext = os.path.splitext(evtname)
        evt = bname.split('_')
        if len(evt)>=2 and evt[-1].isdigit() and len(evt[-1])>=10:
            bcdList.append(evt[-1])

    bcdSet = set(bcdList)
    get_std_barcodeDict(imgpath, bcdpath, bcdSet)

    '''2. Extract features and save them to featpath, also restricted to the bcdSet intersection'''
    stdfeat_infer(bcdpath, featpath, bcdSet)

def main():
    imgpath = r"\\192.168.1.28\share\数据\已完成数据\展厅数据\v2.0_abroad\比对数据\all_base_二筛"
    bcdpath = r"D:\exhibition\dataset\bcdpath_abroad"
    featpath = r"D:\exhibition\dataset\feats_abroad"
    if not os.path.exists(bcdpath):
        os.makedirs(bcdpath)
    if not os.path.exists(featpath):
        os.makedirs(featpath)
    imgpath = r"\\192.168.1.28\share\数据\已完成数据\展厅数据\v1.0\比对数据\整理\zhantingBase"
    bcdpath = r"D:\exhibition\dataset\bcdpath"
    featpath = r"D:\exhibition\dataset\feats"

    gen_bcd_features(imgpath, bcdpath, featpath)
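stdfeat_infer above stores an int8 copy of each L2-normalised feature via `(feature_ft16*128).astype(np.int8)`. A small sketch of that round trip and the error it introduces; this is illustrative only, 128 is the scale used above, and the values must stay roughly inside [-1, 1) for int8 not to overflow:

```python
import numpy as np

rng = np.random.default_rng(1)
feats = rng.normal(size=(4, 256)).astype(np.float32)
feats /= np.linalg.norm(feats, axis=1, keepdims=True)   # unit-norm rows, entries well inside [-1, 1)

q = (feats * 128).astype(np.int8)      # quantise: 1/128 resolution, one byte per value
deq = q.astype(np.float32) / 128       # dequantise before computing cosine similarities

print(np.abs(feats - deq).max())       # worst-case error, on the order of 1/128
```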
@ -10,7 +10,46 @@ import numpy as np
from pathlib import Path
import matplotlib.pyplot as plt
from scipy.spatial.distance import cdist
from utils.tools import init_eventDict
from utils.event import ShoppingEvent


def init_eventdict(sourcePath, stype="data"):
    '''stype: str,
        'source': pickle files generated from videos or images
        'data': on-site runtime data read from the data files
    '''
    k, errEvents = 0, []
    for bname in os.listdir(sourcePath):
        # bname = r"20241126-135911-bdf91cf9-3e9a-426d-94e8-ddf92238e175_6923555210479"

        source_path = os.path.join(sourcePath, bname)
        if stype=="data":
            pickpath = os.path.join(eventDataPath, f"{bname}.pickle")
            if not os.path.isdir(source_path) or os.path.isfile(pickpath):
                continue
        if stype=="source":
            pickpath = os.path.join(eventDataPath, bname)
            if not os.path.isfile(source_path) or os.path.isfile(pickpath):
                continue

        try:
            event = ShoppingEvent(source_path, stype)

            with open(pickpath, 'wb') as f:
                pickle.dump(event, f)
            print(bname)
        except Exception as e:
            errEvents.append(source_path)
            print(e)
        # k += 1
        # if k==1:
        #     break

    errfile = os.path.join(resultPath, 'error_events.txt')
    with open(errfile, 'a', encoding='utf-8') as f:
        for line in errEvents:
            f.write(line + '\n')

def read_eventdict(eventDataPath):
    evtDict = {}
@ -26,38 +65,38 @@ def read_eventdict(eventDataPath):

    return evtDict

def simi_calc(event, o2nevt, pattern, typee=None):
    if pattern==1 or pattern==2:
        if typee == "11":
            boxes1 = event.front_boxes
            boxes2 = o2nevt.front_boxes

            feat1 = event.front_feats
            feat2 = o2nevt.front_feats
        if typee == "10":
            boxes1 = event.front_boxes
            boxes2 = o2nevt.back_boxes

            feat1 = event.front_feats
            feat2 = o2nevt.back_feats
        if typee == "00":
            boxes1 = event.back_boxes
            boxes2 = o2nevt.back_boxes

            feat1 = event.back_feats
            feat2 = o2nevt.back_feats
        if typee == "01":
            boxes1 = event.back_boxes
            boxes2 = o2nevt.front_boxes

            feat1 = event.back_feats
            feat2 = o2nevt.front_feats
def simi_calc(event, o2nevt, typee=None):
    if typee == "11":
        boxes1 = event.front_boxes
        boxes2 = o2nevt.front_boxes

        feat1 = event.front_feats
        feat2 = o2nevt.front_feats
    if typee == "10":
        boxes1 = event.front_boxes
        boxes2 = o2nevt.back_boxes

        feat1 = event.front_feats
        feat2 = o2nevt.back_feats
    if typee == "00":
        boxes1 = event.back_boxes
        boxes2 = o2nevt.back_boxes

        feat1 = event.back_feats
        feat2 = o2nevt.back_feats
    if typee == "01":
        boxes1 = event.back_boxes
        boxes2 = o2nevt.front_boxes

        feat1 = event.back_feats
        feat2 = o2nevt.front_feats

    '''Custom event-feature selection'''
    if pattern==3 and len(event.feats_compose) and len(o2nevt.feats_compose):
        feat1 = [event.feats_compose]
        feat2 = [o2nevt.feats_compose]

    if typee==3:
        feat1 = event.feats_compose
        feat2 = o2nevt.feats_compose

    if len(feat1) and len(feat2):
        matrix = 1 - cdist(feat1[0], feat2[0], 'cosine')
        simi = np.mean(matrix)
@ -70,64 +109,54 @@ def one2n_pr(evtDicts, pattern=1):
    '''
    pattern:
        1: the similarity recorded in process.data
        2: compute the similarity from the feature combination selected by the type recorded in process.data
        3: compute the similarity in another way from the track features in process.data
        2: select the features to compute according to the type recorded in process.data
        3: select the features to compute in another way
    '''
    tpevents, fnevents, fpevents, tnevents = [], [], [], []
    tpsimi, fnsimi, tnsimi, fpsimi = [], [], [], []
    one2nFile, errorFile_one2n = [], []
    errorFile_one2n_ = []
    evts_output = []
    errorFile_one2n = []
    for evtname, event in evtDicts.items():
        evt_names, evt_barcodes, evt_similars, evt_types = [], [], [], []
        evt_names, evt_barcodes, evt_similars, evt_types = [], [], [], []

        if len(event.one2n)==0 or len(event.barcode)==0:
            continue

        evts_output.append(evtname)

        for ndict in event.one2n:
            nname = ndict["event"]
            barcode = ndict["barcode"]
            similar = ndict["similar"]
            typee = ndict["type"].strip()

            if len(barcode)==0:
                continue
            if typee.find(",") >= 0:
                typee = typee.split(",")[-1]

            if pattern==1:
                evt_similars.append(similar)
            if pattern==2 or pattern==3:
                o2n_evt = [evt for name, evt in evtDicts.items() if name.find(nname[:15])==0]
                if len(o2n_evt)!=1:
                    continue

                simival = simi_calc(event, o2n_evt[0], pattern, typee)
                if simival==None:
                    continue
                evt_similars.append(simival)

            evt_names.append(nname)
            evt_barcodes.append(barcode)
            evt_types.append(typee)

            # if evtname == "20250226-170321-327_6903244678377":
            #     print("evtname")
            if pattern==1:
                evt_similars.append(similar)

            if pattern==2 or pattern==3:
                o2n_evt = [evt for name, evt in evtDicts.items() if name.find(nname[:15])==0]
                if len(o2n_evt)==1:
                    o2nevt = o2n_evt[0]
                else:
                    continue

                if pattern==2:
                    simival = simi_calc(event, o2nevt, typee)

                if pattern==3:
                    simival = simi_calc(event, o2nevt, typee=pattern)

        ## none of the oneTon entries in process.data contain the barcode of the current event
        if event.barcode not in evt_barcodes:
            errorFile_one2n.append(evtname)
            continue
        else:
            one2nFile.append(evtname)

        if len(evt_names)==len(evt_barcodes)==len(evt_similars)==len(evt_types) and len(evt_names)>0:
                if simival==None:
                    continue
                evt_similars.append(simival)

        if len(evt_names)==len(evt_barcodes) and len(evt_barcodes)==len(evt_similars) \
            and len(evt_similars)==len(evt_types) and len(evt_names)>0:

            # maxsim = evt_similars[evt_similars.index(max(evt_similars))]
            maxsim = max(evt_similars)
            for i in range(len(evt_names)):
                bcd, simi = evt_barcodes[i], evt_similars[i]

                if bcd==event.barcode and simi==maxsim:
                    tpsimi.append(simi)
                    tpevents.append(evtname)
@ -141,11 +170,14 @@ def one2n_pr(evtDicts, pattern=1):
                    fpsimi.append(simi)
                    fpevents.append(evtname)
                else:
                    errorFile_one2n_.append(evtname)

            errorFile_one2n.append(evtname)

    ''' 1:n data storage; needs to be sorted by similarity'''
    PPrecise, PRecall = [], []
    NPrecise, NRecall = [], []

    Thresh = np.linspace(-0.2, 1, 100)
    for th in Thresh:
        '''============================= 1:n computation'''
@ -155,9 +187,9 @@ def one2n_pr(evtDicts, pattern=1):
        TN = sum(np.array(tnsimi) < th)

        PPrecise.append(TP/(TP+FP+1e-6))
        PRecall.append(TP/(TP+FN+1e-6))
        PRecall.append(TP/(len(tpsimi)+len(fnsimi)+1e-6))
        NPrecise.append(TN/(TN+FN+1e-6))
        NRecall.append(TN/(TN+FP+1e-6))
        NRecall.append(TN/(len(tnsimi)+len(fpsimi)+1e-6))

    '''4. ============================= 1:n curves'''
@ -168,49 +200,40 @@ def one2n_pr(evtDicts, pattern=1):
    ax.plot(Thresh, NRecall, 'c', label='Recall_Neg: TN/TNFN')
    ax.set_xlim([0, 1])
    ax.set_ylim([0, 1])

    ax.set_xticks(np.arange(0, 1, 0.1))
    ax.set_yticks(np.arange(0, 1, 0.1))
    ax.grid(True, linestyle='--')

    ax.grid(True)
    ax.set_title('1:n Precise & Recall')
    ax.set_xlabel(f"Event Num: {len(one2nFile)}")
    ax.set_xlabel(f"Event Num: {len(tpsimi)+len(fnsimi)}")
    ax.legend()
    plt.show()
    ## ============================= 1:n histograms'''
    fig, axes = plt.subplots(2, 2)
    axes[0, 0].hist(tpsimi, bins=60, range=(-0.2, 1), edgecolor='black')
    axes[0, 0].set_xlim([-0.2, 1])
    axes[0, 0].set_title(f'TP: {len(tpsimi)}')
    axes[0, 0].set_title('TP')
    axes[0, 1].hist(fpsimi, bins=60, range=(-0.2, 1), edgecolor='black')
    axes[0, 1].set_xlim([-0.2, 1])
    axes[0, 1].set_title(f'FP: {len(fpsimi)}')
    axes[0, 1].set_title('FP')
    axes[1, 0].hist(tnsimi, bins=60, range=(-0.2, 1), edgecolor='black')
    axes[1, 0].set_xlim([-0.2, 1])
    axes[1, 0].set_title(f'TN: {len(tnsimi)}')
    axes[1, 0].set_title('TN')
    axes[1, 1].hist(fnsimi, bins=60, range=(-0.2, 1), edgecolor='black')
    axes[1, 1].set_xlim([-0.2, 1])
    axes[1, 1].set_title(f'FN: {len(fnsimi)}')
    axes[1, 1].set_title('FN')
    plt.show()

    return fpevents

def main():
    '''1. Build the event dicts and save them to eventDataPath; only needs to run once'''
    init_eventDict(eventSourcePath, eventDataPath, stype="realtime")  # 'source', 'data', 'realtime'
def main():

    # for pfile in os.listdir(eventDataPath):
    #     evt = os.path.splitext(pfile)[0].split('_')
    #     cont = len(evt)>=2 and evt[-1].isdigit() and len(evt[-1])>=10
    #     if not cont:
    #         continue

    '''1. Build the event dicts and save them to eventDataPath; only needs to run once'''
    init_eventdict(eventSourcePath, stype="source")

    '''2. Read the event dicts'''
    evtDicts = read_eventdict(eventDataPath)

    '''3. Evaluate the 1:n comparison events'''
    fpevents = one2n_pr(evtDicts, pattern=1)
    fpevents = one2n_pr(evtDicts, pattern=3)

    fpErrFile = str(Path(resultPath).joinpath("one2n_fp_Error.txt"))
    with open(fpErrFile, "w") as file:
@ -220,16 +243,15 @@ def main():


if __name__ == '__main__':
    eventSourcePath = r"\\192.168.1.28\share\测试视频数据以及日志\全实时测试\V12\2025-3-4_2"
    resultPath = r"\\192.168.1.28\share\测试视频数据以及日志\全实时测试\testing"
    eventSourcePath = r"\\192.168.1.28\share\测试视频数据以及日志\算法全流程测试\202412\result\ShoppingDict_pkfile"
    resultPath = r"\\192.168.1.28\share\测试视频数据以及日志\算法全流程测试\202412\result\contrast"

    eventDataPath = os.path.join(resultPath, "evtobjs_wang")
    eventDataPath = os.path.join(resultPath, "evtobjs")
    similPath = os.path.join(resultPath, "simidata")
    if not os.path.exists(eventDataPath):
        os.makedirs(eventDataPath)

    # similPath = os.path.join(resultPath, "simidata")
    # if not os.path.exists(similPath):
    #     os.makedirs(similPath)
    if not os.path.exists(similPath):
        os.makedirs(similPath)

    main()
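one2n_pr above sweeps a threshold over the TP/FP/TN/FN similarity lists and plots precision and recall for the positive and negative decisions. A compact sketch of that sweep on synthetic similarity lists; the 1e-6 guard mirrors the code above, and the exact TP/FP counting in the omitted hunk lines is assumed to use the same >= / < comparisons:

```python
import numpy as np

tpsimi = [0.82, 0.77, 0.91]   # correct barcode ranked first
fnsimi = [0.41, 0.55]         # correct barcode missed
fpsimi = [0.66]               # wrong barcode ranked first
tnsimi = [0.12, 0.25, 0.31]   # correctly rejected

for th in np.linspace(-0.2, 1, 7):
    TP = np.sum(np.array(tpsimi) >= th)
    FP = np.sum(np.array(fpsimi) >= th)
    TN = np.sum(np.array(tnsimi) < th)
    p_precision = TP / (TP + FP + 1e-6)
    p_recall = TP / (len(tpsimi) + len(fnsimi) + 1e-6)
    print(f"th={th:+.2f}  P-precision={p_precision:.2f}  P-recall={p_recall:.2f}")
```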
@ -27,24 +27,187 @@ Created on Fri Aug 30 17:53:03 2024
|
||||
|
||||
"""
|
||||
import numpy as np
|
||||
import cv2
|
||||
import os
|
||||
import sys
|
||||
import random
|
||||
import pickle
|
||||
import json
|
||||
|
||||
import random
|
||||
import copy
|
||||
import sys
|
||||
# import torch
|
||||
import time
|
||||
# import json
|
||||
from pathlib import Path
|
||||
from scipy.spatial.distance import cdist
|
||||
import matplotlib.pyplot as plt
|
||||
import shutil
|
||||
from datetime import datetime
|
||||
# from openpyxl import load_workbook, Workbook
|
||||
|
||||
FILE = Path(__file__).resolve()
|
||||
ROOT = FILE.parents[1] # YOLOv5 root directory
|
||||
if str(ROOT) not in sys.path:
|
||||
sys.path.append(str(ROOT))
|
||||
# from config import config as conf
|
||||
# from model import resnet18 as resnet18
|
||||
# from feat_inference import inference_image
|
||||
|
||||
from utils.calsimi import calsimi_vs_stdfeat, calsimi_vs_stdfeat_new
|
||||
from utils.tools import get_evtList, init_eventDict
|
||||
from utils.databits import data_precision_compare
|
||||
sys.path.append(r"D:\DetectTracking")
|
||||
from tracking.utils.read_data import extract_data, read_tracking_output, read_similar, read_deletedBarcode_file
|
||||
from tracking.utils.plotting import Annotator, colors
|
||||
from feat_extract.config import config as conf
|
||||
from feat_extract.inference import FeatsInterface
|
||||
from utils.event import ShoppingEvent, save_data
|
||||
from genfeats import gen_bcd_features
|
||||
from event_test import calc_simil
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
def int8_to_ft16(arr_uint8, amin, amax):
|
||||
arr_ft16 = (arr_uint8 / 255 * (amax-amin) + amin).astype(np.float16)
|
||||
|
||||
return arr_ft16
|
||||
|
||||
def ft16_to_uint8(arr_ft16):
|
||||
# pickpath = r"\\192.168.1.28\share\测试_202406\contrast\std_features_ft32vsft16\6902265587712_ft16.pickle"
|
||||
|
||||
# with open(pickpath, 'rb') as f:
|
||||
# edict = pickle.load(f)
|
||||
|
||||
# arr_ft16 = edict['feats']
|
||||
|
||||
amin = np.min(arr_ft16)
|
||||
amax = np.max(arr_ft16)
|
||||
arr_ft255 = (arr_ft16 - amin) * 255 / (amax-amin)
|
||||
arr_uint8 = arr_ft255.astype(np.uint8)
|
||||
|
||||
arr_ft16_ = int8_to_ft16(arr_uint8, amin, amax)
|
||||
|
||||
arrDistNorm = np.linalg.norm(arr_ft16_ - arr_ft16) / arr_ft16_.size
|
||||
|
||||
return arr_uint8, arr_ft16_
|
||||
|
||||
|
||||
def data_precision_compare(stdfeat, evtfeat, evtMessage, save=True):
|
||||
evt, stdbcd, label = evtMessage
|
||||
rltdata, rltdata_ft16, rltdata_ft16_ = [], [], []
|
||||
|
||||
matrix = 1 - cdist(stdfeat, evtfeat, 'cosine')
|
||||
simi_mean = np.mean(matrix)
|
||||
simi_max = np.max(matrix)
|
||||
stdfeatm = np.mean(stdfeat, axis=0, keepdims=True)
|
||||
evtfeatm = np.mean(evtfeat, axis=0, keepdims=True)
|
||||
simi_mfeat = 1- np.maximum(0.0, cdist(stdfeatm, evtfeatm, 'cosine'))
|
||||
rltdata = [label, stdbcd, evt, simi_mean, simi_max, simi_mfeat[0,0]]
|
||||
|
||||
|
||||
##================================================================= float16
|
||||
stdfeat_ft16 = stdfeat.astype(np.float16)
|
||||
evtfeat_ft16 = evtfeat.astype(np.float16)
|
||||
stdfeat_ft16 /= np.linalg.norm(stdfeat_ft16, axis=1)[:, None]
|
||||
evtfeat_ft16 /= np.linalg.norm(evtfeat_ft16, axis=1)[:, None]
|
||||
|
||||
|
||||
matrix_ft16 = 1 - cdist(stdfeat_ft16, evtfeat_ft16, 'cosine')
|
||||
simi_mean_ft16 = np.mean(matrix_ft16)
|
||||
simi_max_ft16 = np.max(matrix_ft16)
|
||||
stdfeatm_ft16 = np.mean(stdfeat_ft16, axis=0, keepdims=True)
|
||||
evtfeatm_ft16 = np.mean(evtfeat_ft16, axis=0, keepdims=True)
|
||||
simi_mfeat_ft16 = 1- np.maximum(0.0, cdist(stdfeatm_ft16, evtfeatm_ft16, 'cosine'))
|
||||
rltdata_ft16 = [label, stdbcd, evt, simi_mean_ft16, simi_max_ft16, simi_mfeat_ft16[0,0]]
|
||||
|
||||
'''****************** uint8 is ok!!!!!! ******************'''
|
||||
##=================================================================== uint8
|
||||
# stdfeat_uint8, stdfeat_ft16_ = ft16_to_uint8(stdfeat_ft16)
|
||||
# evtfeat_uint8, evtfeat_ft16_ = ft16_to_uint8(evtfeat_ft16)
|
||||
|
||||
stdfeat_uint8 = (stdfeat_ft16*128).astype(np.int8)
|
||||
evtfeat_uint8 = (evtfeat_ft16*128).astype(np.int8)
|
||||
stdfeat_ft16_ = stdfeat_uint8.astype(np.float16)/128
|
||||
evtfeat_ft16_ = evtfeat_uint8.astype(np.float16)/128
|
||||
|
||||
absdiff = np.linalg.norm(stdfeat_ft16_ - stdfeat) / stdfeat.size
|
||||
|
||||
matrix_ft16_ = 1 - cdist(stdfeat_ft16_, evtfeat_ft16_, 'cosine')
|
||||
simi_mean_ft16_ = np.mean(matrix_ft16_)
|
||||
simi_max_ft16_ = np.max(matrix_ft16_)
|
||||
stdfeatm_ft16_ = np.mean(stdfeat_ft16_, axis=0, keepdims=True)
|
||||
evtfeatm_ft16_ = np.mean(evtfeat_ft16_, axis=0, keepdims=True)
|
||||
simi_mfeat_ft16_ = 1- np.maximum(0.0, cdist(stdfeatm_ft16_, evtfeatm_ft16_, 'cosine'))
|
||||
rltdata_ft16_ = [label, stdbcd, evt, simi_mean_ft16_, simi_max_ft16_, simi_mfeat_ft16_[0,0]]
|
||||
|
||||
if not save:
|
||||
return
|
||||
|
||||
|
||||
##========================================================= save as float32
|
||||
rppath = os.path.join(similPath, f'{evt}_ft32.pickle')
|
||||
with open(rppath, 'wb') as f:
|
||||
pickle.dump(rltdata, f)
|
||||
|
||||
rtpath = os.path.join(similPath, f'{evt}_ft32.txt')
|
||||
with open(rtpath, 'w', encoding='utf-8') as f:
|
||||
for result in rltdata:
|
||||
part = [f"{x:.3f}" if isinstance(x, float) else str(x) for x in result]
|
||||
line = ', '.join(part)
|
||||
f.write(line + '\n')
|
||||
|
||||
|
||||
##========================================================= save as float16
|
||||
rppath_ft16 = os.path.join(similPath, f'{evt}_ft16.pickle')
|
||||
with open(rppath_ft16, 'wb') as f:
|
||||
pickle.dump(rltdata_ft16, f)
|
||||
|
||||
rtpath_ft16 = os.path.join(similPath, f'{evt}_ft16.txt')
|
||||
with open(rtpath_ft16, 'w', encoding='utf-8') as f:
|
||||
for result in rltdata_ft16:
|
||||
part = [f"{x:.3f}" if isinstance(x, float) else str(x) for x in result]
|
||||
line = ', '.join(part)
|
||||
f.write(line + '\n')
|
||||
|
||||
|
||||
##=========================================================== save as uint8
|
||||
rppath_uint8 = os.path.join(similPath, f'{evt}_uint8.pickle')
|
||||
with open(rppath_uint8, 'wb') as f:
|
||||
pickle.dump(rltdata_ft16_, f)
|
||||
|
||||
rtpath_uint8 = os.path.join(similPath, f'{evt}_uint8.txt')
|
||||
with open(rtpath_uint8, 'w', encoding='utf-8') as f:
|
||||
for result in rltdata_ft16_:
|
||||
part = [f"{x:.3f}" if isinstance(x, float) else str(x) for x in result]
|
||||
line = ', '.join(part)
|
||||
f.write(line + '\n')
|
||||
|
||||
|
||||
|
||||
|
||||
def simi_calc(event, stdfeat):
    evtfeat = event.feats_compose
    if isinstance(event.feats_select, list):
        if len(event.feats_select) and len(event.feats_select[0]):
            evtfeat = event.feats_select[0]
        else:
            return None, None, None
    else:
        evtfeat = event.feats_select

    if len(evtfeat)==0 or len(stdfeat)==0:
        return None, None, None

    evtfeat /= np.linalg.norm(evtfeat, axis=1)[:, None]
    stdfeat /= np.linalg.norm(stdfeat, axis=1)[:, None]

    matrix = 1 - cdist(evtfeat, stdfeat, 'cosine')
    matrix[matrix < 0] = 0

    simi_mean = np.mean(matrix)
    simi_max = np.max(matrix)
    stdfeatm = np.mean(stdfeat, axis=0, keepdims=True)
    evtfeatm = np.mean(evtfeat, axis=0, keepdims=True)
    simi_mfeat = 1 - np.maximum(0.0, cdist(stdfeatm, evtfeatm, 'cosine'))

    return simi_mean, simi_max, simi_mfeat[0,0]
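As an illustration of the three statistics simi_calc returns, here is a minimal, self-contained sketch on made-up feature arrays (shapes and random values are placeholders, not data from this repository):

import numpy as np
from scipy.spatial.distance import cdist

# Hypothetical toy inputs: 2 event samples vs. 3 standard samples, 4-D each.
rng = np.random.default_rng(0)
evtfeat = rng.random((2, 4))
stdfeat = rng.random((3, 4))
evtfeat /= np.linalg.norm(evtfeat, axis=1)[:, None]
stdfeat /= np.linalg.norm(stdfeat, axis=1)[:, None]

matrix = 1 - cdist(evtfeat, stdfeat, 'cosine')   # pairwise cosine similarities, shape (2, 3)
matrix[matrix < 0] = 0

simi_mean = np.mean(matrix)   # average over all event/standard pairs
simi_max = np.max(matrix)     # best single pair
simi_mfeat = 1 - np.maximum(0.0, cdist(stdfeat.mean(axis=0, keepdims=True),
                                       evtfeat.mean(axis=0, keepdims=True), 'cosine'))
print(simi_mean, simi_max, simi_mfeat[0, 0])   # mean-, max- and mean-feature similarity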
|
||||
|
||||
|
||||
def build_std_evt_dict():
|
||||
@ -54,6 +217,18 @@ def build_std_evt_dict():
|
||||
'''
|
||||
|
||||
stdBarcode = [p.stem for p in Path(stdFeaturePath).iterdir() if p.is_file() and (p.suffix=='.json' or p.suffix=='.pickle')]
|
||||
|
||||
'''*********** USearch ***********'''
|
||||
# stdFeaturePath = r"D:\contrast\stdlib\v11_test.json"
|
||||
# stdBarcode = []
|
||||
# stdlib = {}
|
||||
# with open(stdFeaturePath, 'r', encoding='utf-8') as f:
|
||||
# data = json.load(f)
|
||||
# for dic in data['total']:
|
||||
# barcode = dic['key']
|
||||
# feature = np.array(dic['value'])
|
||||
# stdBarcode.append(barcode)
|
||||
# stdlib[barcode] = feature
|
||||
|
||||
'''======1. 购物事件列表,该列表中的 Barcode 存在于标准的 stdBarcode 内 ==='''
|
||||
evtList = [(p.stem, p.stem.split('_')[-1]) for p in Path(eventDataPath).iterdir()
|
||||
@ -83,7 +258,10 @@ def build_std_evt_dict():
|
||||
stddata = pickle.load(f)
|
||||
feat = stddata["feats_ft32"]
|
||||
stdDict[barcode] = feat
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
'''*********** USearch ***********'''
|
||||
# stdDict = {}
|
||||
# for barcode in barcodes:
|
||||
@ -93,17 +271,13 @@ def build_std_evt_dict():
|
||||
evtDict = {}
|
||||
for evtname, barcode in evtList:
|
||||
evtpath = os.path.join(eventDataPath, evtname+'.pickle')
|
||||
try:
|
||||
with open(evtpath, 'rb') as f:
|
||||
evtdata = pickle.load(f)
|
||||
except Exception as e:
|
||||
print(evtname)
|
||||
|
||||
with open(evtpath, 'rb') as f:
|
||||
evtdata = pickle.load(f)
|
||||
evtDict[evtname] = evtdata
|
||||
|
||||
return evtList, evtDict, stdDict
|
||||
|
||||
def one2SN_pr(evtList, evtDict, stdDict, simType="simple"):
|
||||
def one2SN_pr(evtList, evtDict, stdDict):
|
||||
|
||||
std_barcodes = set([bcd for _, bcd in evtList])
|
||||
|
||||
@ -126,21 +300,14 @@ def one2SN_pr(evtList, evtDict, stdDict, simType="simple"):
|
||||
event = evtDict[evtname]
|
||||
## 无轨迹判断
|
||||
if len(event.front_feats)+len(event.back_feats)==0:
|
||||
errorFile_one2SN.append(evtname)
|
||||
print(f"No trajectory: {evtname}")
|
||||
print(evtname)
|
||||
continue
|
||||
|
||||
barcodes, similars = [], []
|
||||
for stdbcd in bcd_selected:
|
||||
stdfeat = stdDict[stdbcd]
|
||||
|
||||
if simType=="typea":
|
||||
simi_mean, simi_max, simi_mfeat = calsimi_vs_stdfeat(event, stdfeat)
|
||||
elif simType=="typeb":
|
||||
pass
|
||||
else:
|
||||
simi_mean, simi_1, simi_2 = calsimi_vs_stdfeat_new(event, stdfeat)
|
||||
|
||||
simi_mean, simi_max, simi_mfeat = simi_calc(event, stdfeat)
|
||||
# simi_mean = calc_simil(event, stdfeat)
|
||||
|
||||
## 在event.front_feats和event.back_feats同时为空时,此处不需要保护
|
||||
# if simi_mean==None:
|
||||
@ -184,10 +351,10 @@ def one2SN_pr(evtList, evtDict, stdDict, simType="simple"):
|
||||
FNX = sum(np.array(fn_simi) < th)
|
||||
TNX = sum(np.array(tn_simi) < th)
|
||||
PPreciseX.append(TPX/(TPX+FPX+1e-6))
|
||||
PRecallX.append(TPX/(TPX+FNX+1e-6))
|
||||
PRecallX.append(TPX/(len(tp_simi)+len(fn_simi)+1e-6))
|
||||
|
||||
NPreciseX.append(TNX/(TNX+FNX+1e-6))
|
||||
NRecallX.append(TNX/(TNX+FPX+1e-6))
|
||||
NRecallX.append(TNX/(len(tn_simi)+len(fp_simi)+1e-6))
|
||||
|
||||
fig, ax = plt.subplots()
|
||||
ax.plot(Thresh, PPreciseX, 'r', label='Precise_Pos: TP/TPFP')
|
||||
@ -196,17 +363,11 @@ def one2SN_pr(evtList, evtDict, stdDict, simType="simple"):
|
||||
ax.plot(Thresh, NRecallX, 'c', label='Recall_Neg: TN/TNFN')
|
||||
ax.set_xlim([0, 1])
|
||||
ax.set_ylim([0, 1])
|
||||
ax.set_xticks(np.arange(0, 1, 0.1))
|
||||
ax.set_yticks(np.arange(0, 1, 0.1))
|
||||
ax.grid(True, linestyle='--')
|
||||
ax.grid(True)
|
||||
ax.set_title('1:SN Precise & Recall')
|
||||
ax.set_xlabel(f"Event Num: {len(tp_events) + len(fn_events)}")
|
||||
ax.set_xlabel(f"Event Num: {len(evtList)}")
|
||||
ax.legend()
|
||||
plt.show()
|
||||
|
||||
rltpath = os.path.join(similPath, f'pr_1toSN_{simType}.png')
|
||||
plt.savefig(rltpath)
|
||||
|
||||
## ============================= 1:N 展厅 直方图'''
|
||||
fig, axes = plt.subplots(2, 2)
|
||||
axes[0, 0].hist(tp_simi, bins=60, range=(-0.2, 1), edgecolor='black')
|
||||
@ -222,14 +383,11 @@ def one2SN_pr(evtList, evtDict, stdDict, simType="simple"):
|
||||
axes[1, 1].set_xlim([-0.2, 1])
|
||||
axes[1, 1].set_title(f'FN({len(fn_simi)})')
|
||||
plt.show()
|
||||
|
||||
rltpath = os.path.join(similPath, f'hist_1toSN_{simType}.png')
|
||||
plt.savefig(rltpath)
|
||||
|
||||
|
||||
|
||||
|
||||
def one2one_simi(evtList, evtDict, stdDict, simType):
|
||||
def one2one_simi(evtList, evtDict, stdDict):
|
||||
|
||||
barcodes = set([bcd for _, bcd in evtList])
|
||||
'''======1 构造 3 个事件对: 扫 A 放 A, 扫 A 放 B, 合并 ===================='''
|
||||
@ -245,60 +403,33 @@ def one2one_simi(evtList, evtDict, stdDict, simType):
|
||||
|
||||
'''======2 计算事件、标准特征集相似度 =================='''
|
||||
rltdata = []
|
||||
errorFile_one2one = []
|
||||
for i in range(len(mergePairs)):
|
||||
evtname, stdbcd, label = mergePairs[i]
|
||||
event = evtDict[evtname]
|
||||
if len(event.feats_compose)==0:
|
||||
errorFile_one2one.append(evtname)
|
||||
|
||||
continue
|
||||
if len(event.feats_compose)==0: continue
|
||||
|
||||
stdfeat = stdDict[stdbcd] # float32
|
||||
|
||||
if simType=="typea":
|
||||
simi_mean, simi_1, simi_2 = calsimi_vs_stdfeat_new(event, stdfeat)
|
||||
elif simType=="typeb":
|
||||
pass
|
||||
else:
|
||||
simi_mean, simi_1, simi_2 = calsimi_vs_stdfeat(event, stdfeat)
|
||||
|
||||
|
||||
simi_mean, simi_max, simi_mfeat = simi_calc(event, stdfeat)
|
||||
if simi_mean is None:
|
||||
continue
|
||||
|
||||
rltdata.append((label, stdbcd, evtname, simi_mean, simi_1, simi_2))
|
||||
rltdata.append((label, stdbcd, evtname, simi_mean, simi_max, simi_mfeat))
|
||||
|
||||
'''================ float32、16、int8 精度比较与存储 ============='''
|
||||
# data_precision_compare(stdfeat, evtfeat, mergePairs[i], similPath, save=True)
|
||||
# data_precision_compare(stdfeat, evtfeat, mergePairs[i], save=True)
|
||||
|
||||
|
||||
errorFile_one2one = list(set(errorFile_one2one))
|
||||
|
||||
return rltdata, errorFile_one2one
|
||||
return rltdata
|
||||
|
||||
|
||||
def one2one_pr(evtList, evtDict, stdDict, simType="simple"):
|
||||
|
||||
rltdata, errorFile_one2one = one2one_simi(evtList, evtDict, stdDict, simType)
|
||||
|
||||
def one2one_pr(rltdata):
|
||||
Same, Cross = [], []
|
||||
|
||||
for label, stdbcd, evtname, simi_mean, simi_max, simi_mft in rltdata:
|
||||
if simType=="simple" and label == "same":
|
||||
if label == "same":
|
||||
Same.append(simi_max)
|
||||
if simType=="simple" and label == "diff":
|
||||
if label == "diff":
|
||||
Cross.append(simi_max)
|
||||
|
||||
if simType=="typea" and label == "same":
|
||||
Same.append(simi_mean)
|
||||
if simType=="typea" and label == "diff":
|
||||
Cross.append(simi_mean)
|
||||
|
||||
|
||||
# for label, stdbcd, evtname, simi_mean, simi_max, simi_mft in rltdata:
|
||||
# if label == "same":
|
||||
# Same.append(simi_mean)
|
||||
# if label == "diff":
|
||||
# Cross.append(simi_mean)
|
||||
|
||||
Same = np.array(Same)
|
||||
Cross = np.array(Cross)
|
||||
@ -321,47 +452,33 @@ def one2one_pr(evtList, evtDict, stdDict, simType="simple"):
|
||||
Correct = []
|
||||
Thresh = np.linspace(-0.2, 1, 100)
|
||||
for th in Thresh:
|
||||
TP = np.sum(Same >= th)
|
||||
FN = np.sum(Same < th)
|
||||
# FN = TPFN - TP
|
||||
|
||||
TP = np.sum(Same > th)
|
||||
FN = TPFN - TP
|
||||
TN = np.sum(Cross < th)
|
||||
FP = np.sum(Cross >= th)
|
||||
# FP = TNFP - TN
|
||||
FP = TNFP - TN
|
||||
|
||||
|
||||
Recall_Pos.append(TP/TPFN)
|
||||
Recall_Neg.append(TN/TNFP)
|
||||
Precision_Pos.append(TP/(TP+FP+1e-6))
|
||||
Precision_Neg.append(TN/(TN+FN+1e-6))
|
||||
Recall_Pos.append(TP/(TP+FN+1e-6))
|
||||
Recall_Neg.append(TN/(TN+FP+1e-6))
|
||||
|
||||
# Recall_Pos.append(TP/TPFN)
|
||||
# Recall_Neg.append(TN/TNFP)
|
||||
|
||||
|
||||
Correct.append((TN+TP)/(TPFN+TNFP))
|
||||
|
||||
fig, ax = plt.subplots()
|
||||
|
||||
ax.plot(Thresh, Precision_Pos, 'r', label='Precision_Pos: TP/(TP+FP)')
|
||||
ax.plot(Thresh, Correct, 'r', label='Correct: (TN+TP)/(TPFN+TNFP)')
|
||||
ax.plot(Thresh, Recall_Pos, 'b', label='Recall_Pos: TP/TPFN')
|
||||
ax.plot(Thresh, Recall_Neg, 'g', label='Recall_Neg: TN/TNFP')
|
||||
ax.plot(Thresh, Correct, 'c', label='Correct: (TN+TP)/(TPFN+TNFP)')
|
||||
ax.plot(Thresh, Precision_Pos, 'c', label='Precision_Pos: TP/(TP+FP)')
|
||||
ax.plot(Thresh, Precision_Neg, 'm', label='Precision_Neg: TN/(TN+FN)')
|
||||
|
||||
ax.set_xlim([0, 1])
|
||||
ax.set_ylim([0, 1])
|
||||
|
||||
ax.set_xticks(np.arange(0, 1, 0.1))
|
||||
ax.set_yticks(np.arange(0, 1, 0.1))
|
||||
ax.grid(True, linestyle='--')
|
||||
|
||||
ax.grid(True)
|
||||
ax.set_title('PrecisePos & PreciseNeg')
|
||||
ax.set_xlabel(f"Same Num: {TPFN}, Cross Num: {TNFP}")
|
||||
ax.legend()
|
||||
plt.show()
|
||||
|
||||
rltpath = os.path.join(similPath, f'pr_1to1_{simType}.png')
|
||||
rltpath = os.path.join(similPath, 'pr.png')
|
||||
plt.savefig(rltpath) # svg, png, pdf
|
||||
|
||||
|
||||
@ -374,7 +491,7 @@ def one2one_pr(evtList, evtDict, stdDict, simType="simple"):
|
||||
axes[1].set_xlim([-0.2, 1])
|
||||
axes[1].set_title(f'TN({len(Cross)})')
|
||||
|
||||
rltpath = os.path.join(similPath, f'hist_1to1_{simType}.png')
|
||||
rltpath = os.path.join(similPath, 'hist.png')
|
||||
plt.savefig(rltpath)
|
||||
|
||||
|
||||
@ -382,62 +499,139 @@ def one2one_pr(evtList, evtDict, stdDict, simType="simple"):
|
||||
|
||||
|
||||
|
||||
def gen_eventdict(sourcePath, saveimg=True):
|
||||
k, errEvents = 0, []
|
||||
for source_path in sourcePath:
|
||||
evtpath, bname = os.path.split(source_path)
|
||||
|
||||
## 兼容事件的两种情况:文件夹 和 Yolo-Resnet-Tracker 的输出
|
||||
if os.path.isfile(source_path):
|
||||
bname, ext = os.path.splitext(bname)
|
||||
evt = bname.split("_")
|
||||
|
||||
evt = bname.split('_')
|
||||
condt = len(evt)>=2 and evt[-1].isdigit() and len(evt[-1])>=10
|
||||
if not condt: continue
|
||||
|
||||
# bname = r"20241126-135911-bdf91cf9-3e9a-426d-94e8-ddf92238e175_6923555210479"
|
||||
# source_path = os.path.join(evtpath, bname)
|
||||
|
||||
# 如果已完成事件生成,则不执行
|
||||
pickpath = os.path.join(eventDataPath, f"{bname}.pickle")
|
||||
if os.path.isfile(pickpath): continue
|
||||
|
||||
# event = ShoppingEvent(source_path, stype="data")
|
||||
# with open(pickpath, 'wb') as f:
|
||||
# pickle.dump(event, f)
|
||||
|
||||
try:
|
||||
event = ShoppingEvent(source_path, stype="source")
|
||||
# save_data(event, resultPath)
|
||||
|
||||
with open(pickpath, 'wb') as f:
|
||||
pickle.dump(event, f)
|
||||
print(bname)
|
||||
except Exception as e:
|
||||
errEvents.append(source_path)
|
||||
print(e)
|
||||
|
||||
# k += 1
|
||||
# if k==1:
|
||||
# break
|
||||
|
||||
|
||||
errfile = os.path.join(resultPath, 'error_events.txt')
|
||||
with open(errfile, 'w', encoding='utf-8') as f:
|
||||
for line in errEvents:
|
||||
f.write(line + '\n')
|
||||
|
||||
|
||||
def init_std_evt_dict():
|
||||
'''==== 0. 生成事件列表和对应的 Barcodes列表 ==========='''
|
||||
bcdList, event_spath = [], []
|
||||
for evtpath in eventSourcePath:
|
||||
for evtname in os.listdir(evtpath):
|
||||
bname, ext = os.path.splitext(evtname)
|
||||
|
||||
## 处理事件的两种情况:文件夹 和 Yolo-Resnet-Tracker 的输出
|
||||
fpath = os.path.join(evtpath, evtname)
|
||||
if os.path.isfile(fpath) and (ext==".pkl" or ext==".pickle"):
|
||||
evt = bname.split('_')
|
||||
elif os.path.isdir(fpath):
|
||||
evt = evtname.split('_')
|
||||
else:
|
||||
continue
|
||||
|
||||
|
||||
if len(evt)>=2 and evt[-1].isdigit() and len(evt[-1])>=10:
|
||||
bcdList.append(evt[-1])
|
||||
event_spath.append(os.path.join(evtpath, evtname))
|
||||
|
||||
'''==== 1. 生成标准特征集, 只需运行一次, 在 genfeats.py 中实现 ==========='''
|
||||
bcdSet = set(bcdList)
|
||||
gen_bcd_features(stdSamplePath, stdBarcodePath, stdFeaturePath, bcdSet)
|
||||
print("stdFeats have generated and saved!")
|
||||
|
||||
|
||||
'''==== 2. 生成事件字典, 只需运行一次 ==============='''
|
||||
gen_eventdict(event_spath)
|
||||
print("eventList have generated and saved!")
|
||||
|
||||
|
||||
|
||||
def test_one2one_one2SN(simType):
|
||||
|
||||
def test_one2one():
|
||||
'''1:1性能评估'''
|
||||
|
||||
# evtpaths, bcdSet = get_evtList(eventSourcePath)
|
||||
# 1. 只需运行一次,生成事件字典和相应的标准特征库字典
|
||||
init_std_evt_dict()
|
||||
|
||||
'''=== 1. 只需运行一次,生成事件对应的标准特征库字典,如已生成,无需运行 ===='''
|
||||
# gen_bcd_features(stdSamplePath, stdBarcodePath, stdFeaturePath, eventSourcePath)
|
||||
|
||||
'''==== 2. 生成事件字典, 只需运行一次 ===================='''
|
||||
# init_eventDict(eventSourcePath, eventDataPath, source_type)
|
||||
|
||||
'''==== 3. 基于事件barcode集和标准库barcode交集构造事件集合 ========='''
|
||||
# 2. 基于事件barcode集和标准库barcode交集构造事件集合
|
||||
evtList, evtDict, stdDict = build_std_evt_dict()
|
||||
|
||||
one2one_pr(evtList, evtDict, stdDict, simType)
|
||||
|
||||
one2SN_pr(evtList, evtDict, stdDict, simType)
|
||||
|
||||
rltdata = one2one_simi(evtList, evtDict, stdDict)
|
||||
|
||||
one2one_pr(rltdata)
|
||||
|
||||
|
||||
def test_one2SN():
|
||||
'''1:SN性能评估'''
|
||||
|
||||
# 1. 只需运行一次,生成事件字典和相应的标准特征库字典
|
||||
init_std_evt_dict()
|
||||
|
||||
# 2. 事件barcode集和标准库barcode求交集
|
||||
evtList, evtDict, stdDict = build_std_evt_dict()
|
||||
|
||||
one2SN_pr(evtList, evtDict, stdDict)
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
    '''
    Seven paths in total:
    (1) stdSamplePath: source images used to build the standard comparison feature set
    (2) stdBarcodePath: pickle storage of the standard-set image paths, {barcode: [imgpath1, imgpath2, ...]}
    (3) stdFeaturePath: storage path of the standard comparison feature set
    (4) eventSourcePath: event path, either folders containing data files or the parent folder of Yolo-Resnet-Tracker pickle outputs
    (4) eventSourcePath: event path
    (5) resultPath: result storage path
    (6) eventDataPath: storage path of the shopping events used for 1:1 comparison, under resultPath
    (7) similPath: storage path of the 1:1 comparison results (event level), under resultPath
    '''
|
||||
|
||||
stdSamplePath = "/home/wqg/dataset/total_barcode/totalBarcode"
|
||||
stdBarcodePath = "/home/wqg/dataset/total_barcode/bcdpath"
|
||||
stdFeaturePath = "/home/wqg/dataset/test_dataset/total_barcode/features_json/v11_barcode_0304/"
|
||||
# stdSamplePath = r"\\192.168.1.28\share\数据\已完成数据\展厅数据\v1.0\比对数据\整理\zhantingBase"
|
||||
# stdBarcodePath = r"D:\exhibition\dataset\bcdpath"
|
||||
# stdFeaturePath = r"\\192.168.1.28\share\数据\已完成数据\比对数据\barcode\all_totalBarocde\features_json\v11_barcode_11592"
|
||||
|
||||
if not os.path.exists(stdBarcodePath):
|
||||
os.makedirs(stdBarcodePath)
|
||||
if not os.path.exists(stdFeaturePath):
|
||||
os.makedirs(stdFeaturePath)
|
||||
# eventSourcePath = [r'D:\exhibition\images\20241202']
|
||||
# eventSourcePath = [r"\\192.168.1.28\share\测试视频数据以及日志\各模块测试记录\展厅测试\1129_展厅模型v801测试组测试"]
|
||||
|
||||
    '''source_type:
        "source":   eventSourcePath holds pickle files produced by Yolo-Resnet-Tracker
        "data":     original data files split per event
        "realtime": data files generated fully in real time
    '''
    source_type = 'source'   # 'source', 'data', 'realtime'
    simType = "typea"        # "simple", "typea", "typeb"
|
||||
|
||||
evttype = "single_event_V10"
|
||||
# evttype = "single_event_V5"
|
||||
# evttype = "performence_V10"
|
||||
# evttype = "performence_V5"
|
||||
eventSourcePath = "/home/wqg/dataset/pipeline/yrt/{}/shopping_pkl".format(evttype)
|
||||
|
||||
resultPath = "/home/wqg/dataset/pipeline/contrast/{}".format(evttype)
|
||||
stdSamplePath = r"\\192.168.1.28\share\数据\已完成数据\比对数据\barcode\all_totalBarocde\totalBarcode"
|
||||
stdBarcodePath = r"D:\全实时\source_data\bcdpath"
|
||||
stdFeaturePath = r"D:\全实时\source_data\stdfeats"
|
||||
|
||||
eventSourcePath = [r"\\192.168.1.28\share\测试视频数据以及日志\算法全流程测试\202412\result\ShoppingDict_pkfile"]
|
||||
resultPath = r"\\192.168.1.28\share\测试视频数据以及日志\算法全流程测试\202412\result\contrast"
|
||||
eventDataPath = os.path.join(resultPath, "evtobjs")
|
||||
similPath = os.path.join(resultPath, "simidata")
|
||||
if not os.path.exists(eventDataPath):
|
||||
@ -445,7 +639,9 @@ if __name__ == '__main__':
|
||||
if not os.path.exists(similPath):
|
||||
os.makedirs(similPath)
|
||||
|
||||
test_one2one_one2SN(simType)
|
||||
# test_one2one()
|
||||
|
||||
test_one2SN()
|
||||
|
||||
|
||||
|
||||
|
@ -1,13 +1,8 @@
|
||||
|
||||
# -*- coding: utf-8 -*-
|
||||
"""
|
||||
Created on Wed Sep 11 11:57:30 2024
|
||||
contrast_pr:
|
||||
直接利用测试数据中的 data 文件进行 1:1、1:SN、1:n 性能评估
|
||||
|
||||
test_compare:
|
||||
永辉现场试验输出数据的 1:1 性能评估
|
||||
适用于202410前数据保存版本的,需调用 OneToOneCompare.txt
|
||||
永辉现场试验输出数据的 1:1 性能评估
|
||||
适用于202410前数据保存版本的,需调用 OneToOneCompare.txt
|
||||
@author: ym
|
||||
"""
|
||||
import os
|
||||
@ -16,11 +11,7 @@ from pathlib import Path
|
||||
import matplotlib.pyplot as plt
|
||||
import sys
|
||||
|
||||
FILE = Path(__file__).resolve()
|
||||
ROOT = FILE.parents[1] # YOLOv5 root directory
|
||||
if str(ROOT) not in sys.path:
|
||||
sys.path.append(str(ROOT))
|
||||
|
||||
sys.path.append(r"D:\DetectTracking")
|
||||
from tracking.utils.read_data import read_similar
|
||||
|
||||
def read_one2one_data(filepath):
|
||||
@ -113,31 +104,24 @@ def test_compare():
|
||||
|
||||
plot_pr_curve(simiList)
|
||||
|
||||
def contrast_pr(evtPaths):
|
||||
def contrast_pr(paths):
|
||||
'''
|
||||
1:1
|
||||
'''
|
||||
'''
|
||||
paths = Path(paths)
|
||||
|
||||
evtpaths = []
|
||||
# date_ = ['2025-3-4_1', '2025-3-5_1', '2025-3-5_2']
|
||||
# for dt in date_:
|
||||
# paths = Path(evtPaths) / dt
|
||||
abc = []
|
||||
for p in Path(evtPaths).iterdir():
|
||||
for p in paths.iterdir():
|
||||
condt1 = p.is_dir()
|
||||
condt2 = len(p.name.split('_'))>=2
|
||||
condt3 = len(p.name.split('_')[-1])>=8
|
||||
condt3 = len(p.name.split('_')[-1])>8
|
||||
condt4 = p.name.split('_')[-1].isdigit()
|
||||
if condt1 and condt2 and condt3 and condt4:
|
||||
evtpaths.append(p)
|
||||
elif p.is_dir():
|
||||
abc.append(p.stem)
|
||||
|
||||
|
||||
# evtpaths = [p for p in paths.iterdir() if p.is_dir() and len(p.name.split('_'))>=2 and len(p.name.split('_')[-1])>8]
|
||||
# evtpaths = [p for p in paths.iterdir() if p.is_dir()]
|
||||
|
||||
alg_times = []
|
||||
|
||||
events, similars = [], []
|
||||
##===================================== 扫A放A, 扫A放B场景()
|
||||
one2oneAA, one2oneAB = [], []
|
||||
@ -163,12 +147,11 @@ def contrast_pr(evtPaths):
|
||||
|
||||
errorFile_one2one, errorFile_one2SN, errorFile_one2n = [], [], []
|
||||
|
||||
errorFile = []
|
||||
for path in evtpaths:
|
||||
barcode = path.stem.split('_')[-1]
|
||||
datapath = path.joinpath('process.data')
|
||||
|
||||
if not barcode.isdigit() or len(barcode)<8: continue
|
||||
if not barcode.isdigit() or len(barcode)<10: continue
|
||||
if not datapath.is_file(): continue
|
||||
|
||||
bcdList.append(barcode)
|
||||
@ -184,17 +167,8 @@ def contrast_pr(evtPaths):
|
||||
one2SN = SimiDict['one2SN']
|
||||
one2n = SimiDict['one2n']
|
||||
|
||||
if len(one2one)+len(one2SN)+len(one2n) == 0:
|
||||
errorFile.append(path.stem)
|
||||
|
||||
dtime = SimiDict["algroStartToEnd"]
|
||||
if dtime >= 0:
|
||||
alg_times.append((dtime, path.stem))
|
||||
|
||||
|
||||
'''================== 0. 1:1 ==================='''
|
||||
barcodes, similars = [], []
|
||||
barcodes_ = []
|
||||
for dt in one2one:
|
||||
one2onePath.append((path.stem))
|
||||
if dt['similar']==0:
|
||||
@ -202,14 +176,6 @@ def contrast_pr(evtPaths):
|
||||
continue
|
||||
barcodes.append(dt['barcode'])
|
||||
similars.append(dt['similar'])
|
||||
|
||||
|
||||
barcodes_.append(path.stem)
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
if len(barcodes)==len(similars) and len(barcodes)!=0:
|
||||
## 扫A放A, 扫A放B场景
|
||||
simAA = [similars[i] for i in range(len(barcodes)) if barcodes[i]==barcode]
|
||||
@ -238,8 +204,6 @@ def contrast_pr(evtPaths):
|
||||
_fp_events.append(path.stem)
|
||||
else:
|
||||
errorFile_one2one.append(path.stem)
|
||||
elif len(one2SN)+len(one2n) == 0:
|
||||
errorFile_one2one.append(path.stem)
|
||||
|
||||
|
||||
'''================== 2. 取出场景下的 1 : Small N ==================='''
|
||||
@ -247,7 +211,6 @@ def contrast_pr(evtPaths):
|
||||
for dt in one2SN:
|
||||
barcodes.append(dt['barcode'])
|
||||
similars.append(dt['similar'])
|
||||
|
||||
|
||||
if len(barcodes)==len(similars) and len(barcodes)!=0:
|
||||
## 扫A放A, 扫A放B场景
|
||||
@ -256,11 +219,11 @@ def contrast_pr(evtPaths):
|
||||
|
||||
one2SNAA.extend(simAA)
|
||||
one2SNAB.extend(simAB)
|
||||
|
||||
one2SNPath.append(path.stem)
|
||||
if len(simAA)==0:
|
||||
errorFile_one2SN.append(path.stem)
|
||||
|
||||
one2SNPath1.append(path.stem)
|
||||
|
||||
|
||||
## 相似度排序,barcode相等且排名第一为TP,适用于多的barcode相似度比较
|
||||
max_idx = similars.index(max(similars))
|
||||
max_sim = similars[max_idx]
|
||||
@ -281,7 +244,6 @@ def contrast_pr(evtPaths):
|
||||
fp_events.append(path.stem)
|
||||
else:
|
||||
errorFile_one2SN.append(path.stem)
|
||||
|
||||
|
||||
|
||||
|
||||
@ -292,17 +254,10 @@ def contrast_pr(evtPaths):
|
||||
evt_barcodes.append(dt["barcode"])
|
||||
evt_similars.append(dt["similar"])
|
||||
evt_types.append(dt["type"])
|
||||
|
||||
if len(events)==len(evt_barcodes) and len(evt_barcodes)==len(evt_similars) \
|
||||
and len(evt_similars)==len(evt_types) and len(events)>0:
|
||||
|
||||
|
||||
|
||||
if len(events)==len(evt_barcodes)==len(evt_similars)==len(evt_types) and len(events)>0:
|
||||
if not barcode in evt_barcodes:
|
||||
errorFile_one2n.append(path.stem)
|
||||
continue
|
||||
|
||||
if len(barcodes_):
|
||||
print("do")
|
||||
|
||||
one2nPath.append(path.stem)
|
||||
maxsim = evt_similars[evt_similars.index(max(evt_similars))]
|
||||
for i in range(len(one2n)):
|
||||
@ -357,9 +312,9 @@ def contrast_pr(evtPaths):
|
||||
_TN = sum(np.array(one2oneAB) < th)
|
||||
|
||||
_PPrecise.append(_TP/(_TP+_FP+1e-6))
|
||||
_PRecall.append(_TP/(_TP+_FN+1e-6))
|
||||
_PRecall.append(_TP/(len(one2oneAA)+1e-6))
|
||||
_NPrecise.append(_TN/(_TN+_FN+1e-6))
|
||||
_NRecall.append(_TN/(_TN+_FP+1e-6))
|
||||
_NRecall.append(_TN/(len(one2oneAB)+1e-6))
|
||||
|
||||
'''===================================== 1:SN 均值'''
|
||||
TP_ = sum(np.array(one2SNAA) >= th)
|
||||
@ -379,10 +334,10 @@ def contrast_pr(evtPaths):
|
||||
FNX = sum(np.array(fn_simi) < th)
|
||||
TNX = sum(np.array(tn_simi) < th)
|
||||
PPreciseX.append(TPX/(TPX+FPX+1e-6))
|
||||
PRecallX.append(TPX/(TPX+FNX+1e-6))
|
||||
PRecallX.append(TPX/(len(tp_simi)+len(fn_simi)+1e-6))
|
||||
|
||||
NPreciseX.append(TNX/(TNX+FNX+1e-6))
|
||||
NRecallX.append(TNX/(TNX+FPX+1e-6))
|
||||
NRecallX.append(TNX/(len(tn_simi)+len(fp_simi)+1e-6))
|
||||
|
||||
|
||||
'''===================================== 1:n'''
|
||||
@ -392,19 +347,13 @@ def contrast_pr(evtPaths):
|
||||
TN = sum(np.array(tnsimi) < th)
|
||||
|
||||
PPrecise.append(TP/(TP+FP+1e-6))
|
||||
PRecall.append(TP/(TP+FN+1e-6))
|
||||
PRecall.append(TP/(len(tpsimi)+len(fnsimi)+1e-6))
|
||||
NPrecise.append(TN/(TN+FN+1e-6))
|
||||
NRecall.append(TN/(TN+FP+1e-6))
|
||||
|
||||
algtime = []
|
||||
for tm, _ in alg_times:
|
||||
algtime.append(tm)
|
||||
fig, ax = plt.subplots()
|
||||
ax.hist(np.array(algtime), bins=100, edgecolor='black')
|
||||
ax.set_title('Algorithm Elapsed Time')
|
||||
ax.set_xlabel(f"Event Num: {len(alg_times)}")
|
||||
plt.show()
|
||||
NRecall.append(TN/(len(tnsimi)+len(fpsimi)+1e-6))
|
||||
|
||||
|
||||
|
||||
|
||||
'''1. ============================= 1:1 最大值方案 曲线'''
|
||||
fig, ax = plt.subplots()
|
||||
ax.plot(Thresh, _PPrecise, 'r', label='Precise_Pos: TP/TPFP')
|
||||
@ -413,9 +362,7 @@ def contrast_pr(evtPaths):
|
||||
ax.plot(Thresh, _NRecall, 'c', label='Recall_Neg: TN/TNFN')
|
||||
ax.set_xlim([0, 1])
|
||||
ax.set_ylim([0, 1])
|
||||
ax.set_xticks(np.arange(0, 1, 0.1))
|
||||
ax.set_yticks(np.arange(0, 1, 0.1))
|
||||
ax.grid(True, linestyle='--')
|
||||
ax.grid(True)
|
||||
ax.set_title('1:1 Precise & Recall')
|
||||
ax.set_xlabel(f"Event Num: {len(one2oneAA)+len(one2oneAB)}")
|
||||
ax.legend()
|
||||
@ -434,30 +381,30 @@ def contrast_pr(evtPaths):
|
||||
|
||||
|
||||
'''2. ============================= 1:1 均值方案 曲线'''
|
||||
# fig, ax = plt.subplots()
|
||||
# ax.plot(Thresh, PPrecise_, 'r', label='Precise_Pos: TP/TPFP')
|
||||
# ax.plot(Thresh, PRecall_, 'b', label='Recall_Pos: TP/TPFN')
|
||||
# ax.plot(Thresh, NPrecise_, 'g', label='Precise_Neg: TN/TNFP')
|
||||
# ax.plot(Thresh, NRecall_, 'c', label='Recall_Neg: TN/TNFN')
|
||||
# ax.set_xlim([0, 1])
|
||||
# ax.set_ylim([0, 1])
|
||||
# ax.grid(True)
|
||||
# ax.set_title('1:1 Precise & Recall')
|
||||
# ax.set_xlabel(f"Event Num: {len(one2SNAA)}")
|
||||
# ax.legend()
|
||||
# plt.show()
|
||||
# ## ============================= 1:1 均值方案 直方图'''
|
||||
# fig, axes = plt.subplots(2, 1)
|
||||
# axes[0].hist(np.array(one2SNAA), bins=60, edgecolor='black')
|
||||
# axes[0].set_xlim([-0.2, 1])
|
||||
# axes[0].set_title('AA')
|
||||
# axes[0].set_xlabel(f"Event Num: {len(one2SNAA)}")
|
||||
fig, ax = plt.subplots()
|
||||
ax.plot(Thresh, PPrecise_, 'r', label='Precise_Pos: TP/TPFP')
|
||||
ax.plot(Thresh, PRecall_, 'b', label='Recall_Pos: TP/TPFN')
|
||||
ax.plot(Thresh, NPrecise_, 'g', label='Precise_Neg: TN/TNFP')
|
||||
ax.plot(Thresh, NRecall_, 'c', label='Recall_Neg: TN/TNFN')
|
||||
ax.set_xlim([0, 1])
|
||||
ax.set_ylim([0, 1])
|
||||
ax.grid(True)
|
||||
ax.set_title('1:1 Precise & Recall')
|
||||
ax.set_xlabel(f"Event Num: {len(one2SNAA)}")
|
||||
ax.legend()
|
||||
plt.show()
|
||||
## ============================= 1:1 均值方案 直方图'''
|
||||
fig, axes = plt.subplots(2, 1)
|
||||
axes[0].hist(np.array(one2SNAA), bins=60, edgecolor='black')
|
||||
axes[0].set_xlim([-0.2, 1])
|
||||
axes[0].set_title('AA')
|
||||
axes[0].set_xlabel(f"Event Num: {len(one2SNAA)}")
|
||||
|
||||
# axes[1].hist(np.array(one2SNAB), bins=60, edgecolor='black')
|
||||
# axes[1].set_xlim([-0.2, 1])
|
||||
# axes[1].set_title('BB')
|
||||
# axes[1].set_xlabel(f"Event Num: {len(one2SNAB)}")
|
||||
# plt.show()
|
||||
axes[1].hist(np.array(one2SNAB), bins=60, edgecolor='black')
|
||||
axes[1].set_xlim([-0.2, 1])
|
||||
axes[1].set_title('BB')
|
||||
axes[1].set_xlabel(f"Event Num: {len(one2SNAB)}")
|
||||
plt.show()
|
||||
|
||||
''''3. ============================= 1:SN 曲线'''
|
||||
fig, ax = plt.subplots()
|
||||
@ -467,9 +414,7 @@ def contrast_pr(evtPaths):
|
||||
ax.plot(Thresh, NRecallX, 'c', label='Recall_Neg: TN/TNFN')
|
||||
ax.set_xlim([0, 1])
|
||||
ax.set_ylim([0, 1])
|
||||
ax.set_xticks(np.arange(0, 1, 0.1))
|
||||
ax.set_yticks(np.arange(0, 1, 0.1))
|
||||
ax.grid(True, linestyle='--')
|
||||
ax.grid(True)
|
||||
ax.set_title('1:SN Precise & Recall')
|
||||
ax.set_xlabel(f"Event Num: {len(one2SNAA)}")
|
||||
ax.legend()
|
||||
@ -499,9 +444,7 @@ def contrast_pr(evtPaths):
|
||||
ax.plot(Thresh, NRecall, 'c', label='Recall_Neg: TN/TNFN')
|
||||
ax.set_xlim([0, 1])
|
||||
ax.set_ylim([0, 1])
|
||||
ax.set_xticks(np.arange(0, 1, 0.1))
|
||||
ax.set_yticks(np.arange(0, 1, 0.1))
|
||||
ax.grid(True, linestyle='--')
|
||||
ax.grid(True)
|
||||
ax.set_title('1:n Precise & Recall')
|
||||
ax.set_xlabel(f"Event Num: {len(tpsimi)+len(fnsimi)}")
|
||||
ax.legend()
|
||||
@ -518,31 +461,31 @@ def contrast_pr(evtPaths):
|
||||
axes[1, 0].set_xlim([-0.2, 1])
|
||||
axes[1, 0].set_title(f'TN({len(tnsimi)})')
|
||||
axes[1, 1].hist(fnsimi, bins=60, edgecolor='black')
|
||||
|
||||
axes[1, 1].set_xlim([-0.2, 1])
|
||||
axes[1, 1].set_title(f'FN({len(fnsimi)})')
|
||||
plt.show()
|
||||
|
||||
# fpsnErrFile = str(paths.joinpath("one2SN_Error.txt"))
|
||||
# with open(fpsnErrFile, "w") as file:
|
||||
# for item in fp_events:
|
||||
# file.write(item + "\n")
|
||||
|
||||
fpsnErrFile = str(paths.joinpath("one2SN_Error.txt"))
|
||||
with open(fpsnErrFile, "w") as file:
|
||||
for item in fp_events:
|
||||
file.write(item + "\n")
|
||||
|
||||
# fpErrFile = str(paths.joinpath("one2n_Error.txt"))
|
||||
# with open(fpErrFile, "w") as file:
|
||||
# for item in fpevents:
|
||||
# file.write(item + "\n")
|
||||
|
||||
fpErrFile = str(paths.joinpath("one2n_Error.txt"))
|
||||
with open(fpErrFile, "w") as file:
|
||||
for item in fpevents:
|
||||
file.write(item + "\n")
|
||||
|
||||
|
||||
|
||||
|
||||
# bcdSet = set(bcdList)
|
||||
|
||||
|
||||
# one2nErrFile = os.path.join(evtPaths, "one_2_Small_n_Error.txt")
|
||||
# one2nErrFile = str(paths.joinpath("one_2_Small_n_Error.txt"))
|
||||
# with open(one2nErrFile, "w") as file:
|
||||
# for item in fnevents:
|
||||
# file.write(item + "\n")
|
||||
|
||||
# one2NErrFile = os.path.join(evtPaths, "one_2_Big_N_Error.txt")
|
||||
# one2NErrFile = str(paths.joinpath("one_2_Big_N_Error.txt"))
|
||||
# with open(one2NErrFile, "w") as file:
|
||||
# for item in fn_events:
|
||||
# file.write(item + "\n")
|
||||
@ -550,8 +493,9 @@ def contrast_pr(evtPaths):
|
||||
print('Done!')
|
||||
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
evtpaths = r"/home/wqg/dataset/test_base_dataset/single_event/source"
|
||||
evtpaths = r"D:\全实时\source_data\2024122416"
|
||||
contrast_pr(evtpaths)
|
||||
|
||||
|
||||
|
@ -57,13 +57,7 @@ def save_imgpairs(barcode, imgpaths, matrix, savepath, thresh=(0.4, 0.6), ctype=
|
||||
|
||||
|
||||
|
||||
def feat_analysis(featpath):
|
||||
'''
|
||||
标准特征集中样本类内、类间相似度分布
|
||||
'''
|
||||
|
||||
|
||||
|
||||
def feat_analysis(featpath):
|
||||
savepath = r"D:\exhibition\result\stdfeat"
|
||||
|
||||
InterThresh = (0.4, 0.6)
|
||||
|
@ -1,172 +0,0 @@
|
||||
#!/usr/bin/env python3
|
||||
# -*- coding: utf-8 -*-
|
||||
"""
|
||||
取出再放回场景下商品轨迹特征比对方式与性能分析
|
||||
|
||||
Created on Tue Apr 1 17:17:47 2025
|
||||
@author: wqg
|
||||
"""
|
||||
import os
|
||||
import pickle
|
||||
import random
|
||||
import numpy as np
|
||||
from pathlib import Path
|
||||
import matplotlib.pyplot as plt
|
||||
from scipy.spatial.distance import cdist
|
||||
from utils.calsimi import calsiml, calsimi_vs_evts
|
||||
|
||||
def read_eventdict(evtpaths):
|
||||
evtDict = {}
|
||||
for filename in os.listdir(evtpaths):
|
||||
evtname, ext = os.path.splitext(filename)
|
||||
if ext != ".pickle": continue
|
||||
|
||||
evtpath = os.path.join(evtpaths, filename)
|
||||
with open(evtpath, 'rb') as f:
|
||||
evtdata = pickle.load(f)
|
||||
evtDict[evtname] = evtdata
|
||||
|
||||
|
||||
return evtDict
|
||||
|
||||
|
||||
|
||||
def compute_show_pr(Same, Cross):
|
||||
TPFN = len(Same)
|
||||
TNFP = len(Cross)
|
||||
|
||||
Recall_Pos, Recall_Neg = [], []
|
||||
Precision_Pos, Precision_Neg = [], []
|
||||
Correct = []
|
||||
Thresh = np.linspace(-0.2, 1, 100)
|
||||
for th in Thresh:
|
||||
TP = np.sum(Same >= th)
|
||||
FN = np.sum(Same < th)
|
||||
# FN = TPFN - TP
|
||||
|
||||
TN = np.sum(Cross < th)
|
||||
FP = np.sum(Cross >= th)
|
||||
# FP = TNFP - TN
|
||||
|
||||
|
||||
Precision_Pos.append(TP/(TP+FP+1e-6))
|
||||
Precision_Neg.append(TN/(TN+FN+1e-6))
|
||||
Recall_Pos.append(TP/(TP+FN+1e-6))
|
||||
Recall_Neg.append(TN/(TN+FP+1e-6))
|
||||
|
||||
# Recall_Pos.append(TP/TPFN)
|
||||
# Recall_Neg.append(TN/TNFP)
|
||||
|
||||
|
||||
Correct.append((TN+TP)/(TPFN+TNFP))
|
||||
|
||||
fig, ax = plt.subplots()
|
||||
|
||||
ax.plot(Thresh, Precision_Pos, 'r', label='Precision_Pos: TP/(TP+FP)')
|
||||
ax.plot(Thresh, Recall_Pos, 'b', label='Recall_Pos: TP/TPFN')
|
||||
ax.plot(Thresh, Recall_Neg, 'g', label='Recall_Neg: TN/TNFP')
|
||||
ax.plot(Thresh, Correct, 'c', label='Correct: (TN+TP)/(TPFN+TNFP)')
|
||||
ax.plot(Thresh, Precision_Neg, 'm', label='Precision_Neg: TN/(TN+FN)')
|
||||
|
||||
ax.set_xlim([0, 1])
|
||||
ax.set_ylim([0, 1])
|
||||
|
||||
ax.set_xticks(np.arange(0, 1, 0.1))
|
||||
ax.set_yticks(np.arange(0, 1, 0.1))
|
||||
ax.grid(True, linestyle='--')
|
||||
|
||||
ax.set_title('PrecisePos & PreciseNeg')
|
||||
ax.set_xlabel(f"Same Num: {TPFN}, Cross Num: {TNFP}")
|
||||
ax.legend()
|
||||
plt.show()
|
||||
|
||||
# rltpath = os.path.join(similPath, f'pr_1to1_{simType}.png')
|
||||
# plt.savefig(rltpath) # svg, png, pdf
|
||||
|
||||
|
||||
fig, axes = plt.subplots(2,1)
|
||||
axes[0].hist(Same, bins=60, range=(-0.2, 1), edgecolor='black')
|
||||
axes[0].set_xlim([-0.2, 1])
|
||||
axes[0].set_title(f'TP({len(Same)})')
|
||||
|
||||
axes[1].hist(Cross, bins=60, range=(-0.2, 1), edgecolor='black')
|
||||
axes[1].set_xlim([-0.2, 1])
|
||||
axes[1].set_title(f'TN({len(Cross)})')
|
||||
|
||||
# rltpath = os.path.join(similPath, f'hist_1to1_{simType}.png')
|
||||
# plt.savefig(rltpath)
|
||||
|
||||
|
||||
plt.show()
|
||||
|
||||
|
||||
|
||||
def trail_to_trail(evtpaths, rltpaths):
|
||||
# select the method type of how to calculate the feat similarity of trail
|
||||
simType = 2
|
||||
|
||||
##1. read all the ShoppingEvent object in the dir 'evtpaths'
|
||||
evtDicts = read_eventdict(evtpaths)
|
||||
|
||||
##2. Combine event object with the same barcode
|
||||
barcodes, evtpairDict = [], {}
|
||||
for k in evtDicts.keys():
|
||||
evt = k.split('_')
|
||||
condt = len(evt)>=2 and evt[-1].isdigit() and len(evt[-1])>=10
|
||||
if not condt: continue
|
||||
|
||||
barcode = evt[-1]
|
||||
if barcode not in evtpairDict.keys():
|
||||
evtpairDict[barcode] = []
|
||||
barcodes.append(barcode)
|
||||
|
||||
evtpairDict[barcode].append(evtDicts[k])
|
||||
barcodes = set(barcodes)
|
||||
|
||||
AA_list, AB_list = [], []
|
||||
for barcode in evtpairDict.keys():
|
||||
events = evtpairDict[barcode]
|
||||
if len(events)>1:
|
||||
evta, evtb = random.sample(events, 2)
|
||||
AA_list.append((evta, evtb, "same"))
|
||||
|
||||
evtc = random.sample(events, 1)[0]
|
||||
|
||||
dset = list(barcodes.symmetric_difference(set([barcode])))
|
||||
bcd = random.sample(dset, 1)[0]
|
||||
evtd = random.sample(evtpairDict[bcd], 1)[0]
|
||||
AB_list.append((evtc, evtd, "diff"))
|
||||
|
||||
mergePairs = AA_list + AB_list
|
||||
|
||||
##3. calculate the similar of two event: evta, evtb
|
||||
new_pirs = []
|
||||
for evta, evtb, label in mergePairs:
|
||||
similar = calsimi_vs_evts(evta, evtb, simType)
|
||||
if similar is None:
|
||||
continue
|
||||
new_pirs.append((label, round(similar, 3), evta.evtname[:15], evtb.evtname[:15]))
|
||||
|
||||
##4. compute PR and showing
|
||||
Same = np.array([s for label, s, _, _ in new_pirs if label=="same"])
|
||||
Cross = np.array([s for label, s, _, _ in new_pirs if label=="diff"])
|
||||
compute_show_pr(Same, Cross)
|
||||
|
||||
|
||||
def main():
|
||||
evttypes = ["single_event_V10", "single_event_V5", "performence_V10", "performence_V5"]
|
||||
# evttypes = ["single_event_V10"]
|
||||
|
||||
for evttype in evttypes:
|
||||
evtpaths = "/home/wqg/dataset/pipeline/contrast/{}/evtobjs/".format(evttype)
|
||||
rltpaths = "/home/wqg/dataset/pipeline/yrt/{}/yolos_tracking".format(evttype)
|
||||
|
||||
trail_to_trail(evtpaths, rltpaths)
|
||||
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
|
||||
|
||||
|
@ -1,216 +0,0 @@
|
||||
#!/usr/bin/env python3
|
||||
# -*- coding: utf-8 -*-
|
||||
"""
|
||||
Created on Mon Mar 31 16:25:43 2025
|
||||
|
||||
@author: wqg
|
||||
"""
|
||||
import numpy as np
|
||||
from scipy.spatial.distance import cdist
|
||||
|
||||
|
||||
def get_topk_percent(data, k):
    """
    Return the largest k% of the elements in data.
    """
    # Convert the input to a NumPy array
    if isinstance(data, list):
        data = np.array(data)

    percentile = np.percentile(data, 100-k)
    top_k_percent = data[data >= percentile]

    return top_k_percent
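For instance (illustrative numbers only), with k=25 the helper keeps the values at or above the 75th percentile:

vals = [0.2, 0.4, 0.6, 0.8]
print(get_topk_percent(vals, 25))   # [0.8]; the 75th percentile of vals is 0.65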
def cluster(data, thresh=0.15):
    # data = np.array([0.1, 0.13, 0.7, 0.2, 0.8, 0.52, 0.3, 0.7, 0.85, 0.58])
    # data = np.array([0.1, 0.13, 0.2, 0.3])
    # data = np.array([0.1])

    if isinstance(data, list):
        data = np.array(data)

    data1 = np.sort(data)
    cluter, Cluters = [data1[0]], []
    for i in range(1, len(data1)):
        if data1[i] - data1[i-1] < thresh:
            cluter.append(data1[i])
        else:
            Cluters.append(cluter)
            cluter = [data1[i]]
    Cluters.append(cluter)

    clt_center = []
    for clt in Cluters:
        ## Should a minimum number of trajectory samples per cluster be enforced here?
        ## That constraint is better handled in the trajectory analysis itself.
        # if len(clt)>=3:
        #     clt_center.append(np.mean(clt))
        clt_center.append(np.mean(clt))

    # print(clt_center)

    return clt_center
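A quick worked example of the gap-based grouping, using the sample values from the commented-out line above: the sorted data is split wherever two consecutive values differ by more than thresh, and one mean is returned per group.

data = [0.1, 0.13, 0.7, 0.2, 0.8, 0.52, 0.3, 0.7, 0.85, 0.58]
# Sorted, the gap between 0.3 and 0.52 exceeds 0.15, so two groups form:
# [0.1, 0.13, 0.2, 0.3] and [0.52, 0.58, 0.7, 0.7, 0.8, 0.85].
print(cluster(data, thresh=0.15))   # approximately [0.1825, 0.6917]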
def calsiml(feat1, feat2, topkp=75, cluth=0.15):
    '''Selection strategy for the similarity between trajectory samples and standard feature-set samples.'''
    matrix = 1 - cdist(feat1, feat2, 'cosine')
    simi_max = []
    for i in range(len(matrix)):
        sim = np.mean(get_topk_percent(matrix[i, :], topkp))
        simi_max.append(sim)
    cltc_max = cluster(simi_max, cluth)
    Simi = max(cltc_max)

    ## An empty cltc_max would indicate a programming oversight and should be tracked down.
    # if len(cltc_max):
    #     Simi = max(cltc_max)
    # else:
    #     Simi = 0  # should never be reached

    return Simi
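A minimal usage sketch of calsiml that ties the two helpers above together; the feature arrays below are random placeholders standing in for L2-normalised 256-D trajectory and standard-library features:

import numpy as np

rng = np.random.default_rng(0)
feat1 = rng.normal(size=(20, 256))   # hypothetical trajectory features
feat2 = rng.normal(size=(50, 256))   # hypothetical standard-set features
feat1 /= np.linalg.norm(feat1, axis=1)[:, None]
feat2 /= np.linalg.norm(feat2, axis=1)[:, None]

# Per trajectory sample: mean of its top-75% similarities; the per-sample scores
# are then clustered and the highest cluster centre is taken as the result.
print(round(calsiml(feat1, feat2, topkp=75, cluth=0.15), 3))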
def calsimi_vs_stdfeat_new(event, stdfeat):
    '''Comparison strategy between an event and the standard library.
    Could this strategy also be extended to event-vs-event comparison?
    '''
    front_boxes = np.empty((0, 9), dtype=np.float64)    ## compatible with the doTracks class
    front_feats = np.empty((0, 256), dtype=np.float64)  ## compatible with the doTracks class
    for i in range(len(event.front_boxes)):
        front_boxes = np.concatenate((front_boxes, event.front_boxes[i]), axis=0)
        front_feats = np.concatenate((front_feats, event.front_feats[i]), axis=0)

    back_boxes = np.empty((0, 9), dtype=np.float64)    ## compatible with the doTracks class
    back_feats = np.empty((0, 256), dtype=np.float64)  ## compatible with the doTracks class
    for i in range(len(event.back_boxes)):
        back_boxes = np.concatenate((back_boxes, event.back_boxes[i]), axis=0)
        back_feats = np.concatenate((back_feats, event.back_feats[i]), axis=0)

    front_simi, back_simi = None, None
    if len(front_feats):
        front_simi = calsiml(front_feats, stdfeat)
    if len(back_feats):
        back_simi = calsiml(back_feats, stdfeat)

    '''Fusion strategy for the front- and back-camera similarities'''
    if len(front_feats) and len(back_feats):
        diff_simi = abs(front_simi - back_simi)
        if diff_simi > 0.15:
            Similar = max([front_simi, back_simi])
        else:
            Similar = (front_simi + back_simi) / 2
    elif len(front_feats) and len(back_feats)==0:
        Similar = front_simi
    elif len(front_feats)==0 and len(back_feats):
        Similar = back_simi
    else:
        Similar = None  # when event.front_feats and event.back_feats are both empty

    return Similar, front_simi, back_simi
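The front/back fusion rule above (repeated later in calsimi_vs_evts) can be written as a small standalone helper; this is only a sketch of the same decision logic, not a function from the original file:

def fuse_front_back(front_simi, back_simi, gap=0.15):
    # Hypothetical helper: with two camera scores, trust the larger one when they
    # disagree by more than `gap`, otherwise average them; with a single score,
    # pass it through; with none, return None.
    if front_simi is not None and back_simi is not None:
        if abs(front_simi - back_simi) > gap:
            return max(front_simi, back_simi)
        return (front_simi + back_simi) / 2
    if front_simi is not None:
        return front_simi
    return back_simi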
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
def calsimi_vs_stdfeat(event, stdfeat):
|
||||
evtfeat = event.feats_compose
|
||||
if isinstance(event.feats_select, list):
|
||||
if len(event.feats_select) and len(event.feats_select[0]):
|
||||
evtfeat = event.feats_select[0]
|
||||
else:
|
||||
return None, None, None
|
||||
else:
|
||||
evtfeat = event.feats_select
|
||||
|
||||
if len(evtfeat)==0 or len(stdfeat)==0:
|
||||
return None, None, None
|
||||
|
||||
|
||||
evtfeat /= np.linalg.norm(evtfeat, axis=1)[:, None]
|
||||
stdfeat /= np.linalg.norm(stdfeat, axis=1)[:, None]
|
||||
|
||||
matrix = 1 - cdist(evtfeat, stdfeat, 'cosine')
|
||||
matrix[matrix < 0] = 0
|
||||
|
||||
simi_mean = np.mean(matrix)
|
||||
simi_max = np.max(matrix)
|
||||
stdfeatm = np.mean(stdfeat, axis=0, keepdims=True)
|
||||
evtfeatm = np.mean(evtfeat, axis=0, keepdims=True)
|
||||
simi_mfeat = 1- np.maximum(0.0, cdist(stdfeatm, evtfeatm, 'cosine'))
|
||||
|
||||
return simi_mean, simi_max, simi_mfeat[0,0]
|
||||
|
||||
|
||||
def calsimi_vs_evts(evta, evtb, simType=1):
|
||||
if simType==1:
|
||||
if len(evta.feats_compose) and len(evtb.feats_compose):
|
||||
feata = evta.feats_compose
|
||||
featb = evtb.feats_compose
|
||||
matrix = 1 - cdist(feata, featb, 'cosine')
|
||||
similar = np.mean(matrix)
|
||||
else:
|
||||
similar = None
|
||||
return similar
|
||||
|
||||
if simType==2:
|
||||
if len(evta.feats_compose) and len(evtb.feats_compose):
|
||||
feata = evta.feats_compose
|
||||
featb = evtb.feats_compose
|
||||
matrix = 1 - cdist(feata, featb, 'cosine')
|
||||
similar = np.max(matrix)
|
||||
else:
|
||||
similar = None
|
||||
return similar
|
||||
|
||||
if simType==3:
|
||||
if len(evta.feats_compose) and len(evtb.feats_compose):
|
||||
feata = evta.feats_compose
|
||||
featb = evtb.feats_compose
|
||||
similar = calsiml(feata, featb)
|
||||
else:
|
||||
similar = None
|
||||
return similar
|
||||
|
||||
|
||||
##1. the front feats of evta, evtb
|
||||
fr_feata = np.empty((0, 256), dtype=np.float64) ##和类doTracks兼容
|
||||
for i in range(len(evta.front_feats)):
|
||||
fr_feata = np.concatenate((fr_feata, evta.front_feats[i]), axis=0)
|
||||
|
||||
fr_featb = np.empty((0, 256), dtype=np.float64) ##和类doTracks兼容
|
||||
for i in range(len(evtb.front_feats)):
|
||||
fr_featb = np.concatenate((fr_featb, evtb.front_feats[i]), axis=0)
|
||||
|
||||
##2. the back feats of evta, evtb
|
||||
bk_feata = np.empty((0, 256), dtype=np.float64) ##和类doTracks兼容
|
||||
for i in range(len(evta.back_feats)):
|
||||
bk_feata = np.concatenate((bk_feata, evta.back_feats[i]), axis=0)
|
||||
|
||||
bk_featb = np.empty((0, 256), dtype=np.float64) ##和类doTracks兼容
|
||||
for i in range(len(evtb.back_feats)):
|
||||
bk_featb = np.concatenate((bk_featb, evtb.back_feats[i]), axis=0)
|
||||
|
||||
|
||||
front_simi, back_simi = None, None
|
||||
if len(fr_feata) and len(fr_featb):
|
||||
front_simi = calsiml(fr_feata, fr_featb)
|
||||
|
||||
if len(bk_feata) and len(bk_featb):
|
||||
back_simi = calsiml(bk_feata, bk_featb)
|
||||
|
||||
'''前后摄相似度融合策略'''
|
||||
if front_simi is not None and back_simi is not None:
|
||||
diff_simi = abs(front_simi - back_simi)
|
||||
if diff_simi>0.15:
|
||||
similar = max([front_simi, back_simi])
|
||||
else:
|
||||
similar = (front_simi+back_simi)/2
|
||||
elif front_simi is not None and back_simi is None:
|
||||
similar = front_simi
|
||||
elif front_simi is None and back_simi is not None:
|
||||
similar = back_simi
|
||||
else:
|
||||
similar = None # 在event.front_feats和event.back_feats同时为空时
|
||||
|
||||
return similar
|
||||
|
||||
|
@ -1,127 +0,0 @@
|
||||
#!/usr/bin/env python3
|
||||
# -*- coding: utf-8 -*-
|
||||
"""
|
||||
Created on Tue Apr 1 16:43:04 2025
|
||||
|
||||
@author: wqg
|
||||
"""
|
||||
import os
|
||||
import pickle
|
||||
import numpy as np
|
||||
from scipy.spatial.distance import cdist
|
||||
|
||||
|
||||
def int8_to_ft16(arr_uint8, amin, amax):
    arr_ft16 = (arr_uint8 / 255 * (amax-amin) + amin).astype(np.float16)

    return arr_ft16


def ft16_to_uint8(arr_ft16):
    # pickpath = r"\\192.168.1.28\share\测试_202406\contrast\std_features_ft32vsft16\6902265587712_ft16.pickle"

    # with open(pickpath, 'rb') as f:
    #     edict = pickle.load(f)

    # arr_ft16 = edict['feats']

    amin = np.min(arr_ft16)
    amax = np.max(arr_ft16)
    arr_ft255 = (arr_ft16 - amin) * 255 / (amax-amin)
    arr_uint8 = arr_ft255.astype(np.uint8)

    arr_ft16_ = int8_to_ft16(arr_uint8, amin, amax)

    arrDistNorm = np.linalg.norm(arr_ft16_ - arr_ft16) / arr_ft16_.size

    return arr_uint8, arr_ft16_
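A minimal round-trip check of the two converters above, with a random float16 block standing in for real features (shape and values are placeholders):

import numpy as np

arr_ft16 = np.random.rand(8, 256).astype(np.float16)   # hypothetical feature block
arr_uint8, arr_back = ft16_to_uint8(arr_ft16)           # 8-bit encoding and its decoded copy
err = np.linalg.norm(arr_back.astype(np.float32) - arr_ft16.astype(np.float32)) / arr_ft16.size
print(arr_uint8.dtype, err)   # uint8, small per-element reconstruction error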
|
||||
|
||||
|
||||
def data_precision_compare(stdfeat, evtfeat, evtMessage, similPath='', save=True):
|
||||
evt, stdbcd, label = evtMessage
|
||||
rltdata, rltdata_ft16, rltdata_ft16_ = [], [], []
|
||||
|
||||
matrix = 1 - cdist(stdfeat, evtfeat, 'cosine')
|
||||
simi_mean = np.mean(matrix)
|
||||
simi_max = np.max(matrix)
|
||||
stdfeatm = np.mean(stdfeat, axis=0, keepdims=True)
|
||||
evtfeatm = np.mean(evtfeat, axis=0, keepdims=True)
|
||||
simi_mfeat = 1- np.maximum(0.0, cdist(stdfeatm, evtfeatm, 'cosine'))
|
||||
rltdata = [label, stdbcd, evt, simi_mean, simi_max, simi_mfeat[0,0]]
|
||||
|
||||
|
||||
##================================================================= float16
|
||||
stdfeat_ft16 = stdfeat.astype(np.float16)
|
||||
evtfeat_ft16 = evtfeat.astype(np.float16)
|
||||
stdfeat_ft16 /= np.linalg.norm(stdfeat_ft16, axis=1)[:, None]
|
||||
evtfeat_ft16 /= np.linalg.norm(evtfeat_ft16, axis=1)[:, None]
|
||||
|
||||
|
||||
matrix_ft16 = 1 - cdist(stdfeat_ft16, evtfeat_ft16, 'cosine')
|
||||
simi_mean_ft16 = np.mean(matrix_ft16)
|
||||
simi_max_ft16 = np.max(matrix_ft16)
|
||||
stdfeatm_ft16 = np.mean(stdfeat_ft16, axis=0, keepdims=True)
|
||||
evtfeatm_ft16 = np.mean(evtfeat_ft16, axis=0, keepdims=True)
|
||||
simi_mfeat_ft16 = 1- np.maximum(0.0, cdist(stdfeatm_ft16, evtfeatm_ft16, 'cosine'))
|
||||
rltdata_ft16 = [label, stdbcd, evt, simi_mean_ft16, simi_max_ft16, simi_mfeat_ft16[0,0]]
|
||||
|
||||
'''****************** uint8 is ok!!!!!! ******************'''
|
||||
##=================================================================== uint8
|
||||
# stdfeat_uint8, stdfeat_ft16_ = ft16_to_uint8(stdfeat_ft16)
|
||||
# evtfeat_uint8, evtfeat_ft16_ = ft16_to_uint8(evtfeat_ft16)
|
||||
|
||||
stdfeat_uint8 = (stdfeat_ft16*128).astype(np.int8)
|
||||
evtfeat_uint8 = (evtfeat_ft16*128).astype(np.int8)
|
||||
stdfeat_ft16_ = stdfeat_uint8.astype(np.float16)/128
|
||||
evtfeat_ft16_ = evtfeat_uint8.astype(np.float16)/128
|
||||
|
||||
absdiff = np.linalg.norm(stdfeat_ft16_ - stdfeat) / stdfeat.size
|
||||
|
||||
matrix_ft16_ = 1 - cdist(stdfeat_ft16_, evtfeat_ft16_, 'cosine')
|
||||
simi_mean_ft16_ = np.mean(matrix_ft16_)
|
||||
simi_max_ft16_ = np.max(matrix_ft16_)
|
||||
stdfeatm_ft16_ = np.mean(stdfeat_ft16_, axis=0, keepdims=True)
|
||||
evtfeatm_ft16_ = np.mean(evtfeat_ft16_, axis=0, keepdims=True)
|
||||
simi_mfeat_ft16_ = 1- np.maximum(0.0, cdist(stdfeatm_ft16_, evtfeatm_ft16_, 'cosine'))
|
||||
rltdata_ft16_ = [label, stdbcd, evt, simi_mean_ft16_, simi_max_ft16_, simi_mfeat_ft16_[0,0]]
|
||||
|
||||
if not save:
|
||||
return
|
||||
|
||||
|
||||
##========================================================= save as float32
|
||||
rppath = os.path.join(similPath, f'{evt}_ft32.pickle')
|
||||
with open(rppath, 'wb') as f:
|
||||
pickle.dump(rltdata, f)
|
||||
|
||||
rtpath = os.path.join(similPath, f'{evt}_ft32.txt')
|
||||
with open(rtpath, 'w', encoding='utf-8') as f:
|
||||
for result in rltdata:
|
||||
part = [f"{x:.3f}" if isinstance(x, float) else str(x) for x in result]
|
||||
line = ', '.join(part)
|
||||
f.write(line + '\n')
|
||||
|
||||
|
||||
##========================================================= save as float16
|
||||
rppath_ft16 = os.path.join(similPath, f'{evt}_ft16.pickle')
|
||||
with open(rppath_ft16, 'wb') as f:
|
||||
pickle.dump(rltdata_ft16, f)
|
||||
|
||||
rtpath_ft16 = os.path.join(similPath, f'{evt}_ft16.txt')
|
||||
with open(rtpath_ft16, 'w', encoding='utf-8') as f:
|
||||
for result in rltdata_ft16:
|
||||
part = [f"{x:.3f}" if isinstance(x, float) else str(x) for x in result]
|
||||
line = ', '.join(part)
|
||||
f.write(line + '\n')
|
||||
|
||||
|
||||
##=========================================================== save as uint8
|
||||
rppath_uint8 = os.path.join(similPath, f'{evt}_uint8.pickle')
|
||||
with open(rppath_uint8, 'wb') as f:
|
||||
pickle.dump(rltdata_ft16_, f)
|
||||
|
||||
rtpath_uint8 = os.path.join(similPath, f'{evt}_uint8.txt')
|
||||
with open(rtpath_uint8, 'w', encoding='utf-8') as f:
|
||||
for result in rltdata_ft16_:
|
||||
part = [f"{x:.3f}" if isinstance(x, float) else str(x) for x in result]
|
||||
line = ', '.join(part)
|
||||
f.write(line + '\n')
|
@ -5,30 +5,18 @@ Created on Tue Nov 26 17:35:05 2024
|
||||
@author: ym
|
||||
"""
|
||||
import os
|
||||
import sys
|
||||
import cv2
|
||||
import pickle
|
||||
import numpy as np
|
||||
from pathlib import Path
|
||||
|
||||
FILE = Path(__file__).resolve()
|
||||
ROOT = FILE.parents[2] # YOLOv5 root directory
|
||||
if str(ROOT) not in sys.path:
|
||||
sys.path.append(str(ROOT))
|
||||
|
||||
import sys
|
||||
sys.path.append(r"D:\DetectTracking")
|
||||
from tracking.utils.plotting import Annotator, colors
|
||||
from tracking.utils.drawtracks import drawTrack
|
||||
from tracking.utils.read_data import extract_data, read_tracking_output, read_similar
|
||||
from tracking.utils.read_data import extract_data_realtime, read_tracking_output_realtime
|
||||
|
||||
|
||||
|
||||
|
||||
# import platform
|
||||
# import pathlib
|
||||
# plt = platform.system()
|
||||
|
||||
|
||||
IMG_FORMAT = ['.bmp', '.jpg', '.jpeg', '.png']
|
||||
VID_FORMAT = ['.mp4', '.avi']
|
||||
|
||||
@ -88,11 +76,7 @@ def array2list(bboxes):
|
||||
|
||||
class ShoppingEvent:
|
||||
def __init__(self, eventpath, stype="data"):
|
||||
'''stype: str, 'source', 'data', 'realtime', 共三种
|
||||
source: 前后摄视频经 pipeline 生成的文件
|
||||
data: 基于事件切分的原 data 文件版本
|
||||
realtime: 全实时生成的 data 文件
|
||||
'''
|
||||
'''stype: str, 'source', 'data', 'realtime', 共三种 '''
|
||||
|
||||
self.eventpath = eventpath
|
||||
self.evtname = str(Path(eventpath).stem)
|
||||
@ -182,9 +166,7 @@ class ShoppingEvent:
|
||||
return kdata, outdata
|
||||
|
||||
|
||||
def from_source_pkl(self, eventpath):
|
||||
# if plt == 'Windows':
|
||||
# pathlib.PosixPath = pathlib.WindowsPath
|
||||
def from_source_pkl(self, eventpath):
|
||||
with open(eventpath, 'rb') as f:
|
||||
ShoppingDict = pickle.load(f)
|
||||
|
||||
@ -314,13 +296,13 @@ class ShoppingEvent:
|
||||
self.front_feats = tracking_output_feats
|
||||
|
||||
def from_realtime_datafile(self, eventpath):
|
||||
evtList = self.evtname.split('_')
|
||||
if len(evtList)>=2 and len(evtList[-1])>=10 and evtList[-1].isdigit():
|
||||
self.barcode = evtList[-1]
|
||||
if len(evtList)==3 and evtList[-1]== evtList[-2]:
|
||||
self.evtType = 'input'
|
||||
else:
|
||||
self.evtType = 'other'
|
||||
# evtList = self.evtname.split('_')
|
||||
# if len(evtList)>=2 and len(evtList[-1])>=10 and evtList[-1].isdigit():
|
||||
# self.barcode = evtList[-1]
|
||||
# if len(evtList)==3 and evtList[-1]== evtList[-2]:
|
||||
# self.evtType = 'input'
|
||||
# else:
|
||||
# self.evtType = 'other'
|
||||
|
||||
'''================ path of video ============='''
|
||||
for vidname in os.listdir(eventpath):
|
||||
@ -348,7 +330,7 @@ class ShoppingEvent:
|
||||
if not os.path.isfile(datapath): continue
|
||||
CamerType = dataname.split('_')[0]
|
||||
'''========== 0/1_track.data =========='''
|
||||
if dataname.find("_tracker.data")>0:
|
||||
if dataname.find("_track.data")>0:
|
||||
trackerboxes, trackerfeats = extract_data_realtime(datapath)
|
||||
if CamerType == '0':
|
||||
self.back_trackerboxes = trackerboxes
|
||||
|
@ -4,81 +4,8 @@ Created on Thu Oct 31 15:17:01 2024
|
||||
|
||||
@author: ym
|
||||
"""
|
||||
import os
|
||||
import numpy as np
|
||||
import pickle
|
||||
from pathlib import Path
|
||||
import matplotlib.pyplot as plt
|
||||
from .event import ShoppingEvent
|
||||
|
||||
def init_eventDict(sourcePath, eventDataPath, stype="data"):
|
||||
'''
|
||||
stype: str,
|
||||
'source': 由 videos 或 images 生成的 pickle 文件
|
||||
'data': 从 data 文件中读取的现场运行数据
|
||||
"realtime": 全实时数据,从 data 文件中读取的现场运行数据
|
||||
|
||||
sourcePath:事件文件夹,事件类型包含2种:
|
||||
(1) pipeline生成的 pickle 文件
|
||||
(2) 直接采集的事件文件夹
|
||||
'''
|
||||
k, errEvents = 0, []
|
||||
for evtname in os.listdir(sourcePath):
|
||||
bname, ext = os.path.splitext(evtname)
|
||||
source_path = os.path.join(sourcePath, evtname)
|
||||
|
||||
if stype=="source" and ext not in ['.pkl', '.pickle']: continue
|
||||
if stype=="data" and os.path.isfile(source_path): continue
|
||||
if stype=="realtime" and os.path.isfile(source_path): continue
|
||||
|
||||
evt = bname.split('_')
|
||||
condt = len(evt)>=2 and evt[-1].isdigit() and len(evt[-1])>=10
|
||||
if not condt: continue
|
||||
|
||||
pickpath = os.path.join(eventDataPath, f"{bname}.pickle")
|
||||
if os.path.isfile(pickpath): continue
|
||||
|
||||
# event = ShoppingEvent(source_path, stype)
|
||||
try:
|
||||
event = ShoppingEvent(source_path, stype)
|
||||
with open(pickpath, 'wb') as f:
|
||||
pickle.dump(event, f)
|
||||
print(evtname)
|
||||
except Exception as e:
|
||||
errEvents.append(source_path)
|
||||
print(f"Error: {evtname}, {e}")
|
||||
# k += 1
|
||||
# if k==1:
|
||||
# break
|
||||
|
||||
errfile = Path(eventDataPath).parent / 'error_events.txt'
|
||||
with open(str(errfile), 'a', encoding='utf-8') as f:
|
||||
for line in errEvents:
|
||||
f.write(line + '\n')
|
||||
|
||||
|
||||
def get_evtList(evtpath):
|
||||
'''==== 0. 生成事件列表和对应的 Barcodes 集合 ==========='''
|
||||
bcdList, evtpaths = [], []
|
||||
for evtname in os.listdir(evtpath):
|
||||
bname, ext = os.path.splitext(evtname)
|
||||
|
||||
## 处理事件的两种情况:文件夹 和 Yolo-Resnet-Tracker 的输出
|
||||
fpath = os.path.join(evtpath, evtname)
|
||||
if os.path.isfile(fpath) and (ext==".pkl" or ext==".pickle"):
|
||||
evt = bname.split('_')
|
||||
elif os.path.isdir(fpath):
|
||||
evt = evtname.split('_')
|
||||
else:
|
||||
continue
|
||||
|
||||
if len(evt)>=2 and evt[-1].isdigit() and len(evt[-1])>=10:
|
||||
bcdList.append(evt[-1])
|
||||
evtpaths.append(fpath)
|
||||
|
||||
bcdSet = set(bcdList)
|
||||
|
||||
return evtpaths, bcdSet
|
||||
|
||||
|
||||
|
||||
|
23239
dataPair_test.ipynb
Normal file
@ -1,72 +0,0 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
"""
|
||||
Created on Mon Mar 10 09:33:35 2025
|
||||
基准数据集筛选,选取tracking输出多个轨迹的事件
|
||||
|
||||
@author: ym
|
||||
"""
|
||||
import os
|
||||
import numpy as np
|
||||
|
||||
import sys
|
||||
sys.path.append(r"D:\DetectTracking")
|
||||
from tracking.utils.read_data import extract_data, read_tracking_output_realtime
|
||||
|
||||
|
||||
def get_multitraj_file(spath, pattern):
|
||||
multi_traj_events = []
|
||||
n = 0
|
||||
for evtname in os.listdir(spath):
|
||||
name, ext = os.path.splitext(evtname)
|
||||
eventpath = os.path.join(spath, evtname)
|
||||
|
||||
evt = name.split('_')
|
||||
condt = len(evt)>=2 and evt[-1].isdigit() and len(evt[-1])>=8
|
||||
if not condt: continue
|
||||
if not os.path.isdir(eventpath): continue
|
||||
|
||||
trackingboxes = []
|
||||
for dataname in os.listdir(eventpath):
|
||||
if os.path.splitext(dataname)[-1] in [".jpg", ".png"]:
|
||||
continue
|
||||
|
||||
datapath = os.path.join(eventpath, dataname)
|
||||
if not os.path.isfile(datapath): continue
|
||||
CamerType = dataname.split('_')[0]
|
||||
|
||||
if pattern=="realtime" and dataname.find("_tracking_output.data")>0:
|
||||
trackingboxes, trackingfeats, tracking_outboxes, tracking_outfeats = read_tracking_output_realtime(datapath)
|
||||
if pattern=="evtsplit" and dataname.find("_track.data")>0:
|
||||
bboxes, ffeats, trackerboxes, trackerfeats, trackingboxes, trackingfeats = extract_data(datapath)
|
||||
|
||||
if len(trackingboxes)>=2:
|
||||
multi_traj_events.append(evtname)
|
||||
n += 1
|
||||
print(f"{n}: {evtname}")
|
||||
break
|
||||
|
||||
|
||||
multi_traj_file = os.path.join(spath, "multi_traj_file.txt")
|
||||
with open(multi_traj_file, "w") as file:
|
||||
for item in multi_traj_events:
|
||||
file.write(item + "\n")
|
||||
|
||||
def main():
|
||||
spaths = [r"\\192.168.1.28\share\测试视频数据以及日志\各模块测试记录\比对测试\1212",
|
||||
r"\\192.168.1.28\share\测试视频数据以及日志\各模块测试记录\比对测试\1216",
|
||||
r"\\192.168.1.28\share\测试视频数据以及日志\各模块测试记录\比对测试\1218",
|
||||
r"\\192.168.1.28\share\测试视频数据以及日志\各模块测试记录\比对测试\202412"
|
||||
]
|
||||
|
||||
pattern = "evtsplit" # realtime # 全实时版、事件切分版数据读取方式
|
||||
for spath in spaths:
|
||||
get_multitraj_file(spath, pattern)
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
@ -1,41 +0,0 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
"""
|
||||
Created on Fri Mar 28 11:35:28 2025
|
||||
|
||||
@author: ym
|
||||
"""
|
||||
|
||||
from pipeline import execute_pipeline
|
||||
|
||||
|
||||
def execute(datapath, savepath_v5, savepath_v10):
|
||||
execute_pipeline(evtdir = datapath,
|
||||
DataType = "raw", # raw, pkl
|
||||
kk=None,
|
||||
source_type = "video", # video, image,
|
||||
save_path = savepath_v5,
|
||||
yolo_ver = "V5", # V10, V5
|
||||
weight_yolo_v5 = r'./ckpts/best_cls10_0906.pt' ,
|
||||
weight_yolo_v10 = r'./ckpts/best_v10s_width0375_1205.pt',
|
||||
saveimages = False
|
||||
)
|
||||
execute_pipeline(evtdir = datapath,
|
||||
DataType = "raw", # raw, pkl
|
||||
kk=None,
|
||||
source_type = "video", # video, image,
|
||||
save_path = savepath_v10,
|
||||
yolo_ver = "V10", # V10, V5
|
||||
weight_yolo_v5 = r'./ckpts/best_cls10_0906.pt' ,
|
||||
weight_yolo_v10 = r'./ckpts/best_v10s_width0375_1205.pt',
|
||||
saveimages = False
|
||||
)
|
||||
|
||||
datapath = r'/home/wqg/dataset/test_dataset/base_dataset/single_event/source/'
|
||||
savepath_v5 = r'/home/wqg/dataset/pipeline/contrast/single_event_V5'
|
||||
savepath_v10 = r'/home/wqg/dataset/pipeline/contrast/single_event_V10'
|
||||
execute(datapath, savepath_v5, savepath_v10)
|
||||
|
||||
datapath = r'/home/wqg/dataset/test_performence_dataset/'
|
||||
savepath_v5 = r'/home/wqg/dataset/pipeline/contrast/performence_V5'
|
||||
savepath_v10 = r'/home/wqg/dataset/pipeline/contrast/performence_V10'
|
||||
execute(datapath, savepath_v5, savepath_v10)
|
BIN
images/20250123160635.jpg
Normal file
After: 55 KiB
BIN
images/34414.png
Normal file
After: 46 KiB
BIN
images/6917935002150.png
Normal file
After: 58 KiB
BIN
images/6917935002150_std.png
Normal file
After: 416 KiB
BIN
images/6920584471215.png
Normal file
After: 35 KiB
BIN
images/6925819700245.jpg
Normal file
After: 190 KiB
BIN
images/6931941252224.png
Normal file
After: 126 KiB
BIN
images/6934129300472.png
Normal file
After: 69 KiB
BIN
images/6942070231936.jpg
Normal file
After: 204 KiB
BIN
images/carton_tw_asw_竹炭深潔_770.png
Normal file
After: 181 KiB
BIN
images/image1.png
Normal file
After: 434 KiB
BIN
images/output.png
Normal file
After: 103 KiB
BIN
images/pair1/20250211100406.jpg
Normal file
After: 45 KiB
BIN
images/pair1/6924743915886.jpg
Normal file
After: 321 KiB
BIN
images/pair2/6903244682954.jpg
Normal file
After: 47 KiB
127
imgs_to_video.py
@ -1,127 +0,0 @@
# -*- coding: utf-8 -*-
"""
Created on Tue Jan 30 19:15:05 2024

@author: ym
"""
import cv2
import os
import glob

IMG_FORMATS = "bmp", "dng", "jpeg", "jpg", "mpo", "png", "tif", "tiff", "webp", "pfm"  # include image suffixes
VID_FORMATS = "asf", "avi", "gif", "m4v", "mkv", "mov", "mp4", "mpeg", "mpg", "ts", "wmv"  # include video suffixes


def for_test():
    # relies on module-level video_path, img_path and path globals being set by the caller
    save_path = video_path + img_path

    fps, w, h = 10, 1024, 1280
    cap = cv2.VideoWriter(save_path, cv2.VideoWriter_fourcc(*'mp4v'), fps, (w, h))

    pathx = path + img_path
    imgfiles = [f for f in os.listdir(pathx) if "_cut" not in f]  # skip the "_cut" crops

    imgfiles.sort(key=lambda x: int(x[:-5]))  # numeric sort after stripping the 5-character suffix (e.g. ".jpeg")
    imgpaths = []
    for imgfile in imgfiles:
        imgpaths.append(os.path.join(pathx, imgfile))

    # rotate each 1280x1024 frame by -90 degrees before writing it
    center = (1280 / 2, 1024 / 2)
    rotate_matrix = cv2.getRotationMatrix2D(center=center, angle=-90, scale=1)
    k = 0
    for ipath in imgpaths:
        img = cv2.imread(ipath)
        rotated_image = cv2.warpAffine(src=img, M=rotate_matrix, dsize=(w, h))
        cap.write(rotated_image)
    cap.release()  # finalize the video file
    print("Have imgs")


def test_1():
    # name = os.path.split(img_path)[-1]
    # save_path = video_path + name + '.mp4'
    save_path = video_path + img_path

    fps, w, h = 10, 1024, 1280
    cap = cv2.VideoWriter(save_path, cv2.VideoWriter_fourcc(*'mp4v'), fps, (w, h))

    pathx = path + img_path
    imgfiles = [f for f in os.listdir(pathx) if "_cut" not in f]

    imgfiles.sort(key=lambda x: int(x[:-5]))
    imgpaths = []
    for imgfile in imgfiles:
        imgpaths.append(os.path.join(pathx, imgfile))

    # ipaths = [os.path.join(pathx, f) for f in os.listdir(pathx) if not f.find("_cut") != -1]
    # ipaths = []
    # for f in os.listdir(pathx):
    #     if not f.find('_cut'):
    #         ipaths.append(os.path.join(pathx, f))
    # ipaths.sort(key = lambda x: int(x.split('_')[-2]))

    k = 0
    for ipath in imgpaths:
        img = cv2.imread(ipath)
        cap.write(img)
        k += 1

    cap.release()
    print(img_path + f" have imgs: {k}")


def img2video(imgpath):
    """Write all images found directly under imgpath into <imgpath>.mp4 at 25 fps."""
    if not os.path.isdir(imgpath):
        return

    files = []
    files.extend(sorted(glob.glob(os.path.join(imgpath, "*.*"))))
    images = [x for x in files if x.split(".")[-1].lower() in IMG_FORMATS]

    h, w = cv2.imread(images[0]).shape[:2]
    fps = 25

    vidpath = imgpath + '.mp4'
    cap = cv2.VideoWriter(vidpath, cv2.VideoWriter_fourcc(*'mp4v'), fps, (w, h))
    for p in images:
        img = cv2.imread(p)
        cap.write(img)
    cap.release()


def main():
    imgpath = r"D:\work\result\202503251112_v10s_result"
    img2video(imgpath)


if __name__ == "__main__":
    main()
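img2video() writes <folder>.mp4 next to the given image folder at a fixed 25 fps; a minimal sketch for batch-converting every result folder under a root directory (the root path below is hypothetical):

import os

root = r"D:\work\result"                 # hypothetical root holding several *_result folders
for name in sorted(os.listdir(root)):
    folder = os.path.join(root, name)
    if os.path.isdir(folder):
        img2video(folder)                # produces D:\work\result\<name>.mp4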
16326
minicpm.ipynb
Normal file
25
minicpm.py
Normal file
@ -0,0 +1,25 @@
# Load model directly
from transformers import AutoModel, AutoTokenizer
import torch
from PIL import Image

model = AutoModel.from_pretrained(
    "openbmb/MiniCPM-o-2_6",
    trust_remote_code=True,
    attn_implementation='flash_attention_2',
    torch_dtype=torch.bfloat16,
    # device_map="auto"
)
model = model.eval().cuda()
tokenizer = AutoTokenizer.from_pretrained('openbmb/MiniCPM-o-2_6', use_fast=True, trust_remote_code=True)

img1 = Image.open('/home/ieemoo0337/projects/datasets/constrast_pair/8850813311020/8850813311020.jpg')
img2 = Image.open('/home/ieemoo0337/projects/datasets/constrast_pair/8850511321499/8850511321499.jpg')

question = '描述第一张图像的1。'  # Chinese prompt asking the model to describe the first image
msgs = [{'role': 'user', 'content': [img1, img2, question]}]
answer = model.chat(
    msgs=msgs,
    tokenizer=tokenizer
)
print(answer)
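Because both images are passed in a single user turn, the same model.chat call can also be used for a pairwise comparison; a sketch reusing the model, tokenizer and images loaded above (the prompt wording is illustrative, not taken from the repository):

# Ask whether the two product images show the same item.
compare_question = 'Do these two images show the same product? Answer yes or no, then explain briefly.'
compare_msgs = [{'role': 'user', 'content': [img1, img2, compare_question]}]
compare_answer = model.chat(
    msgs=compare_msgs,
    tokenizer=tokenizer
)
print(compare_answer)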
@ -76,11 +76,7 @@ def attempt_load(weights, device=None, inplace=True, fuse=True):
    model = Ensemble()
    for w in weights if isinstance(weights, list) else [weights]:
        if torch.__version__ >= '2.6':
            ckpt = torch.load(attempt_download(w), map_location=device, weights_only=False)  # load
        else:
            ckpt = torch.load(attempt_download(w), map_location=device)

        ckpt = torch.load(attempt_download(w), map_location=device)  # load
        ckpt = (ckpt.get('ema') or ckpt['model']).to(device).float()  # FP32 model

        # Model compatibility updates