update
BIN
tools/.usearch
Normal file
Binary file not shown.
175
tools/Interface.py
Normal file
@@ -0,0 +1,175 @@
import abc
import os  # needed for os.sep.join when saving crops
# import pdb
# import pickle
import sys

import cv2
import numpy as np
from tools.config import gvalue

sys.path.append('./ytracking')
sys.path.append('./contrast')
# from ytracking.tracking.dotrack import init_tracker, VideoTracks, boxes_add_fid
from ytracking.tracking.have_tracking import have_tracked
from ytracking.track_ import *  # provides run() and Profile
from contrast.logic import datacollection, similarityResult, similarity
from PIL import Image


class AiInterface(metaclass=abc.ABCMeta):
    @abc.abstractmethod
    def getTrackingBox(self):
        pass

    @abc.abstractmethod
    def getSimilarity(self):
        pass


class AiClass(AiInterface):
    def __init__(self):
        pass

    def get_xyxy_coordinates(self, box, frame_id_img):
        """
        Compute and return the bounding-box coordinates, clipped to the image.
        """
        try:
            x1 = max(0, int(box[0]))
            x2 = min(frame_id_img.shape[1], int(box[2]))
            y1 = max(0, int(box[1]))
            y2 = min(frame_id_img.shape[0], int(box[3]))
            return x1, y1, x2, y2
        except IndexError as e:
            raise ValueError("bounding-box coordinates exceed the image size") from e

    def getTrackingBox(self, bboxes, features_dict, camera_id, frame_id_img, save_imgs_dir=None):
        """
        Given bounding boxes and frame images, return the cropped images
        grouped by track and the list of track IDs.
        """
        image_lists = {}
        track_id_list = []

        gt = Profile()
        with gt:
            vts = have_tracked(bboxes, features_dict, camera_id)

        nn = 0
        for res in vts.Residual:
            for box in res.boxes:
                try:
                    box = [int(i) for i in box.tolist()]
                    print('box[7] >>>> {}'.format(box[7]))
                    x1, y1, x2, y2 = self.get_xyxy_coordinates(box, frame_id_img[box[7]])
                    gvalue.track_y_lists.append(y1)
                    c_img = frame_id_img[box[7]][y1:y2, x1:x2][:, :, ::-1]  # BGR -> RGB

                    # c_img = frame_id_img[box[7]][box[1]:box[3], box[0]:box[2]][:, :, ::-1]
                    img_pil = Image.fromarray(c_img.astype('uint8'), 'RGB')

                    if save_imgs_dir is not None:  # getResult.py calls without a save dir
                        img_pil.save(os.sep.join([save_imgs_dir, str(nn) + '.jpg']))
                    nn += 1

                    track_id = str(box[4])
                    track_id_list.append(track_id)
                    if track_id not in image_lists:
                        image_lists[track_id] = []
                    image_lists[track_id].append(img_pil)
                except Exception as e:
                    print("x1: {}, y1: {}, x2: {}, y2: {}".format(box[0], box[1], box[2], box[3]))
                    print("w: {}, h: {}".format(frame_id_img[box[7]].shape[1], frame_id_img[box[7]].shape[0]))
                    print(f"error while processing bounding box: {e}")
                    continue

        all_image_list = list(image_lists.values())
        trackIdList = list(set(track_id_list))

        return all_image_list, trackIdList

    @staticmethod
    def process_topn_data(source_data):
        if source_data is None:
            return None

        if not isinstance(source_data, dict):
            raise ValueError("input data must be a dict")

        if not source_data:
            return {}

        total = {}
        carId_barcode_trackId_list = []
        data_category = []

        for category, category_data in source_data.items():
            carId_barcode_trackId_list.append(category)
            for car_id, similarity in category_data.items():
                data_category.append({'carId_barcode_trackId_n': car_id, 'similarity': similarity})

        total['carId_barcode_trackId'] = carId_barcode_trackId_list
        total['data'] = data_category

        return total

    @staticmethod
    def process_top10_data(source_data):
        if source_data is None:
            return None

        if not isinstance(source_data, dict):
            raise ValueError("input data must be a dict")

        if not source_data:
            return {}

        total = {}
        data_category = []

        for category, category_data in source_data.items():
            trackid = category.split('_')[-1]
            barcode = category.split('_')[-2]
            for car_id, similarity in category_data.items():
                data_category.append({'barcode': car_id, 'similarity': similarity, 'trackid': trackid})

        total['barcode'] = barcode
        total['data'] = data_category
        return total

    def getSimilarity(self, model, queueImgs):
        data_collection = datacollection()
        similarityRes = similarityResult()
        data_collection.barcode_flag = queueImgs['barcode_flag']
        data_collection.add_flag = queueImgs['add_flag']
        data_collection.barcode_list = queueImgs['barcode_list'].strip("'").split(',')
        data_collection.queImgsDict = queueImgs

        similarityRes = similarity().getSimilarity(model, data_collection, similarityRes)
        # print('similarityRes.top10: ------------------ {}'.format(similarityRes.top10))
        if similarityRes.top1:
            similarityRes.top1 = {"barcode": list(similarityRes.top1.keys())[0],
                                  "similarity": list(similarityRes.top1.values())[0]}
        # similarityRes.tempLibList = gvalue.tempLibList
        # print('-------------------------', gvalue.tempLibLists)
        if gvalue.tempLibLists.get(gvalue.mac_id) is not None:
            similarityRes.tempLibList = gvalue.tempLibLists[gvalue.mac_id]
        else:
            similarityRes.tempLibList = []
        similarityresult = {
            'top10': AiClass.process_top10_data(similarityRes.top10),
            'top1': similarityRes.top1,
            'topn': AiClass.process_topn_data(similarityRes.topn),
            'tempLibList': similarityRes.tempLibList,
            'sequenceId': queueImgs['sequenceId'],
        }
        return similarityresult


if __name__ == '__main__':
    AI = AiClass()

    # track_boxes, frame_id_img = run()
    # AI.getTrackingBox(track_boxes, frame_id_img)
    # print('=== test ===')
    # AI.getSimilarity(cfg.queueImgs)
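For reference, a minimal sketch of the grouping contract getTrackingBox implements: crops are bucketed per track id (box[4]) and the method returns the buckets alongside the de-duplicated id list. Everything below is dummy data, not output from the real tracker:

    boxes = [
        [10, 20, 50, 60, 1, 0, 0, 0],     # track 1, frame 0
        [12, 22, 52, 62, 1, 0, 0, 1],     # track 1, frame 1
        [100, 20, 140, 60, 2, 0, 0, 0],   # track 2, frame 0
    ]
    image_lists = {}
    track_id_list = []
    for box in boxes:
        track_id = str(box[4])
        track_id_list.append(track_id)
        # the real code appends a PIL crop; a placeholder tuple stands in here
        image_lists.setdefault(track_id, []).append(('crop', box[:4]))
    all_image_list = list(image_lists.values())   # [[track-1 crops], [track-2 crops]]
    trackIdList = list(set(track_id_list))        # ['1', '2'] (order not guaranteed)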
BIN
tools/Template_images/cartboarder.png
Normal file
Binary file not shown.
After Width: | Height: | Size: 13 KiB |
BIN
tools/Template_images/cartedge.png
Normal file
Binary file not shown.
After Width: | Height: | Size: 11 KiB |
BIN
tools/Template_images/edgeline.png
Normal file
Binary file not shown.
After Width: | Height: | Size: 7.2 KiB |
BIN
tools/Template_images/incart.png
Normal file
Binary file not shown.
After Width: | Height: | Size: 9.6 KiB |
BIN
tools/Template_images/incart_ftmp.png
Normal file
Binary file not shown.
After Width: | Height: | Size: 4.0 KiB |
BIN
tools/Template_images/outcart.png
Normal file
Binary file not shown.
After Width: | Height: | Size: 9.6 KiB |
0
tools/__init__.py
Normal file
BIN
tools/__pycache__/Interface.cpython-38.pyc
Normal file
Binary file not shown.
BIN
tools/__pycache__/__init__.cpython-38.pyc
Normal file
Binary file not shown.
BIN
tools/__pycache__/config.cpython-38.pyc
Normal file
Binary file not shown.
BIN
tools/__pycache__/getResult.cpython-38.pyc
Normal file
Binary file not shown.
BIN
tools/__pycache__/getbox.cpython-38.pyc
Normal file
Binary file not shown.
BIN
tools/__pycache__/initModel.cpython-38.pyc
Normal file
Binary file not shown.
BIN
tools/__pycache__/operate_usearch.cpython-38.pyc
Normal file
Binary file not shown.
BIN
tools/__pycache__/uploadvideos.cpython-38.pyc
Normal file
Binary file not shown.
93
tools/config.py
Normal file
@@ -0,0 +1,93 @@
# from yacs.config import CfgNode as CfgNode
import torchvision.transforms as T
import torch
import os


class globalVal:
    tempLibList = []
    tempLibLists = {}
    track_y_lists = []
    mac_id = None
    back_return_similarity = []
    back_add_similarity = []
    front_return_similarity = []
    front_add_similarity = []
    comprehensive_similarity = []


class config:
    save_videos_dir = 'videos'

    # URLs
    # push_url = 'http://api.test2.ieemoo.cn/emoo-api/intelligence/addVideoPathBySequenceId.do'
    push_url = 'https://api.test2.ieemoo.cn/emoo-api/intelligence/addVideoPathBySequenceId.do'  # off-peak upload
    get_config_url = 'https://api.test2.ieemoo.cn/emoo-api/intelligence/addVideoPathByStoreId.do'  # config for off-peak upload
    storidPth = 'tools/storeId.txt'

    # OBS upload
    obs_access_key_id = 'LHXJC7GIC2NNUUHHTNVI'
    obs_secret_access_key = 'sVWvEItrFKWPp5DxeMvX8jLFU69iXPpzkjuMX3iM'
    obs_server = 'https://obs.cn-east-3.myhuaweicloud.com'
    obs_bucketName = 'ieemoo-ai'

    keys = ['x', 'y', 'w', 'h', 'track_id', 'score', 'cls', 'frame_index']

    obs_root_dir = 'ieemoo_ai_data'

    # contrast config
    host = "192.168.1.28"
    port = "19530"
    embedding_size = 256
    img_size = 224
    test_transform = T.Compose([
        T.ToTensor(),
        T.Resize((224, 224)),
        T.ConvertImageDtype(torch.float32),
        T.Normalize(mean=[0.5], std=[0.5]),
    ])

    # test_model = "./tools/ckpts/MobilenetV3Large_noParallel_2624.pth"
    test_model = "./tools/ckpts/resnet18_0721_best.pth"
    tracking_model = "./tools/ckpts/best_158734_cls11_noaug10.pt"
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    httpHost = '0.0.0.0'
    httpPort = 8088

    # tracking config
    botsort = './ytracking/tracking/trackers/cfg/botsort.yaml'
    incart = './tools/Template_images/incart.png'
    outcart = './tools/Template_images/outcart.png'
    cartboarder = './tools/Template_images/cartboarder.png'
    edgeline = './tools/Template_images/edgeline.png'
    cartedge = './tools/Template_images/cartedge.png'
    incart_ftmp = './tools/Template_images/incart_ftmp.png'

    action_type = {
        '1': 'purchase',
        '2': 'jettison',
        '3': 'unswept_purchase',
        '4': 'unswept_jettison'
    }
    camera_id = {
        '0': 'back',
        '1': 'front',
    }
    recognize_result = {
        '01': 'uncatalogued',
        '02': 'fail',
        '03': 'exception',
        '04': 'pass',
    }

    # reid config
    backbone = 'resnet18'  # [resnet18, mobilevit_s, mobilenet_v2, mobilenetv3]
    batch_size = 8

    model_path = './tools/ckpts/best_resnet18_0515.pth'

    temp_video_name = None


cfg = config()
gvalue = globalVal()
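As a quick sanity check of the preprocessing pipeline defined above, the sketch below pushes a blank PIL image through cfg.test_transform, assuming torchvision is installed and the module is imported from the repository root:

    from PIL import Image
    from tools.config import cfg

    img = Image.new('RGB', (640, 480))    # stand-in for a real product crop
    tensor = cfg.test_transform(img)      # ToTensor -> Resize -> float32 -> Normalize
    print(tensor.shape)                   # torch.Size([3, 224, 224])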
78
tools/getResult.py
Normal file
@@ -0,0 +1,78 @@
from tools.Interface import AiInterface, AiClass
# from Interface import AiInterface, AiClass
from tools.config import cfg
import numpy as np
from PIL import Image
import pdb
from ytracking.track_ import *
import time

'''
Input for tracking and comparison.
'''
ai_obj = AiClass()


def deal_similarity_data(message):
    # print('message --- > {}'.format(message))
    car_mac = message['videoIds'].split('_')[-3]
    similar = {'add_flag': message['action'] in {'1', '3'},
               'barcode_flag': message['barcode'] != 'null'}
    if similar['add_flag'] and similar['barcode_flag']:  # add to cart, barcode present
        for Id, image_list in zip(message['trackIdList'],
                                  message['images']):
            similar[car_mac + '_' + message['barcode'] + '_' + Id] = image_list
        similar['barcode_list'] = message['barcodeList']
    elif similar['add_flag'] and (not similar['barcode_flag']):  # add to cart, no barcode
        for Id, image_list in zip(message['trackIdList'],
                                  message['images']):
            similar[car_mac + '_' + Id] = image_list
        similar['barcode_list'] = message['barcodeList']
    elif (not similar['add_flag']) and similar['barcode_flag']:  # return, barcode present
        for Id, image_list in zip(message['trackIdList'],
                                  message['images']):
            similar[car_mac + '_' + message['barcode'] + '_' + Id] = image_list
        similar['barcode_list'] = message['barcodeList']
    else:  # return, no barcode
        for Id, image_list in zip(message['trackIdList'],
                                  message['images']):
            similar[car_mac + '_' + Id] = image_list
        similar['barcode_list'] = message['barcodeList']
    similar['sequenceId'] = message['sequenceId']
    return similar


def get_similarity_result(obj: AiInterface, videopth, model, camera_id, message):
    """
    Compute the similarity result.

    :param videopth: path of the video to process.
    :param obj: AiInterface object used to fetch tracking boxes and similarity data.
    :param message: dict containing tracking data and similarity results.
    :return: similarity result.
    """
    try:
        track_boxes, features_dict, frame_id_img = run(model, source=videopth)
        allimages, trackIdList = obj.getTrackingBox(track_boxes, features_dict, camera_id, frame_id_img)
        message['trackIdList'] = trackIdList
        message['images'] = allimages
        message = deal_similarity_data(message)
        similarityRes = obj.getSimilarity(model, message)
    except ValueError as ve:
        print('ve >>>> ', ve)
        similarityRes = {'top10': {},
                         'top1': {},
                         'topn': {},
                         'tempLibList': [],
                         'sequenceId': message['sequenceId']
                         }
    return similarityRes


if __name__ == '__main__':
    message = {
        'action': '1',
        'barcode': '084501446314',
        'sequenceId': 'test'
    }
    # videopth, model and camera_id must be supplied by the caller:
    # get_similarity_result(ai_obj, videopth, model, camera_id, message)
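deal_similarity_data keys each track's image list as carMac_barcode_trackId (or carMac_trackId when no barcode is present). A hedged example with a made-up message, assuming trackIdList and images are the parallel lists produced by getTrackingBox:

    message = {
        'videoIds': 'seq_cam_ABCDEF_0_1',   # car MAC is the third field from the end
        'action': '1',                      # '1'/'3' => add_flag is True
        'barcode': '084501446314',
        'barcodeList': "'084501446314'",
        'trackIdList': ['1', '2'],
        'images': [['img-a'], ['img-b']],   # PIL images in the real pipeline
        'sequenceId': 'test',
    }
    similar = deal_similarity_data(message)
    # similar now holds 'ABCDEF_084501446314_1' and 'ABCDEF_084501446314_2',
    # plus 'add_flag', 'barcode_flag', 'barcode_list' and 'sequenceId'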
153
tools/getbox.py
Normal file
@@ -0,0 +1,153 @@
# -*- coding: utf-8 -*-
"""
Created on Tue May 21 15:25:23 2024

@author: ieemoo-zl003
"""

import os
import numpy as np

# Replace with your own directory path
files_path = 'D:/Project/ieemoo/kmeans/comparisonData/deletedBarcode_10_0709_am/err_pair/6902088146356/20240709-105804_6902088146356'


def str_to_float_arr(s):
    # Strip a trailing comma, if present
    if s.endswith(','):
        s = s[:-1]

    # Split the string and convert each element to float
    float_array = [float(x) for x in s.split(",")]
    return float_array


def extract_tracker_input_boxes_feats(file_name):
    framesId = []
    boxes = []
    feats = []

    frame_id = 0
    with open(file_name, 'r', encoding='utf-8') as file:
        for line in file:
            line = line.strip()  # drop the trailing newline and surrounding whitespace

            # Skip empty lines
            if not line:
                continue
            if line.find("frameId") >= 0:
                frame_id = line[line.find("frameId:") + 8:].strip()

            # Check for lines carrying 'box:' or 'feat:'
            if line.find("box:") >= 0 and line.find("output_box:") < 0:
                boxes.append(line[line.find("box:") + 4:].strip())  # drop the 'box:' prefix and whitespace
                framesId.append(frame_id)

            if line.find("feat:") >= 0:
                feats.append(line[line.find("feat:") + 5:].strip())  # drop the 'feat:' prefix and whitespace

    return boxes, feats, framesId


def find_string_in_array(arr, target):
    """
    Find the index of the target string in a string array.

    Arguments:
    arr -- array of strings
    target -- string to look for

    Returns:
    Index of the target string in the array, or -1 if not found.
    """
    parts = target.split(',')
    box_substrings = ','.join(parts[:4])
    conf_substring = ','.join(parts[5:6])
    for i, s in enumerate(arr):
        if s.find(box_substrings) >= 0 and s.find(conf_substring[:7]) >= 0:
            return i
    return -1


def extract_tracker_output_boxes_feats(read_file_name):
    input_boxes, input_feats, framesId = extract_tracker_input_boxes_feats(read_file_name)

    boxes = []
    feats = []
    with open(read_file_name, 'r', encoding='utf-8') as file:
        for line in file:
            line = line.strip()  # drop the trailing newline and surrounding whitespace

            # Skip empty lines
            if not line:
                continue

            # Check for lines carrying 'output_box:'
            if line.find("output_box:") >= 0:
                boxes_str = line[line.find("output_box:") + 11:].strip()
                boxes.append(boxes_str)  # drop the 'output_box:' prefix and whitespace
                index = find_string_in_array(input_boxes, boxes_str)
                feat_f = str_to_float_arr(input_feats[index])
                norm_f = np.linalg.norm(feat_f)
                feat_f = feat_f / norm_f
                feats.append(feat_f)
    return input_boxes, input_feats, boxes, feats, framesId


def extract_tracking_output_boxes_feats(read_file_name):
    tracker_boxes, tracker_feats, input_boxes, input_feats, framesId = extract_tracker_output_boxes_feats(
        read_file_name)
    boxes = []
    feats = []
    boxes_frames_id = []
    tracking_flag = False
    tracking_num_cnt = 0
    with open(read_file_name, 'r', encoding='utf-8') as file:
        for line in file:
            line = line.strip()  # drop the trailing newline and surrounding whitespace

            # Skip empty lines
            if not line:
                continue

            if tracking_flag:
                if line.find("tracking_") >= 0:
                    tracking_flag = False
                    tracking_num_cnt = tracking_num_cnt + 1
                else:
                    boxes.append(line)
                    index = find_string_in_array(input_boxes, line)
                    feats.append(input_feats[index])

                    if tracking_num_cnt == 0:
                        index = find_string_in_array(tracker_boxes, line)
                        boxes_frames_id.append(framesId[index])
            # Check for lines carrying 'tracking_'
            if line.find("tracking_") >= 0:
                tracking_flag = True

    return tracker_boxes, tracker_feats, input_boxes, input_feats, boxes, feats, boxes_frames_id


def find_index_feats(files_path):
    # Walk all entries under the given path
    all_boxes, bboxes_frames_ids, Boxes, framesIds = [], [], [], []
    tracker_boxes = []  # keep defined even if no matching file is found
    for filename in os.listdir(files_path):
        # Build the full file path
        file_path = os.path.join(files_path, filename)
        # Only process regular files
        if os.path.isfile(file_path):
            # Parse the file
            if filename.endswith('data') and (not 'tracking' in filename):
                tracker_boxes, tracker_feats, input_boxes, input_feats, boxes, feats, boxes_frames_id = extract_tracking_output_boxes_feats(file_path)
                box, feats, framesId = extract_tracker_input_boxes_feats(file_path)
                Boxes += box
                framesIds += framesId
                all_boxes += boxes[:len(boxes_frames_id)]
                bboxes_frames_ids += boxes_frames_id
    # print(all_boxes)
    # print(bboxes_frames_ids)
    return all_boxes, bboxes_frames_ids, tracker_boxes, Boxes, framesIds


if __name__ == '__main__':
    find_index_feats(files_path)
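The parsers above key on literal "box:", "feat:" and "output_box:" prefixes in the tracker's dump files; the format is inferred from the code, not from a spec. A small sketch of the two low-level helpers on a synthetic line:

    box_line = '100.0,200.0,150.0,260.0,1.0,0.92345,0.0,'
    floats = str_to_float_arr(box_line)        # trailing comma is stripped
    print(floats[:4])                          # [100.0, 200.0, 150.0, 260.0]

    target = '100.0,200.0,150.0,260.0,1.0,0.92345,0.0'
    print(find_string_in_array([box_line], target))
    # 0 -- matched on the four box coordinates plus the confidence prefix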
45
tools/initModel.py
Normal file
@@ -0,0 +1,45 @@
import torch
from ytracking.models.common import DetectMultiBackend
from ytracking.utils.torch_utils import select_device
from tools.config import cfg
from contrast.model.resnet_pre import resnet18
from ytracking.tracking.utils import Boxes, IterableSimpleNamespace, yaml_load
from ytracking.tracking.trackers import BOTSORT, BYTETracker
# import mediapipe as mp
# from pymilvus import (
#     connections,
#     utility,
#     FieldSchema, CollectionSchema, DataType,
#     Collection,
#     Milvus
# )


class Models:
    def __init__(self):
        self.yoloModel = None
        self.reidModel = None
        self.similarityModel = None
        self.Milvus = None
        self.device = 'cpu'

    def initSimilarityModel(self):
        # model = MobileNetV3_Large().to(cfg.device)
        model = resnet18().to(cfg.device)
        # model.load_state_dict(torch.load(cfg.test_model, map_location=cfg.device))
        model.load_state_dict(torch.load(cfg.model_path, map_location=cfg.device))
        model.eval()
        return model

    def initYoloModel(self):
        device = select_device(self.device)
        model = DetectMultiBackend(cfg.tracking_model, device=device, dnn=False, fp16=False)
        return model

    def initModel(self):
        self.yoloModel = self.initYoloModel()
        self.similarityModel = self.initSimilarityModel()


models = Models()

if __name__ == "__main__":
    Models().initModel()
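The module exposes a single shared Models instance; callers load the weights once and then reuse the handles. A minimal usage sketch, assuming the checkpoints referenced in tools/config.py exist on disk:

    from tools.initModel import models

    models.initModel()                  # loads the YOLO detector and the resnet18 similarity model
    detector = models.yoloModel         # DetectMultiBackend instance used by run()/tracking
    simmodel = models.similarityModel   # eval-mode resnet18 on cfg.device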
153
tools/operate_usearch.py
Normal file
@@ -0,0 +1,153 @@
import os
import numpy as np
from usearch.index import Index
import json
import statistics


def create_index():
    index = Index(
        ndim=256,
        metric='cos',
        # dtype='f32',
        dtype='f16',
        connectivity=32,
        expansion_add=40,  # 128,
        expansion_search=10,  # 64,
        multi=True
    )
    return index


def compare_feature(features1, features2, model='1'):
    """
    :param model: comparison strategy
        '0': treat features1 as one track's images (all of them, or a selected few)
             compared against the reference library; take each image's maximum
             similarity to the library, then average those maxima
        '1': mean of all pairwise similarities
        '2': maximum over the 1:1 comparisons
    :param features1:
    :param features2:
    :return:
    """
    similarity_group, similarity_groups = [], []
    if model == '0':
        for feature1 in features1:
            for feature2 in features2[0]:
                similarity = np.dot(feature1, feature2) / (np.linalg.norm(feature1) * np.linalg.norm(feature2))
                similarity_group.append(similarity)
            similarity_groups.append(max(similarity_group))
            similarity_group = []
        return sum(similarity_groups) / len(similarity_groups)

    elif model == '1':
        feature2 = features2[0]
        for feature1 in features1:
            for num in range(len(feature2)):
                similarity = np.dot(feature1, feature2[num]) / (np.linalg.norm(feature1) * np.linalg.norm(feature2[num]))
                similarity_group.append(similarity)
            similarity_groups.append(sum(similarity_group) / len(similarity_group))
            similarity_group = []
        # return sum(similarity_groups)/len(similarity_groups), max(similarity_groups)
        if len(similarity_groups) == 0:
            return -1
        return sum(similarity_groups) / len(similarity_groups)
    elif model == '2':
        feature2 = features2[0]
        for feature1 in features1:
            for num in range(len(feature2)):
                similarity = np.dot(feature1, feature2[num]) / (np.linalg.norm(feature1) * np.linalg.norm(feature2[num]))
                similarity_group.append(similarity)
        return max(similarity_group)


def get_barcode_feature(data):
    barcode = data['key']
    features = data['value']
    return [barcode] * len(features), features


def analysis_file(file_path):
    """
    Parse a JSON library file into parallel barcode/feature lists.

    :param file_path:
    :return:
    """
    barcodes, features = [], []
    with open(file_path, 'r', encoding='utf-8') as f:
        data = json.load(f)
        for dic in data['total']:
            barcode, feature = get_barcode_feature(dic)
            barcodes.append(barcode)
            features.append(feature)
    return barcodes, features


def create_base_index(index_file_pth=None,
                      barcodes=None,
                      features=None,
                      save_index_name=None):
    index = create_index()
    if index_file_pth is not None:
        # save_index_name = index_file_pth.split('json')[0] + 'usearch'
        save_index_name = index_file_pth.split('json')[0] + 'data'
        barcodes, features = analysis_file(index_file_pth)
    else:
        assert barcodes is not None and features is not None, 'barcodes and features must not be None'
    for barcode, feature in zip(barcodes, features):
        index.add(np.array(barcode), np.array(feature))
    index.save(save_index_name)


def get_feature_index(index_file_pth=None,
                      barcodes=None):
    assert index_file_pth is not None, 'index_file_pth must not be None'
    index = Index.restore(index_file_pth, view=True)
    feature_lists = index.get(np.array(barcodes))
    print("memory {} size {}".format(index.memory_usage, index.size))
    return feature_lists


def search_in_index(query=None,
                    barcode=None,  # barcode -> int or np.ndarray
                    index_name=None,
                    temp_index=False,  # whether this is a temporary library
                    model='0',
                    ):
    if temp_index:
        assert index_name is not None, 'index_name must not be None'
        index = Index.restore(index_name, view=True)
        if barcode is not None:  # 1:1 comparison test
            feature_lists = index.get(np.array(barcode))
            results = compare_feature(query, feature_lists)
        else:
            results = index.search(query, count=5)
        return results
    else:  # reference library
        assert index_name is not None, 'index_name must not be None'
        index = Index.restore(index_name, view=True)
        if barcode is not None:  # 1:1 comparison test
            feature_lists = index.get(np.array(barcode))
            results = compare_feature(query, feature_lists, model)
        else:
            results = index.search(query, count=10)
        return results


def delete_index(index_name=None, key=None, index=None):
    assert key is not None, 'key must not be None'
    if index is None:
        assert index_name is not None, 'index_name must not be None'
        index = Index.restore(index_name, view=True)
        index.remove(key)  # was index.remove(index_name), which passed the file name instead of the key
    else:
        index.remove(key)


if __name__ == '__main__':
    # index_file_pth = '../search_library/data_0923.json'
    # create_base_index(index_file_pth)

    # index_file_pth = '../search_library/test_index_10_normal_0717.usearch'
    # # index_file_pth = '../search_library/data_10_normal_0718.index'
    # search_in_index(query='693', index_name=index_file_pth, barcode='6934024590466')

    # check index data file
    index_file_pth = '../search_library/data_0923.data'
    # get_feature_index(index_file_pth, ['6901070602818'])
    get_feature_index(index_file_pth, ['6934230050105'])
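For context, a self-contained sketch of the usearch flow these helpers wrap: build a small cosine index, then query it. usearch keys are integers, so the numeric barcode doubles as the key here (the JSON loader above passes barcodes through np.array instead):

    import numpy as np
    from usearch.index import Index

    index = Index(ndim=256, metric='cos', dtype='f16', multi=True)
    barcode = 6934024590466                    # numeric barcode used as the key
    feats = np.random.rand(3, 256).astype(np.float32)
    for f in feats:
        index.add(barcode, f)                  # multi=True allows several vectors per key

    query = np.random.rand(256).astype(np.float32)
    matches = index.search(query, count=5)     # top-5 nearest vectors
    print(matches.keys, matches.distances)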
1
tools/storeId.txt
Normal file
@@ -0,0 +1 @@
32011001
159
tools/uploadvideos.py
Normal file
@@ -0,0 +1,159 @@
from obs import ObsClient
import obs
import sys
import os
from tools.config import cfg

import requests
import argparse


class uploadVideos:
    def __init__(self):
        self.obsClient, self.headers = self.InitObsClient()

    def InitObsClient(self):
        # Create the ObsClient instance
        obsClient = ObsClient(
            access_key_id=cfg.obs_access_key_id,  # access key ID
            secret_access_key=cfg.obs_secret_access_key,  # secret access key
            server=cfg.obs_server)  # server address
        headers = obs.SetObjectMetadataHeader()  # object metadata headers
        headers.cacheControl = "no-cache"  # disable caching
        return obsClient, headers  # return the ObsClient instance and metadata headers

    def upload(self, video_name, video_dir=None):
        # Make sure the ObsClient instance has been initialized
        if not self.obsClient:
            raise ValueError("initialize the ObsClient instance first")

        class uploadResult:
            def __init__(self, video_path=None, squenceId=None):
                self.video_path = video_path
                self.squenceId = squenceId

        uploadRes = uploadResult()

        # Parse the video name for routing information
        information = video_name.split('.')[0].split('_')
        action_category = cfg.action_type[information[-1]]  # action type
        camera_id = cfg.camera_id[information[-3]]  # camera ID
        recognize_result = cfg.recognize_result[information[0]]  # recognition result
        time = information[1].split('-')[0]  # date
        squenceId = information[1]

        # Build the OBS object key
        objectkey = os.path.join(cfg.obs_root_dir, time, action_category, camera_id, recognize_result, video_name)
        if video_dir is None:
            file_path = os.sep.join([cfg.save_videos_dir, squenceId, video_name])  # local file path
        else:
            file_path = os.sep.join([video_dir, squenceId, video_name])

        # Upload the file to OBS
        resp = self.obsClient.putFile(cfg.obs_bucketName, objectkey, file_path)

        uploadRes.video_path = resp['body']['objectUrl']
        uploadRes.squenceId = squenceId
        os.remove(file_path)
        return uploadRes

    def get_information(self, video_squence, camera_type):
        """Collect the upload URLs for one sequence's videos."""
        videos_path = []
        videos_dir = os.sep.join([cfg.save_videos_dir, video_squence])
        for video_name in os.listdir(videos_dir):
            if video_squence in video_name:
                if camera_type == video_name.split('_')[-3]:  # camera position ID
                    videos_path.append(self.upload(video_name).video_path)
                elif camera_type == '2':
                    videos_path.append(self.upload(video_name).video_path)
        return {"videos_path": videos_path}

    @staticmethod
    def exception_action(queueImgs):
        """Mark a sequence's videos as exceptions (rename with the '03_' prefix)."""
        # Parse the video IDs for the sequence
        video_squence = queueImgs['videoIds'].split(',')[0]
        for video_name in os.listdir(cfg.save_videos_dir):
            if video_squence in video_name:
                os.rename(os.sep.join([cfg.save_videos_dir, video_name]),
                          os.sep.join([cfg.save_videos_dir, '03_' + video_name]))


class VideoUploader:
    @staticmethod
    def read_config_file(file_path):
        """Read the config file defensively."""
        try:
            with open(file_path, 'r') as file:
                lines = file.readlines()
        except IOError as e:
            print(f"error reading config file: {e}")
            return []
        return [line.strip() for line in lines if line.strip() != '']

    @staticmethod
    def upload_videos_for_ids(video_ids, video_dir):
        """Upload the videos matching the given IDs."""
        tempdata = []
        print('----------->', video_dir)
        for root, dirs, files in os.walk(video_dir):
            for name in files:
                print(name)
                name_s = name.split('.')[0]  # avoid splitting twice
                parts = name_s.split('_')
                if len(parts) < 7:
                    continue
                for data in video_ids:
                    if parts[-1] == data['action'] and (parts[-3] == data['type'] or data['type'] == '2'):
                        try:
                            upload_rs = uploadVideos().upload(name, video_dir)
                            if upload_rs:
                                video_path = upload_rs.video_path
                                sequence_id = upload_rs.squenceId
                                tempdata.append({
                                    "squenceId": sequence_id,
                                    "video_path": [video_path]
                                })
                            break  # stop at the first match
                        except Exception as e:
                            print(f"error uploading video {name}: {e}")
        return tempdata

    @staticmethod
    def timedUpload(rootPth='/home/lc/project/ieemoo'):
        """Upload videos on a schedule."""
        storidPth = os.sep.join([rootPth, 'tools', 'storeId.txt'])
        save_videos_dir = os.sep.join([rootPth, 'videos'])
        config_lines = VideoUploader.read_config_file(storidPth)
        if not config_lines:
            print("no valid configuration found.")
            return

        print('config lines --- >', config_lines)
        storeid_list = [{"storeId": line} for line in config_lines]
        try:
            rep = requests.post(url=cfg.get_config_url, data=storeid_list[0])
            rep.raise_for_status()  # check the response status
            video_ids = rep.json().get('data', [])
        except requests.RequestException as e:
            print(f"failed to fetch configuration: {e}")
            return

        if video_ids:
            tempdata = VideoUploader.upload_videos_for_ids(video_ids, save_videos_dir)
            if tempdata:
                tmpdata = {'videosPth': str(tempdata)}
                try:
                    requests.post(url=cfg.push_url, data=tmpdata)
                    print("data pushed successfully")
                except requests.RequestException as e:
                    print(f"failed to push data: {e}")
        else:
            tmpdata = {'videosPth': str([])}
            try:
                requests.post(url=cfg.push_url, data=tmpdata)
            except requests.RequestException as e:
                print(f"failed to push empty data: {e}")
        # print(tmpdata)


if __name__ == '__main__':
    VideoUploader.timedUpload()
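upload() derives all OBS routing information from the video filename alone. A hedged walk-through with a made-up name that matches the indexing the code performs (recognition result first, sequence id second, camera type third-from-last, action type last):

    video_name = '04_20240709-105804_xxx_0_yyy_zzz_1.mp4'
    information = video_name.split('.')[0].split('_')
    # information[0]  -> '04' => cfg.recognize_result: 'pass'
    # information[1]  -> '20240709-105804' (sequence id; its date prefix becomes the OBS folder)
    # information[-3] -> '0'  => cfg.camera_id: 'back'
    # information[-1] -> '1'  => cfg.action_type: 'purchase'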