Smart scale analysis (智能秤分析)
.idea/CopilotChatHistory.xml (generated, 36 changed lines)
@@ -3,6 +3,42 @@
   <component name="CopilotChatHistory">
     <option name="conversations">
       <list>
+        <Conversation>
+          <option name="createTime" value="1754286137102" />
+          <option name="id" value="0198739a1f0e75c38b0579ade7b34050" />
+          <option name="title" value="新对话 2025年8月04日 13:42:17" />
+          <option name="updateTime" value="1754286137102" />
+        </Conversation>
+        <Conversation>
+          <option name="createTime" value="1753932970546" />
+          <option name="id" value="01985e8d3a3170bf871ba640afdf246d" />
+          <option name="title" value="新对话 2025年7月31日 11:36:10" />
+          <option name="updateTime" value="1753932970546" />
+        </Conversation>
+        <Conversation>
+          <option name="createTime" value="1753932554257" />
+          <option name="id" value="01985e86e01170d6a09dca496e3dad46" />
+          <option name="title" value="新对话 2025年7月31日 11:29:14" />
+          <option name="updateTime" value="1753932554257" />
+        </Conversation>
+        <Conversation>
+          <option name="createTime" value="1753680371881" />
+          <option name="id" value="01984f7ee0a9779aabcd3f1671b815b3" />
+          <option name="title" value="新对话 2025年7月28日 13:26:11" />
+          <option name="updateTime" value="1753680371881" />
+        </Conversation>
+        <Conversation>
+          <option name="createTime" value="1753405176017" />
+          <option name="id" value="01983f17b8d173dda926d0ffa5422bbf" />
+          <option name="title" value="新对话 2025年7月25日 08:59:36" />
+          <option name="updateTime" value="1753405176017" />
+        </Conversation>
+        <Conversation>
+          <option name="createTime" value="1753065086744" />
+          <option name="id" value="01982ad25f18712f862c5c18b627f40d" />
+          <option name="title" value="新对话 2025年7月21日 10:31:26" />
+          <option name="updateTime" value="1753065086744" />
+        </Conversation>
         <Conversation>
           <option name="createTime" value="1752195523240" />
           <option name="id" value="0197f6fde2a87e68b893b3a36dfc838f" />
@@ -16,24 +16,24 @@ base:
 # Model configuration
 models:
   backbone: 'resnet18'
-  channel_ratio: 0.75
+  channel_ratio: 1.0

 # Training parameters
 training:
-  epochs: 600              # total training epochs
+  epochs: 400              # total training epochs
   batch_size: 128          # batch size
-  lr: 0.007                # initial learning rate
+  lr: 0.01                 # initial learning rate
   optimizer: "sgd"         # optimizer type
-  metric: 'arcface'        # metric type (options: arcface/cosface/sphereface/softmax)
+  loss: "cross_entropy"    # loss type (options: cross_entropy/cross_entropy_smooth/center_loss/center_loss_smooth/arcface/cosface/sphereface/softmax)
-  lr_step: 10              # LR adjustment interval (epochs)
+  lr_step: 5               # LR adjustment interval (epochs)
   lr_decay: 0.95           # LR decay rate
   weight_decay: 0.0005     # weight decay
-  scheduler: "cosine"      # LR scheduler (options: cosine/cosine_warm/step/None)
+  scheduler: "step"        # LR scheduler (options: cosine/cosine_warm/step/None)
   num_workers: 32          # number of data-loading workers
-  checkpoints: "./checkpoints/resnet18_20250717_scale=0.75_nosub/"  # checkpoint save directory
-  restore: true
-  restore_model: "./checkpoints/resnet18_20250716_scale=0.75_nosub/best.pth"  # model restore path
+  checkpoints: "./checkpoints/resnet18_electornic_20250806/"  # checkpoint save directory
+  restore: false
+  restore_model: "./checkpoints/resnet18_20250717_scale=0.75_nosub/best.pth"  # model restore path
   cosine_t_0: 10           # initial cycle length
   cosine_t_mult: 1         # cycle-length multiplier
   cosine_eta_min: 0.00001  # minimum learning rate

@@ -49,8 +49,8 @@ data:
   train_batch_size: 128    # training batch size
   val_batch_size: 128      # validation batch size
   num_workers: 32          # number of data-loading workers
-  data_train_dir: "../data_center/contrast_data/v2/train"  # training-set root
-  data_val_dir: "../data_center/contrast_data/v2/val"      # validation-set root
+  data_train_dir: "../data_center/electornic/v1/train"     # training-set root
+  data_val_dir: "../data_center/electornic/v1/val"         # validation-set root

 transform:
   img_size: 224            # image size
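Note: the trainer itself is not part of this diff, but assuming these keys feed the standard torch.optim.lr_scheduler classes, the switch from "cosine" to "step" corresponds roughly to the following sketch (the model and loop are stand-ins):

import torch
from torch.optim.lr_scheduler import StepLR, CosineAnnealingWarmRestarts

model = torch.nn.Linear(10, 2)  # stand-in model for illustration
optimizer = torch.optim.SGD(model.parameters(), lr=0.01, weight_decay=0.0005)

# scheduler: "step" -> decay the LR by lr_decay every lr_step epochs
scheduler = StepLR(optimizer, step_size=5, gamma=0.95)

# scheduler: "cosine" (the old setting) -> warm restarts driven by
# cosine_t_0 / cosine_t_mult / cosine_eta_min
# scheduler = CosineAnnealingWarmRestarts(optimizer, T_0=10, T_mult=1, eta_min=0.00001)

for epoch in range(400):
    # train_one_epoch(...)
    scheduler.step()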
configs/pic_pic_similar.yml (new file, 53 lines)
@@ -0,0 +1,53 @@
# configs/similar_analysis.yml
# Configuration file designed for comparing model-training runs
# Supports comparing different training strategies (e.g. distillation vs. independent training)

# Base configuration
base:
  experiment_name: "model_comparison"  # experiment name (used for the results directory)
  device: "cuda"                       # training device (cuda/cpu)
  embedding_size: 256                  # feature (embedding) dimension
  pin_memory: true                     # whether to enable pin_memory
  distributed: true                    # whether to enable distributed training


# Model configuration
models:
  backbone: 'resnet18'
  channel_ratio: 0.75
  model_path: "../checkpoints/resnet18_1009/best.pth"

heatmap:
  feature_layer: "layer4"
  show_heatmap: true
# Data configuration
data:
  dataset: "imagenet"        # dataset name (placeholder; replace with the actual dataset)
  train_batch_size: 128      # training batch size
  val_batch_size: 8          # validation batch size
  num_workers: 32            # number of data-loading workers
  data_dir: "/home/lc/data_center/image_analysis/pic_pic_similar_maxtrix"
  image_joint_pth: "/home/lc/data_center/image_analysis/error_compare_result"
  total_pkl: "/home/lc/data_center/image_analysis/pic_pic_similar_maxtrix/total.pkl"
  result_txt: "/home/lc/data_center/image_analysis/pic_pic_similar_maxtrix/result.txt"

transform:
  img_size: 224              # image size
  img_mean: 0.5              # image mean
  img_std: 0.5               # image standard deviation
  RandomHorizontalFlip: 0.5  # random horizontal-flip probability
  RandomRotation: 180        # random rotation angle
  ColorJitter: 0.5           # random color-jitter strength

# Logging and monitoring
logging:
  logging_dir: "./logs/resnet18_scale=0.75_nosub_log"  # log directory
  tensorboard: true          # whether to enable TensorBoard
  checkpoint_interval: 30    # checkpoint save interval (epochs)

event:
  oneToOne_max_th: 0.9
  oneToSn_min_th: 0.6
  event_save_dir: "/home/lc/works/realtime_yolov10s/online_yolov10s_resnetv11_20250702/yolos_tracking"
  stdlib_image_path: "/testDataAndLogs/module_test_record/comparison/标准图测试数据/pic/stlib_base"
  pickle_path: "event.pickle"
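Note: a minimal sketch of how downstream tools appear to consume this file with PyYAML (path relative to the repo root; the threshold semantics follow the event-analysis code later in this commit):

import yaml

with open('configs/pic_pic_similar.yml', 'r') as f:
    conf = yaml.load(f, Loader=yaml.FullLoader)

# A same-barcode 1:1 comparison scoring below oneToOne_max_th is treated as a
# false negative; a different-barcode comparison scoring above oneToSn_min_th
# as a false positive (see tools/event_similar_analysis.py below).
print(conf['event']['oneToOne_max_th'], conf['event']['oneToSn_min_th'])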
@@ -4,10 +4,10 @@
 # Data configuration
 data:
   dataset: "imagenet"  # dataset name (placeholder; replace with the actual dataset)
-  source_dir: "../../data_center/scatter/v5/source"    # raw data
-  train_dir: "../../data_center/scatter/v5/train"      # training-set root
-  val_dir: "../../data_center/scatter/v5/val"          # validation-set root
-  extra_dir: "../../data_center/scatter/v5/extra"      # extra-set root
+  source_dir: "../../data_center/electornic/source"    # raw data
+  train_dir: "../../data_center/electornic/v1/train"   # training-set root
+  val_dir: "../../data_center/electornic/v1/val"       # validation-set root
+  extra_dir: "../../data_center/electornic/v1/extra"   # extra-set root
   split_ratio: 0.9
   max_files: 10  # classes with fewer files than this threshold go into extra

@@ -43,7 +43,11 @@ logging:
   tensorboard: true        # whether to enable TensorBoard
   checkpoint_interval: 30  # checkpoint save interval (epochs)

-# Distributed training (optional)
-distributed:
-  enabled: false   # whether to enable distributed training
-  backend: "nccl"  # distributed backend (nccl/gloo)
+event:
+  oneToOneTxt: "/home/lc/detecttracking/oneToOne.txt"
+  oneToSnTxt: "/home/lc/detecttracking/oneToSn.txt"
+  oneToOne_max_th: 0.9
+  oneToSn_min_th: 0.6
+  event_save_dir: "/home/lc/works/realtime_yolov10s/online_yolov10s_resnetv11_20250702/yolos_tracking"
+  stdlib_image_path: "/testDataAndLogs/module_test_record/comparison/标准图测试数据/pic/stlib_base"
+  pickle_path: "event.pickle"
@@ -13,8 +13,10 @@ base:
 # Model configuration
 models:
   backbone: 'resnet18'
-  channel_ratio: 1.0
-  model_path: "checkpoints/resnet18_scatter_7.3/best.pth"
+  channel_ratio: 0.75
+  model_path: "checkpoints/resnet18_1009/best.pth"
+  #resnet18_20250715_scale=0.75_sub
+  #resnet18_20250718_scale=0.75_nosub
   half: false  # whether to test in half precision (fp16)
   contrast_learning: false

@@ -22,9 +24,9 @@ models:
 data:
   test_batch_size: 128  # test batch size
   num_workers: 32       # number of data-loading workers
-  test_dir: "../data_center/scatter/v4/val"          # test-set root
+  test_dir: "../data_center/contrast_data/v1/extra"  # test-set root
   test_group_json: "../data_center/contrast_learning/model_test_data/test/inner_group_pairs.json"
-  test_list: "../data_center/scatter/v4/standard_cross_same.txt"
+  test_list: "../data_center/contrast_data/v1/extra_cross_same.txt"
   group_test: false
   save_image_joint: true
   image_joint_pth: "./joint_images"
@@ -14,7 +14,7 @@ base:
 models:
   backbone: 'resnet18'
   channel_ratio: 0.75
-  checkpoints: "../checkpoints/resnet18_1009/best.pth"
+  checkpoints: "../checkpoints/resnet18_20250715_scale=0.75_sub/best.pth"

 # Data configuration
 data:

@@ -42,7 +42,7 @@ logging:

 save:
   json_bin: "../search_library/yunhedian_05-09.json"  # save the combined json file
-  json_path: "/home/lc/data_center/baseStlib/feature_json/stlib_base"               # directory for individual json files
+  json_path: "/home/lc/data_center/baseStlib/feature_json/stlib_base_resnet18_sub"  # directory for individual json files
   error_barcodes: "error_barcodes.txt"
   barcodes_statistics: "../search_library/barcodes_statistics.txt"
   create_single_json: true  # whether to save individual json files
@@ -2,7 +2,7 @@ import os
 import shutil
 import random
 from pathlib import Path

+import yaml
 def is_image_file(filename):
     """Check whether a file is an image file."""
     image_extensions = ('.jpg', '.jpeg', '.png', '.bmp', '.gif', '.tiff')

@@ -61,10 +61,12 @@ def control_train_number():
     pass

 if __name__ == "__main__":
-    # Set the directory paths
-    TRAIN_DIR = "scatter_data/train"
-    VAL_DIR = "scatter_data/val"
+    # # Set the directory paths
+    # TRAIN_DIR = "/home/lc/data_center/electornic/v1/train"
+    # VAL_DIR = "/home/lc/data_center/electornic/v1/val"

+    with open('../configs/scatter_data.yml', 'r') as f:
+        conf = yaml.load(f, Loader=yaml.FullLoader)
     print("Splitting dataset...")
-    split_directory(TRAIN_DIR, VAL_DIR)
+    split_directory(conf)
     print("Dataset split complete")
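Note: split_directory itself is not shown in this diff; the following is a hypothetical sketch of a config-driven version, assuming it reads source_dir, train_dir, val_dir, extra_dir, split_ratio and max_files from the data section of scatter_data.yml (helper names invented for illustration):

import os
import random
import shutil

IMAGE_EXTS = ('.jpg', '.jpeg', '.png', '.bmp', '.gif', '.tiff')

def split_directory(conf):
    data = conf['data']
    random.seed(0)  # reproducible split
    for cls in os.listdir(data['source_dir']):
        src = os.path.join(data['source_dir'], cls)
        files = [f for f in os.listdir(src) if f.lower().endswith(IMAGE_EXTS)]
        random.shuffle(files)
        n_train = int(len(files) * data['split_ratio'])  # e.g. 0.9
        small = len(files) < data['max_files']           # tiny classes go to extra
        for i, name in enumerate(files):
            if small:
                dest = data['extra_dir']
            elif i < n_train:
                dest = data['train_dir']
            else:
                dest = data['val_dir']
            os.makedirs(os.path.join(dest, cls), exist_ok=True)
            shutil.copy(os.path.join(src, name), os.path.join(dest, cls, name))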
@@ -141,7 +141,7 @@ class ImageExtendProcessor:
                              same_directory,
                              dir_name)

-    def random_remove_image(self, subdir_path, max_count=200):
+    def random_remove_image(self, subdir_path, max_count=1000):
        """
        Randomly delete image files from a subdirectory until the count does not exceed max_count
        :param subdir_path: subdirectory path
test_ori.py (15 changed lines)
@@ -17,7 +17,7 @@ from configs import trainer_tools
 import yaml
 from datetime import datetime

-with open('../configs/test.yml', 'r') as f:
+with open('./configs/test.yml', 'r') as f:
     conf = yaml.load(f, Loader=yaml.FullLoader)

 # Constants from config

@@ -141,7 +141,7 @@ def threshold_search(y_score, y_true):


 def showgrid(recall, recall_TN, PrecisePos, PreciseNeg, Correct):
-    x = np.linspace(start=0, stop=1.0, num=50, endpoint=True).tolist()
+    x = np.linspace(start=-1, stop=1.0, num=100, endpoint=True).tolist()
     plt.figure(figsize=(10, 6))
     plt.plot(x, recall, color='red', label='recall:TP/TPFN')
     plt.plot(x, recall_TN, color='black', label='recall_TN:TN/TNFP')

@@ -151,6 +151,7 @@ def showgrid(recall, recall_TN, PrecisePos, PreciseNeg, Correct):
     plt.legend()
     plt.xlabel('threshold')
     # plt.ylabel('Similarity')
+
     plt.grid(True, linestyle='--', alpha=0.5)
     plt.savefig('grid.png')
     plt.show()

@@ -162,19 +163,19 @@ def showHist(same, cross):
     Cross = np.array(cross)

     fig, axs = plt.subplots(2, 1)
-    axs[0].hist(Same, bins=50, edgecolor='black')
-    axs[0].set_xlim([-0.1, 1])
+    axs[0].hist(Same, bins=100, edgecolor='black')
+    axs[0].set_xlim([-1, 1])
     axs[0].set_title('Same Barcode')

-    axs[1].hist(Cross, bins=50, edgecolor='black')
-    axs[1].set_xlim([-0.1, 1])
+    axs[1].hist(Cross, bins=100, edgecolor='black')
+    axs[1].set_xlim([-1, 1])
     axs[1].set_title('Cross Barcode')
     plt.savefig('plot.png')


 def compute_accuracy_recall(score, labels):
     th = 0.1
-    squence = np.linspace(-1, 1, num=50)
+    squence = np.linspace(-1, 1, num=100)
     recall, PrecisePos, PreciseNeg, recall_TN, Correct = [], [], [], [], []
     Same = score[:len(score) // 2]
     Cross = score[len(score) // 2:]
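Note: widening the sweep from [0, 1] to [-1, 1] matches the full range of cosine similarity. A minimal sketch of the threshold sweep that compute_accuracy_recall appears to perform, assuming the first half of score holds same-pair scores and the second half cross-pair scores (as the slicing above suggests):

import numpy as np

def sweep(score):
    same = np.asarray(score[:len(score) // 2])   # same-barcode pair scores
    cross = np.asarray(score[len(score) // 2:])  # cross-barcode pair scores
    for th in np.linspace(-1, 1, num=100):
        tp = (same >= th).sum()   # same pairs accepted
        fn = (same < th).sum()    # same pairs rejected
        tn = (cross < th).sum()   # cross pairs rejected
        fp = (cross >= th).sum()  # cross pairs accepted
        recall = tp / max(tp + fn, 1)
        recall_tn = tn / max(tn + fp, 1)
        print(f"th={th:+.2f} recall={recall:.3f} recall_TN={recall_tn:.3f}")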
@@ -24,7 +24,7 @@ def get_transform(cfg):
         T.Normalize(mean=[cfg['transform']['img_mean']], std=[cfg['transform']['img_std']]),
     ])
     test_transform = T.Compose([
-        # T.Lambda(pad_to_square),  # pad to square
+        T.Lambda(pad_to_square),  # pad to square
         T.ToTensor(),
         T.Resize((cfg['transform']['img_size'], cfg['transform']['img_size']), antialias=True),
         T.ConvertImageDtype(torch.float32),
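Note: pad_to_square is referenced but not defined in this diff. Since it runs before T.ToTensor(), it must map a PIL image to a PIL image; a plausible implementation (an assumption, not necessarily the repo's actual helper):

from PIL import Image, ImageOps

def pad_to_square(img: Image.Image) -> Image.Image:
    # Pad the shorter side with black so the image becomes square,
    # preserving aspect ratio before the subsequent Resize.
    w, h = img.size
    side = max(w, h)
    left = (side - w) // 2
    top = (side - h) // 2
    return ImageOps.expand(img, border=(left, top, side - w - left, side - h - top), fill=0)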
tools/event_similar_analysis.py (new file, 144 lines)
@@ -0,0 +1,144 @@
from similar_analysis import SimilarAnalysis
import os
import pickle
from tools.image_joint import merge_imgs


class EventSimilarAnalysis(SimilarAnalysis):
    def __init__(self):
        super(EventSimilarAnalysis, self).__init__()
        self.fn_one2one_event, self.fp_one2one_event = self.One2one_similar_analysis()
        self.fn_one2sn_event, self.fp_one2sn_event = self.One2Sn_similar_analysis()
        if os.path.exists(self.conf['event']['pickle_path']):
            print('pickle file exists')
        else:
            self.target_image = self.get_path()

    def get_path(self):
        events = [self.fn_one2one_event, self.fp_one2one_event,
                  self.fn_one2sn_event, self.fp_one2sn_event]
        event_image_path = []
        barcode_image_path = []
        for event in events:
            for event_name, bcd in event:
                event_sub_image = os.sep.join([self.conf['event']['event_save_dir'],
                                               event_name,
                                               'subimgs'])
                barcode_images = os.sep.join([self.conf['event']['stdlib_image_path'],
                                              bcd])
                for image_name in os.listdir(event_sub_image):
                    event_image_path.append(os.sep.join([event_sub_image, image_name]))
                for barcode in os.listdir(barcode_images):
                    barcode_image_path.append(os.sep.join([barcode_images, barcode]))
        return list(set(event_image_path + barcode_image_path))

    def write_dict_to_pickle(self, data):
        """Write a dict to the pickle file."""
        with open(self.conf['event']['pickle_path'], 'wb') as file:
            pickle.dump(data, file)

    def get_dict_to_pickle(self):
        with open(self.conf['event']['pickle_path'], 'rb') as f:
            data = pickle.load(f)
        return data

    def create_total_feature(self):
        feature_dicts = self.get_feature_map(self.target_image)
        self.write_dict_to_pickle(feature_dicts)
        print(feature_dicts)

    def One2one_similar_analysis(self):
        fn_event, fp_event = [], []
        with open(self.conf['event']['oneToOneTxt'], 'r') as f:
            lines = f.readlines()
            for line in lines:
                print(line.strip().split(' '))
                event_infor = line.strip().split(' ')
                label = event_infor[0]
                event_name = event_infor[1]
                bcd = event_infor[2]
                simi1 = event_infor[3]
                simi2 = event_infor[4]
                if label == 'same' and float(simi2) < self.conf['event']['oneToOne_max_th']:
                    print(event_name, bcd, simi1)
                    fn_event.append((event_name, bcd))
                elif label == 'diff' and float(simi2) > self.conf['event']['oneToSn_min_th']:
                    fp_event.append((event_name, bcd))
        return fn_event, fp_event

    def One2Sn_similar_analysis(self):
        fn_event, fp_event = [], []
        with open(self.conf['event']['oneToSnTxt'], 'r') as f:
            lines = f.readlines()
            for line in lines:
                print(line.strip().split(' '))
                event_infor = line.strip().split(' ')
                label = event_infor[0]
                event_name = event_infor[1]
                bcd = event_infor[2]
                simi = event_infor[3]
                if label == 'fn':
                    print(event_name, bcd, simi)
                    fn_event.append((event_name, bcd))
                elif label == 'fp':
                    fp_event.append((event_name, bcd))
        return fn_event, fp_event

    def save_joint_image(self, img_pth1, img_pth2, feature_dicts, record):
        feature_dict1 = feature_dicts[img_pth1]
        feature_dict2 = feature_dicts[img_pth2]
        similarity = self.get_similarity(feature_dict1.cpu().numpy(),
                                         feature_dict2.cpu().numpy())
        dir_name = img_pth1.split('/')[-3]
        save_path = os.sep.join([self.conf['data']['image_joint_pth'], dir_name, record])
        if "fp" in record:
            if similarity > 0.8:
                merge_imgs(img_pth1,
                           img_pth2,
                           self.conf,
                           similarity,
                           label=None,
                           cam=self.cam,
                           save_path=save_path)
        else:
            if similarity < 0.8:
                merge_imgs(img_pth1,
                           img_pth2,
                           self.conf,
                           similarity,
                           label=None,
                           cam=self.cam,
                           save_path=save_path)
        print(similarity)

    def get_contrast(self, feature_dicts):
        events_compare = [self.fp_one2one_event, self.fn_one2one_event, self.fp_one2sn_event, self.fn_one2sn_event]
        event_record = ['fp_one2one', 'fn_one2one', 'fp_one2sn', 'fn_one2sn']
        for event_compare, record in zip(events_compare, event_record):
            for img, img_std in event_compare:
                imgs_pth1 = os.sep.join([self.conf['event']['event_save_dir'],
                                         img,
                                         'subimgs'])
                imgs_pth2 = os.sep.join([self.conf['event']['stdlib_image_path'],
                                         img_std])
                for img1 in os.listdir(imgs_pth1):
                    for img2 in os.listdir(imgs_pth2):
                        img_pth1 = os.sep.join([imgs_pth1, img1])
                        img_pth2 = os.sep.join([imgs_pth2, img2])
                        try:
                            self.save_joint_image(img_pth1, img_pth2, feature_dicts, record)
                        except Exception as e:
                            print(e)
                            continue


if __name__ == '__main__':
    event_similar_analysis = EventSimilarAnalysis()
    if os.path.exists(event_similar_analysis.conf['event']['pickle_path']):
        print('pickle file exists')
    else:
        event_similar_analysis.create_total_feature()  # builds the pickle file; generation is slow and only needs to happen once
    feature_dicts = event_similar_analysis.get_dict_to_pickle()
    # all_compare_img = event_similar_analysis.get_image_map()
    event_similar_analysis.get_contrast(feature_dicts)  # run the comparisons
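Note: the expected oneToOne.txt line format is inferred from the parsing above: label, event name, barcode, then two similarity scores, separated by single spaces. A hypothetical line and its parse (values invented for illustration):

line = "same 20250804_134217_evt 6901234567890 0.83 0.71"
label, event_name, bcd, simi1, simi2 = line.strip().split(' ')
# label 'same' with simi2 below oneToOne_max_th (0.9) -> recorded as a false negative
assert label == 'same' and float(simi2) < 0.9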
@@ -9,8 +9,23 @@ import logging
 class PairGenerator:
     """Generate positive and negative image pairs for contrastive learning."""

-    def __init__(self):
+    def __init__(self, original_path):
         self._setup_logging()
+        self.original_path = original_path
+        self._delete_space()
+
+    def _delete_space(self):  # strip spaces from image file and directory names
+        print(self.original_path)
+        for root, dirs, files in os.walk(self.original_path):
+            for file_name in files:
+                if file_name.endswith(('.jpg', '.png')):
+                    n_file_name = file_name.replace(' ', '')
+                    os.rename(os.path.join(root, file_name), os.path.join(root, n_file_name))
+                if 'rotate' in file_name:
+                    os.remove(os.path.join(root, file_name))
+            for dir_name in dirs:
+                n_dir_name = dir_name.replace(' ', '')
+                os.rename(os.path.join(root, dir_name), os.path.join(root, n_dir_name))

     def _setup_logging(self):
         """Configure logging settings."""

@@ -188,11 +203,11 @@ class PairGenerator:


 if __name__ == "__main__":
-    original_path = '/home/lc/data_center/scatter/v4/val'
+    original_path = '/home/lc/data_center/contrast_data/v1/extra'
     parent_dir = str(Path(original_path).parent)
-    generator = PairGenerator()
+    generator = PairGenerator(original_path)

     # Example usage:
     pairs = generator.get_pairs(original_path,
-                                output_txt=os.sep.join([parent_dir, 'cross_same.txt']))  # Individual pairs
+                                output_txt=os.sep.join([parent_dir, 'extra_cross_same.txt']))  # Individual pairs
     # groups = generator.get_group_pairs('val')  # Group pairs
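Note: a suffix check written as endswith('.jpg' or '.png') evaluates the or first and silently reduces to endswith('.jpg'); str.endswith accepts a tuple of suffixes, which is what _delete_space above relies on:

name = "photo 1.png"
print(name.endswith('.jpg' or '.png'))  # False: '.jpg' or '.png' evaluates to '.jpg'
print(name.endswith(('.jpg', '.png')))  # True: the tuple form checks every suffix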
@@ -18,31 +18,49 @@ def merge_imgs(img1_path, img2_path, conf, similar=None, label=None, cam=None, save_path=None):
         img2 = Image.open(img2_path)
         img1 = img1.resize((224, 224))
         img2 = img2.resize((224, 224))
         new_img = Image.new('RGB', (img1.width + img2.width + 10, img1.height))
         # save_path = conf['data']['image_joint_pth']
     else:
         assert cam is not None, 'cam is None'
         img1 = cam.get_hot_map(img1_path)
         img2 = cam.get_hot_map(img2_path)
+        img1_ori = Image.open(img1_path)
+        img2_ori = Image.open(img2_path)
+        img1_ori = img1_ori.resize((224, 224))
+        img2_ori = img2_ori.resize((224, 224))
+        # canvas tall enough for the originals on top and the heatmaps below
+        new_img = Image.new('RGB',
+                            (img1.width + img2.width + 10,
+                             img1.height + img2.width + 10))
         # save_path = conf['heatmap']['image_joint_pth']
     # print('img1_path', img1)
     # print('img2_path', img2)
-    if not os.path.exists(os.sep.join([save_path, str(label)])):
+    if not os.path.exists(os.sep.join([save_path, str(label)])) and (label is not None):
         os.makedirs(os.sep.join([save_path, str(label)]))
     if save_path is None:
-        save_path = os.sep.join([save_path, str(label)])
-    img_name = os.path.basename(img1_path).split('.')[0] + '_' + os.path.basename(img2_path).split('.')[0] + '.png'
+        # save_path = os.sep.join([save_path, str(label)])
+        pass
+    # img_name = os.path.basename(img1_path).split('.')[0] + '_' + os.path.basename(img2_path).split('.')[0] + '.png'
+    # file names are truncated to 30 characters per source image to keep paths short
+    img_name = os.path.basename(img1_path).split('.')[0][:30] + '_' + os.path.basename(img2_path).split('.')[0][
+               :30] + '.png'
     assert img1.height == img2.height

-    new_img = Image.new('RGB', (img1.width + img2.width + 10, img1.height))
-
-
     # print('new_img', new_img)
-    new_img.paste(img1, (0, 0))
-    new_img.paste(img2, (img1.width + 10, 0))
+    if not conf['heatmap']['show_heatmap']:
+        new_img.paste(img1, (0, 0))
+        new_img.paste(img2, (img1.width + 10, 0))
+    else:
+        # originals on the top row, heatmaps below (all images are 224x224)
+        new_img.paste(img1_ori, (10, 10))
+        new_img.paste(img2_ori, (img2_ori.width + 20, 10))
+        new_img.paste(img1, (10, img1.height + 20))
+        new_img.paste(img2, (img2.width + 20, img2.height + 20))

     if similar is not None:
-        if label == '1' and similar > 0.5:
+        if label == '1' and (similar > 0.5 or similar < 0.25):
             save = False
-        elif label == '0' and similar < 0.5:
+        elif label == '0' and similar > 0.25:
             save = False
         similar = str(similar) + '_' + str(label)
         draw = ImageDraw.Draw(new_img)
@@ -122,7 +122,7 @@ if __name__ == '__main__':

     # Build model
     print('--> Building model')
-    ret = rknn.build(do_quantization=True,
+    ret = rknn.build(do_quantization=False,  # True
                      dataset='./dataset.txt',
                      rknn_batch_size=conf['models']['rknn_batch_size'])
     # ret = rknn.build(do_quantization=False, dataset='./dataset.txt')
tools/picdir_to_picdir_similar.py (new file, 242 lines)
@@ -0,0 +1,242 @@
from similar_analysis import SimilarAnalysis
import os
import pickle
from tools.image_joint import merge_imgs
import yaml
from PIL import Image
import torch
from sklearn.metrics.pairwise import cosine_similarity
import numpy as np
import matplotlib.pyplot as plt

'''
Similarity analysis between tracking images and the standard library
1. Generates similarities between tracking images and every image in the standard library
2. Used to evaluate the selection strategy for matching tracking images against the standard library
'''


class picDirSimilarAnalysis(SimilarAnalysis):
    def __init__(self):
        super(picDirSimilarAnalysis, self).__init__()
        with open('../configs/pic_pic_similar.yml', 'r') as f:
            self.conf = yaml.load(f, Loader=yaml.FullLoader)
        if not os.path.exists(self.conf['data']['total_pkl']):
            # self.create_total_feature()
            self.create_total_pkl()
        if os.path.exists(self.conf['data']['total_pkl']):
            self.all_dicts = self.load_dict_from_pkl()

    def is_image_file(self, filename):
        """Check whether a file is an image file."""
        image_extensions = ('.jpg', '.jpeg', '.png', '.bmp', '.gif', '.tiff')
        return filename.lower().endswith(image_extensions)

    def create_total_pkl(self):  # cache the features of every image under data_dir in a pkl file
        all_images_feature_dict = {}
        for roots, dirs, files in os.walk(self.conf['data']['data_dir']):
            for file_name in files:
                if self.is_image_file(file_name):
                    try:
                        print(f"Processing image {os.sep.join([roots, file_name])}")
                        feature = self.extract_features(os.sep.join([roots, file_name]))
                    except Exception as e:
                        print(f"Error processing image {os.sep.join([roots, file_name])}: {e}")
                        feature = None
                    all_images_feature_dict[os.sep.join([roots, file_name])] = feature
        if not os.path.exists(self.conf['data']['total_pkl']):
            with open(self.conf['data']['total_pkl'], 'wb') as f:
                pickle.dump(all_images_feature_dict, f)

    def load_dict_from_pkl(self):
        with open(self.conf['data']['total_pkl'], 'rb') as f:
            data = pickle.load(f)
        print(f"Dictionary loaded from {self.conf['data']['total_pkl']}")
        return data

    def get_image_files(self, folder_path):
        """Collect all image files inside a folder."""
        image_files = []
        for root, _, files in os.walk(folder_path):
            for file in files:
                if self.is_image_file(file):
                    image_files.append(os.path.join(root, file))
        return image_files

    def extract_features(self, image_path):
        feature_dict = self.get_feature(image_path)
        return feature_dict[image_path]

    def create_one_similarity_matrix(self, folder1_path, folder2_path):
        images1 = self.get_image_files(folder1_path)
        images2 = self.get_image_files(folder2_path)

        print(f"Folder 1 ({folder1_path}) contains {len(images1)} images")
        print(f"Folder 2 ({folder2_path}) contains {len(images2)} images")

        if len(images1) == 0 or len(images2) == 0:
            raise ValueError("At least one folder contains no image files")

        # Extract features for every image in folder 1
        features1 = []
        print("Extracting features for images in folder 1...")
        for i, img_path in enumerate(images1):
            try:
                # feature = self.extract_features(img_path)
                feature = self.all_dicts[img_path]
                features1.append(feature.cpu().numpy())
                # if (i + 1) % 10 == 0:
                #     print(f"Processed {i + 1}/{len(images1)} images")
            except Exception as e:
                print(f"Error processing image {img_path}: {e}")
                features1.append(None)

        # Extract features for every image in folder 2
        features2 = []
        print("Extracting features for images in folder 2...")
        for i, img_path in enumerate(images2):
            try:
                # feature = self.extract_features(img_path)
                feature = self.all_dicts[img_path]
                features2.append(feature.cpu().numpy())
                # if (i + 1) % 10 == 0:
                #     print(f"Processed {i + 1}/{len(images2)} images")
            except Exception as e:
                print(f"Error processing image {img_path}: {e}")
                features2.append(None)

        # Drop images whose feature extraction failed
        valid_features1 = []
        valid_images1 = []
        for i, feature in enumerate(features1):
            if feature is not None:
                valid_features1.append(feature)
                valid_images1.append(images1[i])

        valid_features2 = []
        valid_images2 = []
        for i, feature in enumerate(features2):
            if feature is not None:
                valid_features2.append(feature)
                valid_images2.append(images2[i])

        # print(f"Successfully processed {len(valid_features1)} images from folder 1")
        # print(f"Successfully processed {len(valid_features2)} images from folder 2")

        if len(valid_features1) == 0 or len(valid_features2) == 0:
            raise ValueError("No images were processed successfully")

        # Compute the similarity matrix
        print("Computing the similarity matrix...")
        similarity_matrix = cosine_similarity(valid_features1, valid_features2)

        return similarity_matrix, valid_images1, valid_images2

    def get_group_similarity_matrix(self, folder_path):
        tracking_folder = os.sep.join([folder_path, 'tracking'])
        standard_folder = os.sep.join([folder_path, 'standard_slim'])
        for dir_name in os.listdir(tracking_folder):
            tracking_dir = os.sep.join([tracking_folder, dir_name])
            standard_dir = os.sep.join([standard_folder, dir_name])
            similarity_matrix, valid_images1, valid_images2 = self.create_one_similarity_matrix(tracking_dir,
                                                                                                standard_dir)
            mean_similarity = np.mean(similarity_matrix)
            std_similarity = np.std(similarity_matrix)
            max_similarity = np.max(similarity_matrix)
            min_similarity = np.min(similarity_matrix)
            print(f"Similarity matrix for folder {dir_name} computed: "
                  f"mean {mean_similarity}, std {std_similarity}, max {max_similarity}, min {min_similarity}")
            result = f"{os.path.basename(standard_folder)} {dir_name} {mean_similarity:.3f} {std_similarity:.3f} {max_similarity:.3f} {min_similarity:.3f}"
            with open(self.conf['data']['result_txt'], 'a') as f:
                f.write(result + '\n')


def read_result_txt():
    parts = []
    value_num = 2
    with open('../configs/pic_pic_similar.yml', 'r') as f:
        conf = yaml.load(f, Loader=yaml.FullLoader)
    with open(conf['data']['result_txt'], 'r') as f:
        lines = f.readlines()
        for line in lines:
            line = line.strip()
            if line:
                parts.append(line.split(' '))
    parts = np.array(parts)
    print(parts)
    labels = ['Mean', 'Std', 'Max', 'Min']
    while value_num < 6:
        dicts = {}
        for barcode, value in zip(parts[:, 1], parts[:, value_num]):
            if barcode in dicts:
                dicts[barcode].append(float(value))
            else:
                dicts[barcode] = [float(value)]
        get_histogram(dicts, labels[value_num - 2])
        value_num += 1


def get_histogram(data, label=None):
    # Prepare the data
    categories = list(data.keys())
    values1 = [data[cat][0] for cat in categories]  # first value per barcode
    values2 = [data[cat][1] for cat in categories]  # second value per barcode

    # Bar positions
    x = np.arange(len(categories))  # label positions
    width = 0.35  # bar width

    # Create the figure and axis
    fig, ax = plt.subplots(figsize=(10, 6))

    # Draw the bars
    bars1 = ax.bar(x - width / 2, values1, width, label='standard', color='red', alpha=0.7)
    bars2 = ax.bar(x + width / 2, values2, width, label='standard_slim', color='green', alpha=0.7)

    # Annotate every bar with its value
    for bar in bars1:
        height = bar.get_height()
        ax.annotate(f'{height:.3f}',
                    xy=(bar.get_x() + bar.get_width() / 2, height),
                    xytext=(0, 3),  # 3-point vertical offset
                    textcoords="offset points",
                    ha='center', va='bottom',
                    fontsize=12)

    for bar in bars2:
        height = bar.get_height()
        ax.annotate(f'{height:.3f}',
                    xy=(bar.get_x() + bar.get_width() / 2, height),
                    xytext=(0, 3),  # 3-point vertical offset
                    textcoords="offset points",
                    ha='center', va='bottom',
                    fontsize=12)

    # Labels and title
    if label is None:
        label = ''
    ax.set_xlabel('barcode')
    ax.set_ylabel('Values')
    ax.set_title(label)
    ax.set_xticks(x)
    ax.set_xticklabels(categories)
    ax.legend()

    # Grid
    ax.grid(True, alpha=0.3)

    # Layout and display
    plt.tight_layout()
    plt.show()


if __name__ == '__main__':
    # picTopic_matrix = picDirSimilarAnalysis()
    # picTopic_matrix.get_group_similarity_matrix('/home/lc/data_center/image_analysis/pic_pic_similar_maxtrix')
    read_result_txt()
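Note: create_one_similarity_matrix ultimately reduces to a single sklearn call; a standalone illustration of the matrix and the statistics written to result.txt (random embeddings, purely illustrative):

import numpy as np
from sklearn.metrics.pairwise import cosine_similarity

rng = np.random.default_rng(0)
tracking_feats = rng.normal(size=(5, 256))  # 5 tracking-image embeddings
standard_feats = rng.normal(size=(8, 256))  # 8 standard-library embeddings

sim = cosine_similarity(tracking_feats, standard_feats)  # shape (5, 8)
print(f"{sim.mean():.3f} {sim.std():.3f} {sim.max():.3f} {sim.min():.3f}")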
@@ -10,7 +10,7 @@ import yaml
 import os


-class analysis:
+class SimilarAnalysis:
     def __init__(self):
         with open('../configs/similar_analysis.yml', 'r') as f:
             self.conf = yaml.load(f, Loader=yaml.FullLoader)

@@ -53,6 +53,7 @@ class analysis:
     def get_feature_map(self, all_imgs):
         feature_dicts = {}
         for img_pth in all_imgs:
+            print(f"Processing {img_pth}")
             feature_dict = self.get_feature(img_pth)
             feature_dicts = dict(ChainMap(feature_dict, feature_dicts))
         return feature_dicts
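Note: rebuilding the dict via dict(ChainMap(...)) copies the whole accumulated map on every iteration, which is quadratic in the number of images; a plain dict.update performs the same merge in place:

feature_dicts = {}
for feature_dict in ({'a.jpg': 1}, {'b.jpg': 2}):  # stand-ins for get_feature(...) results
    feature_dicts.update(feature_dict)             # merge in place instead of dict(ChainMap(...))
print(feature_dicts)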
@@ -85,7 +86,7 @@ class analysis:
         feature_dict2 = feature_dicts[img_pth2]
         similarity = self.get_similarity(feature_dict1.cpu().numpy(),
                                          feature_dict2.cpu().numpy())
-        dir_name = img_pth1.split(os.sep)[-3]
+        dir_name = img_pth1.split('/')[-3]
         save_path = os.sep.join([self.conf['data']['image_joint_pth'], dir_name])
         if similarity > 0.7:
             merge_imgs(img_pth1,

@@ -99,7 +100,7 @@ class analysis:


 if __name__ == '__main__':
-    ana = analysis()
+    ana = SimilarAnalysis()
     all_imgs = ana.create_total_feature()
     feature_dicts = ana.get_feature_map(all_imgs)
     all_compare_img = ana.get_image_map()