Files
ieemoo-ai-searchv2/utils/tools.py
2022-11-22 15:32:06 +08:00

125 lines
4.0 KiB
Python

#from config import cfg
from utils.config import cfg
from cirtorch.networks.imageretrievalnet import init_network, extract_vectors
import torch
from torchvision import transforms
import cv2
import numpy as np
import requests
import os, scipy, math
import http.client
#http.client.HTTPConnection._http_vsn = 10
#http.client.HTTPConnection._http_vsn_str = 'HTTP/1.0'
def rotate_bound(image, angle):
    """Rotate *image* by *angle* degrees, enlarging the canvas so no pixels are clipped.

    Parameters
    ----------
    image : np.ndarray
        Input image of shape (H, W[, C]).
    angle : float
        Rotation in degrees; the matrix is built with ``-angle`` to match the
        original sign convention.

    Returns
    -------
    np.ndarray
        The rotated image on a bounding-box-sized canvas.
    """
    height, width = image.shape[:2]
    center_x, center_y = width // 2, height // 2

    # 2x3 affine rotation about the image centre.
    matrix = cv2.getRotationMatrix2D((center_x, center_y), -angle, 1.0)
    abs_cos = np.abs(matrix[0, 0])
    abs_sin = np.abs(matrix[0, 1])

    # Size of the axis-aligned box that contains the rotated image.
    new_w = int(height * abs_sin + width * abs_cos)
    new_h = int(height * abs_cos + width * abs_sin)

    # Translate so the rotation centre lands in the middle of the new canvas.
    matrix[0, 2] += (new_w / 2) - center_x
    matrix[1, 2] += (new_h / 2) - center_y
    return cv2.warpAffine(image, matrix, (new_w, new_h))
def createNet():
    """Load the image-retrieval network from the checkpoint at ``cfg.NETWORK``.

    Returns
    -------
    net : torch.nn.Module
        Initialized retrieval network in eval mode (moved to GPU when available).
    transform : torchvision.transforms.Compose
        ToTensor + per-model normalization to preprocess input images.
    ms : list
        Multi-scale factors for feature extraction (single scale: [1]).
    """
    # Single-scale extraction. Was `list(eval('[1]'))` — eval on a string
    # literal is both pointless and a bad habit; use the literal directly.
    multiscale = [1]

    print(">> Loading network:\n>>>> '{}'".format(cfg.NETWORK))
    # map_location keeps loading working on CPU-only hosts even when the
    # checkpoint was saved from a GPU process.
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    state = torch.load(cfg.NETWORK, map_location=device)

    meta = state['meta']
    net_params = {
        'architecture': meta['architecture'],
        'pooling': meta['pooling'],
        # Older checkpoints may lack these flags; default to False.
        'local_whitening': meta.get('local_whitening', False),
        'regional': meta.get('regional', False),
        'whitening': meta.get('whitening', False),
        'mean': meta['mean'],
        'std': meta['std'],
        # Weights come from the checkpoint below, not from a pretrained zoo.
        'pretrained': False,
    }
    net = init_network(net_params)
    net.load_state_dict(state['state_dict'])
    print(">>>> loaded network: ")
    print(net.meta_repr())

    ms = list(multiscale)
    print(">>>> Evaluating scales: {}".format(ms))

    if torch.cuda.is_available():
        net.cuda()
    net.eval()  # inference mode: freezes dropout / batch-norm statistics

    normalize = transforms.Normalize(mean=net.meta['mean'], std=net.meta['std'])
    transform = transforms.Compose([
        transforms.ToTensor(),
        normalize,
    ])
    return net, transform, ms
class ManagingFeature:
    """Create/read/delete image features via the web service rooted at ``cfg.URL``."""

    def __init__(self):
        pass

    def addfeature(self, code, feature):
        """POST a feature vector for *code*; returns the raw response text.

        BUG FIX: URLs were joined with ``os.sep`` which produces backslashes
        on Windows; URL path separators are always '/'.
        """
        url = '/'.join([cfg.URL, 'addImageFeatureInfo.do'])
        payload = {'code': code,
                   'featureVal': feature}
        r = requests.post(url=url, data=payload)
        return r.text

    def deletefeature(self, code, timeStamp):
        """Delete features for *code* at *timeStamp* (e.g. "2022/02/10 17:59:59").

        Returns the parsed JSON response.
        """
        # Endpoint name 'deletImageFeatureInfo.do' kept verbatim — the typo
        # presumably exists server-side too; do not "fix" it here.
        url = '/'.join([cfg.URL, 'deletImageFeatureInfo.do'])
        params = {'code': code,
                  'timeStamp': timeStamp}
        r = requests.get(url=url, params=params)
        # BUG FIX: was `return r.json` (the bound method object); call it to
        # return the decoded JSON body.
        return r.json()

    def getfeature(self, code):
        """Return the stored feature data for *code*, or None on any failure."""
        try:
            url = '/'.join([cfg.URL, 'getImageFeatureInfo.do'])
            params = {'code': code}
            r = requests.get(url=url, params=params)
            return r.json()['data']
        except Exception:
            # Best-effort lookup: log and return None instead of crashing the
            # caller (previously returned None implicitly).
            print('>>>>>get feature error<<<<<<')
            return None
class EvaluteMap():
    """Compute match distances between query features and stored feature sets."""

    def __init__(self):
        # Client for the feature web service (used by callers, not by the
        # matching methods below).
        self.MF = ManagingFeature()

    def match_feature(self, features, search_r):
        """Mean of the 5 smallest pairwise L2 distances between *features*
        and *search_r* (both iterables of array-like vectors)."""
        dists = [np.linalg.norm(f - r) for f in features for r in search_r]
        # BUG FIX: scipy.mean was deprecated and removed (SciPy >= 1.12);
        # np.mean is the drop-in replacement.
        return np.mean(sorted(dists)[:5])

    def match_feature_single(self, feature, search_r):
        """Mean of the 2 smallest L2 distances between a single *feature*
        vector and each vector in *search_r*."""
        query = np.array(feature)
        dists = [np.linalg.norm(query - np.array(r)) for r in search_r]
        return np.mean(sorted(dists)[:2])

    def match_images(self, feature_dict, barcode, choose=False, mod='batch'):
        """Dispatch to batch or single matching.

        NOTE(review): despite its name, *barcode* carries the stored feature
        vectors here (see the commented-out getfeature call in the original);
        *choose* is currently unused.
        """
        if mod == 'batch':
            return self.match_feature(feature_dict, barcode)
        return self.match_feature_single(feature_dict, barcode)
if __name__ == '__main__':
    # Module is normally imported; the commented lines below are a manual
    # smoke test of the feature-lookup service, kept for reference.
    pass
    # MF = ManagingFeature()
    # result = MF.getfeature('7613035075443')
    # print(result)