This commit is contained in:
lichen
2022-06-02 16:23:27 +08:00
parent 0b34aa05e0
commit b84a92f67a
8 changed files with 82 additions and 31 deletions

Binary file not shown.

59
ieemoo-ai-isempty.py Normal file → Executable file
View File

@ -13,6 +13,23 @@ from torchvision import transforms
from models.modeling import VisionTransformer, CONFIGS
sys.path.insert(0, ".")
import logging.config
from skywalking import agent, config
SW_SERVER = os.environ.get('SW_AGENT_COLLECTOR_BACKEND_SERVICES')
SW_SERVICE_NAME = os.environ.get('SW_AGENT_NAME')
if SW_SERVER and SW_SERVICE_NAME:
config.init() #采集服务的地址,给自己的服务起个名称
#config.init(collector="123.60.56.51:11800", service='ieemoo-ai-search') #采集服务的地址,给自己的服务起个名称
agent.start()
def setup_logging(path):
    """Configure the logging system from a JSON dictConfig file.

    Args:
        path: Filesystem path to a JSON file holding a ``logging.config.dictConfig``
            compatible dictionary (e.g. ``utils/logging.json``).

    Returns:
        logging.Logger: The logger named ``"root"``. If *path* does not exist,
        the logging system is left unconfigured and the bare logger is returned.
    """
    if os.path.exists(path):
        # Explicit encoding: the config may contain non-ASCII text (file is UTF-8).
        with open(path, 'r', encoding='utf-8') as f:
            # Renamed from `config` to avoid shadowing the module-level
            # `config` imported from skywalking in this file.
            log_config = json.load(f)
        logging.config.dictConfig(log_config)
    logger = logging.getLogger("root")
    return logger
logger = setup_logging('utils/logging.json')
app = Flask(__name__)
app.use_reloader=False
@ -34,7 +51,7 @@ class Predictor(object):
def __init__(self, args):
self.args = args
self.args.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print(self.args.device)
#print(self.args.device)
self.args.nprocs = torch.cuda.device_count()
self.cls_dict = {}
self.num_classes = 0
@ -49,7 +66,7 @@ class Predictor(object):
config.split = self.args.split
config.slide_step = self.args.slide_step
model_name = os.path.basename(self.args.pretrained_model).replace("_checkpoint.bin", "")
print("use model_name: ", model_name)
#print("use model_name: ", model_name)
self.num_classes = 5
self.cls_dict = {0: "noemp", 1: "yesemp", 2: "hard", 3: "fly", 4: "stack"}
self.model = VisionTransformer(config, self.args.img_size, zero_head=True, num_classes=self.num_classes, smoothing_value=self.args.smoothing_value)
@ -67,7 +84,8 @@ class Predictor(object):
def normal_predict(self, img_data, result):
# img = Image.open(img_path)
if img_data is None:
print('error, img data is None')
#print('error, img data is None')
logger.warning('error, img data is None')
return result
else:
with torch.no_grad():
@ -79,7 +97,7 @@ class Predictor(object):
topN = torch.argsort(probs, dim=-1, descending=True).tolist()
clas_ids = topN[0][0]
clas_ids = 0 if 0==int(clas_ids) or 2 == int(clas_ids) or 3 == int(clas_ids) else 1
print("cur_img result: class id: %d, score: %0.3f" % (clas_ids, probs[0, clas_ids].item()))
#print("cur_img result: class id: %d, score: %0.3f" % (clas_ids, probs[0, clas_ids].item()))
result["success"] = "true"
result["rst_cls"] = str(clas_ids)
return result
@ -94,14 +112,15 @@ predictor = Predictor(args)
@app.route("/isempty", methods=['POST'])
def get_isempty():
start = time.time()
print('--------------------EmptyPredict-----------------')
#print('--------------------EmptyPredict-----------------')
data = request.get_data()
ip = request.remote_addr
print('------ ip = %s ------' % ip)
#print('------ ip = %s ------' % ip)
logger.info(ip)
json_data = json.loads(data.decode("utf-8"))
getdateend = time.time()
print('get date use time: {0:.2f}s'.format(getdateend - start))
#print('get date use time: {0:.2f}s'.format(getdateend - start))
pic = json_data.get("pic")
result = {"success": "false",
@ -113,29 +132,11 @@ def get_isempty():
img_src = cv2.imdecode(imgdata_np, cv2.IMREAD_COLOR)
img_data = Image.fromarray(np.uint8(img_src))
result = predictor.normal_predict(img_data, result) # 1==empty, 0==nonEmpty
except:
except Exception as e:
logger.warning(e)
return repr(result)
logger.info(repr(result))
return repr(result)
if __name__ == "__main__":
app.run()
# http_server = WSGIServer(('0.0.0.0',8000), app)
# http_server.serve_forever()
app.run(host='192.168.1.142', port=8000)

0
init.sh Normal file → Executable file
View File

0
requirements.txt Normal file → Executable file
View File

0
start.sh Normal file → Executable file
View File

0
stop.sh Normal file → Executable file
View File

View File

@ -20,10 +20,11 @@ from models.modeling import VisionTransformer, CONFIGS
from utils.scheduler import WarmupLinearSchedule, WarmupCosineSchedule
from utils.data_utils import get_loader
from utils.dist_util import get_world_size
import pdb
logger = logging.getLogger(__name__)
os.environ["CUDA_VISIBLE_DEVICES"] = "0,1,2"
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
@ -92,12 +93,14 @@ def setup(args):
if args.pretrained_model is not None:
pretrained_model = torch.load(args.pretrained_model)['model']
model.load_state_dict(pretrained_model)
model.to(args.device)
#model.to(args.device)
#pdb.set_trace()
num_params = count_parameters(model)
logger.info("{}".format(config))
logger.info("Training parameters %s", args)
logger.info("Total Parameter: \t%2.1fM" % num_params)
model = torch.nn.DataParallel(model, device_ids=[0,1]).cuda()
return args, model
@ -351,6 +354,7 @@ def main():
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print('torch.cuda.device_count()>>>>>>>>>>>>>>>>>>>>>>>>>', torch.cuda.device_count())
args.n_gpu = torch.cuda.device_count()
print('torch.cuda.device_count()>>>>>>>>>>>>>>>>>>>>>>>>>', torch.cuda.device_count())
else: # Initializes the distributed backend which will take care of sychronizing nodes/GPUs
torch.cuda.set_device(args.local_rank)
device = torch.device("cuda", args.local_rank)

46
utils/logging.json Normal file
View File

@ -0,0 +1,46 @@
{
"version":1,
"disable_existing_loggers":false,
"formatters":{
"simple":{
"format":"%(asctime)s - %(module)s - %(thread)d - %(levelname)s : %(message)s"
}
},
"handlers":{
"console":{
"class":"logging.StreamHandler",
"level":"DEBUG",
"formatter":"simple",
"stream":"ext://sys.stdout"
},
"info_file_handler":{
"class":"logging.handlers.RotatingFileHandler",
"level":"INFO",
"formatter":"simple",
"filename":"../log/ieemoo-ai-isempty-biz.log",
"maxBytes":10485760,
"backupCount":20,
"encoding":"utf8"
},
"error_file_handler":{
"class":"logging.handlers.RotatingFileHandler",
"level":"ERROR",
"formatter":"simple",
"filename":"../log/ieemoo-ai-isempty-biz.log",
"maxBytes":10485760,
"backupCount":20,
"encoding":"utf8"
}
},
"loggers":{
"my_module":{
"level":"ERROR",
"handlers":["info_file_handler"],
"propagate":false}
},
"root":{
"level":"INFO",
"handlers":["console","info_file_handler","error_file_handler"]
}
}