2023-02-13 15:04:44 +08:00
parent 07b0d54146
commit 3014078e68
5 changed files with 492 additions and 0 deletions

.gitignore (vendored, 4 additions)

@@ -136,3 +136,7 @@ dmypy.json
# Cython debug symbols
cython_debug/
*.jpg
*.png
*.pth

checkobject.py (new file, 63 additions)

@@ -0,0 +1,63 @@
import cv2 as cv
# from segmentation import get_object_mask
import os, time


def get_object_location(pfile, mask_path='lianhua_1.jpg'):
    kernel = cv.getStructuringElement(cv.MORPH_ELLIPSE, (3, 3))  # structuring element for dilation
    fgbg = cv.createBackgroundSubtractorMOG2(detectShadows=False)
    nu, nn = 0, 1
    flag = False
    T1 = time.time()
    # open the video file
    cap = cv.VideoCapture(pfile)
    while True:
        # read one frame
        ret, frame = cap.read()
        nn += 1
        if not ret:
            break
        if flag:
            flag = False
            print('flag change>>{}>>{}'.format(pfile, nn))
            cap.release()
            return '1'
        frame = cv.resize(frame, (512, 640), interpolation=cv.INTER_CUBIC)
        frame = cv.medianBlur(frame, ksize=3)
        frame_motion = frame.copy()
        # compute the foreground mask
        fgmask = fgbg.apply(frame_motion)
        draw1 = cv.threshold(fgmask, 25, 255, cv.THRESH_BINARY)[1]  # binarize
        draw1 = cv.dilate(draw1, kernel, iterations=1)
        if nn < 10:  # only look for intrusion motion within the first 10 frames
            flag = check_tings(mask_path, draw1) == '1'
    cap.release()
    T2 = time.time()
    print('single video >>> {}-->{}-->{}'.format(pfile, nn, (T2 - T1)))
    return '0'


def check_tings(mask_path, img):
    dics = {}
    mask_img = cv.imread(mask_path)
    # restrict the motion mask to the region of interest defined by the mask image
    img = cv.bitwise_and(mask_img[:, :, 0], img)
    contours_m, hierarchy_m = cv.findContours(img.copy(), cv.RETR_EXTERNAL, cv.CHAIN_APPROX_SIMPLE)
    for contour in contours_m:
        # print('contour', hierarchy_m)
        dics[len(contour)] = contour
    if len(dics.keys()) > 0:
        cc = sorted(dics.keys())
        iouArea = cv.contourArea(dics[cc[-1]])
        # keep only motion regions whose area falls in the expected object-size range
        if iouArea > 10000 and iouArea < 40000:
            return '1'
        else:
            return '0'
    else:
        return '0'


if __name__ == '__main__':
    pfile = "videos/20230130-100958_e5910f7d-90dd-4f6b-9468-689ba45fe656.mp4"
    mask_path = 'models/lianhua_1.jpg'
    # frame_path = 'frame'
    # result_path = 'result'
    get_object_location(pfile, mask_path)
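Note: a minimal batch-usage sketch of this module, not part of the commit. It assumes the clips to screen sit in a local videos/ directory and that the region mask lives at models/lianhua_1.jpg, matching the paths used in the __main__ block above; get_object_location returns the string '1' when masked motion is detected early in the clip, otherwise '0'.

    import os
    from checkobject import get_object_location

    video_dir = 'videos'                 # assumed location of the clips
    mask_path = 'models/lianhua_1.jpg'   # region-of-interest mask, as in __main__ above
    for fname in sorted(os.listdir(video_dir)):
        if fname.endswith('.mp4'):
            result = get_object_location(os.path.join(video_dir, fname), mask_path)
            print(fname, result)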

ieemoo-ai-filtervideo.py (new file, 23 additions)

@@ -0,0 +1,23 @@
from flask import request, Flask, jsonify
from checkobject import get_object_location
import os
import requests

app = Flask(__name__)


@app.route('/filtervideo', methods=['POST'])
def filtervideo():
    videourls = request.form.get('videoUrls')
    videoid = request.form.get('id')
    videopath = os.sep.join(['data', videoid + '.mp4'])
    barcode = request.form.get('barcode')
    videourls = videourls.split(',')
    results = []
    for url in videourls:
        # download the video and write it to the local path
        resp = requests.get(url)
        with open(videopath, 'wb') as f:
            f.write(resp.content)
        flag = get_object_location(videopath)
        results.append({'id': videoid, 'result': flag})
    return jsonify(results)


if __name__ == '__main__':
    app.run()
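Note: a minimal client-side sketch of calling the new endpoint, not part of the commit. The form field names (videoUrls, id, barcode) come from the route above; the host/port (Flask's development default) and the example URL, id, and barcode values are assumptions.

    import requests

    resp = requests.post(
        'http://127.0.0.1:5000/filtervideo',   # default Flask dev-server address (assumption)
        data={
            'videoUrls': 'http://example.com/a.mp4,http://example.com/b.mp4',
            'id': 'demo-id',
            'barcode': '0000000000000',
        },
    )
    print(resp.json())   # e.g. [{'id': 'demo-id', 'result': '0'}, ...]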

segpredict.py (new file, 180 additions)

@@ -0,0 +1,180 @@
import os
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
import numpy as np
import cv2
import matplotlib.pyplot as plt
import albumentations as albu
import torch
import segmentation_models_pytorch as smp
from torch.utils.data import Dataset as BaseDataset
import imageio
# ---------------------------------------------------------------
### Dataloader
class Dataset(BaseDataset):
    """CamVid-style dataset. Reads images and applies augmentation and preprocessing.
    Args:
        images_dir (str): path to the image folder
        masks_dir (str): path to the segmentation label images
        class_values (list): class values to extract for segmentation
        augmentation (albumentations.Compose): data augmentation pipeline
        preprocessing (albumentations.Compose): data preprocessing pipeline
    """
    # all label classes in the CamVid dataset
    # CLASSES = ['sky', 'building', 'pole', 'road', 'pavement',
    #            'tree', 'signsymbol', 'fence', 'car',
    #            'pedestrian', 'bicyclist', 'unlabelled']
    CLASSES = ['front']

    def __init__(
            self,
            images_dir,
            # masks_dir,
            classes=None,
            augmentation=None,
            preprocessing=None,
    ):
        self.ids = os.listdir(images_dir)
        self.images_fps = [os.path.join(images_dir, image_id) for image_id in self.ids]
        # convert str names to class values on masks
        self.class_values = [self.CLASSES.index(cls.lower()) for cls in classes]
        self.augmentation = augmentation
        self.preprocessing = preprocessing

    def __getitem__(self, i):
        # read data
        image = cv2.imread(self.images_fps[i])
        image = cv2.resize(image, (512, 512))  # change image resolution
        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        # apply augmentation
        if self.augmentation:
            sample = self.augmentation(image=image)
            image = sample['image']
        # apply preprocessing
        if self.preprocessing:
            sample = self.preprocessing(image=image)
            image = sample['image']
        return image

    def __len__(self):
        return len(self.ids)
# ---------------------------------------------------------------
def get_validation_augmentation():
    """Pad the image so that its height and width are divisible by 32."""
    test_transform = [
        albu.PadIfNeeded(384, 480)
    ]
    return albu.Compose(test_transform)


def to_tensor(x, **kwargs):
    return x.transpose(2, 0, 1).astype('float32')


def get_preprocessing(preprocessing_fn):
    """Build the image preprocessing pipeline.
    Args:
        preprocessing_fn (callable): data normalization function
            (specific to each pretrained encoder)
    Return:
        transform: albumentations.Compose
    """
    _transform = [
        albu.Lambda(image=preprocessing_fn),
        albu.Lambda(image=to_tensor),
    ]
    return albu.Compose(_transform)


# visualization of segmentation results
def visualize(**images):
    """Plot images in one row."""
    n = len(images)
    plt.figure(figsize=(16, 5))
    for i, (name, image) in enumerate(images.items()):
        plt.subplot(1, n, i + 1)
        plt.xticks([])
        plt.yticks([])
        plt.title(' '.join(name.split('_')).title())
        plt.imshow(image)
    plt.show()
# ---------------------------------------------------------------
if __name__ == '__main__':
    DATA_DIR = './data/CamVid/'
    x_test_dir = os.path.join(DATA_DIR, 'abc')
    img_test = cv2.imread('data/CamVid/abc/pic_unscan_front.jpg')
    height = img_test.shape[0]
    width = img_test.shape[1]
    print(type(img_test))
    print('>>>>>>shape {}'.format(img_test.shape))
    # ENCODER = 'resnet18'
    ENCODER = 'mobilenet_v2'
    ENCODER_WEIGHTS = 'imagenet'
    CLASSES = ['front']
    ACTIVATION = 'sigmoid'  # could be None for logits or 'softmax2d' for multiclass segmentation
    DEVICE = 'cuda'
    # prepare data the same way as during pretraining of the encoder weights
    preprocessing_fn = smp.encoders.get_preprocessing_fn(ENCODER, ENCODER_WEIGHTS)
    # load the best model
    best_model = torch.load('./best_model.pth')
    # build the prediction dataset
    predict_dataset = Dataset(
        x_test_dir,
        augmentation=get_validation_augmentation(),
        preprocessing=get_preprocessing(preprocessing_fn),
        classes=CLASSES,
    )
    # segment the test images and (optionally) visualize the results
    predict_dataset_vis = Dataset(
        x_test_dir,
        classes=CLASSES,
    )
    for i in range(len(predict_dataset)):
        # original image image_vis
        image_vis = predict_dataset_vis[i].astype('uint8')
        image = predict_dataset[i]
        # 0-1 mask pr_mask produced by the segmentation model
        x_tensor = torch.from_numpy(image).to(DEVICE).unsqueeze(0)
        pr_mask = best_model.predict(x_tensor)
        pr_mask = (pr_mask.squeeze().cpu().numpy().round())
        print('>>>>>>> pr_mask{}'.format(pr_mask.shape))
        print('>>>>>>{} {}'.format(height, width))
        # restore the original image resolution (keep only the 'front' channel)
        # image_vis = cv2.resize(image_vis, (width, height))
        # pr_mask = cv2.resize(pr_mask, (width, height))
        pr_mask = cv2.resize(pr_mask[0, :, :], (width, height))
        # save the black-and-white segmentation result (scaled to 0-255 so the PNG is visible)
        imageio.imwrite('f_test_out.png', (pr_mask * 255).astype('uint8'))
        # visualize the original image and the segmentation result
        # visualize(
        #     image=image_vis,
        #     predicted_mask=pr_mask
        # )

segtrain.py (new file, 222 additions)

@@ -0,0 +1,222 @@
import os
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
import numpy as np
import cv2
import matplotlib.pyplot as plt
import albumentations as albu
import torch
import segmentation_models_pytorch as smp
from torch.utils.data import DataLoader
from torch.utils.data import Dataset as BaseDataset
# ---------------------------------------------------------------
class Dataset(BaseDataset):
    """Reads images and masks, applies augmentation and preprocessing."""
    # CLASSES = ['sky', 'building', 'pole', 'road', 'pavement',
    #            'tree', 'signsymbol', 'fence', 'car',
    #            'pedestrian', 'bicyclist', 'unlabelled']
    CLASSES = ['front', 'background']

    def __init__(
            self,
            images_dir,
            masks_dir,
            classes=None,
            augmentation=None,
            preprocessing=None,
    ):
        self.ids = os.listdir(images_dir)
        self.images_fps = [os.path.join(images_dir, image_id) for image_id in self.ids]
        self.masks_fps = [os.path.join(masks_dir, image_id) for image_id in self.ids]
        # convert str names to class values on masks
        self.class_values = [self.CLASSES.index(cls.lower()) for cls in classes]
        self.augmentation = augmentation
        self.preprocessing = preprocessing

    def __getitem__(self, i):
        # read image and mask
        image = cv2.imread(self.images_fps[i])
        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        mask = cv2.imread(self.masks_fps[i], 0)
        # one-hot encode the mask for the requested classes
        masks = [(mask == v) for v in self.class_values]
        mask = np.stack(masks, axis=-1).astype('float')
        if self.augmentation:
            # print('>>>>>>>{}'.format(image.shape[:2]))
            sample = self.augmentation(image=image, mask=mask)
            image, mask = sample['image'], sample['mask']
        if self.preprocessing:
            sample = self.preprocessing(image=image, mask=mask)
            image, mask = sample['image'], sample['mask']
        return image, mask

    def __len__(self):
        return len(self.ids)
# ---------------------------------------------------------------
def get_training_augmentation():
    train_transform = [
        albu.HorizontalFlip(p=0.5),
        albu.ShiftScaleRotate(scale_limit=0.5, rotate_limit=0, shift_limit=0.1, p=1, border_mode=0),
        albu.PadIfNeeded(min_height=320, min_width=320, always_apply=True, border_mode=0),
        albu.RandomCrop(height=320, width=320, always_apply=True),
        albu.IAAAdditiveGaussianNoise(p=0.2),
        albu.IAAPerspective(p=0.5),
        albu.OneOf(
            [
                albu.CLAHE(p=1),
                albu.RandomBrightness(p=1),
                albu.RandomGamma(p=1),
            ],
            p=0.9,
        ),
        albu.OneOf(
            [
                albu.IAASharpen(p=1),
                albu.Blur(blur_limit=3, p=1),
                albu.MotionBlur(blur_limit=3, p=1),
            ],
            p=0.9,
        ),
        albu.OneOf(
            [
                albu.RandomContrast(p=1),
                albu.HueSaturationValue(p=1),
            ],
            p=0.9,
        ),
    ]
    return albu.Compose(train_transform)


def get_validation_augmentation():
    test_transform = [
        albu.PadIfNeeded(384, 480)
    ]
    return albu.Compose(test_transform)


def to_tensor(x, **kwargs):
    return x.transpose(2, 0, 1).astype('float32')


def get_preprocessing(preprocessing_fn):
    _transform = [
        albu.Lambda(image=preprocessing_fn),
        albu.Lambda(image=to_tensor, mask=to_tensor),
    ]
    return albu.Compose(_transform)
# ---------------------------------------------------------------
if __name__ == '__main__':
    DATA_DIR = './data/CamVid/'
    if not os.path.exists(DATA_DIR):
        print('Loading data...')
        os.system('git clone https://github.com/alexgkendall/SegNet-Tutorial ./data')
        print('Done!')
    x_train_dir = os.path.join(DATA_DIR, 'train')
    y_train_dir = os.path.join(DATA_DIR, 'trainannot')
    x_valid_dir = os.path.join(DATA_DIR, 'val')
    y_valid_dir = os.path.join(DATA_DIR, 'valannot')
    # ENCODER = 'se_resnext50_32x4d'
    # ENCODER = 'resnet18'
    ENCODER = 'mobilenet_v2'
    ENCODER_WEIGHTS = 'imagenet'
    CLASSES = ['front', 'background']
    ACTIVATION = 'sigmoid'  # could be None for logits or 'softmax2d' for multiclass segmentation
    DEVICE = 'cuda'
    # model = smp.UnetPlusPlus(
    model = smp.Unet(
        encoder_name=ENCODER,
        encoder_weights=ENCODER_WEIGHTS,
        classes=len(CLASSES),
        activation=ACTIVATION,
    )
    preprocessing_fn = smp.encoders.get_preprocessing_fn(ENCODER, ENCODER_WEIGHTS)
    train_dataset = Dataset(
        x_train_dir,
        y_train_dir,
        augmentation=get_training_augmentation(),
        preprocessing=get_preprocessing(preprocessing_fn),
        classes=CLASSES,
    )
    valid_dataset = Dataset(
        x_valid_dir,
        y_valid_dir,
        augmentation=get_validation_augmentation(),
        preprocessing=get_preprocessing(preprocessing_fn),
        classes=CLASSES,
    )
    train_loader = DataLoader(train_dataset, batch_size=16, shuffle=True, num_workers=0)
    valid_loader = DataLoader(valid_dataset, batch_size=1, shuffle=False, num_workers=0)
    loss = smp.utils.losses.DiceLoss()
    metrics = [
        smp.utils.metrics.IoU(threshold=0.5),
    ]
    optimizer = torch.optim.Adam([
        dict(params=model.parameters(), lr=0.0001),
    ])
    train_epoch = smp.utils.train.TrainEpoch(
        model,
        loss=loss,
        metrics=metrics,
        optimizer=optimizer,
        device=DEVICE,
        verbose=True,
    )
    valid_epoch = smp.utils.train.ValidEpoch(
        model,
        loss=loss,
        metrics=metrics,
        device=DEVICE,
        verbose=True,
    )
    max_score = 0
    for i in range(0, 100):
        print('\nEpoch: {}'.format(i))
        train_logs = train_epoch.run(train_loader)
        valid_logs = valid_epoch.run(valid_loader)
        # keep the checkpoint with the best validation IoU
        if max_score < valid_logs['iou_score']:
            max_score = valid_logs['iou_score']
            torch.save(model, './best_model.pth')
            print('Model saved!')
        if i == 25:
            optimizer.param_groups[0]['lr'] = 1e-5
            print('Decrease decoder learning rate to 1e-5!')
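Note: a minimal held-out evaluation sketch, not part of the commit. It assumes the CamVid checkout also provides 'test'/'testannot' splits and that it is appended to the end of the __main__ block above (indented to match), so Dataset, DATA_DIR, get_validation_augmentation, get_preprocessing, preprocessing_fn, CLASSES, loss, metrics, and DEVICE are the objects defined there. It reloads the checkpoint this script saves and reuses smp.utils.train.ValidEpoch for scoring.

    # reload the best checkpoint saved during training
    best_model = torch.load('./best_model.pth')

    test_dataset = Dataset(
        os.path.join(DATA_DIR, 'test'),        # assumed test split
        os.path.join(DATA_DIR, 'testannot'),   # assumed test annotations
        augmentation=get_validation_augmentation(),
        preprocessing=get_preprocessing(preprocessing_fn),
        classes=CLASSES,
    )
    test_loader = DataLoader(test_dataset, batch_size=1, shuffle=False, num_workers=0)

    test_epoch = smp.utils.train.ValidEpoch(
        best_model,
        loss=loss,
        metrics=metrics,
        device=DEVICE,
        verbose=True,
    )
    logs = test_epoch.run(test_loader)
    print(logs)   # reports Dice loss and IoU on the held-out split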