# detecttracking/tracking/trackers/reid/model/BAM.py
# Bottleneck Attention Module (BAM): channel attention + spatial attention
# combined into a residual gate over the feature map.
import torch
import torch.nn as nn
from torch.nn import init


class Flatten(nn.Module):
    """Flattens pooled (B, C, 1, 1) features to (B, C) for the MLP below."""
    def forward(self, x):
        return x.view(x.shape[0], -1)

class ChannelAttention(nn.Module):
    """Channel branch of BAM: global average pool followed by a bottleneck MLP."""
    def __init__(self, channel, reduction=16, num_layers=3):
        # The original misspelled __init__ as __int__, so this body never ran.
        super(ChannelAttention, self).__init__()
        self.avgpool = nn.AdaptiveAvgPool2d(1)
        gate_channels = [channel]
        gate_channels += [channel // reduction] * num_layers  # was len(channel)//reduction, a TypeError on an int
        gate_channels += [channel]

        self.ca = nn.Sequential()
        self.ca.add_module('flatten', Flatten())
        for i in range(len(gate_channels) - 2):
            # Each submodule needs a unique, non-empty name; add_module('')
            # raises a KeyError in PyTorch.
            self.ca.add_module('fc%d' % i, nn.Linear(gate_channels[i], gate_channels[i + 1]))
            self.ca.add_module('bn%d' % i, nn.BatchNorm1d(gate_channels[i + 1]))
            self.ca.add_module('relu%d' % i, nn.ReLU())
        self.ca.add_module('fc_out', nn.Linear(gate_channels[-2], gate_channels[-1]))

    def forward(self, x):
        res = self.avgpool(x)                               # (B, C, 1, 1)
        res = self.ca(res)                                  # (B, C)
        res = res.unsqueeze(-1).unsqueeze(-1).expand_as(x)  # (B, C, H, W)
        return res

class SpatialAttention(nn.Module):
    """Spatial branch of BAM: 1x1 reduction, dilated 3x3 convs, 1x1 projection to one map."""
    def __init__(self, channel, reduction=16, num_lay=3, dilation=2):
        super(SpatialAttention, self).__init__()  # original omitted self in super()
        self.sa = nn.Sequential()
        # 1x1 conv reduces channels to channel // reduction; the original's
        # "* 3" on out_channels did not match the BatchNorm2d that follows.
        self.sa.add_module('conv_reduce', nn.Conv2d(in_channels=channel,
                                                    out_channels=channel // reduction,
                                                    kernel_size=1))
        self.sa.add_module('bn_reduce', nn.BatchNorm2d(channel // reduction))
        self.sa.add_module('relu_reduce', nn.ReLU())
        for i in range(num_lay):
            # For a 3x3 conv, padding must equal the dilation rate to keep
            # H and W unchanged; the original's padding=1 with a hard-coded
            # dilation of 2 shrank the map and broke expand_as in forward().
            self.sa.add_module('conv%d' % i, nn.Conv2d(in_channels=channel // reduction,
                                                       out_channels=channel // reduction,
                                                       kernel_size=3,
                                                       padding=dilation,
                                                       dilation=dilation))
            self.sa.add_module('bn%d' % i, nn.BatchNorm2d(channel // reduction))
            self.sa.add_module('relu%d' % i, nn.ReLU())
        self.sa.add_module('conv_out', nn.Conv2d(channel // reduction, 1, kernel_size=1))

    def forward(self, x):
        res = self.sa(x)        # (B, 1, H, W)
        res = res.expand_as(x)  # broadcast over channels -> (B, C, H, W)
        return res

class BAMblock(nn.Module):
    """Bottleneck Attention Module: refines x with sigmoid(channel + spatial attention)."""
    def __init__(self, channel=512, reduction=16, dia_val=2):
        super(BAMblock, self).__init__()
        self.ca = ChannelAttention(channel, reduction)
        # dia_val is the dilation rate; pass it by keyword so it is not
        # mistaken for num_lay, as happened with the positional call before.
        self.sa = SpatialAttention(channel, reduction, dilation=dia_val)
        self.sigmoid = nn.Sigmoid()

    def init_weights(self):
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                init.kaiming_normal_(m.weight, mode='fan_out')  # kaiming_normal is deprecated
                if m.bias is not None:  # was the typo m.bais
                    init.constant_(m.bias, 0)
            elif isinstance(m, nn.BatchNorm2d):
                init.constant_(m.weight, 1)
                init.constant_(m.bias, 0)
            elif isinstance(m, nn.Linear):
                init.normal_(m.weight, std=0.001)
                if m.bias is not None:
                    init.constant_(m.bias, 0)

    def forward(self, x):
        sa_out = self.sa(x)
        ca_out = self.ca(x)
        weight = self.sigmoid(sa_out + ca_out)
        out = (1 + weight) * x  # residual attention gate
        return out

if __name__ == "__main__":
    print(512 // 14)  # leftover scratch calculation kept from the original file
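
    # A minimal smoke test, not part of the original file: push a random
    # batch through BAMblock and confirm the output shape matches the input.
    # The (2, 512, 7, 7) input size is an arbitrary assumption; batch size
    # must be > 1 because the channel MLP uses BatchNorm1d in training mode.
    x = torch.randn(2, 512, 7, 7)
    bam = BAMblock(channel=512, reduction=16, dia_val=2)
    print(bam(x).shape)  # expected: torch.Size([2, 512, 7, 7])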