Source code for pywick.models.segmentation.bisenet

# Source: https://github.com/Tramac/awesome-semantic-segmentation-pytorch/blob/master/core/models/bisenet.py (License: Apache 2.0)

"""
Implementation of `BiSeNet: Bilateral Segmentation Network for Real-time Semantic Segmentation <https://arxiv.org/pdf/1808.00897>`_
"""

import torch
import torch.nn as nn
import torch.nn.functional as F

from pywick.models.segmentation.da_basenets.resnet import resnet18

__all__ = ['BiSeNet', 'BiSeNet_Resnet18']


class BiSeNet(nn.Module):
    def __init__(self, num_classes, pretrained=True, backbone='resnet18', aux=False, **kwargs):
        super(BiSeNet, self).__init__()
        self.aux = aux
        self.spatial_path = SpatialPath(3, 128, **kwargs)
        self.context_path = ContextPath(backbone=backbone, pretrained=pretrained, **kwargs)
        self.ffm = FeatureFusion(256, 256, 4, **kwargs)
        self.head = _BiSeHead(256, 64, num_classes, **kwargs)
        if aux:
            self.auxlayer1 = _BiSeHead(128, 256, num_classes, **kwargs)
            self.auxlayer2 = _BiSeHead(128, 256, num_classes, **kwargs)

        self.__setattr__('exclusive',
                         ['spatial_path', 'context_path', 'ffm', 'head', 'auxlayer1', 'auxlayer2'] if aux
                         else ['spatial_path', 'context_path', 'ffm', 'head'])

    def forward(self, x):
        size = x.size()[2:]
        spatial_out = self.spatial_path(x)
        context_out = self.context_path(x)
        fusion_out = self.ffm(spatial_out, context_out[-1])

        outputs = []
        x = self.head(fusion_out)
        x = F.interpolate(x, size, mode='bilinear', align_corners=True)
        outputs.append(x)

        # Auxiliary heads are only evaluated during training; at inference time
        # only the main prediction is returned.
        if self.aux and self.training:
            auxout1 = self.auxlayer1(context_out[0])
            auxout1 = F.interpolate(auxout1, size, mode='bilinear', align_corners=True)
            outputs.append(auxout1)
            auxout2 = self.auxlayer2(context_out[1])
            auxout2 = F.interpolate(auxout2, size, mode='bilinear', align_corners=True)
            outputs.append(auxout2)
            return tuple(outputs)
        else:
            return outputs[0]
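# Illustrative sketch (not part of the original pywick source): BiSeNet returns a
# single logits tensor in eval mode or when aux=False, and a tuple of
# (main, aux1, aux2) logits when aux=True and the model is training. The helper
# name below is hypothetical and exists only to demonstrate that output contract.
def _example_output_contract():
    model = BiSeNet(num_classes=19, pretrained=False, backbone='resnet18', aux=True)
    x = torch.randn(2, 3, 224, 224)

    model.train()
    main_out, aux1, aux2 = model(x)      # three logit maps, each [2, 19, 224, 224]

    model.eval()
    with torch.no_grad():
        pred = model(x)                  # single logit map, [2, 19, 224, 224]
    return main_out.shape, pred.shape
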
class _BiSeHead(nn.Module):
    def __init__(self, in_channels, inter_channels, nclass, norm_layer=nn.BatchNorm2d, **kwargs):
        super(_BiSeHead, self).__init__()
        self.block = nn.Sequential(
            _ConvBNReLU(in_channels, inter_channels, 3, 1, 1, norm_layer=norm_layer, **kwargs),
            nn.Dropout(0.1),
            nn.Conv2d(inter_channels, nclass, 1)
        )

    def forward(self, x):
        x = self.block(x)
        return x


class _ConvBNReLU(nn.Module):
    def __init__(self, in_channels, out_channels, kernel_size=3, stride=1, padding=1, dilation=1,
                 groups=1, norm_layer=nn.BatchNorm2d, bias=False, **kwargs):
        super(_ConvBNReLU, self).__init__()
        self.conv = nn.Conv2d(in_channels, out_channels, kernel_size, stride, padding, dilation, groups, bias)
        self.bn = norm_layer(out_channels)
        self.relu = nn.ReLU(True)

    def forward(self, x):
        x = self.conv(x)
        x = self.bn(x)
        x = self.relu(x)
        return x


class SpatialPath(nn.Module):
    """Spatial path"""

    def __init__(self, in_channels, out_channels, norm_layer=nn.BatchNorm2d, **kwargs):
        super(SpatialPath, self).__init__()
        inter_channels = 64
        self.conv7x7 = _ConvBNReLU(in_channels, inter_channels, 7, 2, 3, norm_layer=norm_layer, **kwargs)
        self.conv3x3_1 = _ConvBNReLU(inter_channels, inter_channels, 3, 2, 1, norm_layer=norm_layer, **kwargs)
        self.conv3x3_2 = _ConvBNReLU(inter_channels, inter_channels, 3, 2, 1, norm_layer=norm_layer, **kwargs)
        self.conv1x1 = _ConvBNReLU(inter_channels, out_channels, 1, 1, 0, norm_layer=norm_layer, **kwargs)

    def forward(self, x):
        x = self.conv7x7(x)
        x = self.conv3x3_1(x)
        x = self.conv3x3_2(x)
        x = self.conv1x1(x)
        return x


class _GlobalAvgPooling(nn.Module):
    def __init__(self, in_channels, out_channels, norm_layer, **kwargs):
        super(_GlobalAvgPooling, self).__init__()
        self.gap = nn.Sequential(
            nn.AdaptiveAvgPool2d(1),
            nn.Conv2d(in_channels, out_channels, 1, bias=False),
            norm_layer(out_channels),
            nn.ReLU(True)
        )

    def forward(self, x):
        size = x.size()[2:]
        pool = self.gap(x)
        out = F.interpolate(pool, size, mode='bilinear', align_corners=True)
        return out


class AttentionRefinmentModule(nn.Module):
    def __init__(self, in_channels, out_channels, norm_layer=nn.BatchNorm2d, **kwargs):
        super(AttentionRefinmentModule, self).__init__()
        self.conv3x3 = _ConvBNReLU(in_channels, out_channels, 3, 1, 1, norm_layer=norm_layer, **kwargs)
        self.channel_attention = nn.Sequential(
            nn.AdaptiveAvgPool2d(1),
            _ConvBNReLU(out_channels, out_channels, 1, 1, 0, norm_layer=norm_layer, **kwargs),
            nn.Sigmoid()
        )

    def forward(self, x):
        x = self.conv3x3(x)
        attention = self.channel_attention(x)
        x = x * attention
        return x


class ContextPath(nn.Module):
    def __init__(self, pretrained=True, backbone='resnet18', norm_layer=nn.BatchNorm2d, **kwargs):
        super(ContextPath, self).__init__()
        if backbone == 'resnet18':
            pretrained = resnet18(pretrained=pretrained, **kwargs)
        else:
            raise RuntimeError('unknown backbone: {}'.format(backbone))

        self.conv1 = pretrained.conv1
        self.bn1 = pretrained.bn1
        self.relu = pretrained.relu
        self.maxpool = pretrained.maxpool
        self.layer1 = pretrained.layer1
        self.layer2 = pretrained.layer2
        self.layer3 = pretrained.layer3
        self.layer4 = pretrained.layer4

        inter_channels = 128
        self.global_context = _GlobalAvgPooling(512, inter_channels, norm_layer, **kwargs)

        self.arms = nn.ModuleList(
            [AttentionRefinmentModule(512, inter_channels, norm_layer, **kwargs),
             AttentionRefinmentModule(256, inter_channels, norm_layer, **kwargs)]
        )
        self.refines = nn.ModuleList(
            [_ConvBNReLU(inter_channels, inter_channels, 3, 1, 1, norm_layer=norm_layer, **kwargs),
             _ConvBNReLU(inter_channels, inter_channels, 3, 1, 1, norm_layer=norm_layer, **kwargs)]
        )
    def forward(self, x):
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)
        x = self.layer1(x)

        context_blocks = []
        context_blocks.append(x)
        x = self.layer2(x)
        context_blocks.append(x)
        c3 = self.layer3(x)
        context_blocks.append(c3)
        c4 = self.layer4(c3)
        context_blocks.append(c4)
        # Deepest features first: [c4 (1/32), c3 (1/16), c2 (1/8), c1 (1/4)].
        context_blocks.reverse()

        global_context = self.global_context(c4)
        last_feature = global_context

        context_outputs = []
        for i, (feature, arm, refine) in enumerate(zip(context_blocks[:2], self.arms, self.refines)):
            feature = arm(feature)
            feature += last_feature
            last_feature = F.interpolate(feature, size=context_blocks[i + 1].size()[2:],
                                         mode='bilinear', align_corners=True)
            last_feature = refine(last_feature)
            context_outputs.append(last_feature)

        return context_outputs


class FeatureFusion(nn.Module):
    def __init__(self, in_channels, out_channels, reduction=1, norm_layer=nn.BatchNorm2d, **kwargs):
        super(FeatureFusion, self).__init__()
        self.conv1x1 = _ConvBNReLU(in_channels, out_channels, 1, 1, 0, norm_layer=norm_layer, **kwargs)
        self.channel_attention = nn.Sequential(
            nn.AdaptiveAvgPool2d(1),
            _ConvBNReLU(out_channels, out_channels // reduction, 1, 1, 0, norm_layer=norm_layer, **kwargs),
            _ConvBNReLU(out_channels // reduction, out_channels, 1, 1, 0, norm_layer=norm_layer, **kwargs),
            nn.Sigmoid()
        )

    def forward(self, x1, x2):
        fusion = torch.cat([x1, x2], dim=1)
        out = self.conv1x1(fusion)
        attention = self.channel_attention(out)
        # Residual channel re-weighting, i.e. out * (1 + sigmoid gate).
        out = out + out * attention
        return out


def get_bisenet(num_classes=1, backbone='resnet18', pretrained=True, **kwargs):
    model = BiSeNet(num_classes=num_classes, backbone=backbone, pretrained=pretrained, **kwargs)
    return model
def BiSeNet_Resnet18(num_classes=1, **kwargs):
    return get_bisenet(num_classes=num_classes, backbone='resnet18', **kwargs)
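# Usage sketch (illustrative addition, not in the original file): the convenience
# constructor forwards extra keyword arguments straight through to BiSeNet, so
# options such as pretrained or aux can be passed here as well, e.g.:
#
#     model = BiSeNet_Resnet18(num_classes=21, pretrained=False).eval()
#     with torch.no_grad():
#         pred = model(torch.randn(1, 3, 256, 256))   # logits, [1, 21, 256, 256]
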
if __name__ == '__main__':
    img = torch.randn(2, 3, 224, 224)
    model = BiSeNet(19, backbone='resnet18')
    print(model.exclusive)
    out = model(img)      # aux=False, so a single logits tensor is returned
    print(out.size())     # torch.Size([2, 19, 224, 224])