# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import math import paddle import paddle.nn as nn from paddleseg.utils import utils from paddleseg.cvlibs import manager, param_init from paddleseg.models.layers.layer_libs import SyncBatchNorm __all__ = ["STDC1", "STDC2", "STDC_Small", "STDC_Tiny"] class STDCNet(nn.Layer): """ The STDCNet implementation based on PaddlePaddle. The original article refers to Meituan Fan, Mingyuan, et al. "Rethinking BiSeNet For Real-time Semantic Segmentation." (https://arxiv.org/abs/2104.13188) Args: base(int, optional): base channels. Default: 64. layers(list, optional): layers numbers list. It determines STDC block numbers of STDCNet's stage3\4\5. Defualt: [4, 5, 3]. block_num(int,optional): block_num of features block. Default: 4. type(str,optional): feature fusion method "cat"/"add". Default: "cat". pretrained(str, optional): the path of pretrained model. """ def __init__(self, input_channels=3, channels=[32, 64, 256, 512, 1024], layers=[4, 5, 3], block_num=4, type="cat", pretrained=None): super(STDCNet, self).__init__() if type == "cat": block = CatBottleneck elif type == "add": block = AddBottleneck self.input_channels = input_channels self.layers = layers self.feat_channels = channels self.features = self._make_layers(channels, layers, block_num, block) self.pretrained = pretrained self.init_weight() def forward(self, x): """ forward function for feature extract. """ out_feats = [] x = self.features[0](x) out_feats.append(x) x = self.features[1](x) out_feats.append(x) idx = [[2, 2 + self.layers[0]], [2 + self.layers[0], 2 + sum(self.layers[0:2])], [2 + sum(self.layers[0:2]), 2 + sum(self.layers)]] for start_idx, end_idx in idx: for i in range(start_idx, end_idx): x = self.features[i](x) out_feats.append(x) return out_feats def _make_layers(self, channels, layers, block_num, block): features = [] features += [ConvBNRelu(self.input_channels, channels[0], 3, 2)] features += [ConvBNRelu(channels[0], channels[1], 3, 2)] for i, layer in enumerate(layers): for j in range(layer): if i == 0 and j == 0: features.append( block(channels[i + 1], channels[i + 2], block_num, 2)) elif j == 0: features.append( block(channels[i + 1], channels[i + 2], block_num, 2)) else: features.append( block(channels[i + 2], channels[i + 2], block_num, 1)) return nn.Sequential(*features) def init_weight(self): for layer in self.sublayers(): if isinstance(layer, nn.Conv2D): param_init.normal_init(layer.weight, std=0.001) elif isinstance(layer, (nn.BatchNorm, nn.SyncBatchNorm)): param_init.constant_init(layer.weight, value=1.0) param_init.constant_init(layer.bias, value=0.0) if self.pretrained is not None: utils.load_pretrained_model(self, self.pretrained) class ConvBNRelu(nn.Layer): def __init__(self, in_planes, out_planes, kernel=3, stride=1): super(ConvBNRelu, self).__init__() self.conv = nn.Conv2D( in_planes, out_planes, kernel_size=kernel, stride=stride, padding=kernel // 2, bias_attr=False) self.bn = SyncBatchNorm(out_planes, data_format='NCHW') self.relu = nn.ReLU() def forward(self, x): out = self.relu(self.bn(self.conv(x))) return out class AddBottleneck(nn.Layer): def __init__(self, in_planes, out_planes, block_num=3, stride=1): super(AddBottleneck, self).__init__() assert block_num > 1, "block number should be larger than 1." self.conv_list = nn.LayerList() self.stride = stride if stride == 2: self.avd_layer = nn.Sequential( nn.Conv2D( out_planes // 2, out_planes // 2, kernel_size=3, stride=2, padding=1, groups=out_planes // 2, bias_attr=False), nn.BatchNorm2D(out_planes // 2), ) self.skip = nn.Sequential( nn.Conv2D( in_planes, in_planes, kernel_size=3, stride=2, padding=1, groups=in_planes, bias_attr=False), nn.BatchNorm2D(in_planes), nn.Conv2D( in_planes, out_planes, kernel_size=1, bias_attr=False), nn.BatchNorm2D(out_planes), ) stride = 1 for idx in range(block_num): if idx == 0: self.conv_list.append( ConvBNRelu( in_planes, out_planes // 2, kernel=1)) elif idx == 1 and block_num == 2: self.conv_list.append( ConvBNRelu( out_planes // 2, out_planes // 2, stride=stride)) elif idx == 1 and block_num > 2: self.conv_list.append( ConvBNRelu( out_planes // 2, out_planes // 4, stride=stride)) elif idx < block_num - 1: self.conv_list.append( ConvBNRelu(out_planes // int(math.pow(2, idx)), out_planes // int(math.pow(2, idx + 1)))) else: self.conv_list.append( ConvBNRelu(out_planes // int(math.pow(2, idx)), out_planes // int(math.pow(2, idx)))) def forward(self, x): out_list = [] out = x for idx, conv in enumerate(self.conv_list): if idx == 0 and self.stride == 2: out = self.avd_layer(conv(out)) else: out = conv(out) out_list.append(out) if self.stride == 2: x = self.skip(x) return paddle.concat(out_list, axis=1) + x class CatBottleneck(nn.Layer): def __init__(self, in_planes, out_planes, block_num=3, stride=1): super(CatBottleneck, self).__init__() assert block_num > 1, "block number should be larger than 1." self.conv_list = nn.LayerList() self.stride = stride if stride == 2: self.avd_layer = nn.Sequential( nn.Conv2D( out_planes // 2, out_planes // 2, kernel_size=3, stride=2, padding=1, groups=out_planes // 2, bias_attr=False), nn.BatchNorm2D(out_planes // 2), ) self.skip = nn.AvgPool2D(kernel_size=3, stride=2, padding=1) stride = 1 for idx in range(block_num): if idx == 0: self.conv_list.append( ConvBNRelu( in_planes, out_planes // 2, kernel=1)) elif idx == 1 and block_num == 2: self.conv_list.append( ConvBNRelu( out_planes // 2, out_planes // 2, stride=stride)) elif idx == 1 and block_num > 2: self.conv_list.append( ConvBNRelu( out_planes // 2, out_planes // 4, stride=stride)) elif idx < block_num - 1: self.conv_list.append( ConvBNRelu(out_planes // int(math.pow(2, idx)), out_planes // int(math.pow(2, idx + 1)))) else: self.conv_list.append( ConvBNRelu(out_planes // int(math.pow(2, idx)), out_planes // int(math.pow(2, idx)))) def forward(self, x): out_list = [] out1 = self.conv_list[0](x) for idx, conv in enumerate(self.conv_list[1:]): if idx == 0: if self.stride == 2: out = conv(self.avd_layer(out1)) else: out = conv(out1) else: out = conv(out) out_list.append(out) if self.stride == 2: out1 = self.skip(out1) out_list.insert(0, out1) out = paddle.concat(out_list, axis=1) return out @manager.BACKBONES.add_component def STDC2(**kwargs): model = STDCNet( channels=[32, 64, 256, 512, 1024], layers=[4, 5, 3], **kwargs) return model @manager.BACKBONES.add_component def STDC1(**kwargs): model = STDCNet( channels=[32, 64, 256, 512, 1024], layers=[2, 2, 2], **kwargs) return model @manager.BACKBONES.add_component def STDC_Small(**kwargs): model = STDCNet(channels=[32, 32, 64, 128, 256], layers=[4, 5, 3], **kwargs) return model @manager.BACKBONES.add_component def STDC_Tiny(**kwargs): model = STDCNet(channels=[32, 32, 64, 128, 256], layers=[2, 2, 2], **kwargs) return model