""" Ported to pytorch thanks to [tstandley](https://github.com/tstandley/Xception-PyTorch) @author: tstandley Adapted by cadene Creates an Xception Model as defined in: Francois Chollet Xception: Deep Learning with Depthwise Separable Convolutions https://arxiv.org/pdf/1610.02357.pdf This weights ported from the Keras implementation. Achieves the following performance on the validation set: Loss:0.9173 Prec@1:78.892 Prec@5:94.292 REMEMBER to set your image size to 3x299x299 for both test and validation normalize = transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]) The resize parameter of the validation transform should be 333, and make sure to center crop at 299x299 """ import torch.nn as nn import torch.nn.functional as F from utils.Stomach_Config import Model_Config from timm.layers import create_classifier from .ViT_Model import ViTBranch # 添加或替換這個導入 class SeparableConv2d(nn.Module): def __init__( self, in_channels: int, out_channels: int, kernel_size: int = 1, stride: int = 1, padding: int = 0, dilation: int = 1, device=None, dtype=None, ): dd = {'device': device, 'dtype': dtype} super().__init__() self.conv1 = nn.Conv2d( in_channels, in_channels, kernel_size, stride, padding, dilation, groups=in_channels, bias=False, **dd, ) self.pointwise = nn.Conv2d(in_channels, out_channels, 1, 1, 0, 1, 1, bias=False, **dd) def forward(self, x): x = self.conv1(x) x = self.pointwise(x) return x class Block(nn.Module): def __init__( self, in_channels: int, out_channels: int, reps: int, strides: int = 1, start_with_relu: bool = True, grow_first: bool = True, device=None, dtype=None, ): dd = {'device': device, 'dtype': dtype} super().__init__() if out_channels != in_channels or strides != 1: self.skip = nn.Conv2d(in_channels, out_channels, 1, stride=strides, bias=False, **dd) self.skipbn = nn.BatchNorm2d(out_channels, **dd) else: self.skip = None rep = [] for i in range(reps): if grow_first: inc = in_channels if i == 0 else out_channels outc = out_channels else: inc = in_channels outc = in_channels if i < (reps - 1) else out_channels rep.append(nn.ReLU(inplace=True)) rep.append(SeparableConv2d(inc, outc, 3, stride=1, padding=1, **dd)) rep.append(nn.BatchNorm2d(outc, **dd)) if not start_with_relu: rep = rep[1:] else: rep[0] = nn.ReLU(inplace=False) if strides != 1: rep.append(nn.MaxPool2d(3, strides, 1)) self.rep = nn.Sequential(*rep) def forward(self, inp): x = self.rep(inp) if self.skip is not None: skip = self.skip(inp) skip = self.skipbn(skip) else: skip = inp x += skip return x class Xception(nn.Module): """ Xception optimized for the ImageNet dataset, as specified in https://arxiv.org/pdf/1610.02357.pdf """ def __init__( self, num_classes: int = 1000, in_chans: int = 3, drop_rate: float = 0., global_pool: str = 'avg', device=None, dtype=None, ): """ Constructor Args: num_classes: number of classes """ super().__init__() dd = {'device': device, 'dtype': dtype} self.drop_rate = drop_rate self.global_pool = global_pool self.num_classes = num_classes self.num_features = self.head_hidden_size = 2048 self.conv1 = nn.Conv2d(in_chans, 32, 3, 2, 0, bias=False, **dd) self.bn1 = nn.BatchNorm2d(32, **dd) self.act1 = nn.ReLU(inplace=True) self.conv2 = nn.Conv2d(32, 64, 3, bias=False, **dd) self.bn2 = nn.BatchNorm2d(64, **dd) self.act2 = nn.ReLU(inplace=True) self.block1 = Block(64, 128, 2, 2, start_with_relu=False, **dd) self.block2 = Block(128, 256, 2, 2, **dd) self.block3 = Block(256, 728, 2, 2, **dd) self.block4 = Block(728, 728, 3, 1, **dd) self.block5 = Block(728, 728, 3, 1, **dd) self.block6 
class Xception(nn.Module):
    """
    Xception optimized for the ImageNet dataset, as specified in
    https://arxiv.org/pdf/1610.02357.pdf
    """

    def __init__(
            self,
            num_classes: int = 1000,
            in_chans: int = 3,
            drop_rate: float = 0.,
            global_pool: str = 'avg',
            device=None,
            dtype=None,
    ):
        """Constructor

        Args:
            num_classes: number of classes
        """
        super().__init__()
        dd = {'device': device, 'dtype': dtype}
        self.drop_rate = drop_rate
        self.global_pool = global_pool
        self.num_classes = num_classes
        self.num_features = self.head_hidden_size = 2048

        self.conv1 = nn.Conv2d(in_chans, 32, 3, 2, 0, bias=False, **dd)
        self.bn1 = nn.BatchNorm2d(32, **dd)
        self.act1 = nn.ReLU(inplace=True)

        self.conv2 = nn.Conv2d(32, 64, 3, bias=False, **dd)
        self.bn2 = nn.BatchNorm2d(64, **dd)
        self.act2 = nn.ReLU(inplace=True)

        self.block1 = Block(64, 128, 2, 2, start_with_relu=False, **dd)
        self.block2 = Block(128, 256, 2, 2, **dd)
        self.block3 = Block(256, 728, 2, 2, **dd)

        self.block4 = Block(728, 728, 3, 1, **dd)
        self.block5 = Block(728, 728, 3, 1, **dd)
        self.block6 = Block(728, 728, 3, 1, **dd)
        self.block7 = Block(728, 728, 3, 1, **dd)
        self.block8 = Block(728, 728, 3, 1, **dd)
        self.block9 = Block(728, 728, 3, 1, **dd)
        self.block10 = Block(728, 728, 3, 1, **dd)
        self.block11 = Block(728, 728, 3, 1, **dd)

        self.block12 = Block(728, 1024, 2, 2, grow_first=False, **dd)

        self.conv3 = SeparableConv2d(1024, 1536, 3, 1, 1, **dd)
        self.bn3 = nn.BatchNorm2d(1536, **dd)
        self.act3 = nn.ReLU(inplace=True)

        self.conv4 = SeparableConv2d(1536, self.num_features, 3, 1, 1, **dd)
        self.bn4 = nn.BatchNorm2d(self.num_features, **dd)
        self.act4 = nn.ReLU(inplace=True)

        self.feature_info = [
            dict(num_chs=64, reduction=2, module='act2'),
            dict(num_chs=128, reduction=4, module='block2.rep.0'),
            dict(num_chs=256, reduction=8, module='block3.rep.0'),
            dict(num_chs=728, reduction=16, module='block12.rep.0'),
            dict(num_chs=2048, reduction=32, module='act4'),
        ]

        self.global_pool, self.fc = create_classifier(
            self.num_features, self.num_classes, pool_type=global_pool, **dd)

        # hidden layer; the input size matches Xception's 2048-dim pooled features
        self.hidden_layer = nn.Linear(2048, Model_Config["Linear Hidden Nodes"])
        # output layer; sized by the number of classes
        self.output_layer = nn.Linear(Model_Config["Linear Hidden Nodes"], Model_Config["Output Linear Nodes"])

        # activation and dropout
        self.relu = nn.ReLU()
        self.dropout = nn.Dropout(Model_Config["Dropout Rate"])

        # ------- init weights --------
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()

    def reset_classifier(self, num_classes: int, global_pool: str = 'avg'):
        self.num_classes = num_classes
        self.global_pool, self.fc = create_classifier(
            self.num_features, self.num_classes, pool_type=global_pool)

    def forward_features(self, x):
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.act1(x)

        x = self.conv2(x)
        x = self.bn2(x)
        x = self.act2(x)

        x = self.block1(x)
        x = self.block2(x)
        x = self.block3(x)
        x = self.block4(x)
        x = self.block5(x)
        x = self.block6(x)
        x = self.block7(x)
        x = self.block8(x)
        x = self.block9(x)
        x = self.block10(x)
        x = self.block11(x)
        x = self.block12(x)

        x = self.conv3(x)
        x = self.bn3(x)
        x = self.act3(x)

        x = self.conv4(x)
        x = self.bn4(x)
        x = self.act4(x)
        return x

    def forward_head(self, x):
        x = self.global_pool(x)
        return x

    def forward(self, x):
        x = self.forward_features(x)
        x = self.forward_head(x)
        x = self.dropout(x)       # dropout
        x = self.hidden_layer(x)
        x = self.relu(x)          # hidden layer + ReLU
        x = self.output_layer(x)  # output layer
        return x
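
# Usage sketch for the backbone above, following the preprocessing described in
# the module docstring (resize to 333, center crop to 299x299, normalize with a
# mean/std of 0.5). Illustrative only: torchvision is assumed to be installed,
# and this helper is not used elsewhere in the module.
def _example_val_transform():
    from torchvision import transforms  # local import so torchvision stays an optional dependency
    return transforms.Compose([
        transforms.Resize(333),
        transforms.CenterCrop(299),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]),
    ])
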
# xception_vit.py
class XceptionWithViT(nn.Module):
    """
    Xception + ViT branch (fused after the Middle Flow)
    """

    def __init__(
            self,
            num_classes: int = 1000,
            in_chans: int = 3,
            drop_rate: float = 0.0,
            global_pool: str = 'avg',
            vit_patch_size: int = 4,
            vit_depth: int = 3,
            vit_heads: int = 8,
            device=None,
            dtype=None,
    ):
        super().__init__()
        dd = {'device': device, 'dtype': dtype}
        self.drop_rate = drop_rate
        self.global_pool = global_pool
        self.num_features = 2048

        # === Entry Flow ===
        self.conv1 = nn.Conv2d(in_chans, 32, 3, 2, 0, bias=False, **dd)
        self.bn1 = nn.BatchNorm2d(32, **dd)
        self.act1 = nn.ReLU(inplace=True)

        self.conv2 = nn.Conv2d(32, 64, 3, bias=False, **dd)
        self.bn2 = nn.BatchNorm2d(64, **dd)
        self.act2 = nn.ReLU(inplace=True)

        self.block1 = Block(64, 128, 2, 2, start_with_relu=False, **dd)
        self.block2 = Block(128, 256, 2, 2, **dd)
        self.block3 = Block(256, 728, 2, 2, **dd)

        # === Middle Flow ===
        for i in range(4, 12):
            setattr(self, f'block{i}', Block(728, 728, 3, 1, **dd))

        # === ViT Branch ===
        self.vit_branch = ViTBranch(
            in_chs=728,
            embed_dim=728,
            patch_size=vit_patch_size,
            depth=vit_depth,
            num_heads=vit_heads,
            mlp_ratio=4.0,
            drop_rate=0.0,
            attn_drop_rate=0.0,
            device=device,
            dtype=dtype,
        )

        # === Exit Flow ===
        self.block12 = Block(728, 1024, 2, 2, grow_first=False, **dd)

        self.conv3 = SeparableConv2d(1024, 1536, 3, 1, 1, **dd)
        self.bn3 = nn.BatchNorm2d(1536, **dd)
        self.act3 = nn.ReLU(inplace=True)

        self.conv4 = SeparableConv2d(1536, self.num_features, 3, 1, 1, **dd)
        self.bn4 = nn.BatchNorm2d(self.num_features, **dd)
        self.act4 = nn.ReLU(inplace=True)

        # === Classifier ===
        self.global_pool, _ = create_classifier(self.num_features, num_classes, pool_type=global_pool, **dd)
        self.hidden_layer = nn.Linear(2048, Model_Config["Linear Hidden Nodes"])
        self.output_layer = nn.Linear(Model_Config["Linear Hidden Nodes"], Model_Config["Output Linear Nodes"])
        self.relu = nn.ReLU()
        self.dropout = nn.Dropout(Model_Config["Dropout Rate"])

        # === Weight Init ===
        self._init_weights()

    def _init_weights(self):
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
            elif isinstance(m, (nn.BatchNorm2d, nn.LayerNorm)):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.Linear):
                nn.init.normal_(m.weight, 0, 0.01)
                if m.bias is not None:
                    nn.init.constant_(m.bias, 0)

    def forward_features(self, x):
        # Entry Flow
        x = self.act1(self.bn1(self.conv1(x)))
        x = self.act2(self.bn2(self.conv2(x)))
        x = self.block1(x)
        x = self.block2(x)
        x = self.block3(x)

        # Middle Flow
        for i in range(4, 12):
            x = getattr(self, f'block{i}')(x)

        # === ViT Fusion ===
        vit_out = self.vit_branch(x)
        x = x + vit_out  # element-wise add

        # Exit Flow
        x = self.block12(x)
        x = self.act3(self.bn3(self.conv3(x)))
        x = self.act4(self.bn4(self.conv4(x)))
        return x

    def forward(self, x):
        x = self.forward_features(x)
        x = self.global_pool(x)
        x = self.dropout(x)
        x = self.relu(self.hidden_layer(x))
        x = self.output_layer(x)
        return x
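

# Minimal smoke test, as a sketch only: it assumes `utils.Stomach_Config` and
# `.ViT_Model` are importable and that Model_Config defines the keys used above;
# because of the relative import it must be run as a module (python -m ...), not
# as a script. Only the plain Xception model is exercised, since whether
# XceptionWithViT accepts 299x299 inputs depends on how ViTBranch handles the
# 19x19 feature map it receives at that resolution.
if __name__ == '__main__':
    import torch

    model = Xception(num_classes=Model_Config["Output Linear Nodes"]).eval()
    with torch.no_grad():
        out = model(torch.randn(1, 3, 299, 299))
    # Expected shape: (1, Model_Config["Output Linear Nodes"])
    print('Xception output shape:', tuple(out.shape))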