UNet Improvement Notes

Paper: U-Net: Convolutional Networks for Biomedical Image Segmentation

GitHub: https://github.com/milesial/Pytorch-UNet

Improvement 1: data augmentation, new file augmentations.py

# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
"""
Image augmentation functions
"""

import math
import random

import cv2
import numpy as np
import imgaug.augmenters as iaa



class Albumentations:
    # YOLOv5 Albumentations class (optional, only used if package is installed)
    def __init__(self):
        self.transform = None
        try:
            #version 1.0.3
            import albumentations as A

            T = [
                # A.OneOf([
                #     A.IAAAdditiveGaussianNoise(),  # add Gaussian noise to the input image
                #     A.GaussNoise(),                # apply Gaussian noise to the input image
                # ], p=0.2),  # probability of applying the selected transform
                A.OneOf([
                    A.MotionBlur(p=0.2),                 # motion blur with a random-size kernel
                    A.MedianBlur(blur_limit=3, p=0.01),  # median filtering
                    A.Blur(blur_limit=3, p=0.01),        # blur with a random-size kernel
                ], p=0.2),
                A.RandomBrightnessContrast(p=0.2),   # random brightness/contrast
                A.CLAHE(p=0.01),
                A.RandomGamma(p=0.0),
                A.ImageCompression(quality_lower=75, p=0.0)]  # transforms
            self.transform = A.Compose(T)

            print('albumentations: ' + ', '.join(f'{x}' for x in self.transform.transforms if x.p))
        except ImportError:  # package not installed, skip
            pass
        except Exception as e:
            print('albumentations: '+ f'{e}')

    def __call__(self, im, p=0.8):
        if self.transform and random.random() < p:
            new = self.transform(image=im)  # transformed
            im = new['image']

        if random.random() > p:  # applied with probability 1 - p
            im = augment_hsv(im, hgain=0.5, sgain=0.5, vgain=0.5)
        if random.random() > p:  # applied with probability 1 - p
            im = hist_equalize(im, clahe=True, bgr=True)

        # extra augmentation: apply one or two randomly chosen transforms;
        # rand_num is drawn from 0-20, so values above 12 leave the image unchanged
        range_num = random.randint(1, 2)
        for _ in range(range_num):
            rand_num = random.randint(0, 20)
            if rand_num == 0:
                im = channel_shuffle(im)
            # elif rand_num == 1:
            #     im = random_noise(im, limit=[0, 0.2], p=0.5)
            elif rand_num == 2:
                im = random_brightness(im, brightness=0.3)
            elif rand_num == 3:
                im = random_contrast(im, contrast=0.3)
            elif rand_num == 4:
                im = random_saturation(im, saturation=0.5)
            elif rand_num == 5:
                im = EqExtension(im)
            elif rand_num == 6:
                im = HueExtension(im)
            elif rand_num == 7:
                im = zaodian(im)
            elif rand_num == 8:
                im = superpixelsaug(im)
            elif rand_num == 9:
                im = fogaug(im)
            elif rand_num == 10:
                im = cloudsaug(im)
            # elif rand_num == 11:
            #     im = fnaug(im)
            elif rand_num == 11:
                im = Coarseaug(im)
            elif rand_num == 12:
                im = random_hue(im)

        return im


def augment_hsv(im, hgain=0.5, sgain=0.5, vgain=0.5):
    # HSV color-space augmentation
    if hgain or sgain or vgain:
        r = np.random.uniform(-1, 1, 3) * [hgain, sgain, vgain] + 1  # random gains
        hue, sat, val = cv2.split(cv2.cvtColor(im, cv2.COLOR_BGR2HSV))
        dtype = im.dtype  # uint8

        x = np.arange(0, 256, dtype=r.dtype)
        lut_hue = ((x * r[0]) % 180).astype(dtype)
        lut_sat = np.clip(x * r[1], 0, 255).astype(dtype)
        lut_val = np.clip(x * r[2], 0, 255).astype(dtype)

        im_hsv = cv2.merge((cv2.LUT(hue, lut_hue), cv2.LUT(sat, lut_sat), cv2.LUT(val, lut_val)))
        return cv2.cvtColor(im_hsv, cv2.COLOR_HSV2BGR)
    return im  # all gains are zero: return the image unchanged (the caller uses the return value)


def hist_equalize(im, clahe=True, bgr=True):
    # Equalize histogram on BGR image 'im' with im.shape(n,m,3) and range 0-255
    yuv = cv2.cvtColor(im, cv2.COLOR_BGR2YUV if bgr else cv2.COLOR_RGB2YUV)
    if clahe:
        c = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))
        yuv[:, :, 0] = c.apply(yuv[:, :, 0])
    else:
        yuv[:, :, 0] = cv2.equalizeHist(yuv[:, :, 0])  # equalize Y channel histogram
    return cv2.cvtColor(yuv, cv2.COLOR_YUV2BGR if bgr else cv2.COLOR_YUV2RGB)  # convert back to BGR (or RGB)




def channel_shuffle(img):
    if(img.shape[2] == 3):
        ch_arr = [0, 1, 2]
        np.random.shuffle(ch_arr)
        img = img[..., ch_arr]
    return img




def EqExtension(src):
    # per-channel (B, G, R) histogram equalization
    I_backup = src.copy()
    b, g, r = cv2.split(I_backup)
    b = cv2.equalizeHist(b)
    g = cv2.equalizeHist(g)
    r = cv2.equalizeHist(r)
    I_eq = cv2.merge([b, g, r])
    return I_eq.astype(np.uint8)


def HueExtension(src):
    # despite its name, this scales the V (brightness) channel by a random factor in [0.90, 1.00]
    img_hsv = cv2.cvtColor(src, cv2.COLOR_BGR2HSV)
    cre = random.randint(90, 100)
    cre = float(cre) / 100
    img_hsv[:, :, 2] = img_hsv[:, :, 2] * cre

    dst = cv2.cvtColor(img_hsv, cv2.COLOR_HSV2BGR)
    return dst.astype(np.uint8)




# randomly add 500 salt-noise (white) pixels
def zaodian(img):
    height, width, channel = img.shape
    img_zao = img.copy()
    for i in range(500):
        x = np.random.randint(0, height)
        y = np.random.randint(0, width)
        img_zao[x, y, :] = 255
    return img_zao.astype(np.uint8)


#def random_noise(img, limit=[0, 0.2], p=0.5):
    #if random.random() < p:
        #H, W = img.shape[:2]
        #noise = np.random.uniform(limit[0], limit[1], size=(H,W)) * 255

        #img = img + noise[:,:,np.newaxis]*np.array([1,1,1])
        #img = np.clip(img, 0, 255).astype(np.uint8)
        
    #return img

def random_brightness(image, brightness=0.3):
    alpha = 1 + np.random.uniform(-brightness, brightness)
    img = alpha * image
    img = np.clip(img, 0, 255).astype(np.uint8)
    return img

def random_contrast(img, contrast=0.3):
    coef = np.array([[[0.114, 0.587, 0.299]]])  # BGR luma weights
    alpha = 1.0 + np.random.uniform(-contrast, contrast)
    gray = img * coef
    gray = (3.0 * (1.0 - alpha) / gray.size) * np.sum(gray)
    img = alpha * img + gray
    img = np.clip(img, 0, 255).astype(np.uint8)
    return img

def random_saturation(img, saturation=0.5):
    coef = np.array([[[0.299, 0.587, 0.114]]])
    # blend between the image (alpha = 1) and its grayscale version; smaller alpha desaturates
    alpha = 1.0 + np.random.uniform(-saturation, saturation)
    gray = img * coef
    gray = np.sum(gray, axis=2, keepdims=True)
    img = alpha * img + (1.0 - alpha) * gray
    img = np.clip(img, 0, 255).astype(np.uint8)
    return img

def random_hue(image,hue=0.5):
    h = int(np.random.uniform(-hue, hue)*180)

    hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
    hsv[:, :, 0] = (hsv[:, :, 0].astype(int) + h) % 180
    image = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)
    return image



def superpixelsaug(img):
    images = np.expand_dims(img, axis=0)
    aug = iaa.Superpixels(p_replace=0.005,max_size=50)
    images_aug = aug(images=images)
    img_aug = np.squeeze(images_aug)
    return img_aug.astype(np.uint8)



def fogaug(img):
    images = np.expand_dims(img, axis=0)
    aug = iaa.Fog()
    images_aug = aug(images=images)
    img_aug = np.squeeze(images_aug)
    return img_aug.astype(np.uint8)


def cloudsaug(img):
    images = np.expand_dims(img, axis=0)
    aug = iaa.Clouds()
    images_aug = aug(images=images)
    img_aug = np.squeeze(images_aug)
    return img_aug.astype(np.uint8)
 
#def fnaug(img):
    #images = np.expand_dims(img, axis=0)
    #aug = iaa.FrequencyNoiseAlpha(first=iaa.EdgeDetect(0.5))
    #images_aug = aug(images=images)
    #img_aug = np.squeeze(images_aug)
    #return img_aug.astype(np.uint8)


def Coarseaug(img):
    images = np.expand_dims(img, axis=0)
    aug = iaa.CoarseDropout(0.02, size_percent=0.5)
    images_aug = aug(images=images)
    img_aug = np.squeeze(images_aug)
    return img_aug.astype(np.uint8)
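
The helpers above all take and return uint8 BGR images. A minimal smoke test along these lines (a sketch, not part of the original file) can exercise each standalone augmentation once and verify shape and dtype:

if __name__ == '__main__':
    # sanity check: every augmentation should accept and return a uint8 BGR image
    dummy = np.random.randint(0, 256, (128, 128, 3), dtype=np.uint8)
    funcs = [channel_shuffle, random_brightness, random_contrast, random_saturation,
             random_hue, EqExtension, HueExtension, zaodian,
             superpixelsaug, fogaug, cloudsaug, Coarseaug]
    for f in funcs:
        out = f(dummy.copy())
        assert out.shape == dummy.shape and out.dtype == np.uint8, f.__name__
        print(f'{f.__name__}: ok')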




Usage: add the following to the __getitem__ function in utils/data_loading.py:

#albumentations
            if self.albumentations is not None and self.mode == 'train':  
                image = self.albumentations(image)
            #albumentations
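
The snippet above assumes the dataset exposes self.albumentations and self.mode. A minimal sketch of the wiring (the class name and signature are placeholders, not the actual utils/data_loading.py definitions):

from augmentations import Albumentations

class BasicDataset:  # hypothetical skeleton; the real dataset class takes more arguments
    def __init__(self, images_dir, masks_dir, mode='train'):
        self.mode = mode
        # build the augmentation pipeline only for training; None disables it
        self.albumentations = Albumentations() if mode == 'train' else None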

Improvement 2: data augmentation based on stitching images together (mosaic); both the image and the label must be modified. Add the following to the __getitem__ function in utils/data_loading.py:

def __getitem__(self, idx):
        # training data is used in BGR format
        max_height = 0
        max_width = 0
        height_up_left, width_up_left, height_up_right, width_up_right, height_down_left, width_down_left, height_down_right, width_down_right = 0, 0, 0, 0, 0, 0, 0, 0
        num1, num2, num3, num4 = idx, random.randint(0, self.img_num - 1), random.randint(0, self.img_num - 1), random.randint(0, self.img_num - 1)

        img_up_left = cv2.imread(self.img_list[num1], 1)
        # stitch four randomly chosen images into one large image
        if self.train:
            img_up_right = cv2.imread(self.img_list[num2], 1)
            img_down_left = cv2.imread(self.img_list[num3], 1)
            img_down_right = cv2.imread(self.img_list[num4], 1)

            height_up_left, width_up_left = img_up_left.shape[:2]
            height_up_right, width_up_right = img_up_right.shape[:2]
            height_down_left, width_down_left = img_down_left.shape[:2]
            height_down_right, width_down_right = img_down_right.shape[:2]

            max_height = max([height_up_left, height_up_right, height_down_left, height_down_right])
            max_width = max([width_up_left, width_up_right, width_down_left, width_down_right])

            # training images may differ in size; as in Faster R-CNN, pad every tile
            # with black borders to the maximum height/width before stitching
            for num, im in enumerate([img_up_left, img_up_right, img_down_left, img_down_right]):
                img_template = np.zeros((max_height, max_width, 3), np.uint8)
                if num == 0:
                    img_template[:height_up_left, :width_up_left, :] = im.copy()
                    img_up_left = img_template.copy()
                elif num == 1:
                    img_template[:height_up_right, :width_up_right, :] = im.copy()
                    img_up_right = img_template.copy()
                elif num == 2:
                    img_template[:height_down_left, :width_down_left, :] = im.copy()
                    img_down_left = img_template.copy()
                elif num == 3:
                    img_template[:height_down_right, :width_down_right, :] = im.copy()
                    img_down_right = img_template.copy()

            img = np.vstack([np.hstack([img_up_left, img_up_right]), np.hstack([img_down_left, img_down_right])])
        else:
            img = img_up_left

        mask_up_left = cv2.imread(self.mask[num1], 0)
        if self.train:
            mask_up_right = cv2.imread(self.mask[num2], 0)
            mask_down_left = cv2.imread(self.mask[num3], 0)
            mask_down_right = cv2.imread(self.mask[num4], 0)

            # pad the masks the same way as the images before stitching
            for num, ma in enumerate([mask_up_left, mask_up_right, mask_down_left, mask_down_right]):
                mask_template = np.zeros((max_height, max_width), np.uint8)
                if num == 0:
                    mask_template[:height_up_left, :width_up_left] = ma.copy()
                    mask_up_left = mask_template.copy()
                elif num == 1:
                    mask_template[:height_up_right, :width_up_right] = ma.copy()
                    mask_up_right = mask_template.copy()
                elif num == 2:
                    mask_template[:height_down_left, :width_down_left] = ma.copy()
                    mask_down_left = mask_template.copy()
                elif num == 3:
                    mask_template[:height_down_right, :width_down_right] = ma.copy()
                    mask_down_right = mask_template.copy()

            mask = np.vstack([np.hstack([mask_up_left, mask_up_right]), np.hstack([mask_down_left, mask_down_right])])
        else:
            mask = mask_up_left
        mask = mask / 255.0  # foreground == 255, background == 0

        if self.transforms is not None:
            # OpenCV (BGR) to Pillow (RGB)
            img = Image.fromarray(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
            mask = Image.fromarray(mask)
            img, mask = self.transforms(img, mask)

        return img, mask
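
This __getitem__ assumes the dataset carries self.img_list, self.mask, self.img_num, self.train and self.transforms. A minimal constructor consistent with the snippet might look as follows (the class name, signature, and directory layout are assumptions for illustration):

import glob
import os

class MosaicDataset:  # hypothetical skeleton matching the __getitem__ above
    def __init__(self, images_dir, masks_dir, train=True, transforms=None):
        self.img_list = sorted(glob.glob(os.path.join(images_dir, '*.png')))
        self.mask = sorted(glob.glob(os.path.join(masks_dir, '*.png')))
        self.img_num = len(self.img_list)  # sampling range for the three extra tiles
        self.train = train                 # stitching is only applied during training
        self.transforms = transforms

    def __len__(self):
        return self.img_num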

Improvement 3: random perturbation of the training image distribution. Add the following to the __getitem__ function in utils/data_loading.py:

if self.train:
                # random multiply-add perturbation
                img = img * random.randint(90, 110) / 100 + random.randint(-10, 10) / 100
                # random multiply-add perturbation
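
Applied to an image tensor scaled to [0, 1], this multiplies intensities by a factor in [0.90, 1.10] and shifts them by up to 0.10 in either direction, so values can leave the nominal range. A standalone sketch with clamping added (the clamp is an assumption about the downstream pipeline, not part of the original snippet):

import random
import torch

def random_mul_add(img: torch.Tensor) -> torch.Tensor:
    # scale by a factor in [0.90, 1.10], shift by a value in [-0.10, 0.10]
    img = img * random.randint(90, 110) / 100 + random.randint(-10, 10) / 100
    return img.clamp(0.0, 1.0)  # keep values in the expected [0, 1] range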

Improvement 4: add prediction/inference code.

import os
import time
import cv2

import torch
from torchvision import transforms
import numpy as np
from PIL import Image

from src import UNet




class ThyroidNoduleDetect():
    def __init__(self, weights_path="save_weights/N-net3_weight_best.pth"):
        # ultrasound thyroid nodule detection
        self.use_cuda = torch.cuda.is_available()  # fall back to CPU when CUDA is absent
        self.use_float16 = False
        # mean = (0.302, 0.302, 0.302)
        # std = (0.093, 0.093, 0.093)
        self.mean = [0.326, 0.327, 0.354]
        self.std = [0.149, 0.149, 0.157]

        # get device
        device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
        print("using {} device.".format(device))

        # create model
        classes = 1  # exclude background
        self.model = UNet(in_channels=3, num_classes=classes + 1, base_c=32)

        # load weights
        self.model.load_state_dict(torch.load(weights_path, map_location='cpu')['model'])
        self.model.to(device)
        self.model.requires_grad_(False)
        self.model.eval()  # switch to eval mode

    def process(self, image):
        x = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        x = (x / np.max(x) - self.mean) / self.std  # scale by the image max, then normalize
        x = np.expand_dims(x, axis=0)

        if self.use_cuda:
            x = torch.from_numpy(x).cuda()
        else:
            x = torch.from_numpy(x)

        x = x.to(torch.float32 if not self.use_float16 else torch.float16).permute(0, 3, 1, 2)

        t1 = time.time()
        with torch.no_grad():
            output = self.model(x)

            prediction = output['out'].argmax(1).squeeze(0)
            prediction = prediction.to("cpu").numpy().astype(np.uint8)
            prediction_mask = prediction * 255

        t2 = time.time()
        tact_time = t2 - t1
        print("ThyroidNoduleDetect", f'{tact_time} seconds, {1 / tact_time} FPS, @batch_size 1')
        return prediction_mask


    def draw_circles(self, image, prediction_mask):
        _, binary = cv2.threshold(prediction_mask, 127, 255, cv2.THRESH_BINARY)
        contours, hierarchy = cv2.findContours(binary, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
        cv2.drawContours(image, contours, -1, (0, 0, 255), 3)

        for index, c in enumerate(contours):
            M = cv2.moments(c)
            print(index, M)
            if M["m00"] != 0.0:
                # compute the centroid of the contour
                cX = int(M["m10"] / M["m00"])
                cY = int(M["m01"] / M["m00"])
                cv2.circle(image, (cX, cY), 4, (0, 255, 0), -1)

        return image

def test_image():
    img_path = "TNUI/test/images/1079.png"
    roi_mask_path = "TNUI/test/masks/1079.png"  # ground-truth mask path (unused here)
    image = cv2.imread(img_path, 1)

    tnd = ThyroidNoduleDetect()
    prediction_mask = tnd.process(image)
    cv2.imwrite("pre_result/N-net2_test_1079.png", prediction_mask)


def eval_images():
    tnd = ThyroidNoduleDetect()
    image_path = "TNUI/test/images/"
    mask_path = "TNUI/test/masks/"

    for name in os.listdir(image_path):
        image = cv2.imread(os.path.join(image_path, name), 1)
        mask = cv2.imread(os.path.join(mask_path, name), 1)

        prediction_mask = tnd.process(image)
        prediction_mask_bgr = cv2.cvtColor(prediction_mask, cv2.COLOR_GRAY2BGR)
        image = tnd.draw_circles(image, prediction_mask)

        cv2.imwrite(os.path.join("pre_result/", name), np.hstack([image, prediction_mask_bgr, mask]))




if __name__ == '__main__':
    test_image()
    #eval_images()

Improvement 5: MobileV3Unet, new file mobilenet_unet.py

from collections import OrderedDict
from typing import Dict
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import Tensor
from torchvision.models import mobilenet_v3_large
from .unet import Up, OutConv


class IntermediateLayerGetter(nn.ModuleDict):
    """
    Module wrapper that returns intermediate layers from a model

    It has a strong assumption that the modules have been registered
    into the model in the same order as they are used.
    This means that one should **not** reuse the same nn.Module
    twice in the forward if you want this to work.

    Additionally, it is only able to query submodules that are directly
    assigned to the model. So if `model` is passed, `model.feature1` can
    be returned, but not `model.feature1.layer2`.

    Args:
        model (nn.Module): model on which we will extract the features
        return_layers (Dict[name, new_name]): a dict containing the names
            of the modules for which the activations will be returned as
            the key of the dict, and the value of the dict is the name
            of the returned activation (which the user can specify).
    """
    _version = 2
    __annotations__ = {
        "return_layers": Dict[str, str],
    }

    def __init__(self, model: nn.Module, return_layers: Dict[str, str]) -> None:
        if not set(return_layers).issubset([name for name, _ in model.named_children()]):
            raise ValueError("return_layers are not present in model")
        orig_return_layers = return_layers
        return_layers = {str(k): str(v) for k, v in return_layers.items()}

        # rebuild the backbone, dropping all modules that are not used
        layers = OrderedDict()
        for name, module in model.named_children():
            layers[name] = module
            if name in return_layers:
                del return_layers[name]
            if not return_layers:
                break

        super(IntermediateLayerGetter, self).__init__(layers)
        self.return_layers = orig_return_layers

    def forward(self, x: Tensor) -> Dict[str, Tensor]:
        out = OrderedDict()
        for name, module in self.items():
            x = module(x)
            if name in self.return_layers:
                out_name = self.return_layers[name]
                out[out_name] = x
        return out


class MobileV3Unet(nn.Module):
    def __init__(self, num_classes, pretrain_backbone: bool = False):
        super(MobileV3Unet, self).__init__()
        backbone = mobilenet_v3_large(pretrained=pretrain_backbone)

        # if pretrain_backbone:
        #     # load pretrained MobileNetV3-Large backbone weights
        #     # https://download.pytorch.org/models/mobilenet_v3_large-8738ca79.pth
        #     backbone.load_state_dict(torch.load("mobilenet_v3_large.pth", map_location='cpu'))

        backbone = backbone.features

        stage_indices = [1, 3, 6, 12, 15]
        self.stage_out_channels = [backbone[i].out_channels for i in stage_indices]
        return_layers = dict([(str(j), f"stage{i}") for i, j in enumerate(stage_indices)])
        self.backbone = IntermediateLayerGetter(backbone, return_layers=return_layers)

        c = self.stage_out_channels[4] + self.stage_out_channels[3]
        self.up1 = Up(c, self.stage_out_channels[3])
        c = self.stage_out_channels[3] + self.stage_out_channels[2]
        self.up2 = Up(c, self.stage_out_channels[2])
        c = self.stage_out_channels[2] + self.stage_out_channels[1]
        self.up3 = Up(c, self.stage_out_channels[1])
        c = self.stage_out_channels[1] + self.stage_out_channels[0]
        self.up4 = Up(c, self.stage_out_channels[0])
        self.conv = OutConv(self.stage_out_channels[0], num_classes=num_classes)

    def forward(self, x: torch.Tensor) -> Dict[str, torch.Tensor]:
        input_shape = x.shape[-2:]
        backbone_out = self.backbone(x)
        x = self.up1(backbone_out['stage4'], backbone_out['stage3'])
        x = self.up2(x, backbone_out['stage2'])
        x = self.up3(x, backbone_out['stage1'])
        x = self.up4(x, backbone_out['stage0'])
        x = self.conv(x)
        x = F.interpolate(x, size=input_shape, mode="bilinear", align_corners=False)

        return {"out": x}
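
A quick shape check, run from the project root (the package path src.mobilenet_unet is an assumption based on the relative import above):

import torch
from src.mobilenet_unet import MobileV3Unet  # assumed package layout

model = MobileV3Unet(num_classes=2)
x = torch.randn(1, 3, 256, 256)
out = model(x)["out"]
print(out.shape)  # the final bilinear interpolation restores the input size: [1, 2, 256, 256]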

Improvement 6: VGG16UNet, new file vgg_unet.py

from collections import OrderedDict
from typing import Dict

import torch
import torch.nn as nn
from torch import Tensor
from torchvision.models import vgg16_bn
from .unet import Up, OutConv


class IntermediateLayerGetter(nn.ModuleDict):
    """
    Module wrapper that returns intermediate layers from a model

    It has a strong assumption that the modules have been registered
    into the model in the same order as they are used.
    This means that one should **not** reuse the same nn.Module
    twice in the forward if you want this to work.

    Additionally, it is only able to query submodules that are directly
    assigned to the model. So if `model` is passed, `model.feature1` can
    be returned, but not `model.feature1.layer2`.

    Args:
        model (nn.Module): model on which we will extract the features
        return_layers (Dict[name, new_name]): a dict containing the names
            of the modules for which the activations will be returned as
            the key of the dict, and the value of the dict is the name
            of the returned activation (which the user can specify).
    """
    _version = 2
    __annotations__ = {
        "return_layers": Dict[str, str],
    }

    def __init__(self, model: nn.Module, return_layers: Dict[str, str]) -> None:
        if not set(return_layers).issubset([name for name, _ in model.named_children()]):
            raise ValueError("return_layers are not present in model")
        orig_return_layers = return_layers
        return_layers = {str(k): str(v) for k, v in return_layers.items()}

        # rebuild the backbone, dropping all modules that are not used
        layers = OrderedDict()
        for name, module in model.named_children():
            layers[name] = module
            if name in return_layers:
                del return_layers[name]
            if not return_layers:
                break

        super(IntermediateLayerGetter, self).__init__(layers)
        self.return_layers = orig_return_layers

    def forward(self, x: Tensor) -> Dict[str, Tensor]:
        out = OrderedDict()
        for name, module in self.items():
            x = module(x)
            if name in self.return_layers:
                out_name = self.return_layers[name]
                out[out_name] = x
        return out


class VGG16UNet(nn.Module):
    def __init__(self, num_classes, pretrain_backbone: bool = False):
        super(VGG16UNet, self).__init__()
        backbone = vgg16_bn(pretrained=pretrain_backbone)

        # if pretrain_backbone:
        #     # load pretrained vgg16_bn weights
        #     # https://download.pytorch.org/models/vgg16_bn-6c64b313.pth
        #     backbone.load_state_dict(torch.load("vgg16_bn.pth", map_location='cpu'))

        backbone = backbone.features

        stage_indices = [5, 12, 22, 32, 42]
        self.stage_out_channels = [64, 128, 256, 512, 512]
        return_layers = dict([(str(j), f"stage{i}") for i, j in enumerate(stage_indices)])
        self.backbone = IntermediateLayerGetter(backbone, return_layers=return_layers)

        c = self.stage_out_channels[4] + self.stage_out_channels[3]
        self.up1 = Up(c, self.stage_out_channels[3])
        c = self.stage_out_channels[3] + self.stage_out_channels[2]
        self.up2 = Up(c, self.stage_out_channels[2])
        c = self.stage_out_channels[2] + self.stage_out_channels[1]
        self.up3 = Up(c, self.stage_out_channels[1])
        c = self.stage_out_channels[1] + self.stage_out_channels[0]
        self.up4 = Up(c, self.stage_out_channels[0])
        self.conv = OutConv(self.stage_out_channels[0], num_classes=num_classes)

    def forward(self, x: torch.Tensor) -> Dict[str, torch.Tensor]:
        backbone_out = self.backbone(x)
        x = self.up1(backbone_out['stage4'], backbone_out['stage3'])
        x = self.up2(x, backbone_out['stage2'])
        x = self.up3(x, backbone_out['stage1'])
        x = self.up4(x, backbone_out['stage0'])
        x = self.conv(x)

        return {"out": x}
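
The same shape check applies here (assumed package layout). VGG16UNet has no final interpolation, but stage0 is tapped at index 5, before the first pooling layer, so the output already matches the input resolution:

import torch
from src.vgg_unet import VGG16UNet  # assumed package layout

model = VGG16UNet(num_classes=2)
x = torch.randn(1, 3, 256, 256)
out = model(x)["out"]
print(out.shape)  # expected: torch.Size([1, 2, 256, 256])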

Improvement 7: HNet, new file HNet.py. HNet keeps the standard UNet layout but rebuilds the bottleneck with an SDC block of stacked dilated convolutions (see SDCBlock below).

from functools import partial
from typing import Dict
import torch
import torch.nn as nn
import torch.nn.functional as F

nonlinearity = partial(F.relu, inplace=True)


class SDCBlock(nn.Module):
    # serial dilated-convolution block: four branches with growing receptive fields
    # (dilations 1, 3, and 11, partly cascaded) are summed with the identity,
    # similar in spirit to the DAC block of CE-Net
    def __init__(self, channel):
        super(SDCBlock, self).__init__()
        self.dilate1 = nn.Conv2d(channel, channel, kernel_size=3, dilation=1, padding=1)
        self.dilate2 = nn.Conv2d(channel, channel, kernel_size=3, dilation=3, padding=3)
        self.dilate3 = nn.Conv2d(channel, channel, kernel_size=3, dilation=11, padding=11)
        self.conv1x1 = nn.Conv2d(channel, channel, kernel_size=1, dilation=1, padding=0)

    def forward(self, x):
        dilate1_out = nonlinearity(self.dilate1(x))
        dilate2_out = nonlinearity(self.conv1x1(self.dilate2(x)))
        dilate3_out = nonlinearity(self.conv1x1(self.dilate2(self.dilate1(x))))
        dilate4_out = nonlinearity(self.conv1x1(self.dilate2(self.dilate1(self.dilate3(x)))))
        out = x + dilate1_out + dilate2_out + dilate3_out + dilate4_out
        return out


class DoubleConv(nn.Sequential):
    def __init__(self, in_channels, out_channels, mid_channels=None):
        if mid_channels is None:
            mid_channels = out_channels
        super(DoubleConv, self).__init__(
            nn.Conv2d(in_channels, mid_channels, kernel_size=3, padding=1, bias=False),
            nn.BatchNorm2d(mid_channels),
            nn.ReLU(inplace=True),
            nn.Conv2d(mid_channels, out_channels, kernel_size=3, padding=1, bias=False),
            nn.BatchNorm2d(out_channels),
            nn.ReLU(inplace=True)
        )


class SingleConv(nn.Sequential):
    def __init__(self, in_channels, out_channels, mid_channels=None):
        if mid_channels is None:
            mid_channels = out_channels
        super(SingleConv, self).__init__(
            nn.Conv2d(in_channels, mid_channels, kernel_size=3, padding=1, bias=False),
            nn.BatchNorm2d(mid_channels),
            nn.ReLU(inplace=True),
        )


class Up(nn.Module):
    def __init__(self, in_channels, out_channels, bilinear=True):
        super(Up, self).__init__()
        if bilinear:
            self.up = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True)
            self.conv = DoubleConv(in_channels, out_channels, in_channels // 2)
        else:
            self.up = nn.ConvTranspose2d(in_channels, in_channels // 2, kernel_size=2, stride=2)
            self.conv = DoubleConv(in_channels, out_channels)

    def forward(self, x1: torch.Tensor, x2: torch.Tensor) -> torch.Tensor:
        x1 = self.up(x1)
        # [N, C, H, W]
        diff_y = x2.size()[2] - x1.size()[2]
        diff_x = x2.size()[3] - x1.size()[3]

        # padding_left, padding_right, padding_top, padding_bottom
        x1 = F.pad(x1, [diff_x // 2, diff_x - diff_x // 2,
                        diff_y // 2, diff_y - diff_y // 2])

        x = torch.cat([x2, x1], dim=1)
        x = self.conv(x)
        return x


class OutConv(nn.Sequential):
    def __init__(self, in_channels, num_classes):
        super(OutConv, self).__init__(
            nn.Conv2d(in_channels, num_classes, kernel_size=1)
        )

class Down(nn.Sequential):
    def __init__(self, in_channels, out_channels):
        super(Down, self).__init__(
            nn.MaxPool2d(2, stride=2),
            DoubleConv(in_channels, out_channels)
        )


class DownLast(nn.Sequential):
    def __init__(self, in_channels, out_channels):
        super(DownLast, self).__init__(
            nn.MaxPool2d(2, stride=2),
            SingleConv(in_channels, out_channels)
        )

class UNet(nn.Module):
    def __init__(self,
                 in_channels: int = 1,
                 num_classes: int = 2,
                 bilinear: bool = True,
                 base_c: int = 64):
        super(UNet, self).__init__()
        self.in_channels = in_channels
        self.num_classes = num_classes
        self.bilinear = bilinear

        self.in_conv = DoubleConv(in_channels, base_c)
        self.down1 = Down(base_c, base_c * 2)
        self.down2 = Down(base_c * 2, base_c * 4)
        self.down3 = Down(base_c * 4, base_c * 8)
        factor = 2 if bilinear else 1
        self.down4 = DownLast(base_c * 8, base_c * 16 // factor)
        # self.down4 = Down(base_c * 8, base_c * 16 // factor)
        self.sdc = SDCBlock(base_c * 8)
        self.singleconv = SingleConv(base_c * 8, base_c * 16 // factor)
        self.up1 = Up(base_c * 16, base_c * 8 // factor, bilinear)
        self.up2 = Up(base_c * 8, base_c * 4 // factor, bilinear)
        self.up3 = Up(base_c * 4, base_c * 2 // factor, bilinear)
        self.up4 = Up(base_c * 2, base_c, bilinear)
        self.out_conv = OutConv(base_c, num_classes)

    def forward(self, x: torch.Tensor) -> Dict[str, torch.Tensor]:
        x1 = self.in_conv(x)
        x2 = self.down1(x1)
        x3 = self.down2(x2)
        x4 = self.down3(x3)
        x5 = self.singleconv(self.sdc(self.down4(x4)))
        # x5 = self.down4(x4)
        x = self.up1(x5, x4)
        x = self.up2(x, x3)
        x = self.up3(x, x2)
        x = self.up4(x, x1)
        logits = self.out_conv(x)

        return {"out": logits}
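
A quick shape check for HNet's UNet (the import path is an assumption; the constructor call mirrors the one used in the inference code of Improvement 4):

import torch
from HNet import UNet  # assumed file layout

model = UNet(in_channels=3, num_classes=2, base_c=32)
x = torch.randn(1, 3, 256, 256)
out = model(x)["out"]
print(out.shape)  # expected: torch.Size([1, 2, 256, 256])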
