import torch
import torch.nn as nn
from math import log2


class ResidualBlock(nn.Module):
    """Residual block: two 3x3 convolutions with batch norm and PReLU."""

    def __init__(self, channels: int) -> None:
        super().__init__()
        self.block = nn.Sequential(
            nn.Conv2d(channels, channels, kernel_size=3, padding=1),
            nn.BatchNorm2d(channels),
            nn.PReLU(),
            nn.Conv2d(channels, channels, kernel_size=3, padding=1),
            nn.BatchNorm2d(channels),
        )

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        residual = self.block(x)
        return x + residual  # identity skip connection


class UpSample(nn.Module):
    """Upscales spatial resolution by `upscale` via sub-pixel (PixelShuffle) convolution."""

    def __init__(self, in_feat: int, upscale: int) -> None:
        super().__init__()
        self.block = nn.Sequential(
            # Expand channels by upscale**2, then rearrange them into spatial positions.
            nn.Conv2d(in_feat, in_feat * upscale ** 2, kernel_size=3, padding=1),
            nn.PixelShuffle(upscale),
            nn.PReLU(),
        )

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        return self.block(x)


class Generator(nn.Module):
    """SRGAN-style generator: a residual trunk followed by PixelShuffle upsampling."""

    def __init__(self, scale_factor: int) -> None:
        super().__init__()
        # scale_factor must be a power of two; each UpSample block doubles resolution.
        num_upsamples = int(log2(scale_factor))
        self.conv_block1 = nn.Sequential(
            nn.Conv2d(3, 64, kernel_size=9, padding=4),
            nn.PReLU(),
        )
        self.conv_block2 = ResidualBlock(64)
        self.conv_block3 = ResidualBlock(64)
        self.conv_block4 = ResidualBlock(64)
        self.conv_block5 = ResidualBlock(64)
        self.conv_block6 = ResidualBlock(64)
        self.conv_block7 = nn.Sequential(
            nn.Conv2d(64, 64, kernel_size=3, padding=1),
            nn.BatchNorm2d(64),
        )
        # Stack of x2 upsampling blocks, followed by the final RGB reconstruction conv.
        upsample_blocks = [UpSample(64, 2) for _ in range(num_upsamples)]
        upsample_blocks.append(nn.Conv2d(64, 3, kernel_size=9, padding=4))
        self.conv_block8 = nn.Sequential(*upsample_blocks)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        block1 = self.conv_block1(x)
        block2 = self.conv_block2(block1)
        block3 = self.conv_block3(block2)
        block4 = self.conv_block4(block3)
        block5 = self.conv_block5(block4)
        block6 = self.conv_block6(block5)
        block7 = self.conv_block7(block6)
        # Long skip connection from the first feature map around the residual trunk.
        block8 = self.conv_block8(block1 + block7)
        # Map the tanh output from [-1, 1] to [0, 1].
        return (torch.tanh(block8) + 1) / 2


class Discriminator(nn.Module):
    """SRGAN-style discriminator: strided-conv feature extractor with a 1x1-conv head."""

    def __init__(self) -> None:
        super().__init__()
        self.block1 = nn.Sequential(
            nn.Conv2d(3, 64, kernel_size=3, padding=1),
            nn.LeakyReLU(0.2),
            nn.Conv2d(64, 64, kernel_size=3, stride=2, padding=1),
            nn.BatchNorm2d(64),
            nn.LeakyReLU(0.2),
            nn.Conv2d(64, 128, kernel_size=3, padding=1),
            nn.BatchNorm2d(128),
            nn.LeakyReLU(0.2),
            nn.Conv2d(128, 128, kernel_size=3, stride=2, padding=1),
            nn.BatchNorm2d(128),
            nn.LeakyReLU(0.2),
        )
        self.block2 = nn.Sequential(
            nn.Conv2d(128, 256, kernel_size=3, padding=1),
            nn.BatchNorm2d(256),
            nn.LeakyReLU(0.2),
            nn.Conv2d(256, 256, kernel_size=3, stride=2, padding=1),
            nn.BatchNorm2d(256),
            nn.LeakyReLU(0.2),
            nn.Conv2d(256, 512, kernel_size=3, padding=1),
            nn.BatchNorm2d(512),
            nn.LeakyReLU(0.2),
            nn.Conv2d(512, 512, kernel_size=3, stride=2, padding=1),
            nn.BatchNorm2d(512),
            nn.LeakyReLU(0.2),
        )
        # Global average pooling makes the head independent of the input resolution.
        self.block3 = nn.Sequential(
            nn.AdaptiveAvgPool2d(1),
            nn.Conv2d(512, 1024, kernel_size=1),
            nn.LeakyReLU(0.2),
            nn.Conv2d(1024, 1, kernel_size=1),
            nn.Sigmoid(),
        )

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        batch_size = x.size(0)
        x = self.block1(x)
        x = self.block2(x)
        x = self.block3(x)
        # Flatten the (batch, 1, 1, 1) output to a per-image real/fake probability.
        return x.view(batch_size)


if __name__ == '__main__':
pass
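    # A minimal, hypothetical smoke test (not part of any training pipeline): it only
    # checks tensor shapes, assuming a 4x upscale factor and 24x24 low-res RGB patches.
    lr = torch.randn(2, 3, 24, 24)      # batch of 2 low-res inputs
    generator = Generator(scale_factor=4)
    discriminator = Discriminator()
    sr = generator(lr)                  # expected shape: (2, 3, 96, 96)
    scores = discriminator(sr)          # expected shape: (2,), values in (0, 1)
    print(sr.shape, scores.shape)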