Discriminator.py
import torch
from torch import nn
import torch.nn.functional as F


def Normalize(in_channels):
    # GroupNorm requires num_channels to be divisible by num_groups (32 here).
    return nn.GroupNorm(num_groups=32, num_channels=in_channels, eps=1e-6, affine=True)
class Discriminator(nn.Module):
    """CNN discriminator: five stride-2 convolutions followed by two fully
    connected layers producing a single real/fake logit per image.

    Conv2D, Dense, and Flatten are the Keras-style wrappers defined below;
    Python resolves these names at call time, so the ordering is fine.
    """

    def __init__(self):
        super().__init__()
        self.decoder = nn.Sequential(
            Conv2D(3, 32, 3, strides=2, activation='relu'),
            Normalize(32),
            Conv2D(32, 32, 3, activation='relu'),
            Normalize(32),
            Conv2D(32, 64, 3, strides=2, activation='relu'),
            Normalize(64),
            Conv2D(64, 64, 3, activation='relu'),
            Normalize(64),
            Conv2D(64, 64, 3, strides=2, activation='relu'),
            Normalize(64),
            Conv2D(64, 128, 3, strides=2, activation='relu'),
            Normalize(128),
            Conv2D(128, 128, 3, strides=2, activation='relu'),
            Flatten(),
            # 8192 = 128 channels * 8 * 8: five stride-2 convs reduce a
            # 256x256 input to 8x8, so this layer assumes 256x256 images.
            Dense(8192, 512, activation='relu'),
            Dense(512, 1, activation=None))

    def forward(self, image):
        # Center the input: images are expected in [0, 1], shifted to [-0.5, 0.5].
        x = image - 0.5
        return self.decoder(x)
class Dense(nn.Module):
    """Keras-style fully connected layer with a built-in activation."""

    def __init__(self, in_features, out_features, activation='relu', kernel_initializer='he_normal'):
        super().__init__()
        self.in_features = in_features
        self.out_features = out_features
        self.activation = activation
        self.kernel_initializer = kernel_initializer
        self.linear = nn.Linear(in_features, out_features)
        # He-normal initialization (the only initializer supported here).
        if kernel_initializer == 'he_normal':
            nn.init.kaiming_normal_(self.linear.weight)
        else:
            raise NotImplementedError

    def forward(self, inputs):
        outputs = self.linear(inputs)
        if self.activation is not None:
            if self.activation == 'relu':
                outputs = F.relu(outputs, inplace=True)
            else:
                # Mirror Conv2D: fail loudly on unsupported activations
                # instead of silently returning pre-activation outputs.
                raise NotImplementedError
        return outputs
class Conv2D(nn.Module):
    """Keras-style conv layer: 'same' padding for odd kernels, plus activation."""

    def __init__(self, in_channels, out_channels, kernel_size=3, activation='relu', strides=1):
        super().__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.kernel_size = kernel_size
        self.activation = activation
        self.strides = strides
        # padding = (kernel_size - 1) // 2 preserves spatial size at stride 1.
        self.conv = nn.Conv2d(in_channels, out_channels, kernel_size, strides, (kernel_size - 1) // 2)
        # Default: He-normal ("kaiming") kernel initialization.
        nn.init.kaiming_normal_(self.conv.weight)

    def forward(self, inputs):
        outputs = self.conv(inputs)
        if self.activation is not None:
            if self.activation == 'relu':
                outputs = F.relu(outputs, inplace=True)
            else:
                raise NotImplementedError
        return outputs
class Flatten(nn.Module):
    """Flattens all dimensions except the batch dimension
    (equivalent to nn.Flatten in current PyTorch)."""

    def forward(self, input):
        return input.view(input.size(0), -1)
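

if __name__ == "__main__":
    # Quick smoke test: a minimal usage sketch, not part of the original file.
    # With five stride-2 convolutions, a 256x256 input is reduced to 8x8, so the
    # flattened features are 128 * 8 * 8 = 8192, matching Dense(8192, 512) above.
    # The batch size of 2 and the 256x256 resolution are assumptions for the demo.
    disc = Discriminator()
    images = torch.rand(2, 3, 256, 256)  # dummy images in [0, 1]
    logits = disc(images)
    print(logits.shape)  # expected: torch.Size([2, 1])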