# model.py
import torch
import torch.nn as nn


class LSTMPredictor(nn.Module):
    """LSTM sequence model that predicts from the final hidden state."""

    def __init__(self, input_size, hidden_size, num_layers, output_size):
        super().__init__()
        self.num_layers = num_layers
        self.hidden_size = hidden_size
        # batch_first=True: input is (batch, seq_len, input_size)
        self.lstm = nn.LSTM(input_size, hidden_size, num_layers, batch_first=True)
        self.fc = nn.Linear(hidden_size, output_size)

    def forward(self, x):
        # Explicit zero initial states (this matches nn.LSTM's default).
        h_0 = torch.zeros(self.num_layers, x.size(0), self.hidden_size, device=x.device)
        c_0 = torch.zeros(self.num_layers, x.size(0), self.hidden_size, device=x.device)
        out, (h_out, c_out) = self.lstm(x, (h_0, c_0))
        # Project the last layer's final hidden state: (batch, hidden_size) -> (batch, output_size).
        return self.fc(h_out[-1, :, :])
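
# LSTMPredictor usage sketch (shapes here are illustrative assumptions, not
# taken from the original repo): a (batch=8, seq_len=10, input_size=16) tensor
# maps to an (8, output_size) prediction, since only the final hidden state of
# the top LSTM layer feeds the linear head. See the smoke test at the bottom.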

class BilibiliModel(nn.Module):
    """BatchNorm -> feature attention gate -> two residual blocks -> linear head."""

    def __init__(self, in_features, hidden_features, out_features):
        super().__init__()
        self.bn = nn.BatchNorm1d(num_features=in_features)
        # Gate outputs in_features weights so it can rescale the input.
        self.attn = Attention_module(in_features, hidden_features, in_features)
        self.resblock = two_cascaded_res_blocks(in_features)
        self.linear = nn.Linear(in_features, out_features)  # output head

    def forward(self, x):
        x = self.bn(x)
        x = self.attn(x)  # feature-wise sigmoid gating
        x = self.resblock(x)
        return self.linear(x)

class Attention_module(nn.Module):
    """Attention gate: a small MLP produces per-feature weights in (0, 1)
    that rescale the input element-wise."""

    def __init__(self, in_features, hidden_features, out_features):
        super().__init__()
        self.linear1 = nn.Linear(in_features, hidden_features)
        self.bn = nn.BatchNorm1d(num_features=hidden_features)
        self.relu = nn.ReLU()
        self.linear2 = nn.Linear(hidden_features, out_features)
        self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        res = x
        y = self.linear2(self.relu(self.bn(self.linear1(x))))
        y = self.sigmoid(y)
        return y * res  # gate the input by the learned attention weights

class res_block(nn.Module):
    """Fully connected residual block (Linear-BN-ReLU-Linear-BN) with an identity skip."""

    def __init__(self, features):
        super().__init__()
        self.layer1 = nn.Linear(features, features)
        self.bn1 = nn.BatchNorm1d(num_features=features)
        self.relu = nn.ReLU()
        self.layer2 = nn.Linear(features, features)
        self.bn2 = nn.BatchNorm1d(num_features=features)

    def forward(self, x):
        res = x
        y = self.relu(self.bn1(self.layer1(x)))
        y = self.bn2(self.layer2(y))
        return self.relu(y + res)  # add the skip connection, then activate

class two_cascaded_res_blocks(nn.Module):
    """Two res_block modules applied in sequence."""

    def __init__(self, features):
        super().__init__()
        self.block1 = res_block(features)
        self.block2 = res_block(features)

    def forward(self, x):
        return self.block2(self.block1(x))
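
# A minimal smoke test, assuming illustrative sizes (the batch size, sequence
# length, and feature counts below are hypothetical, not from the original
# project). It only checks that both models run and emit the expected shapes.
if __name__ == "__main__":
    # LSTMPredictor: (batch, seq_len, input_size) -> (batch, output_size)
    seq = torch.randn(8, 10, 16)
    lstm_model = LSTMPredictor(input_size=16, hidden_size=32, num_layers=2, output_size=1)
    print(lstm_model(seq).shape)  # torch.Size([8, 1])

    # BilibiliModel: (batch, in_features) -> (batch, out_features)
    feats = torch.randn(8, 24)
    tab_model = BilibiliModel(in_features=24, hidden_features=48, out_features=1)
    print(tab_model(feats).shape)  # torch.Size([8, 1])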