import math
import torch
import torch.nn as nn
import torch.nn.functional as nnF

keep_prob = 0.9  # shared keep probability; dropout rate is 1 - keep_prob

# Custom convolution module: transposes the input to channel-first layout and applies
# multi-scale 1D convolutions (kernel sizes 3, 7, and 11) in a single step.
class TransConv(nn.Module):
    def __init__(self, input_dim=46, output_dim=48):  # ,size=[3,7,11],padding=[1,3,5]):
        super().__init__()
        self.cnn1 = nn.Conv1d(input_dim, output_dim, 3, padding=1)
        self.cnn2 = nn.Conv1d(input_dim, output_dim, 7, padding=3)
        self.cnn3 = nn.Conv1d(input_dim, output_dim, 11, padding=5)

    def forward(self, x):
        # (batch, length, features) -> (batch, features, length) for Conv1d
        x = x.permute(0, 2, 1)
        x1 = self.cnn1(x)
        x2 = self.cnn2(x)
        x3 = self.cnn3(x)
        # concatenate the three kernel scales along the channel dimension
        x = torch.cat((x1, x2, x3), -2)
        x = nnF.relu(x)
        # back to (batch, length, 3 * output_dim)
        x = x.permute(0, 2, 1)
        return x

def get_length(x):
    # Unpadded sequence length per sample: the number of timesteps that contain
    # at least one positive feature value.
    xsum = (x > 0).sum(axis=-1)
    length = (xsum > 0).sum(axis=-1)
    return length


def len_1k_pad(x):
    # Zero-pad the time dimension up to a fixed length of 1000.
    length = x.shape[1]
    if 1000 > length:
        return nnF.pad(x, (0, 0, 0, 1000 - length, 0, 0))
    else:
        return x


def length_pad(x):
    # Zero-pad the time dimension up to the next multiple of 4.
    length = x.shape[1]
    newLength = 4 * math.ceil(length / 4)
    if newLength > length:
        return nnF.pad(x, (0, 0, 0, newLength - length, 0, 0))
    else:
        return x

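# Illustrative note (hypothetical shapes, not from the original file): for an input of
# shape (batch, 10, features), length_pad pads the time dimension to the next multiple
# of 4, giving (batch, 12, features), while len_1k_pad would pad it to (batch, 1000, features).
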
class pack_GRU(nn.Module):
    # Bidirectional GRU that packs the padded batch so the RNN skips padding timesteps.
    def __init__(self, input_dim, n_hidden=128, stack=3):
        super().__init__()
        if stack > 1:
            dropout = 1 - keep_prob
        else:
            dropout = 0
        self.biRNN = nn.GRU(input_dim, n_hidden, stack, batch_first=True,
                            dropout=dropout, bidirectional=True)

    def forward(self, x, length):
        pack_x = nn.utils.rnn.pack_padded_sequence(x, length.to('cpu'),
                                                   batch_first=True, enforce_sorted=False)
        x, _ = self.biRNN(pack_x)
        padx, lengthx = nn.utils.rnn.pad_packed_sequence(x, batch_first=True)
        return padx

class Basic_Model(nn.Module):
    # Multi-scale convolution -> stacked bidirectional GRU -> MLP head producing
    # per-timestep probabilities over 3 classes.
    def __init__(self, input_dim, n_hidden=128):
        super().__init__()
        self.trans_conv = TransConv(input_dim, 48)
        self.biRNN = pack_GRU(48 * 3, n_hidden)
        self.final_linear_stack = nn.Sequential(
            nn.Linear(n_hidden * 2, 128), nn.SELU(),
            nn.Linear(128, 32), nn.SELU(),
            nn.Linear(32, 3), nn.Softmax(dim=-1)
        )

    def forward(self, x):
        lengths = get_length(x)
        x = self.trans_conv(x)
        x = self.biRNN(x, lengths)
        x = self.final_linear_stack(x)
        return x

class Transformer_light_Model(nn.Module):
    # Hybrid model: convolutional features go through a bidirectional GRU, are projected
    # down and refined by a small Transformer encoder, fused back with the recurrent
    # features, and passed through a second GRU and an MLP head.
    def __init__(self, input_dim=43, n_hidden=128):
        super().__init__()
        attention_dim = 64
        self.trans_conv = TransConv(input_dim, 48)
        self.drop = nn.Dropout(0.2)
        self.biRNN = pack_GRU(48 * 3, n_hidden, 2)
        encoder_layer = nn.TransformerEncoderLayer(d_model=attention_dim, nhead=4,
                                                   activation='gelu', batch_first=True)
        self.encoder = nn.TransformerEncoder(encoder_layer, num_layers=2)
        self.biRNN2 = pack_GRU(n_hidden * 2 + attention_dim, n_hidden, 1)
        self.linear1 = nn.Linear(n_hidden * 2, attention_dim)
        self.fc1 = nn.Sequential(nn.Linear(n_hidden * 2 + attention_dim, 196),
                                 nn.ReLU(), nn.Dropout(0.3))
        self.final_linear_stack = nn.Sequential(
            nn.Linear(196, 64), nn.ReLU(), nn.Dropout(1 - keep_prob),
            nn.Linear(64, 3), nn.Softmax(dim=-1)
        )

    def forward(self, x):
        lengths = get_length(x)
        x = self.trans_conv(x)
        x = self.drop(x)
        bix = self.biRNN(x, lengths)
        att_x = self.linear1(bix)
        att = self.encoder(att_x)
        # fuse recurrent and attention features, then refine with a second GRU pass
        x = torch.cat((bix, att), -1)
        bix = self.biRNN2(x, lengths)
        x = torch.cat((bix, att), -1)
        x = self.fc1(x)
        x = self.final_linear_stack(x)
        return x
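

# A minimal smoke-test sketch, not part of the original repository: the batch size,
# sequence length, and feature dimension below are hypothetical placeholders chosen
# only to show the expected input/output shapes of the two models.
if __name__ == "__main__":
    torch.manual_seed(0)
    batch, seq_len, feat = 2, 50, 46            # hypothetical example dimensions
    x = torch.rand(batch, seq_len, feat) + 0.1  # strictly positive so get_length sees full-length sequences

    print(length_pad(x).shape)                               # time dim padded to a multiple of 4: (2, 52, 46)
    print(Basic_Model(input_dim=feat)(x).shape)              # per-timestep class probabilities: (2, 50, 3)
    print(Transformer_light_Model(input_dim=feat)(x).shape)  # per-timestep class probabilities: (2, 50, 3)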