-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathmodel.py
More file actions
70 lines (55 loc) · 2.45 KB
/
model.py
File metadata and controls
70 lines (55 loc) · 2.45 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
#!/usr/bin/env python
# -*- coding:utf-8 -*-
'''
@File : model.py
@Author : song
@Time : 2020/10/01
@Contact: songjian@westlake.edu.cn
@intro : Bi-GRU + self-attention
'''
import torch
import torch.nn as nn
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence, PackedSequence
import predifine
class Frag_GRU(nn.Module):
    """Bi-GRU encoder with additive self-attention for fragment annotation.

    Amino-acid token indices are embedded, encoded by a 2-layer
    bidirectional GRU over a packed variable-length batch, pooled with a
    learned attention over time steps, concatenated with a precursor
    charge embedding, and projected onto the annotation classes listed
    in ``predifine.g_anno_to_idx``.
    """

    def __init__(self):
        super(Frag_GRU, self).__init__()
        # 22 amino-acid tokens and 4 charge states, each mapped to 32-d vectors.
        self.aa_embed = nn.Embedding(22, 32)
        self.charge_embed = nn.Embedding(4, 32)
        self.gru = nn.GRU(input_size=32,
                          hidden_size=32,
                          num_layers=2,
                          dropout=0.5,
                          bidirectional=True,
                          batch_first=True)
        # Additive attention: score each 64-d (bi-directional) GRU output.
        self.attention = nn.Linear(64, 32)
        self.context = nn.Linear(32, 1, bias=False)
        # Classifier input: 64-d attention-pooled sequence + 32-d charge embed.
        self.fc = nn.Linear(64 + 32, len(predifine.g_anno_to_idx))

    def forward(self, batch_padded, batch_lens, batch_charge):
        """Score each annotation class for a padded batch of sequences.

        batch_padded -- LongTensor [batch, max_len] of amino-acid indices
        batch_lens   -- true sequence lengths (for packing)
        batch_charge -- LongTensor [batch] of charge-state indices
        returns      -- FloatTensor [batch, n_classes] of unnormalized scores
        """
        embedded = self.aa_embed(batch_padded)
        packed = pack_padded_sequence(embedded,
                                      batch_lens,
                                      batch_first=True,
                                      enforce_sorted=False)  # unsorted lengths OK
        self.gru.flatten_parameters()
        packed_out, _ = self.gru(packed, None)

        # Attention scores computed directly on the packed data, so only
        # valid (non-pad) time steps ever receive a score.
        scores = self.context(torch.tanh(self.attention(packed_out.data))).squeeze(1)
        scores = torch.exp(scores - scores.max())  # max-shifted for stability
        # Repack the scores and unpad; padded slots become 0, which makes the
        # row-wise normalization below a masked softmax.
        padded_scores, _ = pad_packed_sequence(
            PackedSequence(data=scores,
                           batch_sizes=packed_out.batch_sizes,
                           sorted_indices=packed_out.sorted_indices,
                           unsorted_indices=packed_out.unsorted_indices),
            batch_first=True)
        alphas = padded_scores / padded_scores.sum(dim=1, keepdim=True)

        padded_out, _ = pad_packed_sequence(packed_out, batch_first=True)
        pooled = (padded_out * alphas.unsqueeze(2)).sum(dim=1)  # [batch, 64]

        # Fuse the pooled sequence representation with the charge embedding.
        fused = torch.cat((pooled, self.charge_embed(batch_charge)), dim=1)
        return self.fc(fused)