aggregators.py (forked from kkteru/grail)
import abc

import torch
import torch.nn as nn
import torch.nn.functional as F


class Aggregator(nn.Module):
    """Base class: combines a node's current embedding with the
    attention-weighted aggregate of its incoming messages."""

    def __init__(self, emb_dim):
        super(Aggregator, self).__init__()

    def forward(self, node):
        # Every mailbox entry for a node carries the same 'curr_emb',
        # so taking the first one suffices.
        curr_emb = node.mailbox['curr_emb'][:, 0, :]  # (B, F)
        # Attention-weighted sum over neighbour messages:
        # (B, 1, deg) x (B, deg, F) -> (B, 1, F) -> (B, F)
        nei_msg = torch.bmm(node.mailbox['alpha'].transpose(1, 2), node.mailbox['msg']).squeeze(1)  # (B, F)
        # nei_msg, _ = torch.max(node.mailbox['msg'], 1)  # (B, F)

        new_emb = self.update_embedding(curr_emb, nei_msg)

        return {'h': new_emb}

    @abc.abstractmethod
    def update_embedding(self, curr_emb, nei_msg):
        raise NotImplementedError


class SumAggregator(Aggregator):
    def __init__(self, emb_dim):
        super(SumAggregator, self).__init__(emb_dim)

    def update_embedding(self, curr_emb, nei_msg):
        # Element-wise sum of the current embedding and the aggregated message.
        new_emb = nei_msg + curr_emb

        return new_emb


class MLPAggregator(Aggregator):
    def __init__(self, emb_dim):
        super(MLPAggregator, self).__init__(emb_dim)
        self.linear = nn.Linear(2 * emb_dim, emb_dim)

    def update_embedding(self, curr_emb, nei_msg):
        # Concatenate current embedding and aggregated message, then project
        # back to emb_dim through a single ReLU layer.
        inp = torch.cat((nei_msg, curr_emb), 1)
        new_emb = F.relu(self.linear(inp))

        return new_emb


class GRUAggregator(Aggregator):
    def __init__(self, emb_dim):
        super(GRUAggregator, self).__init__(emb_dim)
        self.gru = nn.GRUCell(emb_dim, emb_dim)

    def update_embedding(self, curr_emb, nei_msg):
        # Treat the aggregated message as the GRU input and the current
        # embedding as its hidden state.
        new_emb = self.gru(nei_msg, curr_emb)

        return new_emb