models.py
import random

import torch
import torch.nn.functional as F
import networkx as nx
import community as comm
from sklearn.cluster import KMeans
from torch_geometric.data import NeighborSampler
from torch_geometric.datasets import Planetoid
import torch_geometric.transforms as T
from torch_geometric.nn import GCNConv
class MarkovGCNR(torch.nn.Module):
    def __init__(self, ndim, nlayers, ntargets, features, edges, weights=None,
                 droprate=0.5, useleakyrelu=False, alpha=0.5, addbias=True):
        super(MarkovGCNR, self).__init__()
        # Use a ModuleList so the GCNConv parameters are registered with the module.
        self.convs = torch.nn.ModuleList()
        self.ndim = ndim
        self.nlayers = nlayers
        self.edges = edges          # one edge_index per layer
        self.weights = weights      # one edge_weight tensor (or None) per layer
        self.ntargets = ntargets
        self.features = features
        self.droprate = droprate
        self.useleakyrelu = useleakyrelu
        self.alpha = alpha          # residual mixing coefficient
        self.convs.append(GCNConv(self.features.shape[1], self.ndim, cached=True, bias=addbias))
        for l in range(nlayers - 2):
            self.convs.append(GCNConv(self.ndim, self.ndim, cached=True, bias=addbias))
        self.convs.append(GCNConv(self.ndim, self.ntargets, cached=True, bias=addbias))

    def forward(self):
        assert len(self.edges) == self.nlayers
        x = F.dropout(self.features, p=self.droprate, training=self.training)
        x = self.convs[0](x, self.edges[0], None)
        x_prev = x  # keep the first-layer output for the residual connection
        for l in range(1, self.nlayers):
            if self.useleakyrelu:
                x = F.leaky_relu(x)
            else:
                x = F.relu(x)
            x = F.dropout(x, p=self.droprate, training=self.training)
            if l < self.nlayers - 1:
                # residual connection with the first layer's output
                x = self.alpha * self.convs[l](x, self.edges[l], self.weights[l]) + (1 - self.alpha) * x_prev
                #x = self.convs[l](self.alpha * x + (1-self.alpha) * x_prev, medge_index[l], medge_weight[l])
            else:
                # final layer: no residual, project to class logits
                x = self.convs[l](x, self.edges[l], self.weights[l])
        return F.log_softmax(x, dim=1)

    def inference(self):
        # inference considering a 2-layered network: return the (rounded)
        # first-layer node embeddings as a numpy array
        x = self.convs[0](self.features, self.edges[0])
        xs = F.relu(x).cpu().detach().numpy()
        xs = xs.round(decimals=5)
        return xs
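
# Example usage (a hypothetical sketch, not part of the original file): MarkovGCNR
# expects one edge_index (and optionally one edge_weight) per layer, typically
# produced by a Markov-process edge-rewiring step performed outside this module.
# Reusing the raw graph for every layer, as below, is only an illustration.
#
#   dataset = Planetoid(root='data/Cora', name='Cora', transform=T.NormalizeFeatures())
#   data = dataset[0]
#   nlayers = 3
#   edges = [data.edge_index for _ in range(nlayers)]
#   weights = [None for _ in range(nlayers)]
#   model = MarkovGCNR(ndim=16, nlayers=nlayers, ntargets=dataset.num_classes,
#                      features=data.x, edges=edges, weights=weights)
#   out = model()            # log-probabilities, shape [num_nodes, num_classes]
#   emb = model.inference()  # rounded first-layer embeddings as a numpy array
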
class GCN(torch.nn.Module):
    def __init__(self, ndim, nlayers, ntargets, features, edges, weights=None,
                 droprate=0.5, alpha=0.5, addbias=True):
        super(GCN, self).__init__()
        # Use a ModuleList so the GCNConv parameters are registered with the module.
        self.convs = torch.nn.ModuleList()
        self.ndim = ndim
        self.nlayers = 2  # this baseline is fixed to two layers regardless of nlayers
        self.edges = edges
        self.weights = weights
        self.ntargets = ntargets
        self.features = features
        self.droprate = droprate
        self.convs.append(GCNConv(self.features.shape[1], self.ndim, cached=True, bias=addbias))
        self.convs.append(GCNConv(self.ndim, self.ntargets, cached=True, bias=addbias))

    def forward(self):
        x = F.dropout(self.features, p=self.droprate, training=self.training)
        x = self.convs[0](x, self.edges, None)
        x = F.relu(x)
        x = F.dropout(x, p=self.droprate, training=self.training)
        x = self.convs[1](x, self.edges, self.weights)
        return F.log_softmax(x, dim=1)
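

# A minimal training sketch for the two-layer GCN baseline on Planetoid Cora.
# The dataset root path, hidden size, and optimizer settings below are
# illustrative assumptions, not values taken from the original repository.
if __name__ == "__main__":
    dataset = Planetoid(root='data/Cora', name='Cora', transform=T.NormalizeFeatures())
    data = dataset[0]
    model = GCN(ndim=16, nlayers=2, ntargets=dataset.num_classes,
                features=data.x, edges=data.edge_index)
    optimizer = torch.optim.Adam(model.parameters(), lr=0.01, weight_decay=5e-4)

    model.train()
    for epoch in range(200):
        optimizer.zero_grad()
        out = model()
        loss = F.nll_loss(out[data.train_mask], data.y[data.train_mask])
        loss.backward()
        optimizer.step()

    model.eval()
    pred = model().argmax(dim=1)
    acc = (pred[data.test_mask] == data.y[data.test_mask]).float().mean().item()
    print(f'Test accuracy: {acc:.4f}')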