
cleanup
lukecavabarrett committed Apr 10, 2020
1 parent 57e4238 commit db3b6c3
Showing 12 changed files with 63 additions and 44 deletions.
10 changes: 5 additions & 5 deletions datasets_generation/graph_generation.py
@@ -70,9 +70,9 @@ def ladder(N):
""" Creates a ladder graph of N nodes: two rows of N/2 nodes, with each pair connected by a single edge.
In case N is odd another node is attached to the first one. """
G = nx.ladder_graph(N // 2)
if N%2 != 0:
G.add_node(N-1)
G.add_edge(0, N-1)
if N % 2 != 0:
G.add_node(N - 1)
G.add_edge(0, N - 1)
return G


@@ -105,14 +105,14 @@ def lobster(N, seed):
N − b − p pendent vertices uniformly connected to the previous pendent vertices """
np.random.seed(seed)
B = np.random.randint(low=1, high=N)
F = np.random.randint(low=B+1,high=N+1)
F = np.random.randint(low=B + 1, high=N + 1)
G = nx.empty_graph(N)
for i in range(1, B):
G.add_edge(i - 1, i)
for i in range(B, F):
G.add_edge(i, np.random.randint(B))
for i in range(F, N):
G.add_edge(i, np.random.randint(low=B,high=F))
G.add_edge(i, np.random.randint(low=B, high=F))
return G
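
For reference, a minimal usage sketch of these generators (my own illustration, not part of the commit; it assumes the repository root is on the Python path):

```python
# Hypothetical usage sketch -- not part of this commit.
from datasets_generation.graph_generation import ladder, lobster

G = ladder(7)                 # odd N: node 6 is attached back to node 0
print(G.number_of_nodes())    # 7

H = lobster(20, seed=0)       # backbone path plus two layers of pendent vertices, seeded for reproducibility
print(H.number_of_nodes())    # 20
```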


2 changes: 0 additions & 2 deletions datasets_generation/multitask_dataset.py
@@ -1,7 +1,6 @@
import argparse
import os
import pickle
import sys

import numpy as np
import torch
@@ -18,7 +17,6 @@ def __init__(self, n_graphs, N, seed, graph_type, get_nodes_labels, get_graph_la
self.features = {}
self.nodes_labels = {}
self.graph_labels = {}
generated = 0

def progress_bar(iteration, total, prefix='', suffix='', decimals=1, length=100, fill='█', printEnd=""):
percent = ("{0:." + str(decimals) + "f}").format(100 * (iteration / float(total)))
8 changes: 5 additions & 3 deletions models/gat/layer.py
@@ -2,6 +2,7 @@
import torch.nn as nn
import torch.nn.functional as F


class GATHead(nn.Module):

def __init__(self, in_features, out_features, alpha, activation=True, device='cpu'):
@@ -24,7 +25,8 @@ def forward(self, input, adj):

h = torch.matmul(input, self.W)
(B, N, _) = adj.shape
a_input = torch.cat([h.repeat(1, 1, N).view(B, N * N, -1), h.repeat(1, N, 1)], dim=1).view(B, N, -1, 2 * self.out_features)
a_input = torch.cat([h.repeat(1, 1, N).view(B, N * N, -1), h.repeat(1, N, 1)], dim=1).view(B, N, -1,
2 * self.out_features)
e = self.leakyrelu(torch.matmul(a_input, self.a).squeeze(3))

zero_vec = -9e15 * torch.ones_like(e)
@@ -66,7 +68,7 @@ def __init__(self, in_features, out_features, alpha, nheads=1, activation=True,
self.heads = nn.ModuleList()
for _ in range(nheads):
self.heads.append(GATHead(in_features=self.input_head, out_features=self.output_head, alpha=alpha,
activation=activation, device=device))
activation=activation, device=device))

def forward(self, input, adj):
y = torch.cat([head(input, adj) for head in self.heads], dim=2)
@@ -75,4 +77,4 @@ def forward(self, input, adj):
def __repr__(self):
return self.__class__.__name__ + ' (' \
+ str(self.in_features) + ' -> ' \
+ str(self.out_features) + ')'
+ str(self.out_features) + ')'
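
As a shape sanity check for the head above, a hedged sketch of driving a single GATHead with a batched dense adjacency (constructor and forward arguments taken from the hunk headers above; the expected output shape is an assumption):

```python
# Hypothetical shape check -- not part of this commit.
import torch
from models.gat.layer import GATHead

B, N, F_in, F_out = 4, 10, 16, 8
head = GATHead(in_features=F_in, out_features=F_out, alpha=0.2)
x = torch.randn(B, N, F_in)                # node features, (B, N, in_features)
adj = (torch.rand(B, N, N) > 0.5).float()  # dense adjacency, (B, N, N)

y = head(x, adj)
print(y.shape)                             # expected: torch.Size([4, 10, 8])
```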
5 changes: 3 additions & 2 deletions models/gcn/layer.py
@@ -42,7 +42,8 @@ def forward(self, X, adj):

# normalised mean aggregation
adj = adj + torch.eye(N, device=self.device).unsqueeze(0)
rD = torch.mul(torch.pow(torch.sum(adj, -1, keepdim=True), -0.5), torch.eye(N, device=self.device).unsqueeze(0)) # D^{-1/2}
rD = torch.mul(torch.pow(torch.sum(adj, -1, keepdim=True), -0.5),
torch.eye(N, device=self.device).unsqueeze(0)) # D^{-1/2}
adj = torch.matmul(torch.matmul(rD, adj), rD) # D^{-1/2} A' D^{-1/2}
y = torch.bmm(adj, XW)

@@ -53,4 +54,4 @@ def forward(self, X, adj):
def __repr__(self):
return self.__class__.__name__ + ' (' \
+ str(self.in_features) + ' -> ' \
+ str(self.out_features) + ')'
+ str(self.out_features) + ')'
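
The normalisation above implements the symmetric renormalisation A_hat = D^{-1/2} (A + I) D^{-1/2}; a small standalone sketch of the same computation (my own illustration, not part of the commit):

```python
# Hypothetical sketch of the normalised mean aggregation used above.
import torch

B, N = 2, 4
adj = (torch.rand(B, N, N) > 0.5).float()
adj = adj + torch.eye(N).unsqueeze(0)                    # A' = A + I (self-loops)
deg = torch.sum(adj, -1, keepdim=True)                   # degrees, (B, N, 1)
rD = torch.pow(deg, -0.5) * torch.eye(N).unsqueeze(0)    # diagonal D^{-1/2}
adj_hat = rD @ adj @ rD                                  # D^{-1/2} A' D^{-1/2}
print(adj_hat.shape)                                     # torch.Size([2, 4, 4])
```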
1 change: 0 additions & 1 deletion models/gcn/train.py
@@ -1,7 +1,6 @@
from __future__ import division
from __future__ import print_function

from models.gnn_framework import GNN
from models.gcn.layer import GCNLayer
from util.train import execute_train, build_arg_parser

9 changes: 5 additions & 4 deletions models/gin/layer.py
@@ -1,6 +1,6 @@
import torch
import torch.nn as nn
from util.layers import FCLayer, MLP
from util.layers import MLP


class GINLayer(nn.Module):
@@ -21,7 +21,8 @@ def __init__(self, in_features, out_features, fc_layers=2, device='cpu'):
self.in_features = in_features
self.out_features = out_features
self.epsilon = nn.Parameter(torch.zeros(size=(1,), device=device))
self.post_transformation = MLP(in_size=in_features, hidden_size=max(in_features, out_features), out_size=out_features,
self.post_transformation = MLP(in_size=in_features, hidden_size=max(in_features, out_features),
out_size=out_features,
layers=fc_layers, mid_activation='relu', last_activation='relu', mid_b_norm=True,
last_b_norm=False, device=device)
self.reset_parameters()
@@ -33,7 +34,7 @@ def forward(self, input, adj):
(B, N, _) = adj.shape

# sum aggregation
mod_adj = adj + torch.eye(N, device=self.device).unsqueeze(0)*(1+self.epsilon)
mod_adj = adj + torch.eye(N, device=self.device).unsqueeze(0) * (1 + self.epsilon)
support = torch.matmul(mod_adj, input)

# post-aggregation transformation
@@ -42,4 +43,4 @@ def forward(self, input, adj):
def __repr__(self):
return self.__class__.__name__ + ' (' \
+ str(self.in_features) + ' -> ' \
+ str(self.out_features) + ')'
+ str(self.out_features) + ')'
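
For context, the sum aggregation above is the dense form of the GIN update h_v <- MLP((1 + eps) * h_v + sum of neighbour features); a hedged standalone sketch (my own illustration, not part of the commit):

```python
# Hypothetical sketch of the dense GIN sum aggregation, before the MLP.
import torch

B, N, F = 2, 5, 8
x = torch.randn(B, N, F)
adj = (torch.rand(B, N, N) > 0.5).float()                    # assumed to carry no self-loops
epsilon = torch.zeros(1)

mod_adj = adj + torch.eye(N).unsqueeze(0) * (1 + epsilon)    # A + (1 + eps) * I
support = torch.matmul(mod_adj, x)                           # (1 + eps) * h_v + neighbour sum
print(support.shape)                                         # torch.Size([2, 5, 8])
```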
20 changes: 13 additions & 7 deletions models/gnn_framework.py
@@ -7,7 +7,8 @@

class GNN(nn.Module):
def __init__(self, nfeat, nhid, nodes_out, graph_out, dropout, conv_layers=2, fc_layers=3, first_conv_descr=None,
middle_conv_descr=None, final_activation='LeakyReLU', skip=False, gru=False, fixed=False, variable=False,
middle_conv_descr=None, final_activation='LeakyReLU', skip=False, gru=False, fixed=False,
variable=False,
device='cpu'):
"""
:param nfeat: number of input features per node
@@ -37,17 +38,21 @@ def __init__(self, nfeat, nhid, nodes_out, graph_out, dropout, conv_layers=2, fc

if type(first_conv_descr) == dict:
first_conv_descr = types.SimpleNamespace(**first_conv_descr)
assert type(first_conv_descr) == types.SimpleNamespace, "first_conv_descr should be either a dict or a SimpleNamespace"
assert type(
first_conv_descr) == types.SimpleNamespace, "first_conv_descr should be either a dict or a SimpleNamespace"
if type(first_conv_descr.args) == dict:
first_conv_descr.args = types.SimpleNamespace(**first_conv_descr.args)
assert type(first_conv_descr.args) == types.SimpleNamespace, "first_conv_descr.args should be either a dict or a SimpleNamespace"
assert type(
first_conv_descr.args) == types.SimpleNamespace, "first_conv_descr.args should be either a dict or a SimpleNamespace"

if type(middle_conv_descr) == dict:
middle_conv_descr = types.SimpleNamespace(**middle_conv_descr)
assert type(middle_conv_descr) == types.SimpleNamespace, "middle_conv_descr should be either a dict or a SimpleNamespace"
assert type(
middle_conv_descr) == types.SimpleNamespace, "middle_conv_descr should be either a dict or a SimpleNamespace"
if type(middle_conv_descr.args) == dict:
middle_conv_descr.args = types.SimpleNamespace(**middle_conv_descr.args)
assert type(middle_conv_descr.args) == types.SimpleNamespace, "middle_conv_descr.args should be either a dict or a SimpleNamespace"
assert type(
middle_conv_descr.args) == types.SimpleNamespace, "middle_conv_descr.args should be either a dict or a SimpleNamespace"

self.dropout = dropout
self.conv_layers = nn.ModuleList()
@@ -86,7 +91,8 @@ def forward(self, x, adj):
skip_connections = [x] if self.skip else None

n_layers = self.n_fixed_conv(adj) if self.variable else self.n_fixed_conv
conv_layers = [self.conv_layers[0]] + ([self.conv_layers[1]] * (n_layers-1)) if self.fixed else self.conv_layers
conv_layers = [self.conv_layers[0]] + (
[self.conv_layers[1]] * (n_layers - 1)) if self.fixed else self.conv_layers

for layer, conv in enumerate(conv_layers):
y = conv(x, adj)
@@ -103,4 +109,4 @@ def forward(self, x, adj):
x = torch.cat(skip_connections, dim=2)

# readout output
return (self.nodes_read_out(x), self.graph_read_out(x))
return (self.nodes_read_out(x), self.graph_read_out(x))
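
The `fixed`/`variable` branch in `forward` above builds the convolution schedule by reusing the second layer as a shared-weight block; a hedged sketch of that list construction, inferred from these lines alone (not part of the commit):

```python
# Hypothetical sketch: with fixed=True the first layer runs once and the second
# layer is repeated (weight sharing) for the remaining iterations; with
# variable=True the iteration count itself depends on the input graph.
first_conv, middle_conv = object(), object()   # stand-ins for self.conv_layers[0] / [1]
n_layers = 4                                   # self.n_fixed_conv, or self.n_fixed_conv(adj) if variable

schedule = [first_conv] + [middle_conv] * (n_layers - 1)
assert len(schedule) == n_layers
assert schedule[1] is schedule[2]              # the middle block is shared, not copied
```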
2 changes: 2 additions & 0 deletions models/pna/aggregators.py
@@ -1,7 +1,9 @@
import math
import torch

EPS = 1e-5


# each aggregator is a function taking as input X (B x N x N x Din), adj (B x N x N), self_loop and device and
# returning the aggregated value of X (B x N x Din) for each dimension
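
The aggregator functions themselves fall outside this hunk; as an illustration of the stated contract, a hedged sketch of a simple mean aggregator (my own example, assuming the third axis of X indexes candidate neighbours; not necessarily the repository's implementation):

```python
# Hypothetical aggregator matching the interface described above.
import torch

def aggregate_mean_sketch(X, adj, self_loop=False, device='cpu'):
    # X: (B, N, N, Din) neighbour features per node; adj: (B, N, N)
    if self_loop:
        adj = adj + torch.eye(adj.shape[1], device=device).unsqueeze(0)
    deg = torch.sum(adj, dim=-1, keepdim=True).clamp(min=1)   # (B, N, 1)
    summed = torch.sum(X * adj.unsqueeze(-1), dim=2)          # (B, N, Din)
    return summed / deg
```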

23 changes: 15 additions & 8 deletions models/pna/layer.py
@@ -26,8 +26,10 @@ def __init__(self, in_features, out_features, aggregators, scalers, avg_d, self_
self.self_loop = self_loop
self.pretrans = MLP(in_size=2 * self.in_features, hidden_size=self.in_features, out_size=self.in_features,
layers=pretrans_layers, mid_activation='relu', last_activation='none')
self.posttrans = MLP(in_size=(len(aggregators) * len(scalers) + 1) * self.in_features, hidden_size=self.out_features,
out_size=self.out_features, layers=posttrans_layers, mid_activation='relu', last_activation='none')
self.posttrans = MLP(in_size=(len(aggregators) * len(scalers) + 1) * self.in_features,
hidden_size=self.out_features,
out_size=self.out_features, layers=posttrans_layers, mid_activation='relu',
last_activation='none')
self.avg_d = avg_d

def forward(self, input, adj):
@@ -40,7 +42,9 @@ def forward(self, input, adj):
h_mod = self.pretrans(h_cat)

# aggregation
m = torch.cat([aggregate(h_mod, adj, self_loop=self.self_loop, device=self.device) for aggregate in self.aggregators],dim=2)
m = torch.cat(
[aggregate(h_mod, adj, self_loop=self.self_loop, device=self.device) for aggregate in self.aggregators],
dim=2)
m = torch.cat([scale(m, adj, avg_d=self.avg_d) for scale in self.scalers], dim=2)

# post-aggregation transformation
@@ -59,6 +63,7 @@ class PNALayer(nn.Module):
Implements a single convolutional layer of the Principal Neighbourhood Aggregation Networks
as described in XXX
"""

def __init__(self, in_features, out_features, aggregators, scalers, avg_d, towers=1, self_loop=False,
pretrans_layers=1, posttrans_layers=1, divide_input=True, device='cpu'):
"""
@@ -74,7 +79,8 @@ def __init__(self, in_features, out_features, aggregators, scalers, avg_d, tower
:param device: device used for computation
"""
super(PNALayer, self).__init__()
assert ((not divide_input) or in_features % towers == 0), "if divide_input is set the number of towers has to divide in_features"
assert ((
not divide_input) or in_features % towers == 0), "if divide_input is set the number of towers has to divide in_features"
assert (out_features % towers == 0), "the number of towers has to divide the out_features"

# retrieve the aggregators and scalers functions
@@ -88,9 +94,10 @@ def __init__(self, in_features, out_features, aggregators, scalers, avg_d, tower
# convolution
self.towers = nn.ModuleList()
for _ in range(towers):
self.towers.append(PNATower(in_features=self.input_tower, out_features=self.output_tower, aggregators=aggregators,
scalers=scalers, avg_d=avg_d, self_loop=self_loop, pretrans_layers=pretrans_layers,
posttrans_layers=posttrans_layers, device=device))
self.towers.append(
PNATower(in_features=self.input_tower, out_features=self.output_tower, aggregators=aggregators,
scalers=scalers, avg_d=avg_d, self_loop=self_loop, pretrans_layers=pretrans_layers,
posttrans_layers=posttrans_layers, device=device))
# mixing network
self.mixing_network = FCLayer(out_features, out_features, activation='LeakyReLU')

@@ -109,4 +116,4 @@ def forward(self, input, adj):
def __repr__(self):
return self.__class__.__name__ + ' (' \
+ str(self.in_features) + ' -> ' \
+ str(self.out_features) + ')'
+ str(self.out_features) + ')'
5 changes: 2 additions & 3 deletions models/pna/scalers.py
@@ -1,5 +1,6 @@
import torch


# each scaler is a function that takes as input X (B x N x Din), adj (B x N x N) and
# avg_d (dictionary containing averages over training set) and returns X_scaled (B x N x Din) as output

@@ -38,6 +39,4 @@ def scale_inverse_linear(X, adj, avg_d=None):


SCALERS = {'identity': scale_identity, 'amplification': scale_amplification, 'attenuation': scale_attenuation,
'linear': scale_linear, 'inverse_linear': scale_inverse_linear}


'linear': scale_linear, 'inverse_linear': scale_inverse_linear}
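
The scalers named in the dictionary above follow the interface described at the top of the file; a hedged sketch of a degree-based amplification scaler in that shape (my own example; the key used for avg_d and the exact formula are assumptions, not the repository's code):

```python
# Hypothetical scaler matching the interface described above.
import torch

def scale_amplification_sketch(X, adj, avg_d=None):
    # X: (B, N, Din); adj: (B, N, N); avg_d assumed to hold the training-set
    # average of log(degree + 1) under the key 'log'.
    deg = torch.sum(adj, dim=-1, keepdim=True)        # (B, N, 1)
    scale = torch.log(deg + 1) / avg_d['log']         # amplify high-degree nodes
    return X * scale
```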
19 changes: 11 additions & 8 deletions util/layers.py
@@ -135,7 +136,8 @@ class FCLayer(nn.Module):
Output dimension of the linear layer
"""

def __init__(self, in_size, out_size, activation='relu', dropout=0., b_norm=False, bias=True, init_fn=None, device='cpu'):
def __init__(self, in_size, out_size, activation='relu', dropout=0., b_norm=False, bias=True, init_fn=None,
device='cpu'):
super(FCLayer, self).__init__()

self.__params = locals()
@@ -159,7 +160,7 @@ def __init__(self, in_size, out_size, activation='relu', dropout=0., b_norm=Fals
def reset_parameters(self, init_fn=None):
init_fn = init_fn or self.init_fn
if init_fn is not None:
init_fn(self.linear.weight, 1/self.in_size)
init_fn(self.linear.weight, 1 / self.in_size)
if self.bias:
self.linear.bias.data.zero_()

@@ -171,7 +172,7 @@ def forward(self, x):
h = self.dropout(h)
if self.b_norm is not None:
if h.shape[1] != self.out_size:
h = self.b_norm(h.transpose(1, 2)).transpose(1,2)
h = self.b_norm(h.transpose(1, 2)).transpose(1, 2)
else:
h = self.b_norm(h)
return h
@@ -186,6 +187,7 @@ class MLP(nn.Module):
"""
Simple multi-layer perceptron, built of a series of FCLayers
"""

def __init__(self, in_size, hidden_size, out_size, layers, mid_activation='relu', last_activation='none',
dropout=0., mid_b_norm=False, last_b_norm=False, device='cpu'):
super(MLP, self).__init__()
@@ -222,6 +224,7 @@ class GRU(nn.Module):
"""
Wrapper class for the GRU used by the GNN framework, nn.GRU is used for the Gated Recurrent Unit itself
"""

def __init__(self, input_size, hidden_size, device):
super(GRU, self).__init__()
self.input_size = input_size
@@ -241,9 +244,9 @@ def forward(self, x, y):
y = y.reshape(1, B * N, -1).contiguous()

# padding if necessary
if x.shape[-1]< self.input_size:
if x.shape[-1] < self.input_size:
x = F.pad(input=x, pad=[0, self.input_size - x.shape[-1]], mode='constant', value=0)
if y.shape[-1]< self.hidden_size:
if y.shape[-1] < self.hidden_size:
y = F.pad(input=y, pad=[0, self.hidden_size - y.shape[-1]], mode='constant', value=0)

x = self.gru(x, y)[1]
@@ -255,18 +258,18 @@ class S2SReadout(nn.Module):
"""
Performs a Set2Set aggregation of all the graph nodes' features followed by a series of fully connected layers
"""
def __init__(self, in_size, hidden_size, out_size, fc_layers=3, device='cpu', final_activation='relu'):

def __init__(self, in_size, hidden_size, out_size, fc_layers=3, device='cpu', final_activation='relu'):
super(S2SReadout, self).__init__()

# set2set aggregation
self.set2set = Set2Set(in_size, device=device)

# fully connected layers
self.mlp = MLP(in_size=2*in_size, hidden_size=hidden_size, out_size=out_size, layers=fc_layers,
self.mlp = MLP(in_size=2 * in_size, hidden_size=hidden_size, out_size=out_size, layers=fc_layers,
mid_activation="relu", last_activation=final_activation, mid_b_norm=True, last_b_norm=False,
device=device)

def forward(self, x):
x = self.set2set(x)
return self.mlp(x)
return self.mlp(x)
3 changes: 2 additions & 1 deletion util/util.py
@@ -88,7 +88,8 @@ def total_loss_multiple_batches(output, target, loss='mse', only_nodes=False, on
""" returns the average of the average losses of each task over all batches,
batches are weighted equally regardless of their cardinality or graph size """
return sum([total_loss((output[0][batch], output[1][batch]), (target[0][batch], target[1][batch]),
loss, only_nodes, only_graph).data.item() for batch in range(len(output[0]))]) / len(output[0])
loss, only_nodes, only_graph).data.item() for batch in range(len(output[0]))]) / len(
output[0])


def specific_loss(output, target, loss='mse', only_nodes=False, only_graph=False):
