Merge pull request rampasek#23 from rampasek/graphormer
Graphormer and dataset extensions
luis-mueller authored Feb 8, 2023
2 parents 40cfeed + 34ccb30 commit 95a17d5
Showing 39 changed files with 1,879 additions and 57 deletions.
84 changes: 84 additions & 0 deletions configs/GPS/actor-GPS.yaml
@@ -0,0 +1,84 @@
out_dir: results
metric_best: accuracy
wandb:
  use: True
  project: Actor
dataset:
  format: PyG-Actor
  name: none
  task: node
  task_type: classification
  transductive: True
  split_mode: standard
  node_encoder: True
  node_encoder_name: LapPE
  # node_encoder_name: LinearNode+GraphormerBias
  node_encoder_bn: False
  edge_encoder: False
  edge_encoder_name: DummyEdge
  edge_encoder_bn: False
posenc_GraphormerBias:
  enable: False
  node_degrees_only: True
  num_spatial_types: 20
  num_in_degrees: 1297
  num_out_degrees: 74
graphormer:
  use_graph_token: False
posenc_LapPE:
  enable: True
  eigen:
    laplacian_norm: none
    eigvec_norm: L2
    max_freqs: 4
  model: DeepSet
  dim_pe: 4
  layers: 2
  n_heads: 4  # Only used when `posenc.model: Transformer`
  raw_norm_type: none
posenc_RWSE:
  enable: False
  kernel:
    times_func: range(1,17)
  model: Linear
  dim_pe: 16
  raw_norm_type: BatchNorm
train:
  mode: custom
  sampler: full_batch
  # sampler: saint_rw
  # batch_size: 32
  eval_period: 5
  enable_ckpt: False
  # ckpt_period: 100
model:
  type: GPSModel
  loss_fun: cross_entropy
  edge_decoding: dot
gt:
  layer_type: GCN+Transformer
  layers: 2
  n_heads: 4
  dim_hidden: 64  # `gt.dim_hidden` must match `gnn.dim_inner`
  dropout: 0.2
  attn_dropout: 0.0
  layer_norm: False
  batch_norm: False
gnn:
  head: node
  layers_pre_mp: 0
  layers_post_mp: 1
  dim_inner: 64  # `gt.dim_hidden` must match `gnn.dim_inner`
  batchnorm: True
  act: gelu
  dropout: 0.2
  agg: mean
  normalize_adj: False
optim:
  clip_grad_norm: True
  optimizer: adamW
  weight_decay: 1e-5
  base_lr: 0.0005
  max_epoch: 200
  scheduler: cosine_with_warmup
  num_warmup_epochs: 10
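Note (not part of the commit): each new config ships with LapPE enabled and the Graphormer degree-bias encoder disabled; the commented-out `node_encoder_name` line hints at the alternative setup. A minimal sketch of the overrides that would switch this config to the Graphormer-style bias, assuming the same config keys apply; whether LapPE should be turned off alongside it is an assumption:

dataset:
  node_encoder_name: LinearNode+GraphormerBias  # alternative suggested by the commented line above
posenc_GraphormerBias:
  enable: True              # turn on the degree-bias precomputation
  node_degrees_only: True   # only in-/out-degree encodings, no spatial bias
posenc_LapPE:
  enable: False             # assumption: LapPE is dropped when the Graphormer bias is used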
84 changes: 84 additions & 0 deletions configs/GPS/webkb-cor-GPS.yaml
@@ -0,0 +1,84 @@
out_dir: results
metric_best: accuracy
wandb:
  use: True
  project: Cornell
dataset:
  format: PyG-WebKB
  name: cornell
  task: node
  task_type: classification
  transductive: True
  split_mode: standard
  node_encoder: True
  node_encoder_name: LapPE
  # node_encoder_name: LinearNode+GraphormerBias
  node_encoder_bn: False
  edge_encoder: False
  edge_encoder_name: DummyEdge
  edge_encoder_bn: False
posenc_GraphormerBias:
  enable: False
  node_degrees_only: True
  num_spatial_types: 20
  num_in_degrees: 9
  num_out_degrees: 94
graphormer:
  use_graph_token: False
posenc_LapPE:
  enable: True
  eigen:
    laplacian_norm: none
    eigvec_norm: L2
    max_freqs: 4
  model: DeepSet
  dim_pe: 4
  layers: 2
  n_heads: 4  # Only used when `posenc.model: Transformer`
  raw_norm_type: none
posenc_RWSE:
  enable: False
  kernel:
    times_func: range(1,17)
  model: Linear
  dim_pe: 16
  raw_norm_type: BatchNorm
train:
  mode: custom
  sampler: full_batch
  # sampler: saint_rw
  # batch_size: 32
  eval_period: 5
  enable_ckpt: False
  # ckpt_period: 100
model:
  type: GPSModel
  loss_fun: cross_entropy
  edge_decoding: dot
gt:
  layer_type: GCN+Transformer
  layers: 2
  n_heads: 4
  dim_hidden: 64  # `gt.dim_hidden` must match `gnn.dim_inner`
  dropout: 0.2
  attn_dropout: 0.0
  layer_norm: False
  batch_norm: False
gnn:
  head: node
  layers_pre_mp: 0
  layers_post_mp: 1
  dim_inner: 64  # `gt.dim_hidden` must match `gnn.dim_inner`
  batchnorm: True
  act: gelu
  dropout: 0.2
  agg: mean
  normalize_adj: False
optim:
  clip_grad_norm: True
  optimizer: adamW
  weight_decay: 1e-5
  base_lr: 0.0005
  max_epoch: 200
  scheduler: cosine_with_warmup
  num_warmup_epochs: 10
84 changes: 84 additions & 0 deletions configs/GPS/webkb-tex-GPS.yaml
@@ -0,0 +1,84 @@
out_dir: results
metric_best: accuracy
wandb:
  use: True
  project: Texas
dataset:
  format: PyG-WebKB
  name: texas
  task: node
  task_type: classification
  transductive: True
  split_mode: standard
  node_encoder: True
  node_encoder_name: LapPE
  # node_encoder_name: LinearNode+GraphormerBias
  node_encoder_bn: False
  edge_encoder: False
  edge_encoder_name: DummyEdge
  edge_encoder_bn: False
posenc_GraphormerBias:
  enable: False
  node_degrees_only: True
  num_spatial_types: 20
  num_in_degrees: 13
  num_out_degrees: 105
graphormer:
  use_graph_token: False
posenc_LapPE:
  enable: True
  eigen:
    laplacian_norm: none
    eigvec_norm: L2
    max_freqs: 4
  model: DeepSet
  dim_pe: 4
  layers: 2
  n_heads: 4  # Only used when `posenc.model: Transformer`
  raw_norm_type: none
posenc_RWSE:
  enable: False
  kernel:
    times_func: range(1,17)
  model: Linear
  dim_pe: 16
  raw_norm_type: BatchNorm
train:
  mode: custom
  sampler: full_batch
  # sampler: saint_rw
  # batch_size: 32
  eval_period: 5
  enable_ckpt: False
  # ckpt_period: 100
model:
  type: GPSModel
  loss_fun: cross_entropy
  edge_decoding: dot
gt:
  layer_type: GCN+Transformer
  layers: 2
  n_heads: 4
  dim_hidden: 64  # `gt.dim_hidden` must match `gnn.dim_inner`
  dropout: 0.2
  attn_dropout: 0.0
  layer_norm: False
  batch_norm: False
gnn:
  head: node
  layers_pre_mp: 0
  layers_post_mp: 1
  dim_inner: 64  # `gt.dim_hidden` must match `gnn.dim_inner`
  batchnorm: True
  act: gelu
  dropout: 0.2
  agg: mean
  normalize_adj: False
optim:
  clip_grad_norm: True
  optimizer: adamW
  weight_decay: 1e-5
  base_lr: 0.0005
  max_epoch: 200
  scheduler: cosine_with_warmup
  num_warmup_epochs: 10
84 changes: 84 additions & 0 deletions configs/GPS/webkb-wis-GPS.yaml
@@ -0,0 +1,84 @@
out_dir: results
metric_best: accuracy
wandb:
  use: True
  project: Wisconsin
dataset:
  format: PyG-WebKB
  name: wisconsin
  task: node
  task_type: classification
  transductive: True
  split_mode: standard
  node_encoder: True
  node_encoder_name: LapPE
  # node_encoder_name: LinearNode+GraphormerBias
  node_encoder_bn: False
  edge_encoder: False
  edge_encoder_name: DummyEdge
  edge_encoder_bn: False
posenc_GraphormerBias:
  enable: False
  node_degrees_only: True
  num_spatial_types: 20
  num_in_degrees: 12
  num_out_degrees: 123
graphormer:
  use_graph_token: False
posenc_LapPE:
  enable: True
  eigen:
    laplacian_norm: none
    eigvec_norm: L2
    max_freqs: 4
  model: DeepSet
  dim_pe: 4
  layers: 2
  n_heads: 4  # Only used when `posenc.model: Transformer`
  raw_norm_type: none
posenc_RWSE:
  enable: False
  kernel:
    times_func: range(1,17)
  model: Linear
  dim_pe: 16
  raw_norm_type: BatchNorm
train:
  mode: custom
  sampler: full_batch
  # sampler: saint_rw
  # batch_size: 32
  eval_period: 5
  enable_ckpt: False
  # ckpt_period: 100
model:
  type: GPSModel
  loss_fun: cross_entropy
  edge_decoding: dot
gt:
  layer_type: GCN+Transformer
  layers: 2
  n_heads: 4
  dim_hidden: 64  # `gt.dim_hidden` must match `gnn.dim_inner`
  dropout: 0.2
  attn_dropout: 0.0
  layer_norm: False
  batch_norm: False
gnn:
  head: node
  layers_pre_mp: 0
  layers_post_mp: 1
  dim_inner: 64  # `gt.dim_hidden` must match `gnn.dim_inner`
  batchnorm: True
  act: gelu
  dropout: 0.2
  agg: mean
  normalize_adj: False
optim:
  clip_grad_norm: True
  optimizer: adamW
  weight_decay: 1e-5
  base_lr: 0.0005
  max_epoch: 200
  scheduler: cosine_with_warmup
  num_warmup_epochs: 10
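Note (not part of the commit): all four configs also carry a disabled `posenc_RWSE` block. A minimal sketch of how one of them could be switched from LapPE to the random-walk structural encoding instead; the `LinearNode+RWSE` encoder name is an assumption, not something this diff confirms:

dataset:
  node_encoder_name: LinearNode+RWSE  # assumed encoder name, not part of this commit
posenc_LapPE:
  enable: False
posenc_RWSE:
  enable: True
  kernel:
    times_func: range(1,17)   # random-walk steps 1..16, as already set in the configs above
  model: Linear
  dim_pe: 16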