Skip to content

Commit

Permalink
[Misc] Rename number_of_edges and number_of_nodes to num_edges and num_nodes in examples. (dmlc#5492)
Browse files Browse the repository at this point in the history

* pytorch_example

* fix

---------

Co-authored-by: Ubuntu <[email protected]>
  • Loading branch information
frozenbugs and Ubuntu authored Mar 29, 2023
1 parent 3c8ac09 commit 5008af2
Show file tree
Hide file tree
Showing 70 changed files with 246 additions and 269 deletions.
4 changes: 2 additions & 2 deletions examples/pytorch/appnp/train.py
Original file line number Diff line number Diff line change
Expand Up @@ -52,7 +52,7 @@ def main(args):
test_mask = g.ndata["test_mask"]
in_feats = features.shape[1]
n_classes = data.num_labels
n_edges = g.number_of_edges()
n_edges = g.num_edges()
print(
"""----Data statistics------'
#Edges %d
Expand All @@ -69,7 +69,7 @@ def main(args):
)
)

n_edges = g.number_of_edges()
n_edges = g.num_edges()
# add self loop
g = dgl.remove_self_loop(g)
g = dgl.add_self_loop(g)
Expand Down
2 changes: 1 addition & 1 deletion examples/pytorch/bgrl/utils.py
Original file line number Diff line number Diff line change
Expand Up @@ -77,7 +77,7 @@ def get_ppi():
test_dataset = PPIDataset(mode="test")
train_val_dataset = [i for i in train_dataset] + [i for i in val_dataset]
for idx, data in enumerate(train_val_dataset):
data.ndata["batch"] = torch.zeros(data.number_of_nodes()) + idx
data.ndata["batch"] = torch.zeros(data.num_nodes()) + idx
data.ndata["batch"] = data.ndata["batch"].long()

g = list(GraphDataLoader(train_val_dataset, batch_size=22, shuffle=True))
Expand Down
2 changes: 1 addition & 1 deletion examples/pytorch/caregnn/model_sampling.py
Original file line number Diff line number Diff line change
Expand Up @@ -22,7 +22,7 @@ def sample_frontier(self, block_id, g, seed_nodes, *args, **kwargs):
with g.local_scope():
new_edges_masks = {}
for etype in g.canonical_etypes:
edge_mask = th.zeros(g.number_of_edges(etype))
edge_mask = th.zeros(g.num_edges(etype))
# extract each node from dict because of single node type
for node in seed_nodes:
edges = g.in_edges(node, form="eid", etype=etype)
Expand Down
2 changes: 1 addition & 1 deletion examples/pytorch/dgi/dgi.py
Original file line number Diff line number Diff line change
Expand Up @@ -24,7 +24,7 @@ def __init__(self, g, in_feats, n_hidden, n_layers, activation, dropout):

def forward(self, features, corrupt=False):
if corrupt:
perm = torch.randperm(self.g.number_of_nodes())
perm = torch.randperm(self.g.num_nodes())
features = features[perm]
features = self.conv(features)
return features
Expand Down
4 changes: 2 additions & 2 deletions examples/pytorch/dgi/train.py
Original file line number Diff line number Diff line change
Expand Up @@ -38,7 +38,7 @@ def main(args):
test_mask = torch.ByteTensor(g.ndata["test_mask"])
in_feats = features.shape[1]
n_classes = data.num_classes
n_edges = g.number_of_edges()
n_edges = g.num_edges()

if args.gpu < 0:
cuda = False
Expand All @@ -55,7 +55,7 @@ def main(args):
if args.self_loop:
g = dgl.remove_self_loop(g)
g = dgl.add_self_loop(g)
n_edges = g.number_of_edges()
n_edges = g.num_edges()

if args.gpu >= 0:
g = g.to(args.gpu)
Expand Down
6 changes: 3 additions & 3 deletions examples/pytorch/dgmg/cycles.py
Original file line number Diff line number Diff line change
Expand Up @@ -22,7 +22,7 @@ def get_next(i, v_max):


def is_cycle(g):
size = g.number_of_nodes()
size = g.num_nodes()

if size < 3:
return False
Expand Down Expand Up @@ -102,7 +102,7 @@ def collate_batch(self, batch):

def dglGraph_to_adj_list(g):
adj_list = {}
for node in range(g.number_of_nodes()):
for node in range(g.num_nodes()):
# For undirected graph. successors and
# predecessors are equivalent.
adj_list[node] = g.successors(node).tolist()
Expand Down Expand Up @@ -141,7 +141,7 @@ def rollout_and_examine(self, model, num_samples):
sampled_adj_list = dglGraph_to_adj_list(sampled_graph)
adj_lists_to_plot.append(sampled_adj_list)

graph_size = sampled_graph.number_of_nodes()
graph_size = sampled_graph.num_nodes()
valid_size = self.v_min <= graph_size <= self.v_max
cycle = is_cycle(sampled_graph)

Expand Down
14 changes: 7 additions & 7 deletions examples/pytorch/dgmg/model.py
Original file line number Diff line number Diff line change
Expand Up @@ -21,7 +21,7 @@ def __init__(self, node_hidden_size):
self.node_to_graph = nn.Linear(node_hidden_size, self.graph_hidden_size)

def forward(self, g):
if g.number_of_nodes() == 0:
if g.num_nodes() == 0:
return torch.zeros(1, self.graph_hidden_size)
else:
# Node features are stored as hv in ndata.
Expand Down Expand Up @@ -75,7 +75,7 @@ def dgmg_reduce(self, nodes, round):
return {"a": node_activation}

def forward(self, g):
if g.number_of_edges() == 0:
if g.num_edges() == 0:
return
else:
for t in range(self.num_prop_rounds):
Expand Down Expand Up @@ -115,7 +115,7 @@ def __init__(self, graph_embed_func, node_hidden_size):
self.init_node_activation = torch.zeros(1, 2 * node_hidden_size)

def _initialize_node_repr(self, g, node_type, graph_embed):
num_nodes = g.number_of_nodes()
num_nodes = g.num_nodes()
hv_init = self.initialize_hv(
torch.cat(
[
Expand Down Expand Up @@ -166,7 +166,7 @@ def prepare_training(self):

def forward(self, g, action=None):
graph_embed = self.graph_op["embed"](g)
src_embed = g.nodes[g.number_of_nodes() - 1].data["hv"]
src_embed = g.nodes[g.num_nodes() - 1].data["hv"]

logit = self.add_edge(torch.cat([graph_embed, src_embed], dim=1))
prob = torch.sigmoid(logit)
Expand Down Expand Up @@ -200,7 +200,7 @@ def prepare_training(self):
self.log_prob = []

def forward(self, g, dest):
src = g.number_of_nodes() - 1
src = g.num_nodes() - 1
possible_dests = range(src)

src_embed_expand = g.nodes[src].data["hv"].expand(src, -1)
Expand Down Expand Up @@ -320,10 +320,10 @@ def forward_train(self, actions):

def forward_inference(self):
stop = self.add_node_and_update()
while (not stop) and (self.g.number_of_nodes() < self.v_max + 1):
while (not stop) and (self.g.num_nodes() < self.v_max + 1):
num_trials = 0
to_add_edge = self.add_edge_or_not()
while to_add_edge and (num_trials < self.g.number_of_nodes() - 1):
while to_add_edge and (num_trials < self.g.num_nodes() - 1):
self.choose_dest_and_update()
num_trials += 1
to_add_edge = self.add_edge_or_not()
Expand Down
2 changes: 1 addition & 1 deletion examples/pytorch/diffpool/data_utils.py
Original file line number Diff line number Diff line change
Expand Up @@ -26,7 +26,7 @@ def pre_process(dataset, prog_args):
print("overwrite node attributes with DiffPool's preprocess setting")
if prog_args.data_mode == "id":
for g, _ in dataset:
id_list = np.arange(g.number_of_nodes())
id_list = np.arange(g.num_nodes())
g.ndata["feat"] = one_hotify(id_list, pad=dataset.max_num_node)

elif prog_args.data_mode == "deg-num":
Expand Down
2 changes: 1 addition & 1 deletion examples/pytorch/diffpool/model/dgl_layers/gnn.py
Original file line number Diff line number Diff line change
Expand Up @@ -148,7 +148,7 @@ def forward(self, g, h):
if self.link_pred:
current_lp_loss = torch.norm(
adj.to_dense() - torch.mm(assign_tensor, torch.t(assign_tensor))
) / np.power(g.number_of_nodes(), 2)
) / np.power(g.num_nodes(), 2)
self.loss_log["LinkPredLoss"] = current_lp_loss

for loss_layer in self.reg_loss:
Expand Down
4 changes: 2 additions & 2 deletions examples/pytorch/gatv2/train.py
Original file line number Diff line number Diff line change
Expand Up @@ -90,7 +90,7 @@ def main(args):
test_mask = g.ndata["test_mask"]
num_feats = features.shape[1]
n_classes = data.num_labels
n_edges = g.number_of_edges()
n_edges = g.num_edges()
print(
"""----Data statistics------'
#Edges %d
Expand All @@ -110,7 +110,7 @@ def main(args):
# add self loop
g = dgl.remove_self_loop(g)
g = dgl.add_self_loop(g)
n_edges = g.number_of_edges()
n_edges = g.num_edges()
# create model
heads = ([args.num_heads] * args.num_layers) + [args.num_out_heads]
model = GATv2(
Expand Down
34 changes: 17 additions & 17 deletions examples/pytorch/gcmc/data.py
Original file line number Diff line number Diff line change
Expand Up @@ -305,49 +305,49 @@ def _npairs(graph):
rst = 0
for r in self.possible_rating_values:
r = to_etype_name(r)
rst += graph.number_of_edges(str(r))
rst += graph.num_edges(str(r))
return rst

print(
"Train enc graph: \t#user:{}\t#movie:{}\t#pairs:{}".format(
self.train_enc_graph.number_of_nodes("user"),
self.train_enc_graph.number_of_nodes("movie"),
self.train_enc_graph.num_nodes("user"),
self.train_enc_graph.num_nodes("movie"),
_npairs(self.train_enc_graph),
)
)
print(
"Train dec graph: \t#user:{}\t#movie:{}\t#pairs:{}".format(
self.train_dec_graph.number_of_nodes("user"),
self.train_dec_graph.number_of_nodes("movie"),
self.train_dec_graph.number_of_edges(),
self.train_dec_graph.num_nodes("user"),
self.train_dec_graph.num_nodes("movie"),
self.train_dec_graph.num_edges(),
)
)
print(
"Valid enc graph: \t#user:{}\t#movie:{}\t#pairs:{}".format(
self.valid_enc_graph.number_of_nodes("user"),
self.valid_enc_graph.number_of_nodes("movie"),
self.valid_enc_graph.num_nodes("user"),
self.valid_enc_graph.num_nodes("movie"),
_npairs(self.valid_enc_graph),
)
)
print(
"Valid dec graph: \t#user:{}\t#movie:{}\t#pairs:{}".format(
self.valid_dec_graph.number_of_nodes("user"),
self.valid_dec_graph.number_of_nodes("movie"),
self.valid_dec_graph.number_of_edges(),
self.valid_dec_graph.num_nodes("user"),
self.valid_dec_graph.num_nodes("movie"),
self.valid_dec_graph.num_edges(),
)
)
print(
"Test enc graph: \t#user:{}\t#movie:{}\t#pairs:{}".format(
self.test_enc_graph.number_of_nodes("user"),
self.test_enc_graph.number_of_nodes("movie"),
self.test_enc_graph.num_nodes("user"),
self.test_enc_graph.num_nodes("movie"),
_npairs(self.test_enc_graph),
)
)
print(
"Test dec graph: \t#user:{}\t#movie:{}\t#pairs:{}".format(
self.test_dec_graph.number_of_nodes("user"),
self.test_dec_graph.number_of_nodes("movie"),
self.test_dec_graph.number_of_edges(),
self.test_dec_graph.num_nodes("user"),
self.test_dec_graph.num_nodes("movie"),
self.test_dec_graph.num_edges(),
)
)

Expand Down Expand Up @@ -398,7 +398,7 @@ def _generate_enc_graph(
# sanity check
assert (
len(rating_pairs[0])
== sum([graph.number_of_edges(et) for et in graph.etypes]) // 2
== sum([graph.num_edges(et) for et in graph.etypes]) // 2
)

if add_support:
Expand Down
10 changes: 5 additions & 5 deletions examples/pytorch/gcmc/train_sampling.py
Original file line number Diff line number Diff line change
Expand Up @@ -100,8 +100,8 @@ def load_subtensor(input_nodes, pair_graph, blocks, dataset, parent_graph):


def flatten_etypes(pair_graph, dataset, segment):
n_users = pair_graph.number_of_nodes("user")
n_movies = pair_graph.number_of_nodes("movie")
n_users = pair_graph.num_nodes("user")
n_movies = pair_graph.num_nodes("movie")
src = []
dst = []
labels = []
Expand Down Expand Up @@ -274,7 +274,7 @@ def run(proc_id, n_gpus, args, devices, dataset):
dataset.train_enc_graph,
{
to_etype_name(k): th.arange(
dataset.train_enc_graph.number_of_edges(etype=to_etype_name(k))
dataset.train_enc_graph.num_edges(etype=to_etype_name(k))
)
for k in dataset.possible_rating_values
},
Expand All @@ -288,7 +288,7 @@ def run(proc_id, n_gpus, args, devices, dataset):
if proc_id == 0:
valid_dataloader = dgl.dataloading.DataLoader(
dataset.valid_dec_graph,
th.arange(dataset.valid_dec_graph.number_of_edges()),
th.arange(dataset.valid_dec_graph.num_edges()),
sampler,
g_sampling=dataset.valid_enc_graph,
batch_size=args.minibatch_size,
Expand All @@ -297,7 +297,7 @@ def run(proc_id, n_gpus, args, devices, dataset):
)
test_dataloader = dgl.dataloading.DataLoader(
dataset.test_dec_graph,
th.arange(dataset.test_dec_graph.number_of_edges()),
th.arange(dataset.test_dec_graph.num_edges()),
sampler,
g_sampling=dataset.test_enc_graph,
batch_size=args.minibatch_size,
Expand Down
2 changes: 1 addition & 1 deletion examples/pytorch/ggnn/ggnn_gc.py
Original file line number Diff line number Diff line change
Expand Up @@ -33,7 +33,7 @@ def forward(self, graph, labels=None):

assert annotation.size()[-1] == self.annotation_size

node_num = graph.number_of_nodes()
node_num = graph.num_nodes()

zero_pad = torch.zeros(
[node_num, self.out_feats - self.annotation_size],
Expand Down
2 changes: 1 addition & 1 deletion examples/pytorch/ggnn/ggnn_ns.py
Original file line number Diff line number Diff line change
Expand Up @@ -30,7 +30,7 @@ def forward(self, graph, labels=None):

assert annotation.size()[-1] == self.annotation_size

node_num = graph.number_of_nodes()
node_num = graph.num_nodes()

zero_pad = torch.zeros(
[node_num, self.out_feats - self.annotation_size],
Expand Down
2 changes: 1 addition & 1 deletion examples/pytorch/ggnn/ggsnn.py
Original file line number Diff line number Diff line change
Expand Up @@ -48,7 +48,7 @@ def forward(self, graph, seq_lengths, ground_truth=None):

assert annotation.size()[-1] == self.annotation_size

node_num = graph.number_of_nodes()
node_num = graph.num_nodes()

all_logits = []
for _ in range(self.max_seq_length):
Expand Down
16 changes: 8 additions & 8 deletions examples/pytorch/graph_matching/examples.py
Original file line number Diff line number Diff line change
Expand Up @@ -24,18 +24,18 @@
print(distance) # 1.0

# With user-input cost matrices
node_substitution_cost = np.empty((G1.number_of_nodes(), G2.number_of_nodes()))
G1_node_deletion_cost = np.empty(G1.number_of_nodes())
G2_node_insertion_cost = np.empty(G2.number_of_nodes())
node_substitution_cost = np.empty((G1.num_nodes(), G2.num_nodes()))
G1_node_deletion_cost = np.empty(G1.num_nodes())
G2_node_insertion_cost = np.empty(G2.num_nodes())

edge_substitution_cost = np.empty((G1.number_of_edges(), G2.number_of_edges()))
G1_edge_deletion_cost = np.empty(G1.number_of_edges())
G2_edge_insertion_cost = np.empty(G2.number_of_edges())
edge_substitution_cost = np.empty((G1.num_edges(), G2.num_edges()))
G1_edge_deletion_cost = np.empty(G1.num_edges())
G2_edge_insertion_cost = np.empty(G2.num_edges())

# Node substitution cost of 0 when node-ids are same, else 1
node_substitution_cost.fill(1.0)
for i in range(G1.number_of_nodes()):
for j in range(G2.number_of_nodes()):
for i in range(G1.num_nodes()):
for j in range(G2.num_nodes()):
node_substitution_cost[i, j] = 0.0

# Node insertion/deletion cost of 1
Expand Down
Loading

0 comments on commit 5008af2

Please sign in to comment.