Fix timing (dmlc#1930)
Co-authored-by: Ubuntu <[email protected]>
classicsong and Ubuntu authored Aug 4, 2020
1 parent fae26dd · commit 1232961
Showing 2 changed files with 6 additions and 7 deletions.
examples/pytorch/graphsage/train_cv.py: 7 changes (3 additions & 4 deletions)
@@ -176,7 +176,7 @@ def load_subtensor(g, labels, blocks, hist_blocks, dev_id, aggregation_on_device
     """
     blocks[0].srcdata['features'] = g.ndata['features'][blocks[0].srcdata[dgl.NID]]
     blocks[-1].dstdata['label'] = labels[blocks[-1].dstdata[dgl.NID]]
-    ret_blocks = []
+    ret_blocks = []
     ret_hist_blocks = []
     for i, (block, hist_block) in enumerate(zip(blocks, hist_blocks)):
         hist_col = 'features' if i == 0 else 'hist_%d' % i
@@ -257,9 +257,8 @@ def run(args, dev_id, data):
     for epoch in range(args.num_epochs):
         tic = time.time()
         model.train()
+        tic_step = time.time()
         for step, (blocks, hist_blocks) in enumerate(dataloader):
-            tic_step = time.time()
-
             # The nodes for input lies at the LHS side of the first block.
             # The nodes for output lies at the RHS side of the last block.
             input_nodes = blocks[0].srcdata[dgl.NID]
@@ -283,7 +282,7 @@
                 acc = compute_acc(batch_pred, batch_labels)
                 print('Epoch {:05d} | Step {:05d} | Loss {:.4f} | Train Acc {:.4f} | Speed (samples/sec) {:.4f}'.format(
                     epoch, step, loss.item(), acc.item(), np.mean(iter_tput[3:])))
-
+            tic_step = time.time()
         toc = time.time()
         print('Epoch Time(s): {:.4f}'.format(toc - tic))
         if epoch >= 5:
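The change in both files follows the same pattern: tic_step used to be reset at the top of the loop body, after the dataloader had already produced the batch, so the time spent sampling blocks never entered the per-step measurement. Resetting it before the loop and again at the end of each iteration makes the interval cover sampling plus training. A minimal stand-alone sketch of the two placements (slow_loader and train_step are stand-ins invented for this sketch, not names from the DGL examples):

import time

def slow_loader():
    # Stand-in for a DGL dataloader: each batch takes time to sample,
    # and that cost is paid inside the iterator itself.
    for _ in range(3):
        time.sleep(0.05)
        yield 'batch'

def train_step(batch):
    time.sleep(0.02)  # stand-in for forward/backward/optimizer work

# Old placement: the timer starts after the iterator has already done the
# sampling work, so each step appears to take only ~0.02 s.
for batch in slow_loader():
    tic_step = time.time()
    train_step(batch)
    print('old: %.3f s/step' % (time.time() - tic_step))

# New placement (as in this commit): start the timer before the loop and
# reset it at the end of each iteration, so sampling and training are both
# counted (~0.07 s/step).
tic_step = time.time()
for batch in slow_loader():
    train_step(batch)
    print('new: %.3f s/step' % (time.time() - tic_step))
    tic_step = time.time()

Running the sketch, the old placement reports roughly 0.02 s per step while the new one reports roughly 0.07 s, which is the kind of gap this commit corrects.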
examples/pytorch/graphsage/train_sampling.py: 6 changes (3 additions & 3 deletions)
@@ -155,9 +155,8 @@ def run(args, device, data):

         # Loop over the dataloader to sample the computation dependency graph as a list of
         # blocks.
+        tic_step = time.time()
         for step, (input_nodes, seeds, blocks) in enumerate(dataloader):
-            tic_step = time.time()
-
             # Load the input features as well as output labels
             batch_inputs, batch_labels = load_subtensor(train_g, seeds, input_nodes, device)
             blocks = [block.to(device) for block in blocks]
@@ -175,6 +174,7 @@
                 gpu_mem_alloc = th.cuda.max_memory_allocated() / 1000000 if th.cuda.is_available() else 0
                 print('Epoch {:05d} | Step {:05d} | Loss {:.4f} | Train Acc {:.4f} | Speed (samples/sec) {:.4f} | GPU {:.1f} MiB'.format(
                     epoch, step, loss.item(), acc.item(), np.mean(iter_tput[3:]), gpu_mem_alloc))
+            tic_step = time.time()

         toc = time.time()
         print('Epoch Time(s): {:.4f}'.format(toc - tic))
@@ -207,7 +207,7 @@ def run(args, device, data):
     argparser.add_argument('--inductive', action='store_true',
                            help="Inductive learning setting")
     args = argparser.parse_args()
-
+
     if args.gpu >= 0:
         device = th.device('cuda:%d' % args.gpu)
     else:
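One side effect worth noting: the Speed (samples/sec) figure printed above is derived from these tic_step intervals, so after this commit it reflects end-to-end throughput (sampling plus training) rather than training alone. The diffs reference iter_tput without showing how it is filled; presumably one samples-per-second reading is appended per step, and np.mean(iter_tput[3:]) skips the first few readings as warm-up. A small sketch with made-up numbers:

import numpy as np

# Hypothetical per-step readings (a batch of 1000 nodes divided by the
# step time); the first steps are slow while workers and caches warm up.
iter_tput = [1000 / t for t in (0.50, 0.21, 0.12, 0.10, 0.10, 0.11)]

# Averaging from the fourth reading onward keeps the slow warm-up
# iterations from skewing the reported throughput.
print('Speed (samples/sec) {:.4f}'.format(np.mean(iter_tput[3:])))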
