
Commit 34a067e

[bugfix] Fix the memory leak issue of Cluster GAT under 0.5 kernel and simplify the bipartite GAT. (dmlc#1908)

* upd

* upd

* upd

* upd

* upd
yzh119 authored Aug 1, 2020
1 parent 303e423 commit 34a067e
Showing 5 changed files with 11 additions and 28 deletions.
examples/pytorch/ogb/cluster-gat/main.py (2 changes: 0 additions & 2 deletions)

@@ -95,8 +95,6 @@ def inference(self, g, x, batch_size, device):
                 drop_last=False,
                 num_workers=args.num_workers)
 
-            layer.fc_src = layer.fc
-            layer.fc_dst = layer.fc
             for input_nodes, output_nodes, blocks in tqdm.tqdm(dataloader):
                 block = blocks[0].to(device)
                 h = x[input_nodes].to(device)
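
Note: the two deleted lines monkey-patched `fc_src`/`fc_dst` onto each GAT layer so that inference could run on bipartite blocks. With the simplified `GATConv` (see the gatconv.py diff below), one shared `fc` projects both sides, so no patching is needed. A hedged sketch of the per-block call after this change; `layer`, `block`, and `h` come from the surrounding example, and the `h_dst` slicing and `flatten(1)` are assumptions about the rest of the loop body, not lines from this diff:

    # hedged sketch, not part of the diff: call the layer directly on the block;
    # no fc_src/fc_dst aliasing is required any more
    h_dst = h[:block.number_of_dst_nodes()]        # features of destination nodes
    h = layer(block, (h, h_dst)).flatten(1)        # shared self.fc handles both sides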
python/dgl/heterograph_index.py (3 changes: 0 additions & 3 deletions)

@@ -522,7 +522,6 @@ def out_edges(self, etype, v):
         eid = F.from_dgl_nd(edge_array(2))
         return src, dst, eid
 
-    @utils.cached_member(cache='_cache', prefix='edges')
     def edges(self, etype, order=None):
         """Return all the edges
@@ -821,7 +820,6 @@ def edge_subgraph(self, induced_edges, preserve_nodes):
         eids = [F.to_dgl_nd(edges) for edges in induced_edges]
         return _CAPI_DGLHeteroEdgeSubgraph(self, eids, preserve_nodes)
 
-    @utils.cached_member(cache='_cache', prefix='unitgraph')
     def get_unitgraph(self, etype, ctx):
         """Create a unitgraph graph from given edge type and copy to the given device
         context.
@@ -912,7 +910,6 @@ def create_format_(self):
         """Create all sparse matrices allowed for the graph."""
         return _CAPI_DGLHeteroCreateFormat(self)
 
-    @utils.cached_member(cache='_cache', prefix='reverse')
     def reverse(self):
         """Reverse the heterogeneous graph adjacency
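
Note: dropping `@utils.cached_member` means `edges`, `get_unitgraph`, and `reverse` no longer stash their results on the graph index object. Cluster-GAT creates many partition subgraphs, so cached arrays attached to every subgraph index accumulate and keep memory alive. A rough sketch of the caching behaviour being removed; this is an assumed, simplified reimplementation of `utils.cached_member`, not DGL's exact code:

    import functools

    def cached_member(cache, prefix):
        """Memoize a method's result on the instance (simplified sketch)."""
        def decorator(func):
            @functools.wraps(func)
            def wrapper(self, *args):
                store = getattr(self, cache, None)
                if store is None:
                    store = {}
                    setattr(self, cache, store)
                key = (prefix,) + args
                if key not in store:
                    # the cached value lives as long as the graph index itself
                    store[key] = func(self, *args)
                return store[key]
            return wrapper
        return decorator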
python/dgl/nn/pytorch/conv/gatconv.py (27 changes: 6 additions & 21 deletions)

@@ -26,13 +26,8 @@ class GATConv(nn.Module):
     Parameters
     ----------
-    in_feats : int, or pair of ints
+    in_feats : int
         Input feature size.
-        If the layer is to be applied to a unidirectional bipartite graph, ``in_feats``
-        specifies the input feature size on both the source and destination nodes. If
-        a scalar is given, the source and destination node feature size would take the
-        same value.
     out_feats : int
         Output feature size.
     num_heads : int
@@ -62,14 +57,8 @@ def __init__(self,
         self._num_heads = num_heads
         self._in_src_feats, self._in_dst_feats = expand_as_pair(in_feats)
         self._out_feats = out_feats
-        if isinstance(in_feats, tuple):
-            self.fc_src = nn.Linear(
-                self._in_src_feats, out_feats * num_heads, bias=False)
-            self.fc_dst = nn.Linear(
-                self._in_dst_feats, out_feats * num_heads, bias=False)
-        else:
-            self.fc = nn.Linear(
-                self._in_src_feats, out_feats * num_heads, bias=False)
+        self.fc = nn.Linear(
+            self._in_src_feats, out_feats * num_heads, bias=False)
         self.attn_l = nn.Parameter(th.FloatTensor(size=(1, num_heads, out_feats)))
         self.attn_r = nn.Parameter(th.FloatTensor(size=(1, num_heads, out_feats)))
         self.feat_drop = nn.Dropout(feat_drop)
@@ -89,11 +78,7 @@ def __init__(self,
     def reset_parameters(self):
         """Reinitialize learnable parameters."""
         gain = nn.init.calculate_gain('relu')
-        if hasattr(self, 'fc'):
-            nn.init.xavier_normal_(self.fc.weight, gain=gain)
-        else:  # bipartite graph neural networks
-            nn.init.xavier_normal_(self.fc_src.weight, gain=gain)
-            nn.init.xavier_normal_(self.fc_dst.weight, gain=gain)
+        nn.init.xavier_normal_(self.fc.weight, gain=gain)
         nn.init.xavier_normal_(self.attn_l, gain=gain)
         nn.init.xavier_normal_(self.attn_r, gain=gain)
         if isinstance(self.res_fc, nn.Linear):
@@ -122,8 +107,8 @@ def forward(self, graph, feat):
            if isinstance(feat, tuple):
                h_src = self.feat_drop(feat[0])
                h_dst = self.feat_drop(feat[1])
-               feat_src = self.fc_src(h_src).view(-1, self._num_heads, self._out_feats)
-               feat_dst = self.fc_dst(h_dst).view(-1, self._num_heads, self._out_feats)
+               feat_src = self.fc(h_src).view(-1, self._num_heads, self._out_feats)
+               feat_dst = self.fc(h_dst).view(-1, self._num_heads, self._out_feats)
            else:
                h_src = h_dst = self.feat_drop(feat)
                feat_src = feat_dst = self.fc(h_src).view(
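
Note: after this simplification, `GATConv` keeps a single `fc` and expects source and destination inputs to share one feature size when a feature pair is passed; the `(in_src, in_dst)` constructor form is gone, as the test change below confirms. A minimal usage sketch under that assumption; the toy graph, node-type names, and sizes here are made up for illustration and are not part of the commit:

    import torch
    import dgl
    from dgl.nn.pytorch import GATConv

    # a tiny unidirectional bipartite graph: 3 "user" nodes -> 2 "item" nodes
    g = dgl.heterograph({('user', 'clicks', 'item'): ([0, 1, 2], [0, 1, 1])})

    conv = GATConv(in_feats=5, out_feats=2, num_heads=4)
    feat = (torch.randn(g.number_of_src_nodes(), 5),   # source features
            torch.randn(g.number_of_dst_nodes(), 5))   # destination features, same size
    out = conv(g, feat)
    print(out.shape)  # (num_dst_nodes, num_heads, out_feats) == (2, 4, 2)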
src/array/cuda/csr_sort.cu (3 changes: 3 additions & 0 deletions)

@@ -140,6 +140,9 @@ void CSRSort_<kDLGPU, int64_t>(CSRMatrix* csr) {
   csr->sorted = true;
   csr->indices = new_indices;
   csr->data = new_data;
+
+  // free resources
+  device->FreeWorkspace(ctx, workspace);
 }
 
 template void CSRSort_<kDLGPU, int32_t>(CSRMatrix* csr);
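
Note: the added `FreeWorkspace` call releases the scratch buffer that `CSRSort_` allocates through DGL's device API earlier in the function; without it, every GPU sort (triggered repeatedly by Cluster-GAT mini-batches) left a buffer behind. Because that memory comes from DGL's allocator rather than PyTorch's, `torch.cuda.memory_allocated()` will not show the leak. A hedged way to verify the fix is to watch driver-level memory instead; this sketch assumes `pynvml` is installed and that `run_one_batch()` is any GPU Cluster-GAT step, neither of which is part of the commit:

    import pynvml

    def gpu_used_bytes(device_index=0):
        """Total GPU memory in use, as reported by the driver."""
        pynvml.nvmlInit()
        handle = pynvml.nvmlDeviceGetHandleByIndex(device_index)
        return pynvml.nvmlDeviceGetMemoryInfo(handle).used

    def check_no_leak(run_one_batch, iters=100):
        baseline = gpu_used_bytes()
        for _ in range(iters):
            run_one_batch()  # e.g. one Cluster-GAT inference mini-batch on GPU
        growth = gpu_used_bytes() - baseline
        print(f"GPU memory growth after {iters} iterations: {growth} bytes")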
tests/pytorch/test_nn.py (4 changes: 2 additions & 2 deletions)

@@ -497,8 +497,8 @@ def test_gat_conv(g, idtype):
 def test_gat_conv_bi(g, idtype):
     g = g.astype(idtype).to(F.ctx())
     ctx = F.ctx()
-    gat = nn.GATConv((5, 10), 2, 4)
-    feat = (F.randn((g.number_of_src_nodes(), 5)), F.randn((g.number_of_dst_nodes(), 10)))
+    gat = nn.GATConv(5, 2, 4)
+    feat = (F.randn((g.number_of_src_nodes(), 5)), F.randn((g.number_of_dst_nodes(), 5)))
     gat = gat.to(ctx)
     h = gat(g, feat)
     assert h.shape == (g.number_of_dst_nodes(), 4, 2)
