@@ -146,7 +146,7 @@ above.
        positive_graph = positive_graph.to(torch.device('cuda'))
        negative_graph = negative_graph.to(torch.device('cuda'))
        input_features = blocks[0].srcdata['features']
-       pos_score, neg_score = model(positive_graph, blocks, input_features)
+       pos_score, neg_score = model(positive_graph, negative_graph, blocks, input_features)
        loss = compute_loss(pos_score, neg_score)
        opt.zero_grad()
        loss.backward()
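``compute_loss`` is not shown in this hunk. A minimal sketch, assuming a
binary cross-entropy objective and that the score predictor returns
dictionaries keyed by canonical edge type on heterogeneous graphs, might look
like:

.. code:: python

    import torch
    import torch.nn.functional as F

    def compute_loss(pos_score, neg_score):
        # On heterogeneous graphs the predictor returns a dict of scores keyed
        # by canonical edge type; concatenate them into flat tensors first.
        if isinstance(pos_score, dict):
            pos_score = torch.cat(list(pos_score.values()))
            neg_score = torch.cat(list(neg_score.values()))
        scores = torch.cat([pos_score, neg_score])
        # Positive edges are labeled 1, sampled negative edges 0.
        labels = torch.cat(
            [torch.ones_like(pos_score), torch.zeros_like(neg_score)])
        return F.binary_cross_entropy_with_logits(scores, labels)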
@@ -166,7 +166,7 @@ classification/regression.
.. code:: python

    class StochasticTwoLayerRGCN(nn.Module):
-       def __init__(self, in_feat, hidden_feat, out_feat):
+       def __init__(self, in_feat, hidden_feat, out_feat, rel_names):
            super().__init__()
            self.conv1 = dglnn.HeteroGraphConv({
                rel: dglnn.GraphConv(in_feat, hidden_feat, norm='right')
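The new ``rel_names`` argument exists so that one convolution module can be
built per relation. A minimal standalone sketch of that pattern (the feature
sizes and relation names below are illustrative, not taken from the guide):

.. code:: python

    import dgl.nn as dglnn

    in_feat, hidden_feat = 10, 16
    rel_names = ['follows', 'clicks']   # illustrative edge type names

    # One GraphConv per relation name; HeteroGraphConv dispatches messages by
    # edge type and sums the per-relation results.
    conv = dglnn.HeteroGraphConv(
        {rel: dglnn.GraphConv(in_feat, hidden_feat, norm='right')
         for rel in rel_names},
        aggregate='sum')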
@@ -197,6 +197,20 @@ over the edge types for :meth:`dgl.DGLHeteroGraph.apply_edges`.
                    dgl.function.u_dot_v('x', 'x', 'score'), etype=etype)
            return edge_subgraph.edata['score']

+   class Model(nn.Module):
+       def __init__(self, in_features, hidden_features, out_features, num_classes,
+                    etypes):
+           super().__init__()
+           self.rgcn = StochasticTwoLayerRGCN(
+               in_features, hidden_features, out_features, etypes)
+           self.pred = ScorePredictor()
+
+       def forward(self, positive_graph, negative_graph, blocks, x):
+           x = self.rgcn(blocks, x)
+           pos_score = self.pred(positive_graph, x)
+           neg_score = self.pred(negative_graph, x)
+           return pos_score, neg_score
+

Data loader definition is also very similar to that of edge
classification/regression. The only difference is that you need to give
the negative sampler and you will be supplying a dictionary of edge
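A minimal sketch of such a data loader, assuming a heterogeneous graph ``g``
and a dict ``train_eid_dict`` mapping edge types to training edge IDs (both
names, the sampler choice, and the hyperparameters are illustrative, not
prescribed by this commit):

.. code:: python

    import dgl

    sampler = dgl.dataloading.MultiLayerFullNeighborSampler(2)
    dataloader = dgl.dataloading.EdgeDataLoader(
        g, train_eid_dict, sampler,
        # Draw 5 uniformly-sampled negative destinations per positive edge.
        negative_sampler=dgl.dataloading.negative_sampler.Uniform(5),
        batch_size=1024,
        shuffle=True,
        drop_last=False,
        num_workers=4)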
@@ -252,7 +266,7 @@ dictionaries of node types and predictions here.

.. code:: python

-   model = Model(in_features, hidden_features, out_features, num_classes)
+   model = Model(in_features, hidden_features, out_features, num_classes, etypes)
    model = model.cuda()
    opt = torch.optim.Adam(model.parameters())

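``etypes`` is not defined within this hunk; presumably it is the list of the
graph's edge type names, which for an assumed heterogeneous graph ``g`` could
be obtained as:

.. code:: python

    etypes = g.etypes   # e.g. ['follows', 'clicks']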
@@ -261,9 +275,8 @@ dictionaries of node types and predictions here.
        positive_graph = positive_graph.to(torch.device('cuda'))
        negative_graph = negative_graph.to(torch.device('cuda'))
        input_features = blocks[0].srcdata['features']
-       edge_labels = edge_subgraph.edata['labels']
-       edge_predictions = model(edge_subgraph, blocks, input_features)
-       loss = compute_loss(edge_labels, edge_predictions)
+       pos_score, neg_score = model(positive_graph, negative_graph, blocks, input_features)
+       loss = compute_loss(pos_score, neg_score)
        opt.zero_grad()
        loss.backward()
        opt.step()