evaluate.py
import random

import numpy as np
import torch
import torch.nn as nn
from sklearn.metrics import f1_score

from models import LogReg

# Fix all random seeds for reproducibility
np.random.seed(0)
torch.backends.cudnn.deterministic = True
torch.manual_seed(0)
torch.cuda.manual_seed_all(0)
random.seed(0)


def evaluate(embeds, idx_train, idx_val, idx_test, labels, task, epoch, lr, isTest=True, iterater=10):
    nb_classes = labels.shape[1]
    xent = nn.CrossEntropyLoss()

    train_embs = embeds[idx_train]
    val_embs = embeds[idx_val]
    test_embs = embeds[idx_test]

    # Convert one-hot labels to class indices
    train_lbls = torch.argmax(labels[idx_train], dim=1)
    val_lbls = torch.argmax(labels[idx_val], dim=1)
    test_lbls = torch.argmax(labels[idx_test], dim=1)

    accs = []
    micro_f1s = []
    macro_f1s = []
    macro_f1s_val = []

    for _ in range(iterater):
        # Train a fresh logistic-regression classifier on the frozen embeddings
        log = LogReg(train_embs.shape[1], nb_classes)
        opt = torch.optim.Adam(log.parameters(), lr=lr)
        log.to(train_lbls.device)

        val_accs = []; test_accs = []
        val_micro_f1s = []; test_micro_f1s = []
        val_macro_f1s = []; test_macro_f1s = []

        for iter_ in range(epoch):
            # train
            log.train()
            opt.zero_grad()
            logits = log(train_embs)
            loss = xent(logits, train_lbls)
            loss.backward()
            opt.step()

            # val
            logits = log(val_embs)
            preds = torch.argmax(logits, dim=1)
            val_acc = torch.sum(preds == val_lbls).float() / val_lbls.shape[0]
            val_f1_macro = f1_score(val_lbls.cpu(), preds.cpu(), average='macro')
            val_f1_micro = f1_score(val_lbls.cpu(), preds.cpu(), average='micro')
            val_accs.append(val_acc.item())
            val_macro_f1s.append(val_f1_macro)
            val_micro_f1s.append(val_f1_micro)

            # test
            logits = log(test_embs)
            preds = torch.argmax(logits, dim=1)
            test_acc = torch.sum(preds == test_lbls).float() / test_lbls.shape[0]
            test_f1_macro = f1_score(test_lbls.cpu(), preds.cpu(), average='macro')
            test_f1_micro = f1_score(test_lbls.cpu(), preds.cpu(), average='micro')
            test_accs.append(test_acc.item())
            test_macro_f1s.append(test_f1_macro)
            test_micro_f1s.append(test_f1_micro)

        # Report the test metric at the epoch with the best validation metric
        max_iter = val_accs.index(max(val_accs))
        accs.append(test_accs[max_iter])

        max_iter = val_macro_f1s.index(max(val_macro_f1s))
        macro_f1s.append(test_macro_f1s[max_iter])
        macro_f1s_val.append(val_macro_f1s[max_iter])

        max_iter = val_micro_f1s.index(max(val_micro_f1s))
        micro_f1s.append(test_micro_f1s[max_iter])

    if task == 'Node':
        if isTest:
            print("\t[Classification] Macro-F1: {:.4f} ({:.4f}) | Micro-F1: {:.4f} ({:.4f})".format(
                np.mean(macro_f1s), np.std(macro_f1s), np.mean(micro_f1s), np.std(micro_f1s)))
        else:
            return np.mean(macro_f1s_val), np.mean(macro_f1s)

    return macro_f1s, micro_f1s
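

# ---------------------------------------------------------------------------
# Minimal usage sketch (not part of the original file). It assumes LogReg from
# models.py is importable and simply feeds random embeddings with one-hot
# labels to evaluate(); a real run would pass embeddings produced by the
# trained encoder and the dataset's actual split indices.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    n_nodes, dim, n_classes = 100, 64, 3
    embeds = torch.rand(n_nodes, dim)                                  # placeholder node embeddings
    labels = torch.eye(n_classes)[torch.randint(0, n_classes, (n_nodes,))]  # one-hot labels
    idx_train = torch.arange(0, 60)
    idx_val = torch.arange(60, 80)
    idx_test = torch.arange(80, 100)
    evaluate(embeds, idx_train, idx_val, idx_test, labels,
             task='Node', epoch=50, lr=0.01, isTest=True, iterater=2)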