pytorch_tensorboard.py
import torch
import torchvision.utils as vutils
import numpy as np
import torchvision.models as models
from torchvision import datasets
from tensorboardX import SummaryWriter

resnet18 = models.resnet18(pretrained=False)  # untrained ResNet-18, used only for histogram logging
writer = SummaryWriter()
sample_rate = 44100
freqs = [262, 294, 330, 349, 392, 440, 440, 440, 440, 440, 440]
for n_iter in range(100):
    s1 = torch.rand(1)  # value to keep
    s2 = torch.rand(1)
    writer.add_scalar('data/scalar1', s1[0], n_iter)  # data grouping by `slash`
    writer.add_scalar('data/scalar2', s2[0], n_iter)
    writer.add_scalars('data/scalar_group', {"xsinx": n_iter * np.sin(n_iter),
                                             "xcosx": n_iter * np.cos(n_iter),
                                             "arctanx": np.arctan(n_iter)}, n_iter)
    x = torch.rand(32, 3, 64, 64)  # output from network
    if n_iter % 10 == 0:
        x = vutils.make_grid(x, normalize=True, scale_each=True)
        writer.add_image('Image', x, n_iter)
        x = torch.zeros(sample_rate * 2)
        for i in range(x.size(0)):
            x[i] = np.cos(freqs[n_iter // 10] * np.pi * float(i) / float(sample_rate))  # sound amplitude should be in [-1, 1]
        writer.add_audio('myAudio', x, n_iter, sample_rate=sample_rate)
        writer.add_text('Text', 'text logged at step:' + str(n_iter), n_iter)
        for name, param in resnet18.named_parameters():
            writer.add_histogram(name, param.clone().cpu().data.numpy(), n_iter)
        writer.add_pr_curve('xoxo', np.random.randint(2, size=100), np.random.rand(100), n_iter)  # needs tensorboard 0.4RC or later

dataset = datasets.MNIST('data', train=False, download=True)
images = dataset.test_data[:100].float()
label = dataset.test_labels[:100]
features = images.view(100, 784)
writer.add_embedding(features, metadata=label, label_img=images.unsqueeze(1))
# export scalar data to JSON for external processing
writer.export_scalars_to_json("./all_scalars.json")
writer.close()
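
# Usage note: with no arguments, tensorboardX's SummaryWriter() writes event files
# under a ./runs/<timestamp>_<hostname> directory by default. Assuming that default
# log location, the logged scalars, images, audio, histograms, PR curves, and the
# embedding can then be browsed with:
#   tensorboard --logdir runs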