net.py (forked from deependersingla/deep_trader)
import chainer
import chainer.functions as F
import chainer.links as L


class MnistMLP(chainer.Chain):

    """A multi-layer perceptron.

    Seven fully connected layers: two of width n_units, then a funnel
    of 200 -> 100 -> 50 -> 10 units down to n_out outputs.
    """

    def __init__(self, n_in, n_units, n_out):
        super(MnistMLP, self).__init__(
            l1=L.Linear(n_in, n_units),
            l2=L.Linear(n_units, n_units),
            l3=L.Linear(n_units, 200),
            l4=L.Linear(200, 100),
            l5=L.Linear(100, 50),
            l6=L.Linear(50, 10),
            l7=L.Linear(10, n_out),
        )

    def __call__(self, x):
        # ReLU after every hidden layer; the last layer returns raw
        # scores so a downstream loss (e.g. softmax cross entropy)
        # can be applied to them.
        h1 = F.relu(self.l1(x))
        h2 = F.relu(self.l2(h1))
        h3 = F.relu(self.l3(h2))
        h4 = F.relu(self.l4(h3))
        h5 = F.relu(self.l5(h4))
        h6 = F.relu(self.l6(h5))
        return self.l7(h6)
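
# A minimal smoke test (editor's sketch, not part of the original
# repo): push a dummy batch through MnistMLP on the CPU to confirm the
# layer widths line up. The 784/1000/10 sizes are the usual MNIST
# defaults and are an assumption, not something this file fixes.
def _check_mlp_shapes():
    import numpy as np
    model = MnistMLP(n_in=784, n_units=1000, n_out=10)
    x = np.zeros((5, 784), dtype=np.float32)
    y = model(x)
    assert y.data.shape == (5, 10)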


class MnistMLPParallel(chainer.Chain):

    """An example of a model-parallel MLP.

    This chain combines four small MLPs on two different devices.
    """

    def __init__(self, n_in, n_units, n_out):
        super(MnistMLPParallel, self).__init__(
            first0=MnistMLP(n_in, n_units // 2, n_units).to_gpu(0),
            first1=MnistMLP(n_in, n_units // 2, n_units).to_gpu(1),
            second0=MnistMLP(n_units, n_units // 2, n_out).to_gpu(0),
            second1=MnistMLP(n_units, n_units // 2, n_out).to_gpu(1),
        )

    def __call__(self, x):
        # assume x is on GPU 0
        x1 = F.copy(x, 1)

        z0 = self.first0(x)
        z1 = self.first1(x1)

        # sync: exchange the first-stage activations across the GPUs
        h0 = z0 + F.copy(z1, 0)
        h1 = z1 + F.copy(z0, 1)

        y0 = self.second0(F.relu(h0))
        y1 = self.second1(F.relu(h1))

        # sync: gather both second-stage outputs on GPU 0
        y = y0 + F.copy(y1, 0)
        return y
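

# A usage sketch (editor's assumption, not in the original file): the
# standard Chainer pattern for training either network is to wrap it
# in L.Classifier and drive it with an optimizer. MnistMLPParallel is
# used the same way but needs two visible GPUs for to_gpu(0)/to_gpu(1).
if __name__ == "__main__":
    import numpy as np
    model = L.Classifier(MnistMLP(n_in=784, n_units=1000, n_out=10))
    optimizer = chainer.optimizers.Adam()
    optimizer.setup(model)
    x = np.zeros((5, 784), dtype=np.float32)
    t = np.zeros((5,), dtype=np.int32)
    # update() clears gradients, computes the loss via the classifier,
    # backpropagates, and applies one Adam step.
    optimizer.update(model, x, t)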