@@ -29,24 +29,26 @@ def compute_accuracy(y_target, y_predict):
 y = T.dvector("y")

 # initialize the weights and biases
-w = theano.shared(rng.randn(feats), name="w")
+W = theano.shared(rng.randn(feats), name="w")
 b = theano.shared(0., name="b")


 # Construct Theano expression graph
-p_1 = T.nnet.sigmoid(T.dot(x, w) + b)               # Logistic Probability that target = 1 (activation function)
+p_1 = T.nnet.sigmoid(T.dot(x, W) + b)               # Logistic Probability that target = 1 (activation function)
 prediction = p_1 > 0.5                              # The prediction thresholded
 xent = -y * T.log(p_1) - (1 - y) * T.log(1 - p_1)   # Cross-entropy loss function
-cost = xent.mean() + 0.01 * (w ** 2).sum()          # The cost to minimize (l2 regularization)
-gw, gb = T.grad(cost, [w, b])                       # Compute the gradient of the cost
+# or
+# xent = T.nnet.binary_crossentropy(p_1, y)         # this is provided by theano
+cost = xent.mean() + 0.01 * (W ** 2).sum()          # The cost to minimize (l2 regularization)
+gW, gb = T.grad(cost, [W, b])                       # Compute the gradient of the cost


 # Compile
 learning_rate = 0.1
 train = theano.function(
     inputs=[x, y],
     outputs=[prediction, xent.mean()],
-    updates=((w, w - learning_rate * gw), (b, b - learning_rate * gb)))
+    updates=((W, W - learning_rate * gW), (b, b - learning_rate * gb)))
 predict = theano.function(inputs=[x], outputs=prediction)

 # Training
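
For context, the hunk ends at the `# Training` comment; the compiled `train` and `predict` functions above would typically be driven by a loop like the sketch below. This is only an illustration, not part of the commit: the dataset size `N`, the value of `training_steps`, and the synthetic data built from `rng` (which the hunk suggests is `numpy.random`) are placeholder assumptions, and `compute_accuracy` is assumed to behave as its signature in the hunk header implies.

# Hypothetical training loop (not part of this hunk); N and training_steps
# are placeholder values chosen only for illustration.
N, training_steps = 400, 10000
D = (rng.randn(N, feats), rng.randint(size=N, low=0, high=2))  # synthetic inputs and 0/1 targets

for _ in range(training_steps):
    pred, err = train(D[0], D[1])   # one gradient-descent step; updates W and b in place

print("final mean cross-entropy:", err)
print("accuracy:", compute_accuracy(D[1], predict(D[0])))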