14
14
15
15
Date: 2017.9.20
16
16
- - - - - -- - - - - - - - - - - - - - - - - - - - - - -
17
- '''
17
+ '''
18
18
from __future__ import print_function
19
19
20
+ import pickle
20
21
import numpy as np
21
22
import matplotlib .pyplot as plt
22
23
23
24
class CNN ():
24
25
25
- def __init__ (self ,conv1_get ,size_p1 ,bp_num1 ,bp_num2 ,bp_num3 ,rate_w = 0.2 ,rate_t = 0.2 ):
26
+ def __init__ (self , conv1_get , size_p1 , bp_num1 , bp_num2 , bp_num3 , rate_w = 0.2 , rate_t = 0.2 ):
26
27
'''
27
28
:param conv1_get: [a,c,d],size, number, step of convolution kernel
28
29
:param size_p1: pooling size
@@ -48,32 +49,30 @@ def __init__(self,conv1_get,size_p1,bp_num1,bp_num2,bp_num3,rate_w=0.2,rate_t=0.
48
49
self .thre_bp3 = - 2 * np .random .rand (self .num_bp3 )+ 1
49
50
50
51
51
- def save_model (self ,save_path ):
52
+ def save_model (self , save_path ):
52
53
#save model dict with pickle
53
- import pickle
54
54
model_dic = {'num_bp1' :self .num_bp1 ,
55
- 'num_bp2' :self .num_bp2 ,
56
- 'num_bp3' :self .num_bp3 ,
57
- 'conv1' :self .conv1 ,
58
- 'step_conv1' :self .step_conv1 ,
59
- 'size_pooling1' :self .size_pooling1 ,
60
- 'rate_weight' :self .rate_weight ,
61
- 'rate_thre' :self .rate_thre ,
62
- 'w_conv1' :self .w_conv1 ,
63
- 'wkj' :self .wkj ,
64
- 'vji' :self .vji ,
65
- 'thre_conv1' :self .thre_conv1 ,
66
- 'thre_bp2' :self .thre_bp2 ,
67
- 'thre_bp3' :self .thre_bp3 }
55
+ 'num_bp2' :self .num_bp2 ,
56
+ 'num_bp3' :self .num_bp3 ,
57
+ 'conv1' :self .conv1 ,
58
+ 'step_conv1' :self .step_conv1 ,
59
+ 'size_pooling1' :self .size_pooling1 ,
60
+ 'rate_weight' :self .rate_weight ,
61
+ 'rate_thre' :self .rate_thre ,
62
+ 'w_conv1' :self .w_conv1 ,
63
+ 'wkj' :self .wkj ,
64
+ 'vji' :self .vji ,
65
+ 'thre_conv1' :self .thre_conv1 ,
66
+ 'thre_bp2' :self .thre_bp2 ,
67
+ 'thre_bp3' :self .thre_bp3 }
68
68
with open (save_path , 'wb' ) as f :
69
69
pickle .dump (model_dic , f )
70
70
71
71
print ('Model saved: %s' % save_path )
72
72
73
73
@classmethod
74
- def ReadModel (cls ,model_path ):
74
+ def ReadModel (cls , model_path ):
75
75
#read saved model
76
- import pickle
77
76
with open (model_path , 'rb' ) as f :
78
77
model_dic = pickle .load (f )
79
78
@@ -97,13 +96,13 @@ def ReadModel(cls,model_path):
97
96
return conv_ins
98
97
99
98
100
- def sig (self ,x ):
99
+ def sig (self , x ):
101
100
return 1 / (1 + np .exp (- 1 * x ))
102
101
103
- def do_round (self ,x ):
102
+ def do_round (self , x ):
104
103
return round (x , 3 )
105
104
106
- def convolute (self ,data ,convs ,w_convs ,thre_convs ,conv_step ):
105
+ def convolute (self , data , convs , w_convs , thre_convs , conv_step ):
107
106
#convolution process
108
107
size_conv = convs [0 ]
109
108
num_conv = convs [1 ]
@@ -132,7 +131,7 @@ def convolute(self,data,convs,w_convs,thre_convs,conv_step):
132
131
focus_list = np .asarray (focus1_list )
133
132
return focus_list ,data_featuremap
134
133
135
- def pooling (self ,featuremaps ,size_pooling ,type = 'average_pool' ):
134
+ def pooling (self , featuremaps , size_pooling , type = 'average_pool' ):
136
135
#pooling process
137
136
size_map = len (featuremaps [0 ])
138
137
size_pooled = int (size_map / size_pooling )
@@ -153,7 +152,7 @@ def pooling(self,featuremaps,size_pooling,type='average_pool'):
153
152
featuremap_pooled .append (map_pooled )
154
153
return featuremap_pooled
155
154
156
- def _expand (self ,datas ):
155
+ def _expand (self , datas ):
157
156
#expanding three dimension data to one dimension list
158
157
data_expanded = []
159
158
for i in range (len (datas )):
@@ -164,14 +163,14 @@ def _expand(self,datas):
164
163
data_expanded = np .asarray (data_expanded )
165
164
return data_expanded
166
165
167
- def _expand_mat (self ,data_mat ):
166
+ def _expand_mat (self , data_mat ):
168
167
#expanding matrix to one dimension list
169
168
data_mat = np .asarray (data_mat )
170
169
shapes = np .shape (data_mat )
171
170
data_expanded = data_mat .reshape (1 ,shapes [0 ]* shapes [1 ])
172
171
return data_expanded
173
172
174
- def _calculate_gradient_from_pool (self ,out_map ,pd_pool ,num_map ,size_map ,size_pooling ):
173
+ def _calculate_gradient_from_pool (self , out_map , pd_pool ,num_map , size_map , size_pooling ):
175
174
'''
176
175
calcluate the gradient from the data slice of pool layer
177
176
pd_pool: list of matrix
@@ -190,7 +189,7 @@ def _calculate_gradient_from_pool(self,out_map,pd_pool,num_map,size_map,size_poo
190
189
pd_all .append (pd_conv2 )
191
190
return pd_all
192
191
193
- def trian (self ,patterns ,datas_train , datas_teach , n_repeat , error_accuracy ,draw_e = bool ):
192
+ def train (self , patterns , datas_train , datas_teach , n_repeat , error_accuracy , draw_e = bool ):
194
193
#model traning
195
194
print ('----------------------Start Training-------------------------' )
196
195
print ((' - - Shape: Train_Data ' ,np .shape (datas_train )))
@@ -206,7 +205,7 @@ def trian(self,patterns,datas_train, datas_teach, n_repeat, error_accuracy,draw_
206
205
data_train = np .asmatrix (datas_train [p ])
207
206
data_teach = np .asarray (datas_teach [p ])
208
207
data_focus1 ,data_conved1 = self .convolute (data_train ,self .conv1 ,self .w_conv1 ,
209
- self .thre_conv1 ,conv_step = self .step_conv1 )
208
+ self .thre_conv1 ,conv_step = self .step_conv1 )
210
209
data_pooled1 = self .pooling (data_conved1 ,self .size_pooling1 )
211
210
shape_featuremap1 = np .shape (data_conved1 )
212
211
'''
@@ -231,7 +230,7 @@ def trian(self,patterns,datas_train, datas_teach, n_repeat, error_accuracy,draw_
231
230
pd_conv1_pooled = pd_i_all / (self .size_pooling1 * self .size_pooling1 )
232
231
pd_conv1_pooled = pd_conv1_pooled .T .getA ().tolist ()
233
232
pd_conv1_all = self ._calculate_gradient_from_pool (data_conved1 ,pd_conv1_pooled ,shape_featuremap1 [0 ],
234
- shape_featuremap1 [1 ],self .size_pooling1 )
233
+ shape_featuremap1 [1 ],self .size_pooling1 )
235
234
#weight and threshold learning process---------
236
235
#convolution layer
237
236
for k_conv in range (self .conv1 [1 ]):
@@ -268,15 +267,15 @@ def draw_error():
268
267
draw_error ()
269
268
return mse
270
269
271
- def predict (self ,datas_test ):
270
+ def predict (self , datas_test ):
272
271
#model predict
273
272
produce_out = []
274
273
print ('-------------------Start Testing-------------------------' )
275
274
print ((' - - Shape: Test_Data ' ,np .shape (datas_test )))
276
275
for p in range (len (datas_test )):
277
276
data_test = np .asmatrix (datas_test [p ])
278
277
data_focus1 , data_conved1 = self .convolute (data_test , self .conv1 , self .w_conv1 ,
279
- self .thre_conv1 , conv_step = self .step_conv1 )
278
+ self .thre_conv1 , conv_step = self .step_conv1 )
280
279
data_pooled1 = self .pooling (data_conved1 , self .size_pooling1 )
281
280
data_bp_input = self ._expand (data_pooled1 )
282
281
@@ -289,11 +288,11 @@ def predict(self,datas_test):
289
288
res = [list (map (self .do_round ,each )) for each in produce_out ]
290
289
return np .asarray (res )
291
290
292
- def convolution (self ,data ):
291
+ def convolution (self , data ):
293
292
#return the data of image after convoluting process so we can check it out
294
293
data_test = np .asmatrix (data )
295
294
data_focus1 , data_conved1 = self .convolute (data_test , self .conv1 , self .w_conv1 ,
296
- self .thre_conv1 , conv_step = self .step_conv1 )
295
+ self .thre_conv1 , conv_step = self .step_conv1 )
297
296
data_pooled1 = self .pooling (data_conved1 , self .size_pooling1 )
298
297
299
298
return data_conved1 ,data_pooled1
@@ -303,4 +302,4 @@ def convolution(self,data):
303
302
pass
304
303
'''
305
304
I will put the example in another file
306
- '''
305
+ '''
0 commit comments