-
Notifications
You must be signed in to change notification settings - Fork 1
/
Copy pathindex.py
79 lines (64 loc) · 2.36 KB
/
index.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
import pandas
import tensorflow as tf
import tensorflow.feature_column as fc
import os
import sys
import functools
from constants import used_columns
from persistence import serving_input_receiver_fn
from display import pprint_result, peek_classification
from data import input_fn, split_data_frame, prepare_data
import plots as pl
# Sensor-reading classification with a TF 1.x DNNClassifier.
# Loads accelerometer/gyroscope readings from JSON, trains one classifier per
# tested learning rate, records evaluation metrics, exports each trained
# model, and plots the metric series at the end.

# tf.enable_eager_execution()
# readings_file = 'tram_sensor_readings.json'
readings_file = 'new-sensor-readings.json'

data_frame = pandas.read_json(readings_file)
data_frame = prepare_data(data_frame)
train_df, test_df = split_data_frame(data_frame, train_set_fraction=0.7)

# One numeric feature column per sensor axis:
# a* = accelerometer, g* = gyroscope (x/y/z).
ax = fc.numeric_column('ax')
ay = fc.numeric_column('ay')
az = fc.numeric_column('az')
gx = fc.numeric_column('gx')
gy = fc.numeric_column('gy')
gz = fc.numeric_column('gz')

# Candidate network architectures kept around for experimentation; the loop
# below currently uses [15, 100] (same shape as ok_hidden_units).
big_hidden_units = [1024, 512, 256]
ok_hidden_units = [15, 100]
mid_hidden_units = [20, 40, 20]
small_hidden_units = [20, 1]

EPOCHS_NUM = 40
# EPOCHS_NUM = 1000
ADAM_LEARNING_RATE = 0.005
proxAdagrad_LEARNING_RATE = 0.15
DECAY_STEPS = 100


def make_adam_optimizer(initial_learning_rate=ADAM_LEARNING_RATE):
    """Build an Adam optimizer whose learning rate decays exponentially.

    Must be called inside the estimator's graph context, because
    tf.train.get_global_step() reads the current default graph — hence the
    estimator receives a callable, not an optimizer instance.
    """
    return tf.train.AdamOptimizer(
        learning_rate=tf.train.exponential_decay(
            learning_rate=initial_learning_rate,
            global_step=tf.train.get_global_step(),
            decay_steps=DECAY_STEPS,
            decay_rate=0.96))


# Alternative optimizer; currently unused by the training loop below.
adagrad_optimizer = tf.train.ProximalAdagradOptimizer(
    learning_rate=proxAdagrad_LEARNING_RATE,
    l1_regularization_strength=0.001)

tested_nums = []
accuracies = []
precisions = []
recalls = []

for learning_rate in [x * 0.01 for x in [1]]:
    print(f"testing: {learning_rate}")
    test_hidden_units = [15, 100]
    estimator = tf.estimator.DNNClassifier(
        feature_columns=[ax, ay, az, gx, gy, gz],
        hidden_units=test_hidden_units,
        # BUG FIX: the swept `learning_rate` was previously ignored — the old
        # optimizer lambda always used ADAM_LEARNING_RATE, so every recorded
        # result came from the same rate. functools.partial binds the current
        # loop value eagerly (avoids the late-binding-closure pitfall).
        optimizer=functools.partial(make_adam_optimizer, learning_rate)
    )
    estimator.train(
        input_fn=lambda: input_fn(train_df, num_epochs=EPOCHS_NUM,
                                  shuffle=False, batch_size=64),
        steps=None)
    result = estimator.evaluate(
        lambda: input_fn(test_df, num_epochs=1, shuffle=False, batch_size=64))
    tested_nums.append(learning_rate)
    accuracies.append(result["accuracy"])
    precisions.append(result["precision"])
    recalls.append(result["recall"])
    # Label the printed metrics by name (were cryptically "A:"/"B:").
    print(f"accuracy: {result['accuracy']}, precision: {result['precision']}")
    estimator.export_savedmodel(
        export_dir_base='adam-dnn',
        serving_input_receiver_fn=serving_input_receiver_fn)

pl.show_multiple_series([accuracies, precisions, recalls])
# pprint_result(result)
# peek_classification(estimator, test_df)