Optimizers.py (forked from carefree0910/MachineLearning)
import tensorflow as tf


class Optimizer:
    """Thin wrapper around a tf.train optimizer that exposes a readable name."""
    def __init__(self, lr=1e-3):
        self._lr = lr
        self._opt = None

    @property
    def name(self):
        return str(self)

    def minimize(self, x, *args, **kwargs):
        return self._opt.minimize(x, *args, **kwargs)

    def __str__(self):
        return self.__class__.__name__

    def __repr__(self):
        return str(self)


class MBGD(Optimizer):
    def __init__(self, lr=1e-3):
        Optimizer.__init__(self, lr)
        self._opt = tf.train.GradientDescentOptimizer(self._lr)


class Momentum(Optimizer):
    def __init__(self, lr=1e-3, momentum=0.8):
        Optimizer.__init__(self, lr)
        self._opt = tf.train.MomentumOptimizer(self._lr, momentum)


class NAG(Optimizer):
    def __init__(self, lr=1e-3, momentum=0.8):
        Optimizer.__init__(self, lr)
        self._opt = tf.train.MomentumOptimizer(self._lr, momentum, use_nesterov=True)


class AdaDelta(Optimizer):
    def __init__(self, lr=1e-3, rho=0.95, eps=1e-8):
        Optimizer.__init__(self, lr)
        self._opt = tf.train.AdadeltaOptimizer(self._lr, rho, eps)


class AdaGrad(Optimizer):
    def __init__(self, lr=1e-3, init=0.1):
        Optimizer.__init__(self, lr)
        self._opt = tf.train.AdagradOptimizer(self._lr, init)


class Adam(Optimizer):
    def __init__(self, lr=1e-3, beta1=0.9, beta2=0.999, eps=1e-8):
        Optimizer.__init__(self, lr)
        self._opt = tf.train.AdamOptimizer(self._lr, beta1, beta2, eps)


class RMSProp(Optimizer):
    def __init__(self, lr=1e-3, decay=0.9, momentum=0.0, eps=1e-10):
        Optimizer.__init__(self, lr)
        self._opt = tf.train.RMSPropOptimizer(self._lr, decay, momentum, eps)


# Factory: maps a name string to the corresponding Optimizer subclass
class OptFactory:
    available_optimizers = {
        "MBGD": MBGD, "Momentum": Momentum, "NAG": NAG,
        "AdaDelta": AdaDelta, "AdaGrad": AdaGrad,
        "Adam": Adam, "RMSProp": RMSProp
    }

    def get_optimizer_by_name(self, name, lr, *args, **kwargs):
        try:
            _optimizer = self.available_optimizers[name](lr, *args, **kwargs)
            return _optimizer
        except KeyError:
            raise NotImplementedError("Undefined optimizer '{}'".format(name))
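

# ----------------------------------------------------------------------------
# Usage sketch: a minimal illustration of driving OptFactory, assuming the
# TensorFlow 1.x graph-mode API (tf.placeholder, tf.Session, tf.train.*) that
# the classes above rely on. The toy linear-regression graph and the constants
# used here (learning rate, data shapes, step count) are illustrative only.
# ----------------------------------------------------------------------------
if __name__ == "__main__":
    import numpy as np

    # Toy graph: fit y = w * x + b by minimizing the mean squared error.
    x = tf.placeholder(tf.float32, [None, 1])
    y = tf.placeholder(tf.float32, [None, 1])
    w = tf.Variable(tf.zeros([1, 1]))
    b = tf.Variable(tf.zeros([1]))
    loss = tf.reduce_mean(tf.square(tf.matmul(x, w) + b - y))

    # Look the optimizer up by name; any key in available_optimizers works.
    optimizer = OptFactory().get_optimizer_by_name("Adam", 1e-2)
    train_step = optimizer.minimize(loss)

    data_x = np.random.rand(64, 1).astype(np.float32)
    data_y = 3 * data_x + 1

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        for _ in range(200):
            sess.run(train_step, feed_dict={x: data_x, y: data_y})
        print(optimizer.name, sess.run(loss, feed_dict={x: data_x, y: data_y}))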