algo.py
import numpy
import warnings


def hessian_update_sr1(x, g, x0, g0, h0):
    """Symmetric rank-one (SR1) update of the Hessian approximation h0."""
    dx = numpy.subtract(x, x0)
    dg = numpy.subtract(g, g0)
    h0dx = numpy.dot(h0, dx)
    ddg = dg - h0dx
    # skip the update when the SR1 denominator vanishes, to avoid division by zero
    denom = numpy.dot(ddg, dx)
    if numpy.abs(denom) < numpy.finfo(float).eps:
        return h0
    else:
        h = h0 + numpy.outer(ddg, ddg) / denom
        return h


def enforce_max_step_size(dx, smax):
    s = numpy.linalg.norm(dx)
    return dx if s < smax else dx * smax / s


def optimize_quasi_newton(x0, func, grad, smax=0.3, gtol=1e-5, maxiter=50):
    """Quasi-Newton minimization using the gradient grad and SR1 Hessian updates."""
    dim = numpy.size(x0)
    x0 = numpy.array(x0)
    g0 = grad(x0)
    # initial Hessian guess: a scaled identity chosen so the first step has length smax
    h0 = numpy.linalg.norm(g0) / smax * numpy.eye(dim)
    converged = False
    traj = [x0]
    for iteration in range(maxiter):
        # quasi-Newton step dx = -H^-1 g, using the pseudo-inverse for robustness
        dx = - numpy.dot(numpy.linalg.pinv(h0), g0)
        dx = enforce_max_step_size(dx, smax)
        x = x0 + dx
        traj.append(x)
        g = grad(x)
        gmax = numpy.amax(numpy.abs(g))
        converged = gmax < gtol
        if converged:
            break
        else:
            h = hessian_update_sr1(x=x, g=g, x0=x0, g0=g0, h0=h0)
            x0 = x
            g0 = g
            h0 = h
    if not converged:
        warnings.warn("Did not converge!")
    return x, traj


def optimize_newton_raphson(x0, func, grad, hess, smax=0.3, gtol=1e-5,
                            maxiter=50):
    """Newton-Raphson minimization using the exact gradient and Hessian."""
    x0 = numpy.array(x0)
    converged = False
    traj = [x0]
    for iteration in range(maxiter):
        g0 = grad(x0)
        h0 = hess(x0)
        # Newton step dx = -H^-1 g with the exact Hessian, via the pseudo-inverse
        dx = - numpy.dot(numpy.linalg.pinv(h0), g0)
        dx = enforce_max_step_size(dx, smax)
        x = x0 + dx
        traj.append(x)
        g = grad(x)
        gmax = numpy.amax(numpy.abs(g))
        converged = gmax < gtol
        if converged:
            break
        else:
            x0 = x
    if not converged:
        warnings.warn("Did not converge!")
    return x, traj


def optimize_gradient_descent(x0, func, grad, smax=0.3, gtol=1e-5, maxiter=50):
    """Gradient descent with a step length adapted from gradient changes."""
    x0 = numpy.array(x0)
    g0 = grad(x0)
    s = smax
    converged = False
    traj = [x0]
    for iteration in range(maxiter):
        # step of length s along the normalized negative gradient
        dx = - s * g0 / numpy.linalg.norm(g0)
        dx = enforce_max_step_size(dx, smax)
        x = x0 + dx
        traj.append(x)
        g = grad(x)
        gmax = numpy.amax(numpy.abs(g))
        converged = gmax < gtol
        if converged:
            break
        else:
            # re-estimate the step length from the observed change in the gradient
            dg = g - g0
            s = numpy.vdot(dx, dg) * numpy.linalg.norm(g0) / numpy.vdot(dg, dg)
            x0 = x
            g0 = g
    if not converged:
        warnings.warn("Did not converge!")
    return x, traj
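

# Minimal usage sketch (not part of the original module): the quadratic test
# function, its derivatives, and the starting point below are illustrative
# assumptions added to show how the three optimizers are called.
if __name__ == "__main__":
    def func(x):
        return 0.5 * numpy.dot(x, x)

    def grad(x):
        return numpy.array(x, dtype=float)

    def hess(x):
        return numpy.eye(numpy.size(x))

    x_start = [1.0, -2.0]
    x_qn, traj_qn = optimize_quasi_newton(x_start, func, grad)
    x_nr, traj_nr = optimize_newton_raphson(x_start, func, grad, hess)
    # the step-limited gradient descent converges more slowly on this example,
    # so allow it more iterations
    x_gd, traj_gd = optimize_gradient_descent(x_start, func, grad, maxiter=500)
    print("quasi-Newton:    ", x_qn, "steps:", len(traj_qn) - 1)
    print("Newton-Raphson:  ", x_nr, "steps:", len(traj_nr) - 1)
    print("gradient descent:", x_gd, "steps:", len(traj_gd) - 1)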