Commit: Display method and some of the implementations
Showing 24 changed files with 1,026 additions and 190 deletions.
Large diffs are not rendered by default.
@@ -0,0 +1,27 @@
import numpy as np
import matplotlib.pyplot as plt
from pysurrogate.optimize import fit, predict


if __name__ == '__main__':

    # number of samples we will use for this example
    n_samples = 100

    # ---------------------------------------------------------
    # Example 1: One input variable and one target
    # ---------------------------------------------------------

    X = np.random.rand(n_samples, 1) * 4 * np.pi
    Y = np.cos(X)

    # fit the model and predict the data
    model = fit(X, Y, methods='sklearn_dacefit', disp=True, debug=True)

    # dense grid for prediction, as a column vector to match the training shape
    _X = np.linspace(0, 4 * np.pi, 1000)[:, None]
    _Y = predict(model, _X)

    plt.scatter(X, Y, label="Observations")
    plt.plot(_X, _Y, label="Prediction")
    plt.legend()
    plt.show()
Empty file.
Empty file.
pysurrogate/impl/my_dacefit/corr.py
@@ -0,0 +1,16 @@
import numpy as np


# compute the full correlation matrix in one vectorized call: form all
# pairwise differences between the rows of A and B, apply the correlation
# function, and reshape the result into an (n_A, n_B) matrix
def calc_kernel_matrix(A, B, func, theta):
    D = np.repeat(A, B.shape[0], axis=0) - np.tile(B, (A.shape[0], 1))
    K = func(D, theta)
    return np.reshape(K, (A.shape[0], B.shape[0]))


# -------------------------------
# Correlation Functions
# -------------------------------

# Gaussian correlation: exp(-sum_k theta_k * d_k^2) per row of differences D
def corr_gauss(D, theta):
    return np.exp(np.sum(np.square(D) * -theta, axis=1))
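A minimal usage sketch of these two helpers (shapes follow from the code above; the module path is taken from the imports later in this commit):

import numpy as np
from pysurrogate.impl.my_dacefit.corr import calc_kernel_matrix, corr_gauss

A = np.random.rand(5, 2)             # 5 design sites in 2 variables
theta = np.array([1.0, 1.0])         # one width parameter per variable

# R[i, j] = exp(-sum_k theta_k * (A[i, k] - A[j, k])^2)
R = calc_kernel_matrix(A, A, corr_gauss, theta)
print(R.shape)                       # (5, 5)
print(np.allclose(np.diag(R), 1.0))  # True: zero distance gives correlation 1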
@@ -0,0 +1,45 @@
import warnings

import numpy as np
from numpy.linalg import LinAlgError

from pysurrogate.impl.my_dacefit.corr import corr_gauss, calc_kernel_matrix
from pysurrogate.impl.my_dacefit.regr import regr_constant


def fit(X, Y, theta, regr=regr_constant, kernel=corr_gauss):
    # attributes used for convenience
    n_sample, n_var, n_target = X.shape[0], X.shape[1], Y.shape[1]

    # calculate the kernel matrix R and add a small nugget on the
    # diagonal for numerical stability
    R = calc_kernel_matrix(X, X, kernel, theta)
    R += np.eye(n_sample) * (10 + n_sample) * np.finfo(float).eps

    # do the cholesky decomposition
    try:
        C = np.linalg.cholesky(R)
    except LinAlgError:
        warnings.warn("Error while doing Cholesky Decomposition.")
        return {'obj': np.inf}

    # fit the least squares for regression
    F = regr(X)
    Ft = np.linalg.lstsq(C, F, rcond=None)[0]
    Q, G = np.linalg.qr(Ft)
    rcond = 1.0 / np.linalg.cond(G)
    if rcond < 1e-15:
        raise Exception('F is too ill conditioned: Poor combination of regression model and design sites')
    Yt = np.linalg.solve(C, Y)
    beta = np.linalg.lstsq(G, Q.T @ Yt, rcond=None)[0]

    # calculate the residual to fit with the gaussian process and the objective function
    rho = Yt - Ft @ beta
    sigma2 = np.sum(np.square(rho), axis=0) / n_sample
    detR = np.prod(np.power(np.diag(C), (2 / n_sample)))
    obj = np.sum(sigma2) * detR

    # finally gamma to predict values
    gamma = np.linalg.solve(C.T, rho)

    return {'R': R, 'C': C, 'F': F, 'Ft': Ft, 'Q': Q, 'G': G, 'Yt': Yt, 'beta': beta, 'rho': rho,
            '_sigma2': sigma2, 'obj': obj, 'gamma': gamma}
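The returned dictionary carries everything the matching predictor needs. The predictor itself is not part of this hunk; a hedged sketch, following the standard DACE formulation y(x) = f(x) beta + r(x)^T gamma and reusing the helpers above:

import numpy as np
from pysurrogate.impl.my_dacefit.corr import calc_kernel_matrix, corr_gauss
from pysurrogate.impl.my_dacefit.regr import regr_constant

# hypothetical predictor for the dict returned by fit(); X_train and theta
# must be the design sites and hyperparameters that fit() was called with
def predict(model, X_train, X_new, theta, regr=regr_constant, kernel=corr_gauss):
    r = calc_kernel_matrix(X_new, X_train, kernel, theta)  # (n_new, n_sample)
    f = regr(X_new)                                        # (n_new, n_regr)
    return f @ model['beta'] + r @ model['gamma']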
pysurrogate/impl/my_dacefit/regr.py
@@ -0,0 +1,4 @@
import numpy as np


# zero-order (constant) regression model: a single column of ones
def regr_constant(X):
    return np.ones((X.shape[0], 1))
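Alternative trend models plug in the same way. As an illustration only (regr_linear is not part of this diff), a first-order variant might look like:

import numpy as np

# hypothetical first-order regression model: a constant column plus the
# design variables themselves, giving F of shape (n_sample, 1 + n_var)
def regr_linear(X):
    return np.column_stack([np.ones((X.shape[0], 1)), X])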
Empty file.
@@ -0,0 +1,74 @@
import george
import numpy as np
import scipy.optimize as op

from pysurrogate.surrogate import Surrogate


class GPGeorge(Surrogate):
    def __init__(self, kernel):
        Surrogate.__init__(self)
        self.kernel = kernel
        self.model = None
        self.F = None

    def _predict(self, X):
        return self.model.predict(self.F, X, return_var=True)

    def _fit(self, X, F):

        # george expects the targets as a flat vector
        self.F = y = np.asarray(F).ravel()
        n_var = X.shape[1]

        if self.kernel == "linear":
            kernel = george.kernels.LinearKernel(order=2, log_gamma2=0.2, ndim=n_var)
        elif self.kernel == "expsqrt":
            kernel = george.kernels.ExpSquaredKernel(metric=np.ones(n_var), ndim=n_var)
        elif self.kernel == "rational_quad":
            kernel = george.kernels.RationalQuadraticKernel(log_alpha=0.2, metric=np.ones(n_var), ndim=n_var)
        elif self.kernel == "exp":
            kernel = george.kernels.ExpKernel(metric=np.ones(n_var), ndim=n_var)
        elif self.kernel == "polynomial":
            # PolynomialKernel takes an order and log_sigma2 rather than a metric
            kernel = george.kernels.PolynomialKernel(order=2, log_sigma2=0.0, ndim=n_var)
        else:
            raise ValueError("Parameter %s for kernel unknown." % self.kernel)

        gp = george.GP(kernel, fit_mean=True)

        # Define the objective function (negative log-likelihood in this case).
        def nll(p):
            gp.set_parameter_vector(p)
            ll = gp.log_likelihood(y, quiet=True)
            return -ll if np.isfinite(ll) else 1e25

        # And the gradient of the objective function.
        def grad_nll(p):
            gp.set_parameter_vector(p)
            return -gp.grad_log_likelihood(y, quiet=True)

        # You need to compute the GP on the design sites once before starting the optimization.
        gp.compute(X)

        # Print the initial ln-likelihood.
        print(gp.log_likelihood(y))

        # Run the optimization routine.
        p0 = gp.get_parameter_vector()
        results = op.minimize(nll, p0, jac=grad_nll, method="L-BFGS-B")

        # Update the kernel and print the final log-likelihood.
        gp.set_parameter_vector(results.x)
        print(gp.log_likelihood(y))

        self.model = gp

    @staticmethod
    def get_params():
        val = []
        for kernel in ['linear', 'expsqrt', 'rational_quad', 'exp']:
            val.append({'kernel': kernel})
        return val
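A hedged usage sketch, assuming the Surrogate base class simply dispatches to the _fit/_predict hooks shown here (the base class is not part of this commit):

import numpy as np

# toy data: 20 samples of a scalar function of two variables
X = np.random.rand(20, 2)
F = np.sin(X[:, 0]) + np.cos(X[:, 1])

model = GPGeorge(kernel="expsqrt")
model._fit(X, F)

# posterior mean and variance at five new points
mean, var = model._predict(np.random.rand(5, 2))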