Commit 943962eb authored by constantin.hoeing's avatar constantin.hoeing

QNAP demo

parent f060d157
""" Phase_JWST_DRAP_demo
written on Sep 17, 2019 by
Constantin Höing
Inst. Fuer Numerische und Angewandte Mathematik
Universität Göttingen
"""
import sys
sys.path.append('../proxtoolbox/Problems/Phase') # find out what should be in the string
sys.path.append("..")
import JWST_data_processor
import JWST_QNAP_in
from phase import Phase
JWST = Phase(JWST_QNAP_in.new_config)
JWST.solve()
JWST.show()
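
The demo is fully config-driven: the `*_in` module supplies a configuration dictionary, `Phase` wraps it, and `solve`/`show` do the rest. A hedged sketch of reusing the same pattern with another input module (assuming a sibling config module such as `JWST_in` also exposes `new_config`; not verified here):

import JWST_in                      # hypothetical sibling config module
JWST = Phase(JWST_in.new_config)    # same Phase wrapper, different experiment setup
JWST.solve()
JWST.show()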
@@ -35,11 +35,11 @@
 from numpy import zeros, isreal, zeros_like
 import numpy as np
-from algorithms import Algorithm
+from .algorithms import Algorithm
 import sys
 try:
     sys.path.append('../../samsara/python')
-    from samsara import Samsara
+    from .samsara import Samsara
 except:
     print('*********************************************************************')
     print('* SAMSARA package missing. Please download from *')
@@ -60,6 +60,7 @@ class QNAP(Algorithm):
         self.norm_data = config['norm_data']
         self.Nx = config['Nx']; self.Ny = config['Ny']; self.Nz = config['Nz']
         self.product_space_dimension = config['product_space_dimension']
+        self.anim = config["anim"]
         if 'truth' in config:
             self.truth = config['truth']
@@ -195,9 +196,9 @@
                     tmp_u[:,:,j]=tmp2
             else:
-                tmp2=np.concatenate(np.real(u[:,:,0]),np.imag(u[:,:,0]))
+                tmp2=np.concatenate((np.real(u[:,:,0]),np.imag(u[:,:,0])), axis=1)
                 uold_vec=tmp2.reshape(self.Nx*self.Ny*2,1)
-                tmp2= np.concatenate(np.real(tmp_u[:,:,0]), np.imag(tmp_u[:,:,0]))
+                tmp2= np.concatenate((np.real(tmp_u[:,:,0]), np.imag(tmp_u[:,:,0])), axis=1)
                 unew_vec = tmp2.reshape(self.Nx*self.Ny*2,1)
             if iter<=3:
                 gradfnew_vec = uold_vec-unew_vec
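
Both changed pairs above fix the same bug: `np.concatenate` expects a tuple of arrays, so the old calls passed the imaginary part where the `axis` argument belongs and raised a TypeError at runtime. A standalone illustration (shapes chosen arbitrarily):

import numpy as np

a = np.ones((2, 2))
b = np.zeros((2, 2))
# np.concatenate(a, b)  # TypeError: the second positional argument is `axis`
stacked = np.concatenate((a, b), axis=1)  # shape (2, 4): a beside b
vec = stacked.reshape(2*2*2, 1)           # (8, 1) column vector, as fed to SAMSARA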
@@ -218,7 +219,7 @@
             else: # product space of 3D arrays
                 print('Cannot handle 3D arrays on the product space yet')
-            tmp1 = self.prox2.run(tmp_u)
+            tmp1 = self.prox2.work(tmp_u)
             tmp_change=0; tmp_gap=0
             if(p==1)and(q==1):
@@ -233,7 +234,7 @@
                 else:
                     z=tmp_u;
-                Relerrs[iter] = np.linalg.norm(self.truth - np.exp(-1j*np.angle(trace(self.truth*z))) * z, 'fro')/self.norm_truth;
+                Relerrs[iter] = np.linalg.norm(self.truth - np.exp(-1j*np.angle(np.trace(self.truth*z))) * z, 'fro')/self.norm_truth;
             elif(q==1):
@@ -253,7 +254,7 @@
                         tmp_gap = tmp_gap+(np.linalg.norm(tmp1[:,:,k,j]-tmp_u[:,:,k,j],'fro')/(norm_data))**2;
                     if hasattr(self, "truth") and j == 1: # (any(strcmp('truth',fieldnames(method_input))))&&(j==1)
-                        Relerrs[iter] = Relerrs[iter]+np.linalg.norm(self.truth - exp(-1j*angle(trace(self.truth*tmp_u[:,:,:,1]))) * tmp_u[:,:,k,1], 'fro')/self.norm_truth;
+                        Relerrs[iter] = Relerrs[iter]+np.linalg.norm(self.truth - np.exp(-1j*np.angle(np.trace(self.truth*tmp_u[:,:,:,1]))) * tmp_u[:,:,k,1], 'fro')/self.norm_truth;
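
The second recurring fix in this and the previous hunk is namespacing: `exp`, `angle` and `trace` were never imported as bare names, so they now resolve via `np.`. The quantity being computed is a relative error that ignores a global phase factor; a simplified, self-contained sketch of that idea (using an explicit conjugating inner product, whereas the committed line keeps the MATLAB-ported `np.trace(self.truth*z)` form):

import numpy as np

truth = np.array([[1+1j, 2+0j], [0+0j, 1-1j]])
z = np.exp(1j*0.7) * truth              # same array up to a global phase
theta = np.angle(np.vdot(truth, z))     # vdot conjugates its first argument: theta = 0.7
relerr = (np.linalg.norm(truth - np.exp(-1j*theta)*z, 'fro')
          / np.linalg.norm(truth, 'fro'))
print(relerr)                           # ~0: z matches truth once the phase is removed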
@@ -264,18 +265,21 @@
             if hasattr(self, "diagnostic"): #(any(strcmp('diagnostic', fieldnames(method_input))))
                 # graphics
-                if (anim>=1)and (iter%2==0):
+                #print(config["anim"])
+                if (self.anim>=1)and (iter%2==0):
                     self.u=tmp1;
-                    self=self.animation(method_output);
+                    #self=self.animation(method_output);
+                    #self.animation()
-        self.stats.time = toc;
+        #self.stats.time = toc;
-        tmp = self.prox1(u);
+        tmp = self.prox1.work(u);
         self.u=tmp;
-        tmp2 = self.prox2(u);
+        tmp2 = self.prox2.work(u);
         if(self.Nx==1):
             self.u1 = tmp[:,0];
             self.u2 = tmp2[:,0];
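
The `self.prox1(u)` to `self.prox1.work(u)` changes match the convention used earlier in this commit (`self.prox2.work(tmp_u)`): prox operators are objects exposing a `work` method rather than plain callables. A minimal sketch of the interface this assumes (hypothetical class for illustration, not the actual proxtoolbox base):

class ProxOperator:
    """Hypothetical minimal interface assumed by QNAP."""
    def work(self, u):
        # return the projection/prox of u; concrete operators override this
        raise NotImplementedError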
@@ -289,15 +293,21 @@
             self.u1 = tmp;
             self.u2 = tmp2;
-        change=change[1:iter];
-        self.stats.iter = iter-1;
-        self.stats.change = change;
+        change=change[0:iter];
+        self.iter = iter;
+        print(self.iter)
+        self.change = change;
         if hasattr(self, "diagnostic"): #(any(strcmp('diagnostic', fieldnames(method_input))))
             gap=gap[1:iter];
-            self.stats.gap = gap;
+            self.gap = gap;
         if hasattr(self, "truth"): #(any(strcmp('truth',fieldnames(method_input))))
-            self.stats.Relerrs=Relerrs;
+            self.yRelerrs=Relerrs;
+        # self.output = {}
+        # for key, val in self.__dict__.items():
+        #     if key != "output":
+        #         self.output[str(key)] = val
+        return self.__dict__
@@ -30,5 +30,6 @@ from .DRl import *
 from .GRAAL_F import *
 from .GRAAL_objective import *
 from .KM import *
+from .QNAP import *
-__all__ = ["AP","HPR","RAAR", "AP_expert", "GRAAL", "RAAR_expert", "DRl", "ADMM", "RRR", "CAARL", "CADRl", "CDRl", "CP", "CPrand", "DRAP", "DRl", "GRAAL_F", "GRAAL_objective", "KM"]
+__all__ = ["AP","HPR","RAAR", "AP_expert", "GRAAL", "RAAR_expert", "DRl", "ADMM", "RRR", "CAARL", "CADRl", "CDRl", "CP", "CPrand", "DRAP", "DRl", "GRAAL_F", "GRAAL_objective", "KM", "QNAP"]
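
With `from .QNAP import *` re-exported at package level and "QNAP" appended to `__all__`, the new algorithm is importable both directly and via star-imports:

from algorithms import QNAP     # bound by the new re-export line
from algorithms import *        # "QNAP" in __all__ makes this bring QNAP too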
"""Provides the class Samsara that is an unconstrained optimiser for
real-valued functions.
Running Samsara
===============
Samsara is written for Python 3 (but works with Python 2.7) and has
the following dependencies:
* `numpy`
* `matplotlib`
You should also add the location of your samsara folder to the
``PYTHONPATH`` environment variable.
To use Samsara with your project copy the `driver.py` file from the
samsara folder to any destination you like and edit it with the
function you want optimised.
Running the Tests
=================
To run the tests you will need to have MATLAB, the `matlab` module for
Python installed (the module can be found with your installed MATLAB
program in `<path-to-matlab>/extern/engines/python`) and pytest
installed. To run the test you then execute
>>> py.test <test_file>
"""
### MODULE FUNCTION IMPORTS ###
# Samsara
from .samsara import Samsara
# Hessian products and Scaler
from .hessian import BFGS1_product, invBFGS1_product, Broyden1_product,\
    invBroyden1_product, Broyden2_product, invBroyden2_product,\
    delta_MSB2, delta_invBFGS
# History
from .history import fdSY_mat, cdSY_mat
# Step finder
from .step_finder import Dogleg_QN, Explicit_TR
"""
Extending samsara
-----------------
Any functions from additional modules have to be imported in this file.
"""
### MODULE FUNCTION IMPORTS ###
# Samsara
from .samsara import Samsara
# Hessian products and Scaler
from .hessian import BFGS1_product, invBFGS1_product, Broyden1_product,\
invBroyden1_product, Broyden2_product, invBroyden2_product,\
delta_MSB2, delta_invBFGS
# History
from .history import fdSY_mat, cdSY_mat
# Step finder
from .step_finder import Dogleg_QN, Explicit_TR
"""Example of a driver for Samsara.
To create a samsara driver for your own project, copy this file and
add in your function and options.
"""
import matplotlib.pyplot as plt
from numpy import abs, amax, array, load, zeros
from numpy.linalg import norm
from samsara import Samsara, BFGS1_product, Broyden1_product,\
    Broyden2_product, Dogleg_QN, Explicit_TR, cdSY_mat, fdSY_mat
def objective(x):
    """Example objective (Engvall function).

    Replace this code with your actual function.

    Parameters
    ----------
    x : array_like
        The point to evaluate the function at (2 dimensional).

    Returns
    -------
    f : float
        The function value at `x`.
    Df : array_like
        The gradient vector of ``f`` at ``x``.

    Notes
    -----
    In your implementation you may use any number of parameters and
    return values. But samsara will always need the function value
    and the gradient vector.
    """
    f = x[0]**4 + x[1]**4 + 2*x[0]**2*x[1]**2 - 4*x[0] + 3
    Df = zeros(2)
    Df[0] = 4*x[0]**3 + 4*x[0]*x[1]**2 - 4
    Df[1] = 4*x[1]**3 + 4*x[0]**2*x[1]
    return f, Df
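
# Worked check (added for illustration) at the driver's starting point
# x = (0.5, 2) used below:
#     f  = 0.5**4 + 2**4 + 2*(0.5**2)*(2**2) - 4*0.5 + 3 = 19.0625
#     Df = [4*0.5**3 + 4*0.5*2**2 - 4, 4*2**3 + 4*0.5**2*2] = [4.5, 34.0]
# so objective(array([.5, 2.])) returns (19.0625, array([4.5, 34.])).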
def plot_result():
    """Example of a custom plot function for the result.

    Replace this code with your actual plotting routine.
    """
    pass
def driver():
    # You can either implement your function in the objective function
    # provided above, or import your function from a different module
    # and set it here.
    Objective = objective

    # ========== SAMSARA ==========
    samsara = Samsara()
    # --- OPTIONS ---
    params = {}
    # Extra output from samsara.
    params['verbose'] = True
    # A regularisation parameter.
    params['alpha_0'] = 5e-12
    # ...
    params['gamma'] = .5
    # ...
    params['c'] = .01
    # ...
    params['beta'] = .9999999
    # ...
    params['eta1'] = .995
    # ...
    params['eta2'] = .8
    # ...
    params['eta3'] = .25
    # The maximum steps to look back. Must be an integer.
    params['maxmem'] = 8
    # The initial radius of the trust region.
    params['tr'] = 1e+15

    tols = {}
    # Gradient norm tolerance
    # This is used as a stopping criterion for SAMSARA. It must be a
    # real number greater than zero.
    tols['ngrad_TOL'] = 1e-6
    # Step size tolerance
    # This is used as a stopping criterion for SAMSARA. It must be a
    # real number greater than zero.
    tols['step_TOL'] = 1e-12
    # Maximum number of iterations
    # This is used as a stopping criterion for SAMSARA. It must be
    # an integer greater than zero.
    tols['Maxit'] = 1000

    opts = {}
    # Hessian method
    # Quasi-Newton methods for estimation of the function's Hessian.
    # Broyden implementations use the Dogleg_QN line search method,
    # while BFGS implementations use Explicit_TR.
    # Methods available:
    #     - BFGS1_product
    #     - Broyden1_product
    #     - Broyden2_product
    opts['QN_method'] = BFGS1_product
    # Step finder
    # Optional: Default is Dogleg_QN
    # Methods available:
    #     - Dogleg_QN
    #     - Explicit_TR
    opts['Step_finder'] = Explicit_TR
    # History
    # Ordering methods for S, Y matrices in limited memory application.
    # Finite difference ordering, fdSY_mat, is recommended for Broyden
    # implementations. For BFGS implementations, use conjugate
    # direction ordering, cdSY_mat.
    opts['History'] = cdSY_mat
    # Trust Region Adjust
    # Select the method for adjustment of the trust region in optimization.
    opts['update_conditions'] = 'Trust Region'
    # Initial Step Scaler
    # The norm of the initial step taken in the Cauchy direction.
    # This is multiplied against the normalized gradient to yield the
    # initial direction vector in order to generate the first step
    # taken by SAMSARA.
    # Assign a value of 0.0 to use the default value, which is the
    # minimum of 1e-1 or norm(1e-1*gradient).
    opts['initial_step'] = 1e+5*tols['step_TOL']

    samsara.load_options(params, tols, opts)

    # ========== STARTING VALUES ==========
    # The starting vector. Set it to an appropriate value.
    xold_vec = array([.5, 2.])
    # The starting function value and gradient vector.
    fold, gradfold_vec = Objective(xold_vec)
    # The next point.
    xnew_vec = None
    # The next function value.
    fnew = None
    # The next gradient vector.
    gradfnew_vec = None
    # Keep at this value, so that the main loop executes at least once.
    stepsize = 999.
    # Keep at this value, so that the main loop executes at least once.
    ngradfnew = 999.

    # ========== MAIN LOOP ==========
    # --- Tolerances ---
    # The lower bound for the gradient vector norm.
    ngrad_TOL = 2e-14
    # The lower bound for the stepsize.
    step_TOL = 2e-17
    # The maximum number of iterations.
    Maxit = 500
    # Number of current iterations
    it = 0

    while it < Maxit and ngradfnew > ngrad_TOL and stepsize > step_TOL:
        xnew_vec, xold_vec, fold, gradfold_vec, stepsize =\
            samsara.run(xold_vec, xnew_vec, fold, fnew, gradfold_vec,
                        gradfnew_vec)
        it += 1
        fnew, gradfnew_vec = Objective(xnew_vec)
        ngradfnew = norm(gradfnew_vec)

    if stepsize <= step_TOL:
        print('Algorithm stagnated: stepsize tolerance violated.')
    if ngradfnew <= ngrad_TOL:
        print('Algorithm stagnated: gradient norm tolerance violated.')
    if it >= Maxit:
        print('Algorithm exceeded: maximum step count violated.')

    # TODO(benedikt) Rework from here
    # You can save some of the values of samsara.
    # samsara.save()
    # You can now plot your results.
    # plot_result()
    # Return the optimal values.
    return xnew_vec, fnew


# Entry point into the driver.
if __name__ == '__main__':
    driver()
"""Contains all implementations of hessian products.
Hessian functions
=================
Some introductory text.
.. autofunction:: samsara.hessian.BFGS1_product
.. autofunction:: samsara.hessian.Broyden1_product
.. autofunction:: samsara.hessian.Broyden2_product
.. autofunction:: samsara.hessian.invBFGS1_product
.. autofunction:: samsara.hessian.invBroyden1_product
.. autofunction:: samsara.hessian.invBroyden2_product
.. autofunction:: samsara.hessian_wrapper
Scaler functions
================
These functions are used to determine the `scale` variable for the
hessian functions.
.. autofunction:: samsara.hessian.delta_invBFGS
.. autofunction:: samsara.hessian.delta_MSB2
.. autofunction:: samsara.hessian.scaler_wrapper
"""
from numpy import abs, c_, dot, diag, eye, r_, sqrt, tril, triu, zeros
from numpy.linalg import cholesky, norm, solve
from numpy.random import random, seed
# PYTHON PORT ANNOTATION
# Removed parameters dim and mem, as the value can be obtained from
# the shape of S_mat/Y_mat.
def delta_MSB2(S_mat, Y_mat, alpha):
    """Scaling for the MSB2 matrix.

    Parameters
    ----------
    S_mat : array_like
        The matrix of steps.
    Y_mat : array_like
        The matrix of gradients.
    alpha : float
        A regularisation parameter.

    Returns
    -------
    float
        TODO

    See Also
    --------
    delta_invBFGS : Alternative function.
    samsara.samsara.Normsqresid, samsara.step_finder.Step_generator
    """
    dim = Y_mat.shape[1]
    # normalize the columns of Y_mat for stability
    ny = zeros(dim)
    for j in range(dim):
        ny[j] = 1./norm(Y_mat[:, j])
    # PYTHON PORT ANNOTATION
    # In Python we can't modify the input parameters as they are
    # just references to the originals in the calling function.
    Yc_mat = dot(Y_mat, diag(ny))
    Sc_mat = dot(S_mat, diag(ny))
    # Look into doing a Cholesky factorization of Y'Y for stability
    M_mat = solve(dot(Yc_mat.T, Yc_mat) + alpha*eye(dim, dim), Yc_mat.T)
    seed(5489)  # sets the same seed for the same 'random' vector
    v0_vec = random(S_mat.shape[0])  # used to compute Rayleigh quotient
    delta0 = 0
    delta = 10
    RQ_iter = 0
    v_vec = dot(Sc_mat, dot(M_mat, v0_vec))
    while abs((delta - delta0)/delta) > 1e-5 and RQ_iter < 10:
        RQ_iter += 1
        v0_vec = v_vec/sqrt(dot(v_vec, v_vec))
        delta0 = delta
        v_vec = dot(Sc_mat, dot(M_mat, v0_vec))
        delta = dot(v0_vec, v_vec)
    return abs(delta)
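
# Illustration (hypothetical two-column history, added for clarity): with
# S_mat = eye(2), Y_mat = diag([2., 4.]) and alpha ~ 0, the column
# normalisation gives Yc_mat = eye(2) and Sc_mat = diag([.5, .25]), so the
# Rayleigh quotient iteration converges to the dominant eigenvalue:
#     delta_MSB2(eye(2), diag([2., 4.]), 1e-12)  ->  ~0.5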
# PYTHON PORT ANNOTATION
# Removed parameter dim, as the value can be obtained from
# the shape of S_mat/Y_mat.
def delta_invBFGS(mem, S_mat, Y_mat):
    """Scaling for the inverse BFGS matrix.

    Parameters
    ----------
    mem : int
        The step to use.
    S_mat : array_like
        The matrix of steps.
    Y_mat : array_like
        The matrix of gradients.

    Returns
    -------
    float
        TODO

    See Also
    --------
    BFGS1_product : TODO
    delta_MSB2 : Alternative function.
    """
    # print('--- in delta_invBFGS:58 ---')
    # print('S_mat[:dim, mem-1] =', S_mat[:dim, mem-1])
    # print('Y_mat[:dim, mem-1] =', Y_mat[:dim, mem-1])
    # print('--- out delta_invBFGS:61 ---')
    return abs(dot(S_mat[:, mem-1], Y_mat[:, mem-1]))\
        / dot(Y_mat[:, mem-1], Y_mat[:, mem-1])
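
# Worked example (added for illustration): with the most recent pair
# s = S_mat[:, mem-1] = [1., 0.] and y = Y_mat[:, mem-1] = [2., 0.],
# the scaling is |<s, y>| / <y, y> = 2/4 = 0.5.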
def scaler_wrapper(Scaler, mem, S_mat, Y_mat, alpha):
    """A wrapper for all scaler functions.

    Used to have unified function calls.

    Parameters
    ----------
    Scaler : function
        The scaler function to use.
    mem : int
        The step to use.
    S_mat : array_like
        The matrix of steps.
    Y_mat : array_like
        The matrix of gradients.
    alpha : float
        A regularisation parameter (used by delta_MSB2).

    Returns
    -------
    float
        TODO

    See Also
    --------
    delta_MSB2, delta_invBFGS
    """
    if Scaler is delta_invBFGS:
        return delta_invBFGS(mem, S_mat, Y_mat)
    if Scaler is delta_MSB2:
        return delta_MSB2(S_mat, Y_mat, alpha)
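
# Dispatch example (added for illustration): the wrapper only routes on the
# function object, so these two calls are equivalent (alpha is ignored here):
#     scaler_wrapper(delta_invBFGS, mem, S_mat, Y_mat, alpha)
#     delta_invBFGS(mem, S_mat, Y_mat)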
def BFGS1_product(u_vec, S_mat, Y_mat, scale):
    """An implementation of the BFGS algorithm.

    Parameters
    ----------
    u_vec : array_like
        The vector to be multiplied.
    S_mat : array_like
        The matrix of steps.
    Y_mat : array_like
        The matrix of gradient differences.
    scale : float
        The initial scaling.

    Returns
    -------
    array_like
        TODO

    See Also
    --------
    delta_invBFGS : TODO
    Broyden1_product, Broyden2_product
    """
    STY_mat = dot(S_mat.T, Y_mat)
    D_mat = diag(diag(STY_mat))
    L_mat = tril(STY_mat, -1)
    Di_mat = diag(diag(STY_mat)**(-1))
    R_mat = 1./scale*dot(S_mat.T, S_mat) + dot(L_mat, dot(Di_mat, L_mat.T))
    R_mat = cholesky(R_mat).T
    Dsqrt_mat = diag(diag(STY_mat)**.5)
    Disqrt_mat = diag(diag(STY_mat)**(-.5))
    Phi_mat = c_[Y_mat, (1./scale)*S_mat]
    Gam1_mat = r_[c_[-Dsqrt_mat, dot(Disqrt_mat, L_mat.T)],
                  c_[zeros(D_mat.shape), R_mat]]
    Gam2_mat = r_[c_[Dsqrt_mat, zeros(D_mat.shape)],
                  c_[-dot(L_mat, Disqrt_mat), R_mat