Commit 88b8375f authored by luckypeter.okonun's avatar luckypeter.okonun
Browse files

"Uploaded CDP_Candes_1D"

parents 824773c6 9813cbd6
Written 18th October 2019 by Alexander Dornheim
Current state of ProxPython (to my knowledge):
Folders:
docs: haven't worked on this
GUI_Python: haven't worked on this
InputData: folder for input data, input data for Phase, ART should be automatically downloaded into this folder
Nanoscale_Photonic_Imaging_demos: haven't worked on this, to contain demos for Russels book (so far 1 demo)
proxgui: haven't worked on this
proxtoolbox: most code, see below
TestSuite: testing/demos, see below
Files:
changes_in_Matlab_since_NahmeZiehe.txt: old, delete
manual.pdf: old
ProxPython_for_Dummies_README.txt: Mostly wrote it in Feb 2018 for students, still most up to date ReadMe. Contains list of demos which should work properly.
ProxPython_how_to_run_demos_on_Windows_for_dummies.txt: Short ReadMe for Windows by students (recent). Contains list of files students had problem running.
ReadMe.md: old, doesn't contain much information
setup.py: never touched this setup file
Proposed clean up:
Are both GUI folders needed? Are they working? -> Jochen
docs -> new documentation by Sylvain in future
delete changes_in_Matlab_since_NahmeZiehe.txt
Combine ReadMes
manual.pdf ?
TestSuite Folder
Phase_test_data: some output from the Matlab version which I used for tests; need to talk to Sylvain about whether he wants to keep this. My tests are very basic and need to be redone properly using unittest
testing_Algortihms: folder with tests for the algorithms Robin implemented using unittest; I haven't touched it personally
testing_ProxOperators: folder with tests for the prox operators Robin implemented using unittest; I haven't touched it personally
*_demo.py: Relocate/ structure in folders: Phase/ART demos etc. List of demos that should be working see ReadMe
to be continued...
"""Phase_JWST_DRAP_demo

Demo script: builds a Phase problem from the configuration provided by
JWST_QNAP_in, runs the solver, and displays the result.

written on Sep 17. 2019 by
Constantin Höing
Inst. Fuer Numerische und Angewandte Mathematik
Universität Göttingen
"""
import sys
# Make the Phase problem modules and the package root importable when this
# script is run directly from its own directory.
sys.path.append('../proxtoolbox/Problems/Phase') # NOTE(review): find out what should be in the string — confirm this relative path is correct
sys.path.append("..")
# Imported but not referenced below — presumably needed for its import-time
# side effects (data preparation/registration); TODO confirm.
import JWST_data_processor
import JWST_QNAP_in   # supplies the experiment configuration dict `new_config`
from phase import Phase
# Construct the problem from the configuration, solve it, and show the output.
JWST = Phase(JWST_QNAP_in.new_config)
JWST.solve()
JWST.show()
......@@ -18,9 +18,9 @@ class HPR(SimpleAlgorithm):
iter = self.config["iter"] +1
beta = np.exp((-iter/beta_switch)**3) * beta0 + (1-np.exp((-iter/beta_switch)**3)) + beta_max
beta = np.exp((-iter/beta_switch)**3) * beta0 + (1-np.exp((-iter/beta_switch)**3)) * beta_max
tmp = self.prox2.work(u)
tmp2 = (2+beta-1)*tmp-u
unew = (2 * self.prox1.work(u) - tmp2 + u +(1-beta)*tmp)/2
return unew
\ No newline at end of file
unew = (2 * self.prox1.work(tmp2) - tmp2 + u +(1-beta)*tmp)/2
return unew
# QNAP.m
# QNAP.py
# written on May 23, 2017 by
# Russell Luke
# Russell Luke/Constantin Höing
# Inst. Fuer Numerische und Angewandte Mathematik
# Universitaet Gottingen
#
......@@ -27,23 +27,40 @@
#
#
from numpy import zeros, isreal, zeros_like
import numpy as np
from .algorithms import Algorithm
import sys
sys.path.append('../../samsara/python')
from samsara import Samsara
try:
sys.path.append('../../samsara/python')
from .samsara import Samsara
except:
print('*********************************************************************')
print('* SAMSARA package missing. Please download from *')
print('* http://vaopt.math.uni-goettingen.de/en/software.php *')
print('* Save and unpack the datafile then copy the python/samsara folder *')
print('* in the proxtoolbox/Algorithms folder. *')
print("* You may need to execute setup.py install again *")
print('******************************************************************')
class QNAP(Algorithm):
class QNAP(Algorithm):
def __init__(self, config):
    """Set up the QNAP algorithm from a configuration dictionary.

    Required keys: 'proxoperators' (two prox-operator factories, each
    called with the full config), 'norm_data', 'Nx', 'Ny', 'Nz',
    'product_space_dimension' and 'anim'.  An optional 'truth' entry
    (the known exact solution, used for error tracking) is stored when
    present.
    """
    # Iteration counter starts at zero.
    self.iter = 0
    # Instantiate the two prox operators from their factories.
    self.prox1, self.prox2 = (factory(config) for factory in config['proxoperators'][:2])
    # Problem geometry.
    self.Nx = config['Nx']
    self.Ny = config['Ny']
    self.Nz = config['Nz']
    self.product_space_dimension = config['product_space_dimension']
    # Normalisation constant for change/gap statistics.
    self.norm_data = config['norm_data']
    # Animation flag for the graphics branch of run().
    self.anim = config["anim"]
    if 'truth' in config:
        self.truth = config['truth']
......@@ -66,7 +83,8 @@ class QNAP(Algorithm):
##### PREPROCESSING
iter = self.iter
prox1 = self.prox1; prox2 = self.prox2
prox1 = self.prox1;
prox2 = self.prox2
if u.ndim < 3:
p = 1
......@@ -128,73 +146,168 @@ class QNAP(Algorithm):
gradfnew_vec = (u[:,0]- tmp_u[:,0])
else:
if iter>3:
uold_vec=reshape(concatenate(real(u[:,0]), imag(u[:,0])),
self.Ny*self.Nx*2, 0);
unew_vec=reshape(concatenate(real(tmp_u[:,0]), imag(tmp_u[:,0])),
self.Ny*self.Nx*2, 0)
uold_vec= np.concatenate(np.real(u[:,0]), np.imag(u[:,0])).reshape(self.Ny*self.Nx*2, 0)
unew_vec=np.concatenate(np.real(tmp_u[:,0]), np.imag(tmp_u[:,0])).reshape(self.Ny*self.Nx*2, 0)
gradfold_vec = gradfnew_vec
gradfnew_vec = uold_vec-unew_vec
unew_vec,uold_vec,gap[iter-0],gradfold_vec, change[iter]= \
feval('samsara', uold_vec, unew_vec,
gap(iter-2)*self.Nx*self.Ny,
gap(iter-1)*self.Nx*self.Ny,
gradfold_vec, gradfnew_vec)
# unew_vec,uold_vec,gap[iter-0],gradfold_vec, change[iter]= \
# feval('samsara', uold_vec, unew_vec,
# gap(iter-2)*self.Nx*self.Ny,
# gap(iter-1)*self.Nx*self.Ny,
# gradfold_vec, gradfnew_vec)
unew_vec, uold_vec, gap[iter], gradfold_vec, change[iter] = Samsara.run(uold_vec, unew_vec, gap(iter-2)*self.Nx*self.Ny, gap(iter-1)*self.Nx*self.Ny, gradfold_vec, gradfnew_vec)
# now reshape unew_vec
tmp_u_vec = unew_vec[0:self.Ny*self.Nx-1]+1j*unew_vec[self.Ny*self.Nx:self.Ny*self.Nx*2-1]
for j in range(self.product_space_dimension):
tmp_u[:,j]=tmp_u_vec
gap[iter-1]=gap[iter-1]/(self.Nx*self.Ny)
else:
uold_vec=reshape([real(u[:,0]), imag(u[:,0])],
self.Ny*self.Nx*2, 1)
unew_vec=reshape([real(tmp_u[:,0]), imag(tmp_u[:,0])],
self.Ny*self.Nx*2, 1)
uold_vec= np.array(np.real(u[:,0]), np.imag(u[:,0])).reshape(self.Ny*self.Nx*2, 1)
unew_vec= np.array( np.real(tmp_u[:,0]), np.imag(tmp_u[:,0])).reshape(self.Ny*self.Nx*2, 1)
gradfnew_vec = uold_vec-unew_vec
elif (p!=1) and (q==1):
if (np.all(isreal(u))):
tmp2 = u[:,:,0]
uold_vec=reshape(tmp2,self.Nx*self.Ny,1)
uold_vec= tmp2.reshape(self.Nx*self.Ny,1)
tmp2 = tmp_u[:,:,0]
unew_vec = reshape(tmp2,self.Nx*self.Ny,1);
unew_vec = tmp2.reshape(self.Nx*self.Ny,1)
if iter<=3:
gradfnew_vec = uold_vec-unew_vec
else:
gradfold_vec = gradfnew_vec
gradfnew_vec = uold_vec-unew_vec
unew_vec,uold_vec,gap[iter-1],gradfold_vec, change[iter]= \
feval('samsara', uold_vec, unew_vec,
self.samsara.run(uold_vec, unew_vec,
gap(iter-2)*self.Nx*self.Ny,
gap(iter-1)*self.Nx*self.Ny,
gradfold_vec, gradfnew_vec);
gradfold_vec, gradfnew_vec)
# now reshape and replace u
gap[iter-1]=gap[iter-1]/(self.Nx*self.Ny);
tmp2=reshape(unew_vec,self.Ny,self.Nx);
tmp2=unew_vec.reshape(self.Ny,self.Nx);
for j in range(self.product_space_dimension):
tmp_u[:,:,j]=tmp2
end
else:
tmp2=concatenate(real(u[:,:,0]),imag(u[:,:,0]))
uold_vec=reshape(tmp2,self.Nx*self.Ny*2,1)
tmp2=concatenate(real(tmp_u[:,:,0]), imag(tmp_u[:,:,0]))
unew_vec = reshape(tmp2,self.Nx*self.Ny*2,1)
tmp2=np.concatenate((np.real(u[:,:,0]),np.imag(u[:,:,0])), axis=1)
uold_vec=tmp2.reshape(self.Nx*self.Ny*2,1)
tmp2= np.concatenate((np.real(tmp_u[:,:,0]), np.imag(tmp_u[:,:,0])), axis=1)
unew_vec = tmp2.reshape(self.Nx*self.Ny*2,1)
if iter<=3:
gradfnew_vec = uold_vec-unew_vec
else:
gradfold_vec = gradfnew_vec
gradfnew_vec = uold_vec-unew_vec
unew_vec,uold_vec,gap[iter-1],gradfold_vec, change[iter]= \
feval('samsara', uold_vec, unew_vec,
self.samsara.run( uold_vec, unew_vec,
gap(iter-2)*self.Nx*self.Ny,
gap(iter-1)*self.Nx*self.Ny,
gradfold_vec, gradfnew_vec)
# now reshape and replace u
gap[iter-1]=gap[iter-1]/(self.Nx*self.Ny)
tmp2=reshape(unew_vec,self.Ny,self.Nx*2)
tmp2=unew_vec.reshape(self.Ny,self.Nx*2)
unew=tmp2[:,1:self.Nx] +1j*tmp2[:,self.Nx+1:2*self.Nx]
for j in range(self.product_space_dimension):
tmp_u[:,:,j]=unew
else: # product space of 3D arrays
print('Cannot handle 3D arrays on the product space yet')
tmp1 = self.prox2.work(tmp_u)
tmp_change=0; tmp_gap=0
if(p==1)and(q==1):
tmp_change = np.linalg.norm(u-tmp_u, 'fro')/norm_data**2
tmp_gap = np.linalg.norm(tmp1-tmp_u,'fro')/norm_data**2
if hasattr(self, 'diagnostic'): #('diagnostic' in config)
if hasattr(self, "truth"): #('truth' in config)
if self.truth_dim[0]==1:
z=tmp_u[0,:]
elif self.truth_dim[0]==1:
z=tmp_u[:,0]
else:
z=tmp_u;
Relerrs[iter] = np.linalg.norm(self.truth - np.exp(-1j*np.angle(np.trace(self.truth*z))) * z, 'fro')/self.norm_truth;
elif(q==1):
for j in range(0,self.product_space_dimension):
tmp_change= tmp_change+ np.linalg.norm(u[:,:,j]-tmp_u[:,:,j], 'fro')/norm_data**2;
tmp_gap = tmp_gap+ np.linalg.norm(tmp1[:,:,j]-tmp_u[:,:,j],'fro')/norm_data**2
if hasattr(self, "truth"): #'truth' in config
Relerrs[iter] = np.linalg.norm(self.truth - np.exp(-1j*np.angle(np.trace(self.truth*tmp_u[:,:,1]))) * tmp_u[:,:,1], 'fro')/self.norm_truth;
else:
Relerrs[iter]=0;
for j in range(0,self.product_space_dimension):
for k in range(r1,self.Nz):
tmp_change= tmp_change+(np.linalg.norm(u[:,:,k,j]-tmp_u[:,:,k,j], 'fro')/norm_data)**2;
# compute (||P_Sx-P_Mx||/norm_data)^2:
tmp_gap = tmp_gap+(np.linalg.norm(tmp1[:,:,k,j]-tmp_u[:,:,k,j],'fro')/(norm_data))**2;
if hasattr(self, "truth") and j == 1: # (any(strcmp('truth',fieldnames(method_input))))&&(j==1)
Relerrs[iter] = Relerrs[iter]+np.linalg.norm(self.truth - np.exp(-1j*np.angle(np.trace(self.truth*tmp_u[:,:,:,1]))) * tmp_u[:,:,k,1], 'fro')/self.norm_truth;
change[iter]=np.sqrt(tmp_change);
gap[iter] = np.sqrt(tmp_gap);
u=tmp_u;
if hasattr(self, "diagnostic"): #(any(strcmp('diagnostic', fieldnames(method_input))))
# graphics
#print(config["anim"])
if (self.anim>=1)and (iter%2==0):
self.u=tmp1;
#self=self.animation(method_output);
#self.animation()
#self.stats.time = toc;
tmp = self.prox1.work(u);
self.u=tmp;
tmp2 = self.prox2.work(u);
if(self.Nx==1):
self.u1 = tmp[:,0];
self.u2 = tmp2[:,0];
elif(self.Ny==1):
self.u1 = tmp[0,:];
self.u2 = tmp2[0,:];
elif(self.Nz==1):
self.u1 = tmp[:,:,0];
self.u2 = tmp2[:,:,0];
else:
self.u1 = tmp;
self.u2 = tmp2;
change=change[0:iter];
self.iter = iter;
print(self.iter)
self.change = change;
if hasattr(self, "diagnostic"): #(any(strcmp('diagnostic', fieldnames(method_input))))
gap=gap[1:iter];
self.gap = gap;
if hasattr(self, "truth"): #(any(strcmp('truth',fieldnames(method_input))))
self.yRelerrs=Relerrs;
# self.output = {}
# for key, val in self.__dict__.items():
# if key != "output":
# self.output[str(key)] = val
return self.__dict__
......@@ -30,5 +30,6 @@ from .DRl import *
from .GRAAL_F import *
from .GRAAL_objective import *
from .KM import *
from .QNAP import *
__all__ = ["AP","HPR","RAAR", "AP_expert", "GRAAL", "RAAR_expert", "DRl", "ADMM", "RRR", "CAARL", "CADRl", "CDRl", "CP", "CPrand", "DRAP", "DRl", "GRAAL_F", "GRAAL_objective", "KM"]
__all__ = ["AP","HPR","RAAR", "AP_expert", "GRAAL", "RAAR_expert", "DRl", "ADMM", "RRR", "CAARL", "CADRl", "CDRl", "CP", "CPrand", "DRAP", "DRl", "GRAAL_F", "GRAAL_objective", "KM", "QNAP"]
"""Provides the class Samsara that is an unconstrained optimiser for
real-valued functions.
Running Samsara
===============
Samsara is written for Python 3 (but works with Python 2.7) and has
the following dependencies:
* `numpy`
* `matplotlib`
You should also add the location of your samsara folder to the
``PYTHONPATH`` environment variable.
To use Samsara with your project copy the `driver.py` file from the
samsara folder to any destination you like and edit it with the
function you want optimised.
Running the Tests
=================
To run the tests you will need to have MATLAB, the `matlab` module for
Python installed (the module can be found with your installed MATLAB
program in `<path-to-matlab>/extern/engines/python`) and pytest
installed. To run the test you then execute
>>> py.test <test_file>
"""
### MODULE FUNCTION IMPORTS ###
# Samsara
from .samsara import Samsara
# Hessian products and Scaler
from .hessian import BFGS1_product, invBFGS1_product, Broyden1_product,\
invBroyden1_product, Broyden2_product, invBroyden2_product,\
delta_MSB2, delta_invBFGS
# History
from .history import fdSY_mat, cdSY_mat
# Step finder
from .step_finder import Dogleg_QN, Explicit_TR
"""
Extending samsara
-----------------
Any functions from additional modules have to be imported in this file.
"""
### MODULE FUNCTION IMPORTS ###
# Samsara
from .samsara import Samsara
# Hessian products and Scaler
from .hessian import BFGS1_product, invBFGS1_product, Broyden1_product,\
invBroyden1_product, Broyden2_product, invBroyden2_product,\
delta_MSB2, delta_invBFGS
# History
from .history import fdSY_mat, cdSY_mat
# Step finder
from .step_finder import Dogleg_QN, Explicit_TR
"""Example of a driver for Samsara.
To create a samsara driver for your own project, copy this file and
add in your function and options.
"""
import matplotlib.pyplot as plt
from numpy import abs, amax, array, load, zeros
from numpy.linalg import norm
from samsara import Samsara, BFGS1_product, Broyden1_product,\
Broyden2_product, Dogleg_QN, Explicit_TR, cdSY_mat, fdSY_mat
def objective(x):
    """Example objective (Engvall function) for the Samsara driver.

    Replace this with your own function when adapting the driver.

    Parameters
    ----------
    x : array_like
        The point at which to evaluate the function (2 dimensional).

    Returns
    -------
    f : float
        The function value at `x`.
    Df : array_like
        The gradient vector of ``f`` at ``x``.

    Notes
    -----
    Your own implementation may use any number of parameters and return
    values, but Samsara always needs the function value together with
    the gradient vector.
    """
    x0, x1 = x[0], x[1]
    # f(x) = x0^4 + x1^4 + 2*x0^2*x1^2 - 4*x0 + 3
    f = x0**4 + x1**4 + 2*x0**2*x1**2 - 4*x0 + 3
    # Analytic gradient of f.
    Df = zeros(2)
    Df[0] = 4*x0**3 + 4*x0*x1**2 - 4
    Df[1] = 4*x1**3 + 4*x0**2*x1
    return f, Df
def plot_result():
    """Placeholder for a user-supplied result-plotting routine.

    Swap this stub for your own visualisation code when adapting the
    driver to a real problem.  Deliberately does nothing.
    """
    return None
def driver():
# You can either implement your function in the objective function
# provided above, or import your function from a different module
# and set it here.
Objective = objective
# ========== SAMSARA ==========
samsara = Samsara()
# --- OPTIONS ---
params = {}
# Extra output from samsara.
params['verbose'] = True
# A regularisation parameter.
params['alpha_0'] = 5e-12
# ...
params['gamma'] = .5
# ...
params['c'] = .01
# ...
params['beta'] = .9999999
# ...
params['eta1'] = .995
# ...
params['eta2'] = .8
# ...
params['eta3'] = .25
# The maximum steps to look back. Must be an integer.
params['maxmem'] = 8
# The initial radius of the trust region.
params['tr'] = 1e+15
tols = {}
# Gradient norm tolerance
# This is used as a stopping criteria for SAMSARA. It must be a
# real number greater than zero.
tols['ngrad_TOL'] = 1e-6
# Step size tolerance
# This is used as a stopping criteria for SAMSARA. It must be a
# real number greater than zero.
tols['step_TOL'] = 1e-12
# Maximum number of iterations
# This is used as a stopping criteria for SAMSARA. It must be
# an integer greater than zero.
tols['Maxit'] = 1000
opts = {}
# Hessian method
# Quasi-Newton methods for estimation of the function's Hessian.
# Broyden implementations use Dogleg_QN lin search methods
# while BFGS implementations use Explicit_TR.
# Methods available:
# - BFGS1_product
# - Broyden1_product
# - Broyden2_product
opts['QN_method'] = BFGS1_product
# Step finder
# Optional: Default is Dogleg_QN
# Methods available:
# - Dogleg_QN
# - Explicit_TR
opts['Step_finder'] = Explicit_TR
# History
# Ordering methods for S, Y matrices in limited memory application.
# Finite difference ordering, fdSY_mat, is recommended for Broyden
# implementations. For BFGS implementations, use conjugate
# direction ordering, cdSY_mat.
opts['History'] = cdSY_mat
# Trust Region Adjust
# Select the method for adjustment of the trust region in optimazation.
opts['update_conditions'] = 'Trust Region'
# Initial Step Scaler
# The norm of the initial step taken in the Cauchy direction.
# This multiplied against the normalized gradient to yield the
# initial direction vector in order to generate the first step
# taken by SAMSARA.
# Assign a value of 0.0D0 to use the default value which is the
# minimum of 1D-1 or the norm(1d-1*gradient).
opts['initial_step'] = 1e+5*tols['step_TOL']
samsara.load_options(params, tols, opts)
# ========== STARTING VALUES ==========
# The starting vector. Set it to an appropriate value.
xold_vec = array([.5, 2.])
# The starting function value and gradient vector.
fold, gradfold_vec = Objective(xold_vec)
# The next point.
xnew_vec = None
# The next function value.
fnew = None
# The next gradient vector.
gradfnew_vec = None
# Keep at this value, so that the main loop executes at least once.
stepsize = 999.
# Keep at this value, so that the main loop executes at least once.
ngradfnew = 999.
# ========== MAIN LOOP ==========
# --- Tolerances ---
# The lower bound for the gradient vector norm.
ngrad_TOL = 2e-14
# The lower bound for the stepsize.
step_TOL = 2e-17
# The maximum number of iterations.
Maxit = 500
# Number of current iterations
it = 0
while it < Maxit and ngradfnew > ngrad_TOL and stepsize > step_TOL:
xnew_vec, xold_vec, fold, gradfold_vec, stepsize =\
samsara.run(xold_vec, xnew_vec, fold, fnew, gradfold_vec,