Commit 454213c7 authored by alexander.dornheim's avatar alexander.dornheim
Browse files

Added class SimpleAlgorithm, which works similarly to algorithm_wrapper in ProxMatlab

Added new AP using SimpleAlgorithm
Moved old AP to AP_expert
parent ff967516
# -*- coding: utf-8 -*-
"""
Created on Mon Dec 14 13:08:06 2015
from .SimpleAlgortihm import SimpleAlgorithm
@author: rebecca
"""
class AP(SimpleAlgorithm):
from math import sqrt
from numpy import zeros
from scipy.linalg import norm
from .algorithms import Algorithm
class AP(Algorithm):
    """
    Alternating Projections.

    Repeatedly applies two projection (prox) operators in alternation and
    records, per iteration, the normalized change between successive
    iterates and the gap between the two constraint sets.
    """

    def __init__(self, config):
        """
        Parameters
        ----------
        config : dict
            Dictionary containing the problem configuration.
            It must contain the following mappings:

            proxoperators : sequence of ProxOperator classes
                The two prox operator classes (not instances); each is
                instantiated here with ``config``.
            normM : number
                Normalization constant used in the change/gap computations.
            Nx : int
                Row-dim of the product space elements.
            Ny : int
                Column-dim of the product space elements.
            Nz : int
                Depth-dim of the product space elements.
            product_space_dimension : int
                Size of the product space.
            truth, truth_dim, norm_truth : optional
                Ground-truth data; when present, relative errors are
                tracked during the run.

        Note: beta0/beta_max/beta_switch are documented by callers but are
        not read by this algorithm.
        """
        self.prox1 = config['proxoperators'][0](config)
        self.prox2 = config['proxoperators'][1](config)
        self.normM = config['normM']
        self.Nx = config['Nx']
        self.Ny = config['Ny']
        self.Nz = config['Nz']
        self.product_space_dimension = config['product_space_dimension']
        self.iters = 0
        if 'truth' in config:
            self.truth = config['truth']
            self.truth_dim = config['truth_dim']
            self.norm_truth = config['norm_truth']

    def run(self, u, tol, maxiter):
        """
        Run alternating projections on the specified input data.

        Parameters
        ----------
        u : ndarray
            Initial iterate (2-, 3- or 4-dimensional).
        tol : number
            Stopping tolerance on the change between successive iterates.
        maxiter : int
            Maximum number of iterations.

        Returns
        -------
        dict
            Keys 'u1' and 'u2' (last iterate projected by prox1/prox2),
            'iter' (iterations performed), 'change' and 'gap' (per-iteration
            diagnostics).
        """
        # BUG FIX: exp/angle/trace are used in the truth branch below but
        # were never imported at module level; import them locally.
        from numpy import exp, angle, trace

        prox1 = self.prox1
        prox2 = self.prox2

        # p/q encode how many product-space "pages" the iterate carries.
        if u.ndim < 3:
            p = 1
            q = 1
        elif u.ndim == 3:
            p = u.shape[2]
            q = 1
        else:
            p = u.shape[2]
            q = u.shape[3]

        normM = self.normM
        iters = self.iters
        change = zeros(maxiter + 1)
        change[0] = 999  # sentinel so the first loop test passes
        gap = change.copy()
        if hasattr(self, 'truth'):
            Relerrs = change.copy()

        tmp1 = prox2.work(u)

        while iters < maxiter and change[iters] >= tol:
            iters += 1
            tmp_u = prox1.work(tmp1)
            tmp1 = prox2.work(tmp_u)

            tmp_change = 0
            tmp_gap = 0
            if p == 1 and q == 1:
                tmp_change = (norm(u - tmp_u, 'fro') / normM) ** 2
                tmp_gap = (norm(tmp1 - tmp_u, 'fro') / normM) ** 2
                if hasattr(self, 'truth'):
                    if self.truth_dim[0] == 1:
                        z = tmp_u[0, :]
                    elif self.truth_dim[1] == 1:
                        z = tmp_u[:, 0]
                    else:
                        z = tmp_u
                    # BUG FIX: the original indexed with the builtin `iter`
                    # instead of the loop counter `iters`.
                    Relerrs[iters] = norm(self.truth - exp(-1j * angle(trace(self.truth.T * z))) * z, 'fro') / self.norm_truth
            elif q == 1:
                for j in range(self.product_space_dimension):
                    tmp_change = tmp_change + (norm(u[:, :, j] - tmp_u[:, :, j], 'fro') / normM) ** 2
                    # compute (||P_SP_Mx-P_Mx||/normM)^2:
                    tmp_gap = tmp_gap + (norm(tmp1[:, :, j] - tmp_u[:, :, j], 'fro') / normM) ** 2
            else:
                for j in range(self.product_space_dimension):
                    for k in range(self.Nz):
                        tmp_change = tmp_change + (norm(u[:, :, k, j] - tmp_u[:, :, k, j], 'fro') / normM) ** 2
                        # compute (||P_Sx-P_Mx||/normM)^2:
                        tmp_gap = tmp_gap + (norm(tmp1[:, :, k, j] - tmp_u[:, :, k, j], 'fro') / normM) ** 2

            change[iters] = sqrt(tmp_change)
            gap[iters] = sqrt(tmp_gap)
            u = tmp_u

        # Final projections of the last iterate onto both sets.
        tmp = prox1.work(u)
        tmp2 = prox2.work(u)
        if self.Nx == 1:
            u1 = tmp[:, 0]
            u2 = tmp2[:, 0]
        elif self.Ny == 1:
            u1 = tmp[0, :]
            u2 = tmp2[0, :]
        elif self.Nz == 1 and tmp.ndim > 2:
            u1 = tmp[:, :, 0]
            u2 = tmp2[:, :, 0]
        else:
            u1 = tmp
            u2 = tmp2

        change = change[1:iters + 1]
        gap = gap[1:iters + 1]

        return {'u1': u1, 'u2': u2, 'iter': iters, 'change': change, 'gap': gap}
def evaluate(self, u):
    """One alternating-projections step: apply prox2 to *u*, then prox1."""
    intermediate = self.prox2.work(u)
    return self.prox1.work(intermediate)
# -*- coding: utf-8 -*-
"""
Created on Mon Dec 14 13:08:06 2015
@author: rebecca
"""
from math import sqrt
from numpy import zeros
from scipy.linalg import norm
from .algorithms import Algorithm
class AP(Algorithm):
    """
    Alternating Projections.

    Repeatedly applies two projection (prox) operators in alternation and
    records, per iteration, the normalized change between successive
    iterates and the gap between the two constraint sets.
    """

    def __init__(self, config):
        """
        Parameters
        ----------
        config : dict
            Dictionary containing the problem configuration.
            It must contain the following mappings:

            proxoperators : sequence of ProxOperator classes
                The two prox operator classes (not instances); each is
                instantiated here with ``config``.
            normM : number
                Normalization constant used in the change/gap computations.
            Nx : int
                Row-dim of the product space elements.
            Ny : int
                Column-dim of the product space elements.
            Nz : int
                Depth-dim of the product space elements.
            product_space_dimension : int
                Size of the product space.
            truth, truth_dim, norm_truth : optional
                Ground-truth data; when present, relative errors are
                tracked during the run.

        Note: beta0/beta_max/beta_switch are documented by callers but are
        not read by this algorithm.
        """
        self.prox1 = config['proxoperators'][0](config)
        self.prox2 = config['proxoperators'][1](config)
        self.normM = config['normM']
        self.Nx = config['Nx']
        self.Ny = config['Ny']
        self.Nz = config['Nz']
        self.product_space_dimension = config['product_space_dimension']
        self.iters = 0
        if 'truth' in config:
            self.truth = config['truth']
            self.truth_dim = config['truth_dim']
            self.norm_truth = config['norm_truth']

    def run(self, u, tol, maxiter):
        """
        Run alternating projections on the specified input data.

        Parameters
        ----------
        u : ndarray
            Initial iterate (2-, 3- or 4-dimensional).
        tol : number
            Stopping tolerance on the change between successive iterates.
        maxiter : int
            Maximum number of iterations.

        Returns
        -------
        dict
            Keys 'u1' and 'u2' (last iterate projected by prox1/prox2),
            'iter' (iterations performed), 'change' and 'gap' (per-iteration
            diagnostics).
        """
        # BUG FIX: exp/angle/trace are used in the truth branch below but
        # were never imported at module level; import them locally.
        from numpy import exp, angle, trace

        prox1 = self.prox1
        prox2 = self.prox2

        # p/q encode how many product-space "pages" the iterate carries.
        if u.ndim < 3:
            p = 1
            q = 1
        elif u.ndim == 3:
            p = u.shape[2]
            q = 1
        else:
            p = u.shape[2]
            q = u.shape[3]

        normM = self.normM
        iters = self.iters
        change = zeros(maxiter + 1)
        change[0] = 999  # sentinel so the first loop test passes
        gap = change.copy()
        if hasattr(self, 'truth'):
            Relerrs = change.copy()

        tmp1 = prox2.work(u)

        while iters < maxiter and change[iters] >= tol:
            iters += 1
            tmp_u = prox1.work(tmp1)
            tmp1 = prox2.work(tmp_u)

            tmp_change = 0
            tmp_gap = 0
            if p == 1 and q == 1:
                tmp_change = (norm(u - tmp_u, 'fro') / normM) ** 2
                tmp_gap = (norm(tmp1 - tmp_u, 'fro') / normM) ** 2
                if hasattr(self, 'truth'):
                    if self.truth_dim[0] == 1:
                        z = tmp_u[0, :]
                    elif self.truth_dim[1] == 1:
                        z = tmp_u[:, 0]
                    else:
                        z = tmp_u
                    # BUG FIX: the original indexed with the builtin `iter`
                    # instead of the loop counter `iters`.
                    Relerrs[iters] = norm(self.truth - exp(-1j * angle(trace(self.truth.T * z))) * z, 'fro') / self.norm_truth
            elif q == 1:
                for j in range(self.product_space_dimension):
                    tmp_change = tmp_change + (norm(u[:, :, j] - tmp_u[:, :, j], 'fro') / normM) ** 2
                    # compute (||P_SP_Mx-P_Mx||/normM)^2:
                    tmp_gap = tmp_gap + (norm(tmp1[:, :, j] - tmp_u[:, :, j], 'fro') / normM) ** 2
            else:
                for j in range(self.product_space_dimension):
                    for k in range(self.Nz):
                        tmp_change = tmp_change + (norm(u[:, :, k, j] - tmp_u[:, :, k, j], 'fro') / normM) ** 2
                        # compute (||P_Sx-P_Mx||/normM)^2:
                        tmp_gap = tmp_gap + (norm(tmp1[:, :, k, j] - tmp_u[:, :, k, j], 'fro') / normM) ** 2

            change[iters] = sqrt(tmp_change)
            gap[iters] = sqrt(tmp_gap)
            u = tmp_u

        # Final projections of the last iterate onto both sets.
        tmp = prox1.work(u)
        tmp2 = prox2.work(u)
        if self.Nx == 1:
            u1 = tmp[:, 0]
            u2 = tmp2[:, 0]
        elif self.Ny == 1:
            u1 = tmp[0, :]
            u2 = tmp2[0, :]
        elif self.Nz == 1 and tmp.ndim > 2:
            u1 = tmp[:, :, 0]
            u2 = tmp2[:, :, 0]
        else:
            u1 = tmp
            u2 = tmp2

        change = change[1:iters + 1]
        gap = gap[1:iters + 1]

        return {'u1': u1, 'u2': u2, 'iter': iters, 'change': change, 'gap': gap}
# Simple Algorithm
# Analog to AlgorithmWrapper in ProxMatlab
from .algorithms import Algorithm
from numpy import zeros, angle, trace, exp, sqrt
from numpy.linalg import norm
class SimpleAlgorithm:
    """
    Generic driver for simple fixed-point algorithms (the Python analog of
    AlgorithmWrapper in ProxMatlab).

    Subclasses implement ``evaluate(self, u)``, mapping the current iterate
    to the next one; ``run`` supplies the surrounding fixed-point loop,
    stopping test and optional diagnostics (gap, shadow change, relative
    error against a known truth).
    """

    def __init__(self, config):
        """
        Parameters
        ----------
        config : dict
            Dictionary containing the problem configuration.
            It must contain the following mappings:

            proxoperators : sequence of ProxOperator classes
                The two prox operator classes (not instances); each is
                instantiated here with ``config``.
            beta_0 : number
                Starting relaxation parameter.
            beta_max : number
                Maximum relaxation parameter.
            beta_switch : int
                Iteration at which beta moves from beta_0 -> beta_max.
            normM : number
                Normalization constant used in the change/gap computations.
            Nx, Ny, Nz : int
                Row-, column- and depth-dimension of the product space
                elements.
            product_space_dimension : int
                Size of the product space.
            truth, truth_dim, norm_truth : optional
                Ground-truth data; when present, relative errors are
                tracked during the run.
            diagnostic : optional
                Mere presence of this key enables gap/shadow diagnostics.
        """
        self.prox1 = config['proxoperators'][0](config)
        self.prox2 = config['proxoperators'][1](config)
        self.normM = config['normM']
        self.beta_0 = config['beta_0']
        self.beta_max = config['beta_max']
        self.beta_switch = config['beta_switch']
        self.Nx = config['Nx']
        self.Ny = config['Ny']
        self.Nz = config['Nz']
        self.product_space_dimension = config['product_space_dimension']
        self.iter = 0
        self.config = config
        if 'truth' in config:
            self.truth = config['truth']
            self.truth_dim = config['truth_dim']
            self.norm_truth = config['norm_truth']
        # diagnostics are switched on purely by the presence of the key
        self.diagnostic = 'diagnostic' in config

    def run(self, u, tol, maxiter):
        """
        Run the algorithm for the specified input data.

        Parameters
        ----------
        u : ndarray
            Initial iterate (2-, 3- or 4-dimensional).
        tol : number
            Stopping tolerance on the change between successive iterates.
        maxiter : int
            Maximum number of iterations.

        Returns
        -------
        dict
            Keys 'u', 'u1', 'u2', 'iter', 'change'; plus 'gap',
            'shadow_change' and (with truth) 'Relerrs' when diagnostics
            are enabled.
        """
        ##### PREPROCESSING
        config = self.config
        normM = self.normM
        # NOTE(review): beta is initialized but not used in this loop;
        # subclasses read relaxation data from `config` instead.
        beta = self.beta_0
        iter = self.iter

        # p/q encode how many product-space "pages" the iterate carries.
        if u.ndim < 3:
            p = 1
            q = 1
        elif u.ndim == 3:
            p = u.shape[2]
            q = 1
        else:
            p = u.shape[2]
            q = u.shape[3]

        # NOTE(review): for complex-valued u this makes `change` complex and
        # the comparison change[iter] >= tol may misbehave — confirm dtype.
        change = zeros(maxiter + 1, dtype=u.dtype)
        change[0] = 999  # sentinel so the first loop test passes

        ######################################
        # set up diagnostic arrays
        ######################################
        if self.diagnostic:
            gap = change.copy()
            shadow_change = change.copy()
            if hasattr(self, 'truth'):
                Relerrs = change.copy()

        # Different algorithms will have different quantities that one
        # should monitor. Since we take a fixed point perspective, the main
        # thing to monitor is the change in the iterates.
        shadow = self.prox1.work(u)

        ##### LOOP
        while iter < maxiter and change[iter] >= tol:
            iter += 1
            config['iter'] = iter
            # expose the current iterate to the subclass algorithm
            config['u'] = u
            u_new = self.evaluate(u)

            if 'diagnostic' in self.config:
                # These extra prox operations are only needed to measure the
                # size of the gap; alternating projections itself does not
                # require them.
                u2 = self.prox2.work(u_new)
                u1 = self.prox1.work(u2)

            # compute the normalized change in successive iterates
            tmp_change = 0
            tmp_gap = 0
            tmp_shadow = 0
            if p == 1 and q == 1:
                tmp_change = (norm(u - u_new, 'fro') / normM) ** 2
                if 'diagnostic' in self.config:
                    # For Douglas-Rachford, in general it is appropriate to
                    # monitor the SHADOWS of the iterates, since in the convex
                    # case these converge even for beta=1.
                    # (see Bauschke-Combettes-Luke, J. Approx. Theory, 2004)
                    tmp_shadow = (norm(u2 - shadow, 'fro') / normM) ** 2
                    tmp_gap = (norm(u1 - u2, 'fro') / normM) ** 2
                    if hasattr(self, 'truth'):
                        if self.truth_dim[0] == 1:
                            z = u1[0, :]
                        elif self.truth_dim[1] == 1:
                            z = u1[:, 0]
                        else:
                            z = u1
                        Relerrs[iter] = norm(self.truth - exp(-1j * angle(trace(self.truth.T * z))) * z, 'fro') / self.norm_truth
            elif q == 1:
                for j in range(self.product_space_dimension):
                    tmp_change = tmp_change + (norm(u[:, :, j] - u_new[:, :, j], 'fro') / normM) ** 2
                    if 'diagnostic' in self.config:
                        # compute (||P_SP_Mx-P_Mx||/normM)^2:
                        tmp_gap = tmp_gap + (norm(u1[:, :, j] - u2[:, :, j], 'fro') / normM) ** 2
                        tmp_shadow = tmp_shadow + (norm(u2[:, :, j] - shadow[:, :, j], 'fro') / normM) ** 2
                # BUG FIX: Relerrs must only be touched when it exists, i.e.
                # when both diagnostics and a known truth are configured.
                if self.diagnostic and hasattr(self, 'truth'):
                    z = u1[:, :, 0]
                    Relerrs[iter] = norm(self.truth - exp(-1j * angle(trace(self.truth.T * z))) * z, 'fro') / self.norm_truth
            else:
                # 4-dimensional product space
                if self.diagnostic and hasattr(self, 'truth'):
                    Relerrs[iter] = 0  # accumulated over k below
                for j in range(self.product_space_dimension):
                    # BUG FIX: the original iterated over the undefined name
                    # `Nz` instead of `self.Nz`.
                    for k in range(self.Nz):
                        tmp_change = tmp_change + (norm(u[:, :, k, j] - u_new[:, :, k, j], 'fro') / normM) ** 2
                        if 'diagnostic' in self.config:
                            # compute (||P_Sx-P_Mx||/normM)^2:
                            # BUG FIX: the original referenced the undefined
                            # name `u22` instead of `u2`.
                            tmp_gap = tmp_gap + (norm(u1[:, :, k, j] - u2[:, :, k, j], 'fro') / normM) ** 2
                            tmp_shadow = tmp_shadow + (norm(u2[:, :, k, j] - shadow[:, :, k, j], 'fro') / normM) ** 2
                            if hasattr(self, 'truth') and (j == 0):
                                # BUG FIX: Matlab's 1-based u1(:,:,k,1) was
                                # ported as index 1; the first slab is index 0
                                # in Python.
                                Relerrs[iter] = Relerrs[iter] + norm(self.truth - exp(-1j * angle(trace(self.truth.T * u1[:, :, k, 0]))) * u1[:, :, k, 0], 'fro') / self.norm_truth

            change[iter] = sqrt(tmp_change)
            if 'diagnostic' in self.config:
                gap[iter] = sqrt(tmp_gap)
                # This is the Euclidean norm of the gap to the unregularized
                # set. The gap to the regularized set is expensive to
                # calculate, so we use this surrogate. Since the stopping
                # criterion is on the change in the iterates, this does not
                # matter.
                shadow_change[iter] = sqrt(tmp_shadow)

            # update
            u = u_new
            if 'diagnostic' in self.config:
                # For Douglas-Rachford, in general it is appropriate to
                # monitor the SHADOWS of the iterates, since in the convex
                # case these converge even for beta=1.
                # (see Bauschke-Combettes-Luke, J. Approx. Theory, 2004)
                shadow = u2

        ##### POSTPROCESSING
        # NOTE(review): prox1 output is stored as u2 and prox2 output as u1,
        # mirroring the original code; confirm against callers before
        # renaming.
        u2 = self.prox1.work(u)
        u1 = self.prox2.work(u2)
        if self.Nx == 1:
            u1 = u1[:, 0]
            u2 = u2[:, 0]
        elif self.Ny == 1:
            u1 = u1[0, :]
            u2 = u2[0, :]
        elif self.Nz == 1 and u1.ndim > 2:
            u1 = u1[:, :, 0]
            u2 = u2[:, :, 0]
        # BUG FIX: the original else-branch assigned the undefined name
        # `tmp` to u1; when no dimension is collapsed, u1/u2 are already
        # correct as-is.

        change = change[1:iter + 1]
        output = {'u': u, 'u1': u1, 'u2': u2, 'iter': iter, 'change': change}
        if 'diagnostic' in self.config:
            gap = gap[1:iter + 1]
            shadow_change = shadow_change[1:iter + 1]
            output['gap'] = gap
            output['shadow_change'] = shadow_change
            if hasattr(self, 'truth'):
                output['Relerrs'] = Relerrs
        return output
# ART_alternating_proj_in.m
# written on October 6, 2011 by
# Russell Luke
# University of Goettingen
#
# DESCRIPTION: parameter input file for main_ProxToolbox.m
#
##########################################################################
new_config={
## We start very general.
##
## What type of problem is being solved? Classification
## is according to the geometry: 'Affine', 'Phase',
## 'Affine-sparsity', 'Custom'
'problem_family' : 'ART',
##==========================================
## Problem parameters
##==========================================
## What is the name of the data file?
'rescale' : 1,
'data_filename' : 'ART_data_processor',
## What type of object are we working with?
## Options are: 'phase', 'real', 'nonnegative', 'complex'
'object' : 'complex',
## What type of constraints do we have?
## Options are: 'support only', 'real and support', 'nonnegative and support',
## 'amplitude only', 'sparse real', 'sparse complex', and 'hybrid'
## 'convex'
'constraint' : 'convex',
## What type of measurements are we working with?
## Options are: 'single diffraction', 'diversity diffraction',
## 'ptychography', 'complex', and 'diversity affine'
'experiment' : 'convex',
## What are the dimensions of the measurements?
# given in the ART_data_processor
##==========================================
## Algorithm parameters
##==========================================
## Now set some algorithm parameters that the user should be
## able to control (without too much damage)
# Point to appropriate projectors.
'Prox1' : 'P_parallel_hyperplane',# 'P_parallel_hyperplane', # 'P_sequential_hyperplane_odd', # projection onto support and nonnegativity constraint
'Prox2' : 'P_diag', #'P_Diag', #'P_sequential_hyperplane_even', # projection onto mask constraint
## Algorithm:
'algorithm' : 'AP', #'AP', 'Cimmino',
'numruns' : 1, # the only time this parameter will
# be different than 1 is when we are
# benchmarking, that is, when algorithm performance statistics
# are being generated for randomly generated problems/initial
# values etc.
## relaxaton parameters in RAAR, HPR and HAAR
'beta_0' : 0.75, # starting relaxation prameter (only used with
# HAAR, HPR and RAAR)
'beta_max' : 0.75, # maximum relaxation prameter (only used with
# HAAR, RAAR, and HPR)
'beta_switch' : 13, # iteration at which beta moves from beta_0 -> beta_max
## maximum number of iterations and tolerances
'MAXIT' : 20,
'TOL' : -1e-6,
## parameter for the data regularization
## need to discuss how/whether the user should
## put in information about the noise
'data_ball' : 1e-15,
# the above is the percentage of the gap
# between the measured data and the
# initial guess satisfying the
# qualitative constraints. For a number
# very close to one, the gap is not expected
# to improve much. For a number closer to 0
# the gap is expected to improve a lot.
# Ultimately the size of the gap depends
# on the inconsistency of the measurement model
# with the qualitative constraints.
##==========================================
## parameters for plotting and diagnostics