Commit a8e50aeb authored by jansen31

Merge remote-tracking branch 'origin/dornheim' into dornheim

parents 49e0b8aa 6e0bee00
@@ -5,11 +5,13 @@ Created on Mon Dec 14 13:08:06 2015
@author: rebecca
"""
from math import sqrt
from numpy import zeros
from math import sqrt, exp
from numpy import zeros, angle, trace
from numpy import all as a
from scipy.linalg import norm
from .algorithms import Algorithm
class AP_expert(Algorithm):
"""
Alternating Projections
@@ -54,6 +56,7 @@ class AP_expert(Algorithm):
self.truth = config['truth']
self.truth_dim = config['truth_dim']
self.norm_truth = config['norm_truth']
def run(self, u, tol, maxiter):
"""
@@ -91,18 +94,18 @@ class AP_expert(Algorithm):
tmp1 = prox2.work(tmp_u);
tmp_change = 0; tmp_gap = 0;
if p==1 and q==1:
tmp_change= (norm(u-tmp_u, 'fro')/norm_data)**2
tmp_gap = (norm(tmp1-tmp_u,'fro')/norm_data)**2
if hasattr(self, 'truth'):
if self.truth_dim[0] == 1:
z=tmp_u[0,:]
elif self.truth_dim[1]==1:
z=tmp_u[:,0]
else:
z=tmp_u
Relerrs[iter] = norm(self.truth - exp(-1j*angle(trace(self.truth.T*z))) * z, 'fro')/self.norm_truth
Relerrs[iter] = norm(self.truth - exp(-1j*angle(trace(self.truth.T*z))) * z, 'fro')/self.norm_truth
elif q==1:
for j in range(self.product_space_dimension):
......
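The relative-error lines above compare the iterate with the ground truth only after removing the global phase ambiguity: z is rotated by the phase of trace(truth' * z) before the Frobenius norm is taken. Below is a minimal NumPy sketch of that computation; phase_aligned_relerr is a hypothetical helper name, and it assumes the MATLAB-style truth' * z is meant as a conjugate transpose and matrix product rather than the elementwise * that the ported lines currently use.

import numpy as np

def phase_aligned_relerr(truth, z, norm_truth):
    # Relative error of the iterate z against the ground truth, up to a global phase.
    # Assumes truth and z are 2-D complex arrays of the same shape.
    phase = np.angle(np.trace(truth.conj().T @ z))  # best global phase rotation
    return np.linalg.norm(truth - np.exp(-1j * phase) * z, 'fro') / norm_truth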
@@ -72,18 +72,18 @@ class QNAP(Algorithm):
else:
self.diagnostic = False
self.samsara = Samsara()
self.Samsara = Samsara()
def run(self, u, tol, maxiter):
"""
Runs the algorithm for the specified input data
"""
Samsara = self.Samsara
##### PREPROCESSING
iter = self.iter
prox1 = self.prox1;
prox1 = self.prox1
prox2 = self.prox2
if u.ndim < 3:
@@ -116,7 +116,6 @@ class QNAP(Algorithm):
while iter < maxiter and change[iter] >= tol:
iter += 1
self.iter =iter
tmp_u = prox1.work(tmp1)
# make call to SAMSARA for acceleration step
# since for the product space Prox1 is the
@@ -125,33 +124,33 @@ class QNAP(Algorithm):
# array to samsara. Have to reshape the complex
# array as a real-valued vector. Use the gap as the
# objective function, and the negative gradient tmp_u- u
if (p==1) and (q==1):
if (np.all(isreal(u))):
if iter > 4:
uold_vec=u[:,0]
unew_vec=tmp_u[:,0]
gradfold_vec = gradfnew_vec
gradfnew_vec = (u[:,0]- tmp_u[:,0])
unew_vec, uold_vec, gap[iter-1], gradfold_vec, change[iter] = \
self.samsara.run(uold_vec, unew_vec,
gap[iter-2]*self.Nx*self.Ny,
gap[iter-1]*self.Nx*self.Ny,
gradfold_vec, gradfnew_vec)
for j in range(self.product_space_dimension):
tmp_u[:,j]=unew_vec
gap[iter-1]=gap[iter-1]/(self.Nx*self.Ny)
if (p==1) and (q==1):
if (np.all(isreal(u))):
if iter > 4:
uold_vec=u[:,0]
unew_vec=tmp_u[:,0]
gradfold_vec = gradfnew_vec
gradfnew_vec = (u[:,0]- tmp_u[:,0])
unew_vec, uold_vec, gap[iter-1], gradfold_vec, change[iter] = \
Samsara.run(uold_vec, unew_vec,
gap[iter-2]*self.Nx*self.Ny,
gap[iter-1]*self.Nx*self.Ny,
gradfold_vec, gradfnew_vec)
for j in range(self.product_space_dimension):
tmp_u[:,j]=unew_vec
gap[iter-1]=gap[iter-1]/(self.Nx*self.Ny)
else:
unew_vec=tmp_u[:,0]
uold_vec=u[:,0]
gradfnew_vec = (u[:,0]- tmp_u[:,0])
else:
unew_vec=tmp_u[:,0]
uold_vec=u[:,0]
gradfnew_vec = (u[:,0]- tmp_u[:,0])
else:
if iter>3:
uold_vec= np.concatenate(np.real(u[:,0]), np.imag(u[:,0])).reshape(self.Ny*self.Nx*2, 0)
if iter>3:
uold_vec= np.concatenate((np.real(u[:,0]), np.imag(u[:,0]))).reshape(self.Ny*self.Nx*2)
unew_vec=np.concatenate(np.real(tmp_u[:,0]), np.imag(tmp_u[:,0])).reshape(self.Ny*self.Nx*2, 0)
unew_vec=np.concatenate((np.real(tmp_u[:,0]), np.imag(tmp_u[:,0]))).reshape(self.Ny*self.Nx*2)
gradfold_vec = gradfnew_vec
gradfnew_vec = uold_vec-unew_vec
gradfold_vec = gradfnew_vec
gradfnew_vec = uold_vec-unew_vec
# unew_vec,uold_vec,gap[iter-0],gradfold_vec, change[iter]= \
# feval('samsara', uold_vec, unew_vec,
@@ -159,115 +158,115 @@ class QNAP(Algorithm):
# gap(iter-1)*self.Nx*self.Ny,
# gradfold_vec, gradfnew_vec)
unew_vec, uold_vec, gap[iter], gradfold_vec, change[iter] = Samsara.run(uold_vec, unew_vec, gap(iter-2)*self.Nx*self.Ny, gap(iter-1)*self.Nx*self.Ny, gradfold_vec, gradfnew_vec)
unew_vec, uold_vec, gap[iter], gradfold_vec, change[iter] = Samsara.run(uold_vec, unew_vec, gap[iter-2]*self.Nx*self.Ny, gap[iter-1]*self.Nx*self.Ny, gradfold_vec, gradfnew_vec)
# now reshape unew_vec
tmp_u_vec = unew_vec[0:self.Ny*self.Nx-1]+1j*unew_vec[self.Ny*self.Nx:self.Ny*self.Nx*2-1]
for j in range(self.product_space_dimension):
tmp_u[:,j]=tmp_u_vec
gap[iter-1]=gap[iter-1]/(self.Nx*self.Ny)
else:
uold_vec= np.array(np.real(u[:,0]), np.imag(u[:,0])).reshape(self.Ny*self.Nx*2, 1)
unew_vec= np.array( np.real(tmp_u[:,0]), np.imag(tmp_u[:,0])).reshape(self.Ny*self.Nx*2, 1)
gradfnew_vec = uold_vec-unew_vec
elif (p!=1) and (q==1):
if (np.all(isreal(u))):
tmp2 = u[:,:,0]
uold_vec= tmp2.reshape(self.Nx*self.Ny,1)
tmp2 = tmp_u[:,:,0]
unew_vec = tmp2.reshape(self.Nx*self.Ny,1)
if iter<=3:
gradfnew_vec = uold_vec-unew_vec
else:
gradfold_vec = gradfnew_vec
gradfnew_vec = uold_vec-unew_vec
unew_vec,uold_vec,gap[iter-1],gradfold_vec, change[iter]= \
self.samsara.run(uold_vec, unew_vec,
tmp_u_vec = unew_vec[0:self.Ny*self.Nx]+1j*unew_vec[self.Ny*self.Nx:self.Ny*self.Nx*2]
for j in range(self.product_space_dimension):
tmp_u[:,j]=tmp_u_vec
gap[iter-1]=gap[iter-1]/(self.Nx*self.Ny)
else:
uold_vec= np.array((np.real(u[:,0]), np.imag(u[:,0]))).reshape(self.Ny*self.Nx*2)
unew_vec= np.array((np.real(tmp_u[:,0]), np.imag(tmp_u[:,0]))).reshape(self.Ny*self.Nx*2)
gradfnew_vec = uold_vec-unew_vec
elif (p!=1) and (q==1):
if (np.all(isreal(u))):
tmp2 = u[:,:,0]
uold_vec= tmp2.reshape(self.Nx*self.Ny)
tmp2 = tmp_u[:,:,0]
unew_vec = tmp2.reshape(self.Nx*self.Ny)
if iter<=3:
gradfnew_vec = uold_vec-unew_vec
else:
gradfold_vec = gradfnew_vec
gradfnew_vec = uold_vec-unew_vec
unew_vec,uold_vec,gap[iter-1],gradfold_vec, change[iter]= \
Samsara.run(uold_vec, unew_vec,
gap[iter-2]*self.Nx*self.Ny,
gap[iter-1]*self.Nx*self.Ny,
gradfold_vec, gradfnew_vec)
# now reshape and replace u
gap[iter-1]=gap[iter-1]/(self.Nx*self.Ny);
gap[iter-1]=gap[iter-1]/(self.Nx*self.Ny);
tmp2=unew_vec.reshape(self.Ny,self.Nx);
for j in range(self.product_space_dimension):
tmp_u[:,:,j]=tmp2
tmp2=unew_vec.reshape(self.Ny,self.Nx);
for j in range(self.product_space_dimension):
tmp_u[:,:,j]=tmp2
else:
tmp2=np.concatenate((np.real(u[:,:,0]),np.imag(u[:,:,0])), axis=1)
uold_vec=tmp2.reshape(self.Nx*self.Ny*2,1)
tmp2= np.concatenate((np.real(tmp_u[:,:,0]), np.imag(tmp_u[:,:,0])), axis=1)
unew_vec = tmp2.reshape(self.Nx*self.Ny*2,1)
if iter<=3:
gradfnew_vec = uold_vec-unew_vec
else:
gradfold_vec = gradfnew_vec
gradfnew_vec = uold_vec-unew_vec
unew_vec,uold_vec,gap[iter-1],gradfold_vec, change[iter]= \
self.samsara.run( uold_vec, unew_vec,
gap(iter-2)*self.Nx*self.Ny,
gap(iter-1)*self.Nx*self.Ny,
tmp2=np.concatenate((np.real(u[:,:,0]),np.imag(u[:,:,0])), axis=1)
uold_vec=tmp2.reshape(self.Nx*self.Ny*2)
tmp2= np.concatenate((np.real(tmp_u[:,:,0]), np.imag(tmp_u[:,:,0])), axis=1)
unew_vec = tmp2.reshape(self.Nx*self.Ny*2)
if iter<=3:
gradfnew_vec = uold_vec-unew_vec
else:
gradfold_vec = gradfnew_vec
gradfnew_vec = uold_vec-unew_vec
unew_vec,uold_vec,gap[iter-1],gradfold_vec, change[iter]= \
Samsara.run( uold_vec, unew_vec,
gap[iter-2]*self.Nx*self.Ny,
gap[iter-1]*self.Nx*self.Ny,
gradfold_vec, gradfnew_vec)
# now reshape and replace u
gap[iter-1]=gap[iter-1]/(self.Nx*self.Ny)
tmp2=unew_vec.reshape(self.Ny,self.Nx*2)
unew=tmp2[:,1:self.Nx] +1j*tmp2[:,self.Nx+1:2*self.Nx]
for j in range(self.product_space_dimension):
tmp_u[:,:,j]=unew
else: # product space of 3D arrays
print('Cannot handle 3D arrays on the product space yet')
tmp1 = self.prox2.work(tmp_u)
tmp_change=0; tmp_gap=0
if(p==1)and(q==1):
tmp_change = np.linalg.norm(u-tmp_u, 'fro')/norm_data**2
tmp_gap = np.linalg.norm(tmp1-tmp_u,'fro')/norm_data**2
if hasattr(self, 'diagnostic'): #('diagnostic' in config)
if hasattr(self, "truth"): #('truth' in config)
if self.truth_dim[0]==1:
z=tmp_u[0,:]
elif self.truth_dim[1]==1:
z=tmp_u[:,0]
else:
z=tmp_u;
gap[iter-1]=gap[iter-1]/(self.Nx*self.Ny)
tmp2=unew_vec.reshape(self.Ny,self.Nx*2)
unew=tmp2[:,0:self.Nx] +1j*tmp2[:,self.Nx:2*self.Nx]
for j in range(self.product_space_dimension):
tmp_u[:,:,j]=unew
else: # product space of 3D arrays
print('Cannot handle 3D arrays on the product space yet')
tmp1 = self.prox2.work(tmp_u)
tmp_change=0; tmp_gap=0
if(p==1)and(q==1):
tmp_change = np.linalg.norm(u-tmp_u, 'fro')/norm_data**2
tmp_gap = np.linalg.norm(tmp1-tmp_u,'fro')/norm_data**2
if hasattr(self, 'diagnostic'): #('diagnostic' in config)
if hasattr(self, "truth"): #('truth' in config)
if self.truth_dim[0]==1:
z=tmp_u[0,:]
elif self.truth_dim[1]==1:
z=tmp_u[:,0]
else:
z=tmp_u;
Relerrs[iter] = np.linalg.norm(self.truth - np.exp(-1j*np.angle(np.trace(self.truth*z))) * z, 'fro')/self.norm_truth;
Relerrs[iter] = np.linalg.norm(self.truth - np.exp(-1j*np.angle(np.trace(self.truth*z))) * z, 'fro')/self.norm_truth;
elif(q==1):
for j in range(0,self.product_space_dimension):
tmp_change= tmp_change+ np.linalg.norm(u[:,:,j]-tmp_u[:,:,j], 'fro')/norm_data**2;
tmp_gap = tmp_gap+ np.linalg.norm(tmp1[:,:,j]-tmp_u[:,:,j],'fro')/norm_data**2
elif(q==1):
for j in range(0,self.product_space_dimension):
tmp_change= tmp_change+ np.linalg.norm(u[:,:,j]-tmp_u[:,:,j], 'fro')/norm_data**2;
tmp_gap = tmp_gap+ np.linalg.norm(tmp1[:,:,j]-tmp_u[:,:,j],'fro')/norm_data**2
if hasattr(self, "truth"): #'truth' in config
Relerrs[iter] = np.linalg.norm(self.truth - np.exp(-1j*np.angle(np.trace(self.truth*tmp_u[:,:,1]))) * tmp_u[:,:,1], 'fro')/self.norm_truth;
if hasattr(self, "truth"): #'truth' in config
Relerrs[iter] = np.linalg.norm(self.truth - np.exp(-1j*np.angle(np.trace(self.truth*tmp_u[:,:,1]))) * tmp_u[:,:,1], 'fro')/self.norm_truth;
else:
Relerrs[iter]=0;
for j in range(0,self.product_space_dimension):
for k in range(r1,self.Nz):
tmp_change= tmp_change+(np.linalg.norm(u[:,:,k,j]-tmp_u[:,:,k,j], 'fro')/norm_data)**2;
# compute (||P_Sx-P_Mx||/norm_data)^2:
tmp_gap = tmp_gap+(np.linalg.norm(tmp1[:,:,k,j]-tmp_u[:,:,k,j],'fro')/(norm_data))**2;
else:
Relerrs[iter]=0;
for j in range(0,self.product_space_dimension):
for k in range(r1,self.Nz):
tmp_change= tmp_change+(np.linalg.norm(u[:,:,k,j]-tmp_u[:,:,k,j], 'fro')/norm_data)**2;
# compute (||P_Sx-P_Mx||/norm_data)^2:
tmp_gap = tmp_gap+(np.linalg.norm(tmp1[:,:,k,j]-tmp_u[:,:,k,j],'fro')/(norm_data))**2;
if hasattr(self, "truth") and j == 1: # (any(strcmp('truth',fieldnames(method_input))))&&(j==1)
Relerrs[iter] = Relerrs[iter]+np.linalg.norm(self.truth - np.exp(-1j*np.angle(np.trace(self.truth*tmp_u[:,:,:,1]))) * tmp_u[:,:,k,1], 'fro')/self.norm_truth;
if hasattr(self, "truth") and j == 1: # (any(strcmp('truth',fieldnames(method_input))))&&(j==1)
Relerrs[iter] = Relerrs[iter]+np.linalg.norm(self.truth - np.exp(-1j*np.angle(np.trace(self.truth*tmp_u[:,:,:,1]))) * tmp_u[:,:,k,1], 'fro')/self.norm_truth;
change[iter]=np.sqrt(tmp_change);
gap[iter] = np.sqrt(tmp_gap);
u=tmp_u;
change[iter]=np.sqrt(tmp_change);
gap[iter] = np.sqrt(tmp_gap);
u=tmp_u;
if hasattr(self, "diagnostic"): #(any(strcmp('diagnostic', fieldnames(method_input))))
if hasattr(self, "diagnostic"): #(any(strcmp('diagnostic', fieldnames(method_input))))
# graphics
#print(config["anim"])
if (self.anim>=1)and (iter%2==0):
self.u=tmp1;
if (self.anim>=1)and (iter%2==0):
self.u=tmp1;
#self=self.animation(method_output);
#self.animation()
@@ -293,7 +292,7 @@ class QNAP(Algorithm):
self.u1 = tmp;
self.u2 = tmp2;
change=change[0:iter];
change=change[1:iter];
self.iter = iter;
print(self.iter)
self.change = change;
......
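In the hunks above, the complex iterate is handed to SAMSARA as a real-valued vector: real and imaginary parts are concatenated before the call (the gap, scaled by Nx*Ny, serves as the objective value and u - tmp_u as its gradient), and the accelerated vector is split and reshaped back afterwards. A small sketch of that packing step, assuming row-major flattening; pack and unpack are hypothetical helper names, not part of the module.

import numpy as np

def pack(u):
    # Flatten a complex array into one real vector: all real parts first, then all imaginary parts.
    return np.concatenate((np.real(u).ravel(), np.imag(u).ravel()))

def unpack(v, Ny, Nx):
    # Inverse of pack: rebuild the complex Ny-by-Nx array from the stacked real vector.
    n = Ny * Nx
    return (v[:n] + 1j * v[n:]).reshape(Ny, Nx)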
@@ -164,8 +164,8 @@ class SimpleAlgorithm:
if 'diagnostic' in self.config:
if hasattr(self, 'truth'):
z = u1[:, :, 0]
Relerrs[iter] = norm(self.truth - exp(-1j * angle(trace(self.truth.T * z))) * z,
'fro') / self.norm_truth
Relerrs[iter] = norm(self.truth - exp(-1j * angle(trace(self.truth * z))) * z, 'fro') / self.norm_truth
else:
if 'diagnostic' in self.config:
@@ -182,7 +182,7 @@ class SimpleAlgorithm:
if hasattr(self, 'truth') and (j == 0):
Relerrs[iter] = Relerrs[iter] + norm(
self.truth - exp(-1j * angle(trace(self.truth.T * u1[:, :, k, 1]))) * u1[:, :, k, 1],
'fro') / self.norm_truth
'fro') / self.norm_truth
change[iter] = sqrt(tmp_change)
if 'diagnostic' in self.config:
......
from math import sqrt, exp
from numpy import zeros, angle, tile, mean, conj, reshape, repeat, newaxis, trace
from numpy.fft import fft, ifft, fft2, ifft2
from scipy.linalg import norm
from .algorithms import Algorithm
import numpy as np
import cmath
## Doesn't work yet, relative error increases over the iterations instead of decreasing.
class Wirtinger(Algorithm):
def __init__(self, config):
self.Prox1 = config['proxoperators'][0](config)
self.Prox2 = config['proxoperators'][1](config)
self.norm_data = config['norm_data']
self.Ny = config['Ny']
self.Nx = config['Nx']
self.Nz = config['Nz']
self.product_space_dimension = config['product_space_dimension']
self.u = config['u_0']
if 'truth' in config:
self.truth = config['truth']
self.truth_dim = config['truth_dim']
self.norm_truth = config['norm_truth']
MAXIT = config['MAXIT']
TOL = config['TOL']
if ('experiment' in config):
self.experiment = config['experiment']
if self.experiment == 'CDP':
self.Masks = config['Masks']
self.data_sq = config['data_sq']
elif self.experiment == 'JWST':
self.Masks = np.zeros((self.Ny, self.Nx, self.product_space_dimension), dtype=complex)
self.indicator_ampl = config['indicator_ampl']
self.illumination_phase = config['illumination_phase']
self.data_sq = config['data_sq']
for j in range(self.product_space_dimension):
self.Masks[:,:,j] = self.indicator_ampl * np.exp(1j*self.illumination_phase[:,:,j])
else:
self.data_sq = config['data_sq']
self.norm_rt_data = config['norm_rt_data']
self.Masks[0,0,0:self.product_space_dimension] = 1/self.norm_truth
def run(self, u, TOL, MAXIT):
Masks = self.Masks
Prox1 = self.Prox1
Prox2 = self.Prox2
if u.ndim < 3:
p = 1
q = 1
elif u.ndim == 3:
p = u.shape[2]
q = 1
else:
p = u.shape[2]
q = u.shape[3]
iter = 0
change = zeros(MAXIT, dtype=u.dtype)
change[0] = 999
gap = change.copy()
if self.Nx==1:
A = lambda I: fft(Masks*I)
At = lambda Y: np.repeat(mean(conj(Masks) * ifft(Y),1)[:,np.newaxis], self.product_space_dimension, axis = 1)
elif self.Ny==1:
A = lambda I: fft(Masks*I)
At = lambda Y: np.repeat(mean(conj(Masks) * ifft(Y),0)[np.newaxis,:], self.product_space_dimension, axis = 0)
else:
A = lambda I: fft2(Masks*I)
At = lambda Y: np.repeat(mean(conj(Masks) * ifft2(Y),2)[:,:,np.newaxis], self.product_space_dimension, axis = 2)
if hasattr(self, 'truth'):
Relerrs = change.copy()
tau0 = 330
mu = lambda t: min(1-exp(-t/tau0), 0.4)
while iter < MAXIT-1 and change[iter]>=TOL:
iter +=1
Bz = A(u)
C = (abs(Bz)**2 -self.data_sq)* Bz
grad = At(C)
step = mu(iter)/self.norm_data**2 * grad
u = u - step
#print(norm(u,'fro'))
#print("u")
#print(norm(Bz,'fro'))
#print("Bz")
#print(norm(C,'fro'))
#print("C")
#print(norm(grad,'fro'))
#print("grad")
#print(norm(step,'fro'))
#print("step")
#print(norm(u,'fro'))
#print("newu")
tmp_change = 0
tmp_gap = 0
if p==1 and q==1:
tmp_change = (norm(step, 'fro')/self.norm_truth)**2
tmp_u = Prox1.work(u)
tmp1 = Prox2.work(tmp_u)
tmp_gap = (norm(tmp1-tmp_u, 'fro')/self.norm_truth)**2
if self.truth_dim[0]==1:
z = u[0,:]
elif self.truth_dim[1]==1:
z = u[:,0]
else:
z = u[:,:,0]
Relerrs[iter] = norm(self.truth - cmath.exp(-1j*angle(trace(self.truth*z)))*z,'fro')/self.norm_truth
elif q==1:
tmp_u = Prox1.work(u)
tmp1 = Prox2.work(tmp_u)
tmp_change = self.product_space_dimension*(norm(step[:,:,0], 'fro') /self.norm_truth)**2
for j in range(self.product_space_dimension):
tmp_gap = tmp_gap + (norm(tmp1[:,:,j] - tmp_u[:,:,j], 'fro') / self.norm_truth)**2
Relerrs[iter] = norm(self.truth - cmath.exp(-1j*angle(trace(self.truth*tmp_u[:,:,0]))) *tmp_u[:,:,0], 'fro') /self.norm_truth
else:
# Times[iter] = toc  # timing leftover from the MATLAB version; Times and toc are not defined here
tmp_u = Prox1.work(u)
tmp1 = Prox2.work(tmp_u)
for j in range(self.product_space_dimension):
for k in range(self.Nz):
tmp_change = tmp_change + (norm(step[:,:,k,j], 'fro') /self.norm_truth)**2
tmp_gap = tmp_gap + (norm(step[:,:,k,j], 'fro')/self.norm_truth)**2
Relerrs[iter] = norm(self.truth-cmath.exp(-1j*angle(trace(self.truth*tmp_u[:,:,:,0])))*tmp_u[:,:,:,0],'fro')/self.norm_truth
gap[iter] = sqrt(tmp_gap)
change[iter] = sqrt(tmp_change)
tmp = Prox1.work(u)
tmp2 = Prox2.work(u)
if self.Nx==1:
u1 = tmp[:,0]
u2 = tmp2[:,0]
elif self.Ny==1:
u1 = tmp[0,:]
u2 = tmp2[0,:]
elif self.Nz==1:
u1 = tmp[:,:,0]
u2 = tmp2[:,:,0]
else:
u1 = tmp
u2 = tmp2
change = change[0:iter]
return {'u1': u1, 'u2': u2, 'iter': iter, 'change': change, 'gap': gap}
\ No newline at end of file
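The loop in Wirtinger.run above performs a gradient step for the amplitude-based least-squares objective: with Bz = A(u), the residual (|Bz|**2 - data_sq) * Bz is pushed back through the adjoint At and scaled by the ramped step size mu(iter)/norm_data**2. The sketch below isolates that update; wirtinger_step is a hypothetical name, and the tau0 = 330 ramp and 0.4 cap copy the constants used in the file.

import numpy as np

def wirtinger_step(u, A, At, data_sq, t, norm_data, tau0=330.0):
    # One gradient step on the squared-magnitude misfit || |A(u)|^2 - data_sq ||^2,
    # with the step size ramped up over the first iterations and capped at 0.4.
    Bz = A(u)
    grad = At((np.abs(Bz) ** 2 - data_sq) * Bz)
    mu = min(1.0 - np.exp(-t / tau0), 0.4)
    return u - (mu / norm_data ** 2) * grad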
@@ -31,5 +31,6 @@ from .GRAAL_F import *
from .GRAAL_objective import *
from .KM import *
from .QNAP import *
from .Wirtinger import *
__all__ = ["AP","HPR","RAAR", "AP_expert", "GRAAL", "RAAR_expert", "DRl", "ADMM", "RRR", "CAARL", "CADRl", "CDRl", "CP", "CPrand", "DRAP", "DRl", "GRAAL_F", "GRAAL_objective", "KM", "QNAP"]
__all__ = ["AP","HPR","RAAR", "AP_expert", "GRAAL", "RAAR_expert", "DRl", "ADMM", "RRR", "CAARL", "CADRl", "CDRl", "CP", "CPrand", "DRAP", "DRl", "GRAAL_F", "GRAAL_objective", "KM", "QNAP", "Wirtinger"]
@@ -20,7 +20,7 @@ new_config = {
'warmup_iter' : 0,
'method' : 'AP_expert',
'algorithm' : 'AP_expert',
'numruns' : 100,