Commit c69089f7 authored by alexander.dornheim's avatar alexander.dornheim
Browse files

Corrected various errors in HAAR

parent 3867fd73
......@@ -29,9 +29,14 @@
#
# Nonstandard Matlab function calls: method_input.Prox1 and .Prox2, RAAR, Q_Heau
from numpy import zeros
from scipy.linalg import norm
from .algorithms import Algorithm
class HAAR(Algorithm):
def __init__(self, config):
def __init__(self, config):
"""
Parameters
----------
......@@ -115,14 +120,14 @@ def __init__(self, config):
y=y0
if dim_number==2 and (s[0] !=1 ) and(s[1] !=1)
if dim_number==2 and (s[0] !=1 ) and(s[1] !=1):
y0=feval('reshape',y,s[0]*s[1],1)
elif dim_number == 3
elif dim_number == 3:
y0=zeros(s[0]*s[1]*s[2],1,1)
for j in range(s[3])
for j in range(s[3]):
ytmp=y[:,:,j].reshape(s[0]*s[1],1)
y0[n*m*j:m*n*(j+1)]= ytmp
elif dim_numner == 4
elif dim_numner == 4:
print('not ready for 4D arrays')
return
......@@ -144,7 +149,7 @@ def __init__(self, config):
elif (p>1) and (q==1):
tmp_y = zeros((n*m*p,1,1))
tmp_Ty=tmp_y
for j in range(p)
for j in range(p):
ytmp = y[:,:,j].reshape((n*m,1))
tmp_y[n*m*j:m*n*(j+1)] = ytmp
ytmp = Ty[:,:,j].reshape((n*m,1))
......@@ -159,14 +164,14 @@ def __init__(self, config):
y_new = y_new.reshape((m,n))
elif dim_number == 3:
tmpy = zeros((m,n,p))
for j in range(p)
for j in range(p):
tmpy[:,:,j]= y_new[n*m*j:m*n*(j+1)].reshape((m,n))
y_new=tmpy
elseif(q>1)
elif dim_number == 4:
print('not ready for 4D arrays')
if(self.'diagnostic'):
if 'diagnostic' in self.config:
# the next prox operations only gets used in the computation of
# the size of the gap. This extra step is not
# required in alternating projections, which makes RAAR
......@@ -175,55 +180,55 @@ def __init__(self, config):
tmp = prox2.evaluate(y_new)
tmp2 = prox1.evaluate(tmp)
# compute the normalized change in successive iterates:
# change(iter) = sum(sum((feval('P_M',M,u)-tmp).^2))/normM;
tmp_change=0; tmp_gap=0; tmp_shadow=0;
# compute the normalized change in successive iterates:
# change(iter) = sum(sum((feval('P_M',M,u)-tmp).^2))/normM;
tmp_change=0; tmp_gap=0; tmp_shadow=0;
if p==1 and q==1:
tmp_change= (norm(u-u_new, 'fro')/normM)**2
if 'diagnostic' in self.config:
# For Douglas-Rachford,in general it is appropriate to monitor the
# SHADOWS of the iterates, since in the convex case these converge
# even for beta=1.
# (see Bauschke-Combettes-Luke, J. Approx. Theory, 2004)
tmp_shadow = (norm(tmp-shadow,'fro')/normM)**2
tmp_gap = (norm(tmp-tmp2,'fro')/normM)**2
if hasattr(self, 'truth'):
if self.truth_dim[0] == 1:
z=tmp[0,:]
elif self.truth_dim[1] == 1:
z=tmp[:,0]
else:
z=tmp;
Relerrs[iter] = norm(self.truth - exp(-1j*angle(trace(self.truth.T*z))) * z, 'fro')/self.norm_truth
elif q==1:
for j in range(self.product_space_dimension):
tmp_change= tmp_change+ (norm(y[:,:,j]-y_new[:,:,j], 'fro')/normM)**2
if 'diagnostic' in self.config:
# compute (||P_SP_Mx-P_Mx||/normM)^2:
tmp_gap = tmp_gap+(norm(tmp[:,:,j]tmp2[:,:,j],'fro')/normM)**2
tmp_shadow = tmp_shadow+(norm(tmp[:,:,j]-shadow[:,:,j],'fro')/normM)**2
if p==1 and q==1:
tmp_change= (norm(u-u_new, 'fro')/normM)**2
if 'diagnostic' in self.config:
# For Douglas-Rachford,in general it is appropriate to monitor the
# SHADOWS of the iterates, since in the convex case these converge
# even for beta=1.
# (see Bauschke-Combettes-Luke, J. Approx. Theory, 2004)
tmp_shadow = (norm(tmp-shadow,'fro')/normM)**2
tmp_gap = (norm(tmp-tmp2,'fro')/normM)**2
if hasattr(self, 'truth'):
if self.truth_dim[0] == 1:
z=tmp[0,:]
elif self.truth_dim[1] == 1:
z=tmp[:,0]
else:
z=tmp;
Relerrs[iter] = norm(self.truth - exp(-1j*angle(trace(self.truth.T*z))) * z, 'fro')/self.norm_truth
elif q==1:
for j in range(self.product_space_dimension):
tmp_change= tmp_change+ (norm(y[:,:,j]-y_new[:,:,j], 'fro')/normM)**2
if 'diagnostic' in self.config:
if hasattr(self, 'truth'):
z=tmp[:,:,0]
Relerrs[iter] = norm(self.truth - exp(-1j*angle(trace(self.truth.T*z))) * z, 'fro')/self.norm_truth
change[iter] = sqrt(tmp_change)
# compute (||P_SP_Mx-P_Mx||/normM)^2:
tmp_gap = tmp_gap+(norm(tmp[:,:,j]-tmp2[:,:,j],'fro')/normM)**2
tmp_shadow = tmp_shadow+(norm(tmp[:,:,j]-shadow[:,:,j],'fro')/normM)**2
if 'diagnostic' in self.config:
gap[iter] = sqrt(tmp_gap)
shadow_change[iter] = sqrt(tmp_shadow) # this is the Euclidean norm of the gap to
# the unregularized set. To monitor the Euclidean norm of the gap to the
# regularized set is expensive to calculate, so we use this surrogate.
# Since the stopping criteria is on the change in the iterates, this
# does not matter.
# graphics
# update
y=y_new
self.T_config['u_0']=y
if hasattr(self, 'truth'):
z=tmp[:,:,0]
Relerrs[iter] = norm(self.truth - exp(-1j*angle(trace(self.truth.T*z))) * z, 'fro')/self.norm_truth
change[iter] = sqrt(tmp_change)
if 'diagnostic' in self.config:
gap[iter] = sqrt(tmp_gap)
shadow_change[iter] = sqrt(tmp_shadow) # this is the Euclidean norm of the gap to
# the unregularized set. To monitor the Euclidean norm of the gap to the
# regularized set is expensive to calculate, so we use this surrogate.
# Since the stopping criteria is on the change in the iterates, this
# does not matter.
# graphics
# update
y=y_new
self.T_config['u_0']=y
if 'diagnostic' in self.config:
# For Douglas-Rachford,in general it is appropriate to monitor the
# SHADOWS of the iterates, since in the convex case these converge
......@@ -246,7 +251,7 @@ def __init__(self, config):
u2 = tmp2[0,:];
elif self.Nz == 1 and u1.ndim > 2:
u1 = tmp[:,:,0]
u2 = tmp2[:,:,0]
u2 = tmp2[:,:,0]
change = change[1:iter+1];
......
Supports Markdown
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment