Commit 3ec7a1f9 authored by alexander.dornheim's avatar alexander.dornheim
Browse files

Fixed two more bugs in HAAR

parent d88d91ab
......@@ -79,7 +79,7 @@ class HAAR(Algorithm):
self.norm_truth = config['norm_truth']
self.T='RAAR_expert'
self.T_config = config
self.T_config = config.copy()
self.T_config['MAXIT']=1
self.T_config['beta_max']=.1
self.T_config['beta_0']=.1
......@@ -140,106 +140,107 @@ class HAAR(Algorithm):
while iter < maxiter and change[iter] >= tol:
iter = iter+1
self.iter=iter
# next iterate
# Ty= RAARy with beta=1 and MAXIT=1
T_output = self.T.run(self.T_config['u_0'],self.T_config['TOL'],self.T_config['MAXIT'])
Ty = T_output['u']
if dim_number == 2:
Ty = Ty.reshape((s[0]*s[1],1))
tmp_y= y.reshape((s[0]*s[1],1))
elif dim_number == 3:
tmp_y = zeros((s[0]*s[1]*s[2],1,1))
tmp_Ty=tmp_y
for j in range(p):
ytmp = y[:,:,j].reshape((s[0]*s[1],1))
tmp_y[s[0]*s[1]*j:s[0]*s[1]*(j+1)] = ytmp
ytmp = Ty[:,:,j].reshape((s[0]*s[1],1))
tmp_Ty[s[0]*s[1]*j:s[0]*s[1]*(j+1)] = ytmp
Ty=tmp_Ty
elif dim_number == 4:
print('not ready for 4D arrays')
iter = iter+1
self.iter=iter
# next iterate
# Ty= RAARy with beta=1 and MAXIT=1
T_output = self.T.run(self.T_config['u_0'],self.T_config['TOL'],self.T_config['MAXIT'])
Ty = T_output['u']
if dim_number == 2:
Ty = Ty.reshape((s[0]*s[1],1))
tmp_y= y.reshape((s[0]*s[1],1))
elif dim_number == 3:
tmp_y = zeros((s[0]*s[1]*s[2],1,1))
tmp_Ty=tmp_y
for j in range(p):
ytmp = y[:,:,j].reshape((s[0]*s[1],1))
tmp_y[s[0]*s[1]*j:s[0]*s[1]*(j+1)] = ytmp
ytmp = Ty[:,:,j].reshape((s[0]*s[1],1))
tmp_Ty[s[0]*s[1]*j:s[0]*s[1]*(j+1)] = ytmp
Ty=tmp_Ty
elif dim_number == 4:
print('not ready for 4D arrays')
y_new = self.Q_Heau.work(y0,tmp_y,(1-self.mu)*tmp_y+self.mu*Ty)
if dim_number == 2 and (s[0]!=1) and (s[1]!=1):
y_new = y_new.reshape((s[0],s[1]))
elif dim_number == 3:
tmpy = zeros((s[0],s[1],s[2]))
for j in range(p):
tmpy[:,:,j]= y_new[s[0]*s[1]*j:s[0]*s[1]*(j+1)].reshape((s[0],s[1]))
y_new=tmpy
elif dim_number == 4:
print('not ready for 4D arrays')
y_new = self.Q_Heau.work(y0,tmp_y,(1-self.mu)*tmp_y+self.mu*Ty)
if dim_number == 2 and (s[0]!=1) and (s[1]!=1):
y_new = y_new.reshape((s[0],s[1]))
elif dim_number == 3:
tmpy = zeros((s[0],s[1],s[2]))
for j in range(p):
tmpy[:,:,j]= y_new[s[0]*s[1]*j:s[0]*s[1]*(j+1)].reshape((s[0],s[1]))
y_new=tmpy
elif dim_number == 4:
print('not ready for 4D arrays')
if self.diagnostic == True:
# the next prox operations only gets used in the computation of
# the size of the gap. This extra step is not
# required in alternating projections, which makes RAAR
# and special cases thereof more expensive to monitor.
# compute the normalized change in successive iterates:
tmp = prox2.work(y_new)
tmp2 = prox1.work(tmp)
# compute the normalized change in successive iterates:
# change(iter) = sum(sum((feval('P_M',M,u)-tmp).^2))/normM;
tmp_change=0; tmp_gap=0; tmp_shadow=0;
if dim_number <= 2:
tmp_change= (norm(y-y_new, 'fro')/normM)**2
if self.diagnostic == True:
# For Douglas-Rachford,in general it is appropriate to monitor the
# SHADOWS of the iterates, since in the convex case these converge
# even for beta=1.
# (see Bauschke-Combettes-Luke, J. Approx. Theory, 2004)
tmp_shadow = (norm(tmp-shadow,'fro')/normM)**2
tmp_gap = (norm(tmp-tmp2,'fro')/normM)**2
if hasattr(self, 'truth'):
if self.truth_dim[0] == 1:
z=tmp[0,:]
elif self.truth_dim[1] == 1:
z=tmp[:,0]
else:
z=tmp;
Relerrs[iter] = norm(self.truth - exp(-1j*angle(trace(self.truth.T*z))) * z, 'fro')/self.norm_truth
elif dim_number == 3:
for j in range(self.product_space_dimension):
tmp_change= tmp_change+ (norm(y[:,:,j]-y_new[:,:,j], 'fro')/normM)**2
if self.diagnostic == True:
# compute (||P_SP_Mx-P_Mx||/normM)^2:
tmp_gap = tmp_gap+(norm(tmp[:,:,j]-tmp2[:,:,j],'fro')/normM)**2
tmp_shadow = tmp_shadow+(norm(tmp[:,:,j]-shadow[:,:,j],'fro')/normM)**2
if self.diagnostic == True:
if hasattr(self, 'truth'):
z=tmp[:,:,0]
Relerrs[iter] = norm(self.truth - exp(-1j*angle(trace(self.truth.T*z))) * z, 'fro')/self.norm_truth
if self.diagnostic == True:
# the next prox operations only gets used in the computation of
# the size of the gap. This extra step is not
# required in alternating projections, which makes RAAR
# and special cases thereof more expensive to monitor.
# compute the normalized change in successive iterates:
tmp = prox2.work(y_new)
tmp2 = prox1.work(tmp)
# compute the normalized change in successive iterates:
# change(iter) = sum(sum((feval('P_M',M,u)-tmp).^2))/normM;
tmp_change=0; tmp_gap=0; tmp_shadow=0;
if dim_number <= 2:
tmp_change= (norm(y-y_new, 'fro')/normM)**2
change[iter] = sqrt(tmp_change)
print(tmp_change)
if self.diagnostic == True:
gap[iter] = sqrt(tmp_gap)
shadow_change[iter] = sqrt(tmp_shadow) # this is the Euclidean norm of the gap to
# the unregularized set. To monitor the Euclidean norm of the gap to the
# regularized set is expensive to calculate, so we use this surrogate.
# Since the stopping criteria is on the change in the iterates, this
# does not matter.
# graphics
# update
y=y_new
self.T_config['u_0']=y
if self.diagnostic == True:
# For Douglas-Rachford,in general it is appropriate to monitor the
# SHADOWS of the iterates, since in the convex case these converge
# even for beta=1.
# (see Bauschke-Combettes-Luke, J. Approx. Theory, 2004)
tmp_shadow = (norm(tmp-shadow,'fro')/normM)**2
tmp_gap = (norm(tmp-tmp2,'fro')/normM)**2
if hasattr(self, 'truth'):
if self.truth_dim[0] == 1:
z=tmp[0,:]
elif self.truth_dim[1] == 1:
z=tmp[:,0]
else:
z=tmp;
Relerrs[iter] = norm(self.truth - exp(-1j*angle(trace(self.truth.T*z))) * z, 'fro')/self.norm_truth
elif q==1:
for j in range(self.product_space_dimension):
tmp_change= tmp_change+ (norm(y[:,:,j]-y_new[:,:,j], 'fro')/normM)**2
if self.diagnostic == True:
# compute (||P_SP_Mx-P_Mx||/normM)^2:
tmp_gap = tmp_gap+(norm(tmp[:,:,j]-tmp2[:,:,j],'fro')/normM)**2
tmp_shadow = tmp_shadow+(norm(tmp[:,:,j]-shadow[:,:,j],'fro')/normM)**2
if self.diagnostic == True:
if hasattr(self, 'truth'):
z=tmp[:,:,0]
Relerrs[iter] = norm(self.truth - exp(-1j*angle(trace(self.truth.T*z))) * z, 'fro')/self.norm_truth
change[iter] = sqrt(tmp_change)
if self.diagnostic == True:
gap[iter] = sqrt(tmp_gap)
shadow_change[iter] = sqrt(tmp_shadow) # this is the Euclidean norm of the gap to
# the unregularized set. To monitor the Euclidean norm of the gap to the
# regularized set is expensive to calculate, so we use this surrogate.
# Since the stopping criteria is on the change in the iterates, this
# does not matter.
# graphics
# update
y=y_new
self.T_config['u_0']=y
if self.diagnostic == True:
# For Douglas-Rachford,in general it is appropriate to monitor the
# SHADOWS of the iterates, since in the convex case these converge
# even for beta=1.
# (see Bauschke-Combettes-Luke, J. Approx. Theory, 2004)
shadow=tmp
shadow=tmp
##### POSTPROCESSING
......@@ -258,7 +259,7 @@ class HAAR(Algorithm):
u1 = tmp[:,:,0]
u2 = tmp2[:,:,0]
change = change[1:iter+1];
change = change[1:iter+1]
output = {'u' : u, 'u1': u1, 'u2': u2, 'iter': iter, 'change': change}
......
Supports Markdown
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment