Commit f27bc5a8 authored by alexander.dornheim's avatar alexander.dornheim
Browse files

Made some progress on QNAP

parent 67cbf347
......@@ -27,6 +27,8 @@
#
#
from numpy import zeros, isreal, zeros_like
import numpy as np
from .algorithms import Algorithm
import sys
sys.path.append('../../samsara/python')
......@@ -35,12 +37,68 @@ from samsara import Samsara
class QNAP(Algorithm):
def __init__(self, config):
    """Initialize the QNAP solver from a configuration dictionary.

    Expected keys in *config*: 'proxoperators' (two prox-operator
    classes, each constructed with the config), 'normM', 'Nx', 'Ny',
    'Nz', 'product_space_dimension'; optionally 'truth'/'truth_dim'/
    'norm_truth' (ground-truth data for error tracking) and
    'diagnostic' (its mere presence switches diagnostics on).
    """
    self.iter = 0
    # Instantiate the two prox operators; each receives the full config.
    prox_classes = config['proxoperators']
    self.prox1 = prox_classes[0](config)
    self.prox2 = prox_classes[1](config)
    self.normM = config['normM']
    self.Nx = config['Nx']
    self.Ny = config['Ny']
    self.Nz = config['Nz']
    self.product_space_dimension = config['product_space_dimension']
    # Ground-truth information is optional; store it only when supplied.
    if 'truth' in config:
        self.truth = config['truth']
        self.truth_dim = config['truth_dim']
        self.norm_truth = config['norm_truth']
    # Diagnostics are enabled by the presence of the key, not its value.
    self.diagnostic = 'diagnostic' in config
    self.samsara = Samsara()
def run(self, u, tol, maxiter):
"""
Runs the algorithm for the specified input data
"""
##### PREPROCESSING
iter = self.iter
prox1 = self.prox1; prox2 = self.prox2
if u.ndim < 3:
p = 1
q = 1
elif u.ndim == 3:
p = u.shape[2]
q = 1
else:
p = u.shape[2]
q = u.shape[3]
change = zeros(maxiter+1,dtype=u.dtype)
change[0] = 999
######################################
# set up diagnostic arrays
######################################
if self.diagnostic:
gap = change.copy()
shadow_change = change.copy()
if hasattr(self, 'truth'):
Relerrs = change.copy()
normM = self.normM
tmp1 = self.prox2.work(u)
gradfnew_vec = zeros_like(u[:,0])
while iter < maxiter and change[iter] >= tol:
iter += 1
config['iter'] =iter
self.iter =iter
tmp_u = prox1.work(tmp1)
# make call to SAMSARA for acceleration step
# since for the product space Prox1 is the
......@@ -49,66 +107,66 @@ class QNAP(Algorithm):
# array to samsara. Have to reshape the complex
# array as a real-valued vector. Use the gap as the
# objective function, and the negative gradient tmp_u- u
if dim_number == 2:
if (isreal(u)):
if(iter>4):
uold_vec=u[:,1]
unew_vec=tmp_u[:,1]
if (p==1) and (q==1):
if (np.all(isreal(u))):
if iter > 4:
uold_vec=u[:,0]
unew_vec=tmp_u[:,0]
gradfold_vec = gradfnew_vec
gradfnew_vec = (u[:,1]- tmp_u[:,1])
unew_vec, uold_vec, gap[iter-1], gradfold_vec, change[iter], samsara_data = \
self.samsara(uold_vec, unew_vec,
gradfnew_vec = (u[:,0]- tmp_u[:,0])
unew_vec, uold_vec, gap[iter-1], gradfold_vec, change[iter] = \
self.samsara.run(uold_vec, unew_vec,
gap[iter-2]*self.Nx*self.Ny,
gap[iter-1]*self.Nx*self.Ny,
gradfold_vec, gradfnew_vec, samsara_data)
gradfold_vec, gradfnew_vec)
for j in range(self.product_space_dimension):
tmp_u[:,j]=unew_vec
gap[iter-1]=gap[iter-1]/(self.Nx*self.Ny)
else:
unew_vec=tmp_u[:,1]
uold_vec=u[:,1]
gradfnew_vec = (u[:,1]- tmp_u[:,1])
unew_vec=tmp_u[:,0]
uold_vec=u[:,0]
gradfnew_vec = (u[:,0]- tmp_u[:,0])
else:
if iter>3:
uold_vec=reshape(concatenate(real(u[:,1]), imag(u[:,1])),
self.Ny*self.Nx*2, 1);
unew_vec=reshape(concatenate(real(tmp_u[:,1]), imag(tmp_u[:,1])),
self.Ny*self.Nx*2, 1)
uold_vec=reshape(concatenate(real(u[:,0]), imag(u[:,0])),
self.Ny*self.Nx*2, 0);
unew_vec=reshape(concatenate(real(tmp_u[:,0]), imag(tmp_u[:,0])),
self.Ny*self.Nx*2, 0)
gradfold_vec = gradfnew_vec
gradfnew_vec = uold_vec-unew_vec
unew_vec,uold_vec,gap[iter-1],gradfold_vec, change[iter], samsara_data= \
unew_vec,uold_vec,gap[iter-0],gradfold_vec, change[iter]= \
feval('samsara', uold_vec, unew_vec,
gap(iter-2)*self.Nx*self.Ny,
gap(iter-1)*self.Nx*self.Ny,
gradfold_vec, gradfnew_vec, samsara_data)
gradfold_vec, gradfnew_vec)
# now reshape unew_vec
tmp_u_vec = unew_vec[1:self.Ny*self.Nx]+1j*unew_vec[self.Ny*self.Nx+1:self.Ny*self.Nx*2]
tmp_u_vec = unew_vec[0:self.Ny*self.Nx-1]+1j*unew_vec[self.Ny*self.Nx:self.Ny*self.Nx*2-1]
for j in range(self.product_space_dimension):
tmp_u[:,j]=tmp_u_vec
gap[iter-1]=gap[iter-1]/(self.Nx*self.Ny)
else:
uold_vec=reshape([real(u[:,1]), imag(u[:,1])],
uold_vec=reshape([real(u[:,0]), imag(u[:,0])],
self.Ny*self.Nx*2, 1)
unew_vec=reshape([real(tmp_u[:,1]), imag(tmp_u[:,1])],
unew_vec=reshape([real(tmp_u[:,0]), imag(tmp_u[:,0])],
self.Ny*self.Nx*2, 1)
gradfnew_vec = uold_vec-unew_vec
elif dim_number == 3:
if (isreal(u)):
tmp2 = u[:,:,1]
elif (p!=1) and (q==1):
if (np.all(isreal(u))):
tmp2 = u[:,:,0]
uold_vec=reshape(tmp2,self.Nx*self.Ny,1)
tmp2 = tmp_u[:,:,1]
tmp2 = tmp_u[:,:,0]
unew_vec = reshape(tmp2,self.Nx*self.Ny,1);
if iter<=3:
gradfnew_vec = uold_vec-unew_vec
else:
gradfold_vec = gradfnew_vec
gradfnew_vec = uold_vec-unew_vec
unew_vec,uold_vec,gap[iter-1],gradfold_vec, change[iter], samsara_data= \
unew_vec,uold_vec,gap[iter-1],gradfold_vec, change[iter]= \
feval('samsara', uold_vec, unew_vec,
gap(iter-2)*self.Nx*self.Ny,
gap(iter-1)*self.Nx*self.Ny,
gradfold_vec, gradfnew_vec, samsara_data);
gradfold_vec, gradfnew_vec);
# now reshape and replace u
gap[iter-1]=gap[iter-1]/(self.Nx*self.Ny);
......@@ -117,20 +175,20 @@ class QNAP(Algorithm):
tmp_u[:,:,j]=tmp2
end
else:
tmp2=concatenate(real(u[:,:,1]),imag(u[:,:,1]))
tmp2=concatenate(real(u[:,:,0]),imag(u[:,:,0]))
uold_vec=reshape(tmp2,self.Nx*self.Ny*2,1)
tmp2=concatenate(real(tmp_u[:,:,1]), imag(tmp_u[:,:,1]))
tmp2=concatenate(real(tmp_u[:,:,0]), imag(tmp_u[:,:,0]))
unew_vec = reshape(tmp2,self.Nx*self.Ny*2,1)
if iter<=3:
gradfnew_vec = uold_vec-unew_vec
else:
gradfold_vec = gradfnew_vec
gradfnew_vec = uold_vec-unew_vec
unew_vec,uold_vec,gap[iter-1],gradfold_vec, change[iter], samsara_data= \
unew_vec,uold_vec,gap[iter-1],gradfold_vec, change[iter]= \
feval('samsara', uold_vec, unew_vec,
gap(iter-2)*self.Nx*self.Ny,
gap(iter-1)*self.Nx*self.Ny,
gradfold_vec, gradfnew_vec, samsara_data)
gradfold_vec, gradfnew_vec)
# now reshape and replace u
gap[iter-1]=gap[iter-1]/(self.Nx*self.Ny)
tmp2=reshape(unew_vec,self.Ny,self.Nx*2)
......
......@@ -40,7 +40,7 @@ class SimpleAlgorithm:
self.prox1 = config['proxoperators'][0](config)
self.prox2 = config['proxoperators'][1](config)
self.normM = config['normM']
self.Nx = config['Nx']; self.Ny = config['Ny']; self.Nz = config['Nz'];
self.Nx = config['Nx']; self.Ny = config['Ny']; self.Nz = config['Nz']
self.product_space_dimension = config['product_space_dimension']
self.iter = 0
self.config = config
......
Supports Markdown
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment