Commit a262f25c authored by jansen31

partial merge from ot3d

parent bc40eb09
......@@ -6,3 +6,4 @@ build
dist
.vscode/launch.json
.vscode/settings.json
tests/orb_tomog_regression_tests.py
\ No newline at end of file
......@@ -15,7 +15,7 @@ class Algorithm:
The algorithm object is created when the Experiment object is initialized.
"""
def __init__(self, experiment, iterateMonitor, accelerator = None):
def __init__(self, experiment, iterateMonitor, accelerator=None):
# instantiate prox operators
self.proxOperators = []
......
......@@ -54,10 +54,10 @@ class FeasibilityIterateMonitor(IterateMonitor):
# assumes that u0 is an array
self.u_monitor[0] = self.u0
for j in range(1, nProx):
self.u_monitor[j] = self.proxOperators[j].eval(self.u_monitor[j-1], j)
self.u_monitor[j] = self.proxOperators[j].eval(self.u_monitor[j - 1], j)
else: # always creates a u_monitor, issue is whether to monitor the intermediate
# steps for the gap, as the above prepares for in the case that
# diagnostic ist true.
# diagnostic is true.
self.u_monitor = self.u0
# set up diagnostic arrays
......@@ -105,12 +105,12 @@ class FeasibilityIterateMonitor(IterateMonitor):
# reveals the RELEVANT convergence of the (shadow sequence of the) algorithm:
if isCell(u):
for j in range(len(u)):
self.u_monitor[0][j] = exp(-1j*angle(trace(matmul(prev_u_mon[0][j].T.conj(), u[j]))))*u[j]
self.u_monitor[0][j] = exp(-1j * angle(trace(matmul(prev_u_mon[0][j].T.conj(), u[j])))) * u[j]
else:
# assumes that u is a ndarray
self.u_monitor[0] = exp(-1j*angle(trace(matmul(prev_u_mon[0].T.conj(), u))))*u
self.u_monitor[0] = exp(-1j * angle(trace(matmul(prev_u_mon[0].T.conj(), u)))) * u
u1 = self.proxOperators[nProx-1].eval(self.u_monitor[0]) # Note: to compute u1 which prox_index should we use?
u1 = self.proxOperators[nProx - 1].eval(self.u_monitor[0]) # Note: to compute u1 which prox_index should we use?
# Matlab takes whatever is defined in method_input, which corresponds
# to the last prox evaluation if it was set (not always the case)
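The exp(-1j * angle(trace(...))) factor above removes the arbitrary global phase of the iterate before it is compared with its predecessor. A minimal stand-alone sketch of that alignment step, assuming a single complex 2-D iterate (the function name and variables are illustrative, not proxtoolbox API):

import numpy as np

def align_global_phase(reference, u):
    # The optimal constant phase is angle(trace(reference^H u)), i.e. the angle
    # of the Frobenius inner product <reference, u>; rotating u by its negative
    # removes the global phase ambiguity before changes are measured.
    phase = np.angle(np.trace(reference.conj().T @ u))
    return np.exp(-1j * phase) * u

# e.g. change between consecutive monitored iterates, ignoring global phase:
# aligned = align_global_phase(prev_u_mon, u)
# change = (np.linalg.norm(aligned - prev_u_mon) / normM) ** 2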
......@@ -119,8 +119,8 @@ class FeasibilityIterateMonitor(IterateMonitor):
# do go to the intersection, if this is nonempty.
if self.diagnostic:
self.u_monitor[1] = self.proxOperators[0].eval(u1, 0)
for j in range(1, nProx-1):
self.u_monitor[j+1] = self.proxOperators[j].eval(self.u_monitor[j], j)
for j in range(1, nProx - 1):
self.u_monitor[j + 1] = self.proxOperators[j].eval(self.u_monitor[j], j)
# convert u1 to an array if necessary
if isCell(u1):
_m, _n, p, q = size_matlab(u1[len(u1)-1])
......@@ -134,9 +134,9 @@ class FeasibilityIterateMonitor(IterateMonitor):
if p == 1 and q == 1:
if isCell(self.u_monitor[0]):
for j in range(len(self.u_monitor[0])):
tmp_change = tmp_change + (norm(self.u_monitor[0][j] - prev_u_mon[0][j])/normM)**2
tmp_change = tmp_change + (norm(self.u_monitor[0][j] - prev_u_mon[0][j]) / normM) ** 2
else:
tmp_change = (norm(self.u_monitor[0] - prev_u_mon[0])/normM)**2
tmp_change = (norm(self.u_monitor[0] - prev_u_mon[0]) / normM) ** 2
if self.diagnostic:
# compute shadow and gap
tmp_shadow = tmp_change
......@@ -146,11 +146,11 @@ class FeasibilityIterateMonitor(IterateMonitor):
# (see Bauschke-Combettes-Luke, J. Approx. Theory, 2004)
if isCell(u1):
for jj in range(len(u1)):
tmp_shadow = tmp_shadow + (norm(self.u_monitor[1][jj] - prev_u_mon[1][jj])/normM)**2
tmp_gap = tmp_gap + (norm(self.u_monitor[1][jj] - u1[jj])/normM)**2
tmp_shadow = tmp_shadow + (norm(self.u_monitor[1][jj] - prev_u_mon[1][jj]) / normM) ** 2
tmp_gap = tmp_gap + (norm(self.u_monitor[1][jj] - u1[jj]) / normM) ** 2
else:
tmp_shadow = tmp_shadow + (norm(self.u_monitor[1] - prev_u_mon[1])/normM)**2
tmp_gap = tmp_gap + (norm(self.u_monitor[1] - u1)/normM)**2
tmp_shadow = tmp_shadow + (norm(self.u_monitor[1] - prev_u_mon[1]) / normM) ** 2
tmp_gap = tmp_gap + (norm(self.u_monitor[1] - u1) / normM) ** 2
for j in range(2, nProx):
# For Douglas-Rachford, in general it is appropriate to monitor the
# SHADOWS of the iterates, since in the convex case these converge
......@@ -158,8 +158,8 @@ class FeasibilityIterateMonitor(IterateMonitor):
# (see Bauschke-Combettes-Luke, J. Approx. Theory, 2004)
if isCell(self.u_monitor[j]):
for jj in range(len(self.u_monitor[j])):
tmp_shadow = tmp_shadow + (norm(self.u_monitor[j][jj] - prev_u_mon[j][jj])/normM)**2
tmp_gap = tmp_gap + (norm(self.u_monitor[j][jj] - self.u_monitor[j-1][jj])/normM)**2
tmp_shadow = tmp_shadow + (norm(self.u_monitor[j][jj] - prev_u_mon[j][jj]) / normM) ** 2
tmp_gap = tmp_gap + (norm(self.u_monitor[j][jj] - self.u_monitor[j - 1][jj]) / normM) ** 2
else:
tmp_shadow = tmp_shadow + (norm(self.u_monitor[j] - prev_u_mon[j])/normM)**2
tmp_gap = tmp_gap + (norm(self.u_monitor[j] - self.u_monitor[j-1])/normM)**2
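As the comments note, for Douglas-Rachford it is the shadow sequence (the prox of the iterate) that converges to the intersection in the consistent convex case, which is why the monitor tracks shadow changes alongside the gap (||P_SP_Mx - P_Mx||/normM)^2. A toy illustration of that behaviour with two convex sets in the plane (plain NumPy, not proxtoolbox code):

import numpy as np

def P_A(x):                                  # projection onto the x-axis
    return np.array([x[0], 0.0])

def P_B(x, c=np.array([2.0, 0.0]), r=1.0):   # projection onto a disk
    d = x - c
    n = np.linalg.norm(d)
    return x if n <= r else c + r * d / n

x = np.array([5.0, 4.0])
for _ in range(500):
    pa = P_A(x)
    x = x - pa + P_B(2.0 * pa - x)           # Douglas-Rachford update

print(P_A(x))   # the shadow lies in the intersection (a point on [1, 3] x {0})
print(x)        # the iterate itself need not belong to either set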
......@@ -168,11 +168,11 @@ class FeasibilityIterateMonitor(IterateMonitor):
# convert u1 to an array if necessary
# TODO need to simplify this...
if isCell(u1):
z = u1[len(u1)-1]
z = u1[len(u1) - 1]
if self.truth_dim[0] == 1:
z = reshape(z, (1,len(z))) # we want a true matrix not just a vector
z = reshape(z, (1, len(z))) # we want a true matrix not just a vector
elif self.truth_dim[1] == 1:
z = reshape(z, (len(z),1)) # we want a true matrix not just a vector
z = reshape(z, (len(z), 1)) # we want a true matrix not just a vector
else:
if u1.ndim == 1 or self.formulation == 'cyclic':
z = u1
......@@ -183,14 +183,14 @@ class FeasibilityIterateMonitor(IterateMonitor):
z = reshape(z, (len(z),1)) # we want a true matrix not just a vector
else: # product space
if self.truth_dim[0] == 1:
z = u1[0,:]
z = reshape(z, (1,len(z))) # we want a true matrix not just a vector
z = u1[0, :]
z = reshape(z, (1, len(z))) # we want a true matrix not just a vector
elif self.truth_dim[1] == 1:
z = u1[:,0]
z = reshape(z, (len(z),1)) # we want a true matrix not just a vector
z = u1[:, 0]
z = reshape(z, (len(z), 1)) # we want a true matrix not just a vector
else:
if u1.ndim == 3:
z = u1[:,:,0]
z = u1[:, :, 0]
else:
z = u1
"""
......@@ -224,28 +224,34 @@ class FeasibilityIterateMonitor(IterateMonitor):
if isCell(self.u_monitor[0]):
for j in range(len(self.u_monitor[0])):
for jj in range(p):
tmp_change = tmp_change + (norm(self.u_monitor[0][j][:,:,jj] - prev_u_mon[0][j][:,:,jj])/normM)**2
tmp_change = tmp_change + (
norm(self.u_monitor[0][j][:, :, jj] - prev_u_mon[0][j][:, :, jj]) / normM) ** 2
if self.diagnostic:
tmp_shadow = tmp_change
for k in range(1, nProx):
# compute (||P_SP_Mx-P_Mx||/normM)^2:
for j in range(len(self.u_monitor[0])):
for jj in range(p):
tmp_gap = tmp_gap + (norm(self.u_monitor[k][j][:,:,jj] - self.u_monitor[k-1][:,:,jj])/normM)**2
tmp_shadow = tmp_shadow + (norm(self.u_monitor[k][j][:,:,jj] - prev_u_mon[k][j][:,:,jj])/normM)**2
tmp_gap = tmp_gap + (norm(
self.u_monitor[k][j][:, :, jj] - self.u_monitor[k - 1][:, :, jj]) / normM) ** 2
tmp_shadow = tmp_shadow + (norm(
self.u_monitor[k][j][:, :, jj] - prev_u_mon[k][j][:, :, jj]) / normM) ** 2
else:
for j in range(p):
tmp_change = tmp_change + (norm(self.u_monitor[0][:,:,j] - prev_u_mon[0][:,:,j])/normM)**2
tmp_change = tmp_change + (norm(self.u_monitor[0][:, :, j] - prev_u_mon[0][:, :, j]) / normM) ** 2
if self.diagnostic:
tmp_shadow = tmp_change
for k in range(1, nProx):
# compute (||P_SP_Mx-P_Mx||/normM)^2:
tmp_gap = tmp_gap + (norm(self.u_monitor[k][:,:,j] - self.u_monitor[k-1][:,:,j])/normM)**2
tmp_shadow = tmp_shadow + (norm(self.u_monitor[k][:,:,j] - prev_u_mon[k][:,:,j])/normM)**2
tmp_gap = tmp_gap + (
norm(self.u_monitor[k][:, :, j] - self.u_monitor[k - 1][:, :, j]) / normM) ** 2
tmp_shadow = tmp_shadow + (
norm(self.u_monitor[k][:, :, j] - prev_u_mon[k][:, :, j]) / normM) ** 2
if self.diagnostic and self.truth is not None:
z = u1[:,:,0]
rel_error = norm(self.truth - exp(-1j*angle(trace(matmul(self.truth.T.conj(), z)))) * z) / self.norm_truth
z = u1[:, :, 0]
rel_error = norm(
self.truth - exp(-1j * angle(trace(matmul(self.truth.T.conj(), z)))) * z) / self.norm_truth
else: # cells of 4D arrays???
pass
......@@ -289,8 +295,8 @@ class FeasibilityIterateMonitor(IterateMonitor):
output = super(FeasibilityIterateMonitor, self).postprocess(alg, output)
if self.diagnostic:
stats = output['stats']
stats['gaps'] = self.gaps[1:alg.iter+1]
stats['shadow_changes'] = self.shadow_changes[1:alg.iter+1]
stats['gaps'] = self.gaps[1:alg.iter + 1]
stats['shadow_changes'] = self.shadow_changes[1:alg.iter + 1]
if self.truth is not None:
stats['rel_errors'] = self.rel_errors[1:alg.iter+1]
stats['rel_errors'] = self.rel_errors[1:alg.iter + 1]
return output
from numpy import zeros, sqrt
from numpy.linalg import norm
from proxtoolbox import algorithms
from proxtoolbox.utils.cell import Cell, isCell
from proxtoolbox.utils.cell import isCell
from proxtoolbox.utils.size import size_matlab
from numpy import zeros, angle, trace, exp, sqrt, sum, mod
from numpy.linalg import norm
class IterateMonitor:
......@@ -14,8 +15,8 @@ class IterateMonitor:
def __init__(self, experiment):
self.u0 = experiment.u0
self.u_monitor = self.u0 # the part of the sequence that is being monitored
# put u0 as default
assert self.u0 is not None, 'No valid initial guess given'
self.u_monitor = self.u0 # the part of the sequence that is being monitored put u0 as default
self.isCell = isCell(self.u0)
self.norm_data = experiment.norm_data
self.truth = experiment.truth
......@@ -125,7 +126,7 @@ class IterateMonitor:
# in matlab this corresponded to a cell
# here we assume that such a cell is m by n where m = 1
# so far this seems to be always the case
# the following tests attemp to match the ndarray case below
# the following tests attempt to match the ndarray case below
m = 1
n = len(u_m)
if n == self.n_product_Prox:
......@@ -161,11 +162,6 @@ class IterateMonitor:
output['stats']['changes'] = self.changes[1:alg.iter + 1]
return output
@staticmethod
def phase_offset_compensated_norm(arr1, arr2, norm_factor=1, norm_type='fro'):
phase_offset = angle(sum(arr1 * arr2.conj()))
return norm(arr1 - arr2 * exp(1j * phase_offset), norm_type) / norm_factor
def getIterateSize(self, u):
"""
Given an iterate, determine p and q parameters
......@@ -207,7 +203,7 @@ class IterateMonitor:
def calculateObjective(self, alg):
"""
Calculate objective value. The dafault implementation
Calculate objective value. The default implementation
uses the optimality monitor if it exists
Parameters
......
......@@ -101,7 +101,7 @@ class Experiment(metaclass=ExperimentMetaClass):
# (including what would have been generated with verbose mode)
graphics=0,
anim=False,
anim_step = 2, # Tell how frequently the animation is updated, i.e., every `anim_step` step(s)
anim_step=2, # Tell how frequently the animation is updated, i.e., every `anim_step` step(s)
anim_colorbar=False, # if True the animated image will include a colorbar (updated at each iteration)
graphics_display=None,
figure_width=9, # figure width in inches
......@@ -125,6 +125,8 @@ class Experiment(metaclass=ExperimentMetaClass):
self.object = object
self.formulation = formulation
self.output = {}
# Those data members will define the final dimensions
# of the dataset to be used in this experiment.
# At this stage we set them to the desired values
......@@ -135,7 +137,7 @@ class Experiment(metaclass=ExperimentMetaClass):
self.Ny = Ny
if Nz is not None:
self.Nz = Nz
else:
else: # TODO: Bad for experiments with 3D data, where Nz may be None to simply follow the data
self.Nz = 1
# we store here the original values specified by the user
self.desired_product_space_dimension = product_space_dimension
......@@ -213,7 +215,6 @@ class Experiment(metaclass=ExperimentMetaClass):
# for animation:
self.anim_figure = None
self.debug = debug
def initialize(self):
......@@ -273,7 +274,7 @@ class Experiment(metaclass=ExperimentMetaClass):
t = time.time()
self.output = self.runAlgorithm(self.u0)
self.output = self.runAlgorithm(self.u0) # TODO: append to dict? (in overwrite mode)
self.output['stats']['time'] = time.time() - t
......@@ -631,15 +632,15 @@ class Experiment(metaclass=ExperimentMetaClass):
if self.anim_figure == None:
# create figure
plt.ion()
self.anim_figure = plt.figure(figsize = (self.figure_width, self.figure_height),
dpi = self.figure_dpi)
self.anim_figure = plt.figure(figsize=(self.figure_width, self.figure_height),
dpi=self.figure_dpi)
self.anim_figure.canvas.mpl_connect('close_event', self.onAnimationWindowClosing)
self.anim_ax = self.anim_figure.add_subplot(111)
self.anim_bar = None
self.anim_im = self.anim_ax.imshow(image)
self.anim_ax.set_title(title)
self.anim_figure.canvas.draw()
#plt.show(block=False)
# plt.show(block=False)
else:
self.anim_im.set_array(image)
min_val = np.min(image)
......
......@@ -11,7 +11,7 @@ class DegenerateOrbital(PlanarMolecule):
def getDefaultParameters():
defaultParams = {
'experiment_name': '2D ARPES',
'data_filename': None,
'data_filename': '..\Inputdata\OrbitalTomog\\2020_10_27_coronene_Homo1+2_ARPES_2e6counts_corrected_80x80.tif',
'from_intensity_data': True,
'object': 'real',
'degeneracy': 2, # Number of degenerate states to reconstruct
......
......@@ -27,10 +27,6 @@ class OrbitalTomographyExperiment(Experiment):
'proxOperators',
'output', 'data_zeros',
'figure_width', 'figure_height', 'figure_dpi']
# ['dynamic_graphics', 'data', 'data_sq', 'rt_data', 'u0',
# 'truth', 'data_zeros', 'support',
# 'output', 'matlab_output', 'json_output', 'save_output_tf', 'pickle_output',
# 'h5_output', 'savedir', 'save_name']
self.output_ignore_keys = ['u1', 'u2', 'plots']
def loadData(self):
......@@ -72,6 +68,10 @@ class OrbitalTomographyExperiment(Experiment):
self.proxOperators.append('P_M_masked')
self.propagator = 'PropagatorFFTn'
self.inverse_propagator = 'InvPropagatorFFTn'
elif self.experiment_name == 'noisy 3D ARPES':
self.proxOperators.append('Approx_P_M_masked')
self.propagator = 'PropagatorFFTn'
self.inverse_propagator = 'InvPropagatorFFTn'
elif self.experiment_name == 'noisy 2D ARPES':
self.proxOperators.append('Approx_P_M')
self.propagator = 'PropagatorFFT2'
......
......@@ -13,8 +13,8 @@ class OrthogonalOrbitals(PlanarMolecule):
def getDefaultParameters():
defaultParams = {
'experiment_name': '2D ARPES',
'data_filename': None,
'from_intensity_data': True,
'data_filename': '..\Inputdata\OrbitalTomog\\2020_10_27_coronene_Homo_stack_ARPES_2e6counts_corrected_80x80.tif',
'from_intensity_data': False,
'object': 'real',
'constraint': 'sparse real',
'sparsity_parameter': 75,
......@@ -164,7 +164,7 @@ class OrthogonalOrbitals(PlanarMolecule):
self.output['u1'] = self.algorithm.prox1.eval(self.algorithm.u)
self.output['u2'] = self.algorithm.prox2.eval(self.algorithm.u)
figsize = kwargs.pop("figsize", (12, 3))
figsize = kwargs.pop("figsize", (12, 6))
for i, operator in enumerate(self.algorithm.proxOperators):
operator_name = self.proxOperators[i].__name__
f = self.plot_guess(operator.eval(self.algorithm.u),
......
......@@ -11,9 +11,10 @@ from proxtoolbox.utils.orbitaltomog import bin_array, shifted_fft, shifted_ifft,
class PlanarMolecule(OrbitalTomographyExperiment):
@staticmethod
def getDefaultParameters():
# TODO: optimize parameters and proxoperators to get good & consistent phase retrieval using the demo
defaultParams = {
'experiment_name': '2D ARPES',
'data_filename': None,
'experiment_name': 'noisy 2D ARPES', # '2D ARPES', #
'data_filename': '../InputData/OrbitalTomog/coronene_homo1.tif',
'from_intensity_data': False,
'object': 'real',
'constraint': 'sparse real',
......@@ -29,7 +30,7 @@ class PlanarMolecule(OrbitalTomographyExperiment):
'lambda_0': 0.85,
'lambda_max': 0.50,
'lambda_switch': 50,
'data_ball': .999826,
'data_ball': .999826e-30,
'TOL2': 1e-15,
'diagnostic': True,
'algorithm': 'DRl',
......@@ -198,7 +199,10 @@ class PlanarMolecule(OrbitalTomographyExperiment):
ax[0].imshow(self.data)
ax[0].set_title("Measured data")
prop = self.propagator(self)
u_hat = prop.eval(self.algorithm.prox1.eval(self.algorithm.u))
guess = self.algorithm.prox2.eval(self.algorithm.u)
guess = roll_to_pos(guess, pos=tuple([s//2 for s in guess.shape]), move_maximum=True)
guess = roll_to_pos(guess, pos=tuple([s // 2 for s in guess.shape]))
u_hat = prop.eval(self.algorithm.prox1.eval(guess))
ax[1].imshow(abs(u_hat))
ax[1].set_title("Predicted measurement intensity")
ax[2].imshow(complex_to_rgb(u_hat))
......
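roll_to_pos recentres the current guess before it is propagated and compared with the measured data. A plain-NumPy stand-in sketching what such a recentring does, assuming the move_maximum behaviour means "shift the largest-magnitude pixel to the given position" (the real proxtoolbox.utils.orbitaltomog.roll_to_pos may differ in detail):

import numpy as np

def roll_max_to_center(a):
    # Circularly shift `a` so that its largest |value| lands at the array centre.
    center = tuple(s // 2 for s in a.shape)
    peak = np.unravel_index(np.argmax(np.abs(a)), a.shape)
    shift = tuple(c - p for c, p in zip(center, peak))
    return np.roll(a, shift, axis=tuple(range(a.ndim)))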
import sys, os
# set proxpython path
sys.path.append(os.path.dirname(os.path.realpath(__file__)) + "/../../..")
from proxtoolbox.experiments.orbitaltomography import planar_molecule as pm
if __name__ == "__main__": # prevent execution when generating documentation
data_file = '..\\..\\..\\InputData\\OrbitalTomog\\coronene_homo1.tif'
exp_pm = pm.PlanarMolecule(data_filename=data_file,
experiment='noisy 2D ARPES', # 'noisy 2D ARPES' OR '2D ARPES'
constraint='sparse real',
use_sparsity_with_support=True,
sparsity_parameter=40,
TOL=1e-10,
lambda_0=0.85, lambda_max=0.5, lambda_switch=30,
rnd_seed=None) # Disable fixed pseudo-random number generator
# exp_pm.plotInputData()
exp_pm.run()
exp_pm.show()
from numpy import where, log, sum, errstate
from proxtoolbox.proxoperators.proxoperator import ProxOperator, magproj
from numpy import where, log, sum
__all__ = ['P_M', 'P_M_masked', "Approx_P_M", 'Approx_P_M_masked']
......@@ -12,12 +13,8 @@ class P_M(ProxOperator):
def __init__(self, experiment):
"""
Initialization
Parameters
----------
config : dict - Dictionary containing the problem configuration. It must contain the following mapping:
'M' : array_like - non-negative real FOURIER DOMAIN CONSTRAINT
"""
super().__init__(experiment)
self.data = experiment.data
self.prop = experiment.propagator(experiment)
self.invprop = experiment.inverse_propagator(experiment)
......@@ -55,7 +52,7 @@ class P_M_masked(P_M):
experiment: experiment class
"""
super(P_M_masked, self).__init__(experiment)
self.mask = experiment.fmask == 0
self.mask = experiment.fmask # True means not updated
def eval(self, u, prox_idx=None):
"""
......@@ -69,10 +66,11 @@ class P_M_masked(P_M):
-------
array_like - p_M: the projection IN THE PHYSICAL (time) DOMAIN
"""
# assert u.shape == self.mask.shape, 'Non-matching iterate array!'
fourier_space_iterate = self.prop.eval(u)
constrained = magproj(self.data, fourier_space_iterate.copy())
update = where(self.mask, fourier_space_iterate, constrained)
return self.invprop(update)
return self.invprop.eval(update)
class Approx_P_M(P_M):
......@@ -87,6 +85,7 @@ class Approx_P_M(P_M):
u_hat = self.prop.eval(u)
u_hat_sq = abs(u_hat)**2
with errstate(divide='ignore', invalid='ignore'):
tmp = u_hat_sq / self.data_sq
tmp[self.data_zeros] = 1
u_hat_sq[self.data_zeros] = 0
......
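Per the new comment, fmask is True where the Fourier-domain value is left untouched; only the remaining pixels get the measured modulus imposed. A self-contained sketch of that masked magnitude projection, using a plain FFT in place of the experiment's propagator and an angle-based modulus replacement in place of magproj (illustrative names, same-shaped arrays assumed):

import numpy as np

def masked_magnitude_projection(u, data, fmask):
    # data:  measured Fourier modulus
    # fmask: True where the Fourier value is kept as is
    u_hat = np.fft.fft2(u)
    constrained = data * np.exp(1j * np.angle(u_hat))  # impose modulus, keep phase
    update = np.where(fmask, u_hat, constrained)
    return np.fft.ifft2(update)                        # back to the physical domain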
......@@ -58,11 +58,11 @@ class P_Sparsity(ProxOperator):
p_Sparsity : array_like, the projection
"""
u *= self.support # apply support (simple 1 if no support)
p_sparse = 0 * u
sorting = np.argsort(abs(u), axis=None) # gives indices of sorted array in ascending order
indices = np.asarray([unravel_index(sorting[i], u.shape) for i in range(-1 * self.sparsity_parameter, 0)])
p_sparse[indices[:, 0], indices[:, 1]] = u[indices[:, 0], indices[:, 1]]
u_flat = np.ravel(u) # flatten array before sorting
sorting = np.argsort(abs(u_flat), axis=None) # gives indices of sorted array in ascending order
p_sparse = np.zeros_like(sorting, dtype=u.dtype) # create array of zeros
p_sparse[sorting[-1 * int(self.sparsity_parameter):]] = u_flat[sorting[-1 * int(self.sparsity_parameter):]]
p_sparse = np.reshape(p_sparse, u.shape) # recreate original shape
return p_sparse
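The rewritten eval keeps the sparsity_parameter entries of largest magnitude and zeros the rest, working on the flattened array so the operator is no longer tied to 2-D input. An equivalent sketch using np.argpartition, which finds the top k entries without a full sort (illustrative, not the class method itself):

import numpy as np

def sparsity_projection(u, k):
    # Keep the k entries of largest magnitude, zero everything else.
    u_flat = np.ravel(u)
    keep = np.argpartition(np.abs(u_flat), -k)[-k:]
    p_sparse = np.zeros_like(u_flat)
    p_sparse[keep] = u_flat[keep]
    return p_sparse.reshape(u.shape)

# sparsity_projection(np.array([[3.0, -1.0], [0.5, 4.0]]), k=2)
# -> array([[3., 0.], [0., 4.]])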
......@@ -115,6 +115,7 @@ class P_Sparsity_Superpixel(P_Sparsity):
"""
Apply sparsity on superpixels, i.e. on the binned array
"""
def __init__(self, experiment):
super(P_Sparsity_Superpixel, self).__init__(experiment)
# Set the superpixel size:
......@@ -140,6 +141,7 @@ class P_Sparsity_Superpixel_real(P_Sparsity_Superpixel):
"""
Apply real-valued sparsity on superpixels, i.e. on the binned array
"""
def eval(self, u, prox_idx=None):
return super(P_Sparsity_Superpixel_real, self).eval(u.real, prox_idx)
......@@ -166,9 +168,9 @@ class P_nonneg_sparsity(ProxOperator):
m, n, p, q = size_matlab(u)
null = np.zeros(u.shape)
tmp = np.maximum(null, np.real(u))
tmp = np.reshape(tmp, (m*n*p*q), order='F')
I = np.argsort(tmp)[::-1] # descend sort
p_Sparsity = np.zeros(m*n*p*q)
p_Sparsity[I[0:int(self.sparsity_parameter)]] = tmp[I[0:int(self.sparsity_parameter)]]
tmp = np.reshape(tmp, (m * n * p * q), order='F')
indices = np.argsort(tmp)[::-1] # descend sort
p_Sparsity = np.zeros(m * n * p * q)
p_Sparsity[indices[0:int(self.sparsity_parameter)]] = tmp[indices[0:int(self.sparsity_parameter)]]
p_Sparsity = np.reshape(p_Sparsity, u.shape, order='F')
return p_Sparsity
......@@ -5,6 +5,7 @@ from proxtoolbox import proxoperators
from proxtoolbox.utils.cell import Cell, isCell
from proxtoolbox.utils.size import size_matlab
class Prox_product_space(ProxOperator):
"""
Prox operator in a product space:
......@@ -37,7 +38,7 @@ class Prox_product_space(ProxOperator):
----------
u : ndarray or a list of ndarray objects
Input data to be projected
prox_idx : int, optional
prox_index : int, optional
Index of this prox operator
Returns
......
import numpy as np
from numpy import conj, dot, empty, ones, sqrt, sum, zeros, exp, \
nonzero, log, tile, shape, real, zeros_like, pi
# from pyfftw.interfaces.scipy_fftpack import fft2, ifft2
from numpy.fft import fft2, ifft2, fft, ifft , fftshift, ifftshift
# TODO: switch to scipy.fftpack? (is faster supposedly)
from numpy import sqrt
class ProxOperator:
......@@ -68,14 +62,14 @@ def magproj(constr, u):
The projection.
"""
# naive implementation: should eval now, roughly as fast as below
# # naive implementation: should eval now, roughly as fast as below
# mod_u = sqrt(u.real**2+u.imag**2)
# with np.errstate(divide='ignore', invalid='ignore'):
# proj = constr/mod_u
# proj[np.isnan(proj)] = 0 #then mod_u=0 and constr=0
# proj[np.isnan(proj)] = 0 # then mod_u=0 and constr=0
# proj = proj*u
# index_inf = np.isinf(proj)
# proj[index_inf] = constr[index_inf] #then mod_u=0 and constr!=0
# proj[index_inf] = constr[index_inf] # then mod_u=0 and constr!=0
# return proj """
eps = 3e-20
......
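The commented-out block above documents the naive magnitude projection that magproj stabilises with the eps term. Written out as a stand-alone function for reference (a sketch assuming constr and u are same-shaped arrays, not the proxtoolbox implementation):

import numpy as np

def magproj_naive(constr, u):
    # Pointwise projection of u onto the set {v : |v| = constr}.
    mod_u = np.sqrt(u.real**2 + u.imag**2)
    with np.errstate(divide='ignore', invalid='ignore'):
        ratio = constr / mod_u
    ratio[np.isnan(ratio)] = 0             # mod_u == 0 and constr == 0
    index_zero = np.isinf(ratio)           # mod_u == 0 and constr != 0
    proj = ratio * u
    proj[index_zero] = constr[index_zero]  # keep the prescribed magnitude there
    return proj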
import numpy as np
class Cell(np.ndarray):
"""
Array containing any kind of object (inherits from ndarray).
......@@ -17,6 +16,7 @@ class Cell(np.ndarray):
order)
return obj
def isCell(u):
"""
Test if object `u` is an instance of Cell class.
......@@ -31,6 +31,3 @@ def isCell(u):
True if 'u' is an instance of Cell class, False otherwise.
"""
return isinstance(u, Cell)