Commit a262f25c authored by jansen31

partial merge from ot3d

parent bc40eb09
......@@ -6,3 +6,4 @@ build
dist
.vscode/launch.json
.vscode/settings.json
tests/orb_tomog_regression_tests.py
\ No newline at end of file
......@@ -34,4 +34,4 @@ from .PHeBIE_ptychography_objective import *
from .PHeBIE_phase_objective import *
from .Wirtinger import *
from .Wirt_IterateMonitor import *
from .NSLS_sourceloc_objective import *
from .NSLS_sourceloc_objective import *
\ No newline at end of file
......@@ -15,7 +15,7 @@ class Algorithm:
The algorithm object is created when the Experiment object is initialized.
"""
def __init__(self, experiment, iterateMonitor, accelerator = None):
def __init__(self, experiment, iterateMonitor, accelerator=None):
# instantiate prox operators
self.proxOperators = []
......
......@@ -39,7 +39,7 @@ class FeasibilityIterateMonitor(IterateMonitor):
super(FeasibilityIterateMonitor, self).preprocess(alg)
nProx = len(self.proxOperators)
# Even if more detailed diagnostics are not run, the mathematical
# exit criteria will be on step-sizes, for which one needs to monitor the
# norm of the difference between successive iterates of the blocks, in
......@@ -54,10 +54,10 @@ class FeasibilityIterateMonitor(IterateMonitor):
# assumes that u0 is an array
self.u_monitor[0] = self.u0
for j in range(1, nProx):
self.u_monitor[j] = self.proxOperators[j].eval(self.u_monitor[j-1], j)
else: # always creates a u_monitor, issue is whether to monitor the intermediate
self.u_monitor[j] = self.proxOperators[j].eval(self.u_monitor[j - 1], j)
else: # always creates a u_monitor, issue is whether to monitor the intermediate
# steps for the gap, as the above prepares for in the case that
# diagnostic ist true.
# diagnostic is true.
self.u_monitor = self.u0
# set up diagnostic arrays
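
The comments above describe the exit test the monitor prepares for: stop when the normalised norm of the difference between successive iterates drops below a tolerance. A minimal standalone sketch of that check (the name step_size_criterion is hypothetical, not part of the toolbox):

import numpy as np
from numpy.linalg import norm

def step_size_criterion(u_prev, u_new, norm_factor, tol):
    # relative change between successive iterates; the monitor accumulates a
    # per-block version of this quantity into self.changes[alg.iter]
    change = norm(u_new - u_prev) / norm_factor
    return change, change < tol

rng = np.random.default_rng(0)
u_prev = rng.standard_normal((8, 8))
u_new = u_prev + 1e-6 * rng.standard_normal((8, 8))
change, stop = step_size_criterion(u_prev, u_new, norm_factor=norm(u_prev), tol=1e-5)
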
......@@ -105,22 +105,22 @@ class FeasibilityIterateMonitor(IterateMonitor):
# reveals the RELEVANT convergence of the (shadow sequence of the) algorithm:
if isCell(u):
for j in range(len(u)):
self.u_monitor[0][j] = exp(-1j*angle(trace(matmul(prev_u_mon[0][j].T.conj(), u[j]))))*u[j]
self.u_monitor[0][j] = exp(-1j * angle(trace(matmul(prev_u_mon[0][j].T.conj(), u[j])))) * u[j]
else:
# assumes that u is a ndarray
self.u_monitor[0] = exp(-1j*angle(trace(matmul(prev_u_mon[0].T.conj(), u))))*u
self.u_monitor[0] = exp(-1j * angle(trace(matmul(prev_u_mon[0].T.conj(), u)))) * u
u1 = self.proxOperators[nProx-1].eval(self.u_monitor[0]) # Note: to compute u1 which prox_index should we use?
# Matlab takes whatever is defined in method_input, which corresponds
u1 = self.proxOperators[nProx - 1].eval(self.u_monitor[0]) # Note: to compute u1 which prox_index should we use?
# Matlab takes whatever is defined in method_input, which corresponds
# to the last prox evaluation if it was set (not always the case)
# for Douglas Rachford and DRlambda the iterates themselves do not
# converge to the intersection, even if this is nonempty. The Shadows, however,
# do go to the intersection, if this is nonempty.
if self.diagnostic:
self.u_monitor[1] = self.proxOperators[0].eval(u1, 0)
for j in range(1, nProx-1):
self.u_monitor[j+1] = self.proxOperators[j].eval(self.u_monitor[j], j)
for j in range(1, nProx - 1):
self.u_monitor[j + 1] = self.proxOperators[j].eval(self.u_monitor[j], j)
# convert u1 to an array if necessary
if isCell(u1):
_m, _n, p, q = size_matlab(u1[len(u1)-1])
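
The exp(-1j*angle(trace(...))) factor in this hunk removes the arbitrary global phase of the iterate before it is compared with the previously monitored value. A self-contained plain-numpy illustration of that alignment (align_global_phase is a hypothetical helper name):

import numpy as np

def align_global_phase(reference, u):
    # multiplying u by exp(-1j*phi), with phi = angle(trace(reference^H u)),
    # gives the global rotation of u that is closest to reference in Frobenius norm
    phi = np.angle(np.trace(reference.conj().T @ u))
    return np.exp(-1j * phi) * u

reference = np.random.default_rng(1).standard_normal((4, 4)) + 0j
shifted = np.exp(1j * 0.7) * reference
assert np.allclose(align_global_phase(reference, shifted), reference)
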
......@@ -134,9 +134,9 @@ class FeasibilityIterateMonitor(IterateMonitor):
if p == 1 and q == 1:
if isCell(self.u_monitor[0]):
for j in range(len(self.u_monitor[0])):
tmp_change = tmp_change + (norm(self.u_monitor[0][j] - prev_u_mon[0][j])/normM)**2
tmp_change = tmp_change + (norm(self.u_monitor[0][j] - prev_u_mon[0][j]) / normM) ** 2
else:
tmp_change = (norm(self.u_monitor[0] - prev_u_mon[0])/normM)**2
tmp_change = (norm(self.u_monitor[0] - prev_u_mon[0]) / normM) ** 2
if self.diagnostic:
# compute shadow and gap
tmp_shadow = tmp_change
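
The diagnostic branch below accumulates two squared, normalised distances: the change of the shadow (prox image) between iterations, and the gap between consecutive prox outputs of the same iterate. A compact sketch of those two quantities (function names are hypothetical):

from numpy.linalg import norm

def shadow_change_sq(shadow_new, shadow_prev, norm_factor):
    # squared normalised change of the shadow sequence between iterations
    return (norm(shadow_new - shadow_prev) / norm_factor) ** 2

def gap_sq(prox_current, prox_previous_stage, norm_factor):
    # squared normalised distance between consecutive prox outputs,
    # used as a surrogate for the distance between the constraint sets
    return (norm(prox_current - prox_previous_stage) / norm_factor) ** 2
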
......@@ -146,11 +146,11 @@ class FeasibilityIterateMonitor(IterateMonitor):
# (see Bauschke-Combettes-Luke, J. Approx. Theory, 2004)
if isCell(u1):
for jj in range(len(u1)):
tmp_shadow = tmp_shadow + (norm(self.u_monitor[1][jj] - prev_u_mon[1][jj])/normM)**2
tmp_gap = tmp_gap + (norm(self.u_monitor[1][jj] - u1[jj])/normM)**2
tmp_shadow = tmp_shadow + (norm(self.u_monitor[1][jj] - prev_u_mon[1][jj]) / normM) ** 2
tmp_gap = tmp_gap + (norm(self.u_monitor[1][jj] - u1[jj]) / normM) ** 2
else:
tmp_shadow = tmp_shadow + (norm(self.u_monitor[1] - prev_u_mon[1])/normM)**2
tmp_gap = tmp_gap + (norm(self.u_monitor[1] - u1)/normM)**2
tmp_shadow = tmp_shadow + (norm(self.u_monitor[1] - prev_u_mon[1]) / normM) ** 2
tmp_gap = tmp_gap + (norm(self.u_monitor[1] - u1) / normM) ** 2
for j in range(2, nProx):
# For Douglas-Rachford,in general it is appropriate to monitor the
# SHADOWS of the iterates, since in the convex case these converge
......@@ -158,21 +158,21 @@ class FeasibilityIterateMonitor(IterateMonitor):
# (see Bauschke-Combettes-Luke, J. Approx. Theory, 2004)
if isCell(self.u_monitor[j]):
for jj in range(len(self.u_monitor[j])):
tmp_shadow = tmp_shadow + (norm(self.u_monitor[j][jj] - prev_u_mon[j][jj])/normM)**2
tmp_gap = tmp_gap + (norm(self.u_monitor[j][jj] - self.u_monitor[j-1][jj])/normM)**2
tmp_shadow = tmp_shadow + (norm(self.u_monitor[j][jj] - prev_u_mon[j][jj]) / normM) ** 2
tmp_gap = tmp_gap + (norm(self.u_monitor[j][jj] - self.u_monitor[j - 1][jj]) / normM) ** 2
else:
tmp_shadow = tmp_shadow + (norm(self.u_monitor[j] - prev_u_mon[j])/normM)**2
tmp_gap = tmp_gap + (norm(self.u_monitor[j] - self.u_monitor[j-1])/normM)**2
tmp_gap = tmp_gap + (norm(self.u_monitor[j] - self.u_monitor[j-1])/normM)**2
# compute relative error
if self.truth is not None:
# convert u1 to an array if necessary
# TODO need to simplify this...
# TODO need to simplify this...
if isCell(u1):
z = u1[len(u1)-1]
z = u1[len(u1) - 1]
if self.truth_dim[0] == 1:
z = reshape(z, (1,len(z))) # we want a true matrix not just a vector
z = reshape(z, (1, len(z))) # we want a true matrix not just a vector
elif self.truth_dim[1] == 1:
z = reshape(z, (len(z),1)) # we want a true matrix not just a vector
z = reshape(z, (len(z), 1)) # we want a true matrix not just a vector
else:
if u1.ndim == 1 or self.formulation == 'cyclic':
z = u1
......@@ -183,14 +183,14 @@ class FeasibilityIterateMonitor(IterateMonitor):
z = reshape(z, (len(z),1)) # we want a true matrix not just a vector
else: # product space
if self.truth_dim[0] == 1:
z = u1[0,:]
z = reshape(z, (1,len(z))) # we want a true matrix not just a vector
z = u1[0, :]
z = reshape(z, (1, len(z))) # we want a true matrix not just a vector
elif self.truth_dim[1] == 1:
z = u1[:,0]
z = reshape(z, (len(z),1)) # we want a true matrix not just a vector
z = u1[:, 0]
z = reshape(z, (len(z), 1)) # we want a true matrix not just a vector
else:
if u1.ndim == 3:
z = u1[:,:,0]
z = u1[:, :, 0]
else:
z = u1
"""
......@@ -224,35 +224,41 @@ class FeasibilityIterateMonitor(IterateMonitor):
if isCell(self.u_monitor[0]):
for j in range(len(self.u_monitor[0])):
for jj in range(p):
tmp_change = tmp_change + (norm(self.u_monitor[0][j][:,:,jj] - prev_u_mon[0][j][:,:,jj])/normM)**2
tmp_change = tmp_change + (
norm(self.u_monitor[0][j][:, :, jj] - prev_u_mon[0][j][:, :, jj]) / normM) ** 2
if self.diagnostic:
tmp_shadow = tmp_change
for k in range(1, nProx):
# compute (||P_SP_Mx-P_Mx||/normM)^2:
for j in range(len(self.u_monitor[0])):
for jj in range(p):
tmp_gap = tmp_gap + (norm(self.u_monitor[k][j][:,:,jj] - self.u_monitor[k-1][:,:,jj])/normM)**2
tmp_shadow = tmp_shadow + (norm(self.u_monitor[k][j][:,:,jj] - prev_u_mon[k][j][:,:,jj])/normM)**2
tmp_gap = tmp_gap + (norm(
self.u_monitor[k][j][:, :, jj] - self.u_monitor[k - 1][:, :, jj]) / normM) ** 2
tmp_shadow = tmp_shadow + (norm(
self.u_monitor[k][j][:, :, jj] - prev_u_mon[k][j][:, :, jj]) / normM) ** 2
else:
for j in range(p):
tmp_change = tmp_change + (norm(self.u_monitor[0][:,:,j] - prev_u_mon[0][:,:,j])/normM)**2
tmp_change = tmp_change + (norm(self.u_monitor[0][:, :, j] - prev_u_mon[0][:, :, j]) / normM) ** 2
if self.diagnostic:
tmp_shadow = tmp_change
for k in range(1, nProx):
# compute (||P_SP_Mx-P_Mx||/normM)^2:
tmp_gap = tmp_gap + (norm(self.u_monitor[k][:,:,j] - self.u_monitor[k-1][:,:,j])/normM)**2
tmp_shadow = tmp_shadow + (norm(self.u_monitor[k][:,:,j] - prev_u_mon[k][:,:,j])/normM)**2
tmp_gap = tmp_gap + (
norm(self.u_monitor[k][:, :, j] - self.u_monitor[k - 1][:, :, j]) / normM) ** 2
tmp_shadow = tmp_shadow + (
norm(self.u_monitor[k][:, :, j] - prev_u_mon[k][:, :, j]) / normM) ** 2
if self.diagnostic and self.truth is not None:
z = u1[:,:,0]
rel_error = norm(self.truth - exp(-1j*angle(trace(matmul(self.truth.T.conj(), z)))) * z) / self.norm_truth
else: # cells of 4D arrays???
z = u1[:, :, 0]
rel_error = norm(
self.truth - exp(-1j * angle(trace(matmul(self.truth.T.conj(), z)))) * z) / self.norm_truth
else: # cells of 4D arrays???
pass
self.changes[alg.iter] = sqrt(tmp_change)
if self.diagnostic:
self.gaps[alg.iter] = sqrt(tmp_gap)
self.shadow_changes[alg.iter] = sqrt(tmp_shadow) # this is the Euclidean norm of the gap to
self.shadow_changes[alg.iter] = sqrt(tmp_shadow) # this is the Euclidean norm of the gap to
# the unregularized set. To monitor the Euclidean norm of the gap to the
# regularized set is expensive to calculate, so we use this surrogate.
# Since the stopping criteria is on the change in the iterates, this
......@@ -289,8 +295,8 @@ class FeasibilityIterateMonitor(IterateMonitor):
output = super(FeasibilityIterateMonitor, self).postprocess(alg, output)
if self.diagnostic:
stats = output['stats']
stats['gaps'] = self.gaps[1:alg.iter+1]
stats['shadow_changes'] = self.shadow_changes[1:alg.iter+1]
stats['gaps'] = self.gaps[1:alg.iter + 1]
stats['shadow_changes'] = self.shadow_changes[1:alg.iter + 1]
if self.truth is not None:
stats['rel_errors'] = self.rel_errors[1:alg.iter+1]
return output
\ No newline at end of file
stats['rel_errors'] = self.rel_errors[1:alg.iter + 1]
return output
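
The postprocess step slices the preallocated diagnostic arrays down to the iterations actually performed before returning them in the stats dictionary. A tiny sketch of that pattern (values are assumed for illustration):

import numpy as np

maxiter, last_iter = 100, 37             # hypothetical values
gaps = np.zeros(maxiter + 1)             # preallocated like self.gaps
stats = {'gaps': gaps[1:last_iter + 1]}  # mirrors stats['gaps'] = self.gaps[1:alg.iter + 1]
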
from numpy import zeros, sqrt
from numpy.linalg import norm
from proxtoolbox import algorithms
from proxtoolbox.utils.cell import Cell, isCell
from proxtoolbox.utils.cell import isCell
from proxtoolbox.utils.size import size_matlab
from numpy import zeros, angle, trace, exp, sqrt, sum, mod
from numpy.linalg import norm
class IterateMonitor:
......@@ -14,8 +15,8 @@ class IterateMonitor:
def __init__(self, experiment):
self.u0 = experiment.u0
self.u_monitor = self.u0 # the part of the sequence that is being monitored
# put u0 as default
assert self.u0 is not None, 'No valid initial guess given'
self.u_monitor = self.u0 # the part of the sequence that is being monitored put u0 as default
self.isCell = isCell(self.u0)
self.norm_data = experiment.norm_data
self.truth = experiment.truth
......@@ -122,10 +123,10 @@ class IterateMonitor:
u_m = self.u_monitor
# Python code for [m,n,p] = size(u_m)
if isCell(u_m):
# in matlab this corresponded to a cell
# in matlab this corresponded to a cell
# here we assume that such a cell is m by n where m = 1
# so far this seems to be always the case
# the following tests attemp to match the ndarray case below
# the following tests attempt to match the ndarray case below
m = 1
n = len(u_m)
if n == self.n_product_Prox:
......@@ -161,11 +162,6 @@ class IterateMonitor:
output['stats']['changes'] = self.changes[1:alg.iter + 1]
return output
@staticmethod
def phase_offset_compensated_norm(arr1, arr2, norm_factor=1, norm_type='fro'):
phase_offset = angle(sum(arr1 * arr2.conj()))
return norm(arr1 - arr2 * exp(1j * phase_offset), norm_type) / norm_factor
def getIterateSize(self, u):
"""
Given an iterate, determine p and q parameters
......@@ -207,7 +203,7 @@ class IterateMonitor:
def calculateObjective(self, alg):
"""
Calculate objective value. The dafault implementation
Calculate objective value. The default implementation
uses the optimality monitor if it exists
Parameters
......
......@@ -101,7 +101,7 @@ class Experiment(metaclass=ExperimentMetaClass):
# (including what would have been generated with verbose mode)
graphics=0,
anim=False,
anim_step = 2, # Tell how frequently the animation is updated, i.e., every `anim_step` step(s)
anim_step=2, # Tell how frequently the animation is updated, i.e., every `anim_step` step(s)
anim_colorbar=False, # if True the animated image will include a colorbar (updated at each iteration)
graphics_display=None,
figure_width=9, # figure width in inches
......@@ -125,6 +125,8 @@ class Experiment(metaclass=ExperimentMetaClass):
self.object = object
self.formulation = formulation
self.output = {}
# Those data members will define the final dimensions
# of the dataset to be used in this experiment.
# At this stage we set them to the desired values
......@@ -135,7 +137,7 @@ class Experiment(metaclass=ExperimentMetaClass):
self.Ny = Ny
if Nz is not None:
self.Nz = Nz
else:
else: # TODO: Bad for experiments with 3D data, where Nz may be None to simply follow the data
self.Nz = 1
# we store here the original values specified by the user
self.desired_product_space_dimension = product_space_dimension
......@@ -176,11 +178,11 @@ class Experiment(metaclass=ExperimentMetaClass):
self.accelerator_name = accelerator_name
self.rotate = rotate # tells the iterate monitor to rotate the iterates
# to a fixed global reference
# to a fixed global reference
self.rnd_seed = rnd_seed
# additional attributes
self.data = None
self.data_sq = None
self.norm_data = None
......@@ -191,9 +193,9 @@ class Experiment(metaclass=ExperimentMetaClass):
self.truth = None
self.truth_dim = None
self.norm_truth = None
self.optimality_monitor = None
self.proj_iter = None # not sure what it does
self.proxOperators = []
......@@ -201,8 +203,8 @@ class Experiment(metaclass=ExperimentMetaClass):
self.productProxOperators = []
self.n_product_Prox = None # size of the product space
# TODO: which default: None, 0 or 1 ?
# see how it is used
# TODO: which default: None, 0 or 1 ?
# see how it is used
self.propagator = None
self.inverse_propagator = None
......@@ -213,7 +215,6 @@ class Experiment(metaclass=ExperimentMetaClass):
# for animation:
self.anim_figure = None
self.debug = debug
def initialize(self):
......@@ -231,8 +232,8 @@ class Experiment(metaclass=ExperimentMetaClass):
# load data
self.loadData()
self.reshapeData(self.Nx, self.Ny, self.Nz) # TODO need to revisit this
# - arguments not needed in particular
self.reshapeData(self.Nx, self.Ny, self.Nz) # TODO need to revisit this
# - arguments not needed in particular
if self.TOL2 is None:
self.TOL2 = 1e-20
......@@ -273,7 +274,7 @@ class Experiment(metaclass=ExperimentMetaClass):
t = time.time()
self.output = self.runAlgorithm(self.u0)
self.output = self.runAlgorithm(self.u0) # TODO: append to dict? (in overwrite mode)
self.output['stats']['time'] = time.time() - t
......@@ -282,7 +283,7 @@ class Experiment(metaclass=ExperimentMetaClass):
self.output['stats']['time'], "seconds.")
self.postprocess()
if self.save_output:
self.saveOutput()
......@@ -309,8 +310,8 @@ class Experiment(metaclass=ExperimentMetaClass):
dictionary containing the output of the algorithm. In
particular, it contains the last iterate and various
statistics.
"""
return self.algorithm.run(u)
"""
return self.algorithm.run(u)
def loadData(self):
"""
......@@ -631,15 +632,15 @@ class Experiment(metaclass=ExperimentMetaClass):
if self.anim_figure == None:
# create figure
plt.ion()
self.anim_figure = plt.figure(figsize = (self.figure_width, self.figure_height),
dpi = self.figure_dpi)
self.anim_figure = plt.figure(figsize=(self.figure_width, self.figure_height),
dpi=self.figure_dpi)
self.anim_figure.canvas.mpl_connect('close_event', self.onAnimationWindowClosing)
self.anim_ax = self.anim_figure.add_subplot(111)
self.anim_bar = None
self.anim_im = self.anim_ax.imshow(image)
self.anim_ax.set_title(title)
self.anim_figure.canvas.draw()
#plt.show(block=False)
# plt.show(block=False)
else:
self.anim_im.set_array(image)
min_val = np.min(image)
......
......@@ -11,7 +11,7 @@ class DegenerateOrbital(PlanarMolecule):
def getDefaultParameters():
defaultParams = {
'experiment_name': '2D ARPES',
'data_filename': None,
'data_filename': '..\Inputdata\OrbitalTomog\\2020_10_27_coronene_Homo1+2_ARPES_2e6counts_corrected_80x80.tif',
'from_intensity_data': True,
'object': 'real',
'degeneracy': 2, # Number of degenerate states to reconstruct
......
......@@ -27,10 +27,6 @@ class OrbitalTomographyExperiment(Experiment):
'proxOperators',
'output', 'data_zeros',
'figure_width', 'figure_height', 'figure_dpi']
# ['dynamic_graphics', 'data', 'data_sq', 'rt_data', 'u0',
# 'truth', 'data_zeros', 'support',
# 'output', 'matlab_output', 'json_output', 'save_output_tf', 'pickle_output',
# 'h5_output', 'savedir', 'save_name']
self.output_ignore_keys = ['u1', 'u2', 'plots']
def loadData(self):
......@@ -72,6 +68,10 @@ class OrbitalTomographyExperiment(Experiment):
self.proxOperators.append('P_M_masked')
self.propagator = 'PropagatorFFTn'
self.inverse_propagator = 'InvPropagatorFFTn'
elif self.experiment_name == 'noisy 3D ARPES':
self.proxOperators.append('Approx_P_M_masked')
self.propagator = 'PropagatorFFTn'
self.inverse_propagator = 'InvPropagatorFFTn'
elif self.experiment_name == 'noisy 2D ARPES':
self.proxOperators.append('Approx_P_M')
self.propagator = 'PropagatorFFT2'
......
......@@ -13,8 +13,8 @@ class OrthogonalOrbitals(PlanarMolecule):
def getDefaultParameters():
defaultParams = {
'experiment_name': '2D ARPES',
'data_filename': None,
'from_intensity_data': True,
'data_filename': '..\Inputdata\OrbitalTomog\\2020_10_27_coronene_Homo_stack_ARPES_2e6counts_corrected_80x80.tif',
'from_intensity_data': False,
'object': 'real',
'constraint': 'sparse real',
'sparsity_parameter': 75,
......@@ -164,7 +164,7 @@ class OrthogonalOrbitals(PlanarMolecule):
self.output['u1'] = self.algorithm.prox1.eval(self.algorithm.u)
self.output['u2'] = self.algorithm.prox2.eval(self.algorithm.u)
figsize = kwargs.pop("figsize", (12, 3))
figsize = kwargs.pop("figsize", (12, 6))
for i, operator in enumerate(self.algorithm.proxOperators):
operator_name = self.proxOperators[i].__name__
f = self.plot_guess(operator.eval(self.algorithm.u),
......
......@@ -11,9 +11,10 @@ from proxtoolbox.utils.orbitaltomog import bin_array, shifted_fft, shifted_ifft,
class PlanarMolecule(OrbitalTomographyExperiment):
@staticmethod
def getDefaultParameters():
# TODO: optimize parameters and proxoperators to get good & consistent phase retrieval using the demo
defaultParams = {
'experiment_name': '2D ARPES',
'data_filename': None,
'experiment_name': 'noisy 2D ARPES', # '2D ARPES', #
'data_filename': '../InputData/OrbitalTomog/coronene_homo1.tif',
'from_intensity_data': False,
'object': 'real',
'constraint': 'sparse real',
......@@ -29,7 +30,7 @@ class PlanarMolecule(OrbitalTomographyExperiment):
'lambda_0': 0.85,
'lambda_max': 0.50,
'lambda_switch': 50,
'data_ball': .999826,
'data_ball': .999826e-30,
'TOL2': 1e-15,
'diagnostic': True,
'algorithm': 'DRl',
......@@ -198,7 +199,10 @@ class PlanarMolecule(OrbitalTomographyExperiment):
ax[0].imshow(self.data)
ax[0].set_title("Measured data")
prop = self.propagator(self)
u_hat = prop.eval(self.algorithm.prox1.eval(self.algorithm.u))
guess = self.algorithm.prox2.eval(self.algorithm.u)
guess = roll_to_pos(guess, pos=tuple([s//2 for s in guess.shape]), move_maximum=True)
guess = roll_to_pos(guess, pos=tuple([s // 2 for s in guess.shape]))
u_hat = prop.eval(self.algorithm.prox1.eval(guess))
ax[1].imshow(abs(u_hat))
ax[1].set_title("Predicted measurement intensity")
ax[2].imshow(complex_to_rgb(u_hat))
......
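
The plotting change above re-centres the current guess (via roll_to_pos) before propagating it, so the predicted intensity is computed from a centred object. A plain-numpy sketch of such a centring step (roll_to_center is a hypothetical stand-in, not the toolbox function):

import numpy as np

def roll_to_center(a):
    # roll the array so its absolute maximum lands at the geometric centre
    peak = np.unravel_index(np.argmax(np.abs(a)), a.shape)
    center = tuple(s // 2 for s in a.shape)
    shift = tuple(c - p for c, p in zip(center, peak))
    return np.roll(a, shift, axis=tuple(range(a.ndim)))
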
import sys, os
# set proxpython path
sys.path.append(os.path.dirname(os.path.realpath(__file__)) + "/../../..")
from proxtoolbox.experiments.orbitaltomography import planar_molecule as pm
if __name__ == "__main__": # prevent execution when generating documentation
data_file = '..\\..\\..\\InputData\\OrbitalTomog\\coronene_homo1.tif'
exp_pm = pm.PlanarMolecule(data_filename=data_file,
experiment='noisy 2D ARPES', # 'noisy 2D ARPES' OR '2D ARPES'
constraint='sparse real',
use_sparsity_with_support=True,
sparsity_parameter=40,
TOL=1e-10,
lambda_0=0.85, lambda_max=0.5, lambda_switch=30,
rnd_seed=None) # Disable fixed pseudo-random number generator
# exp_pm.plotInputData()
exp_pm.run()
exp_pm.show()
from numpy import where, log, sum, errstate
from proxtoolbox.proxoperators.proxoperator import ProxOperator, magproj
from numpy import where, log, sum
__all__ = ['P_M', 'P_M_masked', "Approx_P_M", 'Approx_P_M_masked']
......@@ -12,12 +13,8 @@ class P_M(ProxOperator):
def __init__(self, experiment):
"""
Initialization
Parameters
----------
config : dict - Dictionary containing the problem configuration. It must contain the following mapping:
'M' : array_like - non-negative real FOURIER DOMAIN CONSTRAINT
"""
super().__init__(experiment)
self.data = experiment.data
self.prop = experiment.propagator(experiment)
self.invprop = experiment.inverse_propagator(experiment)
......@@ -55,7 +52,7 @@ class P_M_masked(P_M):
experiment: experiment class
"""
super(P_M_masked, self).__init__(experiment)
self.mask = experiment.fmask == 0
self.mask = experiment.fmask # True means not updated
def eval(self, u, prox_idx=None):
"""
......@@ -69,10 +66,11 @@ class P_M_masked(P_M):
-------
array_like - p_M: the projection IN THE PHYSICAL (time) DOMAIN
"""
# assert u.shape == self.mask.shape, 'Non-matching iterate array!'
fourier_space_iterate = self.prop.eval(u)
constrained = magproj(self.data, fourier_space_iterate.copy())
update = where(self.mask, fourier_space_iterate, constrained)
return self.invprop(update)
return self.invprop.eval(update)
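
P_M_masked keeps the iterate unchanged on masked Fourier pixels (mask True means "not updated") and enforces the measured magnitude everywhere else. A self-contained numpy sketch of that idea; this is not the toolbox implementation, which goes through its own propagator and magproj:

import numpy as np

def masked_magnitude_projection(u, measured_magnitude, mask):
    # masked pixels keep the current Fourier values, the rest get the measured
    # magnitude while retaining the iterate's phase
    u_hat = np.fft.fftn(u)
    constrained = measured_magnitude * np.exp(1j * np.angle(u_hat))
    updated = np.where(mask, u_hat, constrained)
    return np.fft.ifftn(updated)
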
class Approx_P_M(P_M):
......@@ -87,7 +85,8 @@ class Approx_P_M(P_M):
u_hat = self.prop.eval(u)
u_hat_sq = abs(u_hat)**2
tmp = u_hat_sq / self.data_sq
with errstate(divide='ignore', invalid='ignore'):
tmp = u_hat_sq / self.data_sq
tmp[self.data_zeros] = 1
u_hat_sq[self.data_zeros] = 0
I_u_hat = tmp == 0
......
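
The new Approx_P_M code wraps the elementwise ratio in numpy.errstate and patches the entries where the measured data is zero, so zero-intensity pixels neither warn nor produce NaNs. A minimal sketch of that pattern (safe_ratio is a hypothetical name):

import numpy as np

def safe_ratio(numerator, denominator, zero_mask):
    # suppress divide/invalid warnings, then overwrite the zero-data entries
    with np.errstate(divide='ignore', invalid='ignore'):
        ratio = numerator / denominator
    ratio[zero_mask] = 1.0
    return ratio
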
......@@ -58,11 +58,11 @@ class P_Sparsity(ProxOperator):
p_Sparsity : array_like, the projection
"""
u *= self.support # apply support (simple 1 if no support)
p_sparse = 0 * u
sorting = np.argsort(abs(u), axis=None) # gives indices of sorted array in ascending order
indices = np.asarray([unravel_index(sorting[i], u.shape) for i in range(-1 * self.sparsity_parameter, 0)])
p_sparse[indices[:, 0], indices[:, 1]] = u[indices[:, 0], indices[:, 1]]
u_flat = np.ravel(u) # flatten array before sorting
sorting = np.argsort(abs(u_flat), axis=None) # gives indices of sorted array in ascending order
p_sparse = np.zeros_like(sorting, dtype=u.dtype) # create array of zeros
p_sparse[sorting[-1 * int(self.sparsity_parameter):]] = u_flat[sorting[-1 * int(self.sparsity_parameter):]]
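
The reworked sparsity projection flattens the array, sorts by magnitude, and copies only the largest sparsity_parameter entries into an array of zeros, which generalises the previous 2D-only indexing to arbitrary dimensions. A standalone sketch of the same idea (keep_k_largest is a hypothetical name):

import numpy as np

def keep_k_largest(u, k):
    # zero all but the k largest-magnitude entries, preserving the input shape
    u_flat = np.ravel(u)
    order = np.argsort(np.abs(u_flat))        # ascending by magnitude
    p_sparse = np.zeros_like(u_flat)
    p_sparse[order[-int(k):]] = u_flat[order[-int(k):]]
    return p_sparse.reshape(u.shape)
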