Commit d1a788d7 authored by s.gretchko's avatar s.gretchko
Browse files

Merged Experiment and DataProcessor classes

parent fa3112db
......@@ -2,6 +2,6 @@
import SetProxPythonPath
from proxtoolbox.experiments.phase.Krueger_Experiment import Krueger_Experiment
CDI = Krueger_Experiment(algorithm = 'AvP', data_ball = 24)
CDI.run()
CDI.show()
Krueger = Krueger_Experiment(algorithm = 'AvP', data_ball = 24)
Krueger.run()
Krueger.show()
......@@ -22,14 +22,14 @@ class Algorithm:
self.prox1 = self.proxOperators[0]
self.prox2 = self.proxOperators[1]
self.norm_data = experiment.data_processor.norm_data
self.norm_data = experiment.norm_data
self.Nx = experiment.Nx
self.Ny = experiment.Ny
self.Nz = experiment.Nz
self.product_space_dimension = experiment.product_space_dimension
self.truth = experiment.data_processor.truth
self.truth_dim = experiment.data_processor.truth_dim
self.norm_truth = experiment.data_processor.norm_truth
self.truth = experiment.truth
self.truth_dim = experiment.truth_dim
self.norm_truth = experiment.norm_truth
self.diagnostic = experiment.diagnostic
self.maxIter = experiment.MAXIT
......
......@@ -12,15 +12,15 @@ class IterateMonitor:
"""
def __init__(self, experiment):
self.u0 = experiment.data_processor.u0
self.u0 = experiment.u0
self.u_monitor = self.u0 # the part of the sequence that is being monitored
# put u0 as default
self.isCell = isCell(self.u0)
self.norm_data = experiment.data_processor.norm_data
self.truth = experiment.data_processor.truth
self.truth_dim = experiment.data_processor.truth_dim
self.norm_truth = experiment.data_processor.norm_truth
self.norm_data = experiment.norm_data
self.truth = experiment.truth
self.truth_dim = experiment.truth_dim
self.norm_truth = experiment.norm_truth
self.diagnostic = experiment.diagnostic
self.rotate = experiment.rotate
self.n_product_Prox = experiment.n_product_Prox
......@@ -111,7 +111,7 @@ class IterateMonitor:
u1 = u_m
u2 = u_m
else: # ndarray
m, n, p, q = size_matlab(u_m)
m, n, p, _q = size_matlab(u_m)
if n == self.n_product_Prox:
u1 = u_m[:,0]
u2 = u_m[:,n-1]
......
......@@ -3,6 +3,7 @@ from proxtoolbox.experiments.dataProcessor import DataProcessor
from proxtoolbox import algorithms
from proxtoolbox import proxoperators
from proxtoolbox.proxoperators.proxoperator import ProxOperator
from proxtoolbox.Utilities.cell import Cell, isCell
import sys
import os
......@@ -53,29 +54,29 @@ class Experiment(metaclass = ExperimentMetaClass):
'''
Base class for ProxToolbox Experiments
This class is meant to be derived from to implement concrete experiments.
The experiment class contains all the information that describes
a given experiment. Essentially, it contains a data processor that
describes the data used in the experiment and an instance of the
algorithm that works on this data. Derived classes must implement
the static method getDefaultParameters() which returns a dictionary
containing the default parameters used by this experiment
The experiment class contains all the information
that describes a given experiment.
Essentially, it loads or generates the data used in the
experiment and instantiates the algorithm that works on
this data. This class is meant to be derived from to implement
concrete experiments. Derived classes must implement
the static method getDefaultParameters() which returns a
dictionary containing the default parameters used by this
experiment. Also required is the implementation of the
loadData() method to load or generate the data.
'''
def __init__(self,
experiment_name = None,
data_processor_name = None,
data_processor_path = None,
data_processor_package = None,
constraint = 'amplitude only',
object = 'complex',
formulation = 'product space',
product_space_dimension = 1,
sets = None,
Nx = 128,
Ny = 128,
Nz = 1,
dim = 4,
Nx = None,
Ny = None,
Nz = None,
noise = 'none',
algorithm = None,
numruns = 1,
......@@ -108,20 +109,32 @@ class Experiment(metaclass = ExperimentMetaClass):
automatically after the experiment object is fully constructed.
"""
self.experiment_name = experiment_name
self.data_processor_name = data_processor_name
self.data_processor_path = data_processor_path
self.data_processor_package = data_processor_package
self.data_processor = None
self.constraint = constraint
self.object = object
self.formulation = formulation
# Those data members will define the final dimensions
# of the dataset to be used in this experiment.
# At this stage we set them to the desired values
# They may be changed by loadData() and/or ReshapeData()
self.product_space_dimension = product_space_dimension
self.sets = sets
self.Nx = Nx
self.Ny = Ny
self.Nz = Nz
self.dim = dim
if Nz is not None:
self.Nz = Nz
else:
self.Nz = 1
# we store here the original values specified by the user
self.desired_product_space_dimension = product_space_dimension
self.desired_sets = sets
self.desired_Nx = Nx
self.desired_Ny = Ny
self.desired_Nz = Nz
#self.dim = dim #TODO maybe not needed ?
self.noise = noise
self.data_ball = data_ball
self.diagnostic = diagnostic
......@@ -146,24 +159,31 @@ class Experiment(metaclass = ExperimentMetaClass):
self.json_output = json_output
self.matlab_output = matlab_output
self.iterate_monitor_name = iterate_monitor_name
self.accelerator_name = accelerator_name
self.rotate = rotate # tells the iterate monitor to rotate the iterates
# to a fixed global reference
self.data = None
self.data_sq = None
self.norm_data = None
self.norm_data_sq = None
self.rt_data = None
self.norm_rt_data = None
self.u0 = None
self.truth = None
self.truth_dim = None
self.norm_truth = None
self.proj_iter = None # not sure what it does
#self.proxOperators = ['', '', ''] # it is assumed thereafter that
# the size of list is least 3
self.proxOperators = []
self.nProx = None # is the number of prox operators?
self.productProxOperators = []
self.n_product_Prox = None # size of the product space
# TODO: which default: None, 0 or 1 ? see how it is used
self.propagator = None
self.inverse_propagator = None
......@@ -174,34 +194,16 @@ class Experiment(metaclass = ExperimentMetaClass):
"""
Initialize experiment object. This method is automatically called by
the metaclass (ExperimentMetaClass) once the experiment object is
fully constructed. This method instanciates the data processor which
fully constructed. This method calls the method loadData() that
loads or generates the data required for this experiment.
Additionally, it finds the classes of the prox operators, and
instantiates the iterate monitor and the algorithm used to run
this experiment.
"""
# instantiate data processor
if self.data_processor_name is not None:
module = None
data_processor_fullname = self.data_processor_name
if self.data_processor_path is not None:
sys.path.append(self.data_processor_path)
if self.data_processor_package is not None:
data_processor_fullname = self.data_processor_package + '.' + self.data_processor_name
# try to load the module that contains the data processor
try:
module = importlib.import_module(data_processor_fullname)
data_processor_attr = getattr(module, self.data_processor_name)
self.data_processor = data_processor_attr() # instantiate data processor object
except Exception as e:
errMsg = e.__str__()
print ("Failed to import data processor module " + self.data_processor_name + ": " + errMsg)
raise
# load data
if self.data_processor is not None:
self.data_processor.loadData(self)
self.data_processor.reshapeData(self.Nx, self.Ny, self.Nz)
self.loadData()
self.reshapeData(self.Nx, self.Ny, self.Nz)
# define the prox operators to be used for this experiment (list their names)
# default implementation chooses prox operators based on constraint
......@@ -263,7 +265,7 @@ class Experiment(metaclass = ExperimentMetaClass):
t = time.time()
self.output = self.algorithm.run(self.data_processor.u0) # run algorithm
self.output = self.algorithm.run(self.u0) # run algorithm
self.output['stats']['time'] = time.time() - t
......@@ -276,6 +278,13 @@ class Experiment(metaclass = ExperimentMetaClass):
if self.verbose:
self.printBenchmark()
    def loadData(self):
        """
        Load or generate the dataset that will be used for
        this experiment. Create the initial iterate.

        Abstract method: derived Experiment classes must override this
        to populate the data members consumed later (e.g. data, rt_data,
        norm_rt_data, u0, sets — see reshapeData() and the algorithms).

        Raises
        ------
        NotImplementedError
            Always; this base implementation is just an interface.
        """
        raise NotImplementedError("This is just an abstract interface")
def setupProxOperators(self):
"""
......@@ -309,6 +318,43 @@ class Experiment(metaclass = ExperimentMetaClass):
self.proxOperators.append(None)
self.proxOperators.append('Approx_PM_Poisson') # Patrick: This is just to monitor the change of phases!
    def reshapeData(self, Nx, Ny, Nz):
        """
        Reshape data based on the given arguments. This method is called
        during the initialization of the Experiment object, after the call
        to loadData().

        Parameters
        ----------
        Nx, Ny, Nz : int
            The new dimensions to be used. (Nz is not referenced in this
            default implementation.)

        Notes
        -----
        This is a default implementation that is used so far for phase
        experiments.
        TODO: too many data fields... See if we can reduce the number of
        data fields. Could use a variable that tells us which kind of data
        we are dealing with.
        """
        # reshape and rename the data
        if self.data_sq is None:
            # need to put data into required format: keep the original
            # (squared) measurement in *_sq and expose the square-root
            # data as the working 'data'
            self.data_sq = self.data
            self.data = self.rt_data
            if self.norm_data is not None:
                self.norm_data_sq = self.norm_data
            self.norm_data = self.norm_rt_data
        # The following lines of code correspond to the Cone_and_Sphere branch.
        # However, the case where the data is a cell is not treated (as in the
        # Matlab implementation).
        # TODO: Need to check if this is OK.
        if not isCell(self.data):
            tmp = self.data.shape
            if tmp[0] == 1 or tmp[1] == 1:
                # data was stored as a flat (row/column) vector: restore
                # the 2D Nx-by-Ny layout
                self.data_sq = self.data_sq.reshape(Nx, Ny)
                # the prox algorithms work with the square root of the measurement:
                self.data = self.data.reshape(Nx, Ny)
def saveOutput(self):
......@@ -396,7 +442,6 @@ class Experiment(metaclass = ExperimentMetaClass):
a_posteriori_error = rate/(1-rate)*changes[iterCount-2]
benchmark['a_posteriori_error'] = a_posteriori_error
if self.diagnostic:
self.truth = self.data_processor.truth
if self.truth is not None and 'rel_errors' in self.output['stats']:
rel_errors = self.output['stats']['rel_errors']
if iterCount == 1:
......@@ -413,33 +458,33 @@ class Experiment(metaclass = ExperimentMetaClass):
TODO: deprecated in Cone_and_Sphere
"""
u1 = self.data_processor.u0.copy()
u2 = self.data_processor.u0.copy()
u1 = self.u0.copy()
u2 = self.u0.copy()
proj1 = self.proxOperators[0](self)
if self.formulation == 'sequential':
for j in range(self.product_space_dimension):
self.proj_iter = j #TODO this should be passed to the prox operator
proj1 = self.proxOperators[0](self)
u1[:, :, j] = proj1.eval(self.data_processor.u0)
u1[:, :, j] = proj1.eval(self.u0)
self.proj_iter = mod(j, self.product_space_dimension) + 1
proj1 = self.proxOperators[0](self) #TODO recreate prox to update proj_iter
u1[:, :, j] = proj1.eval(self.data_processor.u0)
u1[:, :, j] = proj1.eval(self.u0)
else: # i.e. formulation=='product space'
proj1 = self.proxOperators[0](self)
u1 = proj1.eval(self.data_processor.u0)
u1 = proj1.eval(self.u0)
proj2 = self.proxOperators[1](self)
u2 = proj2.eval(u1)
# estimate the gap in the relevant metric
if self.Nx == 1 or self.Ny == 1:
tmp_gap = square(norm(u1 - u2) / self.data_processor.norm_rt_data)
tmp_gap = square(norm(u1 - u2) / self.norm_rt_data)
elif self.product_space_dimension == 1:
tmp_gap = (norm(u1 - u2) / self.data_processor.norm_rt_data) ** 2
tmp_gap = (norm(u1 - u2) / self.norm_rt_data) ** 2
else:
tmp_gap = 0
for j in range(self.product_space_dimension):
# compute (||P_Sx-P_Mx||/norm_data)^2:
tmp_gap = tmp_gap + (norm(u1[:, :, j] - u2[:, :, j]) / self.data_processor.norm_rt_data) ** 2
tmp_gap = tmp_gap + (norm(u1[:, :, j] - u2[:, :, j]) / self.norm_rt_data) ** 2
gap_0 = sqrt(tmp_gap)
return gap_0
......
from proxtoolbox.experiments.dataProcessor import DataProcessor
import numpy as np
from numpy import exp, sqrt, log2, log10, ceil, floor, unravel_index, argmax, zeros
from scipy.io import loadmat
from numpy.fft import fftshift, ifft2, fft2
from proxtoolbox.Utilities.mypoissonrnd import mypoissonrnd
from proxtoolbox.Utilities.gaussian import gaussian
from proxtoolbox.Utilities.cell import Cell, isCell
class CDI_DataProcessor(DataProcessor):
    """
    Data processor for the CDI experiment

    Loads the measured diffraction intensity, centers it in a
    power-of-two-sized array, builds a square support mask, and
    creates the initial iterate for the solver.
    """

    def __init__(self):
        """
        Initialize CDI-specific data members to None; they are
        populated by loadData().
        """
        super(CDI_DataProcessor, self).__init__()
        self.magn = None              # magnification factor
        self.farfield = None          # True when the farfield formula is used
        self.data_zeros = None        # indices where the measured magnitude is zero
        self.support_idx = None       # indices of the nonzero support pixels
        self.abs_illumination = None  # binary support mask (support constraint)
        self.supp_phase = None        # not set by loadData() — TODO confirm intended use

    def loadData(self, experiment):
        """
        Load CDI dataset. Create the initial iterate.

        Reads '../InputData/Phase/CDI_intensity.mat', centers the
        diffraction pattern in an N-by-N array (N a power of two chosen
        from experiment.Nx and the data resolution), defines a square
        support, and draws a random initial guess restricted to that
        support.

        Parameters
        ----------
        experiment : Experiment
            The experiment object; only its Nx attribute is read here.
        """
        np.random.seed(1234)  # fixed seed for reproducible tests

        print('Loading data file CDI_intensity')
        f = loadmat('../InputData/Phase/CDI_intensity.mat')
        # diffraction pattern
        dp = f['intensity']['img'][0, 0]
        orig_res = max(dp.shape[0], dp.shape[1])  # actual data size
        step_up = ceil(log2(experiment.Nx) - log2(orig_res))
        workres = 2**(step_up) * 2**(floor(log2(orig_res)))  # desired array size
        N = int(workres)
        # center data and use region of interest around center
        # central pixel
        # find max data value; Xi, Xj are 1-based (Matlab-style) coordinates
        argmx = unravel_index(argmax(dp), dp.shape)
        Xi = argmx[0] + 1
        Xj = argmx[1] + 1
        # get desired roi:
        # necessary conversion (offsets of the peak from the array center)
        Di = N/2 - (Xi-1)
        Dj = N/2 - (Xj-1)
        # roi around central pixel
        Ni = 2*(Xi + Di*(Di < 0) - 1)
        Nj = 2*(Xj + Dj*(Dj < 0) - 1)
        tmp = zeros((N, N))
        # paste the roi so that the peak ends up at the array center
        # (indices converted from 1-based Matlab to 0-based Python)
        tmp[int(Di*(Di > 0)):int(Di*(Di > 0) + Ni), int(Dj*(Dj > 0)):int(Dj*(Dj > 0) + Nj)] \
            = dp[int(Xi - Ni/2) - 1:int(Xi + Ni/2 - 1), int(Xj - Nj/2) - 1:int(Xj + Nj/2 - 1)]
        M = (fftshift(tmp))**(.5)
        # M=tmp.^(.5)
        ## define support: centered square with a margin of DX pixels
        DX = np.ceil(N / 7)
        S = zeros((N, N))
        S[int(N/4 + 1 + DX) - 1:int(3/4 * N - 1 - DX), int(N/4 + 1 + DX) - 1:int(3/4*N - 1 - DX)] = 1

        self.magn = 1  # magnification factor
        self.farfield = True
        print('Using farfield formula.')
        self.rt_data = Cell(1)
        self.rt_data[0] = M
        # standard for the main program is that
        # the data field is the magnitude SQUARED
        # in Luke.m this is changed to the magnitude.
        self.data = Cell(1)
        self.data[0] = M**2
        self.norm_rt_data = np.linalg.norm(ifft2(M), 'fro')
        self.data_zeros = np.where(M == 0)
        self.support_idx = np.nonzero(S)
        self.sets = 2
        # use the abs_illumination field to represent the
        # support constraint.
        self.abs_illumination = S
        # initial guess: random field restricted to the support,
        # scaled to the norm of the measured data
        tmp_rnd = (np.random.rand(N, N)).T  # to match Matlab
        self.u0 = S * tmp_rnd
        self.u0 = self.u0 / np.linalg.norm(self.u0, 'fro') * self.norm_rt_data
        self.dp = dp  # store dp for display later
from proxtoolbox.experiments.phase.phaseExperiment import PhaseExperiment
from proxtoolbox.Utilities.mypoissonrnd import mypoissonrnd
from proxtoolbox.Utilities.gaussian import gaussian
from proxtoolbox.Utilities.cell import Cell, isCell
import numpy as np
from numpy import exp, sqrt, log2, log10, ceil, floor, unravel_index, argmax, zeros
from scipy.io import loadmat
from numpy.fft import fftshift, ifft2, fft2
from numpy import log10
import matplotlib.pyplot as plt
from matplotlib.pyplot import subplots, show, figure
class CDI_Experiment(PhaseExperiment):
'''
CDI experiment class
'''
def __init__(self,
warmup_iter = 0,
**kwargs):
"""
"""
# call parent's __init__ method
super(CDI_Experiment, self).__init__(**kwargs)
# do here any data member initialization
self.warmup_iter = warmup_iter
@staticmethod
def getDefaultParameters():
defaultParams = {
'experiment_name' : 'CDI',
'data_processor_name': 'CDI_DataProcessor',
'data_processor_package': 'proxtoolbox.experiments.phase',
'object': 'nonnegative',
'constraint': 'nonnegative and support',
'Nx': 128,
......@@ -50,7 +45,98 @@ class CDI_Experiment(PhaseExperiment):
}
return defaultParams
def __init__(self,
warmup_iter = 0,
**kwargs):
"""
"""
# call parent's __init__ method
super(CDI_Experiment, self).__init__(**kwargs)
# do here any data member initialization
self.warmup_iter = warmup_iter
# the following data members are set by loadData()
self.magn = None
self.farfield = None
self.data_zeros = None
self.support_idx = None
self.abs_illumination = None
self.supp_phase = None
    def loadData(self):
        """
        Load CDI dataset. Create the initial iterate.

        Reads '../InputData/Phase/CDI_intensity.mat', centers the
        diffraction pattern in an N-by-N array (N a power of two chosen
        from self.Nx and the data resolution), defines a square support,
        and draws a random initial guess restricted to that support.
        """
        np.random.seed(1234)  # fixed seed for reproducible tests

        print('Loading data file CDI_intensity')
        f = loadmat('../InputData/Phase/CDI_intensity.mat')
        # diffraction pattern
        dp = f['intensity']['img'][0, 0]
        orig_res = max(dp.shape[0], dp.shape[1])  # actual data size
        step_up = ceil(log2(self.Nx) - log2(orig_res))
        workres = 2**(step_up) * 2**(floor(log2(orig_res)))  # desired array size
        N = int(workres)
        # center data and use region of interest around center
        # central pixel
        # find max data value; Xi, Xj are 1-based (Matlab-style) coordinates
        argmx = unravel_index(argmax(dp), dp.shape)
        Xi = argmx[0] + 1
        Xj = argmx[1] + 1
        # get desired roi:
        # necessary conversion (offsets of the peak from the array center)
        Di = N/2 - (Xi-1)
        Dj = N/2 - (Xj-1)
        # roi around central pixel
        Ni = 2*(Xi + Di*(Di < 0) - 1)
        Nj = 2*(Xj + Dj*(Dj < 0) - 1)
        tmp = zeros((N, N))
        # paste the roi so that the peak ends up at the array center
        # (indices converted from 1-based Matlab to 0-based Python)
        tmp[int(Di*(Di > 0)):int(Di*(Di > 0) + Ni), int(Dj*(Dj > 0)):int(Dj*(Dj > 0) + Nj)] \
            = dp[int(Xi - Ni/2) - 1:int(Xi + Ni/2 - 1), int(Xj - Nj/2) - 1:int(Xj + Nj/2 - 1)]
        M = (fftshift(tmp))**(.5)
        # M=tmp.^(.5)
        ## define support: centered square with a margin of DX pixels
        DX = np.ceil(N / 7)
        S = zeros((N, N))
        S[int(N/4 + 1 + DX) - 1:int(3/4 * N - 1 - DX), int(N/4 + 1 + DX) - 1:int(3/4*N - 1 - DX)] = 1

        self.magn = 1  # magnification factor
        self.farfield = True
        print('Using farfield formula.')
        self.rt_data = Cell(1)
        self.rt_data[0] = M
        # standard for the main program is that
        # the data field is the magnitude SQUARED
        # in Luke.m this is changed to the magnitude.
        self.data = Cell(1)
        self.data[0] = M**2
        self.norm_rt_data = np.linalg.norm(ifft2(M), 'fro')
        self.data_zeros = np.where(M == 0)
        self.support_idx = np.nonzero(S)
        self.sets = 2
        # use the abs_illumination field to represent the
        # support constraint.
        self.abs_illumination = S
        # initial guess: random field restricted to the support,
        # scaled to the norm of the measured data
        tmp_rnd = (np.random.rand(N, N)).T  # to match Matlab
        self.u0 = S * tmp_rnd
        self.u0 = self.u0 / np.linalg.norm(self.u0, 'fro') * self.norm_rt_data
        self.dp = dp  # store dp for display later
def setupProxOperators(self):
"""
Determine the prox operators to be used for this experiment
......@@ -69,7 +155,7 @@ class CDI_Experiment(PhaseExperiment):
self.proxOperators.append('P_rank1_SR')
else:
self.proxOperators.append('Approx_Pphase_FreFra_Poisson')
self.nProx = self.data_processor.sets
self.nProx = self.sets
def show(self):
......@@ -79,10 +165,10 @@ class CDI_Experiment(PhaseExperiment):
f, (ax1, ax2) = subplots(1, 2,