Commit 8910379d authored by skamann

Added new parameter cubefit|offsets to enable offset fitting.

parent f81bb2f4
......@@ -37,4 +37,4 @@ __author__ = "Sebastian Kamann (s.kamann@ljmu.ac.uk)"
changeable = ["verbose", "interact", "cpucount", "iterations", "type", "degree", "subtract",
"usesigma", "first", "last", "binning", "radius", "inspect", "mask", "include",
"exclude"]
"exclude", "offsets"]
......@@ -65,7 +65,8 @@
},
"cubefit": {
"usesigma": true,
"iterations": 5
"iterations": 5,
"offsets": false
},
"ptsort": {
"outfilename": null
......
......@@ -52,7 +52,7 @@ logger = logging.getLogger(__name__)
def init_worker(catalog, psf_type, psf_radius, psf_parameters_to_fit, position_parameters_to_fit,
last_component_is_unresolved, osfactor, osradius, sources, psf_sources, max_iterations, analyse_psf,
*args):
fit_offsets, *args):
"""
This function defines the global parameters for the parallel part of the
analysis.
......@@ -93,6 +93,9 @@ def init_worker(catalog, psf_type, psf_radius, psf_parameters_to_fit, position_p
analyse_psf : bool
Flag indicating if the radial profiles of the stars used to fit the PSF
plus the parameters of the individual PSF fits should be returned.
fit_offsets : bool
Flag indicating if the offsets to the coordinates in the reference
catalogue should be fitted.
args
Returns
......@@ -112,6 +115,7 @@ def init_worker(catalog, psf_type, psf_radius, psf_parameters_to_fit, position_p
global g_psf_sources
global g_max_iterations
global g_analyse_psf
global g_fit_offsets
g_catalog = catalog
g_psf_type = psf_type
......@@ -125,6 +129,7 @@ def init_worker(catalog, psf_type, psf_radius, psf_parameters_to_fit, position_p
g_psf_sources = psf_sources
g_max_iterations = max_iterations
g_analyse_psf = analyse_psf
g_fit_offsets = fit_offsets
def parallel_fit_layer(fit_parameters):
......@@ -201,7 +206,7 @@ def parallel_fit_layer(fit_parameters):
global g_catalog, g_psf_type, g_psf_radius, g_psf_parameters_to_fit, g_position_parameters_to_fit
global g_last_component_is_unresolved, g_osfactor, g_osradius, g_sources, g_psf_sources, g_max_iterations
global g_analyse_psf
global g_analyse_psf, g_fit_offsets
if coordinates is not None:
catalog = pd.merge(coordinates, g_catalog[['component', 'magnitude']], left_index=True, right_index=True)
......@@ -216,7 +221,7 @@ def parallel_fit_layer(fit_parameters):
results = layerfit(fluxes=None, psf_parameters_to_fit=g_psf_parameters_to_fit,
position_parameters_to_fit=g_position_parameters_to_fit,
sources=g_sources, psf_sources=g_psf_sources, max_iterations=g_max_iterations,
analyse_psf=g_analyse_psf)
analyse_psf=g_analyse_psf, fit_offsets=g_fit_offsets)
del layerfit
return results
......@@ -370,7 +375,7 @@ class FitCube(object):
self.residuals = {}
def __call__(self, layer_range=(0, -1), n_track=10, n_cpu=1, max_iterations=20, use_variances=True, n_bin=1,
analyse_psf=False):
analyse_psf=False, fit_offsets=False):
"""
Run the fitting process for the requested layers.
......@@ -408,7 +413,10 @@ class FitCube(object):
analyse_psf : bool, optional
Flag indicating if the radial profiles of the stars used to fit
the PSF should be returned.
fit_offsets : bool, optional
Flag indicating if the offsets to the coordinates in the reference
catalogue should be fitted.
Returns
-------
psf_data : astropy.io.fits.HDUList
......@@ -433,7 +441,7 @@ class FitCube(object):
_layers_to_analyse = np.arange(start + n_bin // 2, stop, n_bin, dtype=np.int16)
# then rearrange layers so that central layer comes first.
layers_to_analyse = np.zeros(_layers_to_analyse.shape, dtype=_layers_to_analyse.dtype)
layers_to_analyse = np.zeros_like(_layers_to_analyse)
layers_to_analyse[0::2] = _layers_to_analyse[_layers_to_analyse.size // 2:]
layers_to_analyse[1::2] = _layers_to_analyse[:_layers_to_analyse.size // 2][::-1]
......@@ -542,7 +550,8 @@ class FitCube(object):
sources,
psf_sources,
max_iterations,
analyse_psf
analyse_psf,
fit_offsets
)
# initialize processes pool
......@@ -642,9 +651,10 @@ class FitCube(object):
self._write_psf_properties(result.profiles, result.parameters, current_wavelength=current)
self._write_minicubes(current, result.residuals, fit_input[k - 1]['data'])
offsets.append(result.offsets)
testout = pd.concat([off.unstack() for off in offsets], axis=1)
testout.to_csv('test_offsets.csv', index=True)
if result.offsets is not None:
offsets.append(result.offsets)
testout = pd.concat([off.unstack() for off in offsets], axis=1)
testout.to_csv('test_offsets.csv', index=True)
# reset list containing input data for parallel fit
fit_input = []
......
......@@ -103,12 +103,15 @@ def cubefit(config):
logging.info("Starting fit process...")
fitter = FitCube(ifs_data=ifs_data, sources=sources, psf_attributes=psf_attributes,
psf_radius=config['psf'].get('radius', 15.))
psf_data = fitter(layer_range=(first_layer, last_layer), n_cpu=config['global'].get('cpucount', 1),
max_iterations=config['cubefit'].get('iterations', 5),
use_variances=config['cubefit'].get('usesigma', True),
n_bin=config['layers'].get('binning', 1), analyse_psf=config['psf'].get('inspect', False))
psf_radius=config['psf']['radius'])
psf_data = fitter(layer_range=(first_layer, last_layer),
n_cpu=config['global']['cpucount'],
max_iterations=config['cubefit']['iterations'],
use_variances=config['cubefit']['usesigma'],
n_bin=config['layers']['binning'],
analyse_psf=config['psf']['inspect'],
fit_offsets=config['cubefit']['offsets'])
logging.info("Saving results...")
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment