Commit 68d8ffa8 authored by skamann

Continued implementation of pixtable support.

parent 6f36bb0c
......@@ -178,7 +178,7 @@ class Transformation(Parameters):
i += 1
@classmethod
def from_coordinates(cls, in_coords, out_coords, ids=None, out_sigma=None):
def from_coordinates(cls, in_coords, out_coords, ids=None, out_sigma=None, **kwargs):
"""
Initialize a new coordinate transformation using the provided input
(reference) and output (IFS) coordinates in a least squares fit. The
......
......@@ -34,7 +34,6 @@ import itertools
import logging
import time
import numpy as np
# from multiprocessing import cpu_count, Pool
from multiprocessing import cpu_count, Pool
# required PampelMuse-packages
......@@ -46,6 +45,9 @@ __author__ = "Sebastian Kamann (skamann@astro.physik.uni-goettingen.de)"
__revision__ = 290
logger = logging.getLogger(__name__)
def init_worker(psf_type, magnitudes, components, last_component_is_unresolved, maxrad, osfactor, osradius,
psf_parameters_to_fit, position_parameters_to_fit, sources, psf_sources, maxiter, *args):
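# --- Illustrative sketch, not part of this commit: a module-level function
# like init_worker is typically passed to multiprocessing.Pool as an
# initializer, so every worker process receives the (read-only) fit
# configuration exactly once. n_cpu and the configuration variables are
# assumed from the surrounding code.
pool = Pool(processes=n_cpu, initializer=init_worker,
            initargs=(psf_type, magnitudes, components,
                      last_component_is_unresolved, maxrad, osfactor,
                      osradius, psf_parameters_to_fit,
                      position_parameters_to_fit, sources, psf_sources,
                      maxiter))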
......@@ -208,7 +210,7 @@ class FitCube(object):
-------
The newly created instance
"""
logging.debug("Initializing FitCube instance (revision: {0})".format(__revision__))
logger.debug("Initializing FitCube instance (revision: {0})".format(__revision__))
self.ifs_data = ifs_data
......@@ -240,30 +242,30 @@ class FitCube(object):
# fits.
if self.sources.ifs_coordinates_fitted[~self.sources.is_background].any():
if not self.sources.ifs_coordinates_fitted[~self.sources.is_background].all():
logging.error('The provided status of the source positions is not supported.')
logging.error('Only {0} out of {1} source positions have polynomial fits.'.format(
logger.error('The provided status of the source positions is not supported.')
logger.error('Only {0} out of {1} source positions have polynomial fits.'.format(
self.sources.ifs_coordinates_fitted.sum(), self.sources.nsrc - self.sources.nbackground))
else:
self.position_parameters['xy'] = False
elif self.sources.ifs_coordinates_free[~self.sources.is_background].any():
if not self.sources.ifs_coordinates_free[~self.sources.is_background].all():
logging.error('The provided status of the source positions is not supported.')
logging.error('Only {0} out of {1} source positions are free fit parameters.'.format(
logger.error('The provided status of the source positions is not supported.')
logger.error('Only {0} out of {1} source positions are free fit parameters.'.format(
self.sources.ifs_coordinates_free.sum(), self.sources.nsrc - self.sources.nbackground))
else:
self.position_parameters['xy'] = True
else:
if not self.sources.transformation.valid:
logging.error('No coordinate transformation found. Cannot determine source positions.')
logger.error('No coordinate transformation found. Cannot determine source positions.')
for i, parameter in enumerate(self.sources.transformation.names):
self.position_parameters[parameter] = self.sources.transformation.free[i]
# work on PSF, check if basic PSF model is valid
self.psf_attributes = psf_attributes
if self.psf_attributes.profile not in ["gauss", "moffat", "double_gauss"]:
logging.error('Invalid or unsupported PSF type: "{0}"'.format(self.psf_attributes.profile))
logger.error('Invalid or unsupported PSF type: "{0}"'.format(self.psf_attributes.profile))
else:
logging.info('Using PSF of type {0}.'.format(self.psf_attributes.profile.upper()))
logger.info('Using PSF of type {0}.'.format(self.psf_attributes.profile.upper()))
# collect PSF parameters and their status in the fit
self.psf_attributes = psf_attributes
......@@ -326,8 +328,8 @@ class FitCube(object):
start = self.sources.ndisp + start
if stop < 0:
stop = self.sources.ndisp + (stop + 1)
logging.info("Layer range to be analysed: {0}-{1}".format(start, stop))
logging.info("Binning factor per layer: {0}".format(n_bin))
logger.info("Layer range to be analysed: {0}-{1}".format(start, stop))
logger.info("Binning factor per layer: {0}".format(n_bin))
# define array containing the layers (or centres of combined layers) that should be analysed. They must be
# in the correct order, i.e. starting from the central wavelength and then moving to the red and blue ends
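# --- Illustrative sketch, not part of this commit: one way to build such an
# ordering, starting at the central layer and then alternating towards the
# red and blue ends. Assumes start, stop and n_bin from the surrounding code.
layers = np.arange(start, stop, n_bin)
centre = layers.size // 2
order = np.empty(layers.size, dtype=int)
order[0::2] = np.arange(centre, layers.size)  # centre, then towards red end
order[1::2] = np.arange(centre - 1, -1, -1)   # interleaved towards blue end
ordered_layers = layers[order]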
......@@ -347,9 +349,9 @@ class FitCube(object):
# Number of iterations per layer.
if maxiter > 1:
logging.info("Maximum iterations per layer: {0}".format(maxiter))
logger.info("Maximum iterations per layer: {0}".format(maxiter))
else:
logging.info("No iteration on individual layers performed.")
logger.info("No iteration on individual layers performed.")
# begin loading data required for fitting process into memory
# whole array with spectra needs to be loaded
......@@ -379,16 +381,11 @@ class FitCube(object):
# Check how many CPUs should be used
if n_cpu is None:
n_cpu = cpu_count()
logging.info("Starting analysis using {0} CPU(s).".format(n_cpu))
logger.info("Starting analysis using {0} CPU(s).".format(n_cpu))
# The number of previously fitted layers used to obtain initial guesses
n_track = int(n_track//n_bin) + 1 if n_bin > 1 else n_track
# get logger for per-layer customisation using common prefix
# root = logging.getLogger()
# handler = root.handlers[0]
# n_digits = int(np.log10(layers_to_analyse.size)) + 1 # for logging prefix, get number of digits of N_layer
# For all layers to be fitted in parallel, we first need to collect the input data in a common list.
# The list will contain one dictionary per layer.
fit_input = []
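# --- Illustrative sketch, not part of this commit: the pattern described in
# the comment above, i.e. one dictionary of per-layer input data appended to
# fit_input and then mapped over a worker pool. The dictionary keys and the
# worker function fit_layer are hypothetical placeholders.
def fit_layer(layer_input):      # stands in for the actual per-layer routine
    return layer_input['layer']
for jj in layers_to_analyse:
    fit_input.append({'layer': jj, 'data': None, 'variances': None})
pool = Pool(processes=n_cpu)
results = pool.map(fit_layer, fit_input)
pool.close()
pool.join()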
......@@ -439,12 +436,13 @@ class FitCube(object):
for i, jj in enumerate(layers_to_analyse):
# Add a common prefix for all logging messages related to the analysis of the current layer.
# fmt = logging.Formatter("%(asctime)s[%(levelname)-8s]: [{0:0{1}d}/{2}] %(message)s".format(
# i+1, n_digits, layers_to_analyse.size), datefmt='%m/%d/%Y %H:%M:%S')
# handler.setFormatter(fmt)
n_digits = int(np.log10(layers_to_analyse.size)) + 1  # number of digits in the layer count, for the logging prefix
fmt = logging.Formatter("%(asctime)s[%(levelname)-8s]: [{0:0{1}d}/{2}] %(message)s".format(
i+1, n_digits, layers_to_analyse.size), datefmt='%m/%d/%Y %H:%M:%S')
logger.handlers[0].setFormatter(fmt)
# call fitting routine
logging.info("Collecting input data for fit to layer #{0}.".format(jj))
logger.info("Collecting input data for fit to layer #{0}.".format(jj))
# get 1D arrays containing valid data pixels, also sigma-values if requested
data, variances, transform = self.get_layer(jj, n_bin=n_bin, use_variances=use_variances)
......@@ -517,7 +515,7 @@ class FitCube(object):
last_layers = layers_to_analyse[:i]
else:
last_layers = layers_to_analyse[i - 2:max(0, i - 2 - 2 * n_track):-2]
logging.debug("Layers used to update initial guesses: {0}".format(last_layers))
logger.debug("Layers used to update initial guesses: {0}".format(last_layers))
# if not self.use_variances:
# if n_iter == 1 and len(self.last_sigma) > 0:
......@@ -533,7 +531,7 @@ class FitCube(object):
# self.sigma *= sigma_corr
#
# with printoptions(precision=4, suppress=True):
# logging.debug("Relative changes in fluxes: {0}".format(delta_flux))
# logger.debug("Relative changes in fluxes: {0}".format(delta_flux))
# apply weighting scheme, similar to the one used in Stetson's DAOphot
# weights = 1. / (1. + (abs(self.current) / (2. * self.sigma)) ** 2)
......@@ -593,7 +591,7 @@ class FitCube(object):
# NaN pixels will be masked
mask = np.isnan(data)
logging.debug("Layer contains {0} masked pixels".format(mask.sum()))
logger.debug("Layer contains {0} masked pixels".format(mask.sum()))
# apply mask - note that any 2D data will be flattened by this
data = data[~mask]
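# --- Illustrative note, not part of this commit: boolean indexing flattens
# multi-dimensional input, which is why 2D layers come out as 1D arrays here:
example = np.array([[1., np.nan], [3., 4.]])
example[~np.isnan(example)]  # -> array([1., 3., 4.]), a 1D array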
......@@ -647,7 +645,7 @@ class FitCube(object):
value = self.psf_attributes.data[current, i, 0]
parameters[prm] = value
logstring += " {0}={1:.2f},".format(prm, float(value))
logging.info(logstring[:-1])
logger.info(logstring[:-1])
return parameters
......@@ -692,7 +690,7 @@ class FitCube(object):
value = self.sources.transformation.data[current, i, 0]
parameters[prm] = value
logstring += " {0}={1:.2f},".format(prm, float(value))
logging.info(logstring[:-1])
logger.info(logstring[:-1])
return parameters
......@@ -720,7 +718,7 @@ class FitCube(object):
if last is None:
last = []
logging.info("Updating coordinates of PSF instances...")
logger.info("Updating coordinates of PSF instances...")
t0 = time.time()
# loop over sources
......@@ -738,7 +736,7 @@ class FitCube(object):
else:
centroids.append(self.sources.ifs_coordinates[current, k, :, 0])
logging.debug("Time required to update source coordinates: {0:6.4f}s".format(time.time() - t0))
logger.debug("Time required to update source coordinates: {0:6.4f}s".format(time.time() - t0))
return np.asarray(centroids, dtype=np.float32)
......
......@@ -7,6 +7,9 @@ from ..psf import Gaussian, Moffat, DoubleGaussian
from ..core.coordinates import Transformation
logger = logging.getLogger(__name__)
def sum_by_group(values, groups):
"""
This is a fast algorithm for summing up the data in one
......@@ -125,27 +128,27 @@ class FitLayer(object):
self.data = np.asarray(data)
self.variances = np.asarray(variances)
if self.data.ndim != 1 or self.variances.shape != self.data.shape:
logging.error('Arrays containing data and variances must have same shape.')
logger.error('Arrays containing data and variances must have same shape.')
self.nspaxel = self.data.size
# this is the transformation from spaxels to (x,y) coordinates
self.transformation = np.asarray(transformation)
if self.transformation.ndim != 2 or self.transformation.shape != (self.nspaxel, 2):
logging.error('Spaxel-to-xy transformation has invalid shape.')
logger.error('Spaxel-to-xy transformation has invalid shape.')
# work on source catalogue
self.xy = np.asarray(xy)
if self.xy.ndim != 2 or self.xy.shape[1] != 2:
logging.error('Array containing source coordinates has invalid shape.')
logger.error('Array containing source coordinates has invalid shape.')
self.nsources = self.xy.shape[0]
self.magnitudes = np.asarray(magnitudes) if magnitudes is not None else np.zeros(self.nsources, dtype='<f4')
if self.magnitudes.size != self.nsources:
logging.error('Number of provided magnitudes does not match number of sources.')
logger.error('Number of provided magnitudes does not match number of sources.')
self.components = components if components is not None else np.arange(self.nsources, dtype='<i4')
if self.components.size != self.nsources:
logging.error('No. of entries in components data does not match number of sources.')
logger.error('No. of entries in components data does not match number of sources.')
self.ncomponents = self.components.max() + 1
self.last_component_is_unresolved = last_component_is_unresolved
......@@ -166,7 +169,7 @@ class FitLayer(object):
elif psf_type.lower() == 'double_gauss':
self.psf_class = DoubleGaussian
else:
logging.error('Unknown PSF type provided: {0}'.format(psf_type))
logger.error('Unknown PSF type provided: {0}'.format(psf_type))
self.psf_class = None
# add information about location of the individual spaxels.
self.psf_class.set_transform(self.transformation)
......@@ -193,14 +196,14 @@ class FitLayer(object):
if fluxes is None:
fluxes = np.zeros(self.ncomponents, dtype='<f4')
if fluxes.size != self.ncomponents:
logging.error('No. of provided fluxes does not match number of components.')
logger.error('No. of provided fluxes does not match number of components.')
# Flag indicating if the source matrix must be re-calculated (typically after the PSF/positions have changed)
rebuild_matrix = True
for iteration in range(1, maxiter + 1):
if maxiter > 1:
logging.info("Iteration {0} (of max. {1})".format(iteration, maxiter))
logger.info("Iteration {0} (of max. {1})".format(iteration, maxiter))
if rebuild_matrix:
source_matrix, included_components = self.build_matrix()
......@@ -230,7 +233,7 @@ class FitLayer(object):
t1 = time.time()
bestfit = linalg.lsmr(fit_matrix, current * weights, atol=1e-5, show=False)
t2 = time.time()
logging.debug("Time required to solve matrix equation: {0:6.3f}s".format(t2-t1))
logger.debug("Time required to solve matrix equation: {0:6.3f}s".format(t2-t1))
# Note that the background fluxes are currently not stored
fluxes[included_components] += (bestfit[0] / totflux)[:included_components.size]
......@@ -248,15 +251,19 @@ class FitLayer(object):
sources=sources, psf_parameters=psf_parameters_to_fit,
psf_sources=psf_sources,
fit_positions=(position_parameters_to_fit is not None))
# update PSF parameters:
# update PSF parameters, using S/N as weights:
if psf_parameters_to_fit is not None:
for i, name in enumerate(psf_parameters_to_fit, start=2): # first two entries are S/N and flux
if np.isfinite(results[:, i]).any():
new_value = np.average(results[:, i][np.isfinite(results[:, i])],
weights=results[:, 0][np.isfinite(results[:, i])])
getattr(self.psf_class, "set_{0}".format(name))(new_value)
try:
getattr(self.psf_class, "set_{0}".format(name))(new_value)
except AssertionError:
logger.error('PSF fit yielded invalid value {0:.1f} for parameter {1}.'.format(
new_value, name))
else:
logging.warning('Cannot update PSF parameter {0} because no PSF fit succeeded.'.format(
logger.warning('Cannot update PSF parameter {0} because no PSF fit succeeded.'.format(
name))
# update positions
......@@ -284,13 +291,16 @@ class FitLayer(object):
getattr(self.psf_class, "set_{0}".format(parameter))(position_results[i])
if psf_results is not None:
for i, parameter in enumerate(psf_parameters_to_fit):
getattr(self.psf_class, "set_{0}".format(parameter))(psf_results[i])
try:
getattr(self.psf_class, "set_{0}".format(parameter))(psf_results[i])
except AssertionError:
logger.error('PSF fit yielded invalid value {0:.1f} for parameter {1}.'.format(
psf_results[i], parameter))
# PSF and/or positions have changed, so the matrix needs to be recalculated
rebuild_matrix = True
if iteration == maxiter:
logging.warning('Maximum number of iterations reached.')
logger.warning('Maximum number of iterations reached.')
# estimate uncertainties that are returned with the data
uncertainties = 1. / (totflux * np.sqrt(fit_matrix.multiply(fit_matrix).sum(axis=0)).getA1())
......@@ -353,23 +363,23 @@ class FitLayer(object):
spaxels, fluxes = psf.get_flux()
if spaxels.size == 0:
logging.warning("Source no. {0} entirely outside FoV.".format(psf.src_id))
logger.warning("Source no. {0} entirely outside FoV.".format(psf.src_id))
results.append([spaxels, fluxes, fluxes.sum()])
t1 = time.time()
logging.debug("Time required to calculate PSF profiles: {0:.3f}s".format(t1 - t0))
logger.debug("Time required to calculate PSF profiles: {0:.3f}s".format(t1 - t0))
# Calculate total flux of each component
totflux, ordered_components = sum_by_group([r[-1] for r in results], self.components)
# note that the total fluxes are already in the correct order, i.e. sorted by component
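# --- Illustrative sketch, not part of this commit: a common pure-numpy way to
# implement such a fast group-wise sum (sort by group, cumulative sum,
# difference at the group boundaries), returning the sums ordered by group id:
def sum_by_group_sketch(values, groups):
    values, groups = np.asarray(values), np.asarray(groups)
    order = np.argsort(groups)               # bring equal group ids together
    values, groups = values[order], groups[order]
    values = np.cumsum(values)               # running total over sorted values
    last = np.ones(groups.size, dtype=bool)
    last[:-1] = groups[1:] != groups[:-1]    # last element of each group
    sums = np.diff(np.concatenate(([0.], values[last])))
    return sums, groups[last]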
t2 = time.time()
logging.debug("Time required to combine components: {0:.3f}s".format(t2 - t1))
logger.debug("Time required to combine components: {0:.3f}s".format(t2 - t1))
# check if any row does not contribute at all
empty_components = ordered_components[totflux <= 0]
if len(empty_components) > (1 - update_unresolved):
logging.warning("Encountered component(s) that contribute zero flux: {0}".format(empty_components))
logger.warning("Encountered component(s) that contribute zero flux: {0}".format(empty_components))
# Fill the matrix: the indices and intensities of all PSF instances are combined and serve as row indices (i0)
# and values of the matrix. To obtain the column indices of the matrix (i1), we use the property 'specrow' of
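# --- Illustrative sketch, not part of this commit: how such a sparse matrix
# can be assembled with scipy, assuming i0 (spaxel/row indices), i1
# (component/column indices) and the PSF intensities are concatenated as
# described above; the mapping of sources to columns is an assumption.
from scipy import sparse
i0 = np.concatenate([spaxels for spaxels, fluxes, total in results])
intensities = np.concatenate([fluxes for spaxels, fluxes, total in results])
i1 = np.concatenate([np.full(r[0].size, self.components[k], dtype=np.int32)
                     for k, r in enumerate(results)])
source_matrix = sparse.csc_matrix((intensities, (i0, i1)),
                                  shape=(self.nspaxel, self.ncomponents))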
......@@ -418,7 +428,7 @@ class FitLayer(object):
_Apsf += self.background
t3 = time.time()
logging.debug("Time required to create matrix: {0:.3f}s".format(t3 - t2))
logger.debug("Time required to create matrix: {0:.3f}s".format(t3 - t2))
# return CSC matrix and array containing the components that contribute flux
zero_component = np.in1d(np.arange(self.ncomponents, dtype=np.int32), empty_components)
......@@ -475,7 +485,7 @@ class FitLayer(object):
names.extend(["xc", "yc"])
x0.extend([psf.xc, psf.yc])
logging.debug("Initial guesses for fit of source no. {0}: {1}".format(psf.src_id, dict(zip(names, x0))))
logger.debug("Initial guesses for fit of source no. {0}: {1}".format(psf.src_id, dict(zip(names, x0))))
# get pixels of layer where the current PSF is defined
variances = self.variances[psf.used]
......@@ -535,14 +545,13 @@ class FitLayer(object):
results[n, 2:(2 + len(psf_parameters))] = np.nan
if fit_positions:
results[n, -2:] = result[0][-2:]
# log outcome of fit
if not 0 < result[-1] <= 4:
logging.warning("PSF fit failed for source no. {0}: {1}".format(i, result[-2]))
else:
# log outcome of fit
prmstring = ", ".join(["{0}={1:.2f}".format(names[k], result[0][k]) for k in xrange(1, len(names))])
logging.info("Successfully fitted PSF of source no. {0} (S/N={1:.1f}): f={2:.1f}, {3}".format(
logger.info("Successfully fitted PSF of source no. {0} (S/N={1:.1f}): f={2:.1f}, {3}".format(
i, snratio, result[0][0], prmstring))
else:
logger.warning("PSF fit failed for source no. {0}: {1}".format(i, result[-2]))
results[n] = np.nan
# reset PSF instance to state it had before the fit
psf.transform = self.transformation
......@@ -593,7 +602,7 @@ class FitLayer(object):
# from the behaviour of getattr(self.PSF, "set_<PAR>")(<VALUE>)
setattr(psf, names[i], x0[i])
except AssertionError as error: # if parameter cannot be updated, raise LinAlgError
logging.error("Encountered AssertionError: {0}".format(error))
logger.error("Encountered AssertionError: {0}".format(error))
raise np.linalg.linalg.LinAlgError("Invalid value '{0}' for parameter '{1}'.".format(x0[i], names[i]))
# return residuals
return data / sigma - x0[0] * (psf.f / sigma)
......@@ -640,7 +649,7 @@ class FitLayer(object):
# Fit position parameters
if position_parameters is not None:
logging.info("Optimizing source coordinates...")
logger.info("Optimizing source coordinates...")
# collect initial guesses
x0 = [getattr(self.psf_class, "_{0}".format(name)) for name in position_parameters]
......@@ -654,13 +663,13 @@ class FitLayer(object):
for i, name in enumerate(position_parameters):
getattr(self.psf_class, "set_{0}".format(name))(bestfit_position[0][i])
logstring += " {0}={1:.2f},".format(name, bestfit_position[0][i])
logging.info(logstring[:-1])
logger.info(logstring[:-1])
else:
bestfit_position = None
# fit PSF parameters
if psf_parameters is not None:
logging.info("Optimizing PSF parameters...")
logger.info("Optimizing PSF parameters...")
# collect free parameters
x0 = [getattr(self.psf_class, "_{0}".format(name)) for name in psf_parameters]
......@@ -675,11 +684,11 @@ class FitLayer(object):
for i, name in enumerate(psf_parameters):
getattr(self.psf_class, "set_{0}".format(name))(bestfit_psf[0][i])
logstring += " {0}={1:.2f},".format(name, bestfit_psf[0][i])
logging.info(logstring)
logger.info(logstring)
except Exception as msg:  # optimize.minpack.error: error handler to avoid that the code crashes
# completely; which error is actually thrown seems to depend on the numpy version
logging.error("PSF fit failed: {0}".format(msg))
logger.error("PSF fit failed: {0}".format(msg))
bestfit_psf = None
else:
bestfit_psf = None
......@@ -708,8 +717,8 @@ class FitLayer(object):
residuals : nd_array
The residuals of the fit.
"""
# logging.debug("Shape of Jacobi matrix: {0}".format(self.jacobi_multi_psf(x0, names).shape))
# logging.debug("Shape of data: {0}".format(self.data.shape))
# logger.debug("Shape of Jacobi matrix: {0}".format(self.jacobi_multi_psf(x0, names).shape))
# logger.debug("Shape of data: {0}".format(self.data.shape))
# update parameters in PSF-class
for i, name in enumerate(names):
try:
......@@ -770,7 +779,7 @@ class FitLayer(object):
jacob[j, psf.used] -= fluxes[ii] * (diff_i / np.sqrt(self.variances[psf.used]))
t1 = time.time()
logging.debug("Time needed to create Jacobi matrix: {0:.3f}s".format(t1 - t0))
logger.debug("Time needed to create Jacobi matrix: {0:.3f}s".format(t1 - t0))
return jacob
......@@ -779,10 +788,10 @@ class FitLayer(object):
xy_reference = np.asarray(xy_reference)
xy_true = np.asarray(xy_true)
if xy_reference.shape != xy_true.shape or xy_reference.shape[1] != 2:
logging.error('Cannot fit coordinate transformation. Arrays have incompatible shapes.')
logger.error('Cannot fit coordinate transformation. Arrays have incompatible shapes.')
return None
if xy_reference.shape[0] < 4:
logging.error('Cannot fit coordinate transformation. Less than 4 sources provided.')
logger.error('Cannot fit coordinate transformation. Less than 4 sources provided.')
return None
return Transformation.fit_to_coordinates(xy_reference, xy_true)[0]
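# --- Illustrative sketch, not part of this commit: the simplest least-squares
# fit of an affine transformation between the two coordinate sets, in the
# spirit of Transformation.fit_to_coordinates (whose actual model may differ):
def fit_affine_sketch(xy_reference, xy_true):
    design = np.hstack((xy_reference, np.ones((xy_reference.shape[0], 1))))
    coeffs = np.linalg.lstsq(design, xy_true)[0]  # (3, 2): linear part + shift
    return coeffs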
......@@ -1345,7 +1345,7 @@ class Sources(object):
logging.debug("Time required to load IFS coordinates: {0}s".format(t3-t2))
logging.debug("Time required to load fits to IFS coordinates: {0}s".format(t4-t3))
def fit_coordinate_transformation(self, index=slice(None)):
def fit_coordinate_transformation(self, index=slice(None), **kwargs):
"""
Calculate/Initialize a new coordinate transformation using the input
coordinates and the IFS coordinates. For this method to work, the
......@@ -1382,7 +1382,7 @@ class Sources(object):
# carry out fit
ref_coordinates = np.vstack((self.x, self.y)).T[self.ifs_coordinates_loaded]
self.transformation = Transformation.from_coordinates(ref_coordinates, self.ifs_coordinates[index][..., 0],
ids=self.ID[self.ifs_coordinates_loaded])
ids=self.ID[self.ifs_coordinates_loaded], **kwargs)
def providePosIDs(self, minStatus=1):
"""
......
......@@ -508,11 +508,6 @@ def initfit(pampelmuse):
elif not sources.transformation.valid:
logging.critical("At least 2 sources must be identified.")
if pampelmuse.posfit == 1:
sources.transformation.free[-2:] = True
elif pampelmuse.posfit == 2:
sources.transformation.free[:] = True
# add coordinate transformation to PSF instances
i = 0
......@@ -712,7 +707,6 @@ def initfit(pampelmuse):
sources.cunit = pampelmuse.ifs_data.wcs.wcs.cunit[2]
sources.load_spectra() # create dummy data: every spectrum contains only NaNs
sources.data[:] = np.nan
sources.transformation.resize(sources.ndisp)
if not pampelmuse.smallifu:
......@@ -834,58 +828,66 @@ def initfit(pampelmuse):
raise ConfigurationError("Not enough PSF sources found.")
else:
logging.warning("Found only {0} PSF source(s)".format(len(sources.psf_indices)))
else:
# determine wavelength-dependent coordinate transformation
logging.info("Determining wavelength-dependent coordinate transformation...")
sources.fit_coordinate_transformation()
# determine FWHM, E and THETA
stars = (results["id"] >= 0)
# determine wavelength-dependent coordinate transformation
logging.info("Determining wavelength-dependent coordinate transformation...")
sources.fit_coordinate_transformation()
# determine FWHM, E and THETA
stars = (results["id"] >= 0)
logging.info("Estimating PSF properties using measured moments...")
x2 = np.ma.array(results[["x2"]][stars].view(np.float32).reshape(stars.sum(), -1))
x2.mask = np.isnan(x2)
y2 = np.ma.array(results[["y2"]][stars].view(np.float32).reshape(stars.sum(), -1))
y2.mask = np.isnan(y2)
xy = np.ma.array(results[["xy"]][stars].view(np.float32).reshape(stars.sum(), -1))
xy.mask = np.isnan(xy)
# get A, B:
with np.errstate(invalid="ignore"):
A = np.sqrt((x2+y2)/2.+np.sqrt(((x2-y2)/2.)**2+xy**2))
A.mask[A <= 0] = True
B = np.sqrt((x2+y2)/2.-np.sqrt(((x2-y2)/2.)**2+xy**2))
B.mask[B <= 0] = True
logging.info("Estimating PSF properties using measured moments...")
x2 = np.ma.array(results[["x2"]][stars].view(np.float32).reshape(stars.sum(), -1))
x2.mask = np.isnan(x2)
y2 = np.ma.array(results[["y2"]][stars].view(np.float32).reshape(stars.sum(), -1))
y2.mask = np.isnan(y2)
xy = np.ma.array(results[["xy"]][stars].view(np.float32).reshape(stars.sum(), -1))
xy.mask = np.isnan(xy)
# get A, B:
with np.errstate(invalid="ignore"):
A = np.sqrt((x2+y2)/2.+np.sqrt(((x2-y2)/2.)**2+xy**2))
A.mask[A <= 0] = True
B = np.sqrt((x2+y2)/2.-np.sqrt(((x2-y2)/2.)**2+xy**2))
B.mask[B <= 0] = True
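# --- Illustrative check, not part of this commit: A and B above are the
# square roots of the eigenvalues of the second-moment matrix
# [[x2, xy], [xy, y2]], i.e. the RMS extents along the major and minor axes
# of the light distribution; the moment values here are assumed:
moments = np.array([[2.0, 0.3], [0.3, 1.0]])      # x2, xy, y2 in pix^2
A_check, B_check = np.sqrt(np.linalg.eigvalsh(moments)[::-1])
fwhm_check = np.sqrt(8. * np.log(2)) * A_check    # Gaussian sigma -> FWHM
e_check = 1. - B_check / A_check                  # ellipticity as used below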
# FWHM
fwhm = np.sqrt(8.*np.log(2))*np.mean(A, axis=0)
# TODO: should FWHM be updated as well?
logging.info("Mean FWHM: {0:.2f}pix.".format(float(fwhm.mean())))
# Ellipticity
e = 1. - np.mean(B/A, axis=0)
logging.info("Mean ellipticity: {0:.2f}".format(float(e.mean())))
ii = psf_attributes.names.index("e")
if psf_attributes.free[ii]:
# need to check if dispersion information already included in PSF variables, otherwise, use
# numpy.resize that fills enlarged array with repeated copies of available data
if psf_attributes.data.shape[0] == 1:
psf_attributes.data = np.resize(psf_attributes.data, e.shape + psf_attributes.data.shape[1:])
psf_attributes.data[:, ii, 0] = e.data
# Position angle
with np.errstate(invalid="ignore"):
_theta = 0.5*np.arctan(2*xy/(x2-y2))
_theta -= (-1)**(_theta < 0)*np.pi/4.
_theta += (-1)**(xy > 0)*np.pi/4.
theta = _theta.mean(axis=0)
logging.info("Mean position angle: {0:.2f}rad.".format(float(theta.mean())))
ii = psf_attributes.names.index("theta")
if psf_attributes.free[ii]:
if psf_attributes.data.shape[0] == 1:
psf_attributes.data = np.resize(psf_attributes.data, theta.shape+psf_attributes.data.shape[1:])
psf_attributes.data[:, ii, 0] = theta.data
# FWHM
fwhm = np.sqrt(8.*np.log(2))*np.mean(A, axis=0)
# TODO: should FWHM be updated as well?
logging.info("Mean FWHM: {0:.2f}pix.".format(float(fwhm.mean())))
# Ellipticity
e = 1. - np.mean(B/A, axis=0)
logging.info("Mean ellipticity: {0:.2f}".format(float(e.mean())))
ii = psf_attributes.names.index("e")
if psf_attributes.free[ii]:
# need to check if dispersion information already included in PSF variables, otherwise, use
# numpy.resize that fills enlarged array with repeated copies of available data
if psf_attributes.data.shape[0] == 1:
psf_attributes.data = np.resize(psf_attributes.data, e.shape + psf_attributes.data.shape[1:])
psf_attributes.data[:, ii, 0] = e.data
# Position angle
with np.errstate(invalid="ignore"):
_theta = 0.5*np.arctan(2*xy/(x2-y2))
_theta -= (-1)**(_theta < 0)*np.pi/4.
_theta += (-1)**(xy > 0)*np.pi/4.
theta = _theta.mean(axis=0)
logging.info("Mean position angle: {0:.2f}rad.".format(float(theta.mean())))
ii = psf_attributes.names.index("theta")
if psf_attributes.free[ii]: