skamann / pampelmuse / Commits / 84eebbf6

Commit 84eebbf6 authored Jul 25, 2016 by skamann
Completed weighting scheme for new fit.
parent 5984751e
Changes: 12
pampelmuse/core/__init__.py
-import copy_reg
-from types import *
-# order counts here! aperture loads Cube and Sources, so they must be loaded first
-from cube import Cube
-from sources import Sources
-from aperture import AperPhoto
-from background import BackgroundGrid
-
-
-# for multi-threading
-def _pickle_method(method):
-    func_name = method.im_func.__name__
-    obj = method.im_self
-    cls = method.im_class
-    if func_name.startswith('__') and not func_name.endswith('__'):
-        cls_name = cls.__name__.lstrip('_')
-        if cls_name:
-            func_name = '_' + cls_name + func_name
-    return _unpickle_method, (func_name, obj, cls)
-
-
-def _unpickle_method(func_name, obj, cls):
-    for cls in cls.mro():
-        try:
-            func = cls.__dict__[func_name]
-        except KeyError:
-            pass
-        else:
-            break
-    return func.__get__(obj, cls)
-
-
-copy_reg.pickle(MethodType, _pickle_method, _unpickle_method)
"""
__init__.py
===========
Copyright 2013-2016 Sebastian Kamann
This file is part of PampelMuse.
PampelMuse is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
any later version.
PampelMuse is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with PampelMuse. If not, see <http://www.gnu.org/licenses/>.
+++
Latest SVN revision
-------------------
333, 2016/07/25
"""
# import copy_reg
# from types import *
from
.sources
import
Sources
# order counts here! aperture loads Sources, so it must be loaded first
from
.aperture
import
AperPhoto
from
.background
import
BackgroundGrid
__author__
=
"Sebastian Kamann (skamann@astro.physik.uni-goettingen.de)"
__revision__
=
333
# # for multi-threading
# def _pickle_method(method):
# func_name = method.im_func.__name__
# obj = method.im_self
# cls = method.im_class
# if func_name.startswith('__') and not func_name.endswith('__'):
# cls_name = cls.__name__.lstrip('_')
# if cls_name:
# func_name = '_' + cls_name + func_name
# return _unpickle_method, (func_name, obj, cls)
#
#
# def _unpickle_method(func_name, obj, cls):
# for cls in cls.mro():
# try:
# func = cls.__dict__[func_name]
# except KeyError:
# pass
# else:
# break
# return func.__get__(obj, cls)
#
#
# copy_reg.pickle(MethodType, _pickle_method, _unpickle_method)
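The block that is now commented out was the usual Python 2 recipe for making bound methods picklable: multiprocessing.Pool pickles the callables it dispatches to worker processes, and Python 2 pickle cannot handle bound methods, so copy_reg.pickle(MethodType, ...) registered a custom reducer. On recent Python 3 versions bound methods pickle natively, so a pool can map an instance method directly. A minimal, self-contained sketch of that use case (the Scaler class is purely illustrative, not part of PampelMuse):

# Minimal sketch: in Python 3, bound methods are picklable, so Pool.map can
# dispatch them directly without any copy_reg/copyreg registration.
from multiprocessing import Pool


class Scaler(object):
    def __init__(self, factor):
        self.factor = factor

    def scale(self, x):
        # runs in a worker process; the instance is pickled together with the method
        return self.factor * x


if __name__ == "__main__":
    scaler = Scaler(3.0)
    with Pool(2) as pool:
        print(pool.map(scaler.scale, [1, 2, 3]))  # [3.0, 6.0, 9.0]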
pampelmuse/core/aperture.py
"""
aperture.py
===========
Copyright 2013-2016 Sebastian Kamann
This file is part of PampelMuse.
PampelMuse is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
any later version.
PampelMuse is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with PampelMuse. If not, see <http://www.gnu.org/licenses/>.
+++
Provides
--------
class pampelmuse.core.aperture.AperPhoto
...
...
@@ -22,25 +40,23 @@ Provides
--------
function pampelmuse.core.aperture.run_ap

Latest SVN revision
-------------------
-253, 2015/04/22
+333, 2016/07/25
"""
-__author__ = "Sebastian Kamann (skamann@astro.physik.uni-goettingen.de)"
-__revision__ = 253

import logging
+import numpy as np
from multiprocessing import Pool
from scipy import spatial
-import numpy as np

# required PampelMuse-packages
from ..instruments import Instrument, MusePixtable
from ..psf import Profile
from ..functions.statistics import clip_sigma

+__author__ = "Sebastian Kamann (skamann@astro.physik.uni-goettingen.de)"
+__revision__ = 333
def run_ap(centroids, pixpos, data, background="global", r_aper=3., calc_moments=False, thresh=3.):
    """
    This function performs aperture photometry on one or more images around
...
...
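The body of run_ap is collapsed in the diff; only the signature is visible. As a rough, generic illustration of aperture photometry on pixels at arbitrary positions, i.e. summing the flux within r_aper of each centroid, which the signature and the scipy.spatial import suggest but which is not the author's actual implementation (background handling, moments, and thresholding are omitted, and the array shapes are assumed):

import numpy as np
from scipy import spatial


def simple_aperture_flux(centroids, pixpos, data, r_aper=3.):
    """Sum the pixel values within r_aper of each centroid (no background handling)."""
    # pixpos: (n_pix, 2) pixel coordinates, data: (n_pix,) fluxes, centroids: (n_src, 2)
    tree = spatial.cKDTree(pixpos)
    neighbours = tree.query_ball_point(centroids, r=r_aper)
    return np.array([data[idx].sum() for idx in neighbours])


# toy example: one Gaussian source at the origin on an 11x11 pixel grid
yy, xx = np.mgrid[-5:6, -5:6]
pixpos = np.column_stack((xx.ravel(), yy.ravel()))
data = np.exp(-0.5*(xx**2 + yy**2)/2.).ravel()
print(simple_aperture_flux(np.array([[0., 0.]]), pixpos, data))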
pampelmuse/core/background.py
"""
background.py
=============
Copyright 2013-2016 Sebastian Kamann
This file is part of PampelMuse.
PampelMuse is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
any later version.
PampelMuse is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with PampelMuse. If not, see <http://www.gnu.org/licenses/>.
+++
Provides
--------
class BackgroundGrid
...
...
@@ -21,7 +39,7 @@ Revision 221, 2014/12/02
Latest SVN revision
-------------------
-232, 2015/01/30
+333, 2016/07/25
"""
import logging
import numpy as np
...
...
@@ -32,7 +50,7 @@ from ..psf import Canvas
__author__ = "Sebastian Kamann (skamann@astro.physik.uni-goettingen.de)"
-__revision__ = 232
+__revision__ = 333


class BackgroundGrid(Canvas):
...
...
pampelmuse/core/coordinates.py
"""
coordinates.py
==============
Copyright 2013-2016 Sebastian Kamann
This file is part of PampelMuse.
PampelMuse is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
any later version.
PampelMuse is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with PampelMuse. If not, see <http://www.gnu.org/licenses/>.
+++
Provides
--------
function pampelmuse.core.coordinates.my_lsq_sigma
...
...
@@ -52,18 +70,16 @@ This results in the inverse transformation:
Latest SVN revision
-------------------
-280, 2015/10/30
+333, 2016/07/25
"""
-__author__ = "Sebastian Kamann (skamann@astro.physik.uni-goettingen.de)"
-__revision__ = 280

import logging
import re
import numpy as np
-from ..core.parameters import Parameters
+# required PampelMuse modules
+from pampelmuse.core.parameters import Parameters

+__author__ = "Sebastian Kamann (skamann@astro.physik.uni-goettingen.de)"
+__revision__ = 333


def my_lsq_sigma(a, b, sigma=None):
...
...
pampelmuse/core/cube.py
deleted 100644 → 0 (diff collapsed)
pampelmuse/core/fit_cube.py
...
...
@@ -32,7 +32,7 @@ extraction for a data cube.
Latest SVN revision
-------------------
-332, 2016/07/25
+333, 2016/07/25
"""
import logging
import time
...
...
@@ -43,7 +43,7 @@ from ..core import BackgroundGrid
__author__ = "Sebastian Kamann (skamann@astro.physik.uni-goettingen.de)"
-__revision__ = 332
+__revision__ = 333

logger = logging.getLogger(__name__)
...
...
@@ -585,8 +585,8 @@ class FitCube(object):
        data : nd_array
            The data layer
        variances : nd_array
-           The variance layer, not that it will be filled with ones if the
-           'use_variances' parameter was set to false.
+           The variance layer, will be None if the 'use_variances' parameter
+           is set to False.
        transformation : ndarray
            The transformation from pixels to spatial (x,y) coordinates that
            is valid for the returned data layer.
...
...
@@ -610,8 +610,6 @@ class FitCube(object):
        # apply mask - note that any 2D data will be flattened by this
        data = data[~mask]
-       if variances is None:
-           variances = np.ones(data.shape, dtype=np.float32)

        # Set spaxel to (x,y) transformation of the PSF-instances, using the transformation of the Instrument instance
        # and the mask
...
...
pampelmuse/core/fit_layer.py
...
...
@@ -20,6 +20,10 @@ along with PampelMuse. If not, see <http://www.gnu.org/licenses/>.
+++
This module implements the optimization for a single layer of IFS data.

+Latest SVN revision
+-------------------
+333, 2016/07/25
"""
import contextlib
import logging
...
...
@@ -32,7 +36,7 @@ from ..core.coordinates import Transformation
__author__ = "Sebastian Kamann (skamann@astro.physik.uni-goettingen.de)"
-__revision__ = 331
+__revision__ = 333

logger = logging.getLogger(__name__)
...
...
@@ -248,18 +252,71 @@ class FitLayer(object):
            self.psf_instances[-1].set_yc(self.xy[i, 1])

        self.source_matrix = None
+       self.fit_weights = None
        self.fluxes = np.zeros((self.n_components,), dtype='<f4')
        self.background_fluxes = np.zeros((self.n_background,), dtype='<f4')
    @property
    def variances(self):
        """
        This method is designed to provide an estimate of the variances
        if they are not provided externally. To this aim, the method compares
        the current model with the data and determines the deviation between
        the two as a function of the model flux using a running MAD.

        Returns
        -------
        variances : ndarray
            The array containing the estimated variances. Note that it will
            be filled with 1s if no model is yet available.
        """
        # if available, use provided variances
        if self._variances is not None:
-           return self.variances
+           return self._variances
        # otherwise, we only can estimate the variances if a model exists
        elif (self.fluxes != 0).any():
            model_fluxes = self.model.sum(axis=1).getA1()
            xbin, ybin = FitLayer.calc_running_mad(model_fluxes, self.current)
            return 1.4826*np.interp(model_fluxes, xbin, ybin)  # 1.4826 is for MAD -> sigma conversion
        # if not, just return 1s
        else:
            return np.ones(self.data.shape, dtype=np.float32)
    def get_weight_factors(self, alpha=2, beta=2):
        """
        This method implements a weighting scheme similar to the one used by
        P. Stetson in DAOphot. The idea is to reduce the weights of deviant
        pixels (i.e. pixels with huge residuals) using a modified variance.
        The weight of a pixel i in the fit is modified from a pure variance-
        weighting as follows:

        w_i = f_i(residuals_i) / variance_i, where
        f_i = 1 / {1 + [abs(residuals_i)/(alpha*sigma_i)]^beta}, with
        sigma_i = SQRT(variance_i)

        Parameters
        ----------
        alpha : int
            The parameter alpha in the above equation.
        beta : int
            The parameter beta in the above equation.

        Returns
        -------
        f : ndarray
            The factors by which the variances should be divided to get
            modified variances for the fitting process.
        """
        if (self.fluxes != 0).any():
            return 1./(1. + np.abs(self.current/(alpha*np.sqrt(self.variances)))**beta)
        else:
-           return self.estimate_variances()
+           return np.ones(self.data.shape, dtype=np.float32)
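As an aside, the weighting formula quoted in the docstring is easy to exercise in isolation: with the default alpha = beta = 2, a pixel whose residual equals twice its sigma keeps a factor of 0.5, and strongly deviant pixels are suppressed roughly quadratically. A standalone sketch (the function and variable names are illustrative, not the class attributes):

import numpy as np


def stetson_weight_factors(residuals, variances, alpha=2.0, beta=2.0):
    """f_i = 1 / (1 + (|r_i| / (alpha*sigma_i))**beta); the full weights are f_i / variance_i."""
    sigma = np.sqrt(variances)
    return 1.0/(1.0 + (np.abs(residuals)/(alpha*sigma))**beta)


residuals = np.array([0.0, 2.0, 10.0])
variances = np.ones(3)
f = stetson_weight_factors(residuals, variances)
print(f)            # [1.0, 0.5, ~0.038]: deviant pixels are downweighted
print(f/variances)  # the weights w_i actually used in the fit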
    def __call__(self, fluxes=None, psf_parameters_to_fit=None, position_parameters_to_fit=None, sources=None,
-                psf_sources=None, maxiter=5, *args, **kwargs):
+                psf_sources=None, maxiter=5):
"""
This method carries out a least-squares fir to the data. The process
is iterative: The model is optimized for the fluxes, the PSF, and the
...
...
@@ -294,8 +351,6 @@ class FitLayer(object):
            shape of the PSF right than to get its centroid.
        maxiter : int, optional
            The maximum number of iterations.
-       args
-       kwargs

        Returns
        -------
...
...
@@ -321,6 +376,9 @@ class FitLayer(object):
        else:
            self.fluxes = fluxes

+       # Determine initial weights for each pixel in the fit.
+       self.fit_weights = self.get_weight_factors()/self.variances

        for iteration in range(1, maxiter + 1):
            if maxiter > 1:
                logger.info("Iteration {0} (of max. {1})".format(iteration, maxiter))
...
...
@@ -331,23 +389,20 @@ class FitLayer(object):
            # fluxes of components outside FoV are set to NaN
            self.fluxes[~included_components] = np.nan

-           # divide source matrix by uncertainties
-           self.source_matrix.data /= np.take(np.sqrt(self.variances), self.source_matrix.indices)

-           # Fit fluxes by direct matrix inversion
+           # first obtain fit matrix by multiplying source matrix with sqrt of fitting weights
+           n, m = self.source_matrix.shape
+           fit_matrix = sparse.dia_matrix((np.sqrt(self.fit_weights), [0]), shape=(n, n))*self.source_matrix

            # The documentation of scipy states that to efficiently solve the matrix equation, the columns should
            # be scaled to have the same euclidean norm. I am still not convinced that this is true, but scale the
            # columns nevertheless.
-           totflux = np.sqrt((self.source_matrix.T*self.source_matrix).diagonal())
-           fit_matrix = self.source_matrix*sparse.dia_matrix((1./totflux, [0]),
-                                                             shape=(self.source_matrix.shape[1], self.source_matrix.shape[1]))
-           fit_matrix.data *= np.take(weights, fit_matrix.indices)
+           total_flux = np.sqrt((fit_matrix.T*fit_matrix).diagonal())

            # for testing, show scatter plot with current residuals
            # import matplotlib.pyplot as plt
            # fig = plt.figure(figsize=(8, 8))
            # ax = fig.add_axes([0.12, 0.12, 0.83, 0.83])
            # # print(psf.uc, psf.vc, psf.xc, psf.yc)
            # ax.scatter(self.transformation[:, 0], self.transformation[:, 1], marker='o', s=5, c=self.current,
            #            cmap=plt.cm.hot, edgecolors='face', vmin=-10, vmax=100)
            # plt.show()
...
...
@@ -355,22 +410,26 @@ class FitLayer(object):
            # The fit for the source fluxes is carried out on the residuals, not the data itself. The fitted residual
            # fluxes are added to the existing ones
            t1 = time.time()
-           bestfit = linalg.lsmr(fit_matrix, self.current*weights, atol=1e-5, show=False)
+           bestfit = linalg.lsmr(fit_matrix.multiply(total_flux), self.current*np.sqrt(self.fit_weights),
+                                 atol=1e-5, show=False)
            t2 = time.time()
            logger.debug("Time required to solve matrix equation: {0:6.3f}s".format(t2 - t1))
            logging.info("Chi**2 of current fit: {0:.1f}".format(bestfit[3]**2))
            # The background fluxes are currently stored in a separate array
-           self.fluxes[included_components] += (bestfit[0]/totflux)[:included_components.sum()]
-           self.background_fluxes += (bestfit[0]/totflux)[included_components.sum():]
+           self.fluxes[included_components] += (bestfit[0]/total_flux)[:included_components.sum()]
+           self.background_fluxes += (bestfit[0]/total_flux)[included_components.sum():]
            # Convergence criterion: the average relative change in flux is < 0.5 percent
-           delta_flux = (bestfit[0]/totflux)[:included_components.sum()]/self.fluxes[included_components]
+           delta_flux = (bestfit[0]/total_flux)[:included_components.sum()]/self.fluxes[included_components]
            with printoptions(precision=4, suppress=True):
                logger.debug("Relative changes in fluxes: {0}".format(delta_flux))
            if np.median(abs(delta_flux)) < 0.005:
                break

+           # re-calculate fitting weights
+           self.fit_weights = self.get_weight_factors()/self.variances

        # carry out optimization of PSF and/or source positions
        if psf_parameters_to_fit is not None or position_parameters_to_fit is not None:
            # if any sources are provided for the PSF fit, the method for large layers is used
...
...
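Taken together, the hunks above make the flux step a weighted sparse least-squares solve: the rows of the source matrix are scaled by sqrt(fit_weights), the columns are rescaled by their Euclidean norms (following the scipy comment quoted above) before scipy.sparse.linalg.lsmr is called, and the solution is mapped back to physical fluxes. A self-contained sketch of that general pattern on random stand-in matrices (not the PampelMuse matrices; here the columns are normalised by dividing by their norms, and the fit is done on the data directly rather than on residuals as in the real code):

import numpy as np
from scipy import sparse
from scipy.sparse import linalg


rng = np.random.default_rng(1)
n_pix, n_src = 300, 4
profiles = sparse.random(n_pix, n_src, density=0.4, random_state=1, format='csr')  # stand-in source matrix
true_flux = np.array([5., 1., 8., 3.])
data = profiles*true_flux + rng.normal(0., 0.01, n_pix)                             # stand-in data vector
weights = rng.uniform(0.5, 2.0, n_pix)                                              # stand-in fit weights

# scale the rows by sqrt(weights), as in fit_matrix = dia(sqrt(fit_weights)) * source_matrix
fit_matrix = sparse.dia_matrix((np.sqrt(weights), [0]), shape=(n_pix, n_pix))*profiles

# normalise the columns to a common Euclidean norm before calling lsmr, then undo the scaling
col_norm = np.sqrt((fit_matrix.T*fit_matrix).diagonal())
scaled = fit_matrix*sparse.dia_matrix((1./col_norm, [0]), shape=(n_src, n_src))
solution = linalg.lsmr(scaled, np.sqrt(weights)*data, atol=1e-8, show=False)[0]/col_norm

print(np.round(solution, 2))  # should be close to true_flux
print(true_flux)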
@@ -406,8 +465,8 @@ class FitLayer(object):
                if parameter not in position_parameters_to_fit:
                    fixed[parameter] = getattr(self.psf_class, parameter)
            valid = np.isfinite(results[:, -2:]).all(axis=1)
-           bestfit = self._update_coordinate_transformation(self.xy[sources], results[valid, -2:], fixed=fixed)
+           bestfit = FitLayer._update_coordinate_transformation(self.xy[sources], results[valid, -2:], fixed=fixed)
            for i, parameter in enumerate(Transformation.PARAMETERS):
                if parameter in position_parameters_to_fit:
                    getattr(self.psf_class, "set_{0}".format(parameter))(bestfit[0][i])
...
...
@@ -452,7 +511,10 @@ class FitLayer(object):
            return_positions = [getattr(self.psf_class, p) for p in position_parameters_to_fit]

        if self.wave is not None:
-           return_wave = np.asarray(self.wave.dot(self.source_matrix)/self.source_matrix.sum(axis=0))[0]
+           # calculate variance weighted wavelength of each component
+           n = self.source_matrix.shape[0]
+           wave_matrix = sparse.dia_matrix((self.fit_weights, [0]), shape=(n, n))*self.source_matrix
+           return_wave = np.asarray(self.wave.dot(wave_matrix)/wave_matrix.sum(axis=0))[0]
        else:
            return_wave = None
...
...
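The new return_wave lines compute a weight-weighted mean wavelength for each component: every column of the source matrix is multiplied by the per-pixel fit weights, and the wavelength vector is then averaged with those products as weights. A toy dense check of that identity (made-up numbers; the real code keeps everything sparse):

import numpy as np


wave = np.array([500., 510., 520., 530.])        # wavelength of each pixel in the layer
profiles = np.array([[1., 0.],                    # dense stand-in for the source matrix (pixels x components)
                     [2., 1.],
                     [1., 3.],
                     [0., 1.]])
weights = np.array([1., 0.5, 2., 1.])             # per-pixel fit weights

wave_matrix = weights[:, None]*profiles           # dense analogue of dia(fit_weights) * source_matrix
return_wave = wave.dot(wave_matrix)/wave_matrix.sum(axis=0)

# the same result, column by column, via np.average
check = np.array([np.average(wave, weights=wave_matrix[:, j]) for j in range(profiles.shape[1])])
print(return_wave)
print(check)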
@@ -464,8 +526,7 @@ class FitLayer(object):
        Returns
        -------
-       The current best-fit model. Note that each model pixel is divided by
-       its uncertainty.
+       The current best-fit model.
        """
        fluxes = np.hstack((self.fluxes[np.isfinite(self.fluxes)], self.background_fluxes))
        return self.source_matrix*sparse.dia_matrix((fluxes, [0]), shape=(
...
...
@@ -477,10 +538,9 @@ class FitLayer(object):
        Returns
        -------
-       The residuals of the current best fit model. Note that each residual
-       pixel is divided by its uncertainty.
+       The residuals of the current best fit model.
        """
-       return (self.data/np.sqrt(self.variances)) - self.model.sum(axis=1).getA1()
+       return self.data - self.model.sum(axis=1).getA1()

    def build_matrix(self, update_unresolved=True):
        """
...
...
@@ -672,8 +732,8 @@ class FitLayer(object):
            logger.debug("Initial guesses for fit of source no. {0}: {1}".format(psf.src_id, dict(zip(names, x0))))

            # get pixels of layer where the current PSF is defined
-           variances = self.variances[psf.used]
-           data = self.current[psf.used]*np.sqrt(variances) + x0[0]*psf.f
+           variances = 1./self.fit_weights[psf.used]
+           data = self.current[psf.used] + x0[0]*psf.f
            psf.transform = self.transformation[psf.used]
            psf.maxrad = np.inf
...
...
@@ -938,11 +998,11 @@ class FitLayer(object):
        # calculate new model
        psf_matrix, included_components = self.build_matrix(update_unresolved=False)
        n_comp = included_components.size
-       psf_matrix.data /= np.take(np.sqrt(self.variances), psf_matrix.indices)
+       psf_matrix.data *= np.take(np.sqrt(self.fit_weights), psf_matrix.indices)
        model = psf_matrix*sparse.dia_matrix((fluxes, [0]), shape=(n_comp, n_comp))

        # return residuals
-       return (self.data/np.sqrt(self.variances)) - model.sum(axis=1).getA1()
+       return (self.data*np.sqrt(self.fit_weights)) - model.sum(axis=1).getA1()
    def multi_psf_jacobi(self, x0, names, fluxes):
        """
...
...
@@ -977,14 +1037,15 @@ class FitLayer(object):
            ii = self.components[i]
            for j in xrange(len(x0)):
                diff_i = getattr(psf, "diff_{0}".format(names[j]))
-               jacob[j, psf.used] -= fluxes[ii]*(diff_i/np.sqrt(self.variances[psf.used]))
+               jacob[j, psf.used] -= fluxes[ii]*(diff_i*np.sqrt(self.fit_weights[psf.used]))

        t1 = time.time()
        logger.debug("Time needed to create Jacobi matrix: {0:.3f}s".format(t1 - t0))
        return jacob
-   def _update_coordinate_transformation(self, xy_reference, xy_true, fixed=None):
+   @staticmethod
+   def _update_coordinate_transformation(xy_reference, xy_true, fixed=None):
        """
        This method uses a set of reference and best-fit coordinates to obtain
        a new coordinate transformation.
...
...
@@ -1028,6 +1089,22 @@ class FitLayer(object):
        aim, the data are sorted by increasing x values and then the MAD
        is measured in bins of n datapoints with increasing x values.
        NOTE: It is assumed that the median value of y is zero.

+       Parameters
+       ----------
+       x : array_like
+           The x-values of the data, used for sorting
+       y : array_like
+           The y-values of the data, used to determine the MAD values.
+       n : int, optional
+           The number of bins in which the MAD is calculated
+
+       Returns
+       -------
+       xbin : ndarray
+           The value of x at the centre of each bin.
+       ybin : ndarray
+           The MAD determined for each bin.
        """
        indices = np.argsort(x)
...
...
@@ -1046,35 +1123,3 @@ class FitLayer(object):
            cury = y[indices[imin:imax]]
            ybin[i] = np.median(abs(cury))
        return xbin, ybin
-   def get_variances(self):
-       """
-       This method is designed to provide an estimate of the variances
-       if they are not provided externally. To this aim, the method compares
-       the current model with the data and determines the deviation between
-       the two as a function of the model flux using a running MAD.
-
-       Returns
-       -------
-       variances : ndarray
-           The array containing the estimated variances. Note that it will
-           be filled with 1s if no model is yet available.
-       """
-       # if available, use provided variances
-       if self._variances is not None:
-           return self._variances
-       # otherwise, we only can estimate the variances if a model exists
-       elif (self.fluxes != 0).any():
-           model_fluxes = self.model.sum(axis=1).getA1()
-           xbin, ybin = FitLayer.calc_running_mad(model_fluxes, self.current)
-           return 1.4826*np.interp(model_fluxes, xbin, ybin)  # 1.4826 is for MAD -> sigma conversion
-       # if not, just return 1s
-       else:
-           return np.ones(self.data.shape, dtype=np.float32)
-
-   def get_weight_factor(self, alpha=2, beta=2):
-       if (self.fluxes != 0).any():
-           return 1./(1. + np.abs(self.current/alpha)**beta)
-       else:
-           return np.ones(self.data.shape, dtype=np.float32)
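Both the new variances property and the removed get_variances method implement the same noise estimate: sort the pixels by model flux, take the median absolute deviation of the residuals in bins of n pixels, convert each MAD to a Gaussian sigma with the 1.4826 factor, and interpolate back onto every pixel. A compact standalone version of that idea (the binning details of calc_running_mad may differ; zero-median residuals are assumed, as the docstring notes):

import numpy as np


def running_mad_sigma(model_flux, residuals, n=100):
    """Per-pixel sigma from a running MAD of the residuals, ordered by model flux."""
    order = np.argsort(model_flux)
    nbins = max(1, order.size//n)
    xbin = np.empty(nbins)
    ybin = np.empty(nbins)
    for i in range(nbins):
        sel = order[i*n:(i + 1)*n]
        xbin[i] = np.median(model_flux[sel])
        ybin[i] = np.median(np.abs(residuals[sel]))   # MAD, assuming zero-median residuals
    return 1.4826*np.interp(model_flux, xbin, ybin)   # 1.4826 converts a MAD into a Gaussian sigma


rng = np.random.default_rng(0)
flux = rng.uniform(0., 100., 5000)
residuals = rng.normal(0., 1. + 0.1*flux)             # noise that grows with flux
sigma = running_mad_sigma(flux, residuals)
print(sigma[np.argmin(flux)], sigma[np.argmax(flux)])  # low-flux vs. high-flux estimate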
pampelmuse/core/parameters.py
"""
parameters.py
=============
Copyright 2013-2016 Sebastian Kamann
This file is part of PampelMuse.
PampelMuse is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
any later version.
PampelMuse is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of