cigale / CIGALE / Commits

Commit 47f33669, authored Jul 06, 2014 by BURGARELLA Denis

Merge branch 'develop' of gitlab.oamp.dev:yannick/pcigale into develop

parents ae31073e c33eb557
Showing 1 changed file with 32 additions and 40 deletions

pcigale/analysis_modules/pdf_analysis/workers.py (+32 -40)
@@ -11,6 +11,7 @@ import time
 import numpy as np
 from scipy import optimize
 from scipy.special import erf
+import scipy.stats as st
 from .utils import (save_best_sed, save_pdf, save_chi2,
                     dchi2_over_ds2)
 from ...warehouse import SedWarehouse
@@ -255,16 +256,14 @@ def analysis(idx, obs):
         )
     if lim_flag is True:
-        norm_init = norm_facts
         for imod in range(len(model_fluxes)):
-            norm_facts[imod] = optimize.newton(dchi2_over_ds2, norm_init[imod],
-                                               tol=1e-16, args=(obs_fluxes, obs_errors,
-                                               model_fluxes[mod, :]))
+            norm_facts[imod] = optimize.newton(dchi2_over_ds2, norm_facts[imod],
+                                               tol=1e-16, args=(obs_fluxes, obs_errors,
+                                               model_fluxes[imod, :]))
     model_fluxes *= norm_facts[:, np.newaxis]

     # χ² of the comparison of each model to each observation.
     mask_data = np.logical_and(obs_fluxes > tolerance, obs_errors > tolerance)
     if lim_flag is True:
         # This mask selects the filter(s) for which measured fluxes are given
         # i.e., when (obs_flux is >=0. and obs_errors>=0.) and lim_flag=True
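For context on the hunk above: the Newton step refines each model's normalisation factor by finding the root of the derivative of the χ² with respect to the scale factor. Below is a minimal sketch of that idea with hypothetical array names and a hand-written derivative restricted to detections; pcigale's own dchi2_over_ds2, imported from .utils in the first hunk, presumably also accounts for the upper-limit terms.

    import numpy as np
    from scipy import optimize

    def dchi2_over_ds(s, f_obs, err, f_mod):
        # d/ds of sum(((f_obs - s * f_mod) / err)**2); zero at the best scale.
        return -2. * np.sum((f_obs - s * f_mod) * f_mod / err ** 2)

    f_obs = np.array([1.0, 2.0, 3.0])   # hypothetical observed fluxes
    err = np.array([0.1, 0.1, 0.2])     # hypothetical uncertainties
    f_mod = np.array([0.9, 2.1, 2.8])   # hypothetical model fluxes

    # pcigale starts from the previously computed factor; we start from 1.
    s = optimize.newton(dchi2_over_ds, 1.0, args=(f_obs, err, f_mod))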
@@ -273,53 +272,47 @@ def analysis(idx, obs):
         # i.e., when (obs_flux is >=0. (and obs_errors>=-9990., obs_errors<0.))
         # and lim_flag=True
         mask_lim = np.logical_and(obs_errors >= -9990., obs_errors < tolerance)
-        chi2_data = np.sum(np.square(
+        chi2_ = np.sum(np.square(
             (obs_fluxes[mask_data] - model_fluxes[:, mask_data]) /
             obs_errors[mask_data]), axis=1)
-        chi2_lim = -2. * np.sum(
+        chi2_ += -2. * np.sum(
             np.log(np.sqrt(np.pi/2.) * (-obs_errors[mask_lim]) *
                    (1. + erf((obs_fluxes[mask_lim] - model_fluxes[:, mask_lim]) /
                              (np.sqrt(2) * (-obs_errors[mask_lim]))))), axis=1)
-        chi2_ = chi2_data + chi2_lim
     else:
         mask_data = np.logical_and(obs_fluxes > tolerance, obs_errors > tolerance)
         chi2_ = np.sum(np.square(
             (obs_fluxes[mask_data] - model_fluxes[:, mask_data]) /
             obs_errors[mask_data]), axis=1)

-    # We use the exponential probability associated with the χ² as
-    # likelihood function.
-    likelihood = np.exp(-chi2_ / 2)
-    # For the analysis, we consider that the computed models explain
-    # each observation. We normalise the likelihood function to have a
-    # total likelihood of 1 for each observation.
-    likelihood /= np.sum(likelihood)
-    # We don't want to take into account the models with a probability
-    # less that the threshold.
-    wlikely = np.where(likelihood >= MIN_PROBABILITY)
-    likelihood = likelihood[wlikely]
-    # We re-normalise the likelihood.
-    likelihood /= np.sum(likelihood)

     ##################################################################
     #                      Variable analysis                         #
     ##################################################################

     # We define the best fitting model for each observation as the one
     # with the least χ².
-    if len(chi2_) == 0:
-        # It sometimes happen because models are older than the Universe's age
-        print("--------------------------------------------------------------")
-        print("No suitable model selected for the object #", idx, "we skip it")
-        print("One possible origin is that models are older than the Universe")
-        print("--------------------------------------------------------------")
+    if chi2_.size == 0:
+        # It sometimes happen because models are older than the Universe's age
+        print("No suitable model found for the object {}. One possible origin "
+              "is that models are older than the Universe.".format(obs['id']))
     else:
+        # We select only models that have at least 0.1% of the probability of
+        # the best model to reproduce the observations. It helps eliminating
+        # very bad models.
+        maxchi2 = st.chi2.isf(st.chi2.sf(np.min(chi2_), obs_fluxes.size - 1) *
+                              1e-3, obs_fluxes.size - 1)
+        wlikely = np.where(chi2_ < maxchi2)
+        # We use the exponential probability associated with the χ² as
+        # likelihood function.
+        likelihood = np.exp(-chi2_[wlikely] / 2)
         best_index = chi2_.argmin()

         # We compute once again the best sed to obtain its info
         global gbl_previous_idx
         if gbl_previous_idx > -1:
             gbl_warehouse.partial_clear_cache(
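Two pieces of this hunk are worth unpacking. First, the χ² now accumulates, on top of the usual term over detected fluxes, a -2 ln L term for upper limits: the Gaussian likelihood integrated up to the limit, hence the erf. Second, model pre-selection switches from a probability floor (MIN_PROBABILITY) to a χ² cut derived from the best model's tail probability. A self-contained sketch of both follows, for a single observation; the array names, the tolerance value, and the degrees of freedom are illustrative assumptions, while the -9990. sentinel and the 1e-3 factor follow the diff.

    import numpy as np
    import scipy.stats as st
    from scipy.special import erf

    def chi2_with_limits(f_obs, err, f_mod, tolerance=1e-12):
        # Detections carry positive fluxes and errors; upper limits are
        # flagged with a negative error whose absolute value is the limit.
        det = np.logical_and(f_obs > tolerance, err > tolerance)
        lim = np.logical_and(err >= -9990., err < tolerance)
        chi2 = np.sum(np.square((f_obs[det] - f_mod[det]) / err[det]))
        # -2 ln of the Gaussian probability integrated up to the limit.
        chi2 += -2. * np.sum(np.log(
            np.sqrt(np.pi / 2.) * (-err[lim]) *
            (1. + erf((f_obs[lim] - f_mod[lim]) /
                      (np.sqrt(2) * (-err[lim]))))))
        return chi2

    # Pre-selection as in the new code: keep models whose χ² tail
    # probability is within 1e-3 of the best model's.
    chi2_ = np.array([3.2, 4.1, 50.0])   # hypothetical χ², one per model
    dof = 5                              # hypothetical n_fluxes - 1
    maxchi2 = st.chi2.isf(st.chi2.sf(chi2_.min(), dof) * 1e-3, dof)
    wlikely = np.where(chi2_ < maxchi2)
    likelihood = np.exp(-chi2_[wlikely] / 2.)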
@@ -330,24 +323,23 @@ def analysis(idx, obs):
         sed = gbl_warehouse.get_sed(gbl_params.modules,
                                     gbl_params.from_index([wz[0][wvalid[0][best_index]]]))

         # We correct the mass-dependent parameters
         for key in sed.mass_proportional_info:
             sed.info[key] *= norm_facts[best_index]
         for index, variable in enumerate(gbl_analysed_variables):
             if variable in sed.mass_proportional_info:
                 model_variables[:, index] *= norm_facts

         # We compute the weighted average and standard deviation using the
         # likelihood as weight.
         analysed_averages = np.empty(len(gbl_analysed_variables))
         analysed_std = np.empty_like(analysed_averages)

-    # We check how many unique parameter values are analysed and if less than
-    # Npdf (= 100), the PDF is initally built assuming a number of bins equal
-    # to the number of unique values for a given parameter (e.g., average_sfr,
-    # age, attenuation.uv_bump_amplitude, dust.luminosity, attenuation.FUV,
-    # etc.).
+        # We check how many unique parameter values are analysed and if less
+        # than Npdf (= 100), the PDF is initally built assuming a number of
+        # bins equal to the number of unique values for a given parameter
+        # (e.g., average_sfr, age, attenuation.uv_bump_amplitude,
+        # dust.luminosity, attenuation.FUV, etc.).
         Npdf = 100
         var = np.empty((Npdf, len(analysed_averages)))
         pdf = np.empty((Npdf, len(analysed_averages)))
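The analysed_averages and analysed_std arrays allocated above are, per the code comment, filled with likelihood-weighted estimates. A minimal sketch of a weighted mean and standard deviation with NumPy, under hypothetical inputs:

    import numpy as np

    likelihood = np.array([0.2, 0.5, 0.3])   # normalised weights per model
    values = np.array([1.0, 2.0, 3.0])       # one analysed variable per model

    mean = np.average(values, weights=likelihood)
    std = np.sqrt(np.average(np.square(values - mean), weights=likelihood))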
@@ -382,7 +374,7 @@ def analysis(idx, obs):
             pdf[:, i] = np.interp(var[:, i], pdf_x, pdf_prob)

-    # TODO Merge with above computation after checking it is fine with a MA.
+        # TODO Merge with above computation after checking it is fine with a MA
         gbl_analysed_averages[idx, :] = analysed_averages
         gbl_analysed_std[idx, :] = analysed_std
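The np.interp call above resamples each parameter's marginal PDF onto the fixed Npdf-point grid. A small sketch of that resampling, with assumed inputs standing in for the per-parameter histogram:

    import numpy as np

    Npdf = 100
    pdf_x = np.array([0.1, 1.0, 5.0])       # hypothetical parameter values
    pdf_prob = np.array([0.2, 0.5, 0.3])    # hypothetical probabilities

    var_grid = np.linspace(pdf_x[0], pdf_x[-1], Npdf)
    pdf_grid = np.interp(var_grid, pdf_x, pdf_prob)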
@@ -392,7 +384,7 @@ def analysis(idx, obs):
         gbl_best_chi2[idx] = chi2_[best_index]
         gbl_best_chi2_red[idx] = chi2_[best_index] / obs_fluxes.size

-    # If observed SED analysis
+        # If observed SED analysis
         if gbl_phase == 1:
             if gbl_save['best_sed']:
                 save_best_sed(obs['id'], sed, norm_facts[best_index])