cigale / CIGALE / Commits

Commit ffa67a8b, authored May 24, 2015 by Médéric Boquien

    PEP8-fication.

parent 7144a406

Showing 32 changed files
pcigale/__init__.py

@@ -3,8 +3,6 @@
 # Licensed under the CeCILL-v2 licence - see Licence_CeCILL_V2-en.txt
 # Author: Yannick Roehlly
-__version__ = "0.1-alpha"
-
 import argparse
 import multiprocessing as mp
 import sys

@@ -13,6 +11,8 @@ from .session.configuration import Configuration
 from .analysis_modules import get_module as get_analysis_module
 from .analysis_modules.utils import ParametersHandler

+__version__ = "0.1-alpha"
+

 def init(config):
     "Create a blank configuration file."
pcigale/analysis_modules/__init__.py

@@ -100,7 +100,7 @@ class AnalysisModule(object):
         # value and that are not in the parameters dictionary, we add them
         # with their default value.
         for key in self.parameter_list:
-            if (not key in parameters) and (
+            if (key not in parameters) and (
                     self.parameter_list[key][2] is not None):
                 parameters[key] = self.parameter_list[key][2]
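The first change above fixes pycodestyle's E713: a membership test should use the `not in` operator. The two spellings are equivalent at runtime, since `not key in parameters` parses as `not (key in parameters)`, so this is purely a readability fix. A minimal sketch of the two forms (names illustrative):

    parameters = {"imf": 0}

    # Legal but discouraged (E713): actually means `not ("age" in parameters)`.
    if not "age" in parameters:
        parameters["age"] = 13

    # Idiomatic PEP 8 form, identical behaviour.
    if "age" not in parameters:
        parameters["age"] = 13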
@@ -124,7 +124,7 @@ class AnalysisModule(object):
             raise KeyError("The parameters passed are different from the "
                            "expected one." + message)

-        #We do the actual processing
+        # We do the actual processing
         self._process(data_file, column_list, creation_modules,
                       creation_modules_params, parameters)
@@ -196,27 +196,27 @@ def adjust_errors(flux, error, tolerance, lim_flag, default_error=0.1,
     # because if lim_flag is False, we process upper limits as no-data.
     #
     # Replace errors below tolerance by the default one.
     mask_noerror = np.logical_and(flux > tolerance, error < -9990.)
     error[mask_noerror] = (default_error * flux[mask_noerror])

-    mask_limflag = np.logical_and.reduce((flux > tolerance, error >=-9990.,
-                                          error < tolerance))
+    mask_limflag = np.logical_and.reduce((flux > tolerance, error >= -9990.,
+                                          error < tolerance))

     # Replace upper limits by no data if lim_flag==False
     if not lim_flag:
         flux[mask_limflag] = -9999.
         error[mask_limflag] = -9999.

     mask_ok = np.logical_and(flux > tolerance, error > tolerance)

     # Add the systematic error.
-    error[mask_ok] = np.sqrt(np.square(error[mask_ok]) +
-                             np.square(flux[mask_ok] * systematic_deviation))
+    error[mask_ok] = np.sqrt(error[mask_ok]**2 +
+                             (flux[mask_ok] * systematic_deviation)**2)

     return error


 def complete_obs_table(obs_table, used_columns, filter_list, tolerance,
                        lim_flag, default_error=0.1, systematic_deviation=0.1):
     """Complete the observation table

     For each filter:
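For intuition, here is a minimal standalone sketch of what `adjust_errors` does with these masks (sentinel values and thresholds as in the hunk above; the data are illustrative, and the upper-limit branch is omitted for brevity):

    import numpy as np

    flux = np.array([1.2, 0.8, 2.0, -9999.])          # -9999.: no data
    error = np.array([-9999., -9995., 0.1, -9999.])   # < -9990.: no error given
    tolerance, default_error, systematic_deviation = 1e-12, 0.1, 0.1

    # No error given: assign a default 10% of the flux.
    mask_noerror = np.logical_and(flux > tolerance, error < -9990.)
    error[mask_noerror] = default_error * flux[mask_noerror]

    # Valid data: add a 10% systematic uncertainty in quadrature.
    mask_ok = np.logical_and(flux > tolerance, error > tolerance)
    error[mask_ok] = np.sqrt(error[mask_ok]**2 +
                             (flux[mask_ok] * systematic_deviation)**2)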
pcigale/analysis_modules/pdf_analysis/__init__.py

@@ -227,55 +227,59 @@ class PdfAnalysis(AnalysisModule):
         print("\nSaving results...")

         save_table_analysis('analysis_results.txt', obs_table['id'],
                             analysed_variables, analysed_averages,
                             analysed_std)
         save_table_best('best_models.txt', obs_table['id'], best_chi2,
                         best_chi2_red, best_parameters, best_fluxes,
                         filters, info)

         if mock_flag is True:

             print("\nMock analysis...")

             obs_fluxes = np.array([obs_table[name] for name in filters]).T
             obs_errors = np.array([obs_table[name + "_err"]
                                    for name in filters]).T
             mock_fluxes = np.empty_like(obs_fluxes)
             mock_errors = np.empty_like(obs_errors)
             bestmod_fluxes = np.ctypeslib.as_array(best_fluxes[0])
             bestmod_fluxes = bestmod_fluxes.reshape(best_fluxes[1])

             wdata = np.where((obs_fluxes > 0.) & (obs_errors > 0.))
             wlim = np.where((obs_fluxes > 0.) & (obs_errors >= -9990.) &
                             (obs_errors < 0.))
             wnodata = np.where((obs_fluxes <= -9990.) &
                                (obs_errors <= -9990.))

             mock_fluxes[wdata] = np.random.normal(bestmod_fluxes[wdata],
                                                   obs_errors[wdata],
                                                   len(bestmod_fluxes[wdata]))
             mock_errors[wdata] = obs_errors[wdata]
             mock_fluxes[wlim] = obs_fluxes[wlim]
             mock_errors[wlim] = obs_errors[wlim]
             mock_fluxes[wnodata] = obs_fluxes[wnodata]
             mock_errors[wnodata] = obs_errors[wnodata]

             mock_table = obs_table.copy()
             mock_table['id'] = obs_table['id']
             mock_table['redshift'] = obs_table['redshift']
             indx = 0
             for name in filters:
                 mock_table[name] = mock_fluxes[:, indx]
                 mock_table[name + "_err"] = mock_errors[:, indx]
                 indx += 1

             phase = 2
             initargs = (params, filters, analysed_variables, model_redshifts,
                         model_fluxes, model_variables, time.time(),
                         mp.Value('i', 0), analysed_averages, analysed_std,
                         best_fluxes, best_parameters, best_chi2,
                         best_chi2_red, save, lim_flag, n_obs, phase)
             if cores == 1:
                 # Do not create a new process
                 init_worker_analysis(*initargs)
                 for idx, mock in enumerate(mock_table):
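The mock analysis perturbs the best-fit model fluxes with the observed errors, then re-runs the fit on the perturbed catalogue. A self-contained sketch of that resampling step (array values illustrative): `np.random.normal(loc, scale, size)` draws one Gaussian sample per band, centred on the model flux with the observed error as standard deviation.

    import numpy as np

    bestmod_fluxes = np.array([1.0, 2.0, 3.0])   # best-fit model fluxes (mJy)
    obs_errors = np.array([0.1, 0.2, 0.3])       # observed 1-sigma errors (mJy)

    # One mock realisation of the observations.
    mock_fluxes = np.random.normal(bestmod_fluxes, obs_errors,
                                   len(bestmod_fluxes))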
@@ -287,11 +291,12 @@ class PdfAnalysis(AnalysisModule):
             print("\nSaving results...")

             save_table_analysis('analysis_mock_results.txt',
                                 mock_table['id'], analysed_variables,
                                 analysed_averages, analysed_std)
             save_table_best('best_mock_models.txt', mock_table['id'],
                             best_chi2, best_chi2_red, best_parameters,
                             best_fluxes, filters, info)

         print("Run completed!")
pcigale/analysis_modules/pdf_analysis/utils.py

@@ -6,16 +6,18 @@
 # Author: Yannick Roehlly & Médéric Boquien

 from astropy import log
-log.setLevel('ERROR')
 from astropy.table import Table, Column
 import numpy as np
 from scipy.stats import scoreatpercentile

 from ..utils import OUT_DIR

+log.setLevel('ERROR')
+
 # Number of points in the PDF
 PDF_NB_POINTS = 1000

+
 def save_best_sed(obsid, sed, norm):
     """Save the best SED to a VO table.
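Calling `log.setLevel` between imports trips pycodestyle/flake8 E402 ("module level import not at top of file") for every import that follows it; the fix is simply to defer the call until the import block is done. A sketch of the before/after:

    # Discouraged: executable statement interleaved with imports; E402 is
    # reported for every import below it.
    from astropy import log
    log.setLevel('ERROR')
    from astropy.table import Table

    # Preferred: group all imports first, then run module-level code.
    from astropy import log
    from astropy.table import Table

    log.setLevel('ERROR')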
@@ -129,8 +131,8 @@ def save_table_analysis(filename, obsid, analysed_variables, analysed_averages,
                               delimiter=None)


 def save_table_best(filename, obsid, chi2, chi2_red, variables, fluxes,
                     filters, info_keys):
     """Save the values corresponding to the best fit

     Parameters
@@ -176,7 +178,7 @@ def save_table_best(filename, obsid, chi2, chi2_red, variables, fluxes, filters,
         column = Column(np_fluxes[:, index], name=name, unit='mJy')
         best_model_table.add_column(column)

     best_model_table.write(OUT_DIR + filename, format='ascii.fixed_width',
                            delimiter=None)
@@ -212,8 +214,8 @@ def dchi2_over_ds2(s):
     # The mask "lim" selects the filter(s) for which upper limits are given,
     # i.e., when obs_fluxes is >= 0. and -9990. <= obs_errors < 0.

     wlim = np.where((gbl_obs_errors >= -9990.) & (gbl_obs_errors < 0.))
     wdata = np.where(gbl_obs_errors >= 0.)

     mod_fluxes_data = gbl_mod_fluxes[wdata]
     mod_fluxes_lim = gbl_mod_fluxes[wlim]
@@ -246,6 +248,7 @@ def dchi2_over_ds2(s):
     return func


 def analyse_chi2(chi2):
     """Function to analyse the best chi^2 and find out what fraction of
     objects seem to be overconstrained.
@@ -260,5 +263,5 @@ def analyse_chi2(chi2):
     # If low values of reduced chi^2, it means that the data are overfitted
     # Errors might be under-estimated or not enough valid data.
     print("\n{}% of the objects have chi^2_red~0 and {}% chi^2_red<0.5"
           .format(np.round((chi2_red < 1e-12).sum()/chi2_red.size, 1),
                   np.round((chi2_red < 0.5).sum()/chi2_red.size, 1)))
pcigale/analysis_modules/pdf_analysis/workers.py

@@ -20,6 +20,7 @@ from ...warehouse import SedWarehouse
 # moments computation.
 MIN_PROBABILITY = 1e-20

+
 def init_sed(params, filters, analysed, redshifts, fluxes, variables,
              t_begin, n_computed):
     """Initializer of the pool of processes. It is mostly used to convert
@@ -70,6 +71,7 @@ def init_sed(params, filters, analysed, redshifts, fluxes, variables,
     gbl_warehouse = SedWarehouse()

+
 def init_analysis(params, filters, analysed, redshifts, fluxes, variables,
                   t_begin, n_computed, analysed_averages, analysed_std,
                   best_fluxes, best_parameters, best_chi2, best_chi2_red,
                   save,
@@ -150,6 +152,7 @@ def init_analysis(params, filters, analysed, redshifts, fluxes, variables,
     gbl_n_obs = n_obs
     gbl_phase = phase

+
 def sed(idx):
     """Worker process to retrieve a SED and affect the relevant data to shared
     RawArrays.
@@ -219,7 +222,7 @@ def analysis(idx, obs):
     # We pick the indices of the models with closest redshift assuming we have
     # limited the number of decimals (usually set to 2 decimals).
     wz = np.where(gbl_w_redshifts[gbl_redshifts[np.abs(obs['redshift'] -
                                                 gbl_redshifts).argmin()]])

     # We only keep models with fluxes >= -90. If not => no data
     # Probably because age > age of the universe (see function sed(idx) above).
@@ -242,7 +245,7 @@ def analysis(idx, obs):
     # 2) s/he puts False in the boolean lim_flag
     # and the limits are processed as no-data below.

     lim_flag = gbl_lim_flag and np.any((obs_errors >= -9990.) &
                                        (obs_errors < tolerance))

     # Normalisation factor to be applied to the model fluxes to best fit
@@ -256,8 +259,8 @@ def analysis(idx, obs):
     if lim_flag is True:
         for imod in range(len(model_fluxes)):
             norm_facts[imod] = optimize.newton(dchi2_over_ds2,
                                                norm_facts[imod], tol=1e-16,
                                                args=(obs_fluxes, obs_errors,
                                                      model_fluxes[imod, :]))
     model_fluxes *= norm_facts[:, np.newaxis]
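When upper limits are used, the best normalisation factor has no closed form, so each model's scale is refined with Newton's method on dχ²/ds = 0. A self-contained sketch of that call pattern; the derivative below is a stand-in for pure detections, not CIGALE's actual merit function with upper limits:

    import numpy as np
    from scipy import optimize

    def dchi2_over_ds2(s, obs, err, mod):
        # For detections only, dchi2/ds = 0 has the analytic solution
        # s = sum(obs*mod/err**2) / sum(mod**2/err**2).
        return np.sum((s * mod - obs) * mod / err**2)

    obs = np.array([1.0, 2.1, 2.9])
    err = np.array([0.1, 0.2, 0.3])
    mod = np.array([0.5, 1.0, 1.5])

    # Analytic optimum as the starting guess, then a Newton refinement.
    s0 = np.sum(obs * mod / err**2) / np.sum(mod**2 / err**2)
    s_best = optimize.newton(dchi2_over_ds2, s0, tol=1e-16,
                             args=(obs, err, mod))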
@@ -299,28 +302,28 @@ def analysis(idx, obs):
         print("No suitable model found for the object {}. One possible origin "
               "is that models are older than the Universe.".format(obs['id']))
     else:
-        # We select only models that have at least 0.1% of the probability of the
-        # best model to reproduce the observations. It helps eliminating very bad
-        # models.
+        # We select only models that have at least 0.1% of the probability of
+        # the best model to reproduce the observations. It helps eliminating
+        # very bad models.
         maxchi2 = st.chi2.isf(st.chi2.sf(np.min(chi2_), obs_fluxes.size - 1) *
                               1e-3, obs_fluxes.size - 1)
         wlikely = np.where(chi2_ < maxchi2)
         # We use the exponential probability associated with the χ² as
         # likelihood function.
         likelihood = np.exp(-chi2_[wlikely] / 2)

         best_index = chi2_.argmin()

         # We compute once again the best sed to obtain its info
         global gbl_previous_idx
         if gbl_previous_idx > -1:
             gbl_warehouse.partial_clear_cache(
                 gbl_params.index_module_changed(gbl_previous_idx,
                                                 wz[0][wvalid[0][best_index]]))
         gbl_previous_idx = wz[0][wvalid[0][best_index]]

         sed = gbl_warehouse.get_sed(
             gbl_params.modules,
             gbl_params.from_index([wz[0][wvalid[0][best_index]]]))

         # We correct the mass-dependent parameters
         for key in sed.mass_proportional_info:
@@ -367,12 +370,12 @@ def analysis(idx, obs):
                 np.square(pdf_x - analysed_averages[i]) * pdf_prob) /
                 np.sum(pdf_prob))
             analysed_std[i] = max(0.05 * analysed_averages[i],
                                   analysed_std[i])
             var[:, i] = np.linspace(min_hist[i], max_hist[i], Npdf)
             pdf[:, i] = np.interp(var[:, i], pdf_x, pdf_prob)

     # TODO Merge with above computation after checking it is fine with a MA
     gbl_analysed_averages[idx, :] = analysed_averages
     gbl_analysed_std[idx, :] = analysed_std
@@ -388,8 +391,8 @@ def analysis(idx, obs):
     if gbl_save['best_sed']:
         save_best_sed(obs['id'], sed, norm_facts[best_index])
     if gbl_save['chi2']:
-        save_chi2(obs['id'], gbl_analysed_variables, model_variables,
-                  chi2_ / obs_fluxes.size)
+        save_chi2(obs['id'], gbl_analysed_variables, model_variables, chi2_,
+                  obs_fluxes.size)
     if gbl_save['pdf']:
         save_pdf(obs['id'], gbl_analysed_variables, var, pdf)
@@ -398,7 +401,6 @@ def analysis(idx, obs):
     n_computed = gbl_n_computed.value
     t_elapsed = time.time() - gbl_t_begin
     print("{}/{} objects analysed in {} seconds ({} objects/s)".
           format(n_computed, gbl_n_obs, np.around(t_elapsed, decimals=1),
                  np.around(n_computed/t_elapsed, decimals=2)),
           end="\n" if idx == gbl_n_obs - 1 else "\r")
pcigale/analysis_modules/savefluxes/__init__.py

@@ -47,8 +47,8 @@ class SaveFluxes(AnalysisModule):
     parameter_list = OrderedDict([
         ("output_file", (
             "string",
-            "Name of the output file that contains the parameters of the model(s) "
-            "and the flux densities in the bands",
+            "Name of the output file that contains the parameters of the "
+            "model(s) and the flux densities in the bands",
             "computed_fluxes.txt"
         )),
         ("save_sed", (
@@ -128,7 +128,7 @@ class SaveFluxes(AnalysisModule):
                         (n_params, n_filters))
         model_parameters = (RawArray(ctypes.c_double, n_params * n_info),
                             (n_params, n_info))

         initargs = (params, filters, save_sed, model_fluxes, model_parameters,
                     time.time(), mp.Value('i', 0))
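These `(RawArray, shape)` tuples let worker processes share the model arrays without pickling; each worker rebuilds a numpy view onto the same buffer, as done with `bestmod_fluxes` in the pdf_analysis hunk above. A minimal sketch of the pattern (sizes illustrative):

    import ctypes
    import numpy as np
    from multiprocessing.sharedctypes import RawArray

    n_params, n_filters = 4, 3

    # Flat shared buffer plus its logical shape, as passed through initargs.
    model_fluxes = (RawArray(ctypes.c_double, n_params * n_filters),
                    (n_params, n_filters))

    # In a worker: zero-copy numpy view onto the shared memory.
    fluxes = np.ctypeslib.as_array(model_fluxes[0]).reshape(model_fluxes[1])
    fluxes[0, :] = 1.  # writes are visible to every process sharing the buffer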
pcigale/analysis_modules/savefluxes/workers.py

@@ -13,6 +13,7 @@ from ...warehouse import SedWarehouse
 from ..utils import OUT_DIR

+
 def init_fluxes(params, filters, save_sed, fluxes, info, t_begin, n_computed):
     """Initializer of the pool of processes. It is mostly used to convert
     RawArrays into numpy arrays. The latter are defined as global variables to
@@ -78,7 +79,7 @@ def fluxes(idx):
     sed = gbl_warehouse.get_sed(gbl_params.modules,
                                 gbl_params.from_index(idx))

-    if gbl_save_sed == True:
+    if gbl_save_sed is True:
         sed.to_votable(OUT_DIR + "{}_best_model.xml".format(idx))

     if 'sfh.age' in sed.info and sed.info['sfh.age'] > sed.info['universe.age']:
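`== True` → `is True` silences pycodestyle E712 (comparison to a `True` literal should use `is True`, or the condition should be tested directly). For a plain boolean flag, the bare truth test is usually the most idiomatic of the three:

    save_sed = True

    if save_sed == True:   # E712: equality comparison to the literal True
        pass
    if save_sed is True:   # accepted by pycodestyle; strict identity test
        pass
    if save_sed:           # simplest and most common form
        pass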
pcigale/analysis_modules/utils.py

@@ -15,9 +15,10 @@ import shutil
 import numpy as np

 from astropy import log
-log.setLevel('ERROR')
 from astropy.table import Table, Column

+log.setLevel('ERROR')
+
 # Directory where the output files are stored
 OUT_DIR = "out/"
@@ -71,8 +72,8 @@ class ParametersHandler(object):
                 isinstance(value, str)):
             dictionary[key] = [value]

-        # We use itertools.product to make all the possible combinations from the
-        # value lists.
+        # We use itertools.product to make all the possible combinations from
+        # the value lists.
         key_list = dictionary.keys()
         value_array_list = [dictionary[key] for key in key_list]
         combination_list = [collections.OrderedDict(zip(key_list, combination))
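For reference, the Cartesian-product expansion this comment describes, as a standalone sketch (parameter names illustrative):

    import collections
    import itertools

    dictionary = {'age': [1, 2], 'metallicity': [0.02]}

    key_list = list(dictionary.keys())
    value_array_list = [dictionary[key] for key in key_list]

    # One OrderedDict per combination of parameter values.
    combination_list = [collections.OrderedDict(zip(key_list, combination))
                        for combination in
                        itertools.product(*value_array_list)]
    # -> [{'age': 1, 'metallicity': 0.02}, {'age': 2, 'metallicity': 0.02}]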
pcigale/creation_modules/__init__.py

@@ -40,7 +40,7 @@ def complete_parameters(given_parameters, parameter_list):
     """
     # Complete the given parameters with default values when needed.
     for key in parameter_list:
-        if (not key in given_parameters) and (
+        if (key not in given_parameters) and (
                 parameter_list[key][2] is not None):
             given_parameters[key] = parameter_list[key][2]

     # Check parameter consistency between the parameter list and the given
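This is the same E713 fix as in `AnalysisModule.process` above. As an aside, a default-filling loop of this shape can also be written with `dict.setdefault`; a sketch under the assumption that `parameter_list[key][2]` holds the default value (this is an alternative spelling, not how CIGALE writes it):

    parameter_list = {'imf': ('integer', 'Initial mass function choice', 0)}
    given_parameters = {}

    # setdefault only inserts when the key is absent, mirroring the loop above.
    for key, (_, _, default) in parameter_list.items():
        if default is not None:
            given_parameters.setdefault(key, default)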
pcigale/creation_modules/bc03.py

@@ -51,9 +51,9 @@ class BC03(CreationModule):
         ("sfr", "Instantaneous Star Formation Rate in solar mass per year, "
                 "at the age of the galaxy."),
         ('sfr10Myrs', 'Average SFR in the last 10 Myr (default) of the '
                       'galaxy history.'),
         ('sfr100Myrs', 'Average SFR in the last 100 Myr (default) of the '
                        'galaxy history.'),
         ("ssp_m_star", "Total mass in stars in Solar mass."),
         ("ssp_m_gas", "Mass returned to the ISM by evolved stars in Solar "
                       "mass."),
pcigale/creation_modules/dale2014.py

@@ -31,8 +31,8 @@ class Dale2014(CreationModule):
     parameter_list = OrderedDict([
         ('fracAGN', (
             'float',
-            "AGN fraction [it is not recommended to combine this AGN "
-            "emission with that of Fritz et al. (2006)]",
+            "AGN fraction. It is not recommended to combine this AGN "
+            "emission with that of the Fritz et al. (2006) models.",
             0.0
         )),
         ('alpha', (
@@ -85,7 +85,7 @@ class Dale2014(CreationModule):
         luminosity = sed.info['dust.luminosity']
         frac_agn = self.parameters["fracAGN"]

         if frac_agn < 1.:
             L_AGN = luminosity * (1. / (1. - frac_agn) - 1.)
         else:
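The `L_AGN` line inverts the definition of the AGN fraction. Assuming `fracAGN` is the AGN share of the total (AGN plus dust) luminosity, i.e.

    f_{\rm AGN} = \frac{L_{\rm AGN}}{L_{\rm AGN} + L_{\rm dust}},

solving for the AGN luminosity gives

    L_{\rm AGN} = L_{\rm dust}\,\frac{f_{\rm AGN}}{1 - f_{\rm AGN}}
                = L_{\rm dust}\left(\frac{1}{1 - f_{\rm AGN}} - 1\right),

which is exactly the expression in the code and explains the guard `frac_agn < 1.`: the factor diverges as the fraction approaches 1.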
@@ -100,7 +100,7 @@ class Dale2014(CreationModule):
                              luminosity * self.model_sb.lumin)

         if frac_agn != 0.:
             sed.add_contribution('agn', self.model_quasar.wave,
                                  L_AGN * self.model_quasar.lumin)


 # CreationModule to be returned by get_module
 Module = Dale2014
pcigale/creation_modules/dl2007.py

@@ -40,8 +40,8 @@ class DL2007(CreationModule):
         ('umin', (
             'float',
             "Minimum radiation field. Possible values are: 0.10, 0.15, 0.20, "
-            "0.30, 0.40, 0.50, 0.70, 0.80, 1.00, 1.20, 1.50, 2.00, 2.50, 3.00, "
-            "4.00, 5.00, 7.00, 8.00, 10.0, 12.0, 15.0, 20.0, 25.0.",
+            "0.30, 0.40, 0.50, 0.70, 0.80, 1.00, 1.20, 1.50, 2.00, 2.50, "
+            "3.00, 4.00, 5.00, 7.00, 8.00, 10.0, 12.0, 15.0, 20.0, 25.0.",
             1.0
         )),
         ('umax', (
pcigale/creation_modules/dustatt_calzleit.py

@@ -41,9 +41,9 @@ def k_calzetti2000(wavelength):
     # Attenuation between 120 nm and 630 nm
     mask = (wavelength < 630)
     result[mask] = 2.659 * (-2.156 + 1.509e3 / wavelength[mask] -
                             0.198e6 / wavelength[mask]**2 +
                             0.011e9 / wavelength[mask]**3) + 4.05

     # Attenuation between 630 nm and 2200 nm
     mask = (wavelength >= 630)
@@ -71,9 +71,9 @@ def k_leitherer2002(wavelength):
     """
     wavelength = np.array(wavelength)
     result = (5.472 + 0.671e3 / wavelength -
               9.218e3 / wavelength**2 +
               2.620e6 / wavelength**3)

     return result
@@ -98,8 +98,8 @@ def uv_bump(wavelength, central_wave, gamma, ebump):
     """
     return (ebump * wavelength**2 * gamma**2 /
             ((wavelength**2 - central_wave**2)**2 +
              wavelength**2 * gamma**2))


 def power_law(wavelength, delta):
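`uv_bump` implements a Lorentzian-like (Drude) profile for the 217.5 nm feature; written out, the returned quantity is

    D(\lambda) = \frac{E_{\rm bump}\,\lambda^2\gamma^2}
                      {(\lambda^2 - \lambda_0^2)^2 + \lambda^2\gamma^2},

where \lambda_0 is `central_wave`, \gamma the width of the bump, and E_{\rm bump} its amplitude. At the central wavelength the denominator reduces to \lambda_0^2\gamma^2, so the profile peaks at D(\lambda_0) = E_{\rm bump}.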
@@ -261,20 +261,20 @@ class CalzLeit(CreationModule):
         powerlaw_slope = float(self.parameters["powerlaw_slope"])

         # Fλ fluxes (only from continuum) in each filter before attenuation.
         flux_noatt = {filt: sed.compute_fnu(filt)
                       for filt in self.filter_list}

         # Compute attenuation curve
         if self.sel_attenuation is None:
             self.sel_attenuation = a_vs_ebv(wavelength, uv_bump_wavelength,
                                             uv_bump_width, uv_bump_amplitude,
                                             powerlaw_slope)

         attenuation_total = 0.
         for contrib in list(sed.contribution_names):
             age = contrib.split('.')[-1].split('_')[-1]
             luminosity = sed.get_lumin_contribution(contrib)
             attenuated_luminosity = (luminosity * 10 **
                                      (ebvs[age] * self.sel_attenuation /
                                       -2.5))
             attenuation_spectrum = attenuated_luminosity - luminosity
             # We integrate the amount of luminosity attenuated (-1 because the
             # spectrum is negative).
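The per-population dimming applied here is the usual magnitude-to-flux conversion: with the selective attenuation curve k(\lambda) from `a_vs_ebv` scaled by the stellar population's colour excess,

    L_{\rm att}(\lambda) = L(\lambda)\,10^{-0.4\,E(B-V)\,k(\lambda)},

which matches `luminosity * 10**(ebvs[age] * self.sel_attenuation / -2.5)`. Since attenuation only removes light, `attenuation_spectrum = attenuated_luminosity - luminosity` is negative everywhere, hence the factor of -1 mentioned in the comment when the attenuated luminosity is integrated.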
@@ -296,7 +296,7 @@ class CalzLeit(CreationModule):
         sed.add_info("dust.luminosity", attenuation_total, True)

         # Fλ fluxes (only from continuum) in each filter after attenuation.
         flux_att = {filt: sed.compute_fnu(filt)
                     for filt in self.filter_list}

         # Attenuation in each filter
         for filt in self.filter_list:
pcigale/creation_modules/dustatt_powerlaw.py

@@ -57,8 +57,8 @@ def uv_bump(wavelength, central_wave, gamma, ebump):
     """
     return (ebump * wavelength**2 * gamma**2 /
             ((wavelength**2 - central_wave**2)**2 +
              wavelength**2 * gamma**2))


 def alambda_av(wavelength, delta, bump_wave, bump_width, bump_ampl):
@@ -97,7 +97,7 @@ def alambda_av(wavelength, delta, bump_wave, bump_width, bump_ampl):
 class PowerLawAtt(CreationModule):
     """Power law attenuation module

     This module computes the attenuation using a power law
     as defined in Charlot and Fall (2000).

     The attenuation can be computed on the whole spectrum or on a specific
@@ -185,20 +185,20 @@ class PowerLawAtt(CreationModule):
         powerlaw_slope = float(self.parameters["powerlaw_slope"])

         # Fλ fluxes (only from continuum) in each filter before attenuation.
         flux_noatt = {filt: sed.compute_fnu(filt)
                       for filt in self.filter_list}

         # Compute attenuation curve
         if self.sel_attenuation is None:
             self.sel_attenuation = alambda_av(wavelength, powerlaw_slope,
                                               uv_bump_wavelength,
                                               uv_bump_width,
                                               uv_bump_amplitude)

         attenuation_total = 0.
         for contrib in list(sed.contribution_names):
             age = contrib.split('.')[-1].split('_')[-1]