Commit 11dbd038 authored by BURGARELLA Denis

Merge branch 'release/v0.7.0'

parents a571f2cb deb7dc52
.be/id-cache
.DS_Store
*.pyc
build/
pcigale/data/data.db
pcigale.egg-info
docs/_build
@@ -117,17 +117,17 @@ def read_bc03_ssp(filename):
     # The time grid is in years; we want Myr.
     time_grid = np.array(time_grid, dtype=float)
-    time_grid = time_grid * 1.e-6
+    time_grid *= 1.e-6
     # The first "long" vector encountered is the wavelength grid. The values
     # are in Ångström; we convert them to nanometres.
     wavelength = np.array(full_table.pop(0), dtype=float)
-    wavelength = wavelength * 0.1
+    wavelength *= 0.1
     # The luminosities are in solar luminosities (3.826e33 erg/s) per
     # Ångström; we convert them to W/nm.
     luminosity = np.array(full_table, dtype=float)
-    luminosity = luminosity * 3.826e27
+    luminosity *= 3.826e27
     # Transposition to have the time in the second axis.
     luminosity = luminosity.transpose()
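
Note on the recurring change in this hunk: for a NumPy array, x = x * c allocates a temporary array and rebinds the name, while the in-place x *= c writes into the existing buffer. A minimal sketch of the difference (the grid values are illustrative, not the BC03 data):

    import numpy as np

    time_grid = np.arange(0.0, 1.4e10, 1.e6)  # toy time grid in years

    # Out-of-place: builds a second array, then rebinds the name.
    converted = time_grid * 1.e-6
    # In-place: converts to Myr inside the original buffer, no extra copy.
    time_grid *= 1.e-6
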
@@ -137,6 +137,7 @@ def read_bc03_ssp(filename):

 def build_filters(base):
+    filters = []
     filters_dir = os.path.join(os.path.dirname(__file__), 'filters/')
     for filter_file in glob.glob(filters_dir + '*.dat'):
         with open(filter_file, 'r') as filter_file_read:
@@ -165,8 +166,9 @@ def build_filters(base):
             new_filter.effective_wavelength = np.mean(
                 filter_table[0][filter_table[1] > 0]
             )
-            base.add_filter(new_filter)
+            filters.append(new_filter)
+
+    base.add_filters(filters)
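
This is the first of several hunks with the same shape: each builder now collects its objects in a list and stores them with one batched base.add_* call instead of one database call per object. A schematic of the pattern, using a stand-in class (FakeBase and the filter names are placeholders, not pcigale's actual API):

    class FakeBase:
        """Stand-in for pcigale's Database, for illustration only."""
        def __init__(self):
            self.filters = []

        def add_filter(self, filt):       # old style: one insert per call
            self.filters.append(filt)

        def add_filters(self, filts):     # new style: one insert per batch
            self.filters.extend(filts)

    base = FakeBase()

    # Old pattern: write each filter as soon as it is built.
    for name in ("FUV", "NUV", "u"):
        base.add_filter(name)

    # New pattern: accumulate in memory first, store once.
    filters = [name for name in ("g", "r", "i")]
    base.add_filters(filters)
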

 def build_m2005(base):

@@ -226,7 +228,7 @@ def build_m2005(base):
             [age_grid_orig, lambda_grid_orig, flux_orig] = \
                 spec_table[:, spec_table[1, :] == wavelength]
             flux_orig = flux_orig * 10 * 1.e-7  # From erg s⁻¹ Å⁻¹ to W/nm
-            age_grid_orig = age_grid_orig * 1000  # Gyr to Myr
+            age_grid_orig *= 1000  # Gyr to Myr
             flux_regrid = interpolate.interp1d(age_grid_orig,
                                                flux_orig)(age_grid)
@@ -329,6 +331,7 @@ def build_bc2003(base):

 def build_dale2014(base):
+    models = []
     dale2014_dir = os.path.join(os.path.dirname(__file__), 'dale2014/')

     # Getting the alpha grid for the templates
@@ -369,10 +372,9 @@ def build_dale2014(base):
         lumin[lumin < 0] = 0
         lumin[wave < 2E3] = 0
         norm = np.trapz(lumin, x=wave)
-        lumin = lumin/norm
-        base.add_dale2014(Dale2014(fraction, alpha_grid[al-1], wave, lumin))
+        lumin /= norm
+        models.append(Dale2014(fraction, alpha_grid[al-1], wave, lumin))

     # Emission from dust heated by AGN - Quasar template
     filename = dale2014_dir + "shi_agn.regridded.extended.dat"
     print("Importing {}...".format(filename))
@@ -381,12 +383,15 @@ def build_dale2014(base):
     wave *= 1e3
     lumin_quasar = 10**lumin_quasar / wave
     norm = np.trapz(lumin_quasar, x=wave)
-    lumin_quasar = lumin_quasar / norm
-    base.add_dale2014(Dale2014(1.0, 0.0, wave, lumin_quasar))
+    lumin_quasar /= norm
+    models.append(Dale2014(1.0, 0.0, wave, lumin_quasar))
+
+    base.add_dale2014(models)


 def build_dl2007(base):
+    models = []
     dl2007_dir = os.path.join(os.path.dirname(__file__), 'dl2007/')
     qpah = {
@@ -442,7 +447,7 @@ def build_dl2007(base):
             # Conversion from Jy cm² sr⁻¹ H⁻¹ to W nm⁻¹ (kg of dust)⁻¹
             lumin *= conv/MdMH[model]
-            base.add_dl2007(DL2007(qpah[model], umin, umin, wave, lumin))
+            models.append(DL2007(qpah[model], umin, umin, wave, lumin))
         for umax in umaximum:
             filename = dl2007_dir + "U{}/U{}_{}_MW3.1_{}.txt".format(umin,
                                                                      umin,
@@ -459,10 +464,12 @@ def build_dl2007(base):
             # Conversion from Jy cm² sr⁻¹ H⁻¹ to W nm⁻¹ (kg of dust)⁻¹
             lumin *= conv/MdMH[model]
-            base.add_dl2007(DL2007(qpah[model], umin, umax, wave, lumin))
+            models.append(DL2007(qpah[model], umin, umax, wave, lumin))
+
+    base.add_dl2007(models)


 def build_dl2014(base):
+    models = []
     dl2014_dir = os.path.join(os.path.dirname(__file__), 'dl2014/')
     qpah = {"000": 0.47, "010": 1.12, "020": 1.77, "030": 2.50, "040": 3.19,
@@ -515,7 +522,7 @@ def build_dl2014(base):
             # Conversion from Jy cm² sr⁻¹ H⁻¹ to W nm⁻¹ (kg of dust)⁻¹
             lumin *= conv/MdMH[model]
-            base.add_dl2014(DL2014(qpah[model], umin, umin, 1.0, wave, lumin))
+            models.append(DL2014(qpah[model], umin, umin, 1.0, wave, lumin))
         for al in alpha:
             filename = (dl2014_dir + "U{}_1e7_MW3.1_{}/spec_{}.dat"
                         .format(umin, model, al))
@@ -529,11 +536,12 @@ def build_dl2014(base):
             # Conversion from Jy cm² sr⁻¹ H⁻¹ to W nm⁻¹ (kg of dust)⁻¹
             lumin *= conv/MdMH[model]
-            base.add_dl2014(DL2014(qpah[model], umin, 1e7, al, wave,
-                                   lumin))
+            models.append(DL2014(qpah[model], umin, 1e7, al, wave, lumin))
+
+    base.add_dl2014(models)

 def build_fritz2006(base):
+    models = []
     fritz2006_dir = os.path.join(os.path.dirname(__file__), 'fritz2006/')

     # Parameters of Fritz+2006
@@ -590,16 +598,19 @@ def build_fritz2006(base):
         lumin_agn *= 1e-4
         # Normalization of the lumin_therm to 1 W
         norm = np.trapz(lumin_therm, x=wave)
-        lumin_therm = lumin_therm / norm
-        lumin_scatt = lumin_scatt / norm
-        lumin_agn = lumin_agn / norm
-        base.add_fritz2006(Fritz2006(params[4], params[3], params[2],
+        lumin_therm /= norm
+        lumin_scatt /= norm
+        lumin_agn /= norm
+        models.append(Fritz2006(params[4], params[3], params[2],
                                 params[1], params[0], psy[n], wave,
                                 lumin_therm, lumin_scatt, lumin_agn))
+
+    base.add_fritz2006(models)

 def build_nebular(base):
+    models_lines = []
+    models_cont = []
     lines_dir = os.path.join(os.path.dirname(__file__), 'nebular/')

     # Number of Lyman continuum photons to normalize the nebular continuum
@@ -628,14 +639,9 @@ def build_nebular(base):
         ratio2 = ratio2/ratio2[w]
         ratio3 = ratio3/ratio3[w]

-        lines = NebularLines(np.float(Z), -3., wave, ratio1)
-        base.add_nebular_lines(lines)
-        lines = NebularLines(np.float(Z), -2., wave, ratio2)
-        base.add_nebular_lines(lines)
-        lines = NebularLines(np.float(Z), -1., wave, ratio3)
-        base.add_nebular_lines(lines)
+        models_lines.append(NebularLines(np.float(Z), -3., wave, ratio1))
+        models_lines.append(NebularLines(np.float(Z), -2., wave, ratio2))
+        models_lines.append(NebularLines(np.float(Z), -1., wave, ratio3))

         filename = "{}continuum_{}.dat".format(lines_dir, Z)
         print("Importing {}...".format(filename))
@@ -651,15 +657,12 @@ def build_nebular(base):
         cont2 *= conv
         cont3 *= conv

-        cont = NebularContinuum(np.float(Z), -3., wave, cont1)
-        base.add_nebular_continuum(cont)
-        cont = NebularContinuum(np.float(Z), -2., wave, cont2)
-        base.add_nebular_continuum(cont)
-        cont = NebularContinuum(np.float(Z), -1., wave, cont3)
-        base.add_nebular_continuum(cont)
+        models_cont.append(NebularContinuum(np.float(Z), -3., wave, cont1))
+        models_cont.append(NebularContinuum(np.float(Z), -2., wave, cont2))
+        models_cont.append(NebularContinuum(np.float(Z), -1., wave, cont3))

+    base.add_nebular_continuum(models_cont)
+    base.add_nebular_lines(models_lines)

 def build_base():
     base = Database(writable=True)
@@ -14,4 +14,4 @@
 1648.07 0.1601
 1705.61 0.1188
 1750.00 0.1050
-1810.83 -0.0009
+1810.83 0.0000
# PSEUDO_D4000
# energy
# D4000 pseudo filter
3600.0 0.0
3610.0 0.0
3620.0 0.0
3630.0 0.0
3640.0 0.0
3650.0 0.0
3660.0 0.0
3670.0 0.0
3680.0 0.0
3690.0 0.0
3700.0 0.0
3710.0 0.0
3720.0 0.0
3730.0 0.0
3740.0 0.0
3750.0 0.0
3760.0 0.0
3770.0 0.0
3780.0 0.0
3790.0 0.0
3800.0 0.0
3810.0 0.0
3820.0 0.0
3830.0 0.0
3840.0 0.0
3849.0 0.0
3850.0 -0.1
@@ -57,20 +33,3 @@
4100.0 0.1
4101.0 0.0
4110.0 0.0
4120.0 0.0
4130.0 0.0
4140.0 0.0
4150.0 0.0
4160.0 0.0
4170.0 0.0
4180.0 0.0
4190.0 0.0
4200.0 0.0
4210.0 0.0
4220.0 0.0
4230.0 0.0
4240.0 0.0
4250.0 0.0
4260.0 0.0
4270.0 0.0
4280.0 0.0
# PSEUDO_LyC
# energy
# Lyman continuum pseudo filter
91 1
100 1
110 1
120 1
130 1
140 1
150 1
160 1
170 1
180 1
190 1
200 1
210 1
220 1
230 1
240 1
250 1
260 1
270 1
280 1
290 1
300 1
310 1
320 1
330 1
340 1
350 1
360 1
370 1
380 1
390 1
400 1
410 1
420 1
430 1
440 1
450 1
460 1
470 1
480 1
490 1
500 1
510 1
520 1
530 1
540 1
550 1
560 1
570 1
580 1
590 1
600 1
610 1
620 1
630 1
640 1
650 1
660 1
670 1
680 1
690 1
700 1
710 1
720 1
730 1
740 1
750 1
760 1
770 1
780 1
790 1
800 1
810 1
820 1
830 1
840 1
850 1
860 1
870 1
880 1
890 1
900 1
910 1
911 1
912 0
920 0
@@ -15,14 +15,16 @@ __version__ = "0.1-alpha"

 def init(config):
-    "Create a blank configuration file."
+    """Create a blank configuration file.
+    """
     config.create_blank_conf()
     print("The initial configuration file was created. Please complete it "
           "with the data file name and the pcigale modules to use.")


 def genconf(config):
-    "Generate the full configuration."
+    """Generate the full configuration.
+    """
     config.generate_conf()
     print("The configuration file has been updated. Please complete the "
           "various module parameters and the data file columns to use in "
@@ -30,7 +32,8 @@ def genconf(config):


 def check(config):
-    "Check the configuration."
+    """Check the configuration.
+    """
     # TODO: Check if all the parameters that don't have default values are
     #       given for each module.
     print("With this configuration, pcigale must compute {} "
@@ -41,7 +44,8 @@ def check(config):


 def run(config):
-    "Run the analysis."
+    """Run the analysis.
+    """
     data_file = config.configuration['data_file']
     column_list = config.configuration['column_list']
     creation_modules = config.configuration['creation_modules']
@@ -87,18 +91,21 @@ def main():
     run_parser = subparsers.add_parser('run', help=run.__doc__)
     run_parser.set_defaults(parser='run')

-    args = parser.parse_args()
-
-    if args.config_file:
-        config = Configuration(args.config_file)
-    else:
-        config = Configuration()
-
-    if args.parser == 'init':
-        init(config)
-    elif args.parser == 'genconf':
-        genconf(config)
-    elif args.parser == 'check':
-        check(config)
-    elif args.parser == 'run':
-        run(config)
+    if len(sys.argv) == 1:
+        parser.print_usage()
+    else:
+        args = parser.parse_args()
+
+        if args.config_file:
+            config = Configuration(args.config_file)
+        else:
+            config = Configuration()
+
+        if args.parser == 'init':
+            init(config)
+        elif args.parser == 'genconf':
+            genconf(config)
+        elif args.parser == 'check':
+            check(config)
+        elif args.parser == 'run':
+            run(config)
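
The subcommand help strings come straight from the handler docstrings (help=run.__doc__), which is why each one-line docstring above is kept as the first line of its new triple-quoted form. A minimal self-contained sketch of the pattern together with the new no-argument guard (the command set is reduced for illustration):

    import argparse
    import sys


    def run(config):
        """Run the analysis.
        """
        print("running with", config)


    parser = argparse.ArgumentParser()
    subparsers = parser.add_subparsers(dest='parser')
    # The first docstring line doubles as the subcommand help text.
    subparsers.add_parser('run', help=run.__doc__)

    if len(sys.argv) == 1:
        # With no arguments, print the usage instead of failing.
        parser.print_usage()
    else:
        args = parser.parse_args()
        if args.parser == 'run':
            run("dummy-config")
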
@@ -5,6 +5,7 @@
 # Author: Yannick Roehlly & Denis Burgarella

 from importlib import import_module
+
 import numpy as np
 from astropy.table import Column
@@ -148,20 +149,18 @@ def get_module(module_name):
         raise


-def adjust_errors(flux, error, tolerance, lim_flag, default_error=0.1,
-                  systematic_deviation=0.1):
-    """Adjust the errors, replacing the 0 values by the default error and
-    adding the systematic deviation.
-
-    The systematic deviation changes the error to:
-    sqrt( error² + (flux * deviation)² )
+def adjust_data(fluxes, errors, tolerance, lim_flag, default_error=0.1,
+                systematic_deviation=0.1):
+    """Adjust the fluxes and errors, replacing the invalid values by NaN and
+    adding the systematic deviation. The systematic deviation changes the
+    errors to: sqrt(errors² + (fluxes*deviation)²)

     Parameters
     ----------
-    flux: array of floats
-        Fluxes.
-    error: array of floats
-        Observational error in the same unit as the fluxes.
+    fluxes: array of floats
+        Observed fluxes.
+    errors: array of floats
+        Observational errors in the same unit as the fluxes.
     tolerance: float
         Tolerance threshold under which a flux or error is considered as 0.
     lim_flag: boolean
@@ -179,38 +178,35 @@ def adjust_errors(flux, error, tolerance, lim_flag, default_error=0.1,
     """
     # The arrays must have the same lengths.
-    if len(flux) != len(error):
+    if len(fluxes) != len(errors):
         raise ValueError("The flux and error arrays must have the same "
                          "length.")

-    # We copy the error array not to modify the original one.
-    error = np.copy(error)
+    # We copy the arrays not to modify the original ones.
+    fluxes = fluxes.copy()
+    errors = errors.copy()

     # We define:
     # 1) upper limit == (lim_flag==True) and
     #    [(flux > tolerance) and (-9990. < error < tolerance)]
     # 2) no data == (flux < -9990.) and (error < -9990.)
     # Note that the upper-limit test must be performed before the no-data test
     # because if lim_flag is False, we process upper limits as no-data.
     #
-    # Replace errors below tolerance by the default one.
-    mask_noerror = np.logical_and(flux > tolerance, error < -9990.)
-    error[mask_noerror] = (default_error * flux[mask_noerror])
+    # We set invalid data to NaN.
+    mask_invalid = np.where((fluxes <= tolerance) | (errors < -9990.))
+    fluxes[mask_invalid] = np.nan
+    errors[mask_invalid] = np.nan

-    mask_limflag = np.logical_and.reduce(
-        (flux > tolerance, error >= -9990., error < tolerance))
+    # Replace missing errors by the default ones.
+    mask_noerror = np.where((fluxes > tolerance) & ~np.isfinite(errors))
+    errors[mask_noerror] = (default_error * fluxes[mask_noerror])

     # Replace upper limits by no data if lim_flag==False.
     if not lim_flag:
-        flux[mask_limflag] = -9999.
-        error[mask_limflag] = -9999.
-
-    mask_ok = np.logical_and(flux > tolerance, error > tolerance)
+        mask_limflag = np.where((fluxes > tolerance) & (errors < tolerance))
+        fluxes[mask_limflag] = np.nan
+        errors[mask_limflag] = np.nan

     # Add the systematic error.
-    error[mask_ok] = np.sqrt(error[mask_ok]**2 +
-                             (flux[mask_ok]*systematic_deviation)**2)
-    return error
+    mask_ok = np.where((fluxes > tolerance) & (errors > tolerance))
+    errors[mask_ok] = np.sqrt(errors[mask_ok]**2 +
+                              (fluxes[mask_ok]*systematic_deviation)**2)
+    return fluxes, errors
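
A toy call illustrating the new behaviour, assuming the adjust_data defined above (the input values are made up). Fluxes at or below the tolerance, or flagged with the -9999. no-data sentinel, come back as NaN; an error below the tolerance marks an upper limit, which lim_flag=False also turns into NaN; a missing (NaN) error is replaced by the 10% default before the systematic deviation is folded in:

    import numpy as np
    np.seterr(invalid='ignore')  # comparisons against NaN are expected here

    fluxes = np.array([10.0, 5.0, -9999., 2.0])
    errors = np.array([1.0, -100., -9999., np.nan])

    fluxes, errors = adjust_data(fluxes, errors, tolerance=1e-12,
                                 lim_flag=False)

    # fluxes -> [10., nan, nan, 2.]
    # errors -> [sqrt(1. + 1.), nan, nan, sqrt(0.2**2 + 0.2**2)]
    #   index 0: systematic deviation added, sqrt(1**2 + (10*0.1)**2)
    #   index 1: upper limit, treated as no data because lim_flag is False
    #   index 3: default error 0.1*2.0, then the systematic deviation
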

 def complete_obs_table(obs_table, used_columns, filter_list, tolerance,
@@ -259,20 +255,17 @@ def complete_obs_table(obs_table, used_columns, filter_list, tolerance,
                              "the observation table.".format(name))
         name_err = name + "_err"
-        if name_err not in used_columns:
-            if name_err not in obs_table.columns:
-                obs_table.add_column(Column(
-                    name=name_err,
-                    data=np.ones(len(obs_table), dtype=float))*-9999.,
-                    index=obs_table.colnames.index(name)+1
-                )
-            else:
-                obs_table[name_err] = np.zeros(len(obs_table))
-        obs_table[name_err] = adjust_errors(obs_table[name],
-                                            obs_table[name_err],
-                                            tolerance,
-                                            lim_flag,
-                                            default_error,
-                                            systematic_deviation)
+        if name_err not in obs_table.columns:
+            obs_table.add_column(Column(name=name_err,
+                                        data=np.full(len(obs_table), np.nan)),
+                                 index=obs_table.colnames.index(name)+1)
+        elif name_err not in used_columns:
+            obs_table[name_err] = np.full(len(obs_table), np.nan)
+        obs_table[name], obs_table[name_err] = adjust_data(obs_table[name],
+                                                           obs_table[name_err],
+                                                           tolerance,
+                                                           lim_flag,
+                                                           default_error,
+                                                           systematic_deviation)

     return obs_table
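
A standalone sketch of the column-completion logic above on a toy astropy table (the column names are illustrative):

    import numpy as np
    from astropy.table import Column, Table

    obs_table = Table({'id': ['a', 'b'], 'FUV': [1.2, 3.4]})
    name = 'FUV'
    name_err = name + '_err'

    # Create the missing error column, filled with NaN, right after the
    # corresponding flux column.
    if name_err not in obs_table.columns:
        obs_table.add_column(Column(name=name_err,
                                    data=np.full(len(obs_table), np.nan)),
                             index=obs_table.colnames.index(name)+1)

    print(obs_table.colnames)  # ['id', 'FUV', 'FUV_err']
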
@@ -42,7 +42,6 @@ from .workers import init_analysis as init_worker_analysis
 from .workers import analysis as worker_analysis
 from ..utils import ParametersHandler, backup_dir
-from ..utils import OUT_DIR

 # Tolerance threshold under which any flux or error is considered as 0.
 TOLERANCE = 1e-12
@@ -118,6 +117,7 @@ class PdfAnalysis(AnalysisModule):
             Number of cores to run the analysis on

         """
+        np.seterr(invalid='ignore')

        print("Initialising the analysis module... ")
@@ -142,7 +142,7 @@ class PdfAnalysis(AnalysisModule):
         n_obs = len(obs_table)

         w_redshifting = creation_modules.index('redshifting')
-        if creation_modules_params[w_redshifting]['redshift'] == ['']:
+        if list(creation_modules_params[w_redshifting]['redshift']) == ['']:
             z = np.unique(np.around(obs_table['redshift'],
                                     decimals=REDSHIFT_DECIMALS))
             creation_modules_params[w_redshifting]['redshift'] = z
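
The list() cast guards against the redshift parameter arriving as a numpy array: comparing an array to a list broadcasts element-wise and returns an array, and using that result as an if condition raises ValueError as soon as it holds more than one element. A small illustration:

    import numpy as np

    redshift = np.array([''])

    print(redshift == [''])        # array([ True]): element-wise comparison
    print(list(redshift) == [''])  # True: an unambiguous boolean
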
@@ -208,12 +208,11 @@ class PdfAnalysis(AnalysisModule):
         best_chi2 = (RawArray(ctypes.c_double, n_obs), (n_obs))
         best_chi2_red = (RawArray(ctypes.c_double, n_obs), (n_obs))

-        phase = 1
         initargs = (params, filters, analysed_variables, model_redshifts,
                     model_fluxes, model_variables, time.time(),
                     mp.Value('i', 0), analysed_averages, analysed_std,
                     best_fluxes, best_parameters, best_chi2, best_chi2_red,
-                    save, lim_flag, n_obs, phase)
+                    save, lim_flag, n_obs)
         if cores == 1:  # Do not create a new process
             init_worker_analysis(*initargs)
             for idx, obs in enumerate(obs_table):
print("\nMock analysis...")
# For the mock analysis we do not save the ancillary files
for k in save:
save[k] = False
obs_fluxes = np.array([obs_table[name] for name in filters]).T
obs_errors = np.array([obs_table[name + "_err"] for name in
filters]).T
mock_fluxes = np.empty_like(obs_fluxes)
mock_errors = np.empty_like(obs_errors)
mock_fluxes = obs_fluxes.copy()
bestmod_fluxes = np.ctypeslib.as_array(best_fluxes[0])
bestmod_fluxes = bestmod_fluxes.reshape(best_fluxes[1])
wdata = np.where((obs_fluxes > 0.) & (obs_errors > 0.))
wlim = np.where((obs_fluxes > 0.) & (obs_errors >= -9990.) &
(obs_errors < 0.))
wnodata = np.where((obs_fluxes <= -9990.) &
(obs_errors <= -9990.))
mock_fluxes[wdata] = np.random.normal(
bestmod_fluxes[wdata],
obs_errors[wdata],
len(bestmod_fluxes[wdata]))
mock_errors[wdata] = obs_errors[wdata]
mock_fluxes[wlim] = obs_fluxes[wlim]
mock_errors[wlim] = obs_errors[wlim]
mock_fluxes[wnodata] = obs_fluxes[wnodata]
mock_errors[wnodata] = obs_errors[wnodata]
wdata = np.where((obs_fluxes > TOLERANCE) &
(obs_errors > TOLERANCE))
mock_fluxes[wdata] = np.random.normal(bestmod_fluxes[wdata],
obs_errors[wdata])
mock_table = obs_table.copy()
mock_table['id'] = obs_table['id']
mock_table['redshift'] = obs_table['redshift']
indx = 0
for name in filters:
mock_table[name] = mock_fluxes[:, indx]
mock_table[name + "_err"] = mock_errors[:, indx]
indx += 1
for idx, name in enumerate(filters):
mock_table[name] = mock_fluxes[:, idx]
phase = 2
initargs = (params, filters, analysed_variables, model_redshifts,
model_fluxes, model_variables, time.time(),
mp.Value('i', 0), analysed_averages, analysed_std,
best_fluxes, best_parameters, best_chi2,
best_chi2_red, save, lim_flag, n_obs, phase)
best_chi2_red, save, lim_flag, n_obs)
if cores == 1: # Do not create a new process
init_worker_analysis(*initargs)
for idx, mock in enumerate(mock_table):
......
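
The mock analysis now only perturbs the well-measured fluxes: best-model fluxes are resampled with the observed errors as the Gaussian widths, while upper limits and missing data keep their observed values through the initial copy. A toy version of the resampling (the array contents are invented):

    import numpy as np
    np.seterr(invalid='ignore')  # NaN comparisons below are expected

    TOLERANCE = 1e-12
    obs_fluxes = np.array([[10.0, np.nan], [3.0, 5.0]])
    obs_errors = np.array([[1.0, np.nan], [0.3, 0.5]])
    bestmod_fluxes = np.array([[9.5, 2.0], [3.2, 4.8]])

    mock_fluxes = obs_fluxes.copy()
    wdata = np.where((obs_fluxes > TOLERANCE) & (obs_errors > TOLERANCE))
    # One Gaussian realisation of each valid flux around the best model.
    mock_fluxes[wdata] = np.random.normal(bestmod_fluxes[wdata],
                                          obs_errors[wdata])
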
@@ -8,16 +8,13 @@
 from astropy import log
 from astropy.table import Table, Column
 import numpy as np
 from scipy import optimize
 from scipy.special import erf
 from scipy.stats import scoreatpercentile

 from ..utils import OUT_DIR

 log.setLevel('ERROR')

-# Number of points in the PDF
-PDF_NB_POINTS = 1000
 def save_best_sed(obsid, sed, norm):
     """Save the best SED to a VO table.
@@ -35,62 +32,75 @@ def save_best_sed(obsid, sed, norm):
     sed.to_votable(OUT_DIR + "{}_best_model.xml".format(obsid), mass=norm)


-def save_pdf(obsid, analysed_variables, model_variables, likelihood):
-    """Save the PDF to a FITS file
+def save_pdf(obsid, name, model_variable, likelihood):
+    """Compute and save the PDF to a FITS file.

-    We estimate the probability density functions (PDF) of the parameters
-    using a weighted kernel density estimation. This part should definitely
-    be improved, as we simulate the weights by adding as many values as
-    their probability * 100.
+    We estimate the probability density function (PDF) of the parameter from
+    a likelihood-weighted histogram.

     Parameters
     ----------
     obsid: string
         Name of the object. Used to prepend the output file name.
-    analysed_variables: list
-        Analysed variable names
-    model_variables: 2D array
-        Analysed variable values for all models
-    likelihood: 2D array
-        Likelihood for all models
+    name: string
+        Analysed variable name
+    model_variable: array
+        Values of the model variable
+    likelihood: 1D array
+        Likelihood of the "likely" models

     """
-    for var_index, var_name in enumerate(analysed_variables):
-        pdf_grid = model_variables[:, var_index]
-        pdf_prob = likelihood[:, var_index]
-        if pdf_prob is None:
-            # TODO: use logging
-            print("Can not compute PDF for observation <{}> and "
-                  "variable <{}>.".