Commit a571f2cb authored by Yannick Roehlly

Merge branch 'release/v0.6.0'

parents 7f3456f8 b303f478
@@ -157,7 +157,14 @@ def build_filters(base):
filter_type, filter_table)
# We normalise the filter and compute the effective wavelength.
new_filter.normalise()
# If the filter is a pseudo-filter used to compute line fluxes, it
# should not be normalised.
if not filter_name.startswith('PSEUDO'):
new_filter.normalise()
else:
new_filter.effective_wavelength = np.mean(
filter_table[0][filter_table[1] > 0]
)
base.add_filter(new_filter)
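Pseudo-filters (names starting with 'PSEUDO') carry negative side-band weights, so normalising them would destroy the weighting; instead only the effective wavelength is set, as the mean of the wavelengths where the transmission is positive. A minimal standalone sketch of that branch, with filter_table holding the wavelength and transmission rows as in the code above (values taken from the H beta table further down):

import numpy as np

def pseudo_effective_wavelength(filter_table):
    # filter_table[0] is the wavelength grid, filter_table[1] the transmission.
    # Only the positively weighted (feature) region defines the wavelength.
    return np.mean(filter_table[0][filter_table[1] > 0])

wave = np.array([4827.87, 4827.875, 4847.87, 4847.875, 4876.625, 4876.63])
trans = np.array([0., -0.286, -0.286, 0.348, 0.348, 0.])
print(pseudo_effective_wavelength((wave, trans)))  # 4862.25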
@@ -241,7 +248,6 @@ def build_m2005(base):
lambda_grid = np.hstack([lambda_grid[:argmin+1], lambda_grid_resamp])
flux_age = np.vstack([flux_age[:argmin+1, :], flux_age_resamp])
# Use Z value for metallicity, not log([Z/H])
metallicity = {-1.35: 0.001,
-0.33: 0.01,
@@ -399,6 +405,10 @@ def build_dl2007(base):
"4.00", "5.00", "7.00", "8.00", "10.0", "12.0", "15.0",
"20.0", "25.0"]
# Mdust/MH used to retrieve the dust mass, as the models are given per atom of H
MdMH = {"00": 0.0100, "10": 0.0100, "20": 0.0101, "30": 0.0102,
"40": 0.0102, "50": 0.0103, "60": 0.0104}
# Here we obtain the wavelength beforehand to avoid reading it each time.
datafile = open(dl2007_dir + "U{}/U{}_{}_MW3.1_{}.txt".format(umaximum[0],
umaximum[0],
@@ -413,8 +423,8 @@ def build_dl2007(base):
# We convert wavelengths from μm to nm
wave *= 1000.
# The models are in Jy cm² sr⁻¹ H⁻¹. We convert this to W/nm.
conv = 4. * np.pi * 1e-30 / cst.m_p * cst.c / (wave * wave) * 1e9
# Conversion factor from Jy cm² sr⁻¹ H⁻¹ to W nm⁻¹ (kg of H)⁻¹
conv = 4. * np.pi * 1e-30 / (cst.m_p+cst.m_e) * cst.c / (wave*wave) * 1e9
for model in sorted(qpah.keys()):
for umin in uminimum:
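The new factor can be checked dimensionally: 1 Jy = 1e-26 W m⁻² Hz⁻¹ and 1 cm² = 1e-4 m², so 1 Jy cm² = 1e-30 W Hz⁻¹; the 4π removes the per-steradian, dividing by m_p + m_e (the mass of a hydrogen atom in kg) turns "per H atom" into "per kg of H", and c/λ² converts the per-Hz density into a per-wavelength one, with the trailing 1e9 absorbing the nm/m bookkeeping. A short sketch confirming the inline expression against the explicit steps:

import numpy as np
import scipy.constants as cst

wave_nm = np.array([100., 1e3, 1e5])                  # wavelength grid in nm
jy_cm2 = 1e-30                                        # 1 Jy cm² = 1e-30 W Hz⁻¹
per_kg_H = 4. * np.pi * jy_cm2 / (cst.m_p + cst.m_e)  # per kg of H, not per atom
wave_m = wave_nm * 1e-9
conv_steps = per_kg_H * cst.c / wave_m**2 * 1e-9      # L_nu -> L_lambda, per nm
conv_inline = 4. * np.pi * 1e-30 / (cst.m_p + cst.m_e) * cst.c \
    / (wave_nm * wave_nm) * 1e9
assert np.allclose(conv_steps, conv_inline)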
@@ -429,8 +439,8 @@ def build_dl2007(base):
lumin = np.genfromtxt(io.BytesIO(data.encode()), usecols=(2))
# For some reason fluxes are decreasing in the model files
lumin = lumin[::-1]
# Conversion from Jy cm² sr⁻¹ H⁻¹ to W/nm
lumin *= conv
# Conversion from Jy cm² sr⁻¹ H⁻¹ to W nm⁻¹ (kg of dust)⁻¹
lumin *= conv/MdMH[model]
base.add_dl2007(DL2007(qpah[model], umin, umin, wave, lumin))
for umax in umaximum:
@@ -446,8 +456,8 @@ def build_dl2007(base):
# For some reason fluxes are decreasing in the model files
lumin = lumin[::-1]
# Conversion from Jy cm² sr⁻¹ H⁻¹ to W/nm
lumin *= conv
# Conversion from Jy cm² sr⁻¹ H⁻¹ to W nm⁻¹ (kg of dust)⁻¹
lumin *= conv/MdMH[model]
base.add_dl2007(DL2007(qpah[model], umin, umax, wave, lumin))
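The division by MdMH converts "per kg of H" into "per kg of dust", since MdMH is the dust mass per unit hydrogen mass for each qpah model. Schematically (luminosity values invented):

import numpy as np

MdMH = {"00": 0.0100, "60": 0.0104}        # Mdust/MH, from the table above
lumin = np.array([1.0e-34, 2.0e-34])       # W nm⁻¹ (kg of H)⁻¹, invented values
lumin_dust = lumin / MdMH["60"]            # W nm⁻¹ (kg of dust)⁻¹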
@@ -455,9 +465,9 @@ def build_dl2007(base):
def build_dl2014(base):
dl2014_dir = os.path.join(os.path.dirname(__file__), 'dl2014/')
qpah = {"000":0.47, "010":1.12, "020":1.77, "030":2.50, "040":3.19,
"050":3.90, "060":4.58, "070":5.26, "080":5.95, "090":6.63,
"100":7.32}
qpah = {"000": 0.47, "010": 1.12, "020": 1.77, "030": 2.50, "040": 3.19,
"050": 3.90, "060": 4.58, "070": 5.26, "080": 5.95, "090": 6.63,
"100": 7.32}
uminimum = ["0.100", "0.120", "0.150", "0.170", "0.200", "0.250", "0.300",
"0.350", "0.400", "0.500", "0.600", "0.700", "0.800", "1.000",
@@ -470,6 +480,11 @@ def build_dl2014(base):
"1.9", "2.0", "2.1", "2.2", "2.3", "2.4", "2.5", "2.6", "2.7",
"2.8", "2.9", "3.0"]
# Mdust/MH used to retrieve the dust mass, as the models are given per atom of H
MdMH = {"000": 0.0100, "010": 0.0100, "020": 0.0101, "030": 0.0102,
"040": 0.0102, "050": 0.0103, "060": 0.0104, "070": 0.0105,
"080": 0.0106, "090": 0.0107, "100": 0.0108}
# Here we obtain the wavelength beforehand to avoid reading it each time.
datafile = open(dl2014_dir + "U{}_{}_MW3.1_{}/spec_1.0.dat"
.format(uminimum[0], uminimum[0], "000"))
@@ -483,8 +498,8 @@ def build_dl2014(base):
# We convert wavelengths from μm to nm
wave *= 1000.
# The models are in Jy cm² sr¯¹ H¯¹. We convert this to W/nm.
conv = 4. * np.pi * 1e-30 / cst.m_p * cst.c / (wave * wave) * 1e9
# Conversion factor from Jy cm² sr¯¹ H¯¹ to W nm¯¹ (kg of H)¯¹
conv = 4. * np.pi * 1e-30 / (cst.m_p+cst.m_e) * cst.c / (wave*wave) * 1e9
for model in sorted(qpah.keys()):
for umin in uminimum:
@@ -497,8 +512,8 @@ def build_dl2014(base):
# For some reason fluxes are decreasing in the model files
lumin = lumin[::-1]
# Conversion from Jy cm² sr⁻¹ H⁻¹ to W/nm
lumin *= conv
# Conversion from Jy cm² sr⁻¹ H⁻¹ to W nm⁻¹ (kg of dust)⁻¹
lumin *= conv/MdMH[model]
base.add_dl2014(DL2014(qpah[model], umin, umin, 1.0, wave, lumin))
for al in alpha:
@@ -511,8 +526,8 @@ def build_dl2014(base):
# For some reason fluxes are decreasing in the model files
lumin = lumin[::-1]
# Conversion from Jy cm² sr⁻¹ H⁻¹ to W/nm
lumin *= conv
# Conversion from Jy cm² sr⁻¹ H⁻¹ to W nm⁻¹ (kg of dust)⁻¹
lumin *= conv/MdMH[model]
base.add_dl2014(DL2014(qpah[model], umin, 1e7, al, wave,
lumin))
@@ -538,7 +553,7 @@ def build_fritz2006(base):
datafile.close()
wave = np.genfromtxt(io.BytesIO(data.encode()), usecols=(0))
wave *= 1e3
#Number of wavelength: 178; Number of comments lines: 28
# Number of wavelengths: 178; number of comment lines: 28
nskip = 28
blocksize = 178
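With 28 comment lines at the top of each file and blocks of 178 wavelength rows per model, individual model blocks can be sliced out of the raw text by line index. A hypothetical reader following that layout (the actual parsing code falls outside this hunk):

import io
import numpy as np

def read_block(raw_text, n, nskip=28, blocksize=178):
    # Drop the comment header, then take the n-th block of wavelength rows.
    rows = raw_text.splitlines()[nskip:]
    block = "\n".join(rows[n * blocksize:(n + 1) * blocksize])
    return np.genfromtxt(io.BytesIO(block.encode()))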
@@ -581,7 +596,7 @@ def build_fritz2006(base):
base.add_fritz2006(Fritz2006(params[4], params[3], params[2],
params[1], params[0], psy[n], wave,
lumin_therm, lumin_scatt,lumin_agn))
lumin_therm, lumin_scatt, lumin_agn))
def build_nebular(base):
@@ -685,7 +700,7 @@ def build_base():
build_dale2014(base)
print("\nDONE\n")
print('#' * 78)
print("8- Importing nebular lines and continuum\n")
build_nebular(base)
print("\nDONE\n")
# PSEUDO_D4000
# energy
# D4000 pseudo filter
3600.0 0.0
3610.0 0.0
3620.0 0.0
3630.0 0.0
3640.0 0.0
3650.0 0.0
3660.0 0.0
3670.0 0.0
3680.0 0.0
3690.0 0.0
3700.0 0.0
3710.0 0.0
3720.0 0.0
3730.0 0.0
3740.0 0.0
3750.0 0.0
3760.0 0.0
3770.0 0.0
3780.0 0.0
3790.0 0.0
3800.0 0.0
3810.0 0.0
3820.0 0.0
3830.0 0.0
3840.0 0.0
3849.0 0.0
3850.0 -0.1
3860.0 -0.1
3870.0 -0.1
3880.0 -0.1
3890.0 -0.1
3900.0 -0.1
3910.0 -0.1
3920.0 -0.1
3930.0 -0.1
3940.0 -0.1
3950.0 -0.1
3951.0 0.0
3960.0 0.0
3970.0 0.0
3980.0 0.0
3990.0 0.0
3999.0 0.0
4000.0 0.1
4010.0 0.1
4020.0 0.1
4030.0 0.1
4040.0 0.1
4050.0 0.1
4060.0 0.1
4070.0 0.1
4080.0 0.1
4090.0 0.1
4100.0 0.1
4101.0 0.0
4110.0 0.0
4120.0 0.0
4130.0 0.0
4140.0 0.0
4150.0 0.0
4160.0 0.0
4170.0 0.0
4180.0 0.0
4190.0 0.0
4200.0 0.0
4210.0 0.0
4220.0 0.0
4230.0 0.0
4240.0 0.0
4250.0 0.0
4260.0 0.0
4270.0 0.0
4280.0 0.0
# PSEUDO_H_beta
# energy
# H beta pseudo filter
4827.87 0.0
4827.875 -0.2857959417043478
4847.87 -0.2857959417043478
4847.875 0.34782608695652173
4876.625 0.34782608695652173
4876.63 -0.2857959417043478
4891.625 -0.2857959417043478
4891.63 0.0
# PSEUDO_H_delta
# energy
# H delta pseudo filter
4041.5 0.0
4041.6000000000004 -0.1415428166967742
4079.75 -0.1415428166967742
4079.8 0.0
4083.3999999999996 0.0
4083.5 0.25806451612903225
4122.25 0.25806451612903225
4122.3 0.0
4128.4 0.0
4128.5 -0.1415428166967742
4161.0 -0.1415428166967742
4161.1 0.0
# PSEUDO_H_gamma
# energy
# H gamma pseudo filter
4283.4 0.0
4283.5 -0.11268875366857142
4319.74 -0.11268875366857142
4319.75 0.22857142857142856
4363.5 0.22857142857142856
4363.6 0.0
4367.200000000001 0.0
4367.25 -0.11268875366857142
4419.75 -0.11268875366857142
4419.8 0.0
# PSEUDO_Mg2
# energy
# Mg2 pseudo filter
4895.12 0.0
4895.125 -0.07843137254117646
4957.625 -0.07843137254117646
4957.63 0.0
5154.120000000001 0.0
5154.125 0.23529411764705882
5196.625 0.23529411764705882
5196.63 0.0
5301.12 0.0
5301.125 -0.07843137254117646
5366.125 -0.07843137254117646
5366.130000000001 0.0
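These tables define pseudo-filters rather than real bandpasses: a positive weight over the spectral feature and negative weights over the flanking continuum bands, balanced so that a flat continuum integrates to zero (for H beta, the central band gives 28.75 × 8/23 = 10.0 while the two side bands give about 34.99 × 0.2858 ≈ 10.0 with the opposite sign). Convolving a spectrum with such a table therefore returns a continuum-subtracted line measurement directly, which is why build_filters above skips their normalisation. A sketch of applying the H beta table, assuming simple linear interpolation of the filter onto the spectrum grid:

import numpy as np

def pseudo_filter_flux(wave, flux, filt_wave, filt_trans):
    # Interpolate the pseudo-filter onto the spectrum grid and integrate.
    trans = np.interp(wave, filt_wave, filt_trans, left=0., right=0.)
    return np.trapz(trans * flux, wave)

hbeta_w = np.array([4827.87, 4827.875, 4847.87, 4847.875, 4876.625,
                    4876.63, 4891.625, 4891.63])
hbeta_t = np.array([0., -0.2857959417043478, -0.2857959417043478,
                    0.34782608695652173, 0.34782608695652173,
                    -0.2857959417043478, -0.2857959417043478, 0.])
wave = np.linspace(4820., 4900., 8001)       # fine grid to resolve the edges
flat = np.ones_like(wave)
print(pseudo_filter_flux(wave, flat, hbeta_w, hbeta_t))  # ~0 for a flat spectrum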
@@ -3,8 +3,6 @@
# Licensed under the CeCILL-v2 licence - see Licence_CeCILL_V2-en.txt
# Author: Yannick Roehlly
__version__ = "0.1-alpha"
import argparse
import multiprocessing as mp
import sys
@@ -13,6 +11,8 @@ from .session.configuration import Configuration
from .analysis_modules import get_module as get_analysis_module
from .analysis_modules.utils import ParametersHandler
__version__ = "0.1-alpha"
def init(config):
"Create a blank configuration file."
@@ -5,9 +5,7 @@
# Author: Yannick Roehlly & Denis Burgarella
from importlib import import_module
from collections import OrderedDict
import numpy as np
from scipy import stats
from astropy.table import Column
@@ -15,13 +13,13 @@ class AnalysisModule(object):
"""Abstract class, the pCigale analysis modules are based on.
"""
# parameter_list is a ordered dictionary containing all the parameters
# parameter_list is a dictionary containing all the parameters
# used by the module. Each parameter name is associated with a tuple
# (variable type, description [string], default value). Each module must
# define its parameter list, unless it does not use any parameter. Using
# None means that there is no description, unit or default value. If None
# should be the default value, use the 'None' string instead.
parameter_list = OrderedDict()
parameter_list = dict()
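The contract stays the same after the switch to a plain dict: each entry maps a parameter name to a (variable type, description, default value) tuple. A hypothetical parameter_list illustrating it (names and values invented):

parameter_list = dict([
    ("sfr", ("float", "Constant star formation rate, in solar masses per "
             "year.", 1.0)),
    ("age", ("integer (Myr)", "Age of the oldest stars.", 13000)),
    ("normalise", ("boolean", None, "True")),
])

for name, (ptype, description, default) in parameter_list.items():
    print("{}: {} (default: {})".format(name, ptype, default))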
def __init__(self, **kwargs):
"""Instantiate a analysis module
@@ -100,7 +98,7 @@ class AnalysisModule(object):
# value and that are not in the parameters dictionary, we add them
# with their default value.
for key in self.parameter_list:
if (not key in parameters) and (
if (key not in parameters) and (
self.parameter_list[key][2] is not None):
parameters[key] = self.parameter_list[key][2]
@@ -124,7 +122,7 @@ class AnalysisModule(object):
raise KeyError("The parameters passed are different from the "
"expected one." + message)
#We do the actual processing
# We do the actual processing
self._process(data_file, column_list, creation_modules,
creation_modules_params, parameters)
@@ -196,27 +194,27 @@ def adjust_errors(flux, error, tolerance, lim_flag, default_error=0.1,
# because if lim_flag is False, we process upper limits as no-data.
#
# Replace errors below tolerance by the default one.
mask_noerror = np.logical_and(flux>tolerance, error<-9990.)
mask_noerror = np.logical_and(flux > tolerance, error < -9990.)
error[mask_noerror] = (default_error * flux[mask_noerror])
mask_limflag = np.logical_and.reduce(
(flux>tolerance, error>=-9990., error<tolerance))
(flux > tolerance, error >= -9990., error < tolerance))
# Replace upper limits by no data if lim_flag==False
if not lim_flag:
flux[mask_limflag] = -9999.
error[mask_limflag] = -9999.
mask_ok = np.logical_and(flux>tolerance, error>tolerance)
mask_ok = np.logical_and(flux > tolerance, error > tolerance)
# Add the systematic error.
error[mask_ok] = np.sqrt(np.square(error[mask_ok]) +
np.square(flux[mask_ok] * systematic_deviation))
error[mask_ok] = np.sqrt(error[mask_ok]**2 +
(flux[mask_ok]*systematic_deviation)**2)
return error
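The block above first assigns a default fractional error to valid fluxes that lack one, then adds a fractional systematic term in quadrature to every valid error. A condensed sketch of those two steps (the upper-limit branch is omitted for brevity; array values invented):

import numpy as np

flux = np.array([10., 5., 2., -9999.])
error = np.array([-9999., -1., 0.5, -9999.])   # <-9990: missing; <0: upper limit
tolerance, default_error, systematic_deviation = 1e-12, 0.1, 0.1

mask_noerror = np.logical_and(flux > tolerance, error < -9990.)
error[mask_noerror] = default_error * flux[mask_noerror]

mask_ok = np.logical_and(flux > tolerance, error > tolerance)
error[mask_ok] = np.sqrt(error[mask_ok]**2 +
                         (flux[mask_ok] * systematic_deviation)**2)
print(error)  # [1.414..., -1., 0.5385..., -9999.]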
def complete_obs_table(obs_table, used_columns, filter_list, tolerance, lim_flag,
default_error=0.1, systematic_deviation=0.1):
def complete_obs_table(obs_table, used_columns, filter_list, tolerance,
lim_flag, default_error=0.1, systematic_deviation=0.1):
"""Complete the observation table
For each filter:
@@ -250,14 +248,14 @@ def complete_obs_table(obs_table, used_columns, filter_list, tolerance, lim_flag
Raises
------
StandardError: When a filter is not present in the observation table.
Exception: When a filter is not present in the observation table.
"""
# TODO Print or log a warning when an error column is in the used column
# list but is not present in the observation table.
for name in filter_list:
if name not in obs_table.columns:
raise StandardError("The filter <{}> (at least) is not present in "
raise Exception("The filter <{}> (at least) is not present in "
"the observation table.".format(name))
name_err = name + "_err"
@@ -25,7 +25,6 @@ reduced χ²) is given for each observation.
"""
from collections import OrderedDict
import ctypes
import multiprocessing as mp
from multiprocessing.sharedctypes import RawArray
@@ -37,7 +36,6 @@ from ...utils import read_table
from .. import AnalysisModule, complete_obs_table
from .utils import save_table_analysis, save_table_best, analyse_chi2
from ...warehouse import SedWarehouse
from ...data import Database
from .workers import sed as worker_sed
from .workers import init_sed as init_worker_sed
from .workers import init_analysis as init_worker_analysis
@@ -55,12 +53,12 @@ REDSHIFT_DECIMALS = 2
class PdfAnalysis(AnalysisModule):
"""PDF analysis module"""
parameter_list = OrderedDict([
parameter_list = dict([
("analysed_variables", (
"array of strings",
"List of the variables (in the SEDs info dictionaries) for which "
"the statistical analysis will be done.",
["sfr", "sfr10Myrs", "sfr100Myrs"]
["sfh.sfr", "sfh.sfr10Myrs", "sfh.sfr100Myrs"]
)),
("save_best_sed", (
"boolean",
@@ -134,14 +132,7 @@ class PdfAnalysis(AnalysisModule):
lim_flag = config["lim_flag"].lower() == "true"
mock_flag = config["mock_flag"].lower() == "true"
# Get the needed filters in the pcigale database. We use an ordered
# dictionary because we need the keys to always be returned in the
# same order. We also put the filters in the shared modules as they
# are needed to compute the fluxes during the models generation.
with Database() as base:
filters = OrderedDict([(name, base.get_filter(name))
for name in column_list
if not name.endswith('_err')])
filters = [name for name in column_list if not name.endswith('_err')]
n_filters = len(filters)
# Read the observation table and complete it by adding error where
@@ -168,8 +159,9 @@ class PdfAnalysis(AnalysisModule):
# Retrieve an arbitrary SED to obtain the list of output parameters
warehouse = SedWarehouse()
sed = warehouse.get_sed(creation_modules, params.from_index(0))
info = sed.info
n_info = len(sed.info)
info = list(sed.info.keys())
info.sort()
n_info = len(info)
del warehouse, sed
print("Computing the models fluxes...")
@@ -235,55 +227,59 @@ class PdfAnalysis(AnalysisModule):
print("\nSaving results...")
save_table_analysis('analysis_results.txt', obs_table['id'],
analysed_variables, analysed_averages, analysed_std)
save_table_best('best_models.txt', obs_table['id'], best_chi2,
best_chi2_red, best_parameters, best_fluxes, filters, info)
save_table_analysis('analysis_results.txt', obs_table['id'],
analysed_variables, analysed_averages,
analysed_std)
save_table_best('best_models.txt', obs_table['id'], best_chi2,
best_chi2_red, best_parameters, best_fluxes, filters,
info)
if mock_flag is True:
print("\nMock analysis...")
obs_fluxes = np.array([obs_table[name] for name in filters]).T
obs_errors = np.array([obs_table[name + "_err"] for name in filters]).T
obs_errors = np.array([obs_table[name + "_err"] for name in
filters]).T
mock_fluxes = np.empty_like(obs_fluxes)
mock_errors = np.empty_like(obs_errors)
bestmod_fluxes = np.ctypeslib.as_array(best_fluxes[0])
bestmod_fluxes = bestmod_fluxes.reshape(best_fluxes[1])
wdata = np.where((obs_fluxes > 0.)&(obs_errors > 0.))
wlim = np.where((obs_fluxes > 0.)&
(obs_errors >= -9990.)&(obs_errors < 0.))
wnodata = np.where((obs_fluxes <= -9990.)&(obs_errors <= -9990.))
wdata = np.where((obs_fluxes > 0.) & (obs_errors > 0.))
wlim = np.where((obs_fluxes > 0.) & (obs_errors >= -9990.) &
(obs_errors < 0.))
wnodata = np.where((obs_fluxes <= -9990.) &
(obs_errors <= -9990.))
mock_fluxes[wdata] = np.random.normal(
bestmod_fluxes[wdata],
obs_errors[wdata],
bestmod_fluxes[wdata],
obs_errors[wdata],
len(bestmod_fluxes[wdata]))
mock_errors[wdata] = obs_errors[wdata]
mock_fluxes[wlim] = obs_fluxes[wlim]
mock_errors[wlim] = obs_errors[wlim]
mock_fluxes[wnodata] = obs_fluxes[wnodata]
mock_errors[wnodata] = obs_errors[wnodata]
mock_table = obs_table.copy()
mock_table['id'] = obs_table['id']
mock_table['redshift'] = obs_table['redshift']
indx = 0
for name in filters:
mock_table[name] = mock_fluxes[:, indx]
mock_table[name + "_err"] = mock_errors[:, indx]
indx += 1
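For the mock catalogue, each well-measured flux is replaced by a Gaussian draw centred on the best-fit model flux with the observed uncertainty as scale, while upper limits and missing values are carried over as observed. A condensed sketch (two objects, two bands, values invented):

import numpy as np

best_fluxes = np.array([[1.0, 2.0], [3.0, 0.5]])    # best-model fluxes
obs_fluxes = np.array([[1.1, 1.8], [2.9, -9999.]])
obs_errors = np.array([[0.1, 0.2], [0.3, -9999.]])

mock_fluxes = np.empty_like(obs_fluxes)
wdata = np.where((obs_fluxes > 0.) & (obs_errors > 0.))
mock_fluxes[wdata] = np.random.normal(best_fluxes[wdata], obs_errors[wdata])
wrest = np.where(~((obs_fluxes > 0.) & (obs_errors > 0.)))
mock_fluxes[wrest] = obs_fluxes[wrest]              # limits/no-data unchanged
print(mock_fluxes)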
phase = 2
initargs = (params, filters, analysed_variables, model_redshifts,
model_fluxes, model_variables, time.time(),
mp.Value('i', 0), analysed_averages, analysed_std,
best_fluxes, best_parameters, best_chi2,
best_chi2_red, save, lim_flag, n_obs, phase)
model_fluxes, model_variables, time.time(),
mp.Value('i', 0), analysed_averages, analysed_std,
best_fluxes, best_parameters, best_chi2,
best_chi2_red, save, lim_flag, n_obs, phase)
if cores == 1: # Do not create a new process
init_worker_analysis(*initargs)
for idx, mock in enumerate(mock_table):
@@ -295,11 +291,12 @@ class PdfAnalysis(AnalysisModule):
print("\nSaving results...")
save_table_analysis('analysis_mock_results.txt', mock_table['id'],
analysed_variables, analysed_averages, analysed_std)
save_table_best('best_mock_models.txt', mock_table['id'],
best_chi2, best_chi2_red,
best_parameters, best_fluxes, filters, info)
save_table_analysis('analysis_mock_results.txt', mock_table['id'],
analysed_variables, analysed_averages,
analysed_std)
save_table_best('best_mock_models.txt', mock_table['id'],
best_chi2, best_chi2_red, best_parameters,
best_fluxes, filters, info)
print("Run completed!")
@@ -6,16 +6,19 @@
# Author: Yannick Roehlly & Médéric Boquien
from astropy import log
log.setLevel('ERROR')
from astropy.table import Table, Column
import numpy as np
from scipy.special import erf
from scipy.stats import scoreatpercentile
from ..utils import OUT_DIR
log.setLevel('ERROR')
# Number of points in the PDF
PDF_NB_POINTS = 1000
def save_best_sed(obsid, sed, norm):
"""Save the best SED to a VO table.
@@ -129,8 +132,8 @@ def save_table_analysis(filename, obsid, analysed_variables, analysed_averages,
delimiter=None)
def save_table_best(filename, obsid, chi2, chi2_red, variables, fluxes, filters,
info_keys):
def save_table_best(filename, obsid, chi2, chi2_red, variables, fluxes,
filters, info_keys):
"""Save the values corresponding to the best fit
Parameters
@@ -147,7 +150,7 @@ def save_table_best(filename, obsid, chi2, chi2_red, variables, fluxes, filters,
All variables corresponding to a SED
fluxes: RawArray
Fluxes in all bands for each object
filters: OrderedDict
filters: list
Filters used to compute the fluxes
info_keys: list
Parameters names
@@ -176,11 +179,11 @@ def save_table_best(filename, obsid, chi2, chi2_red, variables, fluxes, filters,
column = Column(np_fluxes[:, index], name=name, unit='mJy')
best_model_table.add_column(column)
best_model_table.write(OUT_DIR + filename,format='ascii.fixed_width',
best_model_table.write(OUT_DIR + filename, format='ascii.fixed_width',
delimiter=None)
def dchi2_over_ds2(s):
def dchi2_over_ds2(s, obs_fluxes, obs_errors, mod_fluxes):
"""Function used to estimate the normalization factor in the SED fitting
process when upper limits are included in the dataset to fit (from Eq. A11
in Sawicki M. 2012, PASP, 124, 1008).
@@ -212,17 +215,17 @@ def dchi2_over_ds2(s):
# The mask "lim" selects the filter(s) for which upper limits are given
# i.e., when obs_fluxes >= 0. and -9990. <= obs_errors < 0.
wlim = np.where((gbl_obs_errors >= -9990.)&(gbl_obs_errors < 0.))
wdata = np.where(gbl_obs_errors>=0.)
wlim = np.where((obs_errors >= -9990.) & (obs_errors < 0.))
wdata = np.where(obs_errors >= 0.)
mod_fluxes_data = gbl_mod_fluxes[wdata]
mod_fluxes_lim = gbl_mod_fluxes[wlim]
mod_fluxes_data = mod_fluxes[wdata]
mod_fluxes_lim = mod_fluxes[wlim]
obs_fluxes_data = gbl_obs_fluxes[wdata]
obs_fluxes_lim = gbl_obs_fluxes[wlim]
obs_fluxes_data = obs_fluxes[wdata]
obs_fluxes_lim = obs_fluxes[wlim]
obs_errors_data = gbl_obs_errors[wdata]
obs_errors_lim = -gbl_obs_errors[wlim]
obs_errors_data = obs_errors[wdata]
obs_errors_lim = -obs_errors[wlim]
dchi2_over_ds_data = np.sum(
(obs_fluxes_data-s*mod_fluxes_data) *
@@ -246,6 +249,7 @@ def dchi2_over_ds2(s):
return func
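Passing the observations explicitly (instead of reading module-level gbl_* globals) makes the function self-contained, so it can be handed to a root finder to solve dχ²/ds = 0 for the normalization s. A sketch with the detections-only term, whose zero must match the usual analytic scaling; the full function adds the erf term of Sawicki (2012) for the upper limits:

import numpy as np
from scipy.optimize import root

def dchi2_over_ds(s, obs, err, mod):
    # Derivative of chi^2 with respect to the scaling s, detections only.
    return np.sum((obs - s * mod) * mod / err**2)

obs = np.array([1.0, 2.1, 2.9])
err = np.array([0.1, 0.2, 0.3])
mod = np.array([0.5, 1.0, 1.5])

s_num = root(dchi2_over_ds, 1.0, args=(obs, err, mod)).x[0]
s_ana = np.sum(obs * mod / err**2) / np.sum(mod**2 / err**2)
assert np.isclose(s_num, s_ana)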
def analyse_chi2(chi2):
"""Function to analyse the best chi^2 and find out whether what fraction of
objects seem to be overconstrainted.
@@ -260,5 +264,5 @@ def analyse_chi2(chi2):
# Low values of reduced chi^2 mean that the data are overfitted: the errors
# might be under-estimated, or there may not be enough valid data.
print("\n{}% of the objects have chi^2_red~0 and {}% chi^2_red<0.5"
.format(np.round((chi2_red < 1e-12).sum()/chi2_red.size, 1),
np.round((chi2_red < 0.5).sum()/chi2_red.size, 1)))
.format(np.round(100. * (chi2_red < 1e-12).sum() / chi2_red.size, 1),
        np.round(100. * (chi2_red < 0.5).sum() / chi2_red.size, 1)))
@@ -20,6 +20,7 @@ from ...warehouse import SedWarehouse
# moments computation.
MIN_PROBABILITY = 1e-20
def init_sed(params, filters, analysed, redshifts, fluxes, variables,
t_begin, n_computed):
"""Initializer of the pool of processes. It is mostly used to convert
@@ -30,8 +31,8 @@ def init_sed(params, filters, analysed, redshifts, fluxes, variables,
----------
params: ParametersHandler
Handles the parameters from a 1D index.
filters: OrderedDict
Contains filters to compute the fluxes.
filters: List
Contains the names of the filters to compute the fluxes.
analysed: list
Variable names to be analysed.
redshifts: RawArray and tuple containing the shape
@@ -70,6 +71,7 @@ def init_sed(params, filters, analysed, redshifts, fluxes, variables,
gbl_warehouse = SedWarehouse()
def init_analysis(params, filters, analysed, redshifts, fluxes, variables,
t_begin, n_computed, analysed_averages, analysed_std,
best_fluxes, best_parameters, best_chi2, best_chi2_red, save,
@@ -82,7 +84,7 @@ def init_analysis(params, filters, analysed, redshifts, fluxes, variables,
----------
params: ParametersHandler
Handles the parameters from a 1D index.
filters: OrderedDict
filters: list
Contains filters to compute the fluxes.
analysed: list
Variable names to be analysed
@@ -122,7 +124,7 @@ def init_analysis(params, filters, analysed, redshifts, fluxes, variables,
global gbl_redshifts, gbl_w_redshifts, gbl_analysed_averages
global gbl_analysed_std, gbl_best_fluxes, gbl_best_parameters
global gbl_best_chi2, gbl_best_chi2_red, gbl_save, gbl_n_obs
global gbl_lim_flag, gbl_phase
global gbl_lim_flag, gbl_phase, gbl_keys
gbl_analysed_averages = np.ctypeslib.as_array(analysed_averages[0])
gbl_analysed_averages = gbl_analysed_averages.reshape(analysed_averages[1])
@@ -149,6 +151,8 @@ def init_analysis(params, filters, analysed, redshifts, fluxes, variables,
gbl_n_obs = n_obs
gbl_phase = phase
gbl_keys = None
def sed(idx):
"""Worker process to retrieve a SED and affect the relevant data to shared
@@ -169,24 +173,23 @@ def sed(idx):
sed = gbl_warehouse.get_sed(gbl_params.modules,
gbl_params.from_index(idx))
if 'age' in sed.info and sed.info['age'] > sed.info['universe.age']:
if 'sfh.age' in sed.info and sed.info['sfh.age'] > sed.info['universe.age']: