Commit a2107cb5 authored by Médéric Boquien

Ensure that the best models are properly computed when models are computed by blocks and no fit could be made in one or more blocks.
parent f2e67af9
@@ -22,6 +22,7 @@
 - When using a parameters file, Boolean values were not correctly interpreted. (Médéric Boquien, reported by Eric Martínez, INAOE)
 - Make sure that the best-fit models are stored with the correct scaling factor when the distance is given explicitly. (Médéric Boquien)
 - Some labels and the title for the SED plots have been improved to avoid overlaps and overflows. (Médéric Boquien)
+- Ensure that best models are properly computed when models are computed by blocks and no fit could be made in one or more blocks. This can be the case if all the models in a block are older than the age of the universe. (Médéric Boquien)
 ### Optimised
 - Slight speedup of the computation of the likelihood from the χ². (Médéric Boquien)
 - The fritz2006 module should now run faster thanks to an optimisation of the computation of the luminosity of the various AGN components. (Médéric Boquien & Guang Yang)
@@ -229,46 +229,47 @@ def bestfit(oidx, obs):
     """
     np.seterr(invalid='ignore')

-    best_index = int(gbl_results.best.index[oidx])
-    # We compute the model at the exact redshift not to have to correct for the
-    # difference between the object and the grid redshifts.
-    params = deepcopy(gbl_params.from_index(best_index))
-    if obs.redshift >= 0.:
-        model_z = params[gbl_params.modules.index('redshifting')]['redshift']
-        params[gbl_params.modules.index('redshifting')]['redshift'] = obs.redshift
-        # Correct fluxes for the fact that the scaling factor was computed on
-        # the grid redshift. Because of the difference in redshift the distance
-        # is different and must be reflected in the scaling
-        corr_scaling = compute_corr_dz(model_z, obs) / \
-            compute_corr_dz(obs.redshift, obs)
-    else:  # The model redshift is always exact in redshifting mode
-        corr_scaling = 1.
-
-    sed = deepcopy(gbl_warehouse.get_sed(gbl_params.modules, params))
-
-    # Handle the case where the distance does not correspond to the redshift.
-    if obs.redshift >= 0.:
-        corr_dz = (obs.distance / sed.info['universe.luminosity_distance']) ** 2
-    else:
-        corr_dz = 1.
-
-    scaling = gbl_results.best.scaling[oidx] * corr_scaling
-
-    for band in gbl_results.best.flux:
-        gbl_results.best.flux[band][oidx] = sed.compute_fnu(band) * scaling
-
-    # If the distance is user defined, the redshift-based luminosity distance
-    # of the model is probably incorrect so we replace it
-    sed.add_info('universe.luminosity_distance', obs.distance, force=True)
-
-    for prop in gbl_results.best.intprop:
-        gbl_results.best.intprop[prop][oidx] = sed.info[prop]
-
-    for prop in gbl_results.best.extprop:
-        gbl_results.best.extprop[prop][oidx] = sed.info[prop] * scaling \
-            * corr_dz
-
-    if gbl_conf['analysis_params']["save_best_sed"]:
-        sed.to_fits(f"out/{obs.id}", scaling * corr_dz)
+    if np.isfinite(gbl_results.best.index[oidx]):
+        best_index = int(gbl_results.best.index[oidx])
+        # We compute the model at the exact redshift not to have to correct for
+        # the difference between the object and the grid redshifts.
+        params = deepcopy(gbl_params.from_index(best_index))
+        if obs.redshift >= 0.:
+            model_z = params[gbl_params.modules.index('redshifting')]['redshift']
+            params[gbl_params.modules.index('redshifting')]['redshift'] = obs.redshift
+            # Correct fluxes for the fact that the scaling factor was computed
+            # on the grid redshift. Because of the difference in redshift the
+            # distance is different and must be reflected in the scaling
+            corr_scaling = compute_corr_dz(model_z, obs) / \
+                compute_corr_dz(obs.redshift, obs)
+        else:  # The model redshift is always exact in redshifting mode
+            corr_scaling = 1.
+
+        sed = deepcopy(gbl_warehouse.get_sed(gbl_params.modules, params))
+
+        # Handle the case where the distance does not correspond to the redshift.
+        if obs.redshift >= 0.:
+            corr_dz = (obs.distance / sed.info['universe.luminosity_distance']) ** 2
+        else:
+            corr_dz = 1.
+
+        scaling = gbl_results.best.scaling[oidx] * corr_scaling
+
+        for band in gbl_results.best.flux:
+            gbl_results.best.flux[band][oidx] = sed.compute_fnu(band) * scaling
+
+        # If the distance is user defined, the redshift-based luminosity distance
+        # of the model is probably incorrect so we replace it
+        sed.add_info('universe.luminosity_distance', obs.distance, force=True)
+
+        for prop in gbl_results.best.intprop:
+            gbl_results.best.intprop[prop][oidx] = sed.info[prop]
+
+        for prop in gbl_results.best.extprop:
+            gbl_results.best.extprop[prop][oidx] = sed.info[prop] * scaling \
+                * corr_dz
+
+        if gbl_conf['analysis_params']["save_best_sed"]:
+            sed.to_fits(f"out/{obs.id}", scaling * corr_dz)

     gbl_counter.inc()
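
This hunk wraps the whole computation in an `np.isfinite` guard: with the best-model indices now initialised to NaN (see the `SharedArray` change below), an object for which no block yielded a valid fit would otherwise crash on the `int(...)` conversion. A minimal sketch of the failure mode, using toy names rather than pcigale's real structures:

```python
import numpy as np

# Toy stand-in for gbl_results.best.index: NaN marks "no valid fit yet".
best_index = np.full(3, np.nan)  # three objects, nothing fitted
best_index[0] = 42.              # only object 0 found a best model

for oidx in range(best_index.size):
    if np.isfinite(best_index[oidx]):
        idx = int(best_index[oidx])  # safe: only reached for valid fits
        print(f"object {oidx}: best model {idx}")
    else:
        # Without the guard, int(np.nan) raises
        # "ValueError: cannot convert float NaN to integer".
        print(f"object {oidx}: no model could be fit in any block")
```
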
@@ -112,10 +112,10 @@ class BayesResultsManager(object):
                      for band in merged.fluxerror}

         weight = np.array([result.weight for result in results])
-        totweight = np.sum(weight, axis=0)
+        totweight = np.nansum(weight, axis=0)

         for prop in merged.intmean:
-            merged.intmean[prop][:] = np.sum(
+            merged.intmean[prop][:] = np.nansum(
                 intmean[prop] * weight, axis=0) / totweight

             # We compute the merged standard deviation by combining the
@@ -123,11 +123,11 @@ class BayesResultsManager(object):
             # http://stats.stackexchange.com/a/10445 where the number of
             # datapoints has been substituted with the weights. In short we
             # exploit the fact that Var(X) = E(Var(X)) + Var(E(X)).
-            merged.interror[prop][:] = np.sqrt(np.sum(
+            merged.interror[prop][:] = np.sqrt(np.nansum(
                 weight * (interror[prop]**2. + (intmean[prop]-merged.intmean[prop])**2), axis=0) / totweight)

         for prop in merged.extmean:
-            merged.extmean[prop][:] = np.sum(
+            merged.extmean[prop][:] = np.nansum(
                 extmean[prop] * weight, axis=0) / totweight

             # We compute the merged standard deviation by combining the
@@ -135,7 +135,7 @@ class BayesResultsManager(object):
             # http://stats.stackexchange.com/a/10445 where the number of
             # datapoints has been substituted with the weights. In short we
             # exploit the fact that Var(X) = E(Var(X)) + Var(E(X)).
-            merged.exterror[prop][:] = np.sqrt(np.sum(
+            merged.exterror[prop][:] = np.sqrt(np.nansum(
                 weight * (exterror[prop]**2. + (extmean[prop]-merged.extmean[prop])**2), axis=0) / totweight)

         for prop in merged.extmean:
@@ -148,7 +148,7 @@ class BayesResultsManager(object):
                                           merged.exterror[prop])

         for band in merged.fluxmean:
-            merged.fluxmean[band][:] = np.sum(
+            merged.fluxmean[band][:] = np.nansum(
                 fluxmean[band] * weight, axis=0) / totweight

             # We compute the merged standard deviation by combining the
@@ -156,7 +156,7 @@ class BayesResultsManager(object):
             # http://stats.stackexchange.com/a/10445 where the number of
             # datapoints has been substituted with the weights. In short we
             # exploit the fact that Var(X) = E(Var(X)) + Var(E(X)).
-            merged.fluxerror[band][:] = np.sqrt(np.sum(
+            merged.fluxerror[band][:] = np.sqrt(np.nansum(
                 weight * (fluxerror[band]**2. + (fluxmean[band]-merged.fluxmean[band])**2), axis=0) / totweight)

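Taken together, these hunks replace `np.sum` with `np.nansum` in every reduction over blocks, so a block in which an object has no valid fit (NaN mean, error and weight) simply drops out of the weighted combination instead of poisoning it. A self-contained sketch of the merge rule with toy arrays, not pcigale's result managers:

```python
import numpy as np

# Rows are blocks, columns are objects; NaN marks a block with no valid fit.
mean = np.array([[10.0, np.nan],
                 [12.0, 5.0]])
error = np.array([[1.0, np.nan],
                  [2.0, 0.5]])
weight = np.array([[1.0, np.nan],
                   [3.0, 2.0]])

# np.nansum ignores the NaN block; np.sum would make everything NaN.
totweight = np.nansum(weight, axis=0)
merged_mean = np.nansum(mean * weight, axis=0) / totweight

# Law of total variance, Var(X) = E(Var(X)) + Var(E(X)), with weights in
# place of the number of datapoints (cf. stats.stackexchange.com/a/10445).
merged_error = np.sqrt(np.nansum(
    weight * (error**2 + (mean - merged_mean)**2), axis=0) / totweight)

print(merged_mean)   # [11.5  5. ]  -- object 1 comes from block 1 alone
print(merged_error)  # [2.   0.5]
```
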
@@ -284,22 +284,22 @@ class BestResultsManager(object):
         if len(results) == 1:
             return results[0]

-        best = np.argmin([result.chi2 for result in results], axis=0)
+        chi2 = np.array([result.chi2 for result in results])

         merged = results[0]
-        for iobs, bestidx in enumerate(best):
-            for band in merged.flux:
-                merged.flux[band][iobs] = \
-                    results[bestidx].flux[band][iobs]
-            for prop in merged.intprop:
-                merged.intprop[prop][iobs] = \
-                    results[bestidx].intprop[prop][iobs]
-            for prop in merged.extprop:
-                merged.extprop[prop][iobs] = \
-                    results[bestidx].extprop[prop][iobs]
-            merged.chi2[iobs] = results[bestidx].chi2[iobs]
-            merged.scaling[iobs] = results[bestidx].scaling[iobs]
-            merged.index[iobs] = results[bestidx].index[iobs]
+        for iobs, bestidx in enumerate(np.argsort(chi2, axis=0)[0, :]):
+            if np.isfinite(bestidx):
+                for band in merged.flux:
+                    merged.flux[band][iobs] = \
+                        results[bestidx].flux[band][iobs]
+                for prop in merged.intprop:
+                    merged.intprop[prop][iobs] = \
+                        results[bestidx].intprop[prop][iobs]
+                for prop in merged.extprop:
+                    merged.extprop[prop][iobs] = \
+                        results[bestidx].extprop[prop][iobs]
+                merged.chi2[iobs] = results[bestidx].chi2[iobs]
+                merged.scaling[iobs] = results[bestidx].scaling[iobs]
+                merged.index[iobs] = results[bestidx].index[iobs]

         return merged
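
The switch from `np.argmin` to `np.argsort(...)[0, :]` is the NaN-safe way of picking the best block per object: `np.argmin` returns the position of a NaN when one is present, whereas `np.argsort` pushes NaNs to the end of the sort order. A toy illustration (arrays invented for the example):

```python
import numpy as np

# chi2 has shape (nblocks, nobjects); NaN marks a block where the object
# could not be fit (e.g. all its models are older than the universe).
chi2 = np.array([[np.nan, 3.0],
                 [2.5,    4.0]])

# np.argmin propagates NaN: object 0 gets the invalid block 0.
print(np.argmin(chi2, axis=0))         # [0 0]

# np.argsort places NaN last, so the first row of the sort order is the
# block with the smallest finite chi2 whenever one exists.
print(np.argsort(chi2, axis=0)[0, :])  # [1 0]
```
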
@@ -49,6 +49,10 @@ class SharedArray(object):
         """
         self.raw = RawArray(ctypes.c_double, size)
         self.size = size
+        # By default RawArray initialises all the elements to 0. Setting them
+        # to np.nan is preferable in case, for some reason, some elements are
+        # never assigned a value during a run.
+        self.array[:] = np.nan

     def __setitem__(self, idx, data):
         if isinstance(idx, slice):
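The NaN initialisation is the piece that makes the guards above work: a shared buffer slot that no worker ever writes now reads back as NaN instead of a silent 0. A condensed sketch of the idea (simplified, not the full `SharedArray` class):

```python
import ctypes
from multiprocessing.sharedctypes import RawArray

import numpy as np

size = 4
raw = RawArray(ctypes.c_double, size)    # zero-initialised by default
array = np.frombuffer(raw, dtype=np.float64)
array[:] = np.nan                        # mark every slot "never computed"

array[1] = 42.0                          # a worker stores one result
print(array)                             # [nan 42. nan nan]
print(np.isfinite(array))                # [False  True False False]
```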