Commit a2107cb5 authored by Médéric Boquien's avatar Médéric Boquien

Ensure that best models are properly computed when models are computed by...

Ensure that best models are properly computed when models are computed by blocks and that no fit could be made in one or more blocks.
parent f2e67af9
...@@ -22,6 +22,7 @@ ...@@ -22,6 +22,7 @@
- When using a parameters file, Boolean values were not correctly interpreted. (Médéric Boquien, reported by Eric Martínez, INAOE) - When using a parameters file, Boolean values were not correctly interpreted. (Médéric Boquien, reported by Eric Martínez, INAOE)
- Make sure that the best-fit models are stored with the correct scaling factor when the distance is given explicitly (Médéric Boquien) - Make sure that the best-fit models are stored with the correct scaling factor when the distance is given explicitly (Médéric Boquien)
- Some labels and the title for the SED plots has been improved to avoid overlaps and overflows. (Médéric Boquien) - Some labels and the title for the SED plots has been improved to avoid overlaps and overflows. (Médéric Boquien)
- Ensure that best models are properly computed when models are computed by blocks and that no fit could be made in one or more blocks. This can be the case if all the models in the block are older than the age of the universe. (Médéric)
### Optimised ### Optimised
- Slight speedup of the computation of the likelihood from the χ² (Médéric Boquien) - Slight speedup of the computation of the likelihood from the χ² (Médéric Boquien)
- The fritz2006 module should now run faster thanks to an optimisation of the computation of the luminosity of the various AGN components (Médéric Boquien & Guang Yang) - The fritz2006 module should now run faster thanks to an optimisation of the computation of the luminosity of the various AGN components (Médéric Boquien & Guang Yang)
......
...@@ -229,46 +229,47 @@ def bestfit(oidx, obs): ...@@ -229,46 +229,47 @@ def bestfit(oidx, obs):
""" """
np.seterr(invalid='ignore') np.seterr(invalid='ignore')
best_index = int(gbl_results.best.index[oidx]) if np.isfinite(gbl_results.best.index[oidx]):
best_index = int(gbl_results.best.index[oidx])
# We compute the model at the exact redshift not to have to correct for the
# difference between the object and the grid redshifts. # We compute the model at the exact redshift not to have to correct for
params = deepcopy(gbl_params.from_index(best_index)) # the difference between the object and the grid redshifts.
if obs.redshift >= 0.: params = deepcopy(gbl_params.from_index(best_index))
model_z = params[gbl_params.modules.index('redshifting')]['redshift'] if obs.redshift >= 0.:
params[gbl_params.modules.index('redshifting')]['redshift'] = obs.redshift model_z = params[gbl_params.modules.index('redshifting')]['redshift']
# Correct fluxes for the fact that the scaling factor was computed on params[gbl_params.modules.index('redshifting')]['redshift'] = obs.redshift
# the grid redshift. Because of the difference in redshift the distance # Correct fluxes for the fact that the scaling factor was computed
# is different and must be reflected in the scaling # on the grid redshift. Because of the difference in redshift the
corr_scaling = compute_corr_dz(model_z, obs) / \ # distance is different and must be reflected in the scaling
compute_corr_dz(obs.redshift, obs) corr_scaling = compute_corr_dz(model_z, obs) / \
else: # The model redshift is always exact in redshifting mode compute_corr_dz(obs.redshift, obs)
corr_scaling = 1. else: # The model redshift is always exact in redshifting mode
corr_scaling = 1.
sed = deepcopy(gbl_warehouse.get_sed(gbl_params.modules, params))
sed = deepcopy(gbl_warehouse.get_sed(gbl_params.modules, params))
# Handle the case where the distance does not correspond to the redshift.
if obs.redshift >= 0.: # Handle the case where the distance does not correspond to the redshift.
corr_dz = (obs.distance / sed.info['universe.luminosity_distance']) ** 2 if obs.redshift >= 0.:
else: corr_dz = (obs.distance / sed.info['universe.luminosity_distance']) ** 2
corr_dz = 1. else:
corr_dz = 1.
scaling = gbl_results.best.scaling[oidx] * corr_scaling
scaling = gbl_results.best.scaling[oidx] * corr_scaling
for band in gbl_results.best.flux:
gbl_results.best.flux[band][oidx] = sed.compute_fnu(band) * scaling for band in gbl_results.best.flux:
gbl_results.best.flux[band][oidx] = sed.compute_fnu(band) * scaling
# If the distance is user defined, the redshift-based luminosity distance
# of the model is probably incorrect so we replace it # If the distance is user defined, the redshift-based luminosity distance
sed.add_info('universe.luminosity_distance', obs.distance, force=True) # of the model is probably incorrect so we replace it
for prop in gbl_results.best.intprop: sed.add_info('universe.luminosity_distance', obs.distance, force=True)
gbl_results.best.intprop[prop][oidx] = sed.info[prop] for prop in gbl_results.best.intprop:
gbl_results.best.intprop[prop][oidx] = sed.info[prop]
for prop in gbl_results.best.extprop:
gbl_results.best.extprop[prop][oidx] = sed.info[prop] * scaling \ for prop in gbl_results.best.extprop:
* corr_dz gbl_results.best.extprop[prop][oidx] = sed.info[prop] * scaling \
* corr_dz
if gbl_conf['analysis_params']["save_best_sed"]:
sed.to_fits(f"out/{obs.id}", scaling * corr_dz) if gbl_conf['analysis_params']["save_best_sed"]:
sed.to_fits(f"out/{obs.id}", scaling * corr_dz)
gbl_counter.inc() gbl_counter.inc()
...@@ -112,10 +112,10 @@ class BayesResultsManager(object): ...@@ -112,10 +112,10 @@ class BayesResultsManager(object):
for band in merged.fluxerror} for band in merged.fluxerror}
weight = np.array([result.weight for result in results]) weight = np.array([result.weight for result in results])
totweight = np.sum(weight, axis=0) totweight = np.nansum(weight, axis=0)
for prop in merged.intmean: for prop in merged.intmean:
merged.intmean[prop][:] = np.sum( merged.intmean[prop][:] = np.nansum(
intmean[prop] * weight, axis=0) / totweight intmean[prop] * weight, axis=0) / totweight
# We compute the merged standard deviation by combining the # We compute the merged standard deviation by combining the
...@@ -123,11 +123,11 @@ class BayesResultsManager(object): ...@@ -123,11 +123,11 @@ class BayesResultsManager(object):
# http://stats.stackexchange.com/a/10445 where the number of # http://stats.stackexchange.com/a/10445 where the number of
# datapoints has been substituted with the weights. In short we # datapoints has been substituted with the weights. In short we
# exploit the fact that Var(X) = E(Var(X)) + Var(E(X)). # exploit the fact that Var(X) = E(Var(X)) + Var(E(X)).
merged.interror[prop][:] = np.sqrt(np.sum( merged.interror[prop][:] = np.sqrt(np.nansum(
weight * (interror[prop]**2. + (intmean[prop]-merged.intmean[prop])**2), axis=0) / totweight) weight * (interror[prop]**2. + (intmean[prop]-merged.intmean[prop])**2), axis=0) / totweight)
for prop in merged.extmean: for prop in merged.extmean:
merged.extmean[prop][:] = np.sum( merged.extmean[prop][:] = np.nansum(
extmean[prop] * weight, axis=0) / totweight extmean[prop] * weight, axis=0) / totweight
# We compute the merged standard deviation by combining the # We compute the merged standard deviation by combining the
...@@ -135,7 +135,7 @@ class BayesResultsManager(object): ...@@ -135,7 +135,7 @@ class BayesResultsManager(object):
# http://stats.stackexchange.com/a/10445 where the number of # http://stats.stackexchange.com/a/10445 where the number of
# datapoints has been substituted with the weights. In short we # datapoints has been substituted with the weights. In short we
# exploit the fact that Var(X) = E(Var(X)) + Var(E(X)). # exploit the fact that Var(X) = E(Var(X)) + Var(E(X)).
merged.exterror[prop][:] = np.sqrt(np.sum( merged.exterror[prop][:] = np.sqrt(np.nansum(
weight * (exterror[prop]**2. + (extmean[prop]-merged.extmean[prop])**2), axis=0) / totweight) weight * (exterror[prop]**2. + (extmean[prop]-merged.extmean[prop])**2), axis=0) / totweight)
for prop in merged.extmean: for prop in merged.extmean:
...@@ -148,7 +148,7 @@ class BayesResultsManager(object): ...@@ -148,7 +148,7 @@ class BayesResultsManager(object):
merged.exterror[prop]) merged.exterror[prop])
for band in merged.fluxmean: for band in merged.fluxmean:
merged.fluxmean[band][:] = np.sum( merged.fluxmean[band][:] = np.nansum(
fluxmean[band] * weight, axis=0) / totweight fluxmean[band] * weight, axis=0) / totweight
# We compute the merged standard deviation by combining the # We compute the merged standard deviation by combining the
...@@ -156,7 +156,7 @@ class BayesResultsManager(object): ...@@ -156,7 +156,7 @@ class BayesResultsManager(object):
# http://stats.stackexchange.com/a/10445 where the number of # http://stats.stackexchange.com/a/10445 where the number of
# datapoints has been substituted with the weights. In short we # datapoints has been substituted with the weights. In short we
# exploit the fact that Var(X) = E(Var(X)) + Var(E(X)). # exploit the fact that Var(X) = E(Var(X)) + Var(E(X)).
merged.fluxerror[band][:] = np.sqrt(np.sum( merged.fluxerror[band][:] = np.sqrt(np.nansum(
weight * (fluxerror[band]**2. + (fluxmean[band]-merged.fluxmean[band])**2), axis=0) / totweight) weight * (fluxerror[band]**2. + (fluxmean[band]-merged.fluxmean[band])**2), axis=0) / totweight)
...@@ -284,22 +284,22 @@ class BestResultsManager(object): ...@@ -284,22 +284,22 @@ class BestResultsManager(object):
if len(results) == 1: if len(results) == 1:
return results[0] return results[0]
best = np.argmin([result.chi2 for result in results], axis=0) chi2 = np.array([result.chi2 for result in results])
merged = results[0] merged = results[0]
for iobs, bestidx in enumerate(best): for iobs, bestidx in enumerate(np.argsort(chi2, axis=0)[0, :]):
for band in merged.flux: if np.isfinite(bestidx):
merged.flux[band][iobs] = \ for band in merged.flux:
results[bestidx].flux[band][iobs] merged.flux[band][iobs] = \
for prop in merged.intprop: results[bestidx].flux[band][iobs]
merged.intprop[prop][iobs] = \ for prop in merged.intprop:
results[bestidx].intprop[prop][iobs] merged.intprop[prop][iobs] = \
for prop in merged.extprop: results[bestidx].intprop[prop][iobs]
merged.extprop[prop][iobs] = \ for prop in merged.extprop:
results[bestidx].extprop[prop][iobs] merged.extprop[prop][iobs] = \
merged.chi2[iobs] = results[bestidx].chi2[iobs] results[bestidx].extprop[prop][iobs]
merged.scaling[iobs] = results[bestidx].scaling[iobs] merged.chi2[iobs] = results[bestidx].chi2[iobs]
merged.index[iobs] = results[bestidx].index[iobs] merged.scaling[iobs] = results[bestidx].scaling[iobs]
merged.index[iobs] = results[bestidx].index[iobs]
return merged return merged
......
...@@ -49,6 +49,10 @@ class SharedArray(object): ...@@ -49,6 +49,10 @@ class SharedArray(object):
""" """
self.raw = RawArray(ctypes.c_double, size) self.raw = RawArray(ctypes.c_double, size)
self.size = size self.size = size
# By default RawArray initialises all the elements to 0. Setting them to
# np.nan is preferable in case, for some reason, some elements are never
# assigned a value during a run
self.array[:] = np.nan
def __setitem__(self, idx, data): def __setitem__(self, idx, data):
if isinstance(idx, slice): if isinstance(idx, slice):
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment