Commit a2107cb5 authored by Médéric Boquien's avatar Médéric Boquien

Ensure that best models are properly computed when models are computed by...

Ensure that best models are properly computed when models are computed by blocks and that no fit could be made in one or more blocks.
parent f2e67af9
...@@ -22,6 +22,7 @@ ...@@ -22,6 +22,7 @@
- When using a parameters file, Boolean values were not correctly interpreted. (Médéric Boquien, reported by Eric Martínez, INAOE) - When using a parameters file, Boolean values were not correctly interpreted. (Médéric Boquien, reported by Eric Martínez, INAOE)
- Make sure that the best-fit models are stored with the correct scaling factor when the distance is given explicitly (Médéric Boquien) - Make sure that the best-fit models are stored with the correct scaling factor when the distance is given explicitly (Médéric Boquien)
- Some labels and the title for the SED plots has been improved to avoid overlaps and overflows. (Médéric Boquien) - Some labels and the title for the SED plots has been improved to avoid overlaps and overflows. (Médéric Boquien)
- Ensure that best models are properly computed when models are computed by blocks and that no fit could be made in one or more blocks. This can be the case if all the models in the block are older than the age of the universe. (Médéric Boquien)
### Optimised ### Optimised
- Slight speedup of the computation of the likelihood from the χ² (Médéric Boquien) - Slight speedup of the computation of the likelihood from the χ² (Médéric Boquien)
- The fritz2006 module should now run faster thanks to an optimisation of the computation of the luminosity of the various AGN components (Médéric Boquien & Guang Yang) - The fritz2006 module should now run faster thanks to an optimisation of the computation of the luminosity of the various AGN components (Médéric Boquien & Guang Yang)
......
...@@ -229,17 +229,18 @@ def bestfit(oidx, obs): ...@@ -229,17 +229,18 @@ def bestfit(oidx, obs):
""" """
np.seterr(invalid='ignore') np.seterr(invalid='ignore')
if np.isfinite(gbl_results.best.index[oidx]):
best_index = int(gbl_results.best.index[oidx]) best_index = int(gbl_results.best.index[oidx])
# We compute the model at the exact redshift not to have to correct for the # We compute the model at the exact redshift not to have to correct for
# difference between the object and the grid redshifts. # the difference between the object and the grid redshifts.
params = deepcopy(gbl_params.from_index(best_index)) params = deepcopy(gbl_params.from_index(best_index))
if obs.redshift >= 0.: if obs.redshift >= 0.:
model_z = params[gbl_params.modules.index('redshifting')]['redshift'] model_z = params[gbl_params.modules.index('redshifting')]['redshift']
params[gbl_params.modules.index('redshifting')]['redshift'] = obs.redshift params[gbl_params.modules.index('redshifting')]['redshift'] = obs.redshift
# Correct fluxes for the fact that the scaling factor was computed on # Correct fluxes for the fact that the scaling factor was computed
# the grid redshift. Because of the difference in redshift the distance # on the grid redshift. Because of the difference in redshift the
# is different and must be reflected in the scaling # distance is different and must be reflected in the scaling
corr_scaling = compute_corr_dz(model_z, obs) / \ corr_scaling = compute_corr_dz(model_z, obs) / \
compute_corr_dz(obs.redshift, obs) compute_corr_dz(obs.redshift, obs)
else: # The model redshift is always exact in redshifting mode else: # The model redshift is always exact in redshifting mode
......
...@@ -112,10 +112,10 @@ class BayesResultsManager(object): ...@@ -112,10 +112,10 @@ class BayesResultsManager(object):
for band in merged.fluxerror} for band in merged.fluxerror}
weight = np.array([result.weight for result in results]) weight = np.array([result.weight for result in results])
totweight = np.sum(weight, axis=0) totweight = np.nansum(weight, axis=0)
for prop in merged.intmean: for prop in merged.intmean:
merged.intmean[prop][:] = np.sum( merged.intmean[prop][:] = np.nansum(
intmean[prop] * weight, axis=0) / totweight intmean[prop] * weight, axis=0) / totweight
# We compute the merged standard deviation by combining the # We compute the merged standard deviation by combining the
...@@ -123,11 +123,11 @@ class BayesResultsManager(object): ...@@ -123,11 +123,11 @@ class BayesResultsManager(object):
# http://stats.stackexchange.com/a/10445 where the number of # http://stats.stackexchange.com/a/10445 where the number of
# datapoints has been substituted with the weights. In short we # datapoints has been substituted with the weights. In short we
# exploit the fact that Var(X) = E(Var(X)) + Var(E(X)). # exploit the fact that Var(X) = E(Var(X)) + Var(E(X)).
merged.interror[prop][:] = np.sqrt(np.sum( merged.interror[prop][:] = np.sqrt(np.nansum(
weight * (interror[prop]**2. + (intmean[prop]-merged.intmean[prop])**2), axis=0) / totweight) weight * (interror[prop]**2. + (intmean[prop]-merged.intmean[prop])**2), axis=0) / totweight)
for prop in merged.extmean: for prop in merged.extmean:
merged.extmean[prop][:] = np.sum( merged.extmean[prop][:] = np.nansum(
extmean[prop] * weight, axis=0) / totweight extmean[prop] * weight, axis=0) / totweight
# We compute the merged standard deviation by combining the # We compute the merged standard deviation by combining the
...@@ -135,7 +135,7 @@ class BayesResultsManager(object): ...@@ -135,7 +135,7 @@ class BayesResultsManager(object):
# http://stats.stackexchange.com/a/10445 where the number of # http://stats.stackexchange.com/a/10445 where the number of
# datapoints has been substituted with the weights. In short we # datapoints has been substituted with the weights. In short we
# exploit the fact that Var(X) = E(Var(X)) + Var(E(X)). # exploit the fact that Var(X) = E(Var(X)) + Var(E(X)).
merged.exterror[prop][:] = np.sqrt(np.sum( merged.exterror[prop][:] = np.sqrt(np.nansum(
weight * (exterror[prop]**2. + (extmean[prop]-merged.extmean[prop])**2), axis=0) / totweight) weight * (exterror[prop]**2. + (extmean[prop]-merged.extmean[prop])**2), axis=0) / totweight)
for prop in merged.extmean: for prop in merged.extmean:
...@@ -148,7 +148,7 @@ class BayesResultsManager(object): ...@@ -148,7 +148,7 @@ class BayesResultsManager(object):
merged.exterror[prop]) merged.exterror[prop])
for band in merged.fluxmean: for band in merged.fluxmean:
merged.fluxmean[band][:] = np.sum( merged.fluxmean[band][:] = np.nansum(
fluxmean[band] * weight, axis=0) / totweight fluxmean[band] * weight, axis=0) / totweight
# We compute the merged standard deviation by combining the # We compute the merged standard deviation by combining the
...@@ -156,7 +156,7 @@ class BayesResultsManager(object): ...@@ -156,7 +156,7 @@ class BayesResultsManager(object):
# http://stats.stackexchange.com/a/10445 where the number of # http://stats.stackexchange.com/a/10445 where the number of
# datapoints has been substituted with the weights. In short we # datapoints has been substituted with the weights. In short we
# exploit the fact that Var(X) = E(Var(X)) + Var(E(X)). # exploit the fact that Var(X) = E(Var(X)) + Var(E(X)).
merged.fluxerror[band][:] = np.sqrt(np.sum( merged.fluxerror[band][:] = np.sqrt(np.nansum(
weight * (fluxerror[band]**2. + (fluxmean[band]-merged.fluxmean[band])**2), axis=0) / totweight) weight * (fluxerror[band]**2. + (fluxmean[band]-merged.fluxmean[band])**2), axis=0) / totweight)
...@@ -284,10 +284,10 @@ class BestResultsManager(object): ...@@ -284,10 +284,10 @@ class BestResultsManager(object):
if len(results) == 1: if len(results) == 1:
return results[0] return results[0]
best = np.argmin([result.chi2 for result in results], axis=0) chi2 = np.array([result.chi2 for result in results])
merged = results[0] merged = results[0]
for iobs, bestidx in enumerate(best): for iobs, bestidx in enumerate(np.argsort(chi2, axis=0)[0, :]):
if np.isfinite(bestidx):
for band in merged.flux: for band in merged.flux:
merged.flux[band][iobs] = \ merged.flux[band][iobs] = \
results[bestidx].flux[band][iobs] results[bestidx].flux[band][iobs]
......
...@@ -49,6 +49,10 @@ class SharedArray(object): ...@@ -49,6 +49,10 @@ class SharedArray(object):
""" """
self.raw = RawArray(ctypes.c_double, size) self.raw = RawArray(ctypes.c_double, size)
self.size = size self.size = size
# By default RawArray initialises all the elements to 0. Setting them to
# np.nan is preferable in case, for some reason, some elements are never
# assigned a value during a run
self.array[:] = np.nan
def __setitem__(self, idx, data): def __setitem__(self, idx, data):
if isinstance(idx, slice): if isinstance(idx, slice):
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment