Commit a58a327b authored by bepinat's avatar bepinat

possibility to have models with a flexible number of input parameters

parent 76fd2e22
Pipeline #729 failed with stages
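The mechanism introduced by this commit, as a minimal standalone sketch (not part of the diff): the rotation-curve parameter names are read from the velocity model's signature via __code__, and their values are forwarded through *args. The rotation curve below reuses the nfw_velocity signature that appears later in the diff; its body is a placeholder, only the signature matters here.

def nfw_velocity(r, rt, vm):
    # placeholder body: only the signature (r, rt, vm) is relevant for this sketch
    return vm * r / (r + rt)

# rotation-curve parameter names, skipping the radius argument 'r'
rc_names = nfw_velocity.__code__.co_varnames[1:nfw_velocity.__code__.co_argcount]
# ordered parameter list, built as in Model2D.__init__
model_parname = ['xc', 'yc', 'pa', 'inc', 'vs'] + list(rc_names)
print(model_parname)  # ['xc', 'yc', 'pa', 'inc', 'vs', 'rt', 'vm']
# set_parameters(xc, yc, pa, inc, vs, *args) then stores args, and the velocity field is
# computed with vel_model(radius, *args), whatever the number of rotation-curve parameters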
......@@ -5,7 +5,7 @@ from Class.PSF import PSF
from Class.Images import Image
class Model2D:
class Model2D_old:
"""
Models in 2D of the velocity field of galaxies
Compute the velocity field and the dispersion of a model using observed data in input.
......@@ -25,7 +25,7 @@ class Model2D:
# None parameters
self.model_pa = None
self.model_incl = None
self.model_inc = None
self.model_xc = None
self.model_yc = None
self.model_vs = None
......@@ -47,25 +47,25 @@ class Model2D:
self.flux = flux
self.psf = psf
def set_parameters(self, xc, yc, pa, incl, vs, vm, rt):
def set_parameters(self, xc, yc, pa, inc, vs, rt, vm):
"""
Sets the value of the model parameters
:param float xc: abscissa of the center in pixel
:param float yc: ordinate of the center in pixel
:param float pa: position angle of the major axis in degree
:param float incl: inclination of the disk in degree
:param float inc: inclination of the disk in degree
:param float vs: systemic velocity in km/s
:param float vm: maximum rotation velocity in km/s
:param float rt: characteristic radius, where the maximum velocity is reached
"""
self.model_pa = pa
self.model_incl = incl
self.model_inc = inc
self.model_xc = (xc + 0.5) * self.flux.get_oversamp() - 0.5
self.model_yc = (yc + 0.5) * self.flux.get_oversamp() - 0.5
self.model_vs = vs
self.model_radius, self.model_theta = tools.sky_coord_to_galactic(self.model_xc, self.model_yc, self.model_pa, self.model_incl,
self.model_radius, self.model_theta = tools.sky_coord_to_galactic(self.model_xc, self.model_yc, self.model_pa, self.model_inc,
im_size=np.shape(self.vel_map_hd))
self.model_vm = vm
......@@ -78,7 +78,7 @@ class Model2D:
:return ndarray:
"""
return [(self.model_xc + 0.5) * self.flux.get_oversamp() - 0.5, (self.model_yc + 0.5) * self.flux.get_oversamp() - 0.5,
self.model_pa, self.model_incl, self.model_vs, self.model_vm, self.model_rt/self.flux.get_oversamp()]
self.model_pa, self.model_inc, self.model_vs, self.model_vm, self.model_rt/self.flux.get_oversamp()]
def disk_velocity(self):
"""
......@@ -90,7 +90,7 @@ class Model2D:
vr = self.vel_model(self.model_radius, self.model_rt, self.model_vm)
# Calculation of the velocity field
v = vr * math.sin(math.radians(self.model_incl)) * self.model_theta + self.model_vs
v = vr * math.sin(math.radians(self.model_inc)) * self.model_theta + self.model_vs
return v
......@@ -173,7 +173,7 @@ class Model2D:
return np.sum(chi2)
class Model2D_new:
class Model2D:
"""
Models in 2D of the velocity field of galaxies
Compute the velocity field and the dispersion of a model using observed data in input.
......@@ -202,60 +202,59 @@ class Model2D_new:
self.errvel = errvel
self.flux = flux
self.psf = psf
# None parameters
self.oversamp = self.flux.get_oversamp()
self.indices = tools.lowres_indices_highres_frame(flux.get_size(), self.oversamp)
# We initialize the geometrical model parameters
self.model_pa = None
self.model_incl = None
self.model_inc = None
self.model_xc = None
self.model_yc = None
self.model_vs = None
# We initialize the associated coordinate maps in the galaxy frame
self.model_radius = None
self.model_theta = None
self.model_RCparams_names = self.vel_model.__code__.co_varnames[1:self.vel_model._code.co_argcount]
self.model_RCparams_values = [None for par in self.model_RCparams_names]
self.model_RCparams = dict((par, None) for par in self.vel_model.__code__.co_varnames[1:self.vel_model._code.co_argcount])
# XXX adapt the parameters to the number of arguments of the model
# XXX adapt the number of components + number of galaxies
#self.model_vm = None
#self.model_rt = None
def set_parameters(self, xc, yc, pa, incl, vs, vm, rt):
# XXX adapt the parameters to the number of arguments of the model
# We build an ordered list containing the names of the model parameters
self.model_parname = ['xc', 'yc', 'pa', 'inc', 'vs']
# We append the names of the rotation curve parameters; the number of arguments adjusts automatically to the velocity model
for name in self.vel_model.__code__.co_varnames[1:self.vel_model.__code__.co_argcount]:
self.model_parname.append(name)
#self.model_RCparams_names = self.vel_model.__code__.co_varnames[1:self.vel_model.__code__.co_argcount] # can be useful to check that the parameters are provided in the correct order
#self.model_RCparams_values = [None for par in self.model_RCparams_names]
#self.model_RCparams = dict((par, None) for par in self.vel_model.__code__.co_varnames[1:self.vel_model.__code__.co_argcount])
self.model_RCparams = None
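# Illustration (not part of the commit): with a rotation curve defined as
# nfw_velocity(r, rt, vm), model_parname becomes ['xc', 'yc', 'pa', 'inc', 'vs', 'rt', 'vm']
# and set_parameters must then receive the rotation-curve values in that same order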
def set_parameters(self, xc, yc, pa, inc, vs, *args):
"""
Sets the value of the model parameters
:param float xc: abscissa of the center in pixel
:param float yc: ordinate of the center in pixel
:param float pa: position angle of the major axis in degree
:param float incl: inclination of the disk in degree
:param float inc: inclination of the disk in degree
:param float vs: systemic velocity in km/s
:param args: rotation curve parameters, in the order expected by the velocity model (after the radius argument)
"""
self.model_pa = pa
self.model_incl = incl
self.model_xc = (xc + 0.5) * self.flux.get_oversamp() - 0.5
self.model_yc = (yc + 0.5) * self.flux.get_oversamp() - 0.5
self.model_inc = inc
self.model_xc = xc
self.model_yc = yc
self.model_vs = vs
# XXX I think it is better to keep the parameters in low resolution (or even in kpc and km/s) and to modify the computation of model_radius (hence of sky_coord_to_galactic)
self.model_radius, self.model_theta = tools.sky_coord_to_galactic(self.model_xc, self.model_yc, self.model_pa, self.model_incl,
im_size=np.shape(self.vel_map_hd))
self.model_vm = vm
self.model_rt = rt*self.flux.get_oversamp()
#self.model_radius, self.model_theta = tools.sky_coord_to_galactic(self.model_xc, self.model_yc, self.model_pa, self.model_inc, im_size=np.shape(self.vel_map_hd), oversamp=self.oversamp)
self.model_radius, self.model_theta = tools.sky_coord_to_galactic(self.indices[1], self.indices[0], self.model_xc, self.model_yc, self.model_pa, self.model_inc)
self.model_RCparams = args
def get_parameter(self):
# XXX adapt the parameters to the number of arguments of the model
"""
Gets the current parameters of the model (in low resolution scale)
:return ndarray:
"""
return [(self.model_xc + 0.5) * self.flux.get_oversamp() - 0.5, (self.model_yc + 0.5) * self.flux.get_oversamp() - 0.5,
self.model_pa, self.model_incl, self.model_vs, self.model_vm, self.model_rt/self.flux.get_oversamp()]
return [self.model_xc, self.model_yc, self.model_pa, self.model_inc, self.model_vs, *self.model_RCparams]
def disk_velocity(self):
"""
......@@ -264,23 +263,19 @@ class Model2D_new:
:return ndarray:
"""
# XXX adapt the parameters to the number of arguments of the model
vr = self.vel_model(self.model_radius, self.model_rt, self.model_vm)
vr = self.vel_model(self.model_radius, *self.model_RCparams)
# Calculation of the velocity field
v = vr * math.sin(math.radians(self.model_incl)) * self.model_theta + self.model_vs
v = vr * math.sin(math.radians(self.model_inc)) * self.model_theta + self.model_vs
return v
def velocity_map(self):
"""
"""
Computes the velocity map of the model
"""
self.vel_map_hd = self.disk_velocity()
vel_times_flux = tools.rebin_data(self.psf.convolution(self.flux.data * self.vel_map_hd), self.flux.get_oversamp())
vel_times_flux = tools.rebin_data(self.psf.convolution(self.flux.data * self.vel_map_hd), self.oversamp)
self.vel_map[self.vel.mask] = vel_times_flux[self.vel.mask] / self.flux.data_rebin[self.vel.mask]
def linear_velocity_dispersion(self):
......@@ -343,11 +338,12 @@ class Model2D_new:
:param int nparams: number of parameters
:return float:
"""
# XXX adapt the parameters to the number of arguments of the model
self.set_parameters(cube[0], cube[1], cube[2], cube[3], cube[4], cube[5], cube[6])
#self.set_parameters(cube[0], cube[1], cube[2], cube[3], cube[4], cube[5], cube[6])
self.set_parameters(cube[0], cube[1], cube[2], cube[3], cube[4], *cube[5:ndim])
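# cube[5:ndim] holds the rotation-curve parameters, in the order given by model_parname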
self.velocity_map()
chi2 = -(self.vel_map[self.vel.mask] - self.vel.data[self.vel.mask])**2/(2*self.errvel.data[self.vel.mask]**2)
chi2 = -(self.vel_map[self.vel.mask] - self.vel.data[self.vel.mask])**2 / (2*self.errvel.data[self.vel.mask]**2)
return np.sum(chi2)
......@@ -355,4 +351,3 @@ class Model2D_new:
#class Model3D:
#class Model1D:
......@@ -18,20 +18,18 @@ files:
psf:
#This section sets the parameters needed by the model used
init fit:
#Inside 'parname' write the parameters which are used by the model in the same order than you declared your model
parname: ['xc', 'yc', 'pa', 'inc', 'vs', 'vm', 'rt']
#For each parameter, set the initial 'value', the 'limits' and whether it is 'fixed' (1) or not (0)
#The order of the parameters is not important
xc:
desc: center abscissa in pixel
value: 14.8
limits: [10, 20]
fixed: 1
fixed: 0
yc:
desc: center ordinate in pixel
value: 14.9
limits: [10, 20]
fixed: 1
fixed: 0
pa:
desc: position angle in degree
value: 0
......@@ -40,23 +38,23 @@ init fit:
inc:
desc: inclination in degree
value: 45
limits: [10, 80]
limits: [44, 46]
fixed: 1
vs:
desc: systemic velocity in km/s
value: 0
limits: [-10, 10]
fixed: 0
vm:
desc: model velocity in km/s
value: 209
limits: [0, 400]
fixed: 0
rt:
desc: transition radius
value: 4
limits: [1, 12]
fixed: 0
vm:
desc: model velocity in km/s
value: 209
limits: [0, 400]
fixed: 0
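#Example (illustrative, not part of this commit): with a rotation curve that takes more parameters,
#e.g. zhao_velocity(r, rt, vt, a, b, g), one block is added per extra parameter in the same way
#(the values and limits below are placeholders):
#a:
#  desc: shape parameter of the Zhao profile
#  value: 1
#  limits: [0, 3]
#  fixed: 0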
#This section is for parameters of the model which are not fitted
conf model:
sig: 40
......@@ -66,12 +64,12 @@ conf model:
smooth: 0
#Selection of the desired 'model', 'method' and other settings for running the program
config fit:
model: flat
method: mpfit
model: expo
method: multinest
verbose: False
#parameters about PyMultiNest running
PyMultiNest:
nbp: 0
max_iter: 10000
plt stats: False
plt xy: True
#parameters about mpfit running
......
......@@ -113,6 +113,7 @@ def nfw_velocity(r, rt, vm):
#Rotation curve for an Isothermal sphere density profile (see Binney & Tremaine)
#XXX See function in mass model
#XXX possibility to use spline by segment as done in the NEFER code for filter transmission shift computation
#:param ndarray r: 1D or 2D array which contains the radius
#:param float rt: scalelength
......@@ -202,6 +203,6 @@ def zhao_velocity(r, rt, vt, a, b, g):
# Must be at the end of the file
list_model = {'exp': exponential_velocity, 'flat': flat_velocity, 'arctan': arctan_velocity, 'hubmod': hubblemodified_velocity, 'nfw': nfw_velocity, 'kent': kent_velocity,
list_model = {'expo': exponential_velocity, 'flat': flat_velocity, 'atan': arctan_velocity, 'hmod': hubblemodified_velocity, 'nfw': nfw_velocity, 'kent': kent_velocity,
#'iso': iso_velocity, 'hernquist': hernquist_velocity, 'jaffe': jaffe_velocity, 'einasto': einasto_velocity,
'courteau': courteau_velocity, 'zhao': zhao_velocity}
#!/usr/bin/env python
import os
import sys
import math
import numpy as np
from numpy.fft import fftshift, fft2, ifft2
import math
import argparse
from astropy.io import ascii
import Models.velocity_model as vm
import Models.flux_model as fm
import Tools.tools as tools
from Class.Images import Image
from Class.PSF import PSF
from Class.Model2D import Model2D
import argparse
from astropy.io import ascii
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname('../Models/'), '..')))
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname('../Class/'), '..')))
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname('../Tools/'), '..')))
......@@ -91,7 +93,7 @@ def main(parser):
flux_rebin = tools.rebin_data(conv_psf(flux, np.sqrt(fwhm**2+args.smooth**2)), args.osamp)
# create velocity maps
flux_ld = Image(flux_rebin, mask=flux_rebin>0.1)
flux_ld = Image(flux_rebin, mask=flux_rebin > 0.1)
flux_hd = ImageOverSamp(flux_rebin, rdv, oversamp=args.osamp)
psf = PSF(flux_hd, fwhm_ld=args.fwhm, smooth=args.smooth)
flux_hd.conv_inter_flux(psf)
......@@ -102,21 +104,21 @@ def main(parser):
# flux[np.where(flux < 0.1)] = 0
print(' write flux hd')
tools.write_fits(new_xcen, new_ycen, args.pa, args.incl, args.vs, args.vmax, rdv, 0, psf.convolution(flux), args.path +'flux_HD')
tools.write_fits(new_xcen, new_ycen, args.pa, args.incl, args.vs, args.vmax, rdv, 0, psf.convolution(flux), args.path + 'flux_HD')
# flux_rebin[np.where(flux_rebin < 0.1)] = 0
print(' write flux ld')
tools.write_fits(*args.center, args.pa, args.incl, args.vs, args.vmax, args.rdv, 0, flux_rebin, args.path +'flux')
tools.write_fits(*args.center, args.pa, args.incl, args.vs, args.vmax, args.rdv, 0, flux_rebin, args.path + 'flux')
vel_hd = psf.convolution(model.vel_map_hd)
print(' write vel map hd')
tools.write_fits(new_xcen, new_ycen, args.pa, args.incl, args.vs, args.vmax, rdv, 0, vel_hd, args.path +'modelV_HD')
tools.write_fits(new_xcen, new_ycen, args.pa, args.incl, args.vs, args.vmax, rdv, 0, vel_hd, args.path + 'modelV_HD')
print(' write vel map')
tools.write_fits(args.center[0], args.center[1], args.pa, args.incl, args.vs, args.vm, args.rdv, 0, tools.rebin_data(vel_hd, args.osamp),
args.path + 'modelV_full')
model.vel_map[flux_rebin < 0.1] = float('nan')
tools.write_fits(args.center[0], args.center[1], args.pa, args.incl, args.vs, args.vm, args.rdv, 0, model.vel_map, args.path +'modelV')
tools.write_fits(args.center[0], args.center[1], args.pa, args.incl, args.vs, args.vm, args.rdv, 0, model.vel_map, args.path + 'modelV')
# create dispersion and error maps
evel = np.ones(args.size)
......
import sys
import os
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname('../Class/'), '..')))
import time
import logging
from Class.Model2D import Model2D
import Tools.cap_mpfit as mpfit
import logging
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname('../Class/'), '..')))
logger = logging.getLogger('__mocking__')
......@@ -14,7 +14,7 @@ def use_mpfit(model, params, quiet=False, gtol=1e-10, ftol=1e-10, xtol=1e-10):
Function that calls mpfit to perform the fit of the model
:param Model2D model: model initialised
:param dict params: dictionary which contain all parameters with the limits and if are fixed or not
:param dict params: dictionary which contains all parameters with their limits and whether they are fixed or not
:param bool quiet: print or not the verbose output of the fit method
:param float gtol: orthogonality desired between the function vector and the columns of the jacobian.
:param float ftol: relative error desired in the sum of squares.
......@@ -25,19 +25,26 @@ def use_mpfit(model, params, quiet=False, gtol=1e-10, ftol=1e-10, xtol=1e-10):
verbose = 0
else:
verbose = 1
# create the array of dictionaries 'parinfo' needed by mpfit
# xxx possibly use the list defined in velocity_models; that would allow the checks to be done + avoid having the list of names
p0 = [params[key] for key in params['parname']]
try:
p0 = [params[key] for key in model.model_parname]
except KeyError:
logger.info(' Parameter name inconsistency. Check the parameter names in the input file.')
logger.info(' Parameter names should be:')
logger.info(' ' + ', '.join(model.model_parname))
return
#p0 = [params[key] for key in params['parname']] # YYY in principle necessary to avoid disordered dictionary reading of the yaml file, but is it really necessary? YAML seems to preserve the order
parinfo = [{'value': 0., 'fixed': 0, 'limited': [1, 1], 'limits': [0., 0.], 'parname': 0., 'step': 0.} for i in range(len(p0))]
for i in range(len(p0)):
parinfo[i]['value'] = p0[i]['value']
parinfo[i]['parname'] = params['parname'][i]
parinfo[i]['limits'] = params[params['parname'][i]]['limits']
if params[params['parname'][i]]['fixed'] is None:
params[params['parname'][i]]['fixed'] = 0
if params[params['parname'][i]]['fixed'] == 1:
parinfo[i]['parname'] = model.model_parname[i]
parinfo[i]['limits'] = params[model.model_parname[i]]['limits']
if params[model.model_parname[i]]['fixed'] is None:
params[model.model_parname[i]]['fixed'] = 0
if params[model.model_parname[i]]['fixed'] == 1:
parinfo[i]['fixed'] = 1
logger.info(' start fit with mpfit')
......@@ -54,13 +61,22 @@ def use_mpfit(model, params, quiet=False, gtol=1e-10, ftol=1e-10, xtol=1e-10):
logger.info(' fit status: {}'.format(model_fit.status))
logger.info(' Chi2R: {} DOF: {}'.format(model_fit.fnorm/model_fit.dof, model_fit.dof))
logger.info(' {0:^{width}}{1:^{width}}{2:^{width}}{3:^{width}}{4:^{width}}'
'{5:^{width}}{6:^{width}}'.format(*params['parname'], width=12))
logger.info('-' * (len(params['parname'])*12))
logger.info(' {0:^{width}.{prec}f}{1:^{width}.{prec}f}{2:^{width}.{prec}f}{3:^{width}.{prec}f}{4:^{width}.{prec}f}'
'{5:^{width}.{prec}f}{6:^{width}.{prec}f}'.format(*model_fit.params, width=12, prec=6))
logger.info(' {0:^{width}.{prec}f}{1:^{width}.{prec}f}{2:^{width}.{prec}f}{3:^{width}.{prec}f}{4:^{width}.{prec}f}'
'{5:^{width}.{prec}f}{6:^{width}.{prec}f}'.format(*model_fit.perror, width=12, prec=6))
message = ' '
#for par in params['parname']:
for par in model.model_parname:
message += '{0:^{width}}'.format(par, width=12)
logger.info(message)
logger.info('-' * (len(model.model_parname)*12))
message = ' '
for par in model_fit.params:
message += '{0:^{width}.{prec}f}'.format(par, width=12, prec=6)
logger.info(message)
message = ' '
for par in model_fit.perror:
message += '{0:^{width}.{prec}f}'.format(par, width=12, prec=6)
logger.info(message)
return {'results': {params['parname'][i]: {'value': model_fit.params[i], 'error': model_fit.perror[i]} for i in range(len(params['parname']))},
return {'results': {model.model_parname[i]: {'value': model_fit.params[i], 'error': model_fit.perror[i]} for i in range(len(model.model_parname))},
'mpfit': {'chi2r': model_fit.fnorm/model_fit.dof, 'dof': model_fit.dof}}
import sys
import os
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname('../Class/'), '..')))
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname('../Tools/'), '..')))
import logging
import pymultinest
import time
from Class.Model2D import Model2D
import matplotlib.pyplot as plt
import logging
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname('../Class/'), '..')))
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname('../Tools/'), '..')))
logger = logging.getLogger('__mocking__')
def use_pymultinest(model, params, quiet=True, nbp=0, pltstats=False, rank=0, path=None, whd=None):
def use_pymultinest(model, params, quiet=True, pltstats=False, rank=0, path=None, whd=None, max_iter=0, n_live_points=50, sampling_efficiency=0.8, evidence_tolerance=0.5, n_iter_before_update=100, null_log_evidence=-1e90, max_modes=100, mode_tolerance=-1e60):
"""
Function which defines the parameter space, calls PyMultiNest and performs the analysis of the results
......@@ -21,13 +21,21 @@ def use_pymultinest(model, params, quiet=True, nbp=0, pltstats=False, rank=0, pa
:param Model2D model: model initialized
:param dict params: dictionary which contains all parameters with their limits and whether they are fixed or not
:param bool quiet: print or not the verbose output of the fit method
:param int nbp: number of points calculated by MultiNest, set to 0 for unlimited
:param int max_iter: maximum number of iterations performed by MultiNest, set to 0 for unlimited
:param bool pltstats: create a pdf file with plots of probabilities of parameters
:param int rank: the rank of the thread (needed when you use MPI4PY with more than 1 core)
:param str path: the relative path of the directory where PyMultiNest can write the files needed for further analysis
:param string whd: suffix for filename if a high resolution map is used
"""
n_live_points = 50
sampling_efficiency = 0.8
evidence_tolerance = 0.5
n_iter_before_update = 50 # 100
null_log_evidence = -1e90
max_modes = 100
mode_tolerance = -1e60
def prior(cube, ndim, nparams):
"""
......@@ -37,18 +45,20 @@ def use_pymultinest(model, params, quiet=True, nbp=0, pltstats=False, rank=0, pa
:param int ndim: number of dimension if different of the number of parameters
:param int nparams: number of parameters
"""
if nparams == 7:
for i in range(nparams):
limits = params[params['parname'][i]]['limits']
cube[i] = cube[i] * (limits[1] - limits[0]) + limits[0]
#if nparams == 7: # XXX is this condition necessary?
for i in range(nparams):
limits = params[model.model_parname[i]]['limits']
cube[i] = cube[i] * (limits[1] - limits[0]) + limits[0]
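# Example: with limits [10, 20], a unit-cube value of 0.25 is mapped to 0.25 * (20 - 10) + 10 = 12.5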
if rank == 0:
t1 = time.time()
# ### Call PyMultiNest
pymultinest.run(model.log_likelihood, prior, len(params['parname']), outputfiles_basename=path+'/res'+whd+'_', resume=False, verbose=quiet, max_iter=nbp,
n_live_points=50, sampling_efficiency=0.8, evidence_tolerance=0.5, n_iter_before_update=100, null_log_evidence=-1e90,
max_modes=100, mode_tolerance=-1e60)
pymultinest.run(model.log_likelihood, prior, len(model.model_parname), outputfiles_basename=path+'/res'+whd+'_', resume=False, verbose=quiet, max_iter=max_iter,
n_live_points=n_live_points, sampling_efficiency=sampling_efficiency, evidence_tolerance=evidence_tolerance,
n_iter_before_update=n_iter_before_update, null_log_evidence=null_log_evidence, max_modes=max_modes, mode_tolerance=mode_tolerance)
# XXX all these parameters could be read from the configuration file (n_live_points, sampling_efficiency, evidence_tolerance, n_iter_before_update, null_log_evidence, max_modes, mode_tolerance): if present, call the function with the keyword, otherwise use the default; same for max_iter
# How can we change the probability distribution of the priors (e.g. not uniform)?
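# A possible implementation of the XXX above, as a sketch ('config' stands for the parsed YAML
# dictionary and is an assumption of this example, not an existing variable here):
#   mn = config.get('PyMultiNest', {})
#   keys = ('max_iter', 'n_live_points', 'sampling_efficiency', 'evidence_tolerance',
#           'n_iter_before_update', 'null_log_evidence', 'max_modes', 'mode_tolerance')
#   kwargs = {k: mn[k] for k in keys if k in mn}
#   use_pymultinest(model, params, path=path, whd=whd, **kwargs)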
# ###
if rank == 0:
......@@ -56,14 +66,14 @@ def use_pymultinest(model, params, quiet=True, nbp=0, pltstats=False, rank=0, pa
logger.info(' fit done in: {:6.2f} s \n'.format(t2-t1))
output = pymultinest.Analyzer(n_params=len(params['parname']), outputfiles_basename=path+'/res'+whd+'_')
output = pymultinest.Analyzer(n_params=len(model.model_parname), outputfiles_basename=path+'/res'+whd+'_')
bestfit = output.get_best_fit()
stats = output.get_mode_stats()
# print results on the prompt screen
logger.info(' {0:^{width}}{1:^{width}}{2:^{width}}{3:^{width}}{4:^{width}}{5:^{width}}'
'{6:^{width}}'.format(*params['parname'], width=12))
logger.info('-' * (len(params['parname'])*12))
'{6:^{width}}'.format(*model.model_parname, width=12))
logger.info('-' * (len(model.model_parname)*12))
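# XXX note: the fixed 7-field format strings above and below assume 7 parameters; a loop over
# model.model_parname (as in use_mpfit) would be needed for rotation curves with another number of parameters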
logger.info(' {0:^{width}.{prec}f}{1:^{width}.{prec}f}{2:^{width}.{prec}f}{3:^{width}.{prec}f}'
'{4:^{width}.{prec}f}{5:^{width}.{prec}f}{6:^{width}.{prec}f}'.format(*bestfit['parameters'], width=12, prec=6))
logger.info(' {0:^{width}.{prec}f}{1:^{width}.{prec}f}{2:^{width}.{prec}f}{3:^{width}.{prec}f}'
......@@ -72,22 +82,22 @@ def use_pymultinest(model, params, quiet=True, nbp=0, pltstats=False, rank=0, pa
# plot all parameters probabilities in a pdf file
if pltstats is True:
p = pymultinest.PlotMarginalModes(output)
plt.figure(figsize=(5 * len(params['parname']), 5 * len(params['parname'])))
for i in range(len(params['parname'])):
plt.subplot(len(params['parname']), len(params['parname']), len(params['parname']) * i + i + 1)
plt.figure(figsize=(5 * len(model.model_parname), 5 * len(model.model_parname)))
for i in range(len(model.model_parname)):
plt.subplot(len(model.model_parname), len(model.model_parname), len(model.model_parname) * i + i + 1)
p.plot_marginal(i, with_ellipses=False, with_points=True, grid_points=50, only_interpolate=False)
plt.ylabel("Probability")
plt.xlabel(params['parname']()[i])
plt.xlabel(model.model_parname[i])
for j in range(i):
plt.subplot(len(params['parname']), len(params['parname']), len(params['parname']) * j + i + 1)
plt.subplot(len(model.model_parname), len(model.model_parname), len(model.model_parname) * j + i + 1)
p.plot_conditional(i, j, with_ellipses=False, with_points=True, grid_points=30, only_interpolate=False)
plt.xlabel(params['parname']()[i])
plt.ylabel(params['parname']()[j])
plt.xlabel(model.model_parname[i])
plt.ylabel(model.model_parname[j])
plt.savefig(path+'/proba'+whd+'.pdf', bbox_inches='tight')
logger.info('Plot of probabilities saved as pdf')
return {'results': {params['parname'][i]: {'value': bestfit['parameters'][i],
'error': stats['modes'][0]['sigma'][i]} for i in range(len(params['parname']))},
return {'results': {model.model_parname[i]: {'value': bestfit['parameters'][i],
'error': stats['modes'][0]['sigma'][i]} for i in range(len(model.model_parname))},
'PyMultiNest': {'log likelihood': bestfit['log_likelihood']}}
......@@ -5,34 +5,78 @@ import os
import sys
import yaml
import logging
from matplotlib import pyplot as plt
logger = logging.getLogger('__mocking__')
def sky_coord_to_galactic(xc, yc, pa, incl, im_size=None):
def lowres_indices_highres_frame(im_shape, oversamp=1):
"""
Compute low resolution indices in the high resolution frame
:param ndarray im_shape: shape of the high resolution frame (pixels)
:param int oversamp: oversampling factor
:return ndarray, ndarray: y, x
"""
y, x = np.indices(im_shape)
y = (y - (oversamp/2 - 0.5)) / oversamp
x = (x - (oversamp/2 - 0.5)) / oversamp
return y, x
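# Example: with oversamp = 4, high resolution pixels 0, 1, 2, 3 map to low resolution coordinates
# -0.375, -0.125, 0.125, 0.375, i.e. the four pixels are centred on low resolution pixel 0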
def sky_coord_to_galactic(x, y, xc, yc, pa, inc):
"""
Converts position from Sky coordinates to Galactic coordinates
:param float xc: position of the center in arcsec
:param float yc: position of the center in arcsec
:param ndarray x: low resolution x-indices in the high resolution frame (pixel)
:param ndarray y: low resolution y-indices in the high resolution frame (pixel)
:param float xc: x-index of the center in low resolution (pixel)
:param float yc: y-index of the center in low resolution (pixel)
:param float pa: position angle of the major axis in degree
:param float incl: inclination of the disk in degree
:param ndarray im_size: maximum radius of the scene (arcsec),
im_size should be larger than the slit length + seeing (Default im_size=100)
:return list[ndarray, ndarray]: [r, theta]
:param float inc: inclination of the disk in degree
:return ndarray, ndarray: r, theta
"""
y, x = np.indices(im_size)
den = (y - yc) * math.cos(math.radians(pa)) - (x - xc) * math.sin(math.radians(pa))
num = - (x - xc) * math.cos(math.radians(pa)) - (y - yc) * math.sin(math.radians(pa))
r = (den ** 2 + (num / math.cos(math.radians(incl))) ** 2) ** 0.5
tpsi = num * 1.
tpsi[np.where(den != 0)] /= den[np.where(den != 0)] # to avoid a NaN at the center
den2 = math.cos(math.radians(incl)) ** 2 + tpsi ** 2
r = (den ** 2 + (num / math.cos(math.radians(inc))) ** 2) ** 0.5
sg = np.sign(den) # sign
ctheta = sg * (math.cos(math.radians(incl)) ** 2 / den2) ** 0.5 # azimuth in galaxy plane
num[np.where(den != 0)] /= den[np.where(den != 0)] # to avoid a NaN at the center
den2 = math.cos(math.radians(inc)) ** 2 + num ** 2
ctheta = sg * (math.cos(math.radians(inc)) ** 2 / den2) ** 0.5 # azimuth in galaxy plane
return r, ctheta
#def sky_coord_to_galactic(xc, yc, pa, inc, im_size=None, oversamp=1):
#"""
#Converts position from Sky coordinates to Galactic coordinates
#:param float xc: position of the center in arcsec
#:param float yc: position of the center in arcsec
#:param float pa: position angle of the major axis degree
#:param float inc: inclination of the disk in degree
#:param ndarray im_size: maximum radius of the scene (arcsec),
#im_size should be larger than the slit length + seeing (Default im_size=100)
#:param int oversamp: oversampling factor
#:return list[ndarray, ndarray]: [r, theta]
#"""
## XXX these computations should be done in the class, possibly when the oversampling has to be changed, but there is no need to redo them at each iteration
#y, x = np.indices(im_size)
#y = (y - (oversamp/2 - 0.5)) / oversamp
#x = (x - (oversamp/2 - 0.5)) / oversamp
#den = (y - yc) * math.cos(math.radians(pa)) - (x - xc) * math.sin(math.radians(pa))
#num = - (x - xc) * math.cos(math.radians(pa)) - (y - yc) * math.sin(math.radians(pa))
#r = (den ** 2 + (num / math.cos(math.radians(inc))) ** 2) ** 0.5
#sg = np.sign(den) # sign
#num[np.where(den != 0)] /= den[np.where(den != 0)] # to avoid a NaN at the center XXX ???
#den2 = math.cos(math.radians(inc)) ** 2 + num ** 2
##tpsi = num * 1.
##tpsi[np.where(den != 0)] /= den[np.where(den != 0)] # to avoid a NaN at the center
##den2 = math.cos(math.radians(inc)) ** 2 + tpsi ** 2
#ctheta = sg * (math.cos(math.radians(inc)) ** 2 / den2) ** 0.5 # azimuth in galaxy plane
return [r, ctheta]
#return [r, ctheta]
def rebin_data(data, factor):
......@@ -53,7 +97,7 @@ def rebin_data(data, factor):
return data2.mean(2).mean(3)
def write_fits(data, filename, config, results, mask=None):
def write_fits(data, filename, config, model, results, mask=None):
"""
Write data in a fits file with the model's parameters
......@@ -68,7 +112,8 @@ def write_fits(data, filename, config, results, mask=None):
data[np.logical_not(mask)] = float('nan')
hdu = fits.PrimaryHDU(data=data)
for key in config['init fit']['parname']:
#for key in config['init fit']['parname']:
for key in model.model_parname:
try:
hdu.header.append((key, results['results'][key]['value'], config['init fit'][key]['desc']))
except KeyError as k:
......@@ -108,23 +153,25 @@ def search_file(path, filename):
sys.exit()
def make_dir(path, config):
def make_dir(path, config, model):
"""
Creates the directory where results will be written
The name of the directory depends on fixed parameters
:param path: path where fits files are
:param dict config: YAML config dictionary
:param Model2D model: model
:return str: the path
"""
if path == '.':
path = ''
path = './'
dirname = config['config fit']['method'] + '_' + config['config fit']['model']
suffix = ''