Commit 542d229f authored by bepinat's avatar bepinat

major update with the possibility of having several components + models with any number of parameters + structure of YAML input file modified
parent a58a327b
@@ -16,10 +16,10 @@ class Image:
if type(filename) is str:
self.header = fits.getheader(filename)
self.data = fits.getdata(filename)
self.high = self.header['NAXIS1']
self.height = self.header['NAXIS1']
self.length = self.header['NAXIS2']
self.size = np.array(np.shape(self.data))
self.len = self.length*self.high
self.len = self.length*self.height
self.oversample = 1
if mask is None:
self.mask = np.logical_not(np.isnan(self.data))
@@ -32,9 +32,9 @@ class Image:
self.header = None
self.data = np.nan_to_num(filename)
self.size = np.array(np.shape(self.data))
self.high = self.size[0]
self.height = self.size[0]
self.length = self.size[1]
self.len = self.length*self.high
self.len = self.length*self.height
self.oversample = 1
if mask is None:
self.mask = np.logical_not(np.isnan(self.data))
@@ -54,8 +54,8 @@ class Image:
def get_lenght(self):
return self.length
def get_high(self):
return self.high
def get_height(self):
return self.height
def set_oversamp(self, oversamp):
self.oversample = oversamp
@@ -67,15 +67,21 @@ class Image:
"""
Performs an interpolation of the image at higher resolution and stores the new image
"""
#x = np.arange(self.length) # + 0.5*self.oversample
#y = np.arange(self.height) # + 0.5*self.oversample
#new_x = np.linspace(0, self.length, self.length * int(self.oversample))
#new_y = np.linspace(0, self.height, self.height * int(self.oversample))
x = np.linspace(0, self.length, self.length) # + 0.5*self.oversample
y = np.linspace(0, self.high, self.high) # + 0.5*self.oversample
y = np.linspace(0, self.height, self.height) # + 0.5*self.oversample
new_x = np.linspace(0, self.length, self.length * int(self.oversample))
new_y = np.linspace(0, self.high, self.high * int(self.oversample))
new_y = np.linspace(0, self.height, self.height * int(self.oversample))
data = np.reshape(self.data, -1)
func = interp2d(x, y, data, kind='linear', fill_value=0)
self.data = np.array(func(new_x, new_y)).transpose()
self.high *= self.oversample
self.height *= self.oversample
self.length *= self.oversample
self.size *= self.oversample
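For illustration, the oversampling done by this method can be reproduced with a minimal standalone sketch (toy array shape and oversampling factor; note that scipy.interpolate.interp2d is deprecated in recent SciPy releases):

import numpy as np
from scipy.interpolate import interp2d

data = np.arange(24, dtype=float).reshape(4, 6)   # toy image, height=4, length=6
oversample = 2
height, length = data.shape
x = np.linspace(0, length, length)
y = np.linspace(0, height, height)
new_x = np.linspace(0, length, length * oversample)
new_y = np.linspace(0, height, height * oversample)
func = interp2d(x, y, data, kind='linear', fill_value=0)   # linear interpolation, zeros outside the grid
hires = func(new_x, new_y)                                 # shape (height*oversample, length*oversample)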
This diff is collapsed.
import math
import numpy as np
from numpy.fft import fftshift, fft2, ifft2
from numpy.fft import fftshift, rfft2, irfft2
#from numpy.fft import fftshift, fft2, ifft2
from Class.Images import Image
import matplotlib.pyplot as plt
class PSF:
"""
Create a 2D gaussian as PSF, store all the information about the psf.
Perform the convolution of an image with the psf stored by the method 'convolution'.
You can initiate this class with a fits file of a PSF, in this case no gaussian will be created.
To avoid (at least minimize) any border effect, the PSF class needs information
about the size of the highest resolution image, passed through an Image class instance.
"""
def __init__(self, flux, img_psf=None, fwhm_lr=3.5, smooth=0):
@@ -25,11 +27,12 @@ class PSF:
"""
self.psf = img_psf
self.smooth = smooth * flux.oversample
sigma_s = self.smooth / (2 * math.sqrt(2 * math.log(2)))
if self.psf is None:
self.im_size = flux.size
self.fwhm = fwhm_lr*flux.oversample
self.smooth = smooth*flux.oversample
self.fwhm = fwhm_lr * flux.oversample
self.fwhm_f = np.sqrt(self.fwhm**2 + self.smooth**2)
# (2*fwhm) is considered to avoid the boundary effect after the convolution
self.size = np.array([2 * self.fwhm_f, 2 * self.fwhm_f])
@@ -38,25 +41,40 @@ class PSF:
self.size = np.array((ideal_size - self.im_size) / 2, dtype=int)
y, x = np.indices((self.im_size[0] + 2 * self.size[0], self.im_size[1] + 2 * self.size[1]))
sigma = self.fwhm_f / (2 * math.sqrt(2 * math.log(2)))
self.psf = (1. / (2 * math.pi * sigma ** 2)) * np.exp(-((x - self.im_size[1] / 2 - self.size[1]) ** 2 + (y - self.im_size[0] / 2 - self.size[0]) ** 2) / (2.0 * sigma ** 2))
# normalization in order to ensure flux conservation
self.psf /= self.psf.sum()
sigma_o = self.fwhm / (2 * math.sqrt(2 * math.log(2)))
self.psf_o = (1. / (2 * math.pi * sigma_o ** 2)) * np.exp(-((x - self.im_size[1] / 2 - self.size[1]) ** 2 + (y - self.im_size[0] / 2 - self.size[0]) ** 2) / (2.0 * sigma_o ** 2))
else:
self.im_size = np.array(img_psf.shape, dtype=int)
self.fwhm = self.im_size[0] / 8 # could be 4 or 2...
self.fwhm_f = int(np.ceil(np.sqrt(self.fwhm**2 + self.smooth**2)))
self.size = np.array([2 * self.fwhm_f, 2 * self.fwhm_f], dtype=int)
ideal_size = 2 ** np.ceil(np.log(self.im_size + 2 * self.size) / math.log(2))
self.size = np.array((ideal_size - self.im_size) / 2, dtype=int)
y, x = np.indices((self.im_size[0] + 2 * self.size[0], self.im_size[1] + 2 * self.size[1]))
self.psf_o = np.zeros((self.im_size[0] + 2 * self.size[0], self.im_size[1] + 2 * self.size[1]))
self.psf_o[int(self.size[0]):int(self.size[0] + self.im_size[0]), int(self.size[1]):int(self.size[1] + self.im_size[1])] = img_psf
if sigma_s != 0:
self.psf_s = (1. / (2 * math.pi * sigma_s ** 2)) * np.exp(-((x - self.im_size[1] / 2 - self.size[1]) ** 2 + (y - self.im_size[0] / 2 - self.size[0]) ** 2) / (2.0 * sigma_s ** 2))
self.psf_s /= self.psf_s.sum()
self.psf_s_fft2 = rfft2(fftshift(self.psf_s))
else:
self.size = np.array(img_psf.shape)
self.psf = np.zeros((self.im_size[0] + 2 * self.size[0], self.im_size[1] + 2 * self.size[1]))
self.psf[self.im_size[0]/2+self.size[0]/2:self.im_size[0]/2+3*self.size[0]/2, self.im_size[1]/2+self.size[1]/2:self.im_size[1]/2+3*self.size[1]/2]\
= img_psf
self.psf /= self.psf.sum()
self.psf_s = np.zeros((self.im_size[0] + 2 * self.size[0], self.im_size[1] + 2 * self.size[1]))
self.psf_s[0, 0] = 1
self.psf_s_fft2 = rfft2(self.psf_s)
# normalization in order to ensure flux conservation
self.psf_o /= self.psf_o.sum()
self.psf_o_fft2 = rfft2(fftshift(self.psf_o))
# accounting for Gaussian smoothing
self.psf_fft2 = self.psf_o_fft2 * self.psf_s_fft2
self.psf = fftshift(irfft2(self.psf_fft2))
self.psf_fft2 = fft2(fftshift(self.psf))
def convolution(self, data):
"""
Perform the convolution product between the data and the PSF
:param ndarray data:
:return ndarray:
@@ -65,7 +83,7 @@ class PSF:
data2 = np.zeros((data.shape[0] + 2 * self.size[0], data.shape[1] + 2 * self.size[1]))
data2[self.size[0]:data.shape[0] + self.size[0], self.size[1]:data.shape[1] + self.size[1]] = data
data_conv = ifft2(fft2(data2) * self.psf_fft2)
data_conv = irfft2(rfft2(data2) * self.psf_fft2)
data_conv = data_conv[self.size[0]:data.shape[0] + self.size[0], self.size[1]:data.shape[1] + self.size[1]].real
return data_conv
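For reference, the padding and real-FFT convolution pattern used by this class can be summarised in a minimal standalone sketch (image size, padding and FWHM values are illustrative):

import numpy as np
from numpy.fft import fftshift, rfft2, irfft2

def gaussian_psf(shape, fwhm):
    # centred, flux-normalised 2D Gaussian of the requested shape
    sigma = fwhm / (2 * np.sqrt(2 * np.log(2)))
    y, x = np.indices(shape)
    psf = np.exp(-((x - shape[1] / 2) ** 2 + (y - shape[0] / 2) ** 2) / (2 * sigma ** 2))
    return psf / psf.sum()

data = np.random.rand(32, 32)
pad = 8                                                            # margin against border effects
padded = np.zeros((data.shape[0] + 2 * pad, data.shape[1] + 2 * pad))
padded[pad:-pad, pad:-pad] = data
psf_fft2 = rfft2(fftshift(gaussian_psf(padded.shape, fwhm=3.5)))   # PSF recentred at (0, 0)
data_conv = irfft2(rfft2(padded) * psf_fft2)[pad:-pad, pad:-pad]   # crop back to the original window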
#Configuration file needed by the program
#YAML is used
####
gal name: test
# maybe add a key nobj to identify the number of galaxies to be fitted
# then for each galaxy, need a flux_HD.fits file
# for each galaxy add a key ncomp to identify the number of components to be used
# for each component, only the rotation curve parameters and the model change: the model should be specified in each component rather than in 'config fit'
#In this section write the names of the '.fits' files from the directory which contains this file
#The program can search files in subdirectories
files:
flux: flux_HD.fits
vel: vel.fits
errvel: evel.fits
disp:
psf:
#This section is to set parameters needed by the model used
init fit:
#For each parameter, set the initial 'value', 'limits' and whether it is 'fixed' (1) or not (0)
#The order of parameters is not important
xc:
desc: center abscissa in pixel
value: 14.8
limits: [10, 20]
fixed: 0
yc:
desc: center ordinate in pixel
value: 14.9
limits: [10, 20]
fixed: 0
pa:
desc: position angle in degree
value: 0
limits: [0, 360]
fixed: 0
inc:
desc: inclination in degree
value: 45
limits: [44, 46]
fixed: 1
vs:
desc: systemic velocity in km/s
value: 0
limits: [-10, 10]
fixed: 0
rt:
desc: transition radius
value: 4
limits: [1, 12]
fixed: 0
vm:
desc: model velocity in km/s
value: 209
limits: [0, 400]
fixed: 0
#This section is for parameters of the model which are not fitted
conf model:
sig: 40
slope: 0
psfx: 3.5
psfz: 49
smooth: 0
#Selection of the desired 'model', 'method' and other settings for running the program
config fit:
model: expo
method: multinest
verbose: False
#parameters about PyMultiNest running
PyMultiNest:
max_iter: 10000
plt stats: False
plt xy: True
#parameters about mpfit running
mpfit:
ftol: 1e-10
gtol: 1e-10
xtol: 1e-10
#Configuration file needed by the program
#YAML is used
####
name: test
#In this section write the names of the '.fits' files from the directory which contains this file
#The program can search files in subdirectories
files:
vel: vel.fits
errvel: evel.fits
disp:
psf:
#This section is to set parameters needed by the model used for any object in the field
objects:
obj1:
files:
flux: flux_HD.fits
# Parameters not fitted but used for correcting the dispersion map: 'sig' and 'slope' are used in Model2D to correct the dispersion map
# XXX Handle these two parameters
dispersion info:
sig: 0
slope: 0
params:
xc:
desc: center abscissa in pixel
value: 14.8
limits: [10, 20]
fixed: 1
yc:
desc: center ordinate in pixel
value: 14.9
limits: [10, 20]
fixed: 1
pa:
desc: position angle in degree
value: 0
limits: [0, 360]
fixed: 0
inc:
desc: inclination in degree
value: 45
limits: [44, 46]
fixed: 1
vs:
desc: systemic velocity in km/s
value: 0
limits: [-10, 10]
fixed: 0
#Components to be used for the fit: there can be several components to describe the rotation curve of a galaxy (e.g. stars, gas, DM)
components:
comp1:
model: flat
params:
rt:
desc: transition radius
value: 1
limits: [1, 12]
fixed: 0
vm:
desc: model velocity in km/s
value: 120
limits: [0, 400]
fixed: 0
comp2:
model: expo
params:
rt:
desc: transition radius
value: 1
limits: [1, 12]
fixed: 0
vm:
desc: model velocity in km/s
value: 143
limits: [0, 400]
fixed: 0
#This section is for parameters of the model which are not fitted
#conf model:
resol params:
psfx: 3.5
psfz: 40
smooth: 0
oversample: 4
#Selection of the desired 'method' and other settings for running the program
config fit:
verbose: False
method: mpfit
#parameters about PyMultiNest running
multinest:
nbp: 0
plt stats: False
plt xy: True
#parameters about mpfit running
mpfit:
ftol: 1e-10
gtol: 1e-10
xtol: 1e-10
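For illustration, a minimal sketch of how such a configuration file can be read and its nested structure accessed with PyYAML (the filename is taken from the README example below and is an assumption; this is not the program's own loader):

import yaml

with open('Example/input2_flat_expo_mpfit.yaml') as f:
    cfg = yaml.safe_load(f)

print(cfg['files']['vel'])                                  # 'vel.fits'
print(cfg['objects']['obj1']['params']['xc']['value'])      # 14.8
for name, comp in cfg['objects']['obj1']['components'].items():
    print(name, comp['model'], comp['params']['vm']['value'])
print(cfg['config fit']['method'])                          # 'mpfit'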
#Configuration file needed by the program
#YAML is used
####
name: test
#In this section write the names of the '.fits' files from the directory which contains this file
#The program can search files in subdirectories
files:
vel: vel.fits
errvel: evel.fits
disp:
psf:
#This section is to set parameters needed by the model used for any object in the field
objects:
obj1:
files:
flux: flux_HD.fits
# Parameters not fitted but used for correcting the dispersion map: 'sig' and 'slope' are used in Model2D to correct the dispersion map
dispersion info:
sig: 0
slope: 0
params:
xc:
desc: center abscissa in pixel
value: 14.8
limits: [14, 16]
fixed: 1
yc:
desc: center ordinate in pixel
value: 14.9
limits: [14, 16]
fixed: 1
pa:
desc: position angle in degree
value: 0
limits: [0, 360]
fixed: 0
inc:
desc: inclination in degree
value: 45
limits: [44, 46]
fixed: 1
vs:
desc: systemic velocity in km/s
value: 0
limits: [-10, 10]
fixed: 0
#Components to be used for the fit: there can be several components to describe the rotation curve of a galaxy (e.g. stars, gas, DM)
components:
comp1:
model: flat
params:
rt:
desc: transition radius
value: 4
limits: [1, 12]
fixed: 0
vm:
desc: model velocity in km/s
value: 209
limits: [50, 300]
fixed: 0
#This section is for parameters of the model which are not fitted
resol params:
psfx: 3.5
psfz: 40
smooth: 1
oversample: 4
#Selection of the desired 'method' and other settings for running the program
config fit:
verbose: False
method: mpfit
#parameters about mpfit running
mpfit:
ftol: 1e-10
gtol: 1e-10
xtol: 1e-10
velocity model: flat
flux model: exp with characteristic radius 4 pixels
parameters:
x y pa incl vs vm rd sig0 fwhm smooth oversample rtrunc
15.0 15.0 0.0 45.0 0.0 200.0 4.0 40.0 3.5 0.0 5.0 8.0
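For illustration, a minimal sketch of the flux map described by this parameter set, assuming the exponential flux model is F(r) proportional to exp(-r/rd) truncated at rtrunc, and ignoring inclination and position angle (an interpretation of the fields above, not the program's own code):

import numpy as np

x0, y0, rd, rtrunc = 15.0, 15.0, 4.0, 8.0          # values taken from the parameter line above
y, x = np.indices((30, 30))
r = np.hypot(x - x0, y - y0)
flux = np.exp(-r / rd)                             # exponential disk, characteristic radius rd
flux[r > rtrunc] = 0.                              # truncation radius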
@@ -3,10 +3,58 @@ from scipy.special import i0, i1, k0, k1
"""
Librairy which contains elementary rotation curve models
Library which contains elementary rotation curve models and which allows to combine them quadratically
"""
class combined_velocity_model:
'''
Class which combines velocity models quadratically. This class has attributes useful for handling the model parameters.
'''
def __init__(self, list_name, list_comp):
'''
:param list list_name: list which contains the names of the models to combine
:param list list_comp: list which contains the names of the components associated with each model
'''
self.list_name = list_name
self.model_parname = []
self.model_modname = []
self.model_compname = []
self.final_func_name = ''
for (model, component) in zip(list_name, list_comp):
self.final_func_name += model + '+'
func = list_model[model]
func_parname = []
for name in func.__code__.co_varnames[1:func.__code__.co_argcount]:
self.model_parname.append(name)
self.model_modname.append(model)
self.model_compname.append(component)
#func_parname.append(name)
#self.model_parname.append([func, func_parname])
#self.model_parname.append(func_parname)
self.final_func_name = self.final_func_name[:-1]
def vel_model(self, r, *args):
'''
Final combined model
:param ndarray r: 1D or 2D array which contains the radius
:param list of float args: list that contains model arguments
:return ndarray: 1D or 2D array with values of the model at the specified radius
'''
n_params_prev = 0
v2 = np.zeros(r.shape)
for model in self.list_name:
func = list_model[model]
n_params = n_params_prev + func.__code__.co_argcount - 1
params = args[n_params_prev:n_params]
#print(model, n_params_prev, n_params, params)
# Add a check of the input parameters
#print(params)
v2 += func(r, *params) ** 2
n_params_prev = n_params
return np.sqrt(v2)
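A minimal usage sketch of the class above, assuming the module-level list_model dictionary maps the names 'flat' and 'expo' (taken from the YAML examples) to rotation-curve functions with signature func(r, rt, vm), and that the class is imported from this library (import path omitted):

import numpy as np

model = combined_velocity_model(['flat', 'expo'], ['comp1', 'comp2'])
print(model.final_func_name)        # 'flat+expo'
print(model.model_parname)          # ['rt', 'vm', 'rt', 'vm'] if both models take (r, rt, vm)
r = np.linspace(0.1, 20., 50)
# arguments follow the order of the listed models: (rt, vm) of comp1 then (rt, vm) of comp2
v = model.vel_model(r, 1., 120., 1., 143.)   # quadratic combination of the two curves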
def exponential_velocity(r, rt, vm):
"""
Rotation curve for an exponential disk
@@ -170,7 +218,6 @@ def courteau_velocity(r, rt, vm, a):
:param float a: Courteau index
"""
vr = np.zeros(np.shape(r))
q = np.where(r != 0) # To prevent any problem in the center
@@ -53,13 +53,19 @@ method_model_fixedparams. A summary of the parameters of the model is written in
## Example
There is an Example directory in which you have fits files and a **YAML** file as example to try the program.
There is an Example directory in which you have fits files and some **YAML** files as examples to try the program:
`mocking.py Example/ input2_flat_mpfit.yaml`
`mocking.py Example/ input2_flat_expo_mpfit.yaml`
## Create model
The subprocess to create maps was used to validate the program (cross test) against existing programs.
This subprocess needs to be updated following the latest changes of the architecture and the addition of the **YAML** data language.
## Other features
- The toolbox makes it possible to create input **YAML** files for a list of galaxies.
### Credits
Written by Jeremy Dumoulin under supervision of Benoît Epinat
Written by Jeremy Dumoulin under the supervision of Benoît Epinat; maintained and extended with new features by Benoît Epinat.
@@ -2,7 +2,6 @@ import sys
import os
import time
import logging
from Class.Model2D import Model2D
import Tools.cap_mpfit as mpfit
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname('../Class/'), '..')))
@@ -14,7 +13,7 @@ def use_mpfit(model, params, quiet=False, gtol=1e-10, ftol=1e-10, xtol=1e-10):
Function that calls mpfit to perform the fit of the model
:param Model2D model: model initialised
:param dict params: dictionary which contains all parameters with the limits and if are fixed or not
:param dict params: dictionary which contains all parameters with the limits and if they are fixed or not
:param Bool quiet: print or not verbose from the fit method
:param float gtol: orthogonality desired between the function vector and the columns of the jacobian.
:param float ftol: relative error desired in the sum of squares.
@@ -28,23 +27,29 @@ def use_mpfit(model, params, quiet=False, gtol=1e-10, ftol=1e-10, xtol=1e-10):
# create the array of dictionary 'parinfo' needed by mpfit
try:
p0 = [params[key] for key in model.model_parname]
p0 = [params[compn][parn] for (compn, parn) in zip(model.model_compname, model.model_parname)]
logger.info(' Parameters name are consistent.')
except:
logger.info(' Parameters name inconsistency. Check input parameters names in input file.')
logger.info(' Parameters names should be')
logger.info(print([parname for parname in model.model_parname]))
logger.info([(compn, modn, parn) for (modn, parn, compn) in zip(model.model_modname, model.model_parname, model.model_compname)])
return
#p0 = [params[key] for key in params['parname']] # YYY in principle necessary to avoid disordered dictionary reading of the yaml file but is it really necessary? Yaml seems to read in the right order
parinfo = [{'value': 0., 'fixed': 0, 'limited': [1, 1], 'limits': [0., 0.], 'parname': 0., 'step': 0.} for i in range(len(p0))]
parinfo = [{'value': 0., 'fixed': 0, 'limited': [1, 1], 'limits': [0., 0.], 'modname': 0., 'parname': 0., 'step': 0.} for i in range(len(p0))]
for i in range(len(p0)):
modn = model.model_modname[i]
parn = model.model_parname[i]
compn = model.model_compname[i]
parinfo[i]['value'] = p0[i]['value']
parinfo[i]['parname'] = model.model_parname[i]
parinfo[i]['limits'] = params[model.model_parname[i]]['limits']
if params[model.model_parname[i]]['fixed'] is None:
params[model.model_parname[i]]['fixed'] = 0
if params[model.model_parname[i]]['fixed'] == 1:
parinfo[i]['parname'] = parn
parinfo[i]['modname'] = modn
parinfo[i]['compname'] = compn
parinfo[i]['limits'] = params[compn][parn]['limits']
if params[compn][parn]['fixed'] is None:
params[compn][parn]['fixed'] = 0
if params[compn][parn]['fixed'] == 1:
parinfo[i]['fixed'] = 1
logger.info(' start fit with mpfit')
@@ -62,7 +67,6 @@ def use_mpfit(model, params, quiet=False, gtol=1e-10, ftol=1e-10, xtol=1e-10):
logger.info(' Chi2R: {} DOF: {}'.format(model_fit.fnorm/model_fit.dof, model_fit.dof))
message = ' '
#for par in params['parname']:
for par in model.model_parname:
message += '{0:^{width}}'.format(par, width=12)
logger.info(message)
@@ -77,6 +81,12 @@ def use_mpfit(model, params, quiet=False, gtol=1e-10, ftol=1e-10, xtol=1e-10):
for par in model_fit.perror:
message += '{0:^{width}.{prec}}'.format(par, width=12, prec=6)
logger.info(message)
result = []
for i in range(len(model.model_parname)):
result.append({model.model_parname[i]: {'value': model_fit.params[i], 'error': model_fit.perror[i],
'component': p0[i]['component'], 'model':model.model_modname[i],
'desc': p0[i]['desc']}})
return {'results': {model.model_parname[i]: {'value': model_fit.params[i], 'error': model_fit.perror[i]} for i in range(len(model.model_parname))},
return {'results': result,
'mpfit': {'chi2r': model_fit.fnorm/model_fit.dof, 'dof': model_fit.dof}}
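To make the new nested layout concrete, a sketch of the params dictionary that use_mpfit now expects, mirroring the YAML structure (component name, then parameter name, then settings; the names and values are illustrative):

params = {
    'comp1': {
        'rt': {'desc': 'transition radius', 'value': 1., 'limits': [1, 12], 'fixed': 0},
        'vm': {'desc': 'model velocity in km/s', 'value': 120., 'limits': [0, 400], 'fixed': 0},
    },
    'comp2': {
        'rt': {'desc': 'transition radius', 'value': 1., 'limits': [1, 12], 'fixed': 0},
        'vm': {'desc': 'model velocity in km/s', 'value': 143., 'limits': [0, 400], 'fixed': 0},
    },
}
# each entry becomes one mpfit 'parinfo' dictionary, e.g. for comp1/rt with a 'flat' model:
parinfo_rt = {'value': 1., 'fixed': 0, 'limited': [1, 1], 'limits': [1, 12],
              'parname': 'rt', 'modname': 'flat', 'compname': 'comp1', 'step': 0.}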
import sys
import os
import logging
import pymultinest
import time
import matplotlib.pyplot as plt
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname('../Class/'), '..')))
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname('../Tools/'), '..')))
logger = logging.getLogger('__mocking__')
def use_multinest(model, params, quiet=True, pltstats=False, rank=0, path=None, whd=None, max_iter=0, n_live_points=50, sampling_efficiency=0.8, evidence_tolerance=0.5, n_iter_before_update=100, null_log_evidence=-1e90, max_modes=100, mode_tolerance=-1e60):
"""
Function which defines the parameter space, calls PyMultiNest and performs the analysis of the results
:param Model2D model: model initialized
:param dict params: dictionary which contains all parameters with their limits and whether they are fixed or not
:param bool quiet: print or not verbose from the fit method
:param int max_iter: number of points calculated by MultiNest, set to 0 for unlimited
:param bool pltstats: create a pdf file with plots of probabilities of parameters
:param int rank: the rank of the thread (needed when you use MPI4PY with more than 1 core)
:param str path: the relative path of the directory where PyMultiNest can write files needed for the further analysis
:param string whd: suffix for filename if a high resolution map is used
"""