Commit 47df8d7c authored by bepinat's avatar bepinat

Update with minor corrections

parent 542d229f
import numpy as np
from astropy.io import fits
from scipy.interpolate import interp2d
import Tools.tools as tools
class Cube:
    """
    Stores a FITS cube and determines all parameters like size etc ...

    :param string or ndarray filename: path+name of the file, can be a numpy array.
    :param ndarray mask: optional boolean mask of valid spaxels; when None it is
        computed from the NaN pattern of the input data.
    """

    def __init__(self, filename, mask=None):
        self.data_rebin = None  # rebinned data, filled in later by external code
        if type(filename) is str:
            self.header = fits.getheader(filename)
            self.data = fits.getdata(filename)
            self.length = self.header['NAXIS1']
            self.height = self.header['NAXIS2']
            self.sizez = self.header['NAXIS3']
            self.size = np.array(np.shape(self.data))
            self.len = self.length*self.height
            self.oversample = 1
            if mask is None:
                # mask of valid (non-NaN) values, computed before NaNs are zeroed
                self.mask = np.logical_not(np.isnan(self.data))
            else:
                self.mask = mask
            self.nan_to_num()
        # Added to create a model without Image's class
        if type(filename) is np.ndarray:
            self.header = None
            # Bug fix: the NaN mask must be computed from the ORIGINAL array,
            # BEFORE NaNs are replaced by zeros. The previous code called
            # np.isnan on the already-cleaned data, yielding an all-True mask.
            valid = np.logical_not(np.isnan(filename))
            self.data = np.nan_to_num(filename)
            self.size = np.array(np.shape(self.data))
            # NOTE(review): axis mapping is inconsistent with the FITS branch,
            # where length comes from NAXIS1 (the fastest axis, i.e. size[2]).
            # Kept as-is; confirm which convention callers rely on.
            self.height = self.size[2]
            self.length = self.size[1]
            self.sizez = self.size[0]
            self.len = self.length*self.height
            self.oversample = 1
            if mask is None:
                self.mask = valid
            else:
                self.mask = mask
            self.nan_to_num()

    def nan_to_num(self):
        """
        Converts 'nan' into 0
        """
        self.data = np.nan_to_num(self.data)

    def get_size(self):
        """Return the cube shape as a numpy array (nz, ny, nx)."""
        return self.size

    def get_lenght(self):
        """Return the spatial length (misspelled name kept for compatibility)."""
        return self.length

    def get_height(self):
        """Return the spatial height."""
        return self.height

    def set_oversamp(self, oversamp):
        """Set the spatial oversampling factor."""
        self.oversample = oversamp

    def get_oversamp(self):
        """Return the spatial oversampling factor."""
        return self.oversample
......@@ -16,8 +16,8 @@ class Image:
if type(filename) is str:
self.header = fits.getheader(filename)
self.data = fits.getdata(filename)
self.height = self.header['NAXIS1']
self.length = self.header['NAXIS2']
self.length = self.header['NAXIS1']
self.height = self.header['NAXIS2']
self.size = np.array(np.shape(self.data))
self.len = self.length*self.height
self.oversample = 1
......
import math
import numpy as np
import Tools.tools as tools
from scipy import constants as ct
from Class.PSF import PSF
from Class.Images import Image
......@@ -73,15 +74,6 @@ class Model2D:
self.model_RCparams = None
def set_constraints(self, config):
    """
    Placeholder for setting constraints on the model parameters.

    Currently a no-op; no constraints are applied yet.

    :param dict config: configuration dictionary (unused for now)
    """
    return
def set_parameters(self, xc, yc, pa, inc, vs, *args):
"""
Sets the value of the model parameters
......@@ -228,6 +220,197 @@ class Model2D:
return np.sum(chi2)
#class Model3D: # XXX do the same thing: initiate, then produce velocity map, intensity/flux map and velocity dispersion map at high resolution and then produce a cube with the same sampling as data and adjusted with respect to a reference line and wavelengths in the cube. Then convolve with PSF and rebin. Everything needed is in the program with made with Debora
class Model3D:
    """
    3D model of the datacube around an emission line of a galaxy.

    Produces velocity, flux and velocity-dispersion maps at high resolution,
    builds a cube at the data sampling, convolves with the PSF and rebins.
    Also computes the velocity field and the dispersion of a model using the
    observed cube in input. This model can be used with any rotation curve
    model (in 'velocity_model.py').
    """

    def __init__(self, cube, vcube, flux, psf, vel_model, psfz=0, sig0=0, slope=0., header=None, z=0., lbda0=6562.78, cont=0.):
        """
        :param Image cube: represents the cube to fit
        :param Image vcube: the variance cube
        :param Image flux: flux distribution at the same or in higher resolution than the velocity
        :param PSF psf: the class of the psf
        :param func vel_model: velocity function used to create the model
        :param float psfz: velocity dispersion corresponding to the spectral resolution
        :param float sig0: velocity dispersion of the model
        :param float slope: slope of the velocity dispersion
        :param hdufits header: header associated to the datacube
        :param float z: systemic redshift of the source
        :param float lbda0: restframe wavelength of the line used (could be a list of lines and then need to add line ratios, etc.)
        :param float cont: continuum level (assumed constant for now, but could be polynomial)
        """
        # Parameters which must be initialized
        self.vel_model = vel_model.vel_model
        self.model_sig0 = sig0
        self.model_slope = slope
        self.psfz = psfz
        # XXX to be reworked in 3D
        self.cube_mod = np.zeros(cube.shape)
        self.vel_map_hd = np.zeros(np.array(flux.get_size()))
        self.disp_map_hd = np.zeros(np.array(flux.get_size()))
        self.cube = cube
        self.vcube = vcube
        self.header = header
        self.flux = flux
        self.psf = psf
        self.oversamp = self.flux.get_oversamp()
        self.indices = tools.lowres_indices_highres_frame(flux.get_size(), self.oversamp)
        self.z = z
        self.lbda0 = lbda0
        self.lbda1 = lbda0 * (1 + z)  # observed (redshifted) wavelength of the line
        self.cont = cont  # XXX should actually be an image!
        # We initiate the wavelength range of the datacube
        self.lbda = (np.arange(header['NAXIS3']) - header['CRPIX3']) * header['CDELT3'] + header['CRVAL3']  # XXX Check units!
        # Velocity offset of each spectral channel relative to the line, in km/s.
        # Only works when a single line is modelled.
        self.vind = (self.lbda - self.lbda1) / self.lbda1 * ct.c * 1e-3
        # We initiate the geometrical model parameters
        self.model_pa = None
        self.model_inc = None
        self.model_xc = None
        self.model_yc = None
        self.model_vs = None
        # We initiate the associated coordinate maps in the galaxy frame
        self.model_radius = None
        self.model_theta = None
        # Ordered list containing the names of the model parameters
        self.model_parname = ['xc', 'yc', 'pa', 'inc', 'vs']
        self.model_modname = ['geometrical', 'geometrical', 'geometrical', 'geometrical', 'geometrical']
        self.model_compname = ['geometrical', 'geometrical', 'geometrical', 'geometrical', 'geometrical']
        # Append the rotation curve parameter names; the number of arguments adjusts automatically
        for name in vel_model.model_parname:
            self.model_parname.append(name)
        for name in vel_model.model_modname:
            self.model_modname.append(name)
        for name in vel_model.model_compname:
            self.model_compname.append(name)
        self.model_RCparams = None

    def set_parameters(self, xc, yc, pa, inc, vs, *args):
        """
        Sets the value of the model parameters

        :param float xc: abscissa of the center in pixel
        :param float yc: ordinate of the center in pixel
        :param float pa: position angle of the major axis in degree
        :param float inc: inclination of the disk in degree
        :param float vs: systemic velocity in km/s
        :param list of float args: list that contains other model arguments
        """
        self.model_pa = pa
        self.model_inc = inc
        self.model_xc = xc
        self.model_yc = yc
        self.model_vs = vs
        self.model_radius, self.model_theta = tools.sky_coord_to_galactic(self.indices[1], self.indices[0], self.model_xc, self.model_yc, self.model_pa, self.model_inc)
        self.model_RCparams = args

    def get_parameter(self):
        """
        Gets the actual parameters of the model (in low resolution scale)

        :return list:
        """
        return [self.model_xc, self.model_yc, self.model_pa, self.model_inc, self.model_vs, *self.model_RCparams]

    def disk_velocity(self):
        """
        Computes the line-of-sight velocity field

        :return ndarray:
        """
        vr = self.vel_model(self.model_radius, *self.model_RCparams)
        # Projection on the line of sight; model_theta comes from
        # tools.sky_coord_to_galactic (presumably cos of the azimuthal angle — confirm)
        v = vr * math.sin(math.radians(self.model_inc)) * self.model_theta + self.model_vs
        return v

    def linear_velocity_dispersion(self):
        """
        Returns the velocity dispersion map needed for the fit process

        :return ndarray: velocity dispersion map
        """
        # Linear dispersion profile in radius, clipped at zero
        sig = self.model_sig0 + self.model_slope * np.abs(self.model_radius)
        sig[np.where(sig <= 0)] = 0
        return sig

    def cube_model(self):
        """
        Computes the high resolution cube and its PSF-convolved, rebinned version
        """
        self.vel_map_hd = self.disk_velocity()
        # Needs the model center, hence computed here. For cube modelling, sigma and
        # slope should in principle be free parameters of the model.
        self.disp_map_hd = self.linear_velocity_dispersion()
        # The LSF should in principle be given as input (like the PSF); as a first
        # step we assume the LSF is Gaussian, so it is included in sigma.
        # Bug fix: 'vind' was referenced without 'self.' (NameError at runtime).
        self.cube_hd = self.flux * np.exp(-np.subtract(self.vind.reshape(len(self.vind), 1, 1), self.vel_map_hd.reshape(1, self.flux.shape[0], self.flux.shape[1])) ** 2 / (2 * self.disp_map_hd ** 2)) + self.cont
        self.cube_mod = tools.rebin_data(self.psf.convolution(self.cube_hd), self.oversamp)

    def least_square(self, p, fjac=None):
        """
        Function minimized by mpfit.
        Returns the residuals (data-model)/stdev; mpfit squares and sums them itself.

        :param ndarray p: array of parameters
        :return list: [status, flattened residuals]
        """
        self.set_parameters(*p)
        self.cube_model()
        return [0, np.reshape((self.cube - self.cube_mod)/np.sqrt(self.vcube), -1)]  # sqrt, to have uncertainty, i.e. stdev

    def log_likelihood(self, cube, ndim, nparams):
        """
        Log likelihood function maximized by multinest
        Returns -sum[(data-model)^2/(2*err^2)]

        :param ndarray cube: parameter sample provided by multinest (n_params values)
        :param int ndim: number of dimensions if different from the number of parameters
        :param int nparams: number of parameters
        :return float:
        """
        self.set_parameters(cube[0], cube[1], cube[2], cube[3], cube[4], *cube[5:ndim])
        self.cube_model()
        chi2 = -(self.cube_mod - self.cube)**2 / (2*self.vcube)  # no **2 because we use variance
        return np.sum(chi2)
#class Model1D:
This diff is collapsed.
......@@ -70,6 +70,7 @@ class PSF:
# accounting for Gaussian smoothing
self.psf_fft2 = self.psf_o_fft2 * self.psf_s_fft2
self.psf = fftshift(irfft2(self.psf_fft2))
self.psf_fft2_cube = rfft2(fftshift(self.psf).reshape(1, self.psf.shape[0], self.psf.shape[1]))
def convolution(self, data):
......@@ -84,6 +85,23 @@ class PSF:
data2[self.size[0]:data.shape[0] + self.size[0], self.size[1]:data.shape[1] + self.size[1]] = data
data_conv = irfft2(rfft2(data2) * self.psf_fft2)
data_conv = data_conv[self.size[0]:data.shape[0] + self.size[0], self.size[1]:data.shape[1] + self.size[1]].real
data_conv = data_conv[self.size[0]:data.shape[0] + self.size[0], self.size[1]:data.shape[1] + self.size[1]]
return data_conv
def convolution_cube(self, cube):
    """
    Convolve every spectral plane of a datacube with the PSF via FFT.

    The cube is zero-padded spatially by the PSF half-size, multiplied in
    Fourier space by the precomputed PSF transform, and cropped back.

    :param ndarray cube: data cube (nz, ny, nx)
    :return ndarray: convolved cube, same shape as the input
    """
    pad_y, pad_x = self.size[0], self.size[1]
    nz, ny, nx = cube.shape
    # zero-pad the spatial axes to avoid wrap-around from the circular FFT convolution
    padded = np.zeros((nz, ny + 2 * pad_y, nx + 2 * pad_x))
    padded[:, pad_y:ny + pad_y, pad_x:nx + pad_x] = cube
    convolved = irfft2(rfft2(padded) * self.psf_fft2_cube)
    # crop back to the original spatial footprint
    return convolved[:, pad_y:ny + pad_y, pad_x:nx + pad_x]
......@@ -156,6 +156,45 @@ def nfw_velocity(r, rt, vm):
return vr
def iso_reduced_velocity(r):
"""
Reduced rotation curve for a real isothermal profile
:param ndarray: 1D array which contains the reduced radius
"""
rho = np.ones(len(r))
dlnrho = np.zeros(len(r))
drho = np.zeros(len(r))
for i in np.arange(1,len(r)):
rho[i] = rho[i-1] + (r[i] - r[i-1]) * drho[i-1]
dlnrho[i] = 1 / r[i]**2 * (r[i-1]**2 * dlnrho[i-1] - 9 * rho[i] * r[i]**2 * (r[i] - r[i-1]))
drho[i] = rho[i-1] * dlnrho[i]
derivlogrho = np.zeros(len(r))
derivlogrho[2:] = (np.log(rho[2:]) - np.log(rho[1:-1])) / (np.log(r[2:]) - np.log(r[1:-1]))
vc = np.sqrt(-derivlogrho)
vc[1] = (vc[0] + vc[2])/2
return rho, vc
# Precomputed reduced isothermal-profile tables, evaluated once at import time;
# iso_velocity() interpolates on (r_red, vc_red) instead of re-integrating.
nel = 100000
r_red = np.arange(nel)/200
rho_red, vc_red = iso_reduced_velocity(r_red)
def iso_velocity(r, rt, vm):
    """
    Rotation curve for a 'true' Isothermal sphere density profile (see Binney & Tremaine) with a core.

    Interpolates the precomputed reduced curve (r_red, vc_red) at the
    normalised radius and scales it by the normalisation velocity.

    :param ndarray r: 1D or 2D array which contains the radius
    :param float rt: scalelength
    :param float vm: normalisation velocity of the model
    :return ndarray: circular velocity at each input radius
    """
    reduced_radius = r / rt
    return np.interp(reduced_radius, r_red, vc_red) * vm
#def iso_velocity(r, rt, vm):
#"""
#Rotation curve for a Isothermal sphere density profile (see Binney & Tremaine)
......@@ -245,11 +284,7 @@ def zhao_velocity(r, rt, vt, a, b, g):
return vt * (r/rt)**g / (1 + (r/rt)**a)**exp
#def nfw_exponential_velocity(r, rt1, vm1, rt2, vm2):
#return np.sqrt(nfw_velocity(r, rt1, vm1) ** 2 + exponential_velocity(r, rt1, vm1) ** 2)
# Must be at the end of the file
list_model = {'expo': exponential_velocity, 'flat': flat_velocity, 'atan': arctan_velocity, 'hmod': hubblemodified_velocity, 'nfw': nfw_velocity, 'kent': kent_velocity,
#'iso': iso_velocity, 'hernquist': hernquist_velocity, 'jaffe': jaffe_velocity, 'einasto': einasto_velocity,
list_model = {'expo': exponential_velocity, 'flat': flat_velocity, 'atan': arctan_velocity, 'hmod': hubblemodified_velocity, 'nfw': nfw_velocity, 'kent': kent_velocity, 'iso': iso_velocity,
#'hernquist': hernquist_velocity, 'jaffe': jaffe_velocity, 'einasto': einasto_velocity,
'courteau': courteau_velocity, 'zhao': zhao_velocity}
......@@ -288,58 +288,6 @@ def set_params(objects):
return params
def set_params2(objects):
    """
    Set parameters for a single object with possibly multiple components

    :param dictionary objects: 'objects' entry of the configuration file
    :return dictionary: for each object, contains the names, initial guesses, constraints of the parameters, corresponding model and component.
    """
    params = {}
    for objname, obj in objects.items():
        # geometrical parameters are common to all components
        geometrical = {}
        for parname, pardef in obj['params'].items():
            entry = pardef.copy()
            entry['component'] = 'geometrical'
            geometrical[parname] = entry
        params[objname] = {'geometrical': geometrical}
        # rotation-curve parameters, grouped by the component's model name
        for compname, comp in obj['components'].items():
            model_params = {}
            for parname, pardef in comp['params'].items():
                entry = pardef.copy()
                entry['component'] = compname
                model_params[parname] = entry
            params[objname][comp['model']] = model_params
    return params
def set_params3(objects):
    """
    Set parameters for a single object with possibly multiple components

    :param dictionary objects: 'objects' entry of the configuration file
    :return dictionary: for each object, contains the names, initial guesses, constraints of the parameters, corresponding model and component.
    """
    params = {}
    for objname, obj in objects.items():
        flat = {}
        # geometrical parameters first
        for parname, pardef in obj['params'].items():
            entry = pardef.copy()
            entry['model'] = 'geometrical'
            entry['component'] = 'geometrical'
            flat[parname] = entry
        # component parameters may override same-named geometrical entries
        for compname, comp in obj['components'].items():
            for parname, pardef in comp['params'].items():
                entry = pardef.copy()
                entry['model'] = comp['model']
                entry['component'] = compname
                flat[parname] = entry
        params[objname] = flat
    return params
def set_files(filesg, objects):
"""
Set files list from global file entry and objects entry
......@@ -380,25 +328,6 @@ def set_models(objects):
return models
def set_models2(objects):
    """
    Set files list from global file entry and objects entry

    :param dictionary objects: 'objects' entry of the configuration file
    :return dictionary: for each object, a list of the names of the model corresponding to each component
    """
    models = {}
    for objn in objects:
        obj = objects[objn]
        # one model name per component, preserving component order
        # (removed stray debug print() calls that polluted stdout)
        models[objn] = [obj['components'][compn]['model'] for compn in obj['components']]
    return models
def lowres_indices_highres_frame(im_shape, oversamp=1):
"""
Compute low resolution indices in the high resolution frame
......@@ -500,60 +429,6 @@ def write_fits(data, filename, config, model, results, mask=None):
if mask is not None:
data[np.logical_not(mask)] = float('nan')
hdu = fits.PrimaryHDU(data=data)
#for key in config['init fit']['parname']:
for key in model.model_parname:
try:
hdu.header.append((key, results['results'][key]['value'], config['init fit'][key]['desc']))
except KeyError as k:
hdu.header.append((key, results['results'][key]['value']))
logger.exception("key 'desc' not found, parameter '{}' written without description".format(key), exc_info=k)
hdulist = fits.HDUList(hdu)
hdulist.writeto(filename + '.fits', checksum=True, overwrite=True)
def write_fits2(data, filename, config, model, results, mask=None):
    """
    write data in fits file with model's parameters

    :param ndarray data: data to write in fits file
    :param str filename: name of the fits file (with path)
    :param dict config: config file
    :param dict results: dictionary of the results
    :param ndarray[bool] mask: boolean mask
    :return:
    """
    if mask is not None:
        # blank out masked pixels
        data[np.logical_not(mask)] = float('nan')
    hdu = fits.PrimaryHDU(data=data)
    # one header card per fitted parameter, with its description when available
    for key, res in results['results'].items():
        try:
            hdu.header.append((key, res['value'], res['desc']))
        except KeyError as k:
            hdu.header.append((key, res['value']))
            logger.exception("key 'desc' not found, parameter '{}' written without description".format(key), exc_info=k)
    hdulist = fits.HDUList(hdu)
    hdulist.writeto(filename + '.fits', checksum=True, overwrite=True)
def write_fits3(data, filename, config, model, results, mask=None):
"""
write data in fits file with model's parameters
:param ndarray data: data to write in fits file
:param str filename: name of the fits file (with path)
:param dict config: config file
:param dict results: dictionary of the results
:param ndarray[bool] mask: boolean mask
:return:
"""
if mask is not None:
data[np.logical_not(mask)] = float('nan')
hdu = fits.PrimaryHDU(data=data)
for el in results['results']:
......@@ -597,7 +472,7 @@ def search_file(path, filename):
sys.exit()
def make_dir2(path, config):
def make_dir(path, config):
"""
Creates the directory where results will be written
The name of the directory depends on fixed parameters
......@@ -640,121 +515,7 @@ def make_dir2(path, config):
return toreturn
def make_dir(path, config, model):
    """
    Creates the directory where results will be written
    The name of the directory depends on fixed parameters

    :param path: path where fits files are
    :param dict config: YAML config dictionary
    :param model Model2D: model
    :return str: the path
    """
    if path == '.':
        path = './'
    conf_fit = config['config fit']
    dirname = conf_fit['method'] + '_' + conf_fit['model']
    # one-letter suffix per parameter held fixed in the fit
    letters = [key[0] for key in model.model_parname if config['init fit'][key]['fixed'] == 1]
    suffix = ''.join(letters)
    if suffix != '':
        dirname += '_' + suffix
    target = path + dirname
    if not os.path.isdir(target):
        logger.info("\ncreate directory {}".format(dirname))
        os.makedirs(target)
    logger.debug('makedir: {}'.format(target))
    return target
def write_yaml(path, params, galname, whd):
    """
    Sets up the stream and writes the YAML file which contains the results

    :param str path: path where to write the YAML file
    :param dict params: dictionary of the bestfit parameters
    :param str galname: the name of the galaxy
    :param str whd: suffix when a high resolution map is used
    :return:
    """
    dictowrite = {'name': galname, 'results': ''}
    sub_dict = {}
    for key in params['results']:
        sub_dict.update({key: {'value': float(params['results'][key]['value']), 'error': float(params['results'][key]['error'])}})
    dictowrite['results'] = sub_dict
    # fit statistics are optional: each method writes only its own keys
    try:
        dictowrite.update({'mpfit stats': {'chi2r': float(params['mpfit']['chi2r']), 'dof': float(params['mpfit']['dof'])}})
    except KeyError:
        logger.debug("keyError: Key 'mpfit' not found in the results' dictionary")
    try:
        dictowrite.update({'multinest': {'log likelihood': params['multinest']['log likelihood']}})
    except KeyError:
        logger.debug("keyError: Key 'multinest' not found in the results' dictionary")
    # Bug fix: use a context manager so the stream is closed even if yaml.dump raises
    with open(path+'/results'+whd+'.yaml', 'w') as outstream:
        yaml.dump(dictowrite, outstream, default_flow_style=False)
def write_yaml2(path, params, galname, method, whd):
    """
    Sets up the stream and writes the YAML file which contains the results

    :param str path: path where to write the YAML file
    :param dict params: dictionary of the bestfit parameters
    :param str galname: the name of the galaxy
    :param str method: fit method used ('mpfit' or 'multinest'); selects which stats are recorded
    :param str whd: suffix when a high resolution map is used
    :return:
    """
    dictowrite = {'name': galname, 'results': ''}
    sub_dict = {}
    for key in params['results']:
        sub_dict.update({key: {'value': float(params['results'][key]['value']), 'error': float(params['results'][key]['error']), 'component': params['results'][key]['component'], 'model': params['results'][key]['model'], 'desc': params['results'][key]['desc']}})
    dictowrite['results'] = sub_dict
    if method == 'mpfit':
        try:
            dictowrite.update({'mpfit stats': {'chi2r': float(params[method]['chi2r']), 'dof': float(params[method]['dof'])}})
        except KeyError:
            logger.debug("keyError: Key 'mpfit' not found in the results' dictionary")
    elif method == 'multinest':
        try:
            dictowrite.update({'multinest': {'log likelihood': params[method]['log likelihood']}})
        except KeyError:
            logger.debug("keyError: Key 'multinest' not found in the results' dictionary")
    # Bug fix: use a context manager so the stream is closed even if yaml.dump raises
    with open(path+'/results'+whd+'.yaml', 'w') as outstream:
        yaml.dump(dictowrite, outstream, default_flow_style=False)
def write_yaml3(path, params, galname, method, whd):
def write_yaml(path, params, galname, method, whd):
"""
Sets up the stream and writes the YAML file which contains the results
......
......@@ -10,7 +10,8 @@ from astropy.io import fits
from Models.velocity_model import combined_velocity_model
import Tools.tools as tools
from Class.Images import Image
from Class.Model2D import Model2D
from Class.Models import Model2D
#from Class.Model2D import Model3D
from Class.PSF import PSF
from SubProcess.use_mpfit import use_mpfit
from SubProcess.use_multinest import use_multinest
......@@ -78,11 +79,8 @@ def mocking(path=None, filename=None, rank=0):
conffit = config['config fit']
# Reading and organising object entries from the config file
#modparamsobj = tools.set_params2(objects) # YYY Intermediate version: the name of component, model and objects are specified in this dictionary
modparamsobj = tools.set_params(objects) # YYY the name of component, model and objects are specified in this dictionary
#modparamsobj = tools.set_params3(objects) # Older version the name of component, model and objects are specified in this dictionary
modparamsobj = tools.set_params(objects) # the name of component, model and objects are specified in this dictionary
files = tools.set_files(filesg, objects) # for the 'flux' entry, one dictionary per object
#modelsobj = tools.set_models2(objects) # older version: the name of component, model and objects are specified in this dictionary
modelsobj = tools.set_models(objects) # the name of component, model and objects are specified in this dictionary
# Search and open images file
......@@ -162,7 +160,7 @@ def mocking(path=None, filename=None, rank=0):
return
# Create output path
out_path = tools.make_dir2(path, config)
out_path = tools.make_dir(path, config)
# Choose the method from the config file
if conffit['method'] == 'mpfit' and rank == 0:
......@@ -215,36 +213,23 @@ def mocking(path=None, filename=None, rank=0):
#print(par, el[par])
#print(best_param)
#best_param = [results['results'][key]['value'] for key in model.model_parname]
#best_param = [results['results'][key]['value'] for key in modparams['parname']]
model.set_parameters(*best_param)
model.velocity_map()
model.vel_disp_map()
#tools.write_fits(model.vel_map, out_path+'/modv'+whd, config, results, mask=vel.mask)