Commit ad575320 authored by bepinat's avatar bepinat

First commit from Jeremy work

parents
import numpy as np
from astropy.io import fits
from scipy.interpolate import interp2d
import Tools.tools as tools
class Image:
    """
    Stores a fits image (or a bare numpy array) and derives its basic
    parameters (size, length, height, mask, ...).

    :param string or ndarray filename: path+name of the file, or a numpy array.
    :param ndarray mask: optional boolean mask of valid pixels; when None,
        the mask is the set of non-NaN pixels of the input data.
    """

    def __init__(self, filename, mask=None):
        self.data_rebin = None
        if type(filename) is str:
            self.header = fits.getheader(filename)
            self.data = fits.getdata(filename)
            # NOTE(review): this branch takes high/length from NAXIS1/NAXIS2
            # while the ndarray branch uses the opposite (row, col) order —
            # confirm which axis convention is intended.
            self.high = self.header['NAXIS1']
            self.length = self.header['NAXIS2']
        elif type(filename) is np.ndarray:
            self.header = None
            self.data = filename
            self.high = np.shape(filename)[0]
            self.length = np.shape(filename)[1]
        else:
            # Previously unsupported types failed silently with a half-built
            # instance; fail fast instead.
            raise TypeError("filename must be a str or a numpy ndarray")
        self.size = np.array(np.shape(self.data))
        self.len = self.length * self.high
        self.oversample = 1
        # Bug fix: the mask must be computed BEFORE NaNs are replaced by 0,
        # otherwise (for ndarray input) no pixel was ever masked out.
        if mask is None:
            self.mask = np.logical_not(np.isnan(self.data))
        else:
            self.mask = mask
        self.nan_to_num()

    def nan_to_num(self):
        """Replace NaNs in the data by 0 (np.nan_to_num returns a copy)."""
        self.data = np.nan_to_num(self.data)

    def get_size(self):
        """Return the image size as an ndarray (shape of the data)."""
        return self.size

    def get_length(self):
        """Return the image length."""
        return self.length

    def get_lenght(self):
        """Deprecated misspelled alias of :meth:`get_length`, kept for existing callers."""
        return self.length

    def get_high(self):
        """Return the image height."""
        return self.high

    def set_oversamp(self, oversamp):
        """Set the oversampling factor used by :meth:`interpolation`."""
        self.oversample = oversamp

    def get_oversamp(self):
        """Return the oversampling factor."""
        return self.oversample

    def interpolation(self):
        """
        Perform a linear interpolation of the image at higher resolution
        (by the oversampling factor) and store the new image in place.
        """
        x = np.linspace(0, self.length, self.length)
        y = np.linspace(0, self.high, self.high)
        new_x = np.linspace(0, self.length, self.length * int(self.oversample))
        new_y = np.linspace(0, self.high, self.high * int(self.oversample))
        data = np.reshape(self.data, -1)
        # NOTE(review): scipy.interpolate.interp2d is deprecated and removed
        # in scipy >= 1.14; consider RegularGridInterpolator when upgrading.
        func = interp2d(x, y, data, kind='linear', fill_value=0)
        self.data = np.array(func(new_x, new_y)).transpose()
        self.high *= self.oversample
        self.length *= self.oversample
        self.size *= self.oversample

    def conv_inter_flux(self, psf):
        """
        Convolve the (interpolated) image with a psf, rebin the result back to
        the low resolution grid and store it in ``data_rebin``.

        :param PSF psf: psf object
        """
        self.data_rebin = tools.rebin_data(psf.convolution(self.data), self.oversample)
import math
import numpy as np
import Tools.tools as tools
from Class.PSF import PSF
from Class.Images import Image
class Model2D:
    """
    Model in 2D of the velocity field of a galaxy.

    Compute the velocity field and the dispersion of a model using observed
    data in input. This model can be used only with rotational curves with
    3 parameters like those in 'velocity_model.py'.
    """

    def __init__(self, vel, errvel, flux, psf, vel_model, sig0, slope=0.):
        """
        :param Image vel: the velocity field to fit
        :param Image errvel: the error map of the velocity (vel)
        :param Image flux: flux distribution at the same or higher resolution
            than the velocity
        :param PSF psf: the psf object
        :param func vel_model: velocity function used to create the model
        :param float sig0: velocity dispersion of the model
        :param float slope: slope of the velocity dispersion
        """
        # Model parameters, filled later by set_parameters()
        self.model_pa = None
        self.model_incl = None
        self.model_xc = None
        self.model_yc = None
        self.model_vm = None
        self.model_vs = None
        self.model_rt = None
        self.model_radius = None
        self.model_theta = None
        # Parameters which must be initialized
        self.vel_model = vel_model
        self.model_sig0 = sig0
        self.model_slope = slope
        self.vel_map = np.zeros(vel.get_size())
        self.vel_disp = np.zeros(vel.get_size())
        self.vel_map_hd = np.zeros(np.array(flux.get_size()))
        self.vel = vel
        self.errvel = errvel
        self.flux = flux
        self.psf = psf

    def set_parameters(self, xc, yc, pa, incl, vs, vm, rt):
        """
        Set the value of the model parameters.

        The center and the characteristic radius are given in the low
        resolution frame and stored in the high resolution (oversampled) one.

        :param float xc: abscissa of the center in pixel
        :param float yc: ordinate of the center in pixel
        :param float pa: position angle of the major axis in degree
        :param float incl: inclination of the disk in degree
        :param float vs: systemic velocity in km/s
        :param float vm: maximum rotation velocity in km/s
        :param float rt: characteristic radius, where the maximum velocity is reached
        """
        self.model_pa = pa
        self.model_incl = incl
        self.model_xc = (xc + 0.5) * self.flux.get_oversamp() - 0.5
        self.model_yc = (yc + 0.5) * self.flux.get_oversamp() - 0.5
        self.model_vm = vm
        self.model_vs = vs
        self.model_rt = rt * self.flux.get_oversamp()
        self.model_radius, self.model_theta = tools.sky_coord_to_galactic(self.model_xc, self.model_yc, self.model_pa, self.model_incl,
                                                                          im_size=np.shape(self.vel_map_hd))

    def get_parameter(self):
        """
        Get the actual parameters of the model, converted back to the low
        resolution scale.

        Bug fix: set_parameters() stores the center and radius in the high
        resolution frame; converting back requires DIVIDING by the
        oversampling factor (the previous code multiplied a second time).

        :return list:
        """
        oversamp = self.flux.get_oversamp()
        return [(self.model_xc + 0.5) / oversamp - 0.5,
                (self.model_yc + 0.5) / oversamp - 0.5,
                self.model_pa, self.model_incl, self.model_vs, self.model_vm,
                self.model_rt / oversamp]

    def disk_velocity(self):
        """
        Compute the high resolution velocity field from the rotational curve.

        :return ndarray:
        """
        vr = self.vel_model(self.model_radius, self.model_rt, self.model_vm)
        # Project the rotation on the line of sight and add the systemic velocity.
        v = vr * math.sin(math.radians(self.model_incl)) * self.model_theta + self.model_vs
        return v

    def velocity_map(self):
        """
        Calculate the (flux-weighted, psf-convolved, rebinned) velocity map
        of the model on the masked low resolution pixels.
        """
        # NOTE(review): requires flux.data_rebin, i.e. flux.conv_inter_flux()
        # must have been called beforehand.
        self.vel_map_hd = self.disk_velocity()
        vel_times_flux = tools.rebin_data(self.psf.convolution(self.flux.data * self.vel_map_hd), self.flux.get_oversamp())
        self.vel_map[self.vel.mask] = vel_times_flux[self.vel.mask] / self.flux.data_rebin[self.vel.mask]

    def linear_velocity_dispersion(self):
        """
        Return the velocity dispersion map needed for the fit process.

        :return ndarray: velocity dispersion map (clipped at 0)
        """
        sig = self.model_sig0 + self.model_slope * np.abs(self.model_radius)
        sig[np.where(sig <= 0)] = 0
        return sig

    def vel_disp_map(self):
        """
        Calculate the velocity dispersion map from the velocity field (with
        bestfit parameters) and the flux distribution, using the method of
        moments: sqrt(<sig^2> + <v^2> - <v>^2) with flux/psf weighting.

        :return ndarray: velocity dispersion map
        """
        term1 = np.zeros(self.vel.size)
        term2 = np.zeros(self.vel.size)
        term3 = np.zeros(self.vel.size)
        sig = self.linear_velocity_dispersion()
        oversamp = self.flux.get_oversamp()
        term1[self.vel.mask] = tools.rebin_data(self.psf.convolution(sig ** 2 * self.flux.data), oversamp)[self.vel.mask] \
            / self.flux.data_rebin[self.vel.mask]
        term2[self.vel.mask] = tools.rebin_data(self.psf.convolution(self.vel_map_hd ** 2 * self.flux.data), oversamp)[self.vel.mask] \
            / self.flux.data_rebin[self.vel.mask]
        term3[self.vel.mask] = (tools.rebin_data(self.psf.convolution(self.vel_map_hd * self.flux.data), oversamp)[self.vel.mask] /
                                self.flux.data_rebin[self.vel.mask]) ** 2
        self.vel_disp = np.sqrt(term1 + term2 - term3)

    def least_square(self, p, fjac=None):
        """
        Function minimized by mpfit; returns (data-model)/err on masked pixels.

        :param ndarray p: array of parameters (xc, yc, pa, incl, vs, vm, rt)
        :param fjac: unused, required by the mpfit interface
        :return list: [status, residuals]
        """
        self.set_parameters(*p)
        self.velocity_map()
        return [0, np.reshape((self.vel.data[self.vel.mask] - self.vel_map[self.vel.mask]) / self.errvel.data[self.vel.mask], -1)]

    def log_likelihood(self, cube, ndim, nparams):
        """
        Log likelihood function maximized by multinest:
        -sum[(data-model)^2/(2*err^2)].

        :param ndarray cube: parameter vector with n_params dimensions
        :param int ndim: number of dimensions if different from the number of parameters
        :param int nparams: number of parameters
        :return float:
        """
        self.set_parameters(cube[0], cube[1], cube[2], cube[3], cube[4], cube[5], cube[6])
        self.velocity_map()
        chi2 = -(self.vel_map[self.vel.mask] - self.vel.data[self.vel.mask]) ** 2 / (2 * self.errvel.data[self.vel.mask] ** 2)
        return np.sum(chi2)
import math
import numpy as np
from numpy.fft import fftshift, fft2, ifft2
from Class.Images import Image
class PSF:
    """
    Create a 2D gaussian as PSF and store all the information about the psf.

    Perform the convolution of an image with the stored psf through the
    method 'convolution'. The class can also be initiated with an image of a
    PSF, in which case no gaussian is created. To avoid (at least minimize)
    border effects, the class needs the size of the highest resolution image,
    passed through an Image instance.
    """

    def __init__(self, flux, img_psf=None, fwhm_lr=3.5, smooth=0):
        """
        Constructor of the PSF class.

        :param Image flux: image from which the size (and oversampling) is taken
        :param ndarray img_psf: image of the PSF (optional)
        :param float fwhm_lr: fwhm of the gaussian in low resolution pixels, default is 3.5
        :param float smooth: additional smoothing fwhm, combined quadratically
        """
        # Bug fix: im_size was only assigned in the gaussian branch, making
        # the img_psf branch crash on the undefined attribute.
        self.im_size = flux.size
        self.psf = img_psf
        if self.psf is None:
            self.fwhm = fwhm_lr * flux.oversample
            self.smooth = smooth * flux.oversample
            self.fwhm_f = np.sqrt(self.fwhm ** 2 + self.smooth ** 2)
            # (2*fwhm) is considered to avoid the boundary effect after the convolution
            self.size = np.array([2 * self.fwhm_f, 2 * self.fwhm_f])
            # pad up to the next power of two for the FFT
            ideal_size = 2 ** np.ceil(np.log(self.im_size + 2 * self.size) / math.log(2))
            # to prevent any problems in the future, self.size has been forced to be an array of int
            self.size = np.array((ideal_size - self.im_size) / 2, dtype=int)
            y, x = np.indices((self.im_size[0] + 2 * self.size[0], self.im_size[1] + 2 * self.size[1]))
            sigma = self.fwhm_f / (2 * math.sqrt(2 * math.log(2)))
            self.psf = (1. / (2 * math.pi * sigma ** 2)) \
                * np.exp(-((x - self.im_size[1] / 2 - self.size[1]) ** 2
                           + (y - self.im_size[0] / 2 - self.size[0]) ** 2) / (2.0 * sigma ** 2))
            # normalization in order to ensure flux conservation
            self.psf /= self.psf.sum()
        else:
            self.size = np.array(img_psf.shape)
            psf_frame = np.zeros((self.im_size[0] + 2 * self.size[0], self.im_size[1] + 2 * self.size[1]))
            # Bug fix: slice indices must use integer (floor) division; '/'
            # yields floats under Python 3 and raises a TypeError.
            psf_frame[self.im_size[0] // 2 + self.size[0] // 2:self.im_size[0] // 2 + 3 * self.size[0] // 2,
                      self.im_size[1] // 2 + self.size[1] // 2:self.im_size[1] // 2 + 3 * self.size[1] // 2] = img_psf
            self.psf = psf_frame
            self.psf /= self.psf.sum()
        # precompute the fft of the (centered) psf once, reused in convolution()
        self.psf_fft2 = fft2(fftshift(self.psf))

    def convolution(self, data):
        """
        Do the convolution product between the data and the PSF via FFT,
        with zero padding to limit boundary effects.

        :param ndarray data:
        :return ndarray: convolved image, same shape as data
        """
        data2 = np.zeros((data.shape[0] + 2 * self.size[0], data.shape[1] + 2 * self.size[1]))
        data2[self.size[0]:data.shape[0] + self.size[0], self.size[1]:data.shape[1] + self.size[1]] = data
        data_conv = ifft2(fft2(data2) * self.psf_fft2)
        data_conv = data_conv[self.size[0]:data.shape[0] + self.size[0], self.size[1]:data.shape[1] + self.size[1]].real
        return data_conv
"""
Contains all classes used in the program and its subprocesses.
"""
\ No newline at end of file
"""
This module contains the models of velocity and flux distribution.
To add more models, edit or create a new python file in this module.
Currently this module contains:
- flux_model.py
- velocity_model.py
"""
\ No newline at end of file
import numpy as np
import Tools.tools as tools
def flat_disk_intensity(xc, yc, pa, incl, rd, center_bright, rtrunc, im_size):
    """
    Flux map of a disk with constant surface brightness, truncated at rtrunc.

    :param float xc: abscissa of the center in pixel
    :param float yc: ordinate of the center in pixel
    :param float pa: position angle of the major axis in degree
    :param float incl: inclination of the disk in degree
    :param float rd: disk scale length (unused by the flat profile, kept for
        a uniform flux-model signature)
    :param float center_bright: central surface brightness
    :param float rtrunc: truncation radius beyond which the flux is 0
    :param ndarray im_size: size of the output map
    :return ndarray: flux map
    """
    radius, _ = tools.sky_coord_to_galactic(xc, yc, pa, incl, im_size=im_size)
    intensity = np.zeros(np.shape(radius))
    inside = radius <= rtrunc
    intensity[inside] = center_bright
    return intensity
def exponential_disk_intensity(xc, yc, pa, incl, rd, center_bright, rtrunc, im_size):
    """
    Flux map of an exponential disk, truncated at rtrunc.

    :param float xc: abscissa of the center in pixel
    :param float yc: ordinate of the center in pixel
    :param float pa: position angle of the major axis in degree
    :param float incl: inclination of the disk in degree
    :param float rd: disk scale length
    :param float center_bright: central surface brightness
    :param float rtrunc: truncation radius beyond which the flux is 0
    :param ndarray im_size: size of the output map
    :return ndarray: flux map
    """
    radius, _ = tools.sky_coord_to_galactic(xc, yc, pa, incl, im_size=im_size)
    if rd == 0:
        # degenerate scale length: constant brightness everywhere
        intensity = center_bright * np.exp(0 * radius)
    else:
        intensity = center_bright * np.exp(- np.abs(radius) / rd)
    intensity[radius > rtrunc] = 0.
    return intensity
import numpy as np
from scipy.special import i0, i1, k0, k1
def exponential_velocity(r, rt, vm):
    """
    Velocity function for an exponential disk.

    :param ndarray r: 2D array which contains the radii
    :param int rt: radius at which the maximum velocity is reached
    :param float vm: maximum velocity of the model
    :return ndarray: velocity at each radius
    """
    rd = rt / 2.15  # disk scale length (the curve peaks near 2.15 rd)
    vr = np.zeros(np.shape(r))
    nonzero = np.where(r != 0)  # the formula is singular at the center
    half = 0.5 * r[nonzero] / rd
    amplitude = r[nonzero] / rd * vm / 0.88
    vr[nonzero] = amplitude * np.sqrt(i0(half) * k0(half) - i1(half) * k1(half))
    return vr
def flat_velocity(r, rt, vm):
    """
    Velocity function for a flat disk: linear rise up to rt, then constant vm.

    :param ndarray r: 2D array which contains the radii
    :param int rt: radius at which the maximum velocity is reached
    :param float vm: maximum velocity of the model
    :return ndarray: velocity at each radius
    """
    rising = r <= rt
    vr = np.empty(np.shape(r))
    vr[rising] = vm * r[rising] / rt
    vr[~rising] = vm
    return vr
def arctan_velocity(r, rt, vm):
    """
    Arctangent rotation curve, asymptotically approaching vm.

    :param ndarray r: 2D array which contains the radii
    :param int rt: transition radius of the curve
    :param float vm: asymptotic velocity of the model
    :return ndarray: velocity at each radius
    """
    shape = np.arctan(2 * r / rt)
    return 2 * vm / np.pi * shape
# Must be at the end of the file: maps model names (as used in the
# configuration) to the velocity functions defined above.
list_model = {'exp': exponential_velocity, 'flat': flat_velocity, 'arctan': arctan_velocity}
# name
### What is it?
**GalKin: Galactic Kinematic** fits a velocity model to the observed velocity field of a galaxy.
### Model
To compute the model, we use the method of moments from:
<a href="http://adsabs.harvard.edu/abs/2010MNRAS.401.2113E">
Epinat, B., Amram, P., Balkowski, C., & Marcelin, M. 2010, MNRAS, 401, 2113</a>
You can also add more rotational curves in "velocity_model.py" or create another model (e.g. a cube).
### There are two methods implemented
- MPFIT: uses the Levenberg-Marquardt technique to solve the
least-squares problem.
- PyMultiNest: uses a Nested Sampling Monte Carlo library and Bayesian statistics.
## Installation
The program needs PyMultiNest installed; please refer <a href="https://johannesbuchner.github.io/PyMultiNest/">here</a> for
its installation guide.
The program also needs the **astropy** and **yaml** libraries.
## How to launch?
(name) can be launched from the command line with
```
galkin.py path filename
```
or in a Python script/console by importing it first and calling the main function
```
import galkin.py
galkin.galkin(path, filename, rank=0)
```
where path can be absolute or relative and filename is the name of the **YAML** configuration file. The parameter rank is the id of the thread.
For example, if you run the program with MPI on 4 cores, rank ranges from 0 to 3.
### Compatible with MPI4PY
For more information and installation instructions, follow <a href="http://pythonhosted.org/mpi4py/">this link</a>.
To execute the program with **mpi4py**:
```
mpiexec -n (nbcore) galkin.py path filename
```
## Output
Outputs are written in a directory alongside the **YAML** config file. The name of the directory depends on the method, the model and the fixed parameters, as in:
method_model_fixedparams. A summary of the model parameters is written in the header of the fits file and in a **YAML** file.
### Example
There is a Example directory in which you have fits files and a **YAML** file as example and try the program.
### Create model
The subprocess for creating maps was used to validate the program (cross test) against existing programs.
This subprocess needs to be updated following the latest changes to the architecture and the addition of the **YAML** configuration format.
#!/usr/bin/env python
import os
import sys
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname('../Models/'), '..')))
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname('../Class/'), '..')))
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname('../Tools/'), '..')))
import numpy as np
from numpy.fft import fftshift, fft2, ifft2
import math
import Models.velocity_model as vm
import Models.flux_model as fm
import Tools.tools as tools
from Class.Images import Image
from Class.PSF import PSF
from Class.Model2D import Model2D
import argparse
from astropy.io import ascii
def main(parser):
"""
This section is not update from last changes of other modules, they must be pass parameters from a YAML config file
:param parser:
:return:
"""
parser.add_argument('path', help='directory where will create the map', type=str)
parser.add_argument('-fm', default='flat', help='flux model', type=str)
parser.add_argument('-vm', default='flat', help='velocity model', type=str)
parser.add_argument('-pa', default=0, type=float, help='position angel from the north to the left of the galaxy')
parser.add_argument('-incl', default=45, type=float, help='inclination of th galactic plane')
parser.add_argument('-vs', default=0, type=float, help='systemic velocity')
parser.add_argument('-vmax', default=200, type=float, help='characteristic velocity of the model')
parser.add_argument('-rdv', default=4, type=float, help='characteristic radius of the velocity model')
parser.add_argument('-rdf', default=6, type=float, help='characteristic radius of the flux model')
parser.add_argument('-sig0', default=40, type=float, help='velocity dispersion')
parser.add_argument('-fwhm', default=3.5, type=float, help='fwhm of a gaussian psf')
parser.add_argument('-smooth', default=0, type=float, help='fwhm for smooth images')
parser.add_argument('-rt', default=8, type=float, help='truncated radius after which the flux is set to 0')
parser.add_argument('-osamp', default=5, type=int, help='oversampling to compute high definition')
parser.add_argument('-center', nargs='+', default=(15, 15), type=float, help='center of the galaxy')
parser.add_argument('-size', nargs='+', default=(30, 30), type=int, help='size of images in low resolution')
parser.add_argument('-prefix', type=str, help='prefix add to filename')
parser.add_argument('-suffix', type=str, help='suffix add to filename')
parser.add_argument('-slope', type=float, help="")
args = parser.parse_args()
fm_list = {'exp': fm.exponential_disk_intensity, 'flat': fm.flat_disk_intensity}
vm_list = {'exp': vm.exponential_velocity, 'flat': vm.flat_velocity, 'arctan': vm.arctan_velocity}
center_bright = 2000
# oversampling paramters#
rdv = args.rdv * args.osamp
rdf = args.rdf * args.osamp
fwhm = args.fwhm * args.osamp
smooth = args.smooth * args.osamp
new_xcen = (args.center[0]+0.5)*args.osamp-0.5
new_ycen = (args.center[1]+0.5)*args.osamp-0.5
im_size = np.array(args.size) * args.osamp
rtrunc = args.rt * args.osamp
print('\n create array {}x{}'.format(*im_size))
def conv_psf(data, fwhm):
    """
    Convolve `data` with a gaussian psf of the given fwhm via FFT,
    zero-padding the image to limit boundary effects.

    NOTE: `im_size` is taken from the enclosing main() scope (size of the
    high resolution image).

    :param ndarray data: image to convolve
    :param float fwhm: fwhm of the gaussian psf in pixels
    :return ndarray: convolved image, same shape as data
    """
    # (2*fwhm) is considered to avoid the boundary effect after the convolution
    size = np.array([2 * fwhm, 2 * fwhm])
    # pad up to the next power of two for the FFT
    ideal_size = 2 ** np.ceil(np.log(im_size + 2 * size) / math.log(2))
    # to prevent any problems in the future, size has been forced to be an array of int
    size = np.array((ideal_size - im_size) / 2, dtype=int)
    y, x = np.indices((im_size[0] + 2 * size[0], im_size[1] + 2 * size[1]))
    sigma = fwhm / (2 * math.sqrt(2 * math.log(2)))
    # gaussian centered in the padded frame
    psf = (1. / (2 * math.pi * sigma ** 2)) * np.exp(-((x - im_size[1] / 2 - size[1]) ** 2 + (y - im_size[0] / 2 - size[0]) ** 2) / (2.0 * sigma ** 2))
    # normalization in order to ensure flux conservation
    psf /= psf.sum()
    data2 = np.zeros((data.shape[0] + 2 * size[0], data.shape[1] + 2 * size[1]))
    data2[size[0]:data.shape[0] + size[0], size[1]:data.shape[1] + size[1]] = data
    psf_fft2 = fft2(fftshift(psf))
    data_conv = ifft2(fft2(data2) * psf_fft2)
    # crop back to the original frame and drop the imaginary residue
    data_conv = data_conv[size[0]:data.shape[0] + size[0], size[1]:data.shape[1] + size[1]].real
    return data_conv
# create flux maps
flux = fm_list[args.fm](new_xcen, new_ycen, args.pa, args.incl, rdf, center_bright, rtrunc, im_size)
# fluxw = np.copy(flux)
flux_rebin = tools.rebin_data(conv_psf(flux, np.sqrt(fwhm**2+args.smooth**2)), args.osamp)
# create velocity maps
flux_ld = Image(flux_rebin, mask=flux_rebin>0.1)
flux_hd = ImageOverSamp(flux_rebin, rdv, oversamp=args.osamp)
psf = PSF(flux_hd, fwhm_ld=args.fwhm, smooth=args.smooth)
flux_hd.conv_inter_flux(psf)
model = Model2D(flux_ld, flux_hd, args.sig0, slope=args.slope)
model.set_parameters(args.center[0], args.center[1], args.pa, args.incl, args.vs, args.vmax, args.rdv, flux_hd)
model.velocity_map(psf, flux_ld, flux_hd, vm_list[args.vm])
# flux[np.where(flux < 0.1)] = 0
print(' write flux hd')
tools.write_fits(new_xcen, new_ycen, args.pa, args.incl, args.vs, args.vmax, rdv, 0, psf.convolution(flux), args.path +'flux_HD')
# flux_rebin[np.where(flux_rebin < 0.1)] = 0
print(' write flux ld')