Commit 05886a0a authored by LUSTIG Peter

deleted old files

parent 02bb0e88
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import dill as pickle
import matplotlib.pyplot as plt
import numpy as np
import astropy.units as u
from nikamap import NikaMap
# from completness import *
import myfunctions as mfct
from time import perf_counter
from multiprocess import Pool
from functools import partial
import sys
from astropy.table import vstack
import completness as cp
import os

plt.close('all')

flux_bins = 2
flux_range = [0.1, 10]
threshold_bins = 5
threshold_range = [2, 10]
# threshold_range = [3, 5]
nsim = 10
flux = 10 * u.mJy
noisefile = "../../N2CLS/ForMarseille/HLS091828_common_mode_kids_out/map_JK.fits"
method = "unknown"
outfile = None
outdir = "results/"
ncore = 4
'''
import argparse
desc = ("Place false sources with a defined flux on jackknife maps, detect "
        "sources and calculate the purity and completeness of the detection.")
parser = argparse.ArgumentParser(description=desc)
parser.add_argument(action="store",
                    dest="noisefile",
                    help="Name of the noise-containing input file")
parser.add_argument("-s", "--simulations", action="store",
                    dest="nsim", type=int, default=10,
                    help="Number of simulations")
parser.add_argument("-f", "--flux", action="store",
                    dest="flux", type=float, default=10.,
                    help="Flux of the created sources in mJy")
parser.add_argument("-m", "--method", action="store",
                    dest="method", default="unknown",
                    help="Name of the reduction method for the outfile header")
parser.add_argument("-o", "--outfile", action="store",
                    dest="outfile", default=None,
                    help="Output file name. If not specified: "
                         "completness-method-flux_[flux]-nsim_[nsim].fits")
parser.add_argument("-d", "--outdir", action="store",
                    dest="outdir", default="",
                    help="Output directory. Default: this folder.")
args = parser.parse_args()
nsim = args.nsim
flux = args.flux * u.mJy
noisefile = args.noisefile
method = args.method
outfile = args.outfile
outdir = args.outdir
# flux bins / flux range / threshold bins / threshold range
'''
# ############## Begin Calculation ##############
sim_per_core = mfct.Distribute_Njobs(ncore, nsim)
data = NikaMap.read(noisefile)

fluxes = np.logspace(np.log10(flux_range[0]), np.log10(flux_range[1]),
                     flux_bins) * u.mJy
fluxes_edges = np.logspace(np.log10(flux_range[0]), np.log10(flux_range[1]),
                           flux_bins + 1) * u.mJy
threshold = np.linspace(threshold_range[0], threshold_range[1], threshold_bins)
threshold_edges = np.linspace(threshold_range[0], threshold_range[1],
                              threshold_bins + 1)

# Create the 4D (ra, dec, threshold, flux) wcs
shape_4D, wcs_4D = cp.completness_purity_wcs(
    data.shape, data.wcs, bins=20,
    flux_range=flux_range, flux_bins=flux_bins, flux_log=True,
    threshold_range=threshold_range, threshold_bins=threshold_bins)

# Testing the lower edges
wcs_threshold = wcs_4D.sub([3])
assert np.all(np.abs(wcs_threshold.all_pix2world(np.arange(threshold_bins + 1) - 0.5, 0)
                     - threshold_edges) < 1e-15)
wcs_flux = wcs_4D.sub([4])
assert np.all(np.abs(wcs_flux.all_pix2world(np.arange(flux_bins + 1) - 0.5, 0)
                     - fluxes_edges.value) < 1e-13)
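# (The WCS puts CRPIX=1 at the first bin *center*, so pixel coordinate -0.5
# falls on the lower edge of the first bin; the asserts above simply check
# that the WCS-reconstructed edges match the numpy-computed ones.)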
T0 = perf_counter()
print('------------')
# data.plot()
# plt.show(block=True)

helpfunc = partial(cp.completness_purity_2, flux,
                   **{"nsources": 8**2, "within": (0, 1),
                      "shape": shape_4D[0:3],
                      "wcs": wcs_4D.sub([1, 2, 3]), "jk_map": data})
pool = Pool(ncore)
print(sim_per_core)
tmpres = pool.map(helpfunc, sim_per_core)
tmpres = list(zip(*tmpres))

all_comp = np.sum(tmpres[0], axis=0)
all_comp_n = np.sum(tmpres[1], axis=0)
all_pur = np.sum(tmpres[2], axis=0)
all_pur_n = np.sum(tmpres[3], axis=0)
sourceresult = vstack(tmpres[4])
print(all_comp.shape)
print(all_comp_n.shape)

# norm can be 0, so avoid warnings on invalid values
with np.errstate(divide='ignore', invalid='ignore'):
    completness = all_comp / all_comp_n[..., np.newaxis]
    purity = all_pur / all_pur_n

if method == "unknown" and "/" in noisefile:
    method = noisefile.split("/")[-2]
if outfile is None:
    outfile = "completness-" + method + "-flux_{:04.0f}-nsim_{:03}.fits".format(flux.value, nsim)
if outdir != "":
    if outdir[-1] != "/":
        outdir = outdir + "/"
    if not os.path.exists(outdir):
        os.makedirs(outdir)
    outfile = outdir + outfile

mfct.WriteResult(outfile, threshold, completness, purity, nsim, wcs_4D,
                 method, flux, sourceresult)
cp.Plot_CompPur(completness, purity, threshold, nsim=nsim,
                savename=outfile[0:-5] + "-plot.png")

T = perf_counter() - T0
print('Total calculation time: {:.0f}s'.format(T))
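# ---------------------------------------------------------------------------
# Second deleted file: the module imported above as `completness`
# (it defines completness_purity_wcs, completness_purity_2 and Plot_CompPur).
# ---------------------------------------------------------------------------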
from __future__ import absolute_import, division, print_function

from pathlib import Path
import os
import numpy as np
import matplotlib.pyplot as plt
# from multiprocess import Pool, cpu_count
from functools import partial
from astropy import units as u
from astropy.io import ascii
from astropy.io import fits
from astropy.wcs import WCS
from astropy.utils.console import ProgressBar
from astropy.table import Table, vstack
from scipy.optimize import curve_fit
from nikamap import NikaMap, Jackknife
from nikamap.utils import pos_uniform
from IPython import get_ipython
from mpl_toolkits.axes_grid1 import make_axes_locatable
import myfunctions as mfct

ipython = get_ipython()
if ipython is not None:  # get_ipython() returns None outside IPython
    ipython.magic('load_ext autoreload')
    ipython.magic('autoreload 2')
    ipython.magic('matplotlib tk')
def add_axis(name, range, bins, unit=None, i_axe=3, log=False):
    """Define a dictionary for an additional wcs axis (linear or log)."""
    header = {'CTYPE{}'.format(i_axe): name,
              'CRPIX{}'.format(i_axe): 1,
              'CUNIT{}'.format(i_axe): unit}
    if log:
        # Log scale (edges definition)
        log_step = (np.log(range[1]) - np.log(range[0])) / bins
        center_start = np.exp(np.log(range[0]) + log_step / 2)
        header['CTYPE{}'.format(i_axe)] += '-LOG'
        header['CRVAL{}'.format(i_axe)] = center_start
        header['CDELT{}'.format(i_axe)] = log_step * center_start
        # Log scale (center definition)
        # log_step = (np.log(flux_range[1]) - np.log(flux_range[0])) / (bins - 1)
        # center_start = range[0]
    else:
        # Linear scale (edges definition)
        step = (range[1] - range[0]) / bins
        header['CRVAL{}'.format(i_axe)] = range[0] + step / 2
        header['CDELT{}'.format(i_axe)] = step
        # Linear scale (center definition)
        # step = (range[1] - range[0]) / (bins - 1)
    return header
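# Usage sketch (illustrative values only): a linear threshold axis from 2 to
# 10 in 5 bins yields CRVAL3 = 2.8 (the center of the first bin) and
# CDELT3 = 1.6:
#   hdr = add_axis('THRESHOLD', (2, 10), 5, i_axe=3)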
def completness_purity_wcs(shape, wcs, bins=30,
                           flux_range=(0, 1), flux_bins=10, flux_log=False,
                           threshold_range=(0, 1), threshold_bins=10,
                           threshold_log=False):
    """Build a wcs for the completness_purity function."""
    slice_step = np.ceil(np.asarray(shape) / bins).astype(int)
    celestial_slice = (slice(0, shape[0], slice_step[0]),
                       slice(0, shape[1], slice_step[1]))
    # [WIP]: Shall we use a 4D WCS? (ra/dec/threshold/flux)
    # [WIP]: -TAB does not seem to be very easy to do with astropy.
    # Basically working...
    header = wcs[celestial_slice[0], celestial_slice[1]].to_header()
    header['WCSAXES'] = 4
    header.update(add_axis('THRESHOLD', threshold_range, threshold_bins, i_axe=3))
    header.update(add_axis('FLUX', flux_range, flux_bins, i_axe=4, log=True))
    return (bins, bins, threshold_bins, flux_bins), WCS(header)
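# Note: the returned shape is in FITS axis order (x, y, threshold, flux),
# i.e. the order of wcs_world2pix outputs used by the workers below, not in
# numpy array order.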
def fake_worker(img, min_threshold=2, nsources=8**2, flux=1 * u.Jy,
                within=(1 / 4, 3 / 4), pos_gen=pos_uniform,
                photmethod='peak', **kwargs):
    """The completeness/purity worker: create a fake dataset from a jackknifed
    image, detect sources in the doped image and return the catalogs.

    Parameters
    ----------
    img : :class:`nikamap.NikaMap`
        jackknifed dataset
    min_threshold : float
        minimum threshold for the detection
    **kwargs :
        arguments for source injection

    Returns
    -------
    mf_img.sources :
        detected sources table
    mf_img.fake_sources :
        created fake sources table
    """
    # Renormalize the stddev
    std = img.check_SNR()
    img.uncertainty.array *= std

    # Actually rather slow... maybe check the code?
    img.add_gaussian_sources(nsources=nsources, within=within, peak_flux=flux,
                             pos_gen=pos_gen, **kwargs)

    # ... match-filter it ...
    mf_img = img.match_filter(img.beam)
    mf_img.uncertainty.array *= mf_img.check_SNR()

    # ... and detect sources with the lowest threshold...
    # The gaussian fit from subpixel=True is very slow here...
    mf_img.detect_sources(threshold=min_threshold)
    FluxResolution(mf_img, method=photmethod)
    mf_img.phot_sources(peak=True, psf=False)
    # flux = FluxResolution(mf_img)
    return mf_img.sources, mf_img.fake_sources
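# A note on the renormalization above: this assumes check_SNR() returns the
# measured standard deviation of the SNR map, so scaling the uncertainty by it
# brings the SNR distribution back to unit variance, both before source
# injection and again after match filtering.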
def FluxResolution(nm, method='peak'):
    """Photometer the detected sources with the requested method."""
    if method == 'peak':
        peak, psf = True, False
    else:
        peak, psf = False, True
    nm.phot_sources(sources=nm.sources, peak=peak, psf=psf)
    # np.savetxt('fluxhisttest.txt', nm.sources['flux_' + method])
    return 0
def completness_worker(shape, wcs, sources, fake_sources,
                       min_threshold=2, max_threshold=5):
    """Compute completeness from the fake source catalog.

    Parameters
    ----------
    shape : tuple
        the shape of the resulting image
    sources : :class:`astropy.table.Table`
        the detected sources
    fake_sources : :class:`astropy.table.Table`
        the fake sources table, with the corresponding mask
    min_threshold : float
        the minimum SNR threshold requested
    max_threshold : float
        the maximum SNR threshold requested

    Returns
    -------
    _completness, _norm_comp
        corresponding 2D :class:`numpy.ndarray`
    """
    # If one wanted to use a histogramdd, one would need a threshold axis
    # covering ALL possible SNR, otherwise lose flux, or cap the thresholds...
    fake_snr = np.ma.array(sources[fake_sources['find_peak']]['SNR'],
                           mask=fake_sources['find_peak'].mask)

    # As we are interested in the cumulative numbers, keep everything inside
    # the upper pixel
    fake_snr[fake_snr > max_threshold] = max_threshold

    # TODO: Consider keeping all pixel information in fake_sources and sources...
    # This would imply doing only a simple wcs_threshold here...
    xx, yy, zz = wcs.wcs_world2pix(fake_sources['ra'], fake_sources['dec'],
                                   fake_snr.filled(min_threshold), 0)

    # Number of fake sources recovered
    _completness, _ = np.histogramdd(np.asarray([xx, yy, zz]).T + 0.5,
                                     bins=np.asarray(shape),
                                     range=list(zip([0] * len(shape), shape)),
                                     weights=~fake_sources['find_peak'].mask)

    # Reverse cumulative sum to get all sources above the given threshold
    _completness = np.cumsum(_completness[..., ::-1], axis=2)[..., ::-1]

    # Number of fake sources (independent of threshold)
    _norm_comp, _, _ = np.histogram2d(xx + 0.5, yy + 0.5,
                                      bins=np.asarray(shape[0:2]),
                                      range=list(zip([0] * 2, shape[0:2])))

    return _completness, _norm_comp
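# Worked example for the reversed cumulative sum above (made-up counts): if a
# pixel recovers [3, 2, 1] fake sources in threshold bins [2, 4, 6], the
# reversed cumsum gives [6, 3, 1]: 6 sources are recovered at SNR >= 2 but
# only 1 at SNR >= 6.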
def purity_worker(shape, wcs, sources, max_threshold=2):
    """Compute purity from the detected source catalog.

    Parameters
    ----------
    shape : tuple
        the shape of the resulting image
    sources : :class:`astropy.table.Table`
        the detected sources table, with the corresponding match
    max_threshold : float
        the maximum threshold requested

    Returns
    -------
    _purity, _norm_pur
        corresponding 2D :class:`numpy.ndarray`
    """
    # Copy so that capping the SNR does not alter the input table
    sources_snr = sources['SNR'].copy()

    # As we are interested in the cumulative numbers, keep everything inside
    # the upper pixel
    sources_snr[sources_snr > max_threshold] = max_threshold
    xx, yy, zz = wcs.wcs_world2pix(sources['ra'], sources['dec'],
                                   sources_snr, 0)

    # Number of detections matched to a fake source
    _purity, _ = np.histogramdd(np.asarray([xx, yy, zz]).T + 0.5,
                                bins=np.asarray(shape),
                                range=list(zip([0] * len(shape), shape)),
                                weights=~sources['fake_sources'].mask)

    # Reverse cumulative sum...
    _purity = np.cumsum(_purity[..., ::-1], axis=2)[..., ::-1]

    # Number of total detected sources at a given threshold
    _norm_pur, _ = np.histogramdd(np.asarray([xx, yy, zz]).T + 0.5,
                                  bins=np.asarray(shape),
                                  range=list(zip([0] * len(shape), shape)))
    _norm_pur = np.cumsum(_norm_pur[..., ::-1], axis=2)[..., ::-1]

    return _purity, _norm_pur
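# The driver script divides _purity by _norm_pur, so purity(threshold) is the
# fraction of all detections above a given threshold that match an injected
# fake source.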
# def completness_purity(flux, wcs=None, shape=None, nsources=8**2,
#                        within=(1 / 4, 3 / 4), nsim=500, jk_filenames=None):
#     """Compute completness map for a given flux"""
#
#     print(flux)
#     # fluxlist = []
#
#     # wcs_celestial = wcs.celestial
#     # Lower and upper edges... take the center of the pixel for the upper edge
#     min_threshold, max_threshold = wcs.sub([3]).all_pix2world([-0.5, shape[2] - 1], 0)[0]
#
#     completness = np.zeros(shape, dtype=float)
#     norm_comp = np.zeros(shape[0:2], dtype=float)
#
#     purity = np.zeros(shape, dtype=float)
#     norm_pur = np.zeros(shape, dtype=float)
#
#     jk_iter = Jackknife(jk_filenames, n=nsim)
#     print("Begin loop")
#
#     for img in ProgressBar(jk_iter):
#         print("In loop")
#         # %snakeviz the following line... all the time is spent in
#         # find_peaks / fit_2d_gaussian
#         # TODO: Change the find_peaks routine, or maybe just the
#         # fit_2d_gaussian, to be FAST! (Maybe look into the gcntrd.pro
#         # routine or photutils.centroid.centroid_1dg?)
#         sources, fake_sources = fake_worker(img, nsources=nsources,
#                                             within=within, flux=flux,
#                                             pos_gen=pos_uniform,
#                                             min_threshold=min_threshold,
#                                             dist_threshold=img.beam.fwhm_pix.value)
#
#         # Save global array (MB requests)
#         # FAKE_SOURCES.append(fake_sources)
#         # DETECTED_SOURCES.append(sources)
#
#         _completness, _norm_comp = completness_worker(shape, wcs, sources,
#                                                       fake_sources,
#                                                       min_threshold,
#                                                       max_threshold)
#         completness += _completness
#         norm_comp += _norm_comp
#
#         _purity, _norm_pur = purity_worker(shape, wcs, sources, max_threshold)
#         purity += _purity
#         norm_pur += _norm_pur
#
#     # norm can be 0, so to avoid warnings on invalid values...
#     with np.errstate(divide='ignore', invalid='ignore'):
#         completness /= norm_comp[..., np.newaxis]
#         purity /= norm_pur
#
#     # TODO: One should probably return completness AND norm if one wants to
#     # combine several fluxes
#     return completness, purity
def completness_purity_2(flux, nsim=2, wcs=None, shape=None, nsources=8**2,
                         within=(1 / 4, 3 / 4), jk_map=None):
    """Compute the completeness and purity maps for a given flux."""
    # Lower and upper edges... take the center of the pixel for the upper edge
    min_threshold, max_threshold = wcs.sub([3]).all_pix2world([-0.5, shape[2] - 1], 0)[0]

    completness = np.zeros(shape, dtype=float)
    norm_comp = np.zeros(shape[0:2], dtype=float)
    purity = np.zeros(shape, dtype=float)
    norm_pur = np.zeros(shape, dtype=float)
    sourcetable = Table()

    for isim in ProgressBar(nsim):
        # TODO: Change the find_peaks routine, or maybe just the
        # fit_2d_gaussian, to be FAST! (Maybe look into the gcntrd.pro routine
        # or photutils.centroid.centroid_1dg?)
        sources, fake_sources = fake_worker(jk_map, nsources=nsources,
                                            within=within, flux=flux,
                                            pos_gen=pos_uniform,
                                            min_threshold=min_threshold,
                                            dist_threshold=jk_map.beam.fwhm_pix.value)
        sourcetable = vstack([sourcetable, sources])

        _completness, _norm_comp = completness_worker(shape, wcs, sources,
                                                      fake_sources,
                                                      min_threshold,
                                                      max_threshold)
        completness += _completness
        norm_comp += _norm_comp

        _purity, _norm_pur = purity_worker(shape, wcs, sources, max_threshold)
        purity += _purity
        norm_pur += _norm_pur

    # The sums and the norms are returned separately so that results from
    # several cores can be co-added before the division (done in the driver).
    return completness, norm_comp, purity, norm_pur, sourcetable
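# Usage sketch, mirroring the driver script above (flux is the only
# positional argument, so pool.map can feed the per-core nsim values):
#   helpfunc = partial(completness_purity_2, 10 * u.mJy, nsources=8**2,
#                      within=(0, 1), shape=shape_4D[0:3],
#                      wcs=wcs_4D.sub([1, 2, 3]), jk_map=data)
#   comp, comp_n, pur, pur_n, cat = helpfunc(10)   # 10 simulations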
def Plot_CompPur(completness, purity, threshold, nsim=None, savename=None):
    """Plot the completeness (top row) and purity (bottom row) maps per threshold."""
    threshold_bins = completness.shape[-1]
    fig, axes = plt.subplots(nrows=2, ncols=threshold_bins,
                             sharex=True, sharey=True)
    for i in range(threshold_bins):
        axes[0, i].imshow(completness[:, :, i], vmin=0, vmax=1)
        im = axes[1, i].imshow(purity[:, :, i], vmin=0, vmax=1)
        axes[1, i].set_xlabel("thresh={:.2f}".format(threshold[i]))
        if i == (threshold_bins - 1):
            # Attach the colorbar to the last panel only
            divider = make_axes_locatable(axes[1, i])
            cax = divider.append_axes('right', size='5%', pad=0.0)
            fig.colorbar(im, cax=cax, orientation='vertical')

    if nsim is not None:
        axes[0, 0].set_title("{} simulations".format(nsim))
    axes[0, 0].set_ylabel("completeness")
    axes[1, 0].set_ylabel("purity")

    if savename is not None:
        plt.savefig(savename)
    # plt.show(block=True)
# Save result!
# TODO:
#  - prepare the function for multiprocessing
#  - calculate the sum of the results of each run and divide the sums by the norms
#
# completness, purity = completness_purity_2(flux=10 * u.mJy, nsources=8**2,
#                                            within=(0, 1),
#                                            wcs=wcs_4D.sub([1, 2, 3]),
#                                            shape=shape_4D[0:3], nsim=nsim,
#                                            jk_map=data)
# Alternative: shape=shape_4D[0:3], nsim=np.multiply(*shape_4D[0:2]) * 100,
'''
# To merge all the fake_sources and sources catalogs
fake_sources = FAKE_SOURCES[0]
sources = DETECTED_SOURCES[0]
for _fake, _detected in zip(FAKE_SOURCES[1:], DETECTED_SOURCES[1:]):
    n_fake = len(fake_sources)
    n_detected = len(sources)
    _fake['ID'] = _fake['ID'] + n_fake
    _detected['ID'] = _detected['ID'] + n_detected
    _fake['find_peak'] = _fake['find_peak'] + n_detected
    _detected['fake_sources'] = _detected['fake_sources'] + n_fake
    fake_sources = vstack([fake_sources, _fake])
    sources = vstack([sources, _detected])

# The full Monte Carlo checks
FAKE_SOURCES = []
DETECTED_SOURCES = []
pool = Pool(cpu_count())
# p_completness_parity = partial(completness_purity,
#                                **{'wcs': wcs.sub([1, 2, 3]),
#                                   'shape': shape[0:3],
#                                   'jk_filenames': filenames})
p_completness_parity = partial(completness_purity_2,
                               **{'wcs': wcs_4D.sub([1, 2, 3]),
                                  'shape': shape_4D[0:3], 'jk_map': data})
result = pool.map(p_completness_parity, fluxes)
completness = np.moveaxis(np.asarray([_completness for _completness, _purity in result]), 0, 3)
purity = np.moveaxis(np.asarray([_purity for _completness, _purity in result]), 0, 3)
'''