diff --git a/papers/Vvmax/Data/all_loc_frb_data_w171020.dat b/papers/Vvmax/Data/all_loc_frb_data_w171020.dat
index 3b66232c..78299670 100644
--- a/papers/Vvmax/Data/all_loc_frb_data_w171020.dat
+++ b/papers/Vvmax/Data/all_loc_frb_data_w171020.dat
@@ -1,4 +1,4 @@
- # FRB S/N DM DMgal Freq dt w theta_d Fnu Zloc Nants RF
+# FRB S/N DM DMgal Freq dt w theta_d Fnu Zloc Nants RF
 20171020 19.5 114.1 38.4 1297.5 1.260 1.70 0.722 200 0.00867 1 200
 20180924 21.1 362.4 40.5 1297.5 0.864 1.76 0.23 16 0.32140 24 18.4
 20181112 19.3 589.0 40.2 1297.5 0.864 2.10 0.31 26 0.47550 12 28
diff --git a/zdm/beam_generator/sim_craco_beam.py b/zdm/beam_generator/sim_craco_beam.py
new file mode 100644
index 00000000..378ae8cd
--- /dev/null
+++ b/zdm/beam_generator/sim_craco_beam.py
@@ -0,0 +1,291 @@
+"""
+This file has been updated and adapted from original code produced
+by Ziteng (Andy) Wang. The original is part of the
+CRAFT code repository, and can be found here:
+https://github.com/askap-craco/craco-beam
+"""
+
+import numpy as np
+import os
+import logging
+from matplotlib import pyplot as plt
+
+logging.basicConfig(
+    level = logging.INFO,
+    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
+)
+
+logger = logging.getLogger(__name__)
+
+
+def Gauss(r, sigma):
+    """
+    simple Gaussian function
+    """
+    return np.exp(-0.5 * (r / sigma) ** 2)
+
+def hist_craco_beams(footprint, pitch, freq, gsize, gpix, plot=False):
+    """
+    Generates a solid-angle-weighted histogram of beam values for the
+    specified footprint, pitch, frequency, and response-grid parameters
+    """
+
+    infile = f"./craco_{footprint}_p{pitch:.2f}_f{freq:.1f}MHz_f{gsize:.1f}d_npix{gpix}.npy"
+    beamshape = np.load(infile)
+
+    envelope = np.max(beamshape,axis=0)
+    gs = gsize
+    npix = gpix
+    rads = np.linspace(-gs/2.,gs/2.,npix) * np.pi/180.
+    xs = np.repeat(rads,npix)
+    xs = xs.reshape([npix,npix])
+    ys = np.copy(xs)
+    ys = ys.T
+
+    ddeg = rads[1]-rads[0]
+    dsolid = ddeg**2
+
+    # calculate solid angle per grid point: cosine of the angular radius
+    solids = np.cos(np.sqrt(xs**2 + ys**2))
+    solids *= dsolid
+
+    bmin = -3
+    bmax = 0
+    nbins = 300
+    db = (bmax - bmin)/nbins
+    binedges = np.logspace(bmin,bmax,nbins+1)
+    h,b = np.histogram(envelope.flatten(),weights = solids.flatten(),bins=binedges)
+
+    bcs = 10**np.linspace(bmin + db/2.,bmax - db/2.,nbins)
+
+    basename = f"./hist_craco_{footprint}_p{pitch:.2f}_f{freq:.1f}MHz_f{gsize:.1f}d_npix{gpix}.npy"
+
+    np.save(basename,h)
+    np.save("craco_histogram_bins.npy",binedges)
+
+    if plot:
+
+        x1 = np.load("../data/BeamData/ASKAP_1300_bins.npy")
+        y1 = np.load("../data/BeamData/ASKAP_1300_hist.npy")
+        dx1 = x1[1]/x1[0]
+        xbar1 = x1[:-1] * dx1**0.5
+
+        plt.figure()
+        plt.plot(bcs,h,label="CRACO square")
+        plt.plot(xbar1,y1,label="ICS closepack 1300 MHz")
+
+        plt.xlabel("$B$")
+        plt.ylabel("$\\Omega(B)$")
+        plt.xscale('log')
+        plt.yscale('log')
+        plt.show()
+
+class AskapBeam:
+    def __init__(
+        self, footprint="square", pitch=0.9, freq=920.5,
+    ):
+        self.footprint = footprint # askap footprint: square or closepack
+        self.pitch = pitch # beam separation in degrees
+        self.freq = freq # frequency in MHz
+
+        self.tsys = self._load_tsys()
+        self.beamscentre = self._gen_askap_beamcentre()
+        self.beamwidth = self._get_primary_beamwidth()
+        self.avals = self._load_aval()
+
+    def _load_tsys(self):
+        coeffs = np.load("askap_tsys_polyval.npy")
+        return np.polyval(coeffs, self.freq)
+
+    def _load_aval(self):
+        data = np.loadtxt("avals.dat")
+        x = data[:,0]
+        y = data[:,1]
+        newx = np.concatenate([x,-x[::-1]])
+        newy = np.concatenate([y,y[::-1]])
+
+        dists =
np.sqrt(self.beamscentre[0] ** 2 + self.beamscentre[1] ** 2)
+        return np.interp(dists, newx, newy)
+
+    def _gen_askap_beamcentre(self):
+        """
+        generate beam centres
+        """
+        if self.footprint == "square":
+            x=np.linspace(0,5.*self.pitch,6)
+            x=np.repeat(x,6)
+            x = x.reshape([6,6])
+            y=np.copy(x)
+            y=y.T
+            x -= self.pitch*2.5
+            y -= self.pitch*2.5
+
+        elif self.footprint == "closepack":
+            x = np.zeros([6,6])
+            y = np.zeros([6,6])
+            for iy in np.arange(6):
+                if iy % 2 == 0: x0 = 0.
+                else: x0 = 0.5 * self.pitch
+                x[iy,:] = np.arange(6) * self.pitch + x0
+                y[iy,:] = iy*self.pitch*3**0.5/2.
+            x -= np.sum(x)/36.
+            y -= np.sum(y)/36.
+
+        return x.flatten(), y.flatten()
+
+    def _get_primary_beamwidth(self):
+        D = 12.
+        c_light = 299792458.
+        HPBW=1.09*(c_light/(self.freq * 1e6))/D # from RACS
+        sigma=(HPBW/2.)*(2*np.log(2))**-0.5
+        deg_sigma = sigma * 180./np.pi
+
+        return deg_sigma # in degrees
+
+    def primary_response(self, xpoints, ypoints, beamx, beamy, beamw=None):
+        offsets = (xpoints[None, ...] - beamx) ** 2 + (ypoints[..., None] - beamy) ** 2
+        offsets = np.sqrt(offsets)
+        if beamw is None: beamw = self.beamwidth
+        return Gauss(offsets, beamw)
+
+
+class CracoBeam:
+
+    def __init__(
+        self, fov=1.1, npix=256, sbeam=40./3600.
+    ):
+        self.fov = fov # single field of view in degrees
+        self.npix = npix # number of pixels in the image domain
+        self.sbeam = sbeam # size of the synthesized beam in degrees
+
+        self.uvcellsize = 1 / np.deg2rad(self.fov)
+        self.imgcellsize = self.fov / self.npix # cell size in degrees
+
+
+    def grid_response(self, xpoints, ypoints, beamx=0., beamy=0.):
+        """
+        pillbox gridding response, i.e., a sinc function
+
+        xpoints, ypoints: numpy.ndarray
+            coordinates in degrees, offset from the phase centre
+        """
+        # this might be something to do with the FoV
+        xpoints = (xpoints - beamx).copy()
+        ypoints = (ypoints - beamy).copy()
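The pillbox gridding response computed below is separable and falls off as a normalised sinc across the image. A standalone sketch (illustrative values only, not part of the patch) of the edge-of-field loss it implies:

```python
import numpy as np

fov = 1.1                                # CRACO field of view in degrees
uvcellsize = 1.0 / np.deg2rad(fov)       # uv cell size, as in CracoBeam

# np.sinc(t) = sin(pi t)/(pi t), matching sin(pi*u*x)/(pi*u*x) below
def grid_response_1d(x_deg):
    return np.abs(np.sinc(uvcellsize * np.deg2rad(x_deg)))

# at an offset of fov/2 the response is sin(pi/2)/(pi/2) ~ 0.64,
# i.e. a ~36% gridding loss at the image edge before any correction
print(grid_response_1d(fov / 2))   # ~0.6366
```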
+        ### outside the FoV, make copies...
+        # xpoints = (xpoints - self.fov / 2) % self.fov - self.fov / 2
+        # ypoints = (ypoints - self.fov / 2) % self.fov - self.fov / 2
+
+        xpoints = np.deg2rad(xpoints)
+        ypoints = np.deg2rad(ypoints)
+
+        xpoints, ypoints = np.meshgrid(xpoints, ypoints)
+
+        sin = np.sin; pi = np.pi
+
+        p1 = sin(pi*self.uvcellsize*xpoints) / (pi*self.uvcellsize*xpoints)
+        p2 = sin(pi*self.uvcellsize*ypoints) / (pi*self.uvcellsize*ypoints)
+
+        return np.abs(p1 * p2)
+
+    def sample_response(self, xpoints, ypoints, beamx=0., beamy=0.):
+        """
+        response due to insufficient sampling, i.e., the ~4% loss
+
+        xpoints, ypoints: numpy.ndarray
+            coordinates in degrees, offset from the phase centre
+        """
+        xpoints = (xpoints - beamx).copy()
+        ypoints = (ypoints - beamy).copy()
+
+        xoff = xpoints % self.imgcellsize - self.imgcellsize / 2
+        yoff = ypoints % self.imgcellsize - self.imgcellsize / 2
+
+        xoff, yoff = np.meshgrid(xoff, yoff)
+        off = np.sqrt(xoff ** 2 + yoff ** 2)
+        return Gauss(off, self.sbeam)
+
+
+def _make_response_grid(gsize=10., gpix=2560):
+    """
+    given the size of the grid (in degrees, diameter), and the number of pixels on each axis,
+    return two numpy arrays containing grid coordinates
+    """
+    xpoints = np.linspace(-gsize/2, gsize/2, gpix)
+    ypoints = np.linspace(-gsize/2, gsize/2, gpix)
+    return xpoints, ypoints
+
+def _sim_one_beam(ibeam, xpoints, ypoints, askapbeam, cracobeam):
+    logger.info(f"start simulating craco response for beam{ibeam}...")
+    beamx = askapbeam.beamscentre[0][ibeam]
+    beamy = askapbeam.beamscentre[1][ibeam]
+
+    ### aval value
+    beamaval = askapbeam.avals[ibeam]
+    ### primary beam response
+    logger.info(f"get primary beam response for beam{ibeam}...")
+    primresponse = askapbeam.primary_response(xpoints, ypoints, beamx, beamy)
+    ### craco related response
+    logger.info(f"get grid response for beam{ibeam}...")
+    gridresponse = cracobeam.grid_response(xpoints, ypoints, beamx, beamy)
+    logger.info(f"get sample response for beam{ibeam}...")
+    sampresponse = cracobeam.sample_response(xpoints, ypoints, beamx, beamy)
+
+    beamresponse = beamaval * primresponse * gridresponse * sampresponse
+    return beamresponse
+
+def get_craco_allbeams(
+        footprint="square", pitch=0.9, freq=920.5, # askap footprint related
+        fov=1.1, npix=256, sbeam=40./3600., # craco related
+        gsize=10., gpix=2560 # response gridding params
+    ):
+    xpoints, ypoints = _make_response_grid(gsize=gsize, gpix=gpix)
+    askapbeam = AskapBeam(footprint=footprint, pitch=pitch, freq=freq)
+    cracobeam = CracoBeam(fov=fov, npix=npix, sbeam=sbeam)
+
+    beamsresponse = np.zeros((36, gpix, gpix))
+    for ibeam in range(36): # 36 beams
+        beamsresponse[ibeam] = _sim_one_beam(ibeam, xpoints, ypoints, askapbeam, cracobeam)
+
+    savefname = f"./craco_{footprint}_p{pitch:.2f}_f{freq:.1f}MHz_f{gsize:.1f}d_npix{gpix}.npy"
+    np.save(savefname, beamsresponse)
+    logger.info(f"save data to {savefname}...")
+
+def main():
+    from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter
+    parser = ArgumentParser(description="simulate craco response", formatter_class=ArgumentDefaultsHelpFormatter)
+    parser.add_argument("-fp", "--footprint", help="askap beam footprint", type=str, default="square")
+    parser.add_argument("-p", "--pitch", help="beam pitch", type=float, default=0.9)
+    parser.add_argument("-f", "--freq", help="frequency of the observation", type=float, default=920.5)
+    parser.add_argument("-fv", "--fov", help="craco image field-of-view", type=float, default=1.1)
+    parser.add_argument("-npix", "--npix", help="number of
pixels in craco image", type=int, default=256)
+    parser.add_argument("-sb", "--sbeam", help="size of the synthesized beam", type=float, default=40./3600.)
+    parser.add_argument("-gs", "--gridsize", help="grid size in degrees", type=float, default=10.)
+    parser.add_argument("-gn", "--gridnpix", help="number of pixels in the grid", type=int, default=2560)
+    values = parser.parse_args()
+
+    footprint=values.footprint
+    pitch=values.pitch
+    freq=values.freq
+    fov=values.fov
+    npix=values.npix
+    sbeam=values.sbeam
+    gsize=values.gridsize
+    gpix=values.gridnpix
+
+    opfile = f"./craco_{footprint}_p{pitch:.2f}_f{freq:.1f}MHz_f{gsize:.1f}d_npix{gpix}.npy"
+    # first invocation simulates the beams; a second invocation histograms them
+    if not os.path.exists(opfile):
+        get_craco_allbeams(
+            footprint=footprint, pitch=pitch, freq=freq,
+            fov=values.fov, npix=values.npix, sbeam=sbeam, gsize=gsize,
+            gpix=gpix
+        )
+    else:
+        hist_craco_beams(footprint, pitch, freq, gsize, gpix, plot=True)
+
+
+if __name__ == "__main__":
+    main()
diff --git a/zdm/data/BeamData/ASKAP_average_bins.npy b/zdm/data/BeamData/ASKAP_average_bins.npy
new file mode 100644
index 00000000..f8685bc5
Binary files /dev/null and b/zdm/data/BeamData/ASKAP_average_bins.npy differ
diff --git a/zdm/data/BeamData/ASKAP_average_hist.npy b/zdm/data/BeamData/ASKAP_average_hist.npy
new file mode 100644
index 00000000..7328b6b7
Binary files /dev/null and b/zdm/data/BeamData/ASKAP_average_hist.npy differ
diff --git a/zdm/data/Surveys/CRAFT_ICS_1300.ecsv b/zdm/data/Surveys/CRAFT_ICS_1300.ecsv
index 7a7b07c4..27dace6e 100644
--- a/zdm/data/Surveys/CRAFT_ICS_1300.ecsv
+++ b/zdm/data/Surveys/CRAFT_ICS_1300.ecsv
@@ -14,30 +14,31 @@
 # - {name: THRESH, datatype: float64}
 # - {name: TRES, datatype: float64}
 # - {name: WIDTH, datatype: float64}
+# - {name: TAU, datatype: float64}
 # - {name: XDec, datatype: string}
 # - {name: XRA, datatype: string}
 # - {name: Z, datatype: float64}
 # meta: !!omap
-# - {survey_data: '{"observing": {"NORM_FRB": 19, "TOBS": 165.506756761, "MAX_IDT": 4096},
-#     "telescope": {"BEAM": "ASKAP_1300", "DIAM": 12.0, "NBEAMS": 36, "NBINS": 5}}'}
+# - {survey_data: '{"observing": {"NORM_FRB": 19, "TOBS": 165.506756761, "MAX_IDT": 4096, "MAX_IW": 12, "MAXWMETH": 1},
+#     "telescope": {"BEAM": "ASKAP_1300", "DIAM": 12.0, "NBEAMS": 36, "NBINS": 5, "WDATA": 1}}'}
 # schema: astropy-2.0
-TNS BW DM DMG FBAR FRES Gb Gl SNR SNRTHRESH THRESH TRES WIDTH XDec XRA Z
-20180924B 336.0 362.4 40.5 1297.5 1.0 -74.40520121494983 277.20651893408893 21.1 9.0 4.4 0.864 1.76 -40:54:00.1 21:44:25.255 0.3214
-20181112A 336.0 589.0 40.2 1297.5 1.0 -63.30983709852826 290.87390392674445 19.3 9.0 4.4 0.864 2.1 -52:58:15.39 21:49:23.630 0.4755
-20190102C 336.0 364.5 57.3 1271.5 1.0 -37.519392943553534 300.95052401722796 14.0 9.0 4.4 0.864 1.7 -79:28:32.2845 21:29:39.70836 0.291
-20190608B 336.0 339.5 37.2 1271.5 1.0 -67.23724646562972 88.21721604792883 16.1 9.0 4.4 1.728 6.0 -07:53:53.6 22:16:04.7472s 0.1178
-20190611B 336.0 322.2 57.6 1271.5 1.0 -37.59976893568559 300.9594501909617 9.3 9.0 4.4 1.728 2.0 -79:23:51.284 21:22:58.372 0.378
-20190711A 336.0 594.6 56.6 1271.5 1.0 -36.63629532677801 301.03976293370494 23.8 9.0 4.4 1.728 6.5 -80:21:28.18 21:57:40.012 0.522
-20190714A 336.0 504.7 38.5 1271.5 1.0 -75.88144720209479 120.55805492153455 10.7 9.0 4.4 1.728 2.9 -13:01:14.36 12:15:55.081 0.2365
-20191228A 336.0 297.5 32.9 1271.5 1.0 -80.77822140033614 230.79855541687724 22.9 9.0 4.4 1.728 2.3 -29:35:37.85 22:57:43.269 0.243
-20210117A 336.0 730.0 34.4 1271.5 1.0 -75.7801432700954 164.65014968696988 27.1 9.0 4.4 1.182 3.4 -16:11:25.2 22:39:36.0 0.214
-20210214A
336.0 398.3 31.9 1271.5 1.0 -65.65372930742399 91.72782990931984 11.6 9.0 4.4 1.182 3.5 -05:49:56 00:27:43 -1.0 -20210407 336.0 1785.3 154.0 1271.5 1.0 -35.320866474063905 114.6146256941771 19.1 9.0 4.4 1.182 8.0 27:03:30.24 05:14:36.202 -1.0 -20210912 336.0 1234.5 30.9 1271.5 1.0 -80.16655338584705 235.42165843951094 31.7 9.0 4.4 1.182 5.5 -30:29:33.1 23:24:40.3 -1.0 -20211127 336.0 234.83 42.5 1271.5 1.0 -81.68554744714709 125.94306109583985 37.9 9.0 4.4 1.182 1.41 -18:49:28.4 13:19:09.5 0.046946 -20220531A 336.0 727.0 70.0 1271.5 1.0 -56.509199987039224 296.83938685003784 9.7 9.0 4.4 1.182 11.0 -60:17:48.2 19:38:50.2 -1.0 -20220610A 336.0 1458.1 31.0 1271.5 1.0 -78.89122591551634 250.56280818953536 29.8 9.0 4.4 1.182 5.6 -33:30:49.87 23:24:17.559 1.016 -20220918A 336.0 656.8 40.7 1271.5 1.0 -45.81623556809721 308.4134807482381 26.4 9.0 4.4 1.182 7.1 -70:48:40.5 01:10:22.1 0.45 -20230526A 336.0 316.4 50.0 1271.5 1.0 -62.994316139408156 318.1591960546148 22.1 9.0 4.4 1.182 2.6 -52:46:07.7 01:29:27.5 0.157 -20230718A 336.0 477.0 396.0 1271.5 1.0 -75.66933767869608 316.30925962515585 10.9 9.0 4.4 1.182 2.3 -41:00:12.8 08:30:27.1 -1.0 -20230731A 336.0 701.1 547.0 1271.5 1.0 -60.14372530213189 304.262204790738 16.6 9.0 4.4 1.182 3.3 -56:58:19.1 11:38:40.1 -1.0 +TNS BW DM DMG FBAR FRES Gb Gl SNR SNRTHRESH THRESH TRES WIDTH TAU XDec XRA Z +20180924B 336.0 362.4 40.5 1297.5 1.0 -74.40520121494983 277.20651893408893 21.1 9.0 4.4 0.864 2 0.59 -40:54:00.1 21:44:25.255 0.3214 +20181112A 336.0 589.0 40.2 1297.5 1.0 -63.30983709852826 290.87390392674445 19.3 9.0 4.4 0.864 0.8 0.023 -52:58:15.39 21:49:23.630 0.4755 +20190102C 336.0 364.5 57.3 1271.5 1.0 -37.519392943553534 300.95052401722796 14.0 9.0 4.4 0.864 1.25 0.027 -79:28:32.2845 21:29:39.70836 0.291 +20190608B 336.0 339.5 37.2 1271.5 1.0 -67.23724646562972 88.21721604792883 16.1 9.0 4.4 1.728 10.8 3.8 -07:53:53.6 22:16:04.7472s 0.1178 +20190611B 336.0 322.2 57.6 1271.5 1.0 -37.59976893568559 300.9594501909617 9.3 9.0 4.4 1.728 1.59 0.03 -79:23:51.284 21:22:58.372 0.378 +20190711A 336.0 594.6 56.6 1271.5 1.0 -36.63629532677801 301.03976293370494 23.8 9.0 4.4 1.728 11.0 0.0076 -80:21:28.18 21:57:40.012 0.522 +20190714A 336.0 504.7 38.5 1271.5 1.0 -75.88144720209479 120.55805492153455 10.7 9.0 4.4 1.728 3.0 0.422 -13:01:14.36 12:15:55.081 0.2365 +20191228A 336.0 297.5 32.9 1271.5 1.0 -80.77822140033614 230.79855541687724 22.9 9.0 4.4 1.728 13.6 5.85 -29:35:37.85 22:57:43.269 0.243 +20210117A 336.0 730.0 34.4 1271.5 1.0 -75.7801432700954 164.65014968696988 27.1 9.0 4.4 1.182 3.6 0.25 -16:11:25.2 22:39:36.0 0.214 +20210214A 336.0 398.3 31.9 1271.5 1.0 -65.65372930742399 91.72782990931984 11.6 9.0 4.4 1.182 3.5 -1 -05:49:56 00:27:43 -1.0 +20210407 336.0 1785.3 154.0 1271.5 1.0 -35.320866474063905 114.6146256941771 19.1 9.0 4.4 1.182 1.62 0.09 27:03:30.24 05:14:36.202 -1.0 +20210912 336.0 1234.5 30.9 1271.5 1.0 -80.16655338584705 235.42165843951094 31.7 9.0 4.4 1.182 1.61 0.048 -30:29:33.1 23:24:40.3 -1.0 +20211127 336.0 234.83 42.5 1271.5 1.0 -81.68554744714709 125.94306109583985 37.9 9.0 4.4 1.182 0.48 0.025 -18:49:28.4 13:19:09.5 0.046946 +20220531A 336.0 727.0 70.0 1271.5 1.0 -56.509199987039224 296.83938685003784 9.7 9.0 4.4 1.182 11.0 -1 -60:17:48.2 19:38:50.2 -1.0 +20220610A 336.0 1458.1 31.0 1271.5 1.0 -78.89122591551634 250.56280818953536 29.8 9.0 4.4 1.182 2.0 0.521 -33:30:49.87 23:24:17.559 1.016 +20220918A 336.0 656.8 40.7 1271.5 1.0 -45.81623556809721 308.4134807482381 26.4 9.0 4.4 1.182 13.9 7.66 -70:48:40.5 01:10:22.1 0.45 +20230526A 
336.0 316.4 50.0 1271.5 1.0 -62.994316139408156 318.1591960546148 22.1 9.0 4.4 1.182 2.7 1.16 -52:46:07.7 01:29:27.5 0.157 +20230718A 336.0 477.0 396.0 1271.5 1.0 -75.66933767869608 316.30925962515585 10.9 9.0 4.4 1.182 0.7 0.117 -41:00:12.8 08:30:27.1 -1.0 +20230731A 336.0 701.1 547.0 1271.5 1.0 -60.14372530213189 304.262204790738 16.6 9.0 4.4 1.182 2.7 0.45 -56:58:19.1 11:38:40.1 -1.0 diff --git a/zdm/data/Surveys/CRAFT_ICS_1632.ecsv b/zdm/data/Surveys/CRAFT_ICS_1632.ecsv index 18a6c4f3..71c6ce61 100644 --- a/zdm/data/Surveys/CRAFT_ICS_1632.ecsv +++ b/zdm/data/Surveys/CRAFT_ICS_1632.ecsv @@ -19,7 +19,7 @@ # - {name: XRA, datatype: string} # - {name: Z, datatype: float64} # meta: !!omap -# - {survey_data: '{"observing": {"NORM_FRB": 3, "TOBS": 50.866971336, "MAX_IDT": 4096}, +# - {survey_data: '{"observing": {"NORM_FRB": 3, "TOBS": 50.866971336, "MAX_IDT": 4096, "MAX_IW": 12, "MAXWMETH": 1}, # "telescope": {"BEAM": "ASKAP_1632", "DIAM": 12.0, "NBEAMS": 1, "NBINS": 5}}'} # schema: astropy-2.0 TNS BW DM DMG FBAR FRES Gb Gl SNR SNRTHRESH THRESH TRES WIDTH XC XDec XRA Z diff --git a/zdm/data/Surveys/CRAFT_ICS_892.ecsv b/zdm/data/Surveys/CRAFT_ICS_892.ecsv index 0edc4c28..25976d68 100644 --- a/zdm/data/Surveys/CRAFT_ICS_892.ecsv +++ b/zdm/data/Surveys/CRAFT_ICS_892.ecsv @@ -19,7 +19,7 @@ # - {name: XRA, datatype: string} # - {name: Z, datatype: float64} # meta: !!omap -# - {survey_data: '{"observing": {"NORM_FRB": 15, "TOBS": 317.293429793, "MAX_IDT": 4096}, +# - {survey_data: '{"observing": {"NORM_FRB": 15, "TOBS": 317.293429793, "MAX_IDT": 4096, "MAX_IW": 12, "MAXWMETH": 1}, # "telescope": {"BEAM": "ASKAP_892", "DIAM": 12.0, "NBEAMS": 1, "NBINS": 5}}'} # schema: astropy-2.0 TNS BW DM DMG FBAR FRES Gb Gl SNR SNRTHRESH THRESH TRES WIDTH XC XDec XRA Z diff --git a/zdm/grid.py b/zdm/grid.py index 9db0d31a..652464b9 100644 --- a/zdm/grid.py +++ b/zdm/grid.py @@ -19,7 +19,7 @@ class Grid: It also assumes a linear uniform grid. """ - def __init__(self, survey, state, zDMgrid, zvals, dmvals, smear_mask, wdist, prev_grid=None): + def __init__(self, survey, state, zDMgrid, zvals, dmvals, smear_mask, wdist=None, prev_grid=None): """ Class constructor. @@ -78,21 +78,19 @@ def __init__(self, survey, state, zDMgrid, zvals, dmvals, smear_mask, wdist, pre self.smear = prev_grid.smear.copy() self.smear_grid = prev_grid.smear_grid.copy() - if wdist: - efficiencies = survey.efficiencies # two dimensions + if wdist is not None: + efficiencies = survey.efficiencies # two OR three dimensions weights = survey.wplist + # Warning -- THRESH could be different for each FRB, but we don't treat it that way + self.calc_thresholds(survey.meta["THRESH"], + efficiencies,weights=weights) else: - efficiencies = survey.mean_efficiencies + # if this is the case, why calc thresholds again below? + efficiencies = survey.mean_efficiencies # one dimension weights = None self.calc_thresholds(survey.meta["THRESH"], efficiencies, weights=weights) efficiencies=survey.mean_efficiencies - weights=None - # Warning -- THRESH could be different for each FRB, but we don't treat it that way - thresh = survey.meta["THRESH"] - # was np.median(survey.frbs['THRESH']) - self.calc_thresholds(thresh, - efficiencies, - weights=weights) + # Calculate self.calc_pdv() self.set_evolution() # sets star-formation rate scaling with z - here, no evoltion... 
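The reworked Grid constructor above now branches on whether a width distribution is supplied: per-width efficiency tables come with weights, while the DM-averaged efficiency does not. A toy sketch of that branching (hypothetical ToySurvey object; calc_thresholds is a placeholder for the Grid method, not the real implementation):

```python
import numpy as np

class ToySurvey:
    meta = {"THRESH": 4.4}                    # fluence threshold
    efficiencies = np.full((5, 100), 0.8)     # nW x nDM efficiency table
    wplist = np.full(5, 0.2)                  # relative width weights
    mean_efficiencies = np.full(100, 0.8)     # 1D, DM-averaged

def calc_thresholds(F0, eff, weights=None):
    # placeholder: report what would be passed to Grid.calc_thresholds
    print(F0, np.shape(eff), None if weights is None else np.shape(weights))

survey, wdist = ToySurvey(), True
if wdist is not None:
    # per-width (possibly per-width-and-z) efficiencies, weighted by wplist
    calc_thresholds(survey.meta["THRESH"], survey.efficiencies, weights=survey.wplist)
else:
    # single DM-averaged efficiency vector, no weights
    calc_thresholds(survey.meta["THRESH"], survey.mean_efficiencies)
```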
@@ -356,30 +354,23 @@ def calc_pdv(self, beam_b=None, beam_o=None): main_beam_b = np.log10(main_beam_b) for i, b in enumerate(main_beam_b): + # if eff_weights is 2D (i.e., z-dependent) then w is a vector of length NZ for j, w in enumerate(self.eff_weights): # using log10 space conversion if self.use_log10: thresh = new_thresh[j, :, :] - b else: # original thresh = self.thresholds[j, :, :] / b - - if j == 0: - self.b_fractions[:, :, i] = ( - self.beam_o[i] - * w - * self.array_cum_lf( - thresh, Emin, Emax, self.state.energy.gamma, self.use_log10 - ) - ) - else: - self.b_fractions[:, :, i] += ( + + # the below is to ensure this works when w is a vector of length nz + w = np.array(w) + + self.b_fractions[:, :, i] += ( self.beam_o[i] - * w - * self.array_cum_lf( + * (self.array_cum_lf( thresh, Emin, Emax, self.state.energy.gamma, self.use_log10 - ) + ).T * w.T).T ) - # here, b-fractions are unweighted according to the value of b. self.fractions = np.sum( self.b_fractions, axis=2 @@ -419,7 +410,7 @@ def calc_thresholds(self, F0:float, Args: F0 (float): base survey threshold - eff_table ([type]): table of efficiencies corresponding to DM-values + eff_table ([type]): table of efficiencies corresponding to DM-values. 1, 2, or 3 dimensions! bandwidth ([type], optional): [description]. Defaults to 1e9. nuObs ([float], optional): survey frequency (affects sensitivity via alpha - only for alpha method) Defaults to 1.3e9. @@ -441,9 +432,10 @@ def calc_thresholds(self, F0:float, self.eff_weights = np.array([1]) self.eff_table = np.array([eff_table]) # make it an extra dimension else: # multiple FRB widths: dimensions nW x NDM - self.nthresh = eff_table.shape[0] + # check that the weights dimensions check out + self.nthresh = eff_table.shape[0] # number of width bins. if weights is not None: - if weights.size != self.nthresh: + if weights.shape[0] != self.nthresh: raise ValueError( "Dimension of weights must equal first dimension of efficiency table" ) @@ -451,10 +443,15 @@ def calc_thresholds(self, F0:float, raise ValueError( "For a multidimensional efficiency table, please set relative weights" ) - self.eff_weights = weights / np.sum(weights) # normalises this! + # I have removed weight normalisation here. In theory, normalisation to <1 is + # a feature, not a bug, representing more/less scattering moving into the + # observable range self.eff_table = eff_table + self.eff_weights = weights + + # now two or three dimensions Eff_thresh = F0 / self.eff_table - + self.EF(self.state.energy.alpha, bandwidth) # sets FtoE values - could have been done *WAY* earlier self.thresholds = np.zeros([self.nthresh, self.zvals.size, self.dmvals.size]) @@ -465,8 +462,12 @@ def calc_thresholds(self, F0:float, # FRB width (nthresh) and DM. # We loop over nthesh and generate a NDM x Nz array for each for i in np.arange(self.nthresh): - self.thresholds[i,:,:] = np.outer(self.FtoE, Eff_thresh[i,:]) - + if self.eff_table.ndim == 2: + self.thresholds[i,:,:] = np.outer(self.FtoE, Eff_thresh[i,:]) + else: + self.thresholds[i,:,:] = ((Eff_thresh[i,:,:]).T * self.FtoE).T + + def smear_dm(self, smear:np.ndarray): # ,mean:float,sigma:float): """ Smears DM using the supplied array. 
Example use: DMX contribution @@ -582,6 +583,9 @@ def initMC(self): nw = self.eff_weights.size nb = self.beam_b.size + if self.eff_weights.ndim > 1: + raise ValueError("MC generation from z-dependent widths not currently enabled") + # holds array of probabilities in w,b space pwb = np.zeros([nw * nb]) rates = [] @@ -673,6 +677,8 @@ def GenMCFRB(self, Emax_boost): # grid of beam values, weights nw = self.eff_weights.size + if self.eff_weights.ndim > 1: + raise ValueError("MC generation from z-dependent widths not currently enabled") nb = self.beam_b.size if not self.MCinit: @@ -782,26 +788,6 @@ def GenMCFRB(self, Emax_boost): iDM1 = 0 # dummy MCDM = self.dmvals[iDM3] * kDM3 + self.dmvals[iDM2] * kDM2 - - #MCDM = self.dmvals[iDM1] * kDM1 + self.dmvals[iDM2] * kDM2 - #if iz2 > 0: - # Eth = ( - # self.thresholds[j, iz1, iDM1] * kz1 * kDM1 - # + self.thresholds[j, iz1, iDM2] * kz1 * kDM2 - # + self.thresholds[j, iz1, iDM3] * kz1 * kDM3 - # + self.thresholds[j, iz2, iDM1] * kz2 * kDM1 - # + self.thresholds[j, iz2, iDM2] * kz2 * kDM2 - # + self.thresholds[j, iz2, iDM3] * kz2 * kDM3 - # + self.thresholds[j, iz3, iDM1] * kz3 * kDM1 - # + self.thresholds[j, iz3, iDM2] * kz3 * kDM2 - # + self.thresholds[j, iz3, iDM3] * kz3 * kDM3 - # ) - #else: - # Eth = ( - # self.thresholds[j, iz2, iDM1] * kDM1 - # + self.thresholds[j, iz2, iDM2] * kDM2 - # ) - # Eth *= kz2 ** 2 # assume threshold goes as Eth~z^2 in the near Universe else: # interpolate linearly from 0 to the minimum value fDM = r / pDMc[iDM2] @@ -812,14 +798,6 @@ def GenMCFRB(self, Emax_boost): iDM1 = 0 #dummy iDM3 = 0 #dummy - #if iz2 > 0: # ignore effect of lowest DM bin on threshold - # Eth = ( - # self.thresholds[j, iz1, iDM2] * kz1 - # + self.thresholds[j, iz2, iDM2] * kz2 - # ) - #else: - # Eth = self.thresholds[j, iz2, iDM2] * kDM2 - # Eth *= kz2 ** 2 # assume threshold goes as Eth~z^2 in the near Universe # This is constructed such that weights and iz, iDM will work out # for all cases of the above. Note that only four of these terms at diff --git a/zdm/iteration.py b/zdm/iteration.py index c446cffc..9621c30d 100644 --- a/zdm/iteration.py +++ b/zdm/iteration.py @@ -12,6 +12,7 @@ from scipy.stats import poisson import scipy.stats as st from zdm import repeat_grid as zdm_repeat_grid + # internal counter NCF=0 @@ -257,12 +258,6 @@ def calc_likelihoods_1D(grid,survey,doplot=False,norm=True,psnr=True,Pn=False,pN #pdm /= global_norm else: log_global_norm=0 - - #ddm=dmvals[1]-dmvals[0] - #kdms=DMobs/ddm - #idms1=kdms.astype('int') - #idms2=idms1+1 - #dkdms=kdms-idms1 idms1,idms2,dkdms1,dkdms2 = grid.get_dm_coeffs(DMobs) @@ -307,7 +302,7 @@ def calc_likelihoods_1D(grid,survey,doplot=False,norm=True,psnr=True,Pn=False,pN expected *= C Pn=Poisson_p(observed,expected) - print(observed, expected, grid.Rc) + if Pn==0: Nll=-1e10 if dolist==0: @@ -353,29 +348,86 @@ def calc_likelihoods_1D(grid,survey,doplot=False,norm=True,psnr=True,Pn=False,pN rs[j,i] = np.sum(grid.rates[j,iweights[i]] * dm_weights[i]) / np.sum(dm_weights[i]) # this has shape nz,nFRB - FRBs could come from any z-value - zpsnr=np.zeros(Eths.shape[1:]) - zpsnr = zpsnr.flatten() + nw,nz,nfrb = Eths.shape + zpsnr=np.zeros([nz,nfrb]) + # numpy flattens this to the order of [z0frb0,z0f1,z0f2,...,z1f0,...] 
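The row-major ordering claimed in the comment above is easy to verify in isolation; a quick standalone sanity check (not part of the patch):

```python
import numpy as np

nz, nfrb = 3, 2
zpsnr = np.arange(nz * nfrb).reshape(nz, nfrb)
# row-major flattening: all FRB entries at z0 come first, then z1, ...
assert list(zpsnr.flatten()) == [0, 1, 2, 3, 4, 5]
assert zpsnr.flatten()[nfrb] == zpsnr[1, 0]
```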
+
+    if grid.eff_weights.ndim == 2:
+        zwidths = True
+    else:
+        zwidths = False
+
+    # this variable keeps the normalisation of sums over p(b,w) as a function of z
+    pbw_norm = 0
+
+    if doplot:
+        # this will produce a plot of the p(SNR) given a particular width bin
+        # for a range of beam values as a function of z for the zeroth FRB
+        plt.figure()
+        ax1 = plt.gca()
+        plt.xlabel("$z$")
+        plt.ylabel("p(snr | b,w,z)")
+
+        # this will produce a plot of p(z) for all beam values at an
+        # arbitrary w-bin
+        plt.figure()
+        ax2 = plt.gca()
+        plt.xlabel("$z$")
+        plt.ylabel("p(b,w|z)")
 
    for i,b in enumerate(survey.beam_b):
        #iterate over the grid of weights
        bEths=Eths/b #this is the only bit that depends on j, but OK also!
        #now wbEths is the same 2D grid
-        #wbEths=bEths #this is the only bit that depends on j, but OK also!
+        # bEobs has dimensions Nwidths * Nz * NFRB
        bEobs=bEths*survey.Ss[nozlist] #should correctly multiply the last dimensions
        for j,w in enumerate(grid.eff_weights):
+            # p(SNR | b,w,DM,z) is given by differential/cumulative
+            # however, p(b,w|DM,z) is given by cumulative*w*Omegab / \sum_w,b cumulative*w*Omegab
+            # hence, the factor of cumulative cancels when calculating p(SNR,w,b), which is what we do here
            differential = grid.array_diff_lf(bEobs[j,:,:],Emin,Emax,gamma) * bEths[j,:,:]
            cumulative=grid.array_cum_lf(bEobs[j,:,:],Emin,Emax,gamma)
-            # check for zero probabilities
-            OK = np.where(cumulative.flatten()>0)[0]
-            zpsnr[OK] += (differential.flatten()[OK]/cumulative.flatten()[OK])*survey.beam_o[i]*w
+            if zwidths:
+                usew = np.repeat(w,nfrb).reshape([nz,nfrb]) # broadcast the z-dependent weight across FRBs
+            else:
+                usew = w
+
+            # this keeps track of the \sum_w,b cumulative*w*Omegab
+            dpbw = survey.beam_o[i]*usew*cumulative
+            pbw_norm += dpbw
+            zpsnr += differential*survey.beam_o[i]*usew
+
+            if doplot and j==5:
+                # arbitrarily plots for FRB iFRB
+                iFRB=0
+                # chooses an arbitrary width to plot at for j=5
+                ax1.plot(grid.zvals,(differential/cumulative)[:,iFRB],label="b_"+str(b)[0:4]+"_w_"+str(j))
+                ax2.plot(grid.zvals,dpbw[:,iFRB],label="b_"+str(b)[0:4]+"_w_"+str(j))
+    if doplot:
+        plt.sca(ax1)
+        plt.yscale('log')
+        plt.tight_layout()
+        plt.legend(fontsize=6)
+        plt.savefig("FRB1_psnr_given_bw.png")
+        plt.close()
+
+        plt.sca(ax2)
+        plt.tight_layout()
+        plt.legend(fontsize=6)
+        plt.savefig("FRB1_pbw_given_z.png")
+        plt.close()
+
    # normalise by the beam and FRB width values
-    NORM = np.sum(survey.beam_o) * np.sum(grid.eff_weights)
-    zpsnr /= NORM
-    zpsnr = zpsnr.reshape(Eths.shape[1:])
+    # This ensures that regions with zero probability don't produce nans due to 0/0
+    OK = np.where(pbw_norm.flatten() > 0.)
+    zpsnr = zpsnr.flatten()
+    zpsnr[OK] /= pbw_norm.flatten()[OK]
+    zpsnr = zpsnr.reshape([nz,nfrb])
 
-    # perform the weighting over the redshift axis
+    # perform the weighting over the redshift axis, i.e. to multiply by p(z|DM) and normalise \int p(z|DM) dz = 1
    rnorms = np.sum(rs,axis=0)
    zpsnr *= rs
    psnr = np.sum(zpsnr,axis=0) / rnorms
@@ -398,12 +450,6 @@
    llsum += snrll
    if doplot:
-        fig1=plt.figure()
-        plt.xlabel('z')
-        plt.ylabel('p(SNR | DM,z)p(z)')
-        #plt.xlim(0,1)
-        tm=0.
-        plt.yscale('log')
 
        fig4=plt.figure()
        plt.xlabel('z')
@@ -417,11 +463,7 @@
        #plt.xlim(0,1)
        plt.yscale('log')
 
-        fig3=plt.figure()
-        plt.xlabel('z')
-        plt.ylabel('p(z)')
-
-        tm1=np.max(psnr)
+        tm4=np.max(zpsnr)
        tm2=np.max(rs)
        for j in np.arange(survey.Ss.size):
            linestyle='-'
            if j>=survey.Ss.size/4:
                linestyle='--'
            if j>=survey.Ss.size/2:
                linestyle=':'
            if j>=3*survey.Ss.size/4:
                linestyle='-.'
 
-            plt.figure(fig1.number)
-            plt.plot(zvals,psnr[:,j],label=str(int(DMobs[j])),linestyle=linestyle,linewidth=1)
-
            plt.figure(fig4.number)
            plt.plot(zvals,rs[:,j],label=str(int(DMobs[j])),linestyle=linestyle,linewidth=2)
 
            plt.figure(fig2.number)
            plt.plot(zvals,zpsnr[:,j],label=str(int(DMobs[j])),linestyle=linestyle)
 
-            plt.figure(fig3.number)
-            plt.plot(zvals,sgV[:,j],label=str(int(DMobs[j])),linestyle=linestyle)
-
-        fig1.legend(ncol=2,loc='upper right',fontsize=8)
-        fig1.tight_layout()
-        plt.figure(fig1.number)
-        plt.ylim(tm1/1e5,tm1)
-        fig1.savefig('TEMP_p_psnr.pdf')
-        plt.close(fig1.number)
-
        fig4.legend(ncol=2,loc='upper right',fontsize=8)
        fig4.tight_layout()
        plt.figure(fig4.number)
@@ -460,14 +489,10 @@
        fig2.legend(ncol=2,loc='upper right',fontsize=8)
        fig2.tight_layout()
+        plt.ylim(tm4/1e5,tm4)
        fig2.savefig('TEMP_p_zpsnr.pdf')
        plt.close(fig2.number)
 
-        fig3.legend(ncol=2,loc='upper right',fontsize=8)
-        fig3.tight_layout()
-        fig3.savefig('TEMP_p_sgV.pdf')
-        plt.close(fig3.number)
-
        print("Total snr probabilities")
        for i,p in enumerate(psnr):
            print(i,survey.Ss[i],p)
@@ -498,8 +523,7 @@
        plt.tight_layout()
        plt.savefig('Plots/1d_dm_fit.pdf')
        plt.close()
-
-    print(survey.name, "1D list:", lllist)
+
    if dolist==0:
        return llsum
    elif dolist==1:
@@ -710,7 +734,7 @@ def calc_likelihoods_2D(grid,survey,doplot=False,norm=True,psnr=True,printit=Fal
        plt.xlabel('DM')
        plt.ylabel('p(DM)')
        plt.tight_layout()
-        plt.savefig('Plots/1d_dm_fit.pdf')
+        plt.savefig('1d_dm_fit.pdf')
        plt.close()
 
    ###### Calculates p(E | z,DM) ########
@@ -758,15 +782,25 @@
    Eths += grid.thresholds[:,izs2,idmobs1]*dkdmobs1*dkzs2
    Eths += grid.thresholds[:,izs1,idmobs2]*dkdmobs2*dkzs1
    Eths += grid.thresholds[:,izs2,idmobs2]*dkdmobs2*dkzs2
-
+
    FtoE = grid.FtoE[izs1]*dkzs1
    FtoE += grid.FtoE[izs2]*dkzs2
 
    # now do this in one go
    # We integrate p(snr|b,w) p(b,w) db dw.
    # Eths.shape[i] is the number of FRBs: length of izs1
-    psnr=np.zeros(Eths.shape[1])
+    # Eths has shape [nw, nFRB]: one threshold per width bin and FRB
+    nw,nfrb = Eths.shape
+    psnr=np.zeros([nfrb])
+    if grid.eff_weights.ndim == 2:
+        zwidths = True
+        usews = np.zeros([nfrb])
+    else:
+        zwidths = False
+
+    # initialised to hold w-b normalisations
+    pbw_norm = 0.
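Both likelihood routines now share this pattern: accumulate the numerator (differential luminosity-function terms) and the p(b,w|z) normalisation separately, then divide only where the normalisation is positive. A toy sketch of the structure, with random stand-ins for the luminosity-function terms (not the zdm implementation):

```python
import numpy as np

rng = np.random.default_rng(0)
beam_o = np.array([0.5, 0.3, 0.2])     # beam solid angles Omega_b
weights = np.array([0.6, 0.4])         # width weights w
nfrb = 10
diff = rng.random((3, 2, nfrb))        # differential LF term per (b, w, FRB)
cum = rng.random((3, 2, nfrb))         # cumulative LF term per (b, w, FRB)

psnr = np.zeros(nfrb)
pbw_norm = np.zeros(nfrb)
for i, b in enumerate(beam_o):
    for j, w in enumerate(weights):
        psnr += diff[i, j] * b * w     # numerator: the cumulative factor cancels
        pbw_norm += cum[i, j] * b * w  # denominator: sum_{b,w} cumulative * Omega_b * w

ok = pbw_norm > 0.
psnr[ok] /= pbw_norm[ok]               # avoid 0/0 where nothing is detectable
```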
for i,b in enumerate(survey.beam_b):
        bEths=Eths/b # array of shape NFRB, 1/b
        bEobs=bEths*survey.Ss[zlist]
@@ -778,10 +812,38 @@
            temp2=grid.array_cum_lf(bEths[j,:],Emin,Emax,gamma) # * FtoE
            #one dim in beamshape, one dim in FRB
            cumulative = temp2.T
            #*bEths[j,:] #multiplies by beam factors and weight
-            psnr += (differential/cumulative)*survey.beam_o[i]*w
+            if zwidths:
+                # a function of redshift
+                usew = w[izs1]*dkzs1 + w[izs2]*dkzs2
+                usews += usew
+            else:
+                usew = w # just a scalar quantity
+
+            # the product here is p(SNR|DM,z) = p(SNR|b,w,DM,z) * p(b,w|DM,z)
+            # p(SNR|b,w,DM,z) = differential/cumulative
+            # p(b,w|DM,z) = survey.beam_o[i]*usew * cumulative / sum(survey.beam_o[i]*usew * cumulative)
+            # hence, the "cumulative" part cancels
+
+            dpbw = survey.beam_o[i]*usew*cumulative
+            pbw_norm += dpbw
+
+            psnr += differential*survey.beam_o[i]*usew
 
-    NORM = np.sum(grid.eff_weights) * np.sum(survey.beam_o)
-    psnr /= NORM
+    OK = np.where(pbw_norm > 0.)[0]
+    psnr[OK] /= pbw_norm[OK]
+
+    if doplot:
+        plt.figure()
+        plt.xlabel("$s ( \\equiv {\\rm SNR}/{\\rm SNR_{\\rm th}})$")
+        plt.ylabel("p(s)")
+        plt.scatter(survey.Ss[zlist],psnr,c=Zobs)
+        cbar = plt.colorbar()
+        cbar.set_label('z')
+        plt.tight_layout()
+        plt.savefig("2D_ps.png")
+        plt.close()
+
    # keeps individual FRB values
    if dolist==2:
@@ -820,7 +882,7 @@
                  f"wzterm={np.sum(np.log10(psnr)):0.2f}," \
                  f"comb={np.sum(np.log10(psnr*pvals)):0.2f}")
 
-    # print(survey.name, "2D list:", lllist)
+
    if dolist==0:
        return llsum
    elif dolist==1:
diff --git a/zdm/loading.py b/zdm/loading.py
index 22af2557..dd270042 100644
--- a/zdm/loading.py
+++ b/zdm/loading.py
@@ -35,8 +35,8 @@ def set_state(alpha_method=1, cosmo=Planck18):
    vparams['width'] = {}
    vparams['width']['Wlogmean'] = 1.70267
    vparams['width']['Wlogsigma'] = 0.899148
-    vparams['width']['Wbins'] = 10
-    vparams['width']['Wscale'] = 2
+    vparams['width']['WNbins'] = 10
+    #vparams['width']['Wscale'] = 2
    vparams['width']['Wthresh'] = 0.5
    #vparams['width']['Wmethod'] = 2
@@ -119,10 +119,6 @@ def load_CHIME(Nbin:int=6, make_plots:bool=False, opdir='CHIME/',\
    print("Loading CHIME surveys from ",sdir)
 
    # loads beam data
-
-    #bounds = np.load(beams.beams_path+'bounds.npy')
-    #solids = np.load(beams.beams_path+'solids.npy')
-
    names=[]
    # Loops through CHIME declination bins
    for ibin in np.arange(Nbin):
@@ -185,7 +181,8 @@ def surveys_and_grids(init_state=None, alpha_method=1, NFRB=None,
                      repeaters=False, sdir=None, edir=None,
                      rand_DMG=False,
                      discard_empty=False,
-                      state_dict=None):
+                      state_dict=None,
+                      survey_dict=None,verbose=False):
    """ Load up a survey and grid for a real dataset
 
    Args:
@@ -208,7 +205,7 @@
            If true, randomise the galactic DM - for MCMC studies
        discard_empty (bool, optional):
            If true, does not calculate empty surveys (mostly for after latitude cuts)
-
+        survey_dict (dict, optional): survey metadata values to apply as overrides
    Raises:
        IOError: [description]
@@ -243,9 +240,9 @@
    surveys = []
    for survey_name in survey_names:
        # print(f"Initializing {survey_name}")
-        s = survey.load_survey(survey_name, state, dmvals,
+        s = survey.load_survey(survey_name, state, dmvals, zvals,
                               NFRB=NFRB, sdir=sdir, edir=edir,
-                               rand_DMG=rand_DMG)
+                               rand_DMG=rand_DMG,survey_dict=survey_dict)
        if discard_empty == False or s.NFRB != 0:
            # Check
necessary parameters exist if considering repeaters
@@ -256,12 +253,14 @@
        else:
            print("Skipping empty survey " + s.name)
 
-    print("Initialised surveys")
+    if verbose:
+        print("Initialised surveys")
 
    # generates zdm grid
    grids = misc_functions.initialise_grids(
        surveys, zDMgrid, zvals, dmvals, state, wdist=True, repeaters=repeaters)
-    print("Initialised grids")
+    if verbose:
+        print("Initialised grids")
 
    # Return Survey and Grid
    return surveys, grids
diff --git a/zdm/optical.py b/zdm/optical.py
index 61a1bb0f..b87ea3e1 100644
--- a/zdm/optical.py
+++ b/zdm/optical.py
@@ -1,17 +1,566 @@
 """
-This file contains functions related to optical observations.
+This library contains routines that interact with
+the FRB/astropath module and (optical) FRB host galaxy
+information.
 
-Currently, this is simply the approximate fraction of
+It includes the class "host_model" for describing the
+intrinsic FRB host galaxy distribution, associated functions,
+and the approximate fraction of
 detectable FRBs from Marnoch et al
 (https://doi.org/10.1093/mnras/stad2353)
 """
 
 import numpy as np
 from matplotlib import pyplot as plt
+from zdm import cosmology as cos
+from zdm import optical_params as op
+from scipy.interpolate import CubicSpline
+import os
+from importlib import resources
+import pandas
 
+class host_model:
+    """
+    A class to hold information about the intrinsic properties of FRB
+    host galaxies. Eventually, this should be expanded to be a
+    meta-class with different internal models. But for now, it's
+    just a simple one.
+
+    Ingredients are:
+        A model for describing the intrinsic distribution
+        of host galaxies. This model must be described
+        by some set of parameters, and be able to return a
+        prior as a function of intrinsic host galaxy magnitude.
+        This model is initialised via opstate.AbsModelID
+
+        A model for converting absolute to apparent host magnitudes.
+        This is by default an apparent r-band magnitude, though
+        in theory a user can work in whatever band they wish.
+
+    Internally, this class initialises:
+        An array of absolute magnitudes, which get weighted according
+        to the host model.
+        Internal variables associated with this are prefaced "Model"
+
+        An array of apparent magnitudes, which is used to compare with
+        host galaxy candidates
+        Internal variables associated with this are prefaced "App"
+
+        Arrays mapping absolute to apparent magnitude as a function
+        of redshift, to allow quick estimation of p(apparent_mag | DM)
+        for a given FRB survey with many FRBs
+        Internal variables associated with this are prefaced "Abs"
+
+    Note that while this class describes intrinsic "magnitudes",
+    magnitude here is really a proxy for whatever parameter is used
+    to intrinsically describe FRB host galaxies. However, only 1D
+    descriptions are currently implemented. Future descriptions will
+    include redshift evolution, and 2D descriptions (e.g. mass, SFR)
+    at any given redshift.
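+
+    A minimal usage sketch (names here are illustrative: "g" stands for
+    an initialised zdm grid object; the import paths follow this
+    module's location in the package):
+
+        from zdm import optical, optical_params
+        opstate = optical_params.Hosts()
+        model = optical.host_model(opstate=opstate)
+        model.init_zmapping(g.zvals)   # one-off absolute-to-apparent mapping
+        papps, pz = model.get_posterior(g, 500.)  # p(m_app|DM), p(z|DM) at DM=500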
+
+    """
+    def __init__(self,opstate=None,verbose=False):
+        """
+        Class constructor
+
+        Args:
+            opstate (class: Hosts, optional): class defining parameters
+                of the optical state model
+            verbose (bool, optional): to be verbose y/n
+
+        """
+        if opstate is None:
+            opstate = op.Hosts()
+
+
+        if opstate.AppModelID == 0:
+            if verbose:
+                print("Initialising simple luminosity function")
+            # must take arguments of (absoluteMag,z)
+            self.CalcApparentMags = SimpleApparentMags
+        else:
+            raise ValueError("Model ",opstate.AppModelID," not implemented")
+
+        if opstate.AbsModelID == 0:
+            if verbose:
+                print("Describing absolute mags with N independent bins")
+        elif opstate.AbsModelID == 1:
+            if verbose:
+                print("Describing absolute mags with spline interpolation of N points")
+        else:
+            raise ValueError("Model ",opstate.AbsModelID," not implemented")
+
+
+        self.AppModelID = opstate.AppModelID
+        self.AbsModelID = opstate.AbsModelID
+
+        self.opstate = opstate
+
+        self.init_abs_bins()
+        self.init_model_bins()
+        self.init_app_bins()
+        self.init_abs_prior()
+
+        self.ZMAP = False # records that we need to initialise this
+
+    #############################################################
+    ##################  Initialisation Functions  ###############
+    #############################################################
+
+    def init_abs_prior(self):
+        """
+        Initialises prior on absolute magnitude of galaxies according to the method.
+
+        """
+
+        if self.opstate.AbsPriorMeth==0:
+            # uniform prior in log space of absolute magnitude
+            Absprior = np.full([self.ModelNBins],1./self.ModelNBins)
+        else:
+            # other methods to be added as required
+            raise ValueError("Luminosity prior method ",self.opstate.AbsPriorMeth," not implemented")
+
+        # enforces normalisation of the prior to unity
+        Absprior /= np.sum(Absprior)
+        self.AbsPrior = Absprior
+
+
+        # this maps the weights from the parameter file to the absolute magnitudes used
+        # internally within the program. We now initialise this during an "init"
+        self.AbsMagWeights = self.init_abs_mag_weights()
+
+        # renormalises the weights, so all internal apparent mags sum to unity
+        # include this step in the init routine perhaps?
+        self.AbsMagWeights /= np.sum(self.AbsMagWeights)
+
+    def init_app_bins(self):
+        """
+        Initialises bins in apparent magnitude
+        It uses these to calculate priors for any given host galaxy magnitude.
+        This is a very simple set of uniformly spaced bins in magnitude,
+        and linear interpolation is used between them.
+        """
+
+        self.Appmin = self.opstate.Appmin
+        self.Appmax = self.opstate.Appmax
+        self.NAppBins = self.opstate.NAppBins
+
+        # this creates the bin edges
+        self.AppBins = np.linspace(self.Appmin,self.Appmax,self.NAppBins+1)
+        dAppBin = self.AppBins[1] - self.AppBins[0]
+        self.AppMags = self.AppBins[:-1] + dAppBin/2.
+        self.dAppmag = dAppBin
+
+    def init_abs_bins(self):
+        """
+        Initialises internal array of absolute magnitudes
+        This is a simple set of uniformly spaced bins in absolute
+        magnitude, which the absolute magnitude model gets
+        projected onto
+        """
+        # shortcuts
+        Absmin = self.opstate.Absmin
+        Absmax = self.opstate.Absmax
+        NAbsBins = self.opstate.NAbsBins
+
+
+        self.Absmin = Absmin
+        self.Absmax = Absmax
+        self.NAbsBins = NAbsBins
+
+        # generally large number of abs magnitude bins
+        MagBins = np.linspace(Absmin,Absmax,NAbsBins+1)
+        dMag = MagBins[1]-MagBins[0]
+        AbsMags = MagBins[:-1]+dMag/2.
# bin centres
+
+        self.MagBins = MagBins
+        self.dMag = dMag
+        self.AbsMags = AbsMags
+
+    def init_model_bins(self):
+        """
+        Initialises bins for the simple model of an absolute
+        magnitude prior. This is much sparser than the app or
+        abs bins, since these model bins correspond to
+        parameters which may get adjusted by e.g. an MCMC
+
+        The base unit here is assumed to be absolute r-band
+        magnitude M, but in principle this works for whatever
+        absolute unit is being used by the model.
+        """
+        ModelNBins = self.opstate.NModelBins
+        self.ModelNBins = ModelNBins
+
+        if self.AbsModelID == 0:
+            # bins are centres
+            dbin = (self.Absmax - self.Absmin)/ModelNBins
+            ModelBins = np.linspace(self.Absmin+dbin/2.,self.Absmax-dbin/2.,ModelNBins)
+
+        elif self.AbsModelID == 1:
+            # bins on edges
+            ModelBins = np.linspace(self.Absmin,self.Absmax,ModelNBins)
+
+        self.ModelBins = ModelBins
+        self.dModel = ModelBins[1]-ModelBins[0]
+
+    def init_zmapping(self,zvals):
+        """
+        For a set of redshifts, initialise mapping
+        between intrinsic magnitudes and apparent magnitudes
+
+        This routine only needs to be called once, since the model
+        to convert absolute to apparent magnitudes is fixed
+
+        It is not set automatically however, and needs to be called
+        with a set of z values. This is all for speedup purposes.
+
+        Args:
+            zvals (np.ndarray, float): array of redshifts over which
+                to map absolute to apparent magnitudes.
+        """
+
+        # records that this has been initialised
+        self.ZMAP = True
+
+        # mapping of absolute to apparent magnitude
+        self.zmap = self.CalcApparentMags(self.AbsMags,zvals)
+        self.zvals = zvals
+        self.NZ = self.zvals.size
+
+        self.init_maghist()
+
+    def init_maghist(self):
+        """
+        Initialises the array mapping redshifts and absolute magnitudes
+        to redshift and apparent magnitude
+
+        Calculates the internal maghist array, of size self.NAppBins X self.NZ
+
+        No return value.
+
+        """
+
+        # for current model, calculate weighted histogram of apparent magnitude
+        # for each redshift. Done by converting intrinsic to apparent for each z,
+        # then summing up the associated weights
+        maghist = np.zeros([self.NAppBins,self.NZ])
+        for i in np.arange(self.NZ):
+            # creates weighted histogram of apparent magnitudes, using
+            # the model weights AbsMagWeights (which are fixed for all z)
+            hist,bins = np.histogram(self.zmap[:,i],weights=self.AbsMagWeights,bins=self.AppBins)
+
+            # NOTE: these should NOT be re-normalised, since the normalisation reflects
+            # true magnitudes which fall off the apparent magnitude histogram.
+            maghist[:,i] = hist
+
+        self.maghist = maghist
+
+    def reinit_model(self):
+        """
+        Re-initialises all internal info which depends on the optical
+        param model. It assumes that the changes have been implemented in
+        self.AbsPrior
+        """
+
+        # this maps the weights from the parameter file to the absolute magnitudes used
+        # internally within the program. We now initialise this during an "init"
+        self.AbsMagWeights = self.init_abs_mag_weights()
+
+        # renormalises the weights, so all internal apparent mags sum to unity
+        # include this step in the init routine perhaps?
+        self.AbsMagWeights /= np.sum(self.AbsMagWeights)
+
+        self.init_maghist()
+
+    def init_abs_mag_weights(self):
+        """
+        Assigns a weight to each of the absolute magnitudes
+        in the internal array (which generally is very large)
+        in terms of the absolute magnitude parameterisation
+        (generally, only a few parameters)
+        """
+
+        if self.AbsModelID==0:
+            # describes absolute magnitudes via ModelNBins
+            # between AbsMin and AbsMax
+            # coefficients at centre of bins
+
+            # gives mapping from model bins to mag bins
+            self.imags = ((self.AbsMags - self.Absmin)/self.dModel).astype('int')
+
+            # rounding errors
+            toohigh = np.where(self.imags == self.ModelNBins)
+            self.imags[toohigh] = self.ModelNBins-1
+
+            weights = self.AbsPrior[self.imags]
+        elif self.AbsModelID == 1:
+            # As above, but with spline interpolation of model.
+            # coefficients span full range
+            cs = CubicSpline(self.ModelBins,self.AbsPrior)
+            weights = cs(self.AbsMags)
+            toolow = np.where(weights < 0.)
+            weights[toolow] = 0.
+        else:
+            raise ValueError("This weighting scheme not yet implemented")
+        return weights
+
+
+    #############################################################
+    ##################   Path Calculations      #################
+    #############################################################
+
+    def estimate_unseen_prior(self,mag_limit):
+        """
+        Calculates PU, the prior that an FRB host galaxy of a
+        particular DM is unseen in the optical image
+
+        This requires that init_path_raw_prior_Oi has been called first
+
+        NOTE: The total normalisation of priors in the magnitude range
+        may be less than unity. This is because some probability
+        may fall outside of the magnitude range being examined.
+        Hence, the correct normalisation is found by summing
+        the visible magnitudes, and subtracting them from unity.
+
+        Args:
+            mag_limit (float): maximum observable magnitude of host galaxies
+
+        Returns:
+            PU (float): probability PU of the true host being unseen in the
+                optical image.
+
+        """
+
+        invisible = np.where(self.AppMags > mag_limit)[0]
+
+        PU = np.sum(self.priors[invisible])
+
+        return PU
+
+    def path_raw_prior_Oi(self,mags,ang_sizes,Sigma_ms):
+        """
+        Function to pass to astropath module
+        for calculating a raw FRB prior.
+
+
+        NOTE: as of all recent PATH iterations, the galaxy angular
+        size should NOT be included in the calculation, since
+        this gets integrated over in estimating the offset error.
+        Nonetheless, this function *must* accept an ang_size
+        argument.
+
+        NOTE2: "init_path_raw_prior_Oi" must be called before using
+        this function. This is because the full prior requires a zDM
+        grid and an FRB DM, yet this cannot be passed to raw_prior_Oi
+        within PATH.
+
+        Args:
+            mags (tuple, float): host galaxy r-band magnitudes
+            ang_sizes (tuple, float): host galaxy angular sizes (arcsec I believe)
+            Sigma_ms (tuple, float): galaxy densities on the sky
+
+        Returns:
+            Ois (tuple): priors for host galaxy candidates with magnitudes mags
+
+        """
+
+        ngals = len(mags)
+        Ois = []
+        for i,mag in enumerate(mags):
+
+            # calculate the bins in apparent magnitude prior
+            kmag2 = (mag - self.Appmin)/self.dAppmag
+            imag1 = int(np.floor(kmag2))
+
+            # careful with interpolation - priors are for magnitude bins
+            # with bin edges given by Appmin + N dAppmag.
+            # We probably want to smooth this eventually due to minor
+            # numerical tweaks
+
+            #kmag2 -= imag1
+            #kmag1 = 1.-kmag2
+            #imag2 = imag1+1
+            #prior = kmag1*self.priors[imag1] + kmag2*self.priors[imag2]
+
+            # very simple - just gives probability for bin it's in
+            Oi = self.priors[imag1]
+            Oi /= Sigma_ms[i] # normalise by host counts
+            Ois.append(Oi)
+
+        Ois = np.array(Ois)
+        return Ois
+
+    def init_path_raw_prior_Oi(self,DM,grid):
+        """
+        Initialises the priors for a particular DM.
+        This performs a function very similar to
+        "get_posterior" except that it explicitly
+        only operates on a single DM, and saves the
+        information internally so that
+        path_raw_prior_Oi can be called for numerous
+        host galaxy candidates.
+
+        It returns the prior distribution.
+
+        Args:
+            DM [float]: dispersion measure of an FRB (pc cm-3)
+            grid (class grid): initialised grid object from which
+                to calculate priors
+
+        Returns:
+            priors (float): vector of priors on host galaxy apparent magnitude
+        """
+
+        # we start by getting the posterior distribution p(z)
+        # for an FRB with DM DM seen by the 'grid'
+        pz = get_pz_prior(grid,DM)
+
+        # we now calculate the list of priors - for the array
+        # defined by self.AppBins with bin centres at self.AppMags
+        priors = self.calc_magnitude_priors(grid.zvals,pz)
+
+        # stores knowledge of the DM used to calculate the priors
+        self.prior_DM = DM
+        self.priors = priors
+
+        return priors
+
+
+    def calc_magnitude_priors(self,zlist:np.ndarray,pzlist:np.ndarray):
+        """
+        Calculates priors as a function of magnitude for
+        a given redshift distribution.
+
+        Args:
+            zlist (np.ndarray, float): array of redshifts
+            pzlist (np.ndarray, float): array of probabilities of the FRB
+                occurring at each of those redshifts
+
+        Returns:
+            pmags: probability-weighted magnitude distribution over
+            the bins self.AppBins
+
+        """
+        # we integrate over the host absolute magnitude distribution
+
+        # ensures that pzlist is normalised
+        pzlist /= np.sum(pzlist)
+
+        for i,absmag in enumerate(self.AbsMags):
+            plum = self.AbsMagWeights[i]
+            mags = self.CalcApparentMags(absmag,zlist)
+            temp,bins = np.histogram(mags,weights=pzlist*plum,bins=self.AppBins)
+            if i==0:
+                pmags = temp
+            else:
+                pmags += temp
+
+        return pmags
+
+    def get_posterior(self, grid, DM):
+        """
+        Returns the posterior apparent-magnitude and redshift
+        distributions for FRBs of the given DM(s), using a grid object.
+        Note: this calculates a prior for PATH, but is a posterior
+        from zDM's point of view.
+
+        Args:
+            grid (class grid object): grid object defining p(z,DM)
+            DM (float, np.ndarray OR scalar): FRB DM(s)
+
+        Returns:
+            papps (np.ndarray, floats): probability distribution of apparent magnitudes given DM
+            pz (np.ndarray, floats): probability distribution of redshift given DM
+        """
+        # Step 1: get prior on z
+        pz = get_pz_prior(grid,DM)
+
+        ### STEP 2: get apparent magnitude distribution ###
+        if hasattr(DM,"__len__"):
+            papps = np.dot(self.maghist,pz)
+        else:
+            papps = self.maghist*pz
+
+
+        return papps,pz
+
+def get_pz_prior(grid, DM):
+    """
+    Returns the posterior redshift distribution p(z|DM) for a given
+    grid and FRB dispersion measure(s)
+
+    Args:
+        grid (class grid object): grid object modelling p(z,DM)
+        DM (float or np.ndarray of floats): FRB dispersion measure, pc cm^-3
+
+    Returns:
+        pz (np.ndarray): probability distribution in redshift space
+    """
+
+    ### get PZ distribution ###
+    # Get the coefficients for linear interpolation between DM bins
+    # of the grid
+    dmvals = grid.dmvals
+    ddm = dmvals[1]-dmvals[0]
+    # get the grid DM values that this DM sits between
+    idm1 = (np.floor(DM/ddm)).astype('int')
+    idm2 = idm1+1
+    dm1 = dmvals[idm1]
+    dm2 = dmvals[idm2]
+    # get the coefficients of dm1 and dm2
+    kdm2 = (DM - dm1)/ddm
+    kdm1 = 1.-kdm2
+
+    # calculate p(z) based on interpolating grid.rates
+    pz = kdm1 * grid.rates[:,idm1] + kdm2 * grid.rates[:,idm2]
+    pz = pz/np.sum(pz,axis=0)
+    return pz
+
+def SimpleApparentMags(Abs,zs):
+    """
+    Function to convert galaxy absolute to apparent magnitudes.
+
+    Nominally, magnitudes are r-band magnitudes, but this function
+    is so simple it doesn't matter.
+
+    Just applies a distance correction - no k-correction.
+
+    Args:
+        Abs (float or array of floats): galaxy absolute magnitudes
+        zs (float or array of floats): redshifts of galaxies
+
+    Returns:
+        ApparentMags: NAbs x NZ array of magnitudes, where these
+            are the dimensions of the inputs
+    """
+
+    # calculates luminosity distances (Mpc)
+    lds = cos.dl(zs)
+
+    # 10 pc reference distance for absolute magnitudes
+    dabs = 1e-5 # in units of Mpc
+
+    # relative magnitude
+    dMag = 2.5*np.log10((lds/dabs)**2)
+
+
+    if np.isscalar(zs) or np.isscalar(Abs):
+        # just return the sum, be it scalar + scalar,
+        # scalar + array, or array + scalar
+        # this also ensures that the dimensions are as expected
+
+        ApparentMags = Abs + dMag
+    else:
+        # Convert to multiplication so we can use
+        # numpy.outer
+        temp1 = 10**Abs
+        temp2 = 10**dMag
+        ApparentMags = np.outer(temp1,temp2)
+        ApparentMags = np.log10(ApparentMags)
+    return ApparentMags
+
-def p_unseen(zvals,plot=False):
+def p_unseen_Marnoch(zvals,plot=False):
    """
    Returns probability of a hist being unseen
    in typical VLT observations.
@@ -61,3 +610,202 @@
    fitv[toohigh]=1.
    return fitv
+
+
+
+def simplify_name(TNSname):
+    """
+    Simplifies an FRB name to basics
+    """
+    # reduces all FRB names to their six digits
+
+    if TNSname[0:3] == "FRB":
+        TNSname = TNSname[3:]
+
+    if len(TNSname) == 9:
+        name = TNSname[2:-1]
+    elif len(TNSname) == 8:
+        name = TNSname[2:]
+    elif len(TNSname) == 7:
+        name = TNSname[:-1]
+    elif len(TNSname) == 6:
+        name = TNSname
+    else:
+        print("Do not know how to process ",TNSname)
+        name = None
+    return name
+
+def matchFRB(TNSname,survey):
+    """
+    Gets the FRB id from the survey list
+    Returns None if not in the survey
+    Used to match properties between a survey
+    and other FRB libraries
+    """
+
+    name = simplify_name(TNSname)
+    match = None
+    for i,frb in enumerate(survey.frbs["TNS"]):
+        if name == simplify_name(frb):
+            match = i
+            break
+    return match
+
+
+# this defines the ICS FRBs for which we have PATH info
+frblist=['FRB20180924B','FRB20181112A','FRB20190102C','FRB20190608B',
+    'FRB20190611B','FRB20190711A','FRB20190714A','FRB20191001A',
+    'FRB20191228A','FRB20200430A','FRB20200906A','FRB20210117A',
+    'FRB20210320C','FRB20210807D','FRB20211127I','FRB20211203C',
+    'FRB20211212A','FRB20220105A','FRB20220501C',
+    'FRB20220610A','FRB20220725A','FRB20220918A',
+    'FRB20221106A','FRB20230526A','FRB20230708A',
+    'FRB20230731A','FRB20230902A','FRB20231226A','FRB20240201A',
+    'FRB20240210A','FRB20240304A','FRB20240310A']
+
+
+
+def run_path(name,model,PU=0.1,usemodel = False, sort = False):
+    """
+    Evaluates PATH on an FRB.
+
+    Args:
+        name (str): FRB name, passed to frb.frb.FRB.by_name
+        model (host_model): host galaxy model object
+        PU (float): prior probability that the true host is unseen
+        usemodel (bool): if True, use the user-supplied candidate prior;
+            otherwise use the standard 'inverse' prior
+        sort (bool): if True, sort the outputs by posterior P(O|x)
+    """
+    from frb.frb import FRB
+    from astropath.priors import load_std_priors
+    from astropath.path import PATH
+
+    ######### Loads FRB, and modifies properties #########
+    my_frb = FRB.by_name(name)
+
+    # do we even still need this?
I guess not, but will keep it here just in case
+    my_frb.set_ee(my_frb.sig_a,my_frb.sig_b,my_frb.eellipse['theta'],
+        my_frb.eellipse['cl'],True)
+
+    # reads in galaxy info
+    ppath = os.path.join(resources.files('frb'), 'data', 'Galaxies', 'PATH')
+    pfile = os.path.join(ppath, f'{my_frb.frb_name}_PATH.csv')
+    ptbl = pandas.read_csv(pfile)
+
+    # Load prior
+    priors = load_std_priors()
+    prior = priors['adopted'] # Default
+
+    theta_new = dict(method='exp',
+        max=priors['adopted']['theta']['max'],
+        scale=0.5)
+    prior['theta'] = theta_new
+
+    # change this to something depending on the FRB DM
+    prior['U']=PU
+
+    candidates = ptbl[['ang_size', 'mag', 'ra', 'dec', 'separation']]
+
+    this_path = PATH()
+    this_path.init_candidates(candidates.ra.values,
+        candidates.dec.values,
+        candidates.ang_size.values,
+        mag=candidates.mag.values)
+    this_path.frb = my_frb
+
+    frb_eellipse = dict(a=my_frb.sig_a,
+        b=my_frb.sig_b,
+        theta=my_frb.eellipse['theta'])
+
+    this_path.init_localization('eellipse',
+        center_coord=this_path.frb.coord,
+        eellipse=frb_eellipse)
+
+    # this results in a prior which is uniform in log space
+    # when summed over all galaxies with the same magnitude
+    if usemodel:
+        this_path.init_cand_prior('user', P_U=prior['U'])
+    else:
+        this_path.init_cand_prior('inverse', P_U=prior['U'])
+
+    # this is for the offset
+    this_path.init_theta_prior(prior['theta']['method'],
+        prior['theta']['max'],
+        prior['theta']['scale'])
+
+    P_O=this_path.calc_priors()
+
+    # Calculate p(O_i|x)
+    debug = True
+    P_Ox,P_U = this_path.calc_posteriors('fixed',
+        box_hwidth=10.,
+        max_radius=10.,
+        debug=debug)
+    mags = candidates['mag']
+
+    if sort:
+        indices = np.argsort(P_Ox)
+        P_O = P_O[indices]
+        P_Ox = P_Ox[indices]
+        mags = mags[indices]
+
+    return P_O,P_Ox,P_U,mags
+
+
+
+
+def plot_frb(name,ralist,declist,plist,opfile):
+    """
+    Plots an FRB localisation together with its candidate host galaxies.
+
+    Args:
+        name (str): FRB name, passed to frb.frb.FRB.by_name
+        ralist, declist: coordinates (deg) of deviated FRB positions to plot
+        plist: list of p(O|x) values used to colour those positions
+        opfile (str): output filename for the figure
+    """
+
+    from frb.frb import FRB
+    from astropath.priors import load_std_priors
+    from astropath.path import PATH
+
+    ######### Loads FRB, and modifies properties #########
+    my_frb = FRB.by_name(name)
+
+    ppath = os.path.join(resources.files('frb'), 'data', 'Galaxies', 'PATH')
+    pfile = os.path.join(ppath, f'{my_frb.frb_name}_PATH.csv')
+    ptbl = pandas.read_csv(pfile)
+
+    candidates = ptbl[['ang_size', 'mag', 'ra', 'dec', 'separation']]
+
+    #raoff=199.
+ 2910/3600 # -139./3600 + #decoff=-18.8 -139./3600 #+2910/3600 + + raoff = my_frb.coord.ra.deg + decoff = my_frb.coord.dec.deg + + cosfactor = np.cos(my_frb.coord.dec.rad) + + plt.figure() + plt.xlabel('ra [arcsec] - relative') + plt.ylabel('dec [arcsec] - relative') + + + plt.scatter([(ralist-raoff)*3600*cosfactor],[(declist-decoff)*3600],marker='+', + c=plist, vmin=0.,vmax=1.,label="Deviated FRB") + + + plt.scatter((candidates['ra']-raoff)*3600*cosfactor,(candidates['dec']-decoff)*3600, + s=candidates['ang_size']*300, facecolors='none', edgecolors='r', + label="Candidate host galaxies") + + + # orig scatter plot command + sc = plt.scatter([(my_frb.coord.ra.deg-raoff)*3600*cosfactor],[(my_frb.coord.dec.deg-decoff)*3600], + marker='x',label="True FRB") + plt.colorbar(sc) + + for i, ra in enumerate(candidates['ra']): + ra=(ra-raoff)*3600*cosfactor + dec=(candidates['dec'][i]-decoff)*3600 + plt.text(ra,dec,str(candidates['ang_size'][i])[0:4]) + plt.legend() + plt.tight_layout() + plt.savefig(opfile) + plt.tight_layout() + plt.close() diff --git a/zdm/optical_params.py b/zdm/optical_params.py new file mode 100644 index 00000000..f1eef223 --- /dev/null +++ b/zdm/optical_params.py @@ -0,0 +1,70 @@ +""" Classes for optical properties """ + +from dataclasses import dataclass, field + +from zdm import data_class +import numpy as np + +@dataclass +class Hosts(data_class.myDataClass): + # None of the fields should start with an X + Absmin: float = field( + default=-30, + metadata={'help': "Minimum host absolute magnitude", + 'unit': 'M_r^{min}', + 'Notation': '', + }) + Absmax: float = field( + default=0., + metadata={'help': "Maximum host absolute magnitude", + 'unit': 'M_r^{max}', + 'Notation': '', + }) + NAbsBins: int = field( + default=1000, + metadata={'help': "Number of absolute magnitude bins used internally", + 'unit': '', + 'Notation': '', + }) + NModelBins: int = field( + default=10, + metadata={'help': "Number of absolute magnitude bins used in the model", + 'unit': '', + 'Notation': '', + }) + Appmin: float = field( + default=10, + metadata={'help': "Minimum host apparent magnitude", + 'unit': 'm_r^{min}', + 'Notation': '', + }) + Appmax: float = field( + default=35, + metadata={'help': "Maximum host apparent magnitude", + 'unit': 'm_r^{max}', + 'Notation': '', + }) + NAppBins: int = field( + default=250, + metadata={'help': "Number of apparent magnitude bins", + 'unit': '', + 'Notation': '', + }) + AbsPriorMeth: int = field( + default=0, + metadata={'help': "Model for abs mag prior and function description. 0: uniform distribution. Others to be implemented.", + 'unit': '', + 'Notation': '', + }) + AppModelID: int = field( + default=0, + metadata={'help': "Model for converting absolute to apparent magnitudes. 0: no k-correction. Others to be implemented.", + 'unit': '', + 'Notation': '', + }) + AbsModelID: int = field( + default=0, + metadata={'help': "Model for describing absolute magnitudes. 0: Simple histogram of absolute magnitudes. 
1: spline interpolation of histogram.", + 'unit': '', + 'Notation': '', + }) diff --git a/zdm/parameters.py b/zdm/parameters.py index 9a701418..410979fb 100644 --- a/zdm/parameters.py +++ b/zdm/parameters.py @@ -253,13 +253,17 @@ class WidthParams(data_class.myDataClass): }, ) - Wbins: int = field( + WNbins: int = field( default=5, metadata={"help": "Number of bins for FRB width distribution", "unit": ""}, ) - Wscale: int = field( - default=3.5, - metadata={"help": "Log-scaling of bins for width distribution", "unit": ""}, + WMin: int = field( + default=0.1, + metadata={"help": "Minimum scattering value to model", "unit": "ms"}, + ) + WMax: int = field( + default=100, + metadata={"help": "Maximum scattering value to model", "unit": "ms"}, ) @@ -282,6 +286,14 @@ class ScatParams(data_class.myDataClass): "Notation": "\log \sigma_{s}", }, ) + Smaxsigma: float = field( + default=3., + metadata={ + "help": " Multiple of the Slogsigma out to which to model the width distribution ", + "unit": "", + "Notation": "N_{\sigma_{s}}", + }, + ) Sfnorm: float = field( default=600, metadata={ @@ -298,6 +310,14 @@ class ScatParams(data_class.myDataClass): "Notation": "\lambda", }, ) + ScatDist: int = field( + default=2, + metadata={ + "help": "Method for describing scattering distribution. 0 log uniform, 1 is lognormal, 2 upper lognormal", + "unit": "", + "Notation": "", + }, + ) # FRB Energetics -- energy diff --git a/zdm/repeat_grid.py b/zdm/repeat_grid.py index 3165e950..06d66c47 100644 --- a/zdm/repeat_grid.py +++ b/zdm/repeat_grid.py @@ -12,9 +12,6 @@ - Rmax: Maximum repetition rate of repeaters - Rgamma: *Differential* number of repeaters: dN/dR ~ R^Rgamma - - - Some notes regarding time dilation: #1: dVdtau @@ -52,14 +49,19 @@ # NZ is meant to toggle between calculating nonzero elements or not # In theory, if False, it tries to perform calculations -# where there is no flux -# Currenty, it seems like setting it to False causes problems +# where there is no flux. But that also saves calculation time +# in determining where this is no flux, and re-arranging arrays +# accordingly. Currently, it seems like setting it to False +# causes problems. In future, fix, and investigate the time +# saved. NZ=True # These constants represent thresholds below which we assume the chance -# to produce repetition is identically zero +# to produce repetition is identically zero, for speedup purposes, and +# to avoid numerical error in calculating p(reps) = 1 - p(1) - p(0) LSRZ = -10. #Was causing problems at merely -6 SET_REP_ZERO=10**LSRZ # forces p(n=0) bursts to be unity when a maximally repeating FRB has SET_REP_ZERO expected events or less + # the above is crazy, the problem is it should zet p_zero + p_single to be unity, i.e. chance of double to be zero # but how do we do this? presumably it's the total minus psingle? #LZRZ = np.log10(SET_REP_ZERO) @@ -170,6 +172,7 @@ def __init__(self, survey, state, zDMgrid, zvals, dmvals, smear_mask, wdist, else: self.use_sfr = self.sfr + # this is the calculation that initiates everything. 
It is a poor name choice self.calc_Rthresh(Exact=Exact,MC=MC,doplots=doplots) def update(self, vparams: dict, ALL=False, prev_grid=None): @@ -331,7 +334,6 @@ def calc_Rthresh(self,Exact=True,MC=False,Pthresh=0.01,doplots=False,old=[False, """ - # calculates rate corresponding to Pthresh # P = 1.-(1+R) * np.exp(-R) # P = 1.-(1+R) * (1.-R + R^2/2) = 1 - [1 +R -R - R^2 + R^2/2 + cubic] @@ -1077,18 +1079,27 @@ def calcRmult(self,beam_b=1,time_days=1): We also need to account for the rate scaling if it exists. """ + Rmult=0 # initiates the variable. It's actually going to be NZ x NDM + # calculates Rmult over a range of burst widths and probabilities - #print("Calculating Rmult using ",beam_b) for iw,w in enumerate(self.eff_weights): # we want to calculate for each point an Rmult such that # Rmult_final = \sum wi Rmulti - # does this make sense? Effectively its the sum of rates. Well yes it does! AWESOME! + # does this make sense? Effectively it's the sum of rates. Well yes it does! AWESOME! # numerator for Rmult for this width - #Note: grid.thresholds already will include effect of alpha - if iw==0: - Rmult = w*self.array_cum_lf(self.thresholds[iw,:,:]/beam_b,self.Emin,self.Emax,self.gamma) - else: + # Note: grid.thresholds already will include effect of alpha + # The dimensions of self.thresholds is NW x NZ x NDM, so self.thresholds[iw,:,:] is NZ x NDM + #if iw==0: + # Rmult = w*self.array_cum_lf(self.thresholds[iw,:,:]/beam_b,self.Emin,self.Emax,self.gamma) + #else: + if self.eff_table.ndim == 2: + # w is a scalar Rmult += w*self.array_cum_lf(self.thresholds[iw,:,:]/beam_b,self.Emin,self.Emax,self.gamma) + else: + # w is a vector of length NZ + Rmult += (self.array_cum_lf(self.thresholds[iw,:,:]/beam_b,self.Emin,self.Emax,self.gamma).T*w).T + + # normalisation Rmult /= self.vector_cum_lf(self.state.rep.RE0,self.Emin,self.Emax,self.gamma) # calculates the expectation value for a single pointing diff --git a/zdm/scripts/MonteCarloFRBs/test.py b/zdm/scripts/MonteCarloFRBs/test.py new file mode 100644 index 00000000..20aea7c3 --- /dev/null +++ b/zdm/scripts/MonteCarloFRBs/test.py @@ -0,0 +1,84 @@ +import os + +from astropy.cosmology import Planck18 +#from zdm import cosmology as cos +from zdm import misc_functions +from zdm import parameters +#from zdm import survey +#from zdm import pcosmic +#from zdm import iteration as it +from zdm import loading +#from zdm import io +import matplotlib.pyplot as plt + +import numpy as np +#from matplotlib import pyplot as plt +from pkg_resources import resource_filename +#import time + +from frb.dm import igm +#params +NMC = 10 +nu = 1. # GHz +t_samp = 1.182 # ms +bandwidth = 1.0 # MHz +snr_thresh = 4. # SNR threshold +mean_DM_MW = 80. # pc cm^-3 +disp_DM_MW = 50. 
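+# pc cm^-3
+
+# Illustrative sketch: mean_DM_MW and disp_DM_MW are not used elsewhere in
+# this test; one plausible use (an assumption, not an existing zdm routine)
+# is to draw a per-FRB Galactic DM contribution:
+if False:
+    DM_MW = np.random.normal(mean_DM_MW, disp_DM_MW, NMC)
+    DM_MW = np.clip(DM_MW, 0., None)  # truncate unphysical negative draws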
+
+
+
+def creategrid():
+
+    # in case you wish to switch to another output directory
+    #opdir = "../ScatSim/"
+    #if not os.path.exists(opdir):
+    #    os.mkdir(opdir)
+
+    # Initialise surveys and grids
+    #sdir = os.path.join(resource_filename('zdm', 'data'), 'Surveys')
+    sdir = '/Users/cjames/CRAFT/Git/zdm/zdm/data/Surveys'
+
+    names = ["CRAFT_ICS_1300"]
+
+    # essentially turns off DM host and sets all FRB widths to ~0 (or close enough)
+    param_dict = {'lmean': 0.01, 'lsigma': 0.4, 'Wlogmean': -1,'WNbins': 1,
+        'Wlogsigma': 0.1, 'Slogmean': -2,'Slogsigma': 0.1}
+    state = parameters.State()
+    state.set_astropy_cosmo(Planck18)
+    state.update_params(param_dict)
+
+
+    surveys, grids = loading.surveys_and_grids(survey_names = names,
+        repeaters=False, sdir=sdir)
+
+    # plots it
+    #misc_functions.plot_grid_2(g.rates,g.zvals,g.dmvals,
+    #    name=os.path.join(opdir,'tau_dm_grid.png'),norm=3,log=True,
+    #    label='$\\log_{10} p({\\rm DM}_{\\rm EG}|z)$ [a.u.]',
+    #    project=False,
+    #    zmax=2.5,DMmax=3000,cmap="Oranges",Aconts=[0.01,0.1,0.5])#,
+    #    pdmgz=[0.05,0.5,0.95])
+
+    return grids[0]
+
+
+
+
+# get the grid
+g = creategrid()
+
+
+
+
+
+# create DM_EG by sampling the grid
+frbs = g.GenMCSample(NMC)
+frbs = np.array(frbs)
+zs = frbs[:,0]
+DMcos = frbs[:,1]
+snrs = frbs[:,3]
+#bs = frbs[:,2]
+#ws = frbs[:,4]
+
+print(snrs)
diff --git a/zdm/scripts/Path/estimate_path_priors.py b/zdm/scripts/Path/estimate_path_priors.py
new file mode 100644
index 00000000..133742ac
--- /dev/null
+++ b/zdm/scripts/Path/estimate_path_priors.py
@@ -0,0 +1,116 @@
+"""
+Script showing how to use zDM as priors for CRAFT
+host galaxy magnitudes.
+
+It requires the FRB and astropath modules to be installed.
+
+This does NOT include optimisation of any parameters
+"""
+
+#standard Python imports
+import numpy as np
+from matplotlib import pyplot as plt
+
+# imports from the "FRB" series
+from zdm import optical as opt
+from zdm import loading
+from zdm import cosmology as cos
+from zdm import parameters
+
+import astropath.priors as pathpriors
+
+
+def calc_path_priors():
+    """
+    Loops over all ICS FRBs
+    """
+
+    frblist = opt.frblist
+
+    NFRB = len(frblist)
+
+    # here is where I should initialise a zDM grid
+    state = parameters.State()
+    cos.set_cosmology(state)
+    cos.init_dist_measures()
+    model = opt.host_model()
+    name='CRAFT_ICS_1300'
+    ss,gs = loading.surveys_and_grids(survey_names=[name])
+    g = gs[0]
+    s = ss[0]
+    # must be done once for any fixed zvals
+    model.init_zmapping(g.zvals)
+
+    # do this only for a particular FRB
+    # it gives a prior on apparent magnitude and pz
+    #AppMagPriors,pz = model.get_posterior(g,DMlist)
+
+    # do this once per "model" object
+    pathpriors.USR_raw_prior_Oi = model.path_raw_prior_Oi
+
+    allmags = None
+    allPOx = None
+
+    for frb in frblist:
+        # iterates over the FRBs: "Do FRB"
+        # P_O is the prior for each galaxy
+        # P_Ox is the posterior
+        # P_Ux is the posterior for it being unobserved
+        # mags is the list of galaxy magnitudes
+
+        # determines if this FRB was seen by the survey, and
+        # if so, what its DMEG is
+        imatch = opt.matchFRB(frb,s)
+        if imatch is None:
+            print("Could not find ",frb," in survey")
+            continue
+
+        DMEG = s.DMEGs[imatch]
+
+        # original calculation
+        P_O1,P_Ox1,P_Ux1,mags1 = opt.run_path(frb,model,usemodel=False,PU=0.1)
+
+        model.init_path_raw_prior_Oi(DMEG,g)
+        PU = model.estimate_unseen_prior(mag_limit=26) # might not be correct
+        P_O2,P_Ox2,P_Ux2,mags2 = opt.run_path(frb,model,usemodel=True,PU = PU)
+
+        if False:
+            # compares outcomes
+            print("FRB ",frb)
+            print("    m_r    P_O: old new    P_Ox: old new")
+            for i,P_O in enumerate(P_O1):
+                print(i,mags1[i],P_O1[i],P_O2[i],P_Ox1[i],P_Ox2[i])
+            print("\n")
+
+        # keep cumulative histogram of posterior magnitude distributions
+        #allmags.append(mags2)
+        #allPOx.append(P_Ox2)
+        mags2 = np.array(mags2)
+
+        if allmags is None:
+            allmags = mags2
+            allPOx = P_Ox2
+        else:
+            allmags = np.append(allmags,mags2)
+            allPOx = np.append(allPOx,P_Ox2)
+
+    Nbins = int(model.Appmax - model.Appmin)+1
+    bins = np.linspace(model.Appmin,model.Appmax,Nbins)
+    plt.figure()
+    plt.hist(allmags,weights = allPOx, bins = bins,label="Posterior")
+    plt.legend()
+    plt.tight_layout()
+    plt.savefig("posterior_pOx.png")
+    plt.close()
+
+
+
+
+
+if __name__ == "__main__":
+
+    calc_path_priors()
+
+
+
diff --git a/zdm/scripts/Path/optimise_host_priors.py b/zdm/scripts/Path/optimise_host_priors.py
new file mode 100644
index 00000000..279f9728
--- /dev/null
+++ b/zdm/scripts/Path/optimise_host_priors.py
@@ -0,0 +1,334 @@
+"""
+This file illustrates how to optimise the host prior
+distribution by fitting to CRAFT ICS optical observations.
+It fits a model of absolute galaxy magnitude distributions,
+uses zDM to predict redshifts and hence apparent magnitudes,
+runs PATH using that prior, and tries to get priors to match posteriors.
+
+WARNING: this is NOT the optimal method! That would require using
+a catalogue of galaxies to sample from to generate fake optical fields.
+But nonetheless, this tests the power of estimating FRB host galaxy
+contributions using zDM to set priors for apparent magnitudes.
+
+WARNING2: To do this properly also requires inputting the posterior POx
+for host galaxies into zDM! This simulation does not do that either.
+
+WARNING3: this program takes of order 1 hr to run
+"""
+
+
+#standard Python imports
+import numpy as np
+from matplotlib import pyplot as plt
+
+# imports from the "FRB" series
+from zdm import optical as opt
+from zdm import optical_params as op
+from zdm import loading
+from zdm import cosmology as cos
+from zdm import parameters
+
+import astropath.priors as pathpriors
+from scipy.optimize import minimize
+
+
+def main():
+    """
+    Main function.
+    Contains the outer loop to iterate over parameters.
+    """
+
+    ######### List of all ICS FRBs for which we can run PATH #######
+    # hard-coded list of FRBs with PATH data in the ICS paper
+    frblist=opt.frblist
+
+    # Initialisation of zDM grid
+    # Eventually, this should be part of the loop, i.e. host IDs should
+    # be re-fed into FRB surveys. However, it will be difficult to do this
+    # with very limited redshift estimates. That might require posterior
+    # estimates of redshift given the observed galaxies. Maybe.
+ state = parameters.State() + cos.set_cosmology(state) + cos.init_dist_measures() + names=['CRAFT_ICS_892','CRAFT_ICS_1300','CRAFT_ICS_1632'] + ss,gs = loading.surveys_and_grids(survey_names=names) + + # Initialisation of model + opt_params = op.Hosts() + opt_params.AbsModelID = 1 + model = opt.host_model(opstate = opt_params) + model.init_zmapping(gs[0].zvals) + + x0 = model.AbsPrior + + args=[frblist,ss,gs,model] + Nparams = len(x0) + bounds = [(0,1)]*Nparams + result = minimize(function,x0 = x0,args=args,bounds = bounds) + + # Recording the current spline best-fit here + #x = [0.00000000e+00 0.00000000e+00 7.05155614e-02 8.39235326e-01 + # 3.27794398e-01 1.00182186e-03 0.00000000e+00 3.46702511e-04 + # 2.17040011e-03 9.72472750e-04] + + # recording the current non-spline best fit here + #x = [ 1.707e-04, 8.649e-02, 9.365e-01, 9.996e-01, 2.255e-01,\ + # 3.493e-02, 0.000e+00, 0.000e+00, 0.000e+00, 1.000e-01] + #x = np.array(x) + + print("Best fit result is ",result.x) + x = result.x + + # analyses final result + x /= np.sum(x) + model.AbsPrior = x + model.reinit_model() + outfile = "best_fit_apparent_magnitudes.png" + NFRB,AppMags,AppMagPriors,ObsMags,ObsPosteriors,PUprior,PUobs,sumPUprior,sumPUobs = calc_path_priors(frblist,ss,gs,model,verbose=False) + stat = calculate_goodness_statistic(NFRB,AppMags,AppMagPriors,ObsMags,ObsPosteriors,sumPUobs,sumPUprior,plotfile=outfile) + + # calculates the original PATH result + #outfile = "original_fit_apparent_magnitudes.png" + NFRB2,AppMags2,AppMagPriors2,ObsMags2,ObsPosteriors2,PUprior2,PUobs2,sumPUprior2,sumPUobs2 = calc_path_priors(frblist,ss,gs,model,verbose=False,usemodel=False) + #stat = calculate_goodness_statistic(NFRB,AppMags,AppMagPriors,ObsMags,ObsPosteriors,sumPUobs,sumPUprior,plotfile=outfile) + + + # plots original vs updated posteriors + plt.figure() + plt.xlabel("Original P") + plt.ylabel("Best fit P") + plt.xlim(0,1) + plt.ylim(0,1) + plt.scatter(ObsPosteriors2,ObsPosteriors,label="Hosts",marker='x') + plt.scatter(PUobs2,PUobs,label="Unobserved",marker='+') + plt.legend() + plt.tight_layout() + plt.savefig("Scatter_plot_comparison.png") + plt.close() + + + + # plots final result on absolute magnitudes + plt.figure() + plt.xlabel("Absolute magnitude, $M_r$") + plt.ylabel("$p(M_r)$") + plt.plot(model.AbsMags,model.AbsMagWeights/np.max(model.AbsMagWeights),label="interpolation") + plt.plot(model.ModelBins,x/np.max(x),marker="o",linestyle="",label="Model Parameters") + plt.legend() + plt.tight_layout() + plt.savefig("best_fit_absolute_magnitudes.pdf") + plt.close() + +def function(x,args): + """ + function to be minimised + """ + frblist = args[0] + ss = args[1] + gs=args[2] + model=args[3] + + # initialises model to the priors + # technically, there is a redundant normalisation here + model.AbsPrior = x + + NFRB,AppMags,AppMagPriors,ObsMags,ObsPosteriors,PUprior,PUobs,sumPUprior,sumPUobs = calc_path_priors(frblist,ss,gs,model,verbose=False) + + # we re-normalise the sum of PUs by NFRB + + # prevents infinite plots being created + stat = calculate_goodness_statistic(NFRB,AppMags,AppMagPriors,ObsMags,ObsPosteriors,sumPUobs,sumPUprior,plotfile=None) + + return stat + + +def make_cdf(xs,ys,ws,norm = True): + """ + makes a cumulative distribution in terms of + the x-values x, observed values y, and weights w + + """ + cdf = np.zeros([xs.size]) + for i,y in enumerate(ys): + OK = np.where(xs > y)[0] + cdf[OK] += ws[i] + if norm: + cdf /= cdf[-1] + return cdf + +def make_cdf_for_plotting(xvals,weights=None): + """ + Creates a cumulative 
distribution function + + xvals,yvals: values of data points + """ + N = xvals.size + cx = np.zeros([2*N]) + cy = np.zeros([2*N]) + + order = np.argsort(xvals) + xvals = xvals[order] + if weights is None: + weights = np.linspace(0.,1.,N+1) + else: + weights = weights[order] + weights = np.cumsum(weights) + weights /= weights[-1] + + for i,x in enumerate(xvals): + cx[2*i] = x + cx[2*i+1] = x + cy[2*i] = weights[i] + cy[2*i+1] = weights[i+1] + return cx,cy + +def calculate_goodness_statistic(NFRB,AppMags,AppMagPriors,ObsMags,ObsPosteriors,PUobs,PUprior,plotfile=None): + """ + Calculates a ks-like statistics to be proxzy for goodness-of-fit + We must set each AppMagPriors to 1.-PUprior at the limiting magnitude for each observation, + and sum the ObsPosteriors to be equal to 1.-PUobs at that magnitude. + Then these are what gets summed. + + This can be readily done by combining all ObsMags and ObsPosteriors into a single long list, + since this should already be correctly normalised. Priors require their own weight. + + Inputs: + AppMags: array listing apparent magnitudes + AppMagPrior: array giving prior on AppMags + ObsMags: list of observed magnitudes + ObsPosteriors: list of posterior values corresponding to ObsMags + PUobs: posterior on unseen probability + PUprior: prior on PU + Plotfile: set to name of output file for comparison plot + + Returns: + k-like statistic of biggest obs/prior difference + """ + + # we calculate a probability using a cumulative distribution + prior_dist = np.cumsum(AppMagPriors) + + # the above is normalised to NFRB. We now divide it by this + # might want to be careful here, and preserve this normalisation + prior_dist /= NFRB #((NFRB-PUprior)/NFRB) / prior_dist[-1] + + + obs_dist = make_cdf(AppMags,ObsMags,ObsPosteriors,norm=False) + + obs_dist /= NFRB + + # we calculate something like the k-statistic. Includes NFRB normalisation + diff = obs_dist - prior_dist + stat = np.max(np.abs(diff)) + + if plotfile is not None: + plt.figure() + plt.xlabel("Apparent magnitude $m_r$") + plt.ylabel("Cumulative host galaxy distribution") + #cx,cy = make_cdf_for_plotting(ObsMags,weights=ObsPosteriors) + plt.plot(AppMags,obs_dist,label="Observed") + plt.plot(AppMags,prior_dist,label="Prior") + plt.legend() + plt.tight_layout() + plt.savefig(plotfile) + plt.close() + + + return stat + +def calc_path_priors(frblist,ss,gs,model,verbose=True,usemodel=True): + """ + Inner loop. Gets passed model parameters, but assumes everything is + initialsied from there. + + Inputs: + FRBLIST: list of FRBs to retrieve data for + ss: list of surveys modelling those FRBs (searches for FRB in data) + gs: list of zDM grids modelling those surveys + model: host_model class object used to calculate priors on magnitude + verbose (bool): guess + + Returns: + Number of FRBs fitted + AppMags: list of apparent magnitudes used internally in the model + allMagPriors: summed array of magnitude priors calculated by the model + allObsMags: list of observed magnitudes of candidate hosts + allPOx: list of posterior probabilities calculated by the model + allPU: summed values of unobserved prior + allPUx: summed values of posterior of being unobserved + """ + + NFRB = len(frblist) + + # we assume here that the model has just had a bunch of parametrs updated + # within it. Must be done once for any fixed zvals. 
If zvals change, + # then we have another issue + model.reinit_model() + + # do this once per "model" objects + pathpriors.USR_raw_prior_Oi = model.path_raw_prior_Oi + + allObsMags = None + allPOx = None + allpriors = None + AppMags = model.AppMags + sumPU = 0. + sumPUx = 0. + allPU = [] + allPUx = [] + nfitted = 0 + + for i,frb in enumerate(frblist): + # interates over the FRBs. "Do FRB" + # P_O is the prior for each galaxy + # P_Ox is the posterior + # P_Ux is the posterior for it being unobserved + # mags is the list of galaxy magnitudes + + # determines if this FRB was seen by the survey, and + # if so, what its DMEG is + for j,s in enumerate(ss): + imatch = opt.matchFRB(frb,s) + if imatch is not None: + # this is the survey to be used + g=gs[j] + break + + if imatch is None: + if verbose: + print("Could not find ",frb," in any survey") + continue + + nfitted += 1 + + DMEG = s.DMEGs[imatch] + # this is where the particular survey comes into it + MagPriors = model.init_path_raw_prior_Oi(DMEG,g) + mag_limit=26 # might not be correct + PU = model.estimate_unseen_prior(mag_limit) + bad = np.where(AppMags > mag_limit)[0] + MagPriors[bad] = 0. + + P_O,P_Ox,P_Ux,ObsMags = opt.run_path(frb,model,usemodel=usemodel,PU = PU) + + ObsMags = np.array(ObsMags) + + if allObsMags is None: + allObsMags = ObsMags + allPOx = P_Ox + allMagPriors = MagPriors + else: + allObsMags = np.append(allObsMags,ObsMags) + allPOx = np.append(allPOx,P_Ox) + allMagPriors += MagPriors + + sumPU += PU + sumPUx += P_Ux + allPU.append(PU) + allPUx.append(P_Ux) + return nfitted,AppMags,allMagPriors,allObsMags,allPOx,allPU,allPUx,sumPU,sumPUx + + +main() diff --git a/zdm/scripts/Path/path_interface.py b/zdm/scripts/Path/path_interface.py new file mode 100644 index 00000000..1212f581 --- /dev/null +++ b/zdm/scripts/Path/path_interface.py @@ -0,0 +1,104 @@ +""" +This script shows how to use the host_model class +to estimate priors on host galaxy magnitudes +""" + +from zdm import optical as opt +from zdm import loading +import numpy as np +from zdm import cosmology as cos +from zdm import parameters + +from matplotlib import pyplot as plt + +def main(): + + state = parameters.State() + cos.set_cosmology(state) + cos.init_dist_measures() + + + model = opt.host_model() + + name='CRAFT_ICS_1300' + + + ss,gs = loading.surveys_and_grids(survey_names=[name]) + g = gs[0] + model.init_zmapping(g.zvals) + + if False: + plot_redshift_mapping(model,zvals) + + # for a DM 1500 FRB, what is the posterior magnitude distribution? 
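+    # Illustrative sketch: for a single FRB, the same call can be evaluated
+    # at its DM_EG alone; this mirrors the vector call below and assumes the
+    # same get_posterior signature used in this script:
+    if False:
+        single_prior, single_pz = model.get_posterior(g, np.array([1500.]))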
+    DMlist = np.linspace(200,2000,10)
+
+    AppMagPriors,pz = model.get_posterior(g,DMlist)
+
+    if False:
+        plot_redshift_prior(g.zvals,pz,DMlist)
+
+    plot_AppMag_prior(model.AppMags,AppMagPriors,DMlist)
+
+def plot_AppMag_prior(AppMags,Priors,DMlist):
+    """
+    Plots the calculated prior on host magnitudes
+    """
+    plt.figure()
+    plt.xlabel("Apparent magnitude, $m_r$")
+    plt.ylabel("$p(m_r)|{\\rm DM}$")
+
+    #plt.ylim(0,None)
+    for i,DM in enumerate(DMlist):
+        plt.plot(AppMags,Priors[:,i],label=str(DM))
+    plt.legend()
+    plt.tight_layout()
+    plt.savefig("prior_on_apparent_magnitude.png")
+    plt.close()
+
+def plot_redshift_prior(zvals,pz,DMlist):
+    """
+    Plots the prior on redshift
+    """
+    plt.figure()
+    plt.xlabel("z")
+    plt.ylabel("p(z)")
+    plt.xlim(0,2)
+    #plt.ylim(0,None)
+    for i,DM in enumerate(DMlist):
+        plt.plot(zvals,pz[:,i],label=str(DM))
+    plt.legend()
+    plt.tight_layout()
+    plt.savefig("prior_on_redshift.png")
+    plt.close()
+
+
+def plot_mag_prior(model,pmags):
+    """
+    Plots the calculated posterior on host magnitudes
+    for a DM 1500 FRB
+    """
+    plt.figure()
+    plt.xlabel('Apparent magnitude, $m_r$')
+    plt.ylabel("Relative probability, $P(m_r)$")
+    plt.plot(model.AppMags,pmags)
+    plt.tight_layout()
+    plt.savefig("posterior_DM_1500.png")
+    plt.close()
+
+
+def plot_redshift_mapping(model,zvals):
+    # plots mapping of absolute to apparent magnitudes
+    # for each redshift
+    plt.figure()
+    #plt.hist(model.zmap.flatten())
+
+    for i,z in enumerate(zvals):
+        plt.plot(model.AppMags,model.maghist[:,i],label=str(z))
+        break
+    plt.xlabel('Apparent Magnitudes')
+    plt.ylabel('Relative contribution to redshift')
+    plt.legend()
+    plt.tight_layout()
+    plt.show()
+
+main()
diff --git a/zdm/scripts/Scattering/test_scattering_properties.py b/zdm/scripts/Scattering/test_scattering_properties.py
new file mode 100644
index 00000000..d189186a
--- /dev/null
+++ b/zdm/scripts/Scattering/test_scattering_properties.py
@@ -0,0 +1,237 @@
+"""
+This script is intended to illustrate the different
+ways in which scattering can be implemented in zDM
+
+"""
+import os
+
+from zdm import cosmology as cos
+from zdm import figures
+from zdm import parameters
+from zdm import survey
+from zdm import pcosmic
+from zdm import iteration as it
+from zdm import loading
+from zdm import io
+from pkg_resources import resource_filename
+import numpy as np
+from matplotlib import pyplot as plt
+
+import matplotlib
+
+defaultsize=14
+ds=4
+font = {'family' : 'Helvetica',
+    'weight' : 'normal',
+    'size' : defaultsize}
+matplotlib.rc('font', **font)
+
+def main():
+
+    # in case you wish to switch to another output directory
+    opdir = "Plots/"
+    if not os.path.exists(opdir):
+        os.mkdir(opdir)
+
+    # directory where the survey files are located. The below is the default -
+    # you can leave this out, or change it for a different survey file location.
+    sdir = os.path.join(resource_filename('zdm', 'data'), 'Surveys')
+
+    survey_name = "CRAFT_average_ICS"
+
+    # make this into a list to initialise multiple surveys at once
+    names = [survey_name]
+
+    repeaters=True
+    # sets plotting limits
+    zmax = 2.
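+    # For reference, a sketch of the redshift scaling that the "zscat"
+    # method (WMETHOD=3) applies to the lognormal parameters, mirroring
+    # make_widths in zdm/survey.py (illustrative only):
+    if False:
+        def z_scaled_logmeans(wlogmean, slogmean, z):
+            # intrinsic width time-dilates as (1+z); scattering ~ (1+z)^-3
+            return wlogmean + np.log(1.+z), slogmean - 3.*np.log(1.+z)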
+ dmmax = 2000 + + # initialise surveys and grids with different state values + imethods = np.arange(4) + methnames = ["const","width", "scat","zscat"] + zdists=[] + dmdists=[] + + if repeaters: + zdists_s=[] + dmdists_s=[] + zdists_r=[] + dmdists_r=[] + zdists_b=[] + dmdists_b=[] + + for i,Method in enumerate(imethods): + + survey_dict = {"WMETHOD": Method} + # Write True if you want to do repeater grids - see "plot_repeaters.py" to make repeater plots + surveys, grids = loading.surveys_and_grids(survey_names = names,\ + repeaters=repeaters, sdir=sdir,nz=70,ndm=140, + survey_dict = survey_dict) + g=grids[0] + s=surveys[0] + + llsum = it.calc_likelihoods_1D(g,s,pNreps=False) + print("For scattering method ",methnames[i],", 1D likelihoods ",llsum) + + llsum = it.calc_likelihoods_2D(g,s,pNreps=False) + print("For scattering method ",methnames[i],", 2D likelihoods are ",llsum) + + # extracts weights from survey and plots as function of z + if i==3: + weights = s.wplist + widths = s.wlist[:,0] + nw,nz = weights.shape + plt.figure() + plt.xlabel('z') + plt.ylabel('weight') + for iw in np.arange(nw): + plt.plot(g.zvals,weights[iw,:],label="width = "+str(widths[iw])[0:5]) + total = np.sum(weights,axis=0) + plt.plot(g.zvals,total,label="total",color="black") + plt.yscale("log") + plt.legend() + plt.tight_layout() + plt.savefig(opdir+"z_dependent_weights.png") + plt.close() + + figures.plot_grid( + g.rates, + g.zvals, + g.dmvals, + name=opdir+"pzdm_"+methnames[i]+".png", + norm=3, + log=True, + label='$\\log_{10} p({\\rm DM}_{\\rm cosmic} + {\\rm DM}_{\\rm host},z)$ [a.u.]', + project=True, + ylabel='${\\rm DM}_{\\rm cosmic} + {\\rm DM}_{\\rm host}$', + zmax=zmax,DMmax=dmmax, + Aconts=[0.01, 0.1, 0.5] + ) + zdists.append(np.sum(g.rates,axis=1)) + dmdists.append(np.sum(g.rates,axis=0)) + + if repeaters: + zdists_s.append(np.sum(g.exact_singles,axis=1)) + dmdists_s.append(np.sum(g.exact_singles,axis=0)) + + zdists_r.append(np.sum(g.exact_reps,axis=1)) + dmdists_r.append(np.sum(g.exact_reps,axis=0)) + + zdists_b.append(np.sum(g.exact_rep_bursts,axis=1)) + dmdists_b.append(np.sum(g.exact_rep_bursts,axis=0)) + + + # comparison of z distributions + plt.figure() + plt.xlabel('z') + plt.ylabel('p(z) [arb units]') + plt.xlim(0,zmax) + plt.ylim(0,None) + for i,Method in enumerate(imethods): + plt.plot(g.zvals,zdists[i],label=methnames[i]) + plt.legend() + plt.tight_layout() + plt.savefig(opdir+"pdm_comparison.png",) + plt.close() + + plt.figure() + plt.xlabel('DM') + plt.xlim(0,dmmax) + plt.ylim(0,None) + plt.ylabel('p(DM) [arb units]') + for i,Method in enumerate(imethods): + plt.plot(g.dmvals,dmdists[i],label=methnames[i]) + plt.legend() + plt.tight_layout() + plt.savefig(opdir+"pz_comparison.png",) + plt.close() + + if not repeaters: + exit() + + ### singles ### + # comparison of z distributions + plt.figure() + plt.xlabel('z') + plt.xlim(0,zmax) + plt.ylim(0,None) + plt.ylabel('p(z) [arb units]') + for i,Method in enumerate(imethods): + plt.plot(g.zvals,zdists_s[i],label=methnames[i]) + plt.legend() + plt.tight_layout() + plt.savefig(opdir+"pdm_singles_comparison.png",) + plt.close() + + plt.figure() + plt.xlabel('DM') + plt.xlim(0,dmmax) + plt.ylim(0,None) + plt.ylabel('p(DM) [arb units]') + for i,Method in enumerate(imethods): + plt.plot(g.dmvals,dmdists_s[i],label=methnames[i]) + plt.legend() + plt.tight_layout() + plt.savefig(opdir+"pz_singles_comparison.png",) + plt.close() + + + ### repeaters ### + # comparison of z distributions + plt.figure() + plt.xlabel('z') + plt.xlim(0,zmax) + 
plt.ylim(0,None) + plt.ylabel('p(z) [arb units]') + for i,Method in enumerate(imethods): + plt.plot(g.zvals,zdists_r[i],label=methnames[i]) + plt.legend() + plt.tight_layout() + plt.savefig(opdir+"pdm_repeater_comparison.png",) + plt.close() + + plt.figure() + plt.xlabel('DM') + plt.xlim(0,dmmax) + plt.ylim(0,None) + plt.ylabel('p(DM) [arb units]') + for i,Method in enumerate(imethods): + plt.plot(g.dmvals,dmdists_r[i],label=methnames[i]) + plt.legend() + plt.tight_layout() + plt.savefig(opdir+"pz_repeater_comparison.png",) + plt.close() + + ### bursts ### + # comparison of z distributions + plt.figure() + plt.xlabel('z') + plt.xlim(0,zmax) + plt.ylim(0,None) + plt.ylabel('p(z) [arb units]') + for i,Method in enumerate(imethods): + plt.plot(g.zvals,zdists_b[i],label=methnames[i]) + plt.legend() + plt.tight_layout() + plt.savefig(opdir+"pdm_bursts_comparison.png",) + plt.close() + + plt.figure() + plt.xlabel('DM') + plt.xlim(0,dmmax) + plt.ylim(0,None) + plt.ylabel('p(DM) [arb units]') + for i,Method in enumerate(imethods): + plt.plot(g.dmvals,dmdists_b[i],label=methnames[i]) + plt.legend() + plt.tight_layout() + plt.savefig(opdir+"pz_bursts_comparison.png",) + plt.close() + + + + + +main() diff --git a/zdm/survey.py b/zdm/survey.py index b551e5e8..5cd946b4 100644 --- a/zdm/survey.py +++ b/zdm/survey.py @@ -29,14 +29,19 @@ from astropy import units as u from astropy.coordinates import SkyCoord +# minimum threshold to use - it's a substitute for zero +MIN_THRESH = 1e-10 + class Survey: def __init__(self, state, survey_name:str, filename:str, dmvals:np.ndarray, + zvals:np.ndarray=None, NFRB:int=None, iFRB:int=0, edir=None, - rand_DMG=False): + rand_DMG=False, + survey_dict=None): """ Init an FRB Survey class Args: @@ -44,17 +49,25 @@ def __init__(self, state, survey_name:str, survey_name (str): Name of the survey filename (str): _description_ - dmvals (np.ndarray): _description_ + dmvals (np.ndarray): Extragalactic DM values at which to calculate efficiencies + zvals (np.ndarray): Redshift values at which to calculate efficiencies NFRB (int, optional): _description_. Defaults to None. iFRB (int, optional): _description_. Defaults to 0. 
edir (str, optional): Location of efficiency files rand_DMG (bool): If true randomise the DMG values within uncertainty + survey_dict (dict, optional): Dict of survey data meta-parameters to over-ride + values in the survey file """ # Proceed self.name = survey_name self.dmvals = dmvals + self.zvals = zvals + self.NDM = dmvals.size + self.NZ = zvals.size + # Load up - self.process_survey_file(filename, NFRB, iFRB, min_lat=state.analysis.min_lat, dmg_cut=state.analysis.DMG_cut) + self.process_survey_file(filename, NFRB, iFRB, min_lat=state.analysis.min_lat, + dmg_cut=state.analysis.DMG_cut,survey_dict = survey_dict) # Check if repeaters or not and set relevant parameters # Now done in loading # self.repeaters=False @@ -77,10 +90,28 @@ def __init__(self, state, survey_name:str, method=beam_method, plot=False, thresh=beam_thresh) # tells the survey to use the beam file + # Efficiency: width_method passed through "self" here - pwidths,pprobs=make_widths(self, state) - _ = self.get_efficiency_from_wlist(dmvals, - pwidths,pprobs,model=width_bias, edir=edir) + # Determines if the model is redshift dependent + + if self.meta['WMETHOD'] != 3: + pwidths,pprobs=make_widths(self, state) + _ = self.get_efficiency_from_wlist(pwidths,pprobs, + model=width_bias, edir=edir, iz=None) + else: + # evaluate efficiencies at each redshift + self.NWbins = state.width.WNbins + self.efficiencies = np.zeros([self.NWbins,self.NZ,self.NDM]) + self.wplist = np.zeros([self.NWbins,self.NZ]) + self.wlist = np.zeros([self.NWbins,self.NZ]) + self.DMlist = np.zeros([self.NZ,self.NDM]) + self.mean_efficiencies = np.zeros([self.NZ,self.NDM]) + + # we have a z-dependent scattering and width model + for iz,z in enumerate(self.zvals): + pwidths,pprobs=make_widths(self, state, z=z) + _ = self.get_efficiency_from_wlist(pwidths,pprobs, + model=width_bias, edir=edir,iz=iz) self.calc_max_dm() def init_repeaters(self): @@ -192,10 +223,6 @@ def init_DMEG(self,DMhalo,halo_method=0): self.DMhalo=DMhalo self.process_dmhalo(halo_method) self.DMEGs=self.DMs-self.DMGs - self.DMhalos - # self.DMEGs=self.DMs-self.DMGals - # self.DMEGs_obs=self.DMs-self.DMGs-DMhalo - # self.DMEGs = np.copy(self.DMEGs_obs) - # self.DMEGs[self.DMEGs < 0] = 10. # Minimum value of 10. 
pc/cm^3 def process_dmhalo(self, halo_method): """ @@ -361,7 +388,7 @@ def init_zs_reps(self): self.nDr = self.nD self.zreps = self.zlist self.nozreps = self.nozlist - + # Case of some singles and some repeaters else: if self.nD == 1: @@ -409,7 +436,8 @@ def process_survey_file(self,filename:str, NFRB:int=None, iFRB:int=0, min_lat=None, - dmg_cut=None): + dmg_cut=None, + survey_dict = None): """ Loads a survey file, then creates dictionaries of the loaded variables @@ -420,6 +448,7 @@ def process_survey_file(self,filename:str, iFRB (int, optional): Start grabbing FRBs at this index Mainly used for Monte Carlo analysis Requires that NFRB be set + surveydict: overrides value in file """ self.iFRB = iFRB self.NFRB = NFRB @@ -435,6 +464,12 @@ def process_survey_file(self,filename:str, DC = self.survey_data.params[key] self.meta[key] = getattr(self.survey_data[DC],key) + # over-rides survey data if applicable + if survey_dict is not None: + for key in survey_dict: + self.meta[key] = survey_dict[key] + + # Get default values from default frb data default_frb = survey_data.FRB() @@ -445,7 +480,6 @@ def process_survey_file(self,filename:str, default_value = self.meta[field.name] else: default_value = getattr(default_frb, field.name) - # now checks for missing data, fills with the default value if field.name in frb_tbl.columns: @@ -552,8 +586,8 @@ def process_survey_file(self,filename:str, # checks for incorrectSNR values toolow = np.where(self.Ss < 1.)[0] if len(toolow) > 0: - print("FRBs ",toolow," have SNR < SNRTHRESH!!! Please correct this. Exiting...") - exit() + raise ValueError("FRBs ",toolow," have SNR < SNRTHRESH!!! Please correct this. Exiting...") + print("FRB survey sucessfully initialised with ",self.NFRB," FRBs starting from", self.iFRB) @@ -658,18 +692,14 @@ def calc_max_dm(self): self.max_dm = max_dm - def get_efficiency_from_wlist(self,DMlist,wlist,plist, + def get_efficiency_from_wlist(self,wlist,plist, model="Quadrature", addGalacticDM=True, - edir=None): + edir=None, iz=None): """ Gets efficiency to FRBs Returns a list of relative efficiencies as a function of dispersion measure for each width given in wlist - - DMlist: - - list of dispersion measures (pc/cm3) at which to calculate efficiency - wlist: list of intrinsic FRB widths @@ -688,7 +718,11 @@ def get_efficiency_from_wlist(self,DMlist,wlist,plist, edir: - Directory where efficiency files are contained. Only relevant if specific FRB responses are used + iz: + - izth z-bin where these efficiencies are being calculated """ + DMlist = self.dmvals + efficiencies=np.zeros([wlist.size,DMlist.size]) if addGalacticDM: @@ -708,14 +742,25 @@ def get_efficiency_from_wlist(self,DMlist,wlist,plist, max_dm=self.meta['MAX_DM'], model=model, dsmear=False, - edir=edir) + edir=edir, + max_iw=self.meta['MAX_IW'], + max_meth = self.meta['MAXWMETH']) # keep an internal record of this - self.efficiencies=efficiencies - self.wplist=plist - self.wlist=wlist - self.DMlist=DMlist - mean_efficiencies=np.mean(efficiencies,axis=0) - self.mean_efficiencies=mean_efficiencies #be careful here!!! This may not be what we want! + + if iz is None: + self.efficiencies=efficiencies + self.wplist=plist + self.wlist=wlist + self.DMlist=DMlist + mean_efficiencies=np.mean(efficiencies,axis=0) + self.mean_efficiencies=mean_efficiencies #be careful here!!! This may not be what we want! 
+        else:
+            self.efficiencies[:,iz,:]=efficiencies
+            self.wplist[:,iz]=plist
+            self.wlist[:,iz]=wlist
+            self.DMlist[iz,:]=DMlist
+            mean_efficiencies=np.mean(efficiencies,axis=0)
+            self.mean_efficiencies[iz,:]=mean_efficiencies #be careful here!!! This may not be what we want!
         return efficiencies
 
     def __repr__(self):
@@ -730,7 +775,9 @@
 
 # implements something like Mawson's formula for sensitivity
 # t_res in ms
-def calc_relative_sensitivity(DM_frb,DM,w,fbar,t_res,nu_res,Nchan=336,max_idt=None,max_dm=None,model='Quadrature',dsmear=True,edir=None):
+def calc_relative_sensitivity(DM_frb,DM,w,fbar,t_res,nu_res,Nchan=336,max_idt=None,
+                              max_dm=None,model='Quadrature',dsmear=True,edir=None,max_iw=None,
+                              max_meth = 0):
     """
     Calculates DM-dependent sensitivity
     This function adjusts sensitivity to a given burst as a function of DM.
@@ -753,7 +800,14 @@
         NOTE: Quadrature_s and Sammons_s should be input to this function as just Quadrature and Sammons respectively
     dsmear: subtract DM smearing from measured width to calculate intrinsic
+    edir [string, optional]: directory containing efficiency files to be loaded
+    max_iw [int, optional]: maximum integer width of the search, in units of t_res
+    max_meth [int, optional]: method for treating bursts broader than the maximum width
+        0: ignore the maximum width
+        1: truncate sensitivity at the maximum width
+        2: scale sensitivity as 1/w above the maximum width
     """
 
     # this model returns the parameterised CHIME DM-dependent sensitivity
     # it is independent of width
@@ -782,6 +836,7 @@
     # hence we must first adjust for this prior to estimating the DM-dependence
     # for this we use the *true* DM at which the FRB was observed
     if dsmear==True:
+        # w is the total measured width
         measured_dm_smearing=2*(nu_res/1.e3)*k_DM*DM_frb/(fbar/1e3)**3 #smearing factor of FRB in the band
         uw=w**2-measured_dm_smearing**2-t_res**2 # uses the quadrature model to calculate intrinsic width uw
         if uw < 0:
@@ -789,13 +844,28 @@
         else:
             uw=uw**0.5
     else:
+        # w represents the intrinsic width
        uw=w
 
+    totalw = (uw**2 + dm_smearing**2 + t_res**2)**0.5
+
-    if model=='Quadrature':
-        sensitivity=(uw**2+dm_smearing**2+t_res**2)**-0.25
-    elif model=='Sammons':
-        sensitivity=0.75*(0.93*dm_smearing + uw + 0.35*t_res)**-0.5
-    # calculates relative sensitivity to bursts as a function of DM
+    # calculates relative sensitivity to bursts as a function of DM
+    if model=='Quadrature' or model=='Sammons':
+        if model=='Quadrature':
+            sensitivity=totalw**-0.5
+        else:
+            sensitivity=0.75*(0.93*dm_smearing + uw + 0.35*t_res)**-0.5
+
+        # implements the maximum integer width cut; nested under the
+        # analytic width models so that the filename branch below still
+        # attaches to the model check rather than to this cut
+        if max_meth != 0 and max_iw is not None:
+            max_w = t_res*(max_iw+0.5)
+            toolong = np.where(totalw > max_w)[0]
+            if max_meth == 1:
+                sensitivity[toolong] = MIN_THRESH # something close to zero
+            elif max_meth == 2:
+                # quadrature has already reduced sensitivity by sqrt(w);
+                # a further sqrt(w) factor gives the full 1/w scaling
+                sensitivity[toolong] *= (max_w / totalw[toolong])**0.5
 
     # If model not CHIME, Quadrature or Sammons assume it is a filename
     else:
@@ -812,6 +882,85 @@
     return sensitivity
 
+def geometric_lognormals2(lmu1,ls1,lmu2,ls2,bins=None,
+                          Ndivs=100,Nsigma=3.,plot=False,Nbins=101,
+                          ScatDist=1):
+    '''
+    Numerically evaluates the resulting distribution of y=\sqrt{x1^2+x2^2},
+    where logx1~normal and logx2~normal with log-mean lmu and
+ This is typically used for two log-normals of intrinsic + FRB width and scattering time + + lmu1, ls1 (float, float): log mean and log-sigma of the first distribution + + lmu2, ls2 (float, float): log-mean and log-sigma of the second distribution + + bins (np.ndarray([NBINS+1],dtype='float')): bin edges for resulting plot. + + Returns: + hist: histogram of probability within bins + chist: cumulative histogram of probability within bins + bins: bin edges for histogram + + ''' + + #draw from both distributions + np.random.seed(1234) + + xvals1 = np.linspace(lmu1-Nsigma*ls1,lmu1+Nsigma*ls1,Ndivs) + yvals1 = pcosmic.loglognormal_dlog(xvals1,lmu1,ls1,1.) + yvals1 /= np.sum(yvals1) + + # xvals in ln space + lnlog = np.log10(np.exp(1)) + xvals2 = np.logspace(lnlog*(lmu2-Nsigma*ls2),lnlog*(lmu2+Nsigma*ls2),Ndivs) + if ScatDist == 0: + # log uniform + yvals2 = np.full([Ndivs],2./Ndivs) + elif ScatDist == 1: + # lognormal + yvals2 = pcosmic.loglognormal_dlog(xvals2,lmu2,ls2,2.) + yvals2 /= np.sum(yvals2) + elif ScatDist == 2: + # upper lognormal is flat + yvals2 = pcosmic.loglognormal_dlog(xvals2,lmu2,ls2,2.) + upper = np.where(xvals2 > lmu2)[0] + ymax = np.max(yvals2) + yvals2[upper] = ymax + yvals2 /= np.sum(yvals2) + + xvals1 = np.exp(xvals1) + xvals2 = np.exp(xvals2) + themin = np.min([np.min(xvals1),np.min(xvals2)]) + themax = 2**0.5 * np.max([np.max(xvals1),np.max(xvals2)]) + + if bins is None: + #bins=np.linspace(0,np.max(ys)/4.,Nbins) + delta=1e-3 + # ensures the first bin begins at 0 + bins=np.zeros([Nbins+1]) + bins[1:]=np.logspace(np.log10(themin)-delta,np.log10(themax)+delta,Nbins) + else: + Nbins = len(bins)-1 + + # calculate widths + hist = np.zeros([Nbins]) + for i,x1 in enumerate(xvals1): + widths = (x1**2 + xvals2**2)**0.5 + probs = yvals1[i]*yvals2 + h,b = np.histogram(widths,bins=bins,weights=probs) + hist += h + + chist=np.zeros([Nbins+1]) + chist[1:]=np.cumsum(hist) + # we do not want to renormalise, since the normalisation reflects the values + # which are too large + #hist /= chist[-1] + chist /= chist[-1] + + return hist,chist,bins + def geometric_lognormals(lmu1,ls1,lmu2,ls2,bins=None, Nrand=10000,plot=False,Nbins=101): ''' @@ -868,14 +1017,41 @@ def geometric_lognormals(lmu1,ls1,lmu2,ls2,bins=None, plt.hist(np.log(ys),bins=lbins) plt.savefig('log_adding_lognormals.pdf') plt.close() - # renomalises - total will be less than unity, assuming some large # values fall off the largest bin #hist = hist/Nrand return hist,chist,bins + +def halflognormal(logmean,logsigma,minw,maxw,nbins): + """ + Generates a parameterised half-lognormal distribution. + This acts as a lognormal in the lower half, but + keeps a constant per-log-bin width in the upper half + """ + + logmin = np.log(minw) + logmax = np.log(maxw) + logbins = np.linspace(logmin,logmax,nbins+1) + dlogbin = (logmax - logmin)/nbins + logbinmean = logbins[:-1] + dlogbin/2. -def make_widths(s:Survey,state): + probs = np.zeros([nbins]) + + # gets weighting in smaller bins + args=[logmin,logmax,1.] 
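+    # Each bin weight below is the integral of the lognormal (in log space)
+    # across that bin. As a sanity sketch, integrating the fully normalised
+    # form over a wide range should approach unity; this assumes the same
+    # loglognormal_dlog signature used by make_widths below:
+    if False:
+        norm = (2.*np.pi)**-0.5/logsigma
+        total, err = quad(pcosmic.loglognormal_dlog, logmean-10.*logsigma,
+                          logmean+10.*logsigma, args=(logmean, logsigma, norm))
+        assert abs(total - 1.) < 1e-3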
+    # loglognormal_dlog takes (logmean, logsigma, norm), so pass the
+    # distribution parameters rather than the bin edges
+    args=(logmean,logsigma,1.)
+    for i in np.arange(nbins):
+        weight,err=quad(pcosmic.loglognormal_dlog,logbins[i],logbins[i+1],args=args)
+        probs[i] = weight
+        if logbins[i+1] > logmean:
+            break
+    # fills up remaining bins with the peak value
+    probs[i+1:] = probs[i]
+    # normalisation
+    probs /= np.sum(probs)
+    return probs
+
-def make_widths(s:Survey,state):
+def make_widths(s:Survey,state,z=0.):
     """
     This method takes a distribution of intrinsic FRB widths
     (lognormal, defined by wlogmean and wlogsigma), and returns
@@ -888,40 +1064,37 @@
     Args:
         s (Survey,required): instance of survey class
-    state (state class,required): instance of the state class
-
+    state (state class,required): instance of the state class
+    z (float): redshift at which this is being calculated
+
     Returns:
         list: list of widths
     """
 
     # variables which can be over-ridden by a survey, but which
     # appear by default in the parameter set
-    # method to use to calculate FRB width distribution
-    if 'WMETHOD' in s.meta and s.meta['WMETHOD'] is not None:
-        width_method = s.meta['WMETHOD']
-    else:
-        width_method = state.width.Wmethod
-
-    # just extracting for now to get thrings straight
-    nbins=state.width.Wbins
-    scale=state.width.Wscale
+    # just extracting for now to get things straight
+    nbins=state.width.WNbins
     thresh=state.width.Wthresh
-    #width_method=state.width.Wmethod
     wlogmean=state.width.Wlogmean
     wlogsigma=state.width.Wlogsigma
+    width_method = s.meta["WMETHOD"]
+    wmin = state.width.WMin
+    wmax = state.width.WMax
 
     slogmean=state.scat.Slogmean
     slogsigma=state.scat.Slogsigma
     sfnorm=state.scat.Sfnorm
     sfpower=state.scat.Sfpower
+    maxsigma=state.scat.Smaxsigma
+    scatdist=state.scat.ScatDist
+
+    # adjusts these model values according to redshift
+    wlogmean += np.log(1.+z) # intrinsic width scales with (1+z)
+    slogmean -= 3.*np.log(1.+z) # scattering scales with (1+z)^-3
 
     # constant of DM
     k_DM=4.149 #ms GHz^2 pc^-1 cm^3
 
-    # Parse
-    # OLD
-    # tres=s.meta['TRES']
-    # nu_res=s.meta['FRES']
-    # fbar=s.meta['FBAR']
     tres=s.meta['TRES']
     nu_res=s.meta['FRES']
     fbar=s.meta['FBAR']
@@ -933,20 +1106,23 @@
 
     # total smearing factor within a channel
     dm_smearing=2*(nu_res/1.e3)*k_DM*DM/(fbar/1e3)**3 #smearing factor of FRB in the band
-
-    # inevitable width due to dm and time resolution
-    wequality=(dm_smearing**2 + tres**2)**0.5
-
-    # initialise min/max of width bins
-    wmax=wequality*thresh
-    wmin=wmax*np.exp(-3.*wlogsigma) # three standard deviations below the mean
-
     # keeps track of numerical normalisation to ensure it ends up at unity
     wsum=0.
     ######## generate width distribution ######
     # arrays to hold widths and weights
     weights=[]
    widths=[]
+
+    if width_method == 1 or width_method==2 or width_method==3:
+        bins = np.zeros([nbins+1])
+        logwmin = np.log10(wmin)
+        logwmax = np.log10(wmax)
+        dbin = (logwmax - logwmin)/(nbins-1.)
+        # bins ignore wmax - the scale takes precedence
+        bins[1:] = np.logspace(logwmin,logwmax, nbins)
+        widths = 10**(dbin * (np.arange(nbins)-0.5) + logwmin)
+        bins[0] = 1.e-10 # a very tiny value to avoid bad things in log space
+
+    if width_method==0:
+        # do not take a distribution, just use 1ms for everything
+        # this is done for tests, for complex surveys such as CHIME,
@@ -954,72 +1130,34 @@
         weights.append(1.)
widths.append(np.exp(wlogmean)) elif width_method==1: - # take intrinsic lognrmal width distribution only + # take intrinsic lognormal width distribution only # normalisation of a log-normal norm=(2.*np.pi)**-0.5/wlogsigma args=(wlogmean,wlogsigma,norm) - + weights = np.zeros([nbins]) for i in np.arange(nbins): - weight,err=quad(pcosmic.loglognormal_dlog,np.log(wmin),np.log(wmax),args=args) - width=(wmin*wmax)**0.5 - widths.append(width) - weights.append(weight) - wsum += weight - wmin = wmax - wmax *= scale - elif width_method==2: - # include scattering distribution + weight,err=quad(pcosmic.loglognormal_dlog,np.log(bins[i]),np.log(bins[i+1]),args=args) + #width=(wmin*wmax)**0.5 + #widths.append(width) + weights[i] = weight #.append(weight) + #wsum += weight + #wmin = wmax + #wmax *= scale + elif width_method==2 or width_method==3: + # include scattering distribution. 3 means include z-dependence # scale scattering time according to frequency in logspace slogmean = slogmean + sfpower*np.log(fbar/sfnorm) + # generates bins + + #gets cumulative hist and bin edges - dist,cdist,cbins=geometric_lognormals(wlogmean, - wlogsigma, - slogmean, - slogsigma) + dist,cdist,cbins=geometric_lognormals2(wlogmean, + wlogsigma,slogmean,slogsigma,Nsigma=maxsigma, + ScatDist=scatdist,bins=bins) + weights = dist - # In the below, imin1 and imin2 are the two indices bracketing the minimum - # bin, while imax1 and imax2 bracket the upper max bin - imin1=0 - kmin=0. - imin2=1 - maxbins=cdist.size - for i in np.arange(nbins): - if i==nbins-1 or wmax >= cbins[-1]: - imax2=maxbins-1 - else: - imax2=np.where(cbins > wmax)[0][0] - if imax2 >= maxbins: - imax2=maxbins-1 - - imax1=imax2-1 - - # interpolating max bin. kmax applies to imax2, 1-kmax to imax1 - kmax=(wmax-cbins[imax1])/(cbins[imax2]-cbins[imax1]) - - #these are cumulative bins - # the area in the middle is just cmax-cmin - cmin=kmin*cdist[imin2]+(1-kmin)*cdist[imin1] - cmax=kmax*cdist[imax2]+(1-kmax)*cdist[imax1] - if i==0: - weight=cmax #forces integration from zero - else: - weight=cmax-cmin #integrates from bin min to bin max - # upper bins becomes lower bins - imin1=imax1 - imin2=imax2 - kmin=kmax - - width=(wmin*wmax)**0.5 - widths.append(width) - weights.append(weight) - wsum += weight - - # updates widths of bin mins and maxes - wmin = wmax - wmax *= scale - - elif width_method==3: + elif width_method==4: # use specific width of FRB. 
This requires there to be only a single FRB in the survey
+        if s.meta['NFRB'] != 1:
+            raise ValueError(f"If width method in make_widths is 4, only one FRB should be specified in the survey, but {s.meta['NFRB']} FRBs were specified")
+
+        weights.append(1.)
+        widths.append(s.frbs['WIDTH'][0])
     else:
-        raise ValueError("Width method in make_widths must be 0, 1 or 2, not ",width_method)
+        raise ValueError(f"Width method in make_widths must be 0, 1, 2, 3 or 4, not {width_method}")
-    weights[-1] += 1.-wsum #adds defecit here
+    # check this is correct - we may wish to lose extra probability
+    # off the top, though never off the bottom
+    #weights[-1] += 1.-wsum #adds deficit here
 
     weights=np.array(weights)
     widths=np.array(widths)
 
-    # removes unneccesary bins
-    keep=np.where(weights>1e-4)[0]
-    weights=weights[keep]
-    widths=widths[keep]
+    # removes unnecessary bins
+    # cannot do this when considering z-dependent bins, for consistency
+    #keep=np.where(weights>1e-4)[0]
+    #weights=weights[keep]
+    #widths=widths[keep]
 
     return widths,weights
 
 def load_survey(survey_name:str, state:parameters.State,
                 dmvals:np.ndarray,
+                zvals:np.ndarray,
                 sdir:str=None, NFRB:int=None,
                 nbins=None, iFRB:int=0,
                 dummy=False,
                 edir=None,
-                rand_DMG=False,):
+                rand_DMG=False,
+                survey_dict = None,
+                verbose=False):
     """Load a survey
 
     Args:
         survey_name (str): Name of the survey
             e.g. CRAFT/FE
         state (parameters.State): Parameters for the state
         dmvals (np.ndarray): DM values
+        zvals (np.ndarray): z values
         sdir (str, optional): Path to survey files. Defaults to None.
         nbins (int, optional): Sets number of bins for Beam analysis
             [was NBeams]
@@ -1064,7 +1209,9 @@
         dummy (bool,optional)
             Skip many initialisation steps:
            used only when loading survey parameters for conversion to the new survey format
-
+        survey_dict (dict, optional): dictionary of survey metadata to
+            over-ride values in the survey file
+        verbose (bool): if True, print progress output
     Raises:
         IOError: [description]
 
@@ -1072,7 +1219,8 @@
         Survey: instance of the class
     """
-    print(f"Loading survey: {survey_name}")
+    if verbose:
+        print(f"Loading survey: {survey_name}")
 
     if sdir is None:
         sdir = os.path.join(
@@ -1106,8 +1254,10 @@
                 survey_name,
                 os.path.join(sdir, dfile),
                 dmvals,
+                zvals,
                 NFRB=NFRB, iFRB=iFRB,
-                edir=edir, rand_DMG=rand_DMG)
+                edir=edir, rand_DMG=rand_DMG,
+                survey_dict = survey_dict)
     return srvy
 
 def vet_frb_table(frb_tbl:pandas.DataFrame,
diff --git a/zdm/survey_data.py b/zdm/survey_data.py
index 4ccd1694..7849322e 100644
--- a/zdm/survey_data.py
+++ b/zdm/survey_data.py
@@ -98,7 +98,13 @@
         })
     WIDTH: float = field(
         default=0.1,
-        metadata={'help': "Width of the event (intrinsic??)",
+        metadata={'help': "Width of the event",
+                  'unit': 'ms',
+                  'Notation': '',
+                  })
+    TAU: float = field(
+        default=-1.,
+        metadata={'help': "Scattering timescale of the event",
                   'unit': 'ms',
                   'Notation': '',
                   })
@@ -147,7 +153,13 @@
         })
     WMETHOD: int = field(
         default=2,
-        metadata={'help': "Method of calculating FRB widths; 0 ignore, 1 std, 2 includes scattering",
+        metadata={'help': "Code for width method. 0: ignore it (all 1ms), 1: intrinsic lognormal, 2: include scattering, 3: scat & z-dependence, 4: specific FRB",
+                  'unit': '',
+                  'Notation': '',
+                  })
+    WDATA: int = field(
+        default=2,
+        metadata={'help': "What does the WIDTH column include?
0 intrinsic, 1: also scattering, 2: also DM smearing", 'unit': '', 'Notation': '', }) @@ -257,6 +269,18 @@ class Observing(data_class.myDataClass): 'unit': '', 'Notation': '', }) + MAX_IW: int = field( + default=None, + metadata={'help': "Maximum width of FRB search in units of tres (12 for CRAFT ICS)", + 'unit': '', + 'Notation': '', + }) + MAXWMETH: int = field( + default=0, + metadata={'help': "Method for treating FRBs with width > max width. 0: do nothing, 1: ignore them, 2: reduce sensitivity to 1/w", + 'unit': '', + 'Notation': '', + }) MAX_LOC_DMEG: int = field( default=-1, metadata={'help': "Ignore zs with DMEG larger than 'x'. \n-1: Use all zs \n0: 'x' = smallest DMEG for an FRB without a z \n>0: 'x' = this value", diff --git a/zdm/tests/files/scat_test_new.json b/zdm/tests/files/scat_test_new.json index 410fb567..ea13f975 100644 --- a/zdm/tests/files/scat_test_new.json +++ b/zdm/tests/files/scat_test_new.json @@ -40,8 +40,7 @@ "Wlogmean": 0.0, "Wlogsigma": 0.97, "Wthresh": 0.5, - "Wbins": 10, - "Wscale": 2 + "WNbins": 10 }, "scat": { "Slogmean": 0.7, diff --git a/zdm/tests/files/scat_test_old.json b/zdm/tests/files/scat_test_old.json index b6637bbc..dfbec6b5 100644 --- a/zdm/tests/files/scat_test_old.json +++ b/zdm/tests/files/scat_test_old.json @@ -40,8 +40,7 @@ "Wlogmean": 1.70267, "Wlogsigma": 0.899148, "Wthresh": 0.5, - "Wbins": 5, - "Wscale": 3.5 + "WNbins": 5 }, "scat": { "Slogmean": 0.7, diff --git a/zdm/tests/test_path_prior.py b/zdm/tests/test_path_prior.py new file mode 100644 index 00000000..b5edcadb --- /dev/null +++ b/zdm/tests/test_path_prior.py @@ -0,0 +1,77 @@ +""" +Script testing the use of zDM to generate priors for +host galaxy magnitudes. + +""" + +#standard Python imports +import numpy as np +from matplotlib import pyplot as plt + +# imports from the "FRB" series +from zdm import optical as opt +from zdm import loading +from zdm import cosmology as cos +from zdm import parameters +from zdm import loading + +def test_path_priors(): + """ + Loops over all ICS FRBs + """ + + ######### List of all ICS FRBs for which we can run PATH ####### + # Just a single ICS FRB for this test + frblist=['FRB20180924B'] + + NFRB = len(frblist) + + # here is where I should initialise a zDM grid + state = parameters.State() + cos.set_cosmology(state) + cos.init_dist_measures() + model = opt.host_model() + name='CRAFT_ICS_1300' + ss,gs = loading.surveys_and_grids(survey_names=[name]) + g = gs[0] + s = ss[0] + + # must be done once for any fixed zvals + model.init_zmapping(g.zvals) + + + for frb in frblist: + # interates over the FRBs. "Do FRB" + # P_O is the prior for each galaxy + # P_Ox is the posterior + # P_Ux is the posterior for it being unobserved + # mags is the list of galaxy magnitudes + + # determines if this FRB was seen by the survey, and + # if so, what its DMEG is + imatch = opt.matchFRB(frb,s) + if imatch is None: + raise ValueError("Could not find ",frb," in survey") + # should be in this file + + DMEG = s.DMEGs[imatch] + + prior = model.init_path_raw_prior_Oi(DMEG,g) + PU = model.estimate_unseen_prior(mag_limit=26) # might not be correct + + # the model should have calculated a valid unseen probability + if PU < 0. 
or PU > 1.:
+            raise ValueError(f"Unseen probability calculated to be {PU}")
+
+        if not np.isfinite(PU):
+            raise ValueError(f"Unseen probability PU is {PU}")
+
+        bad = np.where(prior < 0.)[0]
+        if len(bad) > 0:
+            raise ValueError("Some elements of the model prior have negative probability")
+
+        OK = np.all(np.isfinite(prior))
+        if not OK:
+            raise ValueError("Some elements of the magnitude priors are not finite")
+
+test_path_priors()
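+
+def extra_prior_normalisation_check(prior, tol=1e-3):
+    """
+    Optional stricter check (sketch, not called above): only applies if the
+    raw magnitude prior is meant to sum to unity, which this test does not
+    assume.
+    """
+    total = np.sum(prior)
+    if np.abs(total - 1.) > tol:
+        raise ValueError(f"Magnitude prior not normalised: sum = {total}")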