diff --git a/.gitignore b/.gitignore
index cca0255d..bb56d400 100644
--- a/.gitignore
+++ b/.gitignore
@@ -174,6 +174,7 @@
 notebooks/demo_add_bands_to_empty.ipynb
 notebooks/demo_comprehensive_to_minimal_cat.ipynb
 notebooks/demo_create_footprint_mask.ipynb
 notebooks/demo_check_footprint.ipynb
+notebooks/demo_comprehensive_to_minimal_cat.ipynb
 notebooks/demo_calibrate_minimal_cat.ipynb
 notebooks/leakage_minimal.ipynb
 notebooks/demo_add_bands.ipynb
diff --git a/notebooks/cosmo_val/cat_config.yaml b/notebooks/cosmo_val/cat_config.yaml
index 5b1c3ed6..87806ec5 100644
--- a/notebooks/cosmo_val/cat_config.yaml
+++ b/notebooks/cosmo_val/cat_config.yaml
@@ -859,3 +859,49 @@ nz:
     path: dndz
 paths:
   output: ./output
+SP_v1.6.6:
+  subdir: /n17data/UNIONS/WL/v1.6.x
+  pipeline: SP
+  colour: orange
+  getdist_colour: 0.0, 0.5, 1.0
+  ls: dashed
+  marker: d
+  cov_th:
+    A: 2405.3892055695346
+    n_e: 6.128201234871523
+    n_psf: 0.752316232272063
+    sigma_e: 0.379587601488189
+  mask: /home/guerrini/sp_validation/cosmo_inference/data/mask/mask_map_v1.4.6_nside_8192.fits
+  psf:
+    PSF_flag: FLAG_PSF_HSM
+    PSF_size: SIGMA_PSF_HSM
+    square_size: true
+    star_flag: FLAG_STAR_HSM
+    star_size: SIGMA_STAR_HSM
+    hdu: 1
+    path: unions_shapepipe_psf_2024_v1.6.a.fits
+    ra_col: RA
+    dec_col: Dec
+    e1_PSF_col: E1_PSF_HSM
+    e1_star_col: E1_STAR_HSM
+    e2_PSF_col: E2_PSF_HSM
+    e2_star_col: E2_STAR_HSM
+  shear:
+    R: 1.0
+    covmat_file: ./covs/shapepipe_A/cov_shapepipe_A.txt
+    path: v1.6.6/unions_shapepipe_cut_struc_2024_v1.6.6.fits
+    redshift_path: /n17data/mkilbing/astro/data/CFIS/v1.0/nz/dndz_SP_A.txt
+    w_col: w_des
+    e1_col: e1
+    e1_col_corrected: e1_leak_corrected
+    e1_PSF_col: e1_PSF
+    e2_col: e2
+    e2_col_corrected: e2_leak_corrected
+    e2_PSF_col: e2_PSF
+    cols: RA,Dec
+  star:
+    ra_col: RA
+    dec_col: Dec
+    e1_col: e1
+    e2_col: e2
+    path: unions_shapepipe_star_2024_v1.6.a.fits
diff --git a/notebooks/cosmo_val/run_cosmo_val.py b/notebooks/cosmo_val/run_cosmo_val.py
index f08f7c0a..3b3ad6b9 100644
--- a/notebooks/cosmo_val/run_cosmo_val.py
+++ b/notebooks/cosmo_val/run_cosmo_val.py
@@ -57,6 +57,9 @@
 # %%
 cv.plot_objectwise_leakage()
 
+# %%
+cv.plot_objectwise_leakage_aux()
+
 # %%
 #cv.plot_ellipticity()
 
@@ -67,8 +70,9 @@
 cv.plot_separation()
 
 # %%
-cv.npatch = 1
-cv.treecorr_config["var_method"] = "shot"
+cv.calculate_additive_bias()
+
+# %%
 cv.plot_2pcf()
 cv.treecorr_config["var_method"] = "jackknife"
 cv.npatch = 100
diff --git a/notebooks/demo_apply_hsp_masks.py b/notebooks/demo_apply_hsp_masks.py
index e9e718d3..dc4b94d0 100644
--- a/notebooks/demo_apply_hsp_masks.py
+++ b/notebooks/demo_apply_hsp_masks.py
@@ -57,16 +57,17 @@
 # Set parameters
 base = "unions_shapepipe_comprehensive"
 year = 2024
-ver = "v1.5.c"
+ver_maj = "v1.5"
+
+obj._params["input_path"] = f"{base}_{year}_{ver_maj}.c.hdf5"
+obj._params["output_path"] = f"{base}_struc_{year}_{ver_maj}.c.hdf5"
+obj._params["mask_dir"] = f"{os.environ['HOME']}/{ver_maj}.x/masks"
 
-obj._params["input_path"] = f"{base}_{year}_{ver}.hdf5"
-obj._params["output_path"] = f"{base}_struc_{year}_{ver}.hdf5"
-obj._params["mask_dir"] = "/n17data/UNIONS/WL/masks"
 obj._params["nside"] = 131072
 obj._params["file_base"] = "mask_r_"
 obj._params["bits"] = bits
-obj._params["aux_mask_files"] = f"{obj._params['mask_dir']}/coverage_v1.5.x.hsp"
+obj._params["aux_mask_files"] = f"{obj._params['mask_dir']}/coverage_{ver_maj}.x.hsp"
 obj._params["aux_mask_labels"] = "npoint3"
 obj._params["verbose"] = True
 # -
diff --git a/src/sp_validation/cosmo_val.py b/src/sp_validation/cosmo_val.py
index d1f276b7..751ae056 100644
--- a/src/sp_validation/cosmo_val.py
+++ b/src/sp_validation/cosmo_val.py
@@ -203,13 +203,13 @@ def __init__(
         ylim_alpha=[-0.005, 0.05],
         ylim_xi_sys_ratio=[-0.02, 0.5],
         nside=1024,
-        nside_mask = 2**12,
+        nside_mask=2**12,
         binning="powspace",
         power=1 / 2,
         n_ell_bins=32,
         ell_step=10,
         pol_factor=True,
-        cell_method='map',
+        cell_method="map",
         nrandom_cell=10,
         path_onecovariance=None,
         cosmo_params=None,
@@ -239,7 +239,10 @@
         self.nside_mask = nside_mask
         self.path_onecovariance = path_onecovariance
 
-        assert self.cell_method in ["map", "catalog"], "cell_method must be 'map' or 'catalog'"
+        assert self.cell_method in [
+            "map",
+            "catalog",
+        ], "cell_method must be 'map' or 'catalog'"
 
         # For theory calculations:
         # Create cosmology object using new functionality
@@ -249,7 +252,6 @@
             # Use Planck 2018 defaults
             self.cosmo = get_cosmo()
 
-
         self.treecorr_config = {
             "ra_units": "degrees",
             "dec_units": "degrees",
@@ -258,7 +260,9 @@
             "sep_units": "arcmin",
             "nbins": nbins,
             "var_method": var_method,
-            "cross_patch_weight": "match" if var_method == "jackknife" else "simple",
+            "cross_patch_weight": (
+                "match" if var_method == "jackknife" else "simple"
+            ),
         }
 
         self.catalog_config_path = Path(catalog_config)
@@ -295,7 +299,10 @@ def ensure_version_exists(ver):
             base_ver = ver[: -len(leak_suffix)]
             ensure_version_exists(base_ver)
             shear_cfg = cc[base_ver]["shear"]
-            if "e1_col_corrected" not in shear_cfg or "e2_col_corrected" not in shear_cfg:
+            if (
+                "e1_col_corrected" not in shear_cfg
+                or "e2_col_corrected" not in shear_cfg
+            ):
                 raise ValueError(
                     f"{base_ver} does not have e1_col_corrected/e2_col_corrected "
                     f"fields; cannot create {ver}"
@@ -414,7 +421,9 @@ def compute_survey_stats(
         weight_column = weights_key_override or shear_cfg["w_col"]
 
         if weight_column not in data.columns.names:
-            raise KeyError(f"Weight column '{weight_column}' missing in {catalog_path}")
+            raise KeyError(
+                f"Weight column '{weight_column}' missing in {catalog_path}"
+            )
 
         w = np.asarray(data[weight_column], dtype=float)
 
@@ -428,9 +437,15 @@
             mask_candidate = mask_path
         else:
             mask_candidate = self.cc[ver].get("mask")
-            if isinstance(mask_candidate, str) and not os.path.isabs(mask_candidate):
-                mask_candidate = str(Path(self.cc[ver]["subdir"]) / mask_candidate)
-            if mask_candidate is not None and not os.path.exists(mask_candidate):
+            if isinstance(mask_candidate, str) and not os.path.isabs(
+                mask_candidate
+            ):
+                mask_candidate = str(
+                    Path(self.cc[ver]["subdir"]) / mask_candidate
+                )
+            if mask_candidate is not None and not os.path.exists(
+                mask_candidate
+            ):
                 mask_candidate = None
 
         area_deg2 = None
@@ -481,7 +496,9 @@ def _area_from_catalog(self, catalog_path, nside):
 
     def _area_from_mask(self, mask_map_path):
         mask = hp.read_map(mask_map_path, dtype=np.float64)
-        return float(mask.sum() * hp.nside2pixarea(hp.get_nside(mask), degrees=True))
+        return float(
+            mask.sum() * hp.nside2pixarea(hp.get_nside(mask), degrees=True)
+        )
 
     def _write_catalog_config(self):
         with self.catalog_config_path.open("w") as file:
@@ -528,8 +545,12 @@ def set_params_leakage_scale(self, ver):
         params_in["e1_col"] = self.cc[ver]["shear"]["e1_col"]
         params_in["e2_col"] = self.cc[ver]["shear"]["e2_col"]
         params_in["w_col"] = self.cc[ver]["shear"]["w_col"]
-        params_in["R11"] = None if ver != "DES" else self.cc[ver]["shear"]["R11"]
-        params_in["R22"] = None if ver != "DES" else self.cc[ver]["shear"]["R22"]
+        params_in["R11"] = (
+            None if ver != "DES" else self.cc[ver]["shear"]["R11"]
+        )
+        params_in["R22"] = (
+            None if ver != "DES" else self.cc[ver]["shear"]["R22"]
+        )
 
         params_in["ra_star_col"] = self.cc[ver]["star"]["ra_col"]
         params_in["dec_star_col"] = self.cc[ver]["star"]["dec_col"]
@@ -610,7 +631,6 @@ def basename(self, version, treecorr_config=None, npatch=None):
             f"_npatch={patches}"
         )
 
-
     def calculate_rho_tau_stats(self):
         out_dir = f"{self.cc['paths']['output']}/rho_tau_stats"
         if not os.path.exists(out_dir):
@@ -649,13 +669,13 @@ def tau_stat_handler(self):
     @property
     def colors(self):
         return [self.cc[ver]["colour"] for ver in self.versions]
-
+
     @property
     def area(self):
         if not hasattr(self, "_area"):
            self.calculate_area()
         return self._area
-
+
     @property
     def n_eff_gal(self):
         if not hasattr(self, "_n_eff_gal"):
@@ -674,7 +694,7 @@ def pseudo_cls(self):
         self.calculate_pseudo_cl()
         self.calculate_pseudo_cl_eb_cov()
         return self._pseudo_cls
-
+
     @property
     def pseudo_cls_onecov(self):
         if not hasattr(self, "_pseudo_cls_onecov"):
@@ -687,33 +707,42 @@ def calculate_area(self):
 
         for ver in self.versions:
             self.print_magenta(ver)
 
-            if not hasattr(self.cc[ver]['shear'], 'mask'):
-                print("Mask not found in config file, calculating area from binned catalog")
+            if not hasattr(self.cc[ver]["shear"], "mask"):
+                print(
+                    "Mask not found in config file, calculating area from binned catalog"
+                )
                 area[ver] = self.calculate_area_from_binned_catalog(ver)
             else:
-                mask = hp.read_map(self.cc[ver]['shear']['mask'], verbose=False)
+                mask = hp.read_map(self.cc[ver]["shear"]["mask"], verbose=False)
                 nside_mask = hp.get_nside(mask)
                 print(f"nside_mask = {nside_mask}")
-                area[ver] = np.sum(mask) * hp.nside2pixarea(nside_mask, degrees=True)
+                area[ver] = np.sum(mask) * hp.nside2pixarea(
+                    nside_mask, degrees=True
+                )
 
             print(f"Area = {area[ver]:.2f} deg^2")
 
         self._area = area
         self.print_done("Area calculation finished")
-
+
     def calculate_area_from_binned_catalog(self, ver):
         print(f"nside_mask = {self.nside_mask}")
         with self.results[ver].temporarily_read_data():
             ra = self.results[ver].dat_shear["RA"]
             dec = self.results[ver].dat_shear["Dec"]
             hsp_map = hp.ang2pix(
-                self.nside_mask, 
+                self.nside_mask,
                 np.radians(90 - dec),
                 np.radians(ra),
                 lonlat=False,
             )
-        mask = np.bincount(hsp_map, minlength=hp.nside2npix(self.nside_mask)) > 0
+        mask = (
+            np.bincount(hsp_map, minlength=hp.nside2npix(self.nside_mask))
+            > 0
+        )
 
-        area = np.sum(mask) * hp.nside2pixarea(self.nside_mask, degrees=True)
+        area = np.sum(mask) * hp.nside2pixarea(
+            self.nside_mask, degrees=True
+        )
         print(f"Area = {area:.2f} deg^2")
 
         return area
@@ -725,9 +754,14 @@ def calculate_n_eff_gal(self):
             self.print_magenta(ver)
             with self.results[ver].temporarily_read_data():
                 w = self.results[ver].dat_shear[self.cc[ver]["shear"]["w_col"]]
-            n_eff_gal[ver] = 1/(self.area[ver]*60*60)* np.sum(w)**2/np.sum(w**2)
+            n_eff_gal[ver] = (
+                1
+                / (self.area[ver] * 60 * 60)
+                * np.sum(w) ** 2
+                / np.sum(w**2)
+            )
             print(f"n_eff_gal = {n_eff_gal[ver]:.2f} gal./arcmin^-2")
-
+
         self._n_eff_gal = n_eff_gal
         self.print_done("Effective number of galaxy calculation finished")
@@ -737,13 +771,23 @@ def calculate_ellipticity_dispersion(self):
         for ver in self.versions:
             self.print_magenta(ver)
             with self.results[ver].temporarily_read_data():
-                e1 = self.results[ver].dat_shear[self.cc[ver]["shear"]["e1_col"]]
-                e2 = self.results[ver].dat_shear[self.cc[ver]["shear"]["e2_col"]]
+                e1 = self.results[ver].dat_shear[
+                    self.cc[ver]["shear"]["e1_col"]
+                ]
+                e2 = self.results[ver].dat_shear[
+                    self.cc[ver]["shear"]["e2_col"]
+                ]
                 w = self.results[ver].dat_shear[self.cc[ver]["shear"]["w_col"]]
             ellipticity_dispersion[ver] = np.sqrt(
-                0.5*(np.average(e1**2, weights=w**2) + np.average(e2**2, weights=w**2))
+                0.5
+                * (
+                    np.average(e1**2, weights=w**2)
+                    + np.average(e2**2, weights=w**2)
+                )
+            )
+            print(
+                f"Ellipticity dispersion = {ellipticity_dispersion[ver]:.4f}"
             )
-            print(f"Ellipticity dispersion = {ellipticity_dispersion[ver]:.4f}")
 
         self._ellipticity_dispersion = ellipticity_dispersion
 
     def plot_rho_stats(self, abs=False):
@@ -836,7 +880,11 @@ def calculate_rho_tau_fits(self):
         assert self.rho_tau_method != "none"
 
         # this initializes the rho_tau_fits attribute
-        self._rho_tau_fits = {"flat_sample_list": [], "result_list": [], "q_list": []}
+        self._rho_tau_fits = {
+            "flat_sample_list": [],
+            "result_list": [],
+            "q_list": [],
+        }
 
         quantiles = [1 - self.quantile, self.quantile]
         self._xi_psf_sys = {}
@@ -866,7 +914,9 @@
             self.rho_tau_fits["result_list"].append(result)
             self.rho_tau_fits["q_list"].append(q)
 
-            self.psf_fitter.load_rho_stat(f"rho_stats_{self.basename(ver)}.fits")
+            self.psf_fitter.load_rho_stat(
+                f"rho_stats_{self.basename(ver)}.fits"
+            )
 
             nbins = self.psf_fitter.rho_stat_handler._treecorr_config["nbins"]
             xi_psf_sys_samples = np.array([]).reshape(0, nbins)
@@ -902,7 +952,9 @@
             show=True,
             close=True,
         )
-        self.print_done(f"Tau contours plot saved to {os.path.abspath(savefig)}")
+        self.print_done(
+            f"Tau contours plot saved to {os.path.abspath(savefig)}"
+        )
 
         plt.figure(figsize=(15, 6))
         for mcmc_result, ver, color, flat_sample in zip(
@@ -911,7 +963,9 @@
             self.colors,
             self.rho_tau_fits["flat_sample_list"],
         ):
-            self.psf_fitter.load_rho_stat(f"rho_stats_{self.basename(ver)}.fits")
+            self.psf_fitter.load_rho_stat(
+                f"rho_stats_{self.basename(ver)}.fits"
+            )
             for i in range(100):
                 self.psf_fitter.plot_xi_psf_sys(
                     flat_sample[-i + 1], ver, color, alpha=0.1
                 )
@@ -934,8 +988,12 @@
             theta = self.psf_fitter.rho_stat_handler.rho_stats["theta"]
             xi_psf_sys = self.xi_psf_sys[ver]
             plt.plot(theta, xi_psf_sys["mean"], linestyle=ls, color=color)
-            plt.plot(theta, xi_psf_sys["quantiles"][0], linestyle=ls, color=color)
-            plt.plot(theta, xi_psf_sys["quantiles"][1], linestyle=ls, color=color)
+            plt.plot(
+                theta, xi_psf_sys["quantiles"][0], linestyle=ls, color=color
+            )
+            plt.plot(
+                theta, xi_psf_sys["quantiles"][1], linestyle=ls, color=color
+            )
             plt.fill_between(
                 theta,
                 xi_psf_sys["quantiles"][0],
@@ -961,7 +1019,9 @@
             self.versions,
             self.rho_tau_fits["flat_sample_list"],
         ):
-            self.psf_fitter.load_rho_stat(f"rho_stats_{self.basename(ver)}.fits")
+            self.psf_fitter.load_rho_stat(
+                f"rho_stats_{self.basename(ver)}.fits"
+            )
             for yscale in ("linear", "log"):
                 out_path = os.path.abspath(
                     f"{out_dir}/xi_psf_sys_terms_{yscale}_{ver}.png"
                 )
@@ -1013,20 +1073,28 @@ def calculate_scale_dependent_leakage(self):
             output_path_ab = f"{output_base_path}_a_b.txt"
             output_path_aa = f"{output_base_path}_a_a.txt"
             with self.results[ver].temporarily_read_data():
-                if os.path.exists(output_path_ab) and os.path.exists(output_path_aa):
+                if os.path.exists(output_path_ab) and os.path.exists(
+                    output_path_aa
+                ):
                     self.print_green(
                         f"Skipping computation, reading {output_path_ab} and "
                         f"{output_path_aa} instead"
                     )
-                    results.r_corr_gp = treecorr.GGCorrelation(self.treecorr_config)
+                    results.r_corr_gp = treecorr.GGCorrelation(
+                        self.treecorr_config
+                    )
                     results.r_corr_gp.read(output_path_ab)
-                    results.r_corr_pp = treecorr.GGCorrelation(self.treecorr_config)
+                    results.r_corr_pp = treecorr.GGCorrelation(
+                        self.treecorr_config
+                    )
                     results.r_corr_pp.read(output_path_aa)
                 else:
-                    results.compute_corr_gp_pp_alpha(output_base_path=output_base_path)
+                    results.compute_corr_gp_pp_alpha(
+                        output_base_path=output_base_path
+                    )
 
             results.do_alpha(fast=True)
             results.do_xi_sys()
@@ -1130,7 +1198,9 @@ def plot_scale_dependent_leakage(self):
         xlabel = r"$\theta$ [arcmin]"
         ylabel = r"$\xi^{\rm sys}_+(\theta)$"
         title = "Cross-correlation leakage"
-        out_path = os.path.abspath(f"{self.cc['paths']['output']}/xi_sys_p.png")
+        out_path = os.path.abspath(
+            f"{self.cc['paths']['output']}/xi_sys_p.png"
+        )
         cs_plots.plot_data_1d(
             theta,
             y,
@@ -1161,7 +1231,9 @@
         xlabel = r"$\theta$ [arcmin]"
         ylabel = r"$\xi^{\rm sys}_-(\theta)$"
         title = "Cross-correlation leakage"
-        out_path = os.path.abspath(f"{self.cc['paths']['output']}/xi_sys_m.png")
+        out_path = os.path.abspath(
+            f"{self.cc['paths']['output']}/xi_sys_m.png"
+        )
         cs_plots.plot_data_1d(
             theta,
             y,
@@ -1197,9 +1269,6 @@ def calculate_objectwise_leakage(self):
             results_obj.update_params()
             results_obj.prepare_output()
 
-            # Skip read_data() and copy catalogue from scale leakage instance instead
-            # results_obj._dat = self.results[ver].dat_shear
-
             out_base = results_obj.get_out_base(mix, order)
             out_path = f"{out_base}.pkl"
             if os.path.exists(out_path):
@@ -1301,12 +1370,47 @@ def plot_objectwise_leakage(self):
         )
         cs_plots.savefig(out_path, close_fig=False)
         cs_plots.show()
-        self.print_done(f"Object-wise leakage coefficients plot saved to {out_path}")
+        self.print_done(
+            f"Object-wise leakage coefficients plot saved to {out_path}"
+        )
+
+    def calculate_objectwise_leakage_aux(self):
+
+        self.print_start("Object-wise leakage auxiliary quantities:")
+        for ver in self.versions:
+            self.print_magenta(ver)
+
+            print("Query objectwise results")
+            results_obj = self.results_objectwise[ver]
+            print("Query done")
+            results_obj.check_params()
+            results_obj.update_params()
+            results_obj.prepare_output()
+
+            with results_obj.temporarily_read_data():
+                results_obj._dat = results_obj.dat_shear
+
+                if "cols" not in results_obj._params:
+                    self.print_green(
+                        "Skipping object-wise leakage (aux quantities), no input columns for regression found"
+                    )
+                else:
+                    self.print_cyan(
+                        f"Computing object-wise leakage regression with aux quantities: {results_obj._params['cols']}"
+                    )
+
+                    # Run
+                    results_obj.obs_leakage()
+
+    def plot_objectwise_leakage_aux(self):
+        self.calculate_objectwise_leakage_aux()
 
     def plot_ellipticity(self, nbins=200):
         out_path = os.path.abspath(f"{self.cc['paths']['output']}/ell_hist.png")
         if os.path.exists(out_path):
-            self.print_done(f"Skipping ellipticity histograms, {out_path} exists")
+            self.print_done(
+                f"Skipping ellipticity histograms, {out_path} exists"
+            )
         else:
 
             self.print_start("Computing ellipticity histograms:")
@@ -1317,12 +1421,20 @@
                 R = self.cc[ver]["shear"]["R"]
                 with self.results[ver].temporarily_read_data():
                     e1 = (
-                        self.results[ver].dat_shear[self.cc[ver]["shear"]["e1_col"]] / R
+                        self.results[ver].dat_shear[
+                            self.cc[ver]["shear"]["e1_col"]
+                        ]
+                        / R
                     )
                     e2 = (
-                        self.results[ver].dat_shear[self.cc[ver]["shear"]["e2_col"]] / R
+                        self.results[ver].dat_shear[
+                            self.cc[ver]["shear"]["e2_col"]
+                        ]
+                        / R
                     )
-                    w = self.results[ver].dat_shear[self.cc[ver]["shear"]["w_col"]]
+                    w = self.results[ver].dat_shear[
+                        self.cc[ver]["shear"]["w_col"]
+                    ]
 
                 axs[0].hist(
                     e1,
@@ -1353,7 +1465,9 @@
         self.print_done("Ellipticity histograms saved to " + out_path)
 
     def plot_weights(self, nbins=200):
-        out_path = os.path.abspath(f"{self.cc['paths']['output']}/weight_hist.png")
+        out_path = os.path.abspath(
+            f"{self.cc['paths']['output']}/weight_hist.png"
+        )
         if os.path.exists(out_path):
             self.print_done(f"Skipping weight histograms, {out_path} exists")
         else:
@@ -1363,7 +1477,9 @@
             for ver in self.versions:
                 self.print_magenta(ver)
                 with self.results[ver].temporarily_read_data():
-                    w = self.results[ver].dat_shear[self.cc[ver]["shear"]["w_col"]]
+                    w = self.results[ver].dat_shear[
+                        self.cc[ver]["shear"]["w_col"]
+                    ]
 
                 plt.hist(
                     w,
@@ -1437,7 +1553,9 @@ def c2(self):
             self.calculate_additive_bias()
         return self._c2
 
-    def calculate_2pcf(self, ver, npatch=None, save_fits=False, **treecorr_config):
+    def calculate_2pcf(
+        self, ver, npatch=None, save_fits=False, **treecorr_config
+    ):
         """
         Calculate the two-point correlation function (2PCF) ξ± for a given
         catalog version with TreeCorr.
@@ -1496,8 +1614,12 @@
         else:
             # Load data and create a catalog
             with self.results[ver].temporarily_read_data():
-                e1 = self.results[ver].dat_shear[self.cc[ver]["shear"]["e1_col"]]
-                e2 = self.results[ver].dat_shear[self.cc[ver]["shear"]["e2_col"]]
+                e1 = self.results[ver].dat_shear[
+                    self.cc[ver]["shear"]["e1_col"]
+                ]
+                e2 = self.results[ver].dat_shear[
+                    self.cc[ver]["shear"]["e2_col"]
+                ]
                 w = self.results[ver].dat_shear[self.cc[ver]["shear"]["w_col"]]
                 if ver != "DES":
                     R = self.cc[ver]["shear"]["R"]
@@ -1527,7 +1649,9 @@
                 ra_units=self.treecorr_config["ra_units"],
                 dec_units=self.treecorr_config["dec_units"],
                 npatch=npatch,
-                patch_centers=patch_file if os.path.exists(patch_file) else None,
+                patch_centers=(
+                    patch_file if os.path.exists(patch_file) else None
+                ),
             )
 
             # If no patch file exists, save the current patches
@@ -1547,13 +1671,17 @@
             col2 = fits.Column(name="BIN2", format="K", array=np.ones(len(lst)))
             col3 = fits.Column(name="ANGBIN", format="K", array=lst)
             col4 = fits.Column(name="VALUE", format="D", array=gg.xip)
-            col5 = fits.Column(name="ANG", format="D", unit="arcmin", array=gg.meanr)
+            col5 = fits.Column(
+                name="ANG", format="D", unit="arcmin", array=gg.meanr
+            )
             coldefs = fits.ColDefs([col1, col2, col3, col4, col5])
             xiplus_hdu = fits.BinTableHDU.from_columns(coldefs, name="XI_PLUS")
 
             col4 = fits.Column(name="VALUE", format="D", array=gg.xim)
             coldefs = fits.ColDefs([col1, col2, col3, col4, col5])
-            ximinus_hdu = fits.BinTableHDU.from_columns(coldefs, name="XI_MINUS")
+            ximinus_hdu = fits.BinTableHDU.from_columns(
+                coldefs, name="XI_MINUS"
+            )
 
             # append xi_plus header info
             xiplus_dict = {
@@ -1567,19 +1695,27 @@
             for key in xiplus_dict:
                 xiplus_hdu.header[key] = xiplus_dict[key]
 
-            col1 = fits.Column(name="BIN1", format="K", array=np.ones(len(lst)))
-            col2 = fits.Column(name="BIN2", format="K", array=np.ones(len(lst)))
+            col1 = fits.Column(
+                name="BIN1", format="K", array=np.ones(len(lst))
+            )
+            col2 = fits.Column(
+                name="BIN2", format="K", array=np.ones(len(lst))
+            )
             col3 = fits.Column(name="ANGBIN", format="K", array=lst)
             col4 = fits.Column(name="VALUE", format="D", array=gg.xip)
             col5 = fits.Column(
                 name="ANG", format="D", unit="arcmin", array=gg.rnom
             )
             coldefs = fits.ColDefs([col1, col2, col3, col4, col5])
-            xiplus_hdu = fits.BinTableHDU.from_columns(coldefs, name="XI_PLUS")
+            xiplus_hdu = fits.BinTableHDU.from_columns(
+                coldefs, name="XI_PLUS"
+            )
 
             col4 = fits.Column(name="VALUE", format="D", array=gg.xim)
             coldefs = fits.ColDefs([col1, col2, col3, col4, col5])
-            ximinus_hdu = fits.BinTableHDU.from_columns(coldefs, name="XI_MINUS")
+            ximinus_hdu = fits.BinTableHDU.from_columns(
+                coldefs, name="XI_MINUS"
+            )
 
             # append xi_plus header info
             xiplus_dict = {
@@ -1689,7 +1825,8 @@ def plot_2pcf(self):
             plt.errorbar(
                 self.cat_ggs[ver].meanr,
                 self.cat_ggs[ver].xip * self.cat_ggs[ver].meanr,
-                yerr=np.sqrt(self.cat_ggs[ver].varxip) * self.cat_ggs[ver].meanr,
+                yerr=np.sqrt(self.cat_ggs[ver].varxip)
+                * self.cat_ggs[ver].meanr,
                 label=ver,
                 ls=self.cc[ver]["ls"],
                 color=self.cc[ver]["colour"],
@@ -1700,7 +1837,9 @@
         plt.xlabel(rf"$\theta$ [{self.treecorr_config['sep_units']}]")
         plt.xlim([self.theta_min_plot, self.theta_max_plot])
         plt.ylabel(r"$\theta \xi_+(\theta)$")
-        out_path = os.path.abspath(f"{self.cc['paths']['output']}/xi_p_theta.png")
+        out_path = os.path.abspath(
+            f"{self.cc['paths']['output']}/xi_p_theta.png"
+        )
         cs_plots.savefig(out_path, close_fig=False)
         cs_plots.show()
         self.print_done(f"xi_plus_theta plot saved to {out_path}")
@@ -1711,7 +1850,8 @@
             plt.errorbar(
                 self.cat_ggs[ver].meanr * cs_plots.dx(idx, len(ver)),
                 self.cat_ggs[ver].xim * self.cat_ggs[ver].meanr,
-                yerr=np.sqrt(self.cat_ggs[ver].varxim) * self.cat_ggs[ver].meanr,
+                yerr=np.sqrt(self.cat_ggs[ver].varxim)
+                * self.cat_ggs[ver].meanr,
                 label=ver,
                 ls=self.cc[ver]["ls"],
                 color=self.cc[ver]["colour"],
@@ -1722,7 +1862,9 @@
         plt.xlabel(rf"$\theta$ [{self.treecorr_config['sep_units']}]")
         plt.xlim([self.theta_min_plot, self.theta_max_plot])
         plt.ylabel(r"$\theta \xi_-(\theta)$")
-        out_path = os.path.abspath(f"{self.cc['paths']['output']}/xi_m_theta.png")
+        out_path = os.path.abspath(
+            f"{self.cc['paths']['output']}/xi_m_theta.png"
+        )
         cs_plots.savefig(out_path, close_fig=False)
         cs_plots.show()
         self.print_done(f"xi_minus_theta plot saved to {out_path}")
@@ -1772,13 +1914,14 @@
             )
             cs_plots.savefig(out_path, close_fig=False)
             cs_plots.show()
-            self.print_done(f"xi_plus_xi_psf_sys {ver} plot saved to {out_path}")
+            self.print_done(
+                f"xi_plus_xi_psf_sys {ver} plot saved to {out_path}"
+            )
 
     def plot_ratio_xi_sys_xi(self, threshold=0.1, offset=0.02):
 
         fig, _ = plt.subplots(ncols=1, nrows=1, figsize=(10, 7))
 
-
         for idx, ver in enumerate(self.versions):
@@ -1790,7 +1933,7 @@
             )
 
             theta = gg.meanr
-            jittered_theta = theta * (1+idx*offset)
+            jittered_theta = theta * (1 + idx * offset)
 
             plt.errorbar(
                 jittered_theta,
@@ -1800,13 +1943,13 @@
                 ls=self.cc[ver]["ls"],
                 color=self.cc[ver]["colour"],
                 fmt=self.cc[ver].get("marker", None),
-                capsize=5
+                capsize=5,
             )
 
         plt.fill_between(
             [self.theta_min_plot, self.theta_max_plot],
-            - threshold,
-            + threshold,
+            -threshold,
+            +threshold,
             color="black",
             alpha=0.1,
             label=f"{threshold:.0%} threshold",
@@ -1815,24 +1958,27 @@
             [self.theta_min_plot, self.theta_max_plot],
             [threshold, threshold],
             ls="dashed",
-            color="black")
+            color="black",
+        )
         plt.plot(
             [self.theta_min_plot, self.theta_max_plot],
             [-threshold, -threshold],
             ls="dashed",
-            color="black")
+            color="black",
+        )
 
         plt.xscale("log")
         plt.xlabel(rf"$\theta$ [{self.treecorr_config['sep_units']}]")
         plt.ylabel(r"$\xi^{\rm psf}_{+, {\rm sys}} / \xi_+$")
         plt.gca().yaxis.set_major_formatter(mticker.PercentFormatter(xmax=1))
         plt.legend()
         plt.title("Ratio of PSF systematics to cosmic shear signal")
-        out_path = os.path.abspath(f"{self.cc['paths']['output']}/ratio_xi_sys_xi.png")
+        out_path = os.path.abspath(
+            f"{self.cc['paths']['output']}/ratio_xi_sys_xi.png"
+        )
         cs_plots.savefig(out_path, close_fig=False)
         cs_plots.show()
         print(f"Ratio of xi_psf_sys to xi plot saved to {out_path}")
 
-
     def calculate_aperture_mass_dispersion(
         self, theta_min=0.3, theta_max=200, nbins=500, nbins_map=15, npatch=25
     ):
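plot_ratio_xi_sys_xi above only visualizes the |ξ_sys/ξ₊| ≤ threshold band. A numeric check of the same criterion could look like this; the helper name and the arrays are illustrative, not part of the patch:

import numpy as np

def fraction_above_threshold(xi_sys, xi_p, threshold=0.1):
    """Fraction of angular bins where the PSF contamination ratio
    |xi_sys / xi_plus| exceeds the given threshold."""
    ratio = np.abs(xi_sys / xi_p)
    return np.mean(ratio > threshold)

# Synthetic numbers: only the last bin exceeds the 10% threshold
xi_p = np.array([4e-5, 2e-5, 9e-6, 4e-6])
xi_sys = np.array([2e-6, 1.5e-6, 8e-7, 9e-7])
print(fraction_above_threshold(xi_sys, xi_p))  # -> 0.25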
plt.xlabel(rf"$\theta$ [{self.treecorr_config['sep_units']}]") plt.ylabel(r"$\xi^{\rm psf}_{+, {\rm sys}} / \xi_+$") plt.gca().yaxis.set_major_formatter(mticker.PercentFormatter(xmax=1)) plt.legend() plt.title("Ratio of PSF systematics to cosmic shear signal") - out_path = os.path.abspath(f"{self.cc['paths']['output']}/ratio_xi_sys_xi.png") + out_path = os.path.abspath( + f"{self.cc['paths']['output']}/ratio_xi_sys_xi.png" + ) cs_plots.savefig(out_path, close_fig=False) cs_plots.show() print(f"Ratio of xi_psf_sys to xi plot saved to {out_path}") - def calculate_aperture_mass_dispersion( self, theta_min=0.3, theta_max=200, nbins=500, nbins_map=15, npatch=25 ): @@ -1864,11 +2010,15 @@ def calculate_aperture_mass_dispersion( with self.results[ver].temporarily_read_data(): R = self.cc[ver]["shear"]["R"] g1 = ( - self.results[ver].dat_shear[self.cc[ver]["shear"]["e1_col"]] + self.results[ver].dat_shear[ + self.cc[ver]["shear"]["e1_col"] + ] - self.c1[ver] ) / R g2 = ( - self.results[ver].dat_shear[self.cc[ver]["shear"]["e2_col"]] + self.results[ver].dat_shear[ + self.cc[ver]["shear"]["e2_col"] + ] - self.c2[ver] ) / R cat_gal = treecorr.Catalog( @@ -1876,7 +2026,9 @@ def calculate_aperture_mass_dispersion( dec=self.results[ver].dat_shear["Dec"], g1=g1, g2=g2, - w=self.results[ver].dat_shear[self.cc[ver]["shear"]["w_col"]], + w=self.results[ver].dat_shear[ + self.cc[ver]["shear"]["w_col"] + ], ra_units=self.treecorr_config["ra_units"], dec_units=self.treecorr_config["dec_units"], npatch=npatch, @@ -1935,7 +2087,9 @@ def plot_aperture_mass_dispersion(self): xlabel = r"$\theta$ [arcmin]" ylabel = "dispersion" title = f"Aperture-mass dispersion {mode}" - out_path = os.path.abspath(f"{self.cc['paths']['output']}/{mode}.png") + out_path = os.path.abspath( + f"{self.cc['paths']['output']}/{mode}.png" + ) cs_plots.plot_data_1d( x, y, @@ -1967,7 +2121,9 @@ def plot_aperture_mass_dispersion(self): xlabel = r"$\theta$ [arcmin]" ylabel = "dispersion" title = f"Aperture-mass dispersion mode {mode}" - out_path = os.path.abspath(f"{self.cc['paths']['output']}/{mode}_log.png") + out_path = os.path.abspath( + f"{self.cc['paths']['output']}/{mode}_log.png" + ) cs_plots.plot_data_1d( x, y, @@ -2095,7 +2251,9 @@ def calculate_pure_eb( # Calculate correlation functions gg = self.calculate_2pcf(version, npatch=npatch, **treecorr_config) - gg_int = self.calculate_2pcf(version, npatch=npatch, **treecorr_config_int) + gg_int = self.calculate_2pcf( + version, npatch=npatch, **treecorr_config_int + ) # Get redshift distribution if using analytic covariance if cov_path_int is not None: @@ -2112,7 +2270,7 @@ def calculate_pure_eb( cov_path_int=cov_path_int, cosmo_cov=cosmo_cov, n_samples=n_samples, - z_dist=z_dist + z_dist=z_dist, ) return results @@ -2135,7 +2293,7 @@ def plot_pure_eb( cosmo_cov=None, n_samples=1000, results=None, - **kwargs + **kwargs, ): """ Generate comprehensive pure E/B mode analysis plots. 
@@ -2200,7 +2358,7 @@ def plot_pure_eb(
 
         # Use instance defaults for unspecified parameters
         versions = versions or self.versions
-        output_dir = output_dir or self.cc['paths']['output']
+        output_dir = output_dir or self.cc["paths"]["output"]
         npatch = npatch or self.npatch
 
         # Override var_method to analytic when cov_path_int is provided
@@ -2246,26 +2404,26 @@
 
             # Get or calculate results for this version
             version_results = results_list[idx] or self.calculate_pure_eb(
-                    version,
-                    min_sep=min_sep,
-                    max_sep=max_sep,
-                    nbins=nbins,
-                    min_sep_int=min_sep_int,
-                    max_sep_int=max_sep_int,
-                    nbins_int=nbins_int,
-                    npatch=npatch,
-                    var_method=var_method,
-                    cov_path_int=cov_path_int,
-                    cosmo_cov=cosmo_cov,
-                    n_samples=n_samples,
-                )
+                version,
+                min_sep=min_sep,
+                max_sep=max_sep,
+                nbins=nbins,
+                min_sep_int=min_sep_int,
+                max_sep_int=max_sep_int,
+                nbins_int=nbins_int,
+                npatch=npatch,
+                var_method=var_method,
+                cov_path_int=cov_path_int,
+                cosmo_cov=cosmo_cov,
+                n_samples=n_samples,
+            )
 
             # Calculate E/B statistics for all bin combinations (only if not provided)
             version_results = calculate_eb_statistics(
                 version_results,
                 cov_path_int=cov_path_int,
                 n_samples=n_samples,
-                **kwargs
+                **kwargs,
             )
 
             # Generate all plots using specialized plotting functions
@@ -2273,9 +2431,7 @@
 
             # Integration vs Reporting comparison plot
             plot_integration_vs_reporting(
-                gg, gg_int,
-                out_stub + "_integration_vs_reporting.png",
-                version
+                gg, gg_int, out_stub + "_integration_vs_reporting.png", version
             )
 
             # E/B/Ambiguous correlation functions plot
@@ -2284,7 +2440,7 @@
                 out_stub + "_xis.png",
                 version,
                 fiducial_xip_scale_cut=fiducial_xip_scale_cut,
-                fiducial_xim_scale_cut=fiducial_xim_scale_cut
+                fiducial_xim_scale_cut=fiducial_xim_scale_cut,
             )
 
             # 2D PTE heatmaps plot
@@ -2293,7 +2449,7 @@
                 version,
                 out_stub + "_ptes.png",
                 fiducial_xip_scale_cut=fiducial_xip_scale_cut,
-                fiducial_xim_scale_cut=fiducial_xim_scale_cut
+                fiducial_xim_scale_cut=fiducial_xim_scale_cut,
             )
 
             # Covariance matrix plot
@@ -2301,7 +2457,7 @@
                 version_results["cov"],
                 var_method,
                 out_stub + "_covariance.png",
-                version
+                version,
             )
 
     def calculate_cosebis(
@@ -2405,7 +2561,7 @@
         scale_cuts = [
             (bin_edges[start], bin_edges[stop])
             for start in range(nbins)
-            for stop in range(start+1, nbins+1)
+            for stop in range(start + 1, nbins + 1)
         ]
 
         print(f"Evaluating {len(scale_cuts)} scale cut combinations")
@@ -2428,11 +2584,17 @@ def plot_cosebis(
         self,
         version=None,
         output_dir=None,
-        min_sep_int=0.5, max_sep_int=500, nbins_int=1000,  # Integration binning
-        npatch=None, nmodes=10, cov_path=None,
-        evaluate_all_scale_cuts=False,  # New parameter
-        min_sep=None, max_sep=None, nbins=None,  # Reporting binning
-        fiducial_scale_cut=None,  # For plotting reference
+        min_sep_int=0.5,
+        max_sep_int=500,
+        nbins_int=1000,  # Integration binning
+        npatch=None,
+        nmodes=10,
+        cov_path=None,
+        evaluate_all_scale_cuts=False,  # New parameter
+        min_sep=None,
+        max_sep=None,
+        nbins=None,  # Reporting binning
+        fiducial_scale_cut=None,  # For plotting reference
         results=None,
     ):
         """
@@ -2488,8 +2650,8 @@
 
         # Use instance defaults if not specified
         version = version or self.versions[0]
-        output_dir = output_dir or self.cc['paths']['output']
-        npatch = npatch or self.treecorr_config.get('npatch', 256)
+        output_dir = output_dir or self.cc["paths"]["output"]
+        npatch = npatch or self.treecorr_config.get("npatch", 256)
 
         # Determine variance method based on whether theoretical covariance is used
         var_method = "analytic" if cov_path is not None else "jackknife"
@@ -2503,7 +2665,9 @@
 
         # Add scale cut info if provided
         if fiducial_scale_cut is not None:
-            out_stub += f"_scalecut={fiducial_scale_cut[0]}-{fiducial_scale_cut[1]}"
+            out_stub += (
+                f"_scalecut={fiducial_scale_cut[0]}-{fiducial_scale_cut[1]}"
+            )
 
         # if evaluate_all_scale_cuts:
         #     out_stub += f"_allcuts_minsep={min_sep}_maxsep={max_sep}_nbins={nbins}"
@@ -2527,8 +2691,9 @@
 
         # Generate plots using specialized plotting functions
         # Extract single result for plotting if multiple scale cuts were evaluated
-        if (isinstance(results, dict) and
-                all(isinstance(k, tuple) for k in results.keys())):
+        if isinstance(results, dict) and all(
+            isinstance(k, tuple) for k in results.keys()
+        ):
             # Multiple scale cuts: use fiducial_scale_cut if provided, otherwise use
             # full range
             if fiducial_scale_cut is not None:
@@ -2547,20 +2712,19 @@
             plot_results,
             version,
             out_stub + "_cosebis.png",
-            fiducial_scale_cut=fiducial_scale_cut
+            fiducial_scale_cut=fiducial_scale_cut,
         )
 
         plot_cosebis_covariance_matrix(
-            plot_results,
-            version,
-            var_method,
-            out_stub + "_covariance.png"
+            plot_results, version, var_method, out_stub + "_covariance.png"
         )
 
         # Generate scale cut heatmap if we have multiple scale cuts
-        if (isinstance(results, dict) and
-                all(isinstance(k, tuple) for k in results.keys()) and
-                len(results) > 1):
+        if (
+            isinstance(results, dict)
+            and all(isinstance(k, tuple) for k in results.keys())
+            and len(results) > 1
+        ):
             # Create temporary gg object with correct binning for mapping
             treecorr_config_temp = {
                 **self.treecorr_config,
@@ -2577,10 +2741,9 @@
                 gg_temp,
                 version,
                 out_stub + "_scalecut_ptes.png",
-                fiducial_scale_cut=fiducial_scale_cut
+                fiducial_scale_cut=fiducial_scale_cut,
             )
 
-
     def calculate_pseudo_cl_eb_cov(self):
         """
         Compute a theoretical Gaussian covariance of the Pseudo-Cl for EE, EB and BB.
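The scale_cuts comprehension reformatted in this patch enumerates every contiguous (lower, upper) angular range built from the bin edges, i.e. nbins(nbins+1)/2 combinations. A standalone sketch with made-up edges:

import numpy as np

nbins = 4
bin_edges = np.geomspace(1.0, 200.0, nbins + 1)  # arcmin, illustrative

scale_cuts = [
    (bin_edges[start], bin_edges[stop])
    for start in range(nbins)
    for stop in range(start + 1, nbins + 1)
]
# nbins * (nbins + 1) / 2 combinations: 10 for nbins = 4
print(len(scale_cuts), scale_cuts[0], scale_cuts[-1])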
@@ -2599,14 +2762,20 @@
             if ver not in self._pseudo_cls.keys():
                 self._pseudo_cls[ver] = {}
 
-            out_path = os.path.abspath(f"{self.cc['paths']['output']}/pseudo_cl_cov_{ver}.fits")
+            out_path = os.path.abspath(
+                f"{self.cc['paths']['output']}/pseudo_cl_cov_{ver}.fits"
+            )
             if os.path.exists(out_path):
-                self.print_done(f"Skipping Pseudo-Cl covariance calculation, {out_path} exists")
-                self._pseudo_cls[ver]['cov'] = fits.open(out_path)
+                self.print_done(
+                    f"Skipping Pseudo-Cl covariance calculation, {out_path} exists"
+                )
+                self._pseudo_cls[ver]["cov"] = fits.open(out_path)
             else:
                 params = get_params_rho_tau(self.cc[ver], survey=ver)
 
-                self.print_cyan(f"Extracting the fiducial power spectrum for {ver}")
+                self.print_cyan(
+                    f"Extracting the fiducial power spectrum for {ver}"
+                )
                 lmax = 2 * self.nside
                 z, dndz = self.get_redshift(ver)
@@ -2617,7 +2786,7 @@
                         "Unexpected pixwin length for lmax="
                         f"{lmax}: got {pw.shape[0]}, expected {len(ell)+1}"
                     )
-                pw = pw[1:len(ell)+1]
+                pw = pw[1 : len(ell) + 1]
 
                 # Load redshift distribution and calculate theory C_ell
                 fiducial_cl = (
@@ -2631,51 +2800,84 @@
                     * pw**2
                 )
 
-                self.print_cyan("Getting a sample of the fiducial Cl's with noise")
+                self.print_cyan(
+                    "Getting a sample of the fiducial Cl's with noise"
+                )
 
                 lmin = 8
-                lmax = 2*self.nside
+                lmax = 2 * self.nside
                 b_lmax = lmax - 1
-                ells = np.arange(lmin, lmax+1)
+                ells = np.arange(lmin, lmax + 1)
 
-                if self.binning == 'linear':
+                if self.binning == "linear":
                     # Linear bands of width ell_step, respecting actual lmax
                     bpws = (ells - lmin) // self.ell_step
-                    bpws = np.minimum(bpws, bpws[-1])  # Ensure last bin captures all
+                    bpws = np.minimum(
+                        bpws, bpws[-1]
+                    )  # Ensure last bin captures all
                     b = nmt.NmtBin(ells=ells, bpws=bpws, lmax=b_lmax)
-                elif self.binning == 'powspace':
+                elif self.binning == "powspace":
                     start = np.power(lmin, self.power)
                     end = np.power(lmax, self.power)
-                    bins_ell = np.power(np.linspace(start, end, self.n_ell_bins+1), 1/self.power)
+                    bins_ell = np.power(
+                        np.linspace(start, end, self.n_ell_bins + 1),
+                        1 / self.power,
+                    )
 
-                    #Get bandpowers
+                    # Get bandpowers
                     bpws = np.digitize(ells.astype(float), bins_ell) - 1
                     bpws[0] = 0
-                    bpws[-1] = self.n_ell_bins-1
+                    bpws[-1] = self.n_ell_bins - 1
 
                     b = nmt.NmtBin(ells=ells, bpws=bpws, lmax=b_lmax)
 
-                #Load data and create shear and noise maps
+                # Load data and create shear and noise maps
                 cat_gal = fits.getdata(self.cc[ver]["shear"]["path"])
-                n_gal, unique_pix, idx, idx_rep = self.get_n_gal_map(params, nside, cat_gal)
+                n_gal, unique_pix, idx, idx_rep = self.get_n_gal_map(
+                    params, nside, cat_gal
+                )
                 mask = n_gal != 0
-
-                cl_noise, f, wsp = self.get_sample(params, self.nside, b_lmax, b, cat_gal, n_gal, mask, unique_pix, idx_rep)
-
-                fiducial_cl = np.array([fiducial_cl, 0.*fiducial_cl, 0.*fiducial_cl, 0.*fiducial_cl])+ np.mean(cl_noise, axis=1, keepdims=True)
-
+
+                cl_noise, f, wsp = self.get_sample(
+                    params,
+                    self.nside,
+                    b_lmax,
+                    b,
+                    cat_gal,
+                    n_gal,
+                    mask,
+                    unique_pix,
+                    idx_rep,
+                )
+
+                fiducial_cl = np.array(
+                    [
+                        fiducial_cl,
+                        0.0 * fiducial_cl,
+                        0.0 * fiducial_cl,
+                        0.0 * fiducial_cl,
+                    ]
+                ) + np.mean(cl_noise, axis=1, keepdims=True)
+
                 self.print_cyan("Computing the Pseudo-Cl covariance")
                 cw = nmt.NmtCovarianceWorkspace.from_fields(f, f, f, f)
 
-                covar_22_22 = nmt.gaussian_covariance(cw, 2, 2, 2, 2,
-                                                      fiducial_cl,
-                                                      fiducial_cl,
-                                                      fiducial_cl,
-                                                      fiducial_cl,
-                                                      wsp, wb=wsp).reshape([self.n_ell_bins, 4, self.n_ell_bins, 4])
+                covar_22_22 = nmt.gaussian_covariance(
+                    cw,
+                    2,
+                    2,
+                    2,
+                    2,
+                    fiducial_cl,
+                    fiducial_cl,
+                    fiducial_cl,
+                    fiducial_cl,
+                    wsp,
+                    wb=wsp,
+                ).reshape([self.n_ell_bins, 4, self.n_ell_bins, 4])
 
                 covar_EE_EE = covar_22_22[:, 0, :, 0]
                 covar_EE_EB = covar_22_22[:, 0, :, 1]
@@ -2717,7 +2919,7 @@
 
                 hdu.writeto(out_path, overwrite=True)
 
-                self._pseudo_cls[ver]['cov'] = hdu
+                self._pseudo_cls[ver]["cov"] = hdu
 
         self.print_done("Done Pseudo-Cl covariance")
 
@@ -2728,51 +2930,88 @@ def calculate_pseudo_cl_onecovariance(self):
         self.print_start("Computing Pseudo-Cl covariance with OneCovariance")
 
         if self.path_onecovariance is None:
-            raise ValueError("path_onecovariance must be provided to use OneCovariance")
-
+            raise ValueError(
+                "path_onecovariance must be provided to use OneCovariance"
+            )
+
         if not os.path.exists(self.path_onecovariance):
-            raise ValueError(f"OneCovariance path {self.path_onecovariance} does not exist")
-
-        template_config = os.path.join(self.path_onecovariance, "config_files", "config_3x2pt_pure_Cell_UNIONS.ini")
+            raise ValueError(
+                f"OneCovariance path {self.path_onecovariance} does not exist"
+            )
+
+        template_config = os.path.join(
+            self.path_onecovariance,
+            "config_files",
+            "config_3x2pt_pure_Cell_UNIONS.ini",
+        )
         if not os.path.exists(template_config):
-            raise ValueError(f"Template config file {template_config} does not exist")
+            raise ValueError(
+                f"Template config file {template_config} does not exist"
+            )
 
         self._pseudo_cls_onecov = {}
         for ver in self.versions:
             self.print_magenta(ver)
-            out_dir = os.path.abspath(f"{self.cc['paths']['output']}/pseudo_cl_cov_onecov_{ver}/")
+            out_dir = os.path.abspath(
+                f"{self.cc['paths']['output']}/pseudo_cl_cov_onecov_{ver}/"
+            )
             os.makedirs(out_dir, exist_ok=True)
-            if os.path.exists(os.path.join(out_dir, "covariance_list_3x2pt_pure_Cell.dat")):
-                self.print_done(f"Skipping OneCovariance calculation, {out_dir} exists")
+            if os.path.exists(
+                os.path.join(out_dir, "covariance_list_3x2pt_pure_Cell.dat")
+            ):
+                self.print_done(
+                    f"Skipping OneCovariance calculation, {out_dir} exists"
+                )
                 self._load_onecovariance_cov(out_dir, ver)
             else:
-                mask_path = self.cc[ver]['shear']['mask']
-                redshift_distr_path = os.path.join(self.data_base_dir, self.cc[ver]['shear']['redshift_distr'])
+                mask_path = self.cc[ver]["shear"]["mask"]
+                redshift_distr_path = os.path.join(
+                    self.data_base_dir, self.cc[ver]["shear"]["redshift_distr"]
+                )
 
                 config_path = os.path.join(out_dir, f"config_onecov_{ver}.ini")
-                self.print_cyan(f"Modifying OneCovariance config file and saving it to {config_path}")
-                self._modify_onecov_config(template_config, config_path, out_dir, mask_path, redshift_distr_path, ver)
+                self.print_cyan(
+                    f"Modifying OneCovariance config file and saving it to {config_path}"
+                )
+                self._modify_onecov_config(
+                    template_config,
+                    config_path,
+                    out_dir,
+                    mask_path,
+                    redshift_distr_path,
+                    ver,
+                )
 
                 self.print_cyan("Running OneCovariance...")
                 cmd = f"python {os.path.join(self.path_onecovariance, 'covariance.py')} {config_path}"
                 self.print_cyan(f"Command: {cmd}")
                 ret = os.system(cmd)
                 if ret != 0:
-                    raise RuntimeError(f"OneCovariance command failed with return code {ret}")
+                    raise RuntimeError(
+                        f"OneCovariance command failed with return code {ret}"
+                    )
                 self.print_cyan("OneCovariance completed successfully.")
                 self._load_onecovariance_cov(out_dir, ver)
-
+
         self.print_done("Done Pseudo-Cl covariance with OneCovariance")
 
-    def _modify_onecov_config(self, template_config, config_path, out_dir, mask_path, redshift_distr_path, ver):
+    def _modify_onecov_config(
+        self,
+        template_config,
+        config_path,
+        out_dir,
+        mask_path,
+        redshift_distr_path,
+        ver,
+    ):
         """
-        Modify OneCovariance configuration file with correct mask, redshift distribution, 
+        Modify OneCovariance configuration file with correct mask, redshift distribution,
         and ellipticity dispersion parameters.
-
+
         Parameters
         ----------
         template_config : str
@@ -2784,30 +3023,38 @@
         redshift_distr_path : str
             Path to the redshift distribution file
         """
-        config = configparser.ConfigParser() 
+        config = configparser.ConfigParser()
 
         # Load the template configuration
         config.read(template_config)
-
+
         # Update mask path
         mask_base = os.path.basename(os.path.abspath(mask_path))
         mask_folder = os.path.dirname(os.path.abspath(mask_path))
-        config['survey specs']['mask_directory'] = mask_folder
-        config['survey specs']['mask_file_lensing'] = mask_base
-        config['survey specs']['survey_area_lensing_in_deg2'] = str(self.area[ver])
-        config['survey specs']['ellipticity_dispersion'] = str(self.ellipticity_dispersion[ver])
-        config['survey specs']['n_eff_lensing'] = str(self.n_eff_gal[ver])
+        config["survey specs"]["mask_directory"] = mask_folder
+        config["survey specs"]["mask_file_lensing"] = mask_base
+        config["survey specs"]["survey_area_lensing_in_deg2"] = str(
+            self.area[ver]
+        )
+        config["survey specs"]["ellipticity_dispersion"] = str(
+            self.ellipticity_dispersion[ver]
+        )
+        config["survey specs"]["n_eff_lensing"] = str(self.n_eff_gal[ver])
 
         # Update redshift distribution path
-        redshift_distr_base = os.path.basename(os.path.abspath(redshift_distr_path))
-        redshift_distr_folder = os.path.dirname(os.path.abspath(redshift_distr_path))
-        config['redshift']['z_directory'] = redshift_distr_folder
-        config['redshift']['zlens_file'] = redshift_distr_base
-
-        #Update output directory
-        config['output settings']['directory'] = out_dir
-
+        redshift_distr_base = os.path.basename(
+            os.path.abspath(redshift_distr_path)
+        )
+        redshift_distr_folder = os.path.dirname(
+            os.path.abspath(redshift_distr_path)
+        )
+        config["redshift"]["z_directory"] = redshift_distr_folder
+        config["redshift"]["zlens_file"] = redshift_distr_base
+
+        # Update output directory
+        config["output settings"]["directory"] = out_dir
+
         # Save the modified configuration
-        with open(config_path, 'w') as f:
+        with open(config_path, "w") as f:
             config.write(f)
 
@@ -2818,52 +3065,73 @@ def get_cov_from_onecov(self, cov_one_cov, gaussian=True):
         for i in range(n_bins):
             for j in range(n_bins):
                 cov[i, j] = cov_one_cov[i * n_bins + j, index_value]
-
+
         return cov
 
     def _load_onecovariance_cov(self, out_dir, ver):
         self.print_cyan(f"Loading OneCovariance results from {out_dir}")
-        cov_one_cov = np.genfromtxt(os.path.join(out_dir, "covariance_list_3x2pt_pure_Cell.dat"))
+        cov_one_cov = np.genfromtxt(
+            os.path.join(out_dir, "covariance_list_3x2pt_pure_Cell.dat")
+        )
 
         gaussian_one_cov = self.get_cov_from_onecov(cov_one_cov, gaussian=True)
         all_one_cov = self.get_cov_from_onecov(cov_one_cov, gaussian=False)
 
         self._pseudo_cls_onecov[ver] = {
-            'gaussian_cov': gaussian_one_cov,
-            'all_cov': all_one_cov
+            "gaussian_cov": gaussian_one_cov,
+            "all_cov": all_one_cov,
         }
 
     def calculate_pseudo_cl_g_ng_cov(self, gaussian_part="iNKA"):
-        assert gaussian_part in ["iNKA", "OneCovariance"], "gaussian_part must be 'iNKA' or 'OneCovariance'"
-        self.print_start(f"Gaussian and Non-Gaussian covariance of the Pseudo-Cl's using {gaussian_part} for the Gaussian part")
-
+        assert gaussian_part in [
+            "iNKA",
+            "OneCovariance",
+        ], "gaussian_part must be 'iNKA' or 'OneCovariance'"
+        self.print_start(
+            f"Gaussian and Non-Gaussian covariance of the Pseudo-Cl's using {gaussian_part} for the Gaussian part"
+        )
+
         self._pseudo_cls_cov_g_ng = {}
         for ver in self.versions:
             self.print_magenta(ver)
-            out_file = os.path.abspath(f"{self.cc['paths']['output']}/pseudo_cl_cov_g_ng_{gaussian_part}_{ver}.fits")
+            out_file = os.path.abspath(
+                f"{self.cc['paths']['output']}/pseudo_cl_cov_g_ng_{gaussian_part}_{ver}.fits"
+            )
             if os.path.exists(out_file):
-                self.print_done(f"Skipping Gaussian and Non-Gaussian covariance calculation, {out_file} exists")
+                self.print_done(
+                    f"Skipping Gaussian and Non-Gaussian covariance calculation, {out_file} exists"
+                )
                 cov_hdu = fits.open(out_file)
                 self._pseudo_cls_cov_g_ng[ver] = cov_hdu
                 continue
 
             if gaussian_part == "iNKA":
-                gaussian_cov = self.pseudo_cls[ver]['cov']['COVAR_EE_EE'].data
-                non_gaussian_cov = self.pseudo_cls_onecov[ver]['all_cov'] - self.pseudo_cls_onecov[ver]['gaussian_cov']
+                gaussian_cov = self.pseudo_cls[ver]["cov"]["COVAR_EE_EE"].data
+                non_gaussian_cov = (
+                    self.pseudo_cls_onecov[ver]["all_cov"]
+                    - self.pseudo_cls_onecov[ver]["gaussian_cov"]
+                )
                 full_cov = gaussian_cov + non_gaussian_cov
             elif gaussian_part == "OneCovariance":
-                gaussian_cov = self.pseudo_cls_onecov[ver]['gaussian_cov']
-                non_gaussian_cov = self.pseudo_cls_onecov[ver]['all_cov'] - self.pseudo_cls_onecov[ver]['gaussian_cov']
-                full_cov = self.pseudo_cls_onecov[ver]['all_cov']
+                gaussian_cov = self.pseudo_cls_onecov[ver]["gaussian_cov"]
+                non_gaussian_cov = (
+                    self.pseudo_cls_onecov[ver]["all_cov"]
+                    - self.pseudo_cls_onecov[ver]["gaussian_cov"]
+                )
+                full_cov = self.pseudo_cls_onecov[ver]["all_cov"]
             else:
                 raise ValueError(f"Unknown gaussian_part: {gaussian_part}")
 
             self.print_cyan("Saving Gaussian and Non-Gaussian covariance...")
             hdu = fits.HDUList()
             hdu.append(fits.ImageHDU(gaussian_cov, name="COVAR_GAUSSIAN"))
-            hdu.append(fits.ImageHDU(non_gaussian_cov, name="COVAR_NON_GAUSSIAN"))
+            hdu.append(
+                fits.ImageHDU(non_gaussian_cov, name="COVAR_NON_GAUSSIAN")
+            )
            hdu.append(fits.ImageHDU(full_cov, name="COVAR_FULL"))
             hdu.writeto(out_file, overwrite=True)
 
             self._pseudo_cls_cov_g_ng[ver] = hdu
 
-        self.print_done(f"Done Gaussian and Non-Gaussian covariance of the Pseudo-Cl's using {gaussian_part} for the Gaussian part")
+        self.print_done(
+            f"Done Gaussian and Non-Gaussian covariance of the Pseudo-Cl's using {gaussian_part} for the Gaussian part"
+        )
 
     def calculate_pseudo_cl(self):
         """
@@ -2882,14 +3150,18 @@
 
             self._pseudo_cls[ver] = {}
 
-            out_path = os.path.abspath(f"{self.cc['paths']['output']}/pseudo_cl_{ver}.fits")
+            out_path = os.path.abspath(
+                f"{self.cc['paths']['output']}/pseudo_cl_{ver}.fits"
+            )
             if os.path.exists(out_path):
-                self.print_done(f"Skipping Pseudo-Cl's calculation, {out_path} exists")
+                self.print_done(
+                    f"Skipping Pseudo-Cl's calculation, {out_path} exists"
+                )
                 cl_shear = fits.getdata(out_path)
-                self._pseudo_cls[ver]['pseudo_cl'] = cl_shear
-            elif self.cell_method == 'map':
+                self._pseudo_cls[ver]["pseudo_cl"] = cl_shear
+            elif self.cell_method == "map":
                 self.calculate_pseudo_cl_map(ver, nside, out_path)
-            elif self.cell_method == 'catalog':
+            elif self.cell_method == "catalog":
                 self.calculate_pseudo_cl_catalog(ver, out_path)
             else:
                 raise ValueError(f"Unknown cell method: {self.cell_method}")
@@ -2899,35 +3171,37 @@ def calculate_pseudo_cl(self):
 
     def calculate_pseudo_cl_map(self, ver, nside, out_path):
         params = get_params_rho_tau(self.cc[ver], survey=ver)
 
-        #Load data and create shear and noise maps
+        # Load data and create shear and noise maps
         cat_gal = fits.getdata(self.cc[ver]["shear"]["path"])
-        w = cat_gal[params['w_col']]
+        w = cat_gal[params["w_col"]]
 
         self.print_cyan("Creating maps and computing Cl's...")
 
-        n_gal_map, unique_pix, idx, idx_rep = self.get_n_gal_map(params, nside, cat_gal)
+        n_gal_map, unique_pix, idx, idx_rep = self.get_n_gal_map(
+            params, nside, cat_gal
+        )
         mask = n_gal_map != 0
 
         shear_map_e1 = np.zeros(hp.nside2npix(nside))
         shear_map_e2 = np.zeros(hp.nside2npix(nside))
 
-        e1 = cat_gal[params['e1_col']]
-        e2 = cat_gal[params['e2_col']]
+        e1 = cat_gal[params["e1_col"]]
+        e2 = cat_gal[params["e2_col"]]
 
         del cat_gal
-
-        shear_map_e1[unique_pix] += np.bincount(idx_rep, weights=e1*w)
-        shear_map_e2[unique_pix] += np.bincount(idx_rep, weights=e2*w)
+
+        shear_map_e1[unique_pix] += np.bincount(idx_rep, weights=e1 * w)
+        shear_map_e2[unique_pix] += np.bincount(idx_rep, weights=e2 * w)
 
         shear_map_e1[mask] /= n_gal_map[mask]
         shear_map_e2[mask] /= n_gal_map[mask]
 
-        shear_map = shear_map_e1 + 1j*shear_map_e2
+        shear_map = shear_map_e1 + 1j * shear_map_e2
 
         del shear_map_e1, shear_map_e2
 
         ell_eff, cl_shear, wsp = self.get_pseudo_cls_map(shear_map)
 
         cl_noise = np.zeros_like(cl_shear)
-
+
         for i in range(self.nrandom_cell):
 
             noise_map_e1 = np.zeros(hp.nside2npix(nside))
@@ -2935,64 +3209,67 @@
             noise_map_e2 = np.zeros(hp.nside2npix(nside))
 
            e1_rot, e2_rot = self.apply_random_rotation(e1, e2)
 
-
-            noise_map_e1[unique_pix] += np.bincount(idx_rep, weights=e1_rot*w)
-            noise_map_e2[unique_pix] += np.bincount(idx_rep, weights=e2_rot*w)
+            noise_map_e1[unique_pix] += np.bincount(idx_rep, weights=e1_rot * w)
+            noise_map_e2[unique_pix] += np.bincount(idx_rep, weights=e2_rot * w)
 
             noise_map_e1[mask] /= n_gal_map[mask]
             noise_map_e2[mask] /= n_gal_map[mask]
 
-            noise_map = noise_map_e1 + 1j*noise_map_e2
+            noise_map = noise_map_e1 + 1j * noise_map_e2
 
             del noise_map_e1, noise_map_e2
 
             _, cl_noise_, _ = self.get_pseudo_cls_map(noise_map, wsp)
 
             cl_noise += cl_noise_
-
+
         cl_noise /= self.nrandom_cell
 
         del e1, e2, w
         try:
            del e1_rot, e2_rot
-        except NameError: #Continue if the random generation has been skipped.
+        except NameError:  # Continue if the random generation has been skipped.
            pass
-        del n_gal_map 
+        del n_gal_map
 
-        #This is a problem because the measurement depends on the seed. To be fixed.
-        #cl_shear = cl_shear - np.mean(cl_noise, axis=1, keepdims=True)
+        # This is a problem because the measurement depends on the seed. To be fixed.
+        # cl_shear = cl_shear - np.mean(cl_noise, axis=1, keepdims=True)
         cl_shear = cl_shear - cl_noise
 
         self.print_cyan("Saving pseudo-Cl's...")
         self.save_pseudo_cl(ell_eff, cl_shear, out_path)
 
         cl_shear = fits.getdata(out_path)
-        self._pseudo_cls[ver]['pseudo_cl'] = cl_shear
+        self._pseudo_cls[ver]["pseudo_cl"] = cl_shear
 
     def calculate_pseudo_cl_catalog(self, ver, out_path):
         params = get_params_rho_tau(self.cc[ver], survey=ver)
 
-        #Load data and create shear and noise maps
+        # Load data and create shear and noise maps
         cat_gal = fits.getdata(self.cc[ver]["shear"]["path"])
 
-        ell_eff, cl_shear, wsp = self.get_pseudo_cls_catalog(catalog=cat_gal, params=params)
+        ell_eff, cl_shear, wsp = self.get_pseudo_cls_catalog(
+            catalog=cat_gal, params=params
+        )
 
         self.print_cyan("Saving pseudo-Cl's...")
         self.save_pseudo_cl(ell_eff, cl_shear, out_path)
 
         cl_shear = fits.getdata(out_path)
-        self._pseudo_cls[ver]['pseudo_cl'] = cl_shear
+        self._pseudo_cls[ver]["pseudo_cl"] = cl_shear
 
     def get_n_gal_map(self, params, nside, cat_gal):
         """
         Compute the galaxy number density map.
         """
-        ra = cat_gal[params['ra_col']]
-        dec = cat_gal[params['dec_col']]
-        w = cat_gal[params['w_col']]
+        ra = cat_gal[params["ra_col"]]
+        dec = cat_gal[params["dec_col"]]
+        w = cat_gal[params["w_col"]]
 
-        theta = (90. - dec) * np.pi / 180.
-        phi = ra * np.pi / 180.
+        theta = (90.0 - dec) * np.pi / 180.0
+        phi = ra * np.pi / 180.0
 
         pix = hp.ang2pix(nside, theta, phi)
 
-        unique_pix, idx, idx_rep = np.unique(pix, return_index=True, return_inverse=True)
+        unique_pix, idx, idx_rep = np.unique(
+            pix, return_index=True, return_inverse=True
+        )
 
         n_gal = np.zeros(hp.nside2npix(nside))
         n_gal[unique_pix] = np.bincount(idx_rep, weights=w)
 
         return n_gal, unique_pix, idx, idx_rep
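The "To be fixed" comment above flags that apply_random_rotation seeds NumPy with a bare np.random.seed(), so the subtracted noise spectrum changes between runs. One possible remedy, sketched here with the Generator API (an assumption, not part of the patch), is to thread an explicit seed through:

import numpy as np

def apply_random_rotation_seeded(e1, e2, seed=12345):
    """Rotate each ellipticity by an independent random angle.

    Same maths as apply_random_rotation in this patch, but with an
    explicit seed so repeated runs subtract the same noise realization.
    """
    rng = np.random.default_rng(seed)
    rot_angle = rng.random(len(e1)) * 2 * np.pi
    e1_out = e1 * np.cos(rot_angle) + e2 * np.sin(rot_angle)
    e2_out = -e1 * np.sin(rot_angle) + e2 * np.cos(rot_angle)
    return e1_out, e2_out

Calling this with seed=base_seed + i inside the nrandom_cell loop would keep the realizations independent while making the averaged noise spectrum reproducible.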
@@ -3006,18 +3283,24 @@ def get_gaussian_real(
         noise_map_e1 = np.zeros(hp.nside2npix(nside))
         noise_map_e2 = np.zeros(hp.nside2npix(nside))
 
-        w = cat_gal[params['w_col']]
-        noise_map_e1[unique_pix] += np.bincount(idx_rep, weights=e1_rot*w)
-        noise_map_e2[unique_pix] += np.bincount(idx_rep, weights=e2_rot*w)
+        w = cat_gal[params["w_col"]]
+        noise_map_e1[unique_pix] += np.bincount(idx_rep, weights=e1_rot * w)
+        noise_map_e2[unique_pix] += np.bincount(idx_rep, weights=e2_rot * w)
 
         noise_map_e1[mask] /= n_gal[mask]
         noise_map_e2[mask] /= n_gal[mask]
 
-        return noise_map_e1 + 1j*noise_map_e2
+        return noise_map_e1 + 1j * noise_map_e2
 
-    def get_sample(self, params, nside, lmax, b, cat_gal, n_gal, mask, unique_pix, idx_rep):
-        noise_map = self.get_gaussian_real(params, nside, lmax, cat_gal, n_gal, mask, unique_pix, idx_rep)
+    def get_sample(
+        self, params, nside, lmax, b, cat_gal, n_gal, mask, unique_pix, idx_rep
+    ):
+        noise_map = self.get_gaussian_real(
+            params, nside, lmax, cat_gal, n_gal, mask, unique_pix, idx_rep
+        )
 
-        f = nmt.NmtField(mask=mask, maps=[noise_map.real, noise_map.imag], lmax=lmax)
+        f = nmt.NmtField(
+            mask=mask, maps=[noise_map.real, noise_map.imag], lmax=lmax
+        )
 
         wsp = nmt.NmtWorkspace.from_fields(f, f, b)
 
@@ -3025,32 +3308,34 @@ def get_sample(self, params, nside, lmax, b, cat_gal, n_gal, mask, unique_pix, i
         cl_noise = wsp.decouple_cell(cl_noise)
 
         return cl_noise, f, wsp
-
+
     def get_pseudo_cls_map(self, map, wsp=None):
         """
         Compute the pseudo-cl for a given map.
""" lmin = 8 - lmax = 2*self.nside + lmax = 2 * self.nside b_lmax = lmax - 1 - ells = np.arange(lmin, lmax+1) + ells = np.arange(lmin, lmax + 1) - if self.binning == 'linear': + if self.binning == "linear": # Linear bands of width ell_step, respecting actual lmax bpws = (ells - lmin) // self.ell_step bpws = np.minimum(bpws, bpws[-1]) # Ensure last bin captures all b = nmt.NmtBin(ells=ells, bpws=bpws, lmax=b_lmax) - elif self.binning == 'powspace': + elif self.binning == "powspace": start = np.power(lmin, self.power) end = np.power(lmax, self.power) - bins_ell = np.power(np.linspace(start, end, self.n_ell_bins+1), 1/self.power) + bins_ell = np.power( + np.linspace(start, end, self.n_ell_bins + 1), 1 / self.power + ) - #Get bandpowers + # Get bandpowers bpws = np.digitize(ells.astype(float), bins_ell) - 1 bpws[0] = 0 - bpws[-1] = self.n_ell_bins-1 + bpws[-1] = self.n_ell_bins - 1 b = nmt.NmtBin(ells=ells, bpws=bpws, lmax=b_lmax) @@ -3058,11 +3343,13 @@ def get_pseudo_cls_map(self, map, wsp=None): factor = -1 if self.pol_factor else 1 - f_all = nmt.NmtField(mask=(map!=0), maps=[map.real, factor*map.imag], lmax=b_lmax) + f_all = nmt.NmtField( + mask=(map != 0), maps=[map.real, factor * map.imag], lmax=b_lmax + ) if wsp is None: wsp = nmt.NmtWorkspace.from_fields(f_all, f_all, b) - + cl_coupled = nmt.compute_coupled_cell(f_all, f_all) cl_all = wsp.decouple_cell(cl_coupled) @@ -3074,25 +3361,27 @@ def get_pseudo_cls_catalog(self, catalog, params, wsp=None): """ lmin = 8 - lmax = 2*self.nside + lmax = 2 * self.nside b_lmax = lmax - 1 - ells = np.arange(lmin, lmax+1) + ells = np.arange(lmin, lmax + 1) - if self.binning == 'linear': + if self.binning == "linear": # Linear bands of width ell_step, respecting actual lmax bpws = (ells - lmin) // self.ell_step bpws = np.minimum(bpws, bpws[-1]) # Ensure last bin captures all b = nmt.NmtBin(ells=ells, bpws=bpws, lmax=b_lmax) - elif self.binning == 'powspace': + elif self.binning == "powspace": start = np.power(lmin, self.power) end = np.power(lmax, self.power) - bins_ell = np.power(np.linspace(start, end, self.n_ell_bins+1), 1/self.power) + bins_ell = np.power( + np.linspace(start, end, self.n_ell_bins + 1), 1 / self.power + ) - #Get bandpowers + # Get bandpowers bpws = np.digitize(ells.astype(float), bins_ell) - 1 bpws[0] = 0 - bpws[-1] = self.n_ell_bins-1 + bpws[-1] = self.n_ell_bins - 1 b = nmt.NmtBin(ells=ells, bpws=bpws, lmax=b_lmax) @@ -3100,17 +3389,22 @@ def get_pseudo_cls_catalog(self, catalog, params, wsp=None): factor = -1 if self.pol_factor else 1 - f_all = nmt.NmtFieldCatalog(positions=[catalog[params['ra_col']], catalog[params['dec_col']]], - weights=catalog[params['w_col']], - field=[catalog[params['e1_col']], factor*catalog[params['e2_col']]], - lmax=b_lmax, - lmax_mask=b_lmax, - spin=2, - lonlat=True) - + f_all = nmt.NmtFieldCatalog( + positions=[catalog[params["ra_col"]], catalog[params["dec_col"]]], + weights=catalog[params["w_col"]], + field=[ + catalog[params["e1_col"]], + factor * catalog[params["e2_col"]], + ], + lmax=b_lmax, + lmax_mask=b_lmax, + spin=2, + lonlat=True, + ) + if wsp is None: wsp = nmt.NmtWorkspace.from_fields(f_all, f_all, b) - + cl_coupled = nmt.compute_coupled_cell(f_all, f_all) cl_all = wsp.decouple_cell(cl_coupled) @@ -3135,9 +3429,9 @@ def apply_random_rotation(self, e1, e2): Second component of the rotated ellipticity. 
""" np.random.seed() - rot_angle = np.random.rand(len(e1))*2*np.pi - e1_out = e1*np.cos(rot_angle) + e2*np.sin(rot_angle) - e2_out = -e1*np.sin(rot_angle) + e2*np.cos(rot_angle) + rot_angle = np.random.rand(len(e1)) * 2 * np.pi + e1_out = e1 * np.cos(rot_angle) + e2 * np.sin(rot_angle) + e2_out = -e1 * np.sin(rot_angle) + e2 * np.cos(rot_angle) return e1_out, e2_out def save_pseudo_cl(self, ell_eff, pseudo_cl, out_path): @@ -3151,7 +3445,7 @@ def save_pseudo_cl(self, ell_eff, pseudo_cl, out_path): out_path : str Path to save the pseudo-Cl's to. """ - #Create columns of the fits file + # Create columns of the fits file col1 = fits.Column(name="ELL", format="D", array=ell_eff) col2 = fits.Column(name="EE", format="D", array=pseudo_cl[0]) col3 = fits.Column(name="EB", format="D", array=pseudo_cl[1]) @@ -3174,126 +3468,183 @@ def plot_pseudo_cl(self): """ self.print_cyan("Plotting pseudo-Cl's") - #Plotting EE + # Plotting EE out_path = os.path.abspath(f"{self.cc['paths']['output']}/cell_ee.png") fig, ax = plt.subplots(nrows=2, ncols=1, figsize=(8, 8)) for ver in self.versions: - ell = self.pseudo_cls[ver]['pseudo_cl']["ELL"] - cov = self.pseudo_cls[ver]['cov']["COVAR_EE_EE"].data - ax[0].errorbar(ell, ell*self.pseudo_cls[ver]['pseudo_cl']["EE"], yerr=ell*np.sqrt(np.diag(cov)), fmt=self.cc[ver]["marker"], label=ver+" EE", color=self.cc[ver]["colour"], capsize=2) + ell = self.pseudo_cls[ver]["pseudo_cl"]["ELL"] + cov = self.pseudo_cls[ver]["cov"]["COVAR_EE_EE"].data + ax[0].errorbar( + ell, + ell * self.pseudo_cls[ver]["pseudo_cl"]["EE"], + yerr=ell * np.sqrt(np.diag(cov)), + fmt=self.cc[ver]["marker"], + label=ver + " EE", + color=self.cc[ver]["colour"], + capsize=2, + ) ax[0].set_ylabel(r"$\ell C_\ell$") - ax[0].set_xlim(ell.min()-10, ell.max()+100) - ax[0].set_xscale('squareroot') + ax[0].set_xlim(ell.min() - 10, ell.max() + 100) + ax[0].set_xscale("squareroot") ax[0].set_xticks(np.array([100, 400, 900, 1600])) ax[0].minorticks_on() - ax[0].tick_params(axis='x', which='minor', length=2, width=0.8) - minor_ticks = [i*10 for i in range(1, 10)] + [i*100 for i in range(1, 21)] + ax[0].tick_params(axis="x", which="minor", length=2, width=0.8) + minor_ticks = [i * 10 for i in range(1, 10)] + [ + i * 100 for i in range(1, 21) + ] ax[0].xaxis.set_ticks(minor_ticks, minor=True) for ver in self.versions: - ell = self.pseudo_cls[ver]['pseudo_cl']["ELL"] - cov = self.pseudo_cls[ver]['cov']["COVAR_EE_EE"].data - ax[1].errorbar(ell, self.pseudo_cls[ver]['pseudo_cl']["EE"], yerr=np.sqrt(np.diag(cov)), fmt=self.cc[ver]["marker"], label=ver+" EE", color=self.cc[ver]["colour"]) + ell = self.pseudo_cls[ver]["pseudo_cl"]["ELL"] + cov = self.pseudo_cls[ver]["cov"]["COVAR_EE_EE"].data + ax[1].errorbar( + ell, + self.pseudo_cls[ver]["pseudo_cl"]["EE"], + yerr=np.sqrt(np.diag(cov)), + fmt=self.cc[ver]["marker"], + label=ver + " EE", + color=self.cc[ver]["colour"], + ) ax[1].set_xlabel(r"$\ell$") ax[1].set_ylabel(r"$C_\ell$") - ax[1].set_xlim(ell.min()-10, ell.max()+100) - ax[1].set_xscale('squareroot') - ax[1].set_yscale('log') + ax[1].set_xlim(ell.min() - 10, ell.max() + 100) + ax[1].set_xscale("squareroot") + ax[1].set_yscale("log") ax[1].set_xticks(np.array([100, 400, 900, 1600])) ax[1].minorticks_on() - ax[1].tick_params(axis='x', which='minor', length=2, width=0.8) - minor_ticks = [i*10 for i in range(1, 10)] + [i*100 for i in range(1, 21)] + ax[1].tick_params(axis="x", which="minor", length=2, width=0.8) + minor_ticks = [i * 10 for i in range(1, 10)] + [ + i * 100 for i in range(1, 21) + ] 
         ax[1].xaxis.set_ticks(minor_ticks, minor=True)
 
-        plt.suptitle('Pseudo-Cl EE (Gaussian covariance)')
+        plt.suptitle("Pseudo-Cl EE (Gaussian covariance)")
         plt.legend()
         plt.savefig(out_path)
 
-        #Plotting EB
+        # Plotting EB
         out_path = os.path.abspath(f"{self.cc['paths']['output']}/cell_eb.png")
         fig, ax = plt.subplots(nrows=2, ncols=1, figsize=(8, 8))
         for ver in self.versions:
-            ell = self.pseudo_cls[ver]['pseudo_cl']["ELL"]
-            cov = self.pseudo_cls[ver]['cov']["COVAR_EB_EB"].data
-            ax[0].errorbar(ell, ell*self.pseudo_cls[ver]['pseudo_cl']["EB"], yerr=ell*np.sqrt(np.diag(cov)), fmt=self.cc[ver]["marker"], label=ver+" EB", color=self.cc[ver]["colour"], capsize=2)
+            ell = self.pseudo_cls[ver]["pseudo_cl"]["ELL"]
+            cov = self.pseudo_cls[ver]["cov"]["COVAR_EB_EB"].data
+            ax[0].errorbar(
+                ell,
+                ell * self.pseudo_cls[ver]["pseudo_cl"]["EB"],
+                yerr=ell * np.sqrt(np.diag(cov)),
+                fmt=self.cc[ver]["marker"],
+                label=ver + " EB",
+                color=self.cc[ver]["colour"],
+                capsize=2,
+            )
         ax[0].axhline(0, color="black", linestyle="--")
         ax[0].set_ylabel(r"$\ell C_\ell$")
-        ax[0].set_xlim(ell.min()-10, ell.max()+100)
-        ax[0].set_xscale('squareroot')
+        ax[0].set_xlim(ell.min() - 10, ell.max() + 100)
+        ax[0].set_xscale("squareroot")
         ax[0].set_xticks(np.array([100, 400, 900, 1600]))
         ax[0].minorticks_on()
-        ax[0].tick_params(axis='x', which='minor', length=2, width=0.8)
-        minor_ticks = [i*10 for i in range(1, 10)] + [i*100 for i in range(1, 21)]
+        ax[0].tick_params(axis="x", which="minor", length=2, width=0.8)
+        minor_ticks = [i * 10 for i in range(1, 10)] + [
+            i * 100 for i in range(1, 21)
+        ]
         ax[0].xaxis.set_ticks(minor_ticks, minor=True)
 
         for ver in self.versions:
-            ell = self.pseudo_cls[ver]['pseudo_cl']["ELL"]
-            cov = self.pseudo_cls[ver]['cov']["COVAR_EB_EB"].data
-            ax[1].errorbar(ell, self.pseudo_cls[ver]['pseudo_cl']["EB"], yerr=np.sqrt(np.diag(cov)), fmt=self.cc[ver]["marker"], label=ver+" EB", color=self.cc[ver]["colour"])
+            ell = self.pseudo_cls[ver]["pseudo_cl"]["ELL"]
+            cov = self.pseudo_cls[ver]["cov"]["COVAR_EB_EB"].data
+            ax[1].errorbar(
+                ell,
+                self.pseudo_cls[ver]["pseudo_cl"]["EB"],
+                yerr=np.sqrt(np.diag(cov)),
+                fmt=self.cc[ver]["marker"],
+                label=ver + " EB",
+                color=self.cc[ver]["colour"],
+            )
         ax[1].set_xlabel(r"$\ell$")
         ax[1].set_ylabel(r"$C_\ell$")
-        ax[1].set_xlim(ell.min()-10, ell.max()+100)
-        ax[1].set_xscale('squareroot')
-        ax[1].set_yscale('log')
+        ax[1].set_xlim(ell.min() - 10, ell.max() + 100)
+        ax[1].set_xscale("squareroot")
+        ax[1].set_yscale("log")
         ax[1].set_xticks(np.array([100, 400, 900, 1600]))
         ax[1].minorticks_on()
-        ax[1].tick_params(axis='x', which='minor', length=2, width=0.8)
-        minor_ticks = [i*10 for i in range(1, 10)] + [i*100 for i in range(1, 21)]
+        ax[1].tick_params(axis="x", which="minor", length=2, width=0.8)
+        minor_ticks = [i * 10 for i in range(1, 10)] + [
+            i * 100 for i in range(1, 21)
+        ]
         ax[1].xaxis.set_ticks(minor_ticks, minor=True)
 
-        plt.suptitle('Pseudo-Cl EB (Gaussian covariance)')
+        plt.suptitle("Pseudo-Cl EB (Gaussian covariance)")
         plt.legend()
         plt.savefig(out_path)
 
-        #Plotting BB
+        # Plotting BB
         out_path = os.path.abspath(f"{self.cc['paths']['output']}/cell_bb.png")
         fig, ax = plt.subplots(nrows=2, ncols=1, figsize=(8, 8))
         for ver in self.versions:
-            ell = self.pseudo_cls[ver]['pseudo_cl']["ELL"]
-            cov = self.pseudo_cls[ver]['cov']["COVAR_BB_BB"].data
-            ax[0].errorbar(ell, ell*self.pseudo_cls[ver]['pseudo_cl']["BB"], yerr=ell*np.sqrt(np.diag(cov)), fmt=self.cc[ver]["marker"], label=ver+" BB", color=self.cc[ver]["colour"], capsize=2)
+            ell = self.pseudo_cls[ver]["pseudo_cl"]["ELL"]
+            cov = self.pseudo_cls[ver]["cov"]["COVAR_BB_BB"].data
+            ax[0].errorbar(
+                ell,
+                ell * self.pseudo_cls[ver]["pseudo_cl"]["BB"],
+                yerr=ell * np.sqrt(np.diag(cov)),
+                fmt=self.cc[ver]["marker"],
+                label=ver + " BB",
+                color=self.cc[ver]["colour"],
+                capsize=2,
+            )
         ax[0].axhline(0, color="black", linestyle="--")
         ax[0].set_ylabel(r"$\ell C_\ell$")
-        ax[0].set_xlim(ell.min()-10, ell.max()+100)
-        ax[0].set_xscale('squareroot')
+        ax[0].set_xlim(ell.min() - 10, ell.max() + 100)
+        ax[0].set_xscale("squareroot")
         ax[0].set_xticks(np.array([100, 400, 900, 1600]))
         ax[0].minorticks_on()
-        ax[0].tick_params(axis='x', which='minor', length=2, width=0.8)
-        minor_ticks = [i*10 for i in range(1, 10)] + [i*100 for i in range(1, 21)]
+        ax[0].tick_params(axis="x", which="minor", length=2, width=0.8)
+        minor_ticks = [i * 10 for i in range(1, 10)] + [
+            i * 100 for i in range(1, 21)
+        ]
         ax[0].xaxis.set_ticks(minor_ticks, minor=True)
 
         for ver in self.versions:
-            ell = self.pseudo_cls[ver]['pseudo_cl']["ELL"]
-            cov = self.pseudo_cls[ver]['cov']["COVAR_BB_BB"].data
-            ax[1].errorbar(ell, self.pseudo_cls[ver]['pseudo_cl']["BB"], yerr=np.sqrt(np.diag(cov)), fmt=self.cc[ver]["marker"], label=ver+" BB", color=self.cc[ver]["colour"])
+            ell = self.pseudo_cls[ver]["pseudo_cl"]["ELL"]
+            cov = self.pseudo_cls[ver]["cov"]["COVAR_BB_BB"].data
+            ax[1].errorbar(
+                ell,
+                self.pseudo_cls[ver]["pseudo_cl"]["BB"],
+                yerr=np.sqrt(np.diag(cov)),
+                fmt=self.cc[ver]["marker"],
+                label=ver + " BB",
+                color=self.cc[ver]["colour"],
+            )
         ax[1].set_xlabel(r"$\ell$")
         ax[1].set_ylabel(r"$C_\ell$")
-        ax[1].set_xlim(ell.min()-10, ell.max()+100)
-        ax[1].set_xscale('squareroot')
-        ax[1].set_yscale('log')
+        ax[1].set_xlim(ell.min() - 10, ell.max() + 100)
+        ax[1].set_xscale("squareroot")
+        ax[1].set_yscale("log")
         ax[1].set_xticks(np.array([100, 400, 900, 1600]))
         ax[1].minorticks_on()
-        ax[1].tick_params(axis='x', which='minor', length=2, width=0.8)
-        minor_ticks = [i*10 for i in range(1, 10)] + [i*100 for i in range(1, 21)]
+        ax[1].tick_params(axis="x", which="minor", length=2, width=0.8)
+        minor_ticks = [i * 10 for i in range(1, 10)] + [
+            i * 100 for i in range(1, 21)
+        ]
         ax[1].xaxis.set_ticks(minor_ticks, minor=True)
 
-        plt.suptitle('Pseudo-Cl BB (Gaussian covariance)')
+        plt.suptitle("Pseudo-Cl BB (Gaussian covariance)")
         plt.legend()
         plt.savefig(out_path)
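
For reference, the pseudo-Cl flow touched by this patch can be exercised in isolation. The following is a minimal sketch, assuming pymaster >= 2.x (for NmtWorkspace.from_fields, as used above), healpy, and numpy; nside, n_ell_bins, power, and the Gaussian shear values are illustrative stand-ins for the class attributes, not the pipeline defaults.

import healpy as hp
import numpy as np
import pymaster as nmt

nside = 128  # illustrative; the class uses self.nside
lmin, lmax = 8, 2 * nside
b_lmax = lmax - 1
ells = np.arange(lmin, lmax + 1)

# "powspace" bandpowers: edges uniform in ell**power (sqrt-spaced for 1/2)
power, n_ell_bins = 1 / 2, 16
edges = np.power(
    np.linspace(lmin**power, lmax**power, n_ell_bins + 1), 1 / power
)
bpws = np.digitize(ells.astype(float), edges) - 1
bpws[0], bpws[-1] = 0, n_ell_bins - 1  # same edge fix-up as in the patch
b = nmt.NmtBin(ells=ells, bpws=bpws, lmax=b_lmax)

# Spin-2 field from a complex shear map e1 + i*e2 (pure noise here, so the
# demo runs standalone); the sign flip on the imaginary part mirrors the
# factor = -1 applied when pol_factor is True
rng = np.random.default_rng(0)
npix = hp.nside2npix(nside)
shear = rng.normal(0, 0.3, npix) + 1j * rng.normal(0, 0.3, npix)
f = nmt.NmtField(mask=(shear != 0), maps=[shear.real, -shear.imag], lmax=b_lmax)

# Couple with the mask, then decouple with the mode-coupling workspace
wsp = nmt.NmtWorkspace.from_fields(f, f, b)
cl_ee, cl_eb, cl_be, cl_bb = wsp.decouple_cell(nmt.compute_coupled_cell(f, f))
ell_eff = b.get_effective_ells()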
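The random-rotation helper reformatted above is the basis of the noise-only null test: rotating each galaxy's ellipticity by an independent angle preserves the modulus |e| but erases spatial correlations, leaving pure shape noise. A standalone sketch, using numpy's default_rng in place of the legacy np.random.seed call:

import numpy as np

def apply_random_rotation(e1, e2, rng=None):
    # Rotate each (e1, e2) pair by an independent uniform angle in [0, 2*pi)
    rng = np.random.default_rng() if rng is None else rng
    phi = rng.random(len(e1)) * 2 * np.pi
    e1_out = e1 * np.cos(phi) + e2 * np.sin(phi)
    e2_out = -e1 * np.sin(phi) + e2 * np.cos(phi)
    return e1_out, e2_out

rng = np.random.default_rng(42)
e1 = rng.normal(0, 0.3, 1000)
e2 = rng.normal(0, 0.3, 1000)
r1, r2 = apply_random_rotation(e1, e2, rng)
assert np.allclose(e1**2 + e2**2, r1**2 + r2**2)  # |e| is preserved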
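Reading back the FITS table written by save_pseudo_cl uses only column names that plot_pseudo_cl itself accesses (ELL, EE, EB, BB). The file name below is a placeholder (save_pseudo_cl takes out_path as an argument), and placing the binary table in the first extension is an assumption based on the fits.Column usage, not confirmed by the hunks shown:

from astropy.io import fits

with fits.open("pseudo_cl.fits") as hdul:  # hypothetical path
    tab = hdul[1].data  # assumed first-extension binary table
    ell = tab["ELL"]
    cl_ee, cl_eb, cl_bb = tab["EE"], tab["EB"], tab["BB"]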