diff --git a/.gitignore b/.gitignore index d3b9924c9..173c9dfbe 100644 --- a/.gitignore +++ b/.gitignore @@ -85,6 +85,7 @@ cutouts # custom local files local +figures # private dev folder dev/* diff --git a/CHANGELOG.md b/CHANGELOG.md index 31a180d23..59fa4b097 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,4 +1,8 @@ # Changelog +- Minor improvements to the DE CO2 constraint +- Added an option to source industry energy demand from UBA MWMS (Projektionsbericht 2025) for the years 2025-2035 +- Renamed some scripts +- Bugfix: Enforce stricter power import limit so that imports from one country cannot be offset against exports to another - Bugfix: Enforce stricter H2 derivative import limit to avoid that exports of one type of derivative compensate for imports of another - Added an option to source mobility demand from UBA MWMS (Projektionsbericht 2025) for the years 2025-2035 - Renamed functions and script for exogenous mobility demand diff --git a/Snakefile b/Snakefile index 68a00413e..94c914b41 100644 --- a/Snakefile +++ b/Snakefile @@ -523,6 +523,7 @@ rule modify_district_heat_share: rule modify_prenetwork: params: + no_flex_lt_run=config_provider("iiasa_database", "no_flex_lt_run"), efuel_export_ban=config_provider("solving", "constraints", "efuel_export_ban"), enable_kernnetz=config_provider("wasserstoff_kernnetz", "enable"), costs=config_provider("costs"), @@ -555,6 +556,17 @@ rule modify_prenetwork: bev_charge_rate=config_provider("sector", "bev_charge_rate"), bev_energy=config_provider("sector", "bev_energy"), bev_dsm_availability=config_provider("sector", "bev_dsm_availability"), + uba_for_industry=config_provider("iiasa_database", "uba_for_industry"), + uba_for_rescom_electricity=config_provider( + "iiasa_database", "uba_for_rescom_electricity" + ), + scale_industry_non_energy=config_provider( + "iiasa_database", "scale_industry_non_energy" + ), + restrict_cross_border_flows=config_provider( + "iiasa_database", "restrict_cross_border_flows" + ), + force_co2_price=config_provider("iiasa_database", "force_co2_price"), input: costs_modifications="ariadne-data/costs_{planning_horizons}-modifications.csv", network=resources( "networks/base_s_{clusters}_{opts}_{sector_opts}_{planning_horizons}.nc" ), @@ -575,6 +587,12 @@ industrial_demand=resources( "industrial_energy_demand_base_s_{clusters}_{planning_horizons}.csv" ), + industrial_production_per_country_tomorrow=resources( + "industrial_production_per_country_tomorrow_{planning_horizons}-modified.csv" + ), + industry_sector_ratios=resources( + "industry_sector_ratios_{planning_horizons}.csv" + ), pop_weighted_energy_totals=resources( "pop_weighted_energy_totals_s_{clusters}.csv" ), @@ -582,12 +600,13 @@ regions_onshore=resources("regions_onshore_base_s_{clusters}.geojson"), regions_offshore=resources("regions_offshore_base_s_{clusters}.geojson"), offshore_connection_points="ariadne-data/offshore_connection_points.csv", + new_industrial_energy_demand="ariadne-data/UBA_Projektionsbericht2025_Abbildung31_MWMS.csv", output: network=resources( "networks/base_s_{clusters}_{opts}_{sector_opts}_{planning_horizons}_final.nc" ), resources: - mem_mb=4000, + mem_mb=8000, log: RESULTS + "logs/modify_prenetwork_base_s_{clusters}_{opts}_{sector_opts}_{planning_horizons}.log", @@ -595,7 +614,7 @@ "scripts/pypsa-de/modify_prenetwork.py" -ruleorder: modify_industry_demand > build_industrial_production_per_country_tomorrow +ruleorder: modify_industry_production > build_industrial_production_per_country_tomorrow rule modify_existing_heating: @@ -656,7 +675,7
@@ rule build_existing_chp_de: "scripts/pypsa-de/build_existing_chp_de.py" -rule modify_industry_demand: +rule modify_industry_production: params: reference_scenario=config_provider("iiasa_database", "reference_scenario"), input: @@ -671,9 +690,9 @@ rule modify_industry_demand: resources: mem_mb=1000, log: - logs("modify_industry_demand_{planning_horizons}.log"), + logs("modify_industry_production_{planning_horizons}.log"), script: - "scripts/pypsa-de/modify_industry_demand.py" + "scripts/pypsa-de/modify_industry_production.py" rule build_wasserstoff_kernnetz: @@ -737,7 +756,6 @@ rule download_ariadne_template: rule export_ariadne_variables: params: planning_horizons=config_provider("scenario", "planning_horizons"), - hours=config_provider("clustering", "temporal", "resolution_sector"), max_hours=config_provider("electricity", "max_hours"), costs=config_provider("costs"), config_industry=config_provider("industry"), @@ -840,11 +858,22 @@ rule plot_ariadne_variables: rule ariadne_all: input: - expand(RESULTS + "graphs/costs.svg", run=config_provider("run", "name")), - expand( - RESULTS + "ariadne/capacity_detailed.png", - run=config_provider("run", "name"), + price_carbon="results/" + + config["run"]["prefix"] + + "/scenario_comparison/Price-Carbon.png", + + +rule plot_ariadne_scenario_comparison: + params: + output_dir=directory( + "results/" + config["run"]["prefix"] + "/scenario_comparison/" ), + input: + expand(RESULTS + "graphs/costs.svg", run=config_provider("run", "name")), + # expand( + # RESULTS + "ariadne/capacity_detailed.png", + # run=config_provider("run", "name"), + # ), expand( RESULTS + "maps/base_s_{clusters}_{opts}_{sector_opts}-h2_network_incl_kernnetz_{planning_horizons}.pdf", @@ -856,8 +885,12 @@ rule ariadne_all: RESULTS + "ariadne/exported_variables_full.xlsx", run=config_provider("run", "name"), ), + output: + price_carbon="results/" + + config["run"]["prefix"] + + "/scenario_comparison/Price-Carbon.png", script: - "scripts/pypsa-de/plot_ariadne_scenario_comparison.py" + "scripts/pypsa-de/plot_scenario_comparison.py" rule build_scenarios: @@ -959,3 +992,483 @@ rule ariadne_report_only: RESULTS + "ariadne/report/elec_price_duration_curve.pdf", run=config_provider("run", "name"), ), + + +def get_st_sensitivities(w): + dirs = ["base"] + sensitivities = config_provider("iiasa_database", "regret_run", "st_sensitivities")( + w + ) + if sensitivities is None: + return dirs + for sens in sensitivities: + dirs.append(f"{sens}") + return dirs + + +rule prepare_st_low_res_network: + params: + solving=config_provider("solving"), + foresight=config_provider("foresight"), + co2_sequestration_potential=config_provider( + "sector", "co2_sequestration_potential", default=200 + ), + scope_to_fix=config_provider("iiasa_database", "regret_run", "scope_to_fix"), + h2_vent=config_provider("iiasa_database", "regret_run", "h2_vent"), + strict=config_provider("iiasa_database", "regret_run", "strict"), + unit_commitment=config_provider( + "iiasa_database", "regret_run", "unit_commitment" + ), + scale_cross_border_elec_capa=config_provider( + "iiasa_database", "regret_run", "scale_cross_border_elec_capa" + ), + input: + network=RESULTS + + "networks/base_s_{clusters}_{opts}_{sector_opts}_{eeg_sweep_year}.nc", + output: + st_low_res_prenetwork=RESULTS + + "st_low_res_prenetworks/base_s_{clusters}_{opts}_{sector_opts}_{eeg_sweep_year}_eeg_level_{eeg_level}.nc", + resources: + mem_mb=16000, + log: + RESULTS + + 
"logs/st_low_res_prenetwork_s_{clusters}_{opts}_{sector_opts}_{eeg_sweep_year}_eeg_level_{eeg_level}.log", + script: + "scripts/pypsa-de/prepare_st_low_res_network.py" + + +rule solve_st_low_res_network: + params: + st_sensitivity="{sensitivity}", + solving=config_provider("solving"), + regret_run=True, + energy_year=config_provider("energy", "energy_totals_year"), + custom_extra_functionality=input_custom_extra_functionality, + input: + st_low_res_prenetwork=RESULTS + + "st_low_res_prenetworks/base_s_{clusters}_{opts}_{sector_opts}_{eeg_sweep_year}_eeg_level_{eeg_level}.nc", + co2_totals_name=resources("co2_totals.csv"), + energy_totals=resources("energy_totals.csv"), + output: + st_low_res_network=RESULTS + + "st_low_res_networks/{sensitivity}/base_s_{clusters}_{opts}_{sector_opts}_{eeg_sweep_year}_eeg_level_{eeg_level}.nc", + shadow: + shadow_config + log: + solver=RESULTS + + "logs/st_low_res_networks/{sensitivity}/base_s_{clusters}_{opts}_{sector_opts}_{eeg_sweep_year}_eeg_level_{eeg_level}_solver.log", + memory=RESULTS + + "logs/st_low_res_networks/{sensitivity}/base_s_{clusters}_{opts}_{sector_opts}_{eeg_sweep_year}_eeg_level_{eeg_level}_memory.log", + python=RESULTS + + "logs/st_low_res_networks/{sensitivity}/base_s_{clusters}_{opts}_{sector_opts}_{eeg_sweep_year}_eeg_level_{eeg_level}_python.log", + threads: solver_threads + resources: + mem_mb=config_provider("solving", "mem_mb"), + runtime=config_provider("solving", "runtime", default="6h"), + script: + "scripts/pypsa-de/solve_st_low_res_network.py" + + +use rule export_ariadne_variables as export_st_variables with: + input: + template="data/template_ariadne_database.xlsx", + industry_demands=expand( + resources( + "industrial_energy_demand_base_s_{clusters}_{planning_horizons}.csv" + ), + **config["scenario"], + allow_missing=True, + ), + networks=expand( + RESULTS + + "networks/base_s_{clusters}_{opts}_{sector_opts}_{planning_horizons}.nc", + **config["scenario"], + allow_missing=True, + ), + costs=expand( + resources("costs_{planning_horizons}.csv"), + **config["scenario"], + allow_missing=True, + ), + industrial_production_per_country_tomorrow=expand( + resources( + "industrial_production_per_country_tomorrow_{planning_horizons}-modified.csv" + ), + **config["scenario"], + allow_missing=True, + ), + industry_sector_ratios=expand( + resources("industry_sector_ratios_{planning_horizons}.csv"), + **config["scenario"], + allow_missing=True, + ), + industrial_production=resources("industrial_production_per_country.csv"), + energy_totals=resources("energy_totals.csv"), + st_low_res_networks=expand( + RESULTS + + "st_low_res_networks/{sensitivity}/base_s_{clusters}_{opts}_{sector_opts}_{eeg_sweep_year}_eeg_level_{eeg_level}.nc", + **config["scenario"], + eeg_sweep_year=config_provider( + "iiasa_database", "regret_run", "eeg_sweep_year" + ), + allow_missing=True, + ), + output: + exported_variables=RESULTS + + "st_low_res_variables/{sensitivity}/st_low_res_variables_eeg_level_{eeg_level}.xlsx", + exported_variables_full=RESULTS + + "st_low_res_variables/{sensitivity}/st_low_res_variables_full_eeg_level_{eeg_level}.xlsx", + log: + RESULTS + + "logs/st_low_res_variables/{sensitivity}/st_low_res_variables_eeg_level_{eeg_level}.log", + + +rule st_all: + input: + expand( + RESULTS + + "st_low_res_variables/{sensitivity}/st_low_res_variables_eeg_level_{eeg_level}.xlsx", + sensitivity=get_st_sensitivities, + eeg_level=config_provider("iiasa_database", "regret_run", "EEG_levels"), + run=config_provider("run", "name"), + ), + + +rule 
solve_eeg_sweep_lt: + params: + solving=config_provider("solving"), + foresight=config_provider("foresight"), + co2_sequestration_potential=config_provider( + "sector", "co2_sequestration_potential", default=200 + ), + custom_extra_functionality=input_custom_extra_functionality, + energy_year=config_provider("energy", "energy_totals_year"), + input: + network=resources( + "networks/base_s_{clusters}_{opts}_{sector_opts}_{eeg_sweep_year}_final.nc" + ), + co2_totals_name=resources("co2_totals.csv"), + energy_totals=resources("energy_totals.csv"), + output: + network=RESULTS + + "networks/base_s_{clusters}_{opts}_{sector_opts}_{eeg_sweep_year}_EEG_{eeg_level}.nc", + config=RESULTS + + "configs/config.base_s_{clusters}_{opts}_{sector_opts}_{eeg_sweep_year}_EEG_{eeg_level}.yaml", + shadow: + shadow_config + log: + solver=RESULTS + + "logs/base_s_{clusters}_{opts}_{sector_opts}_{eeg_sweep_year}_EEG_{eeg_level}_solver.log", + memory=RESULTS + + "logs/base_s_{clusters}_{opts}_{sector_opts}_{eeg_sweep_year}_EEG_{eeg_level}_memory.log", + python=RESULTS + + "logs/base_s_{clusters}_{opts}_{sector_opts}_{eeg_sweep_year}_EEG_{eeg_level}_python.log", + threads: solver_threads + resources: + mem_mb=config_provider("solving", "mem_mb"), + runtime=config_provider("solving", "runtime", default="6h"), + benchmark: + ( + RESULTS + + "benchmarks/solve_sector_network/base_s_{clusters}_{opts}_{sector_opts}_{eeg_sweep_year}_EEG_{eeg_level}" + ) + conda: + "envs/environment.yaml" + script: + "scripts/solve_network.py" + + +use rule export_ariadne_variables as export_eeg_sweep_lt_variables with: + input: + template="data/template_ariadne_database.xlsx", + industry_demands=expand( + resources( + "industrial_energy_demand_base_s_{clusters}_{planning_horizons}.csv" + ), + **config["scenario"], + allow_missing=True, + ), + networks=expand( + RESULTS + + "networks/base_s_{clusters}_{opts}_{sector_opts}_{planning_horizons}.nc", + **config["scenario"], + allow_missing=True, + ), + costs=expand( + resources("costs_{planning_horizons}.csv"), + **config["scenario"], + allow_missing=True, + ), + industrial_production_per_country_tomorrow=expand( + resources( + "industrial_production_per_country_tomorrow_{planning_horizons}-modified.csv" + ), + **config["scenario"], + allow_missing=True, + ), + industry_sector_ratios=expand( + resources("industry_sector_ratios_{planning_horizons}.csv"), + **config["scenario"], + allow_missing=True, + ), + industrial_production=resources("industrial_production_per_country.csv"), + energy_totals=resources("energy_totals.csv"), + eeg_sweep_networks=expand( + RESULTS + + "networks/base_s_{clusters}_{opts}_{sector_opts}_{eeg_sweep_year}_EEG_{eeg_level}.nc", + **config["scenario"], + eeg_sweep_year=config_provider( + "iiasa_database", "regret_run", "eeg_sweep_year" + ), + allow_missing=True, + ), + output: + exported_variables=RESULTS + "ariadne/exported_variables_EEG_{eeg_level}.xlsx", + exported_variables_full=RESULTS + + "ariadne/exported_variables_full_EEG_{eeg_level}.xlsx", + log: + RESULTS + "logs/export_ariadne_variables_EEG_{eeg_level}.log", + + +rule eeg_sweep: + input: + expand( + RESULTS + "ariadne/exported_variables_full_EEG_{eeg_level}.xlsx", + eeg_level=config_provider("iiasa_database", "regret_run", "EEG_levels"), + run=config_provider("run", "name"), + ), + + +rule prepare_regret_network: + params: + solving=config_provider("solving"), + foresight=config_provider("foresight"), + co2_sequestration_potential=config_provider( + "sector", "co2_sequestration_potential", default=200 + ), + 
scope_to_fix=config_provider("iiasa_database", "regret_run", "scope_to_fix"), + h2_vent=config_provider("iiasa_database", "regret_run", "h2_vent"), + strict=config_provider("iiasa_database", "regret_run", "strict"), + unit_commitment=config_provider( + "iiasa_database", "regret_run", "unit_commitment" + ), + scale_cross_border_elec_capa=config_provider( + "iiasa_database", "regret_run", "scale_cross_border_elec_capa" + ), + input: + decision=RESULTS.replace("{run}", "{decision}") + + "networks/base_s_{clusters}_{opts}_{sector_opts}_{planning_horizons}.nc", + realization=RESULTS + + "networks/base_s_{clusters}_{opts}_{sector_opts}_{planning_horizons}.nc", + output: + regret_prenetwork=RESULTS + + "regret_prenetworks/decision_{decision}_s_{clusters}_{opts}_{sector_opts}_{planning_horizons}.nc", + resources: + mem_mb=16000, + log: + RESULTS + + "logs/regret_prenetwork_{decision}_s_{clusters}_{opts}_{sector_opts}_{planning_horizons}.log", + script: + "scripts/pypsa-de/prepare_regret_network.py" + + +rule solve_regret_network: + params: + st_sensitivity="{sensitivity}", + solving=config_provider("solving"), + regret_run=True, + energy_year=config_provider("energy", "energy_totals_year"), + custom_extra_functionality=input_custom_extra_functionality, + input: + regret_prenetwork=RESULTS + + "regret_prenetworks/decision_{decision}_s_{clusters}_{opts}_{sector_opts}_{planning_horizons}.nc", + co2_totals_name=resources("co2_totals.csv"), + energy_totals=resources("energy_totals.csv"), + output: + regret_network=RESULTS + + "regret_networks/{sensitivity}/decision_{decision}_s_{clusters}_{opts}_{sector_opts}_{planning_horizons}.nc", + shadow: + shadow_config + log: + solver=RESULTS + + "logs/regret_networks/{sensitivity}/decision_{decision}_s_{clusters}_{opts}_{sector_opts}_{planning_horizons}_solver.log", + memory=RESULTS + + "logs/regret_networks/{sensitivity}/decision_{decision}_s_{clusters}_{opts}_{sector_opts}_{planning_horizons}_memory.log", + python=RESULTS + + "logs/regret_networks/{sensitivity}/decision_{decision}_s_{clusters}_{opts}_{sector_opts}_{planning_horizons}_python.log", + threads: solver_threads + resources: + mem_mb=config_provider("solving", "mem_mb"), + runtime=config_provider("solving", "runtime", default="6h"), + script: + "scripts/pypsa-de/solve_regret_network.py" + + +rule export_regret_variables: + params: + planning_horizons=config_provider("scenario", "planning_horizons"), + max_hours=config_provider("electricity", "max_hours"), + costs=config_provider("costs"), + config_industry=config_provider("industry"), + energy_totals_year=config_provider("energy", "energy_totals_year"), + co2_price_add_on_fossils=config_provider("co2_price_add_on_fossils"), + co2_sequestration_cost=config_provider("sector", "co2_sequestration_cost"), + post_discretization=config_provider("solving", "options", "post_discretization"), + NEP_year=config_provider("costs", "NEP"), + NEP_transmission=config_provider("costs", "transmission"), + input: + template="data/template_ariadne_database.xlsx", + industry_demands=expand( + resources( + "industrial_energy_demand_base_s_{clusters}_{planning_horizons}.csv" + ), + **config["scenario"], + allow_missing=True, + ), + networks=expand( + RESULTS + + "regret_networks/{sensitivity}/decision_{decision}_s_{clusters}_{opts}_{sector_opts}_{planning_horizons}.nc", + **config["scenario"], + allow_missing=True, + ), + costs=expand( + resources("costs_{planning_horizons}.csv"), + **config["scenario"], + allow_missing=True, + ), + 
industrial_production_per_country_tomorrow=expand( + resources( + "industrial_production_per_country_tomorrow_{planning_horizons}-modified.csv" + ), + **config["scenario"], + allow_missing=True, + ), + industry_sector_ratios=expand( + resources("industry_sector_ratios_{planning_horizons}.csv"), + **config["scenario"], + allow_missing=True, + ), + industrial_production=resources("industrial_production_per_country.csv"), + energy_totals=resources("energy_totals.csv"), + output: + exported_variables=RESULTS + + "regret_variables/{sensitivity}/regret_variables_{decision}.xlsx", + exported_variables_full=RESULTS + + "regret_variables/{sensitivity}/regret_variables_{decision}_full.xlsx", + resources: + mem_mb=16000, + log: + RESULTS + + "regret_variables/{sensitivity}/logs/export_regret_variables_{decision}.log", + script: + "scripts/pypsa-de/export_ariadne_variables.py" + + +rule regret_no_flex: + input: + "results/" + + config["run"]["prefix"] + + "/scenario_comparison/no_flex_st_regret_networks/Price-Carbon.png", + + +rule regret_base: + input: + "results/" + + config["run"]["prefix"] + + "/scenario_comparison/regret_networks/Price-Carbon.png", + + +rule regret_all: + input: + lambda w: expand( + "results/" + + config["run"]["prefix"] + + "/scenario_comparison/{sensitivity}/Price-Carbon.png", + sensitivity=get_st_sensitivities, + ), + f"results/{config['run']['prefix']}/regret_plots/LT_comparison/elec_capa_comp_de_2025.png", + # expand("results/" + config["run"]["prefix"] + "/regret_plots/{sensitivity}/ST_comparison/elec_price_comp_de.png", + # sensitivity=get_st_sensitivities), + + +rule plot_scenario_comparison_regrets: + params: + output_dir=directory( + "results/" + config["run"]["prefix"] + "/scenario_comparison/{sensitivity}" + ), + input: + exported_variables=expand( + RESULTS + + "regret_variables/{sensitivity}/regret_variables_{decision}_full.xlsx", + run=lambda w: [ + r + for r in config_provider("run", "name")(w) + if r + in config_provider("iiasa_database", "regret_run", "demand_baselines")( + w + ) + ], + decision=config_provider("run", "name"), + allow_missing=True, + ), + output: + price_carbon="results/" + + config["run"]["prefix"] + + "/scenario_comparison/{sensitivity}/Price-Carbon.png", + script: + "scripts/pypsa-de/plot_scenario_comparison.py" + + +rule regret_plots_lt: + params: + scenarios=get_scenarios(run), + planning_horizons=config_provider("scenario", "planning_horizons"), + plotting=config_provider("plotting"), + output_dir=directory( + "results/" + config["run"]["prefix"] + "/regret_plots/LT_comparison" + ), + input: + networks=expand( + RESULTS + + "regret_networks/base/decision_{run}_s_{clusters}_{opts}_{sector_opts}_{planning_horizons}.nc", + **config["scenario"], + run=config["run"]["name"], + ), + regret_variables=expand( + RESULTS + "regret_variables/base/regret_variables_{run}_full.xlsx", + run=config["run"]["name"], + ), + output: + elec_capa_comp_de_2025=f"results/{config['run']['prefix']}/regret_plots/LT_comparison/elec_capa_comp_de_2025.png", + resources: + mem_mb=32000, + script: + "scripts/pypsa-de/regret_plots_lt.py" + + +rule regret_plots: + params: + scenarios=config["run"]["name"], + scenarios_config=get_scenarios(run), + planning_horizons=config_provider("scenario", "planning_horizons"), + plotting=config_provider("plotting"), + output_dir=directory( + f"results/{config['run']['prefix']}/regret_plots/{{sensitivity}}/ST_comparison" + ), + input: + regret_networks=lambda wildcards: expand( + 
f"results/{config['run']['prefix']}/{{run}}/regret_networks/{wildcards.sensitivity}/decision_{{decision}}_s_{{clusters}}_{{opts}}_{{sector_opts}}_{{planning_horizons}}.nc", + run=config["run"]["name"], + decision=config["run"]["name"], + clusters=config["scenario"]["clusters"], + opts=config["scenario"]["opts"], + sector_opts=config["scenario"]["sector_opts"], + planning_horizons=config["scenario"]["planning_horizons"], + allow_missing=True, + ), + output: + elec_price_comp_de=f"results/{config['run']['prefix']}/regret_plots/{{sensitivity}}/ST_comparison/elec_price_comp_de.png", + resources: + mem_mb=32000, + script: + "scripts/pypsa-de/regret_plots.py" diff --git a/ariadne-data/UBA_Projektionsbericht2025_Abbildung31_MWMS.csv b/ariadne-data/UBA_Projektionsbericht2025_Abbildung31_MWMS.csv new file mode 100644 index 000000000..417fd22c4 --- /dev/null +++ b/ariadne-data/UBA_Projektionsbericht2025_Abbildung31_MWMS.csv @@ -0,0 +1,6 @@ +carrier,2025,2030,2035 +fossil,324,258,191 +industry electricity,211,234,249 +solid biomass for industry,31,35,31 +H2 for industry,0,6,42 +low-temperature heat for industry,48,59,63 diff --git a/ariadne-data/costs_2025-modifications.csv b/ariadne-data/costs_2025-modifications.csv index 8fe241d25..2c1dd0fcf 100644 --- a/ariadne-data/costs_2025-modifications.csv +++ b/ariadne-data/costs_2025-modifications.csv @@ -1,5 +1,5 @@ technology,parameter,value,unit,source,further description -gas,fuel,40,EUR/MWh_th,Ariadne, +gas,fuel,32.8,EUR/MWh_th,Ariadne,"40 EUR in 2025, converted to EUR2020 with 0.82" oil,fuel,32.9876,EUR2020/MWh,Ariadne,"$2020 = 0.8775 EUR2020, 1bbl = 1.6998MWh" coal,fuel,10.6694,EUR2020/MWh,Ariadne,"$2020 = 0.8775 EUR2020, 1t = 8.06 MWh" decentral air-sourced heat pump,investment,1604,EUR2020/kW_th,https://ariadneprojekt.de/media/2024/01/Ariadne-Analyse_HeizkostenEmissionenGebaeude_Januar2024.pdf https://www.enpal.de/waermepumpe/kosten/ https://www.bdew.de/media/documents/BDEW-HKV_Altbau.pdf and cost reduction from DEA diff --git a/config/config.de.yaml b/config/config.de.yaml index c086057ac..4b31d1af8 100644 --- a/config/config.de.yaml +++ b/config/config.de.yaml @@ -4,20 +4,22 @@ # docs in https://pypsa-eur.readthedocs.io/en/latest/configuration.html#run run: - prefix: 20250901_h2_deriv_fix + prefix: 20251113_lower_rescom_merge name: # - ExPol - - KN2045_Mix - # - KN2045_Elek - # - KN2045_H2 - # - KN2045_NFniedrig - # - KN2045_NFhoch + # - KN2045_Mix + - HighDemand + - HighDemandNoFlex + # - LowRES + # - LowRESNoFlex + - LowDemand + - LowDemandNoFlex scenarios: enable: true manual_file: config/scenarios.manual.yaml file: config/scenarios.automated.yaml shared_resources: - policy: base #stops recalculating + policy: false #stops recalculating exclude: - existing_heating.csv # specify files which should not be shared between scenarios - costs @@ -43,8 +45,50 @@ iiasa_database: - KN2045_NFhoch reference_scenario: KN2045_Mix region: Deutschland + regret_run: + demand_baselines: # names of scenarios that define the demand variations to be considered in the short-term runs + # - HighDemand + # - HighDemandNoFlex + # - LowRES + # - LowRESNoFlex + scope_to_fix: EU # Supported values are DE and EU + strict: false # If false, the model allows capacity expansion for virtual links and bottlenecks + h2_vent: true + st_sensitivities: # CAVEAT: These behave like wildcards + #- no_flex + #- gas_price_60 + #- gas_price_80 + eeg_sweep_year: + - 2030 + EEG_levels: + - 1.00 + - 0.95 + - 0.90 + - 0.85 + - 0.80 + - 0.75 + - 0.70 + unit_commitment: + enable: false + 
params: custom # options: conservative, average, optimistic, custom + carriers: ["OCGT", "CCGT", "coal", "lignite", "nuclear", "oil", "urban central solid biomass CHP"] # subset of ["OCGT", "CCGT", "coal", "lignite", "nuclear", "oil","urban central solid biomass CHP"] + regions: ['DE'] # subset of ['AT', 'BE', 'CH', 'CZ', 'DE', 'DK', 'FR', 'GB', 'LU', 'NL', 'NO', 'PL', 'SE', 'ES', 'IT'] + scale_cross_border_elec_capa: false # If true, scales cross-border electricity capacities to target values given in prepare_regret_network.py ageb_for_mobility: true # In 2020 use AGEB data for final energy demand and KBA for vehicles - uba_for_mobility: false # For 2025–2035 use MWMS scenario from UBA Projektionsbericht 2025 + uba_for_mobility: # For 2025–2035 use MWMS scenario from UBA Projektionsbericht 2025 + - 2025 + uba_for_industry: # For 2025–2035 use MWMS scenario from UBA Projektionsbericht 2025 + - 2025 + uba_for_rescom_electricity: + - 1900 # dummy entry to disable UBA scaling for rescom electricity demand + no_flex_lt_run: false # If true, removes all flexibility options + scale_industry_non_energy: false # Scale non-energy industry demand directly proportional to energy demand + restrict_cross_border_flows: # restricts cross border flows between all countries (AC) + 2025: 0.4 + 2030: 0.45 + 2035: 0.5 + force_co2_price: + 2030: 200.0 # docs in https://pypsa-eur.readthedocs.io/en/latest/configuration.html#foresight foresight: myopic @@ -52,26 +96,24 @@ foresight: myopic # docs in https://pypsa-eur.readthedocs.io/en/latest/configuration.html#scenario # Wildcard docs in https://pypsa-eur.readthedocs.io/en/latest/wildcards.html scenario: - ll: - - vopt clusters: - - 27 #current options: 27, 49 + - 49 #current options: 27, 49 opts: - '' sector_opts: - none planning_horizons: - - 2020 + #- 2020 - 2025 - 2030 - - 2035 - - 2040 - - 2045 - - 2050 + # - 2035 + # - 2040 + # - 2045 + # - 2050 existing_capacities: - grouping_years_power: [1920, 1950, 1955, 1960, 1965, 1970, 1975, 1980, 1985, 1990, 1995, 2000, 2005, 2010, 2015, 2020] - grouping_years_heat: [1980, 1985, 1990, 1995, 2000, 2005, 2010, 2015, 2019] # heat grouping years >= baseyear will be ignored + grouping_years_power: [1920, 1950, 1955, 1960, 1965, 1970, 1975, 1980, 1985, 1990, 1995, 2000, 2005, 2010, 2015, 2020, 2025] + grouping_years_heat: [1980, 1985, 1990, 1995, 2000, 2005, 2010, 2015, 2020, 2024] # heat grouping years >= baseyear will be ignored fill_value_gas_chp_lifetime: 40 # if no explicit lifetime is given use 40 years. The number was chosen s.t. the existing capacities in 2020 match with statistics. @@ -99,6 +141,7 @@ atlite: renewable: onwind: + resource_classes: 5 capacity_per_sqkm: 1.4 cutout: europe-2019-sarah3-era5 correction_factor: 0.95 @@ -122,6 +165,7 @@ renewable: hub_height: 100. P: 3.06 offwind-ac: + resource_classes: 5 capacity_per_sqkm: 6 landfall_length: 30 cutout: europe-2019-sarah3-era5 @@ -146,6 +190,7 @@ renewable: hub_height: 140. 
P: 5.5 offwind-dc: + resource_classes: 5 capacity_per_sqkm: 6 landfall_length: 30 cutout: europe-2019-sarah3-era5 @@ -175,9 +220,11 @@ renewable: cutout: europe-2019-sarah3-era5 correction_factor: 0.95 solar: + resource_classes: 5 cutout: europe-2019-sarah3-era5 correction_factor: 0.9 # scaling to Abbildung 36 of https://www.ise.fraunhofer.de/de/veroeffentlichungen/studien/aktuelle-fakten-zur-photovoltaik-in-deutschland.html solar-hsat: + resource_classes: 5 cutout: europe-2019-sarah3-era5 correction_factor: 0.9 # scaling to Abbildung 36 of https://www.ise.fraunhofer.de/de/veroeffentlichungen/studien/aktuelle-fakten-zur-photovoltaik-in-deutschland.html hydro: @@ -197,40 +244,40 @@ clustering: # print(fw.div(fw.sum()).subtract(5e-5).round(4).to_dict().__repr__().replace(",","\n")) focus_weights: # 27 nodes: 8 for Germany, 3 for Italy, 2 each for Denmark, UK and Spain, 1 per each of other 10 "Stromnachbarn" - 'DE': 0.2966 - 'AT': 0.0370 - 'BE': 0.0370 - 'CH': 0.0370 - 'CZ': 0.0370 - 'DK': 0.0741 - 'FR': 0.0370 - 'GB': 0.0741 - 'LU': 0.0370 - 'NL': 0.0370 - 'NO': 0.0370 - 'PL': 0.0370 - 'SE': 0.0370 - 'ES': 0.0741 - 'IT': 0.1111 + # 'DE': 0.2966 + # 'AT': 0.0370 + # 'BE': 0.0370 + # 'CH': 0.0370 + # 'CZ': 0.0370 + # 'DK': 0.0741 + # 'FR': 0.0370 + # 'GB': 0.0741 + # 'LU': 0.0370 + # 'NL': 0.0370 + # 'NO': 0.0370 + # 'PL': 0.0370 + # 'SE': 0.0370 + # 'ES': 0.0741 + # 'IT': 0.1111 # high spatial resolution: change clusters to 49 # 49 nodes: 30 for Germany, 3 for Italy, 2 each for Denmark, UK and Spain, 1 per each of other 10 "Stromnachbarn" - # 'DE': 0.6124 - # 'AT': 0.0204 - # 'BE': 0.0204 - # 'CH': 0.0204 - # 'CZ': 0.0204 - # 'DK': 0.0408 - # 'FR': 0.0204 - # 'GB': 0.0408 - # 'LU': 0.0204 - # 'NL': 0.0204 - # 'NO': 0.0204 - # 'PL': 0.0204 - # 'SE': 0.0204 - # 'ES': 0.0408 - # 'IT': 0.0612 + 'DE': 0.6124 + 'AT': 0.0204 + 'BE': 0.0204 + 'CH': 0.0204 + 'CZ': 0.0204 + 'DK': 0.0408 + 'FR': 0.0204 + 'GB': 0.0408 + 'LU': 0.0204 + 'NL': 0.0204 + 'NO': 0.0204 + 'PL': 0.0204 + 'SE': 0.0204 + 'ES': 0.0408 + 'IT': 0.0612 temporal: - resolution_sector: 365H + resolution_sector: 3H # docs in https://pypsa-eur.readthedocs.io/en/latest/configuration.html#co2-budget co2_budget: @@ -269,6 +316,10 @@ first_technology_occurrence: H2 pipeline: 2025 H2 Electrolysis: 2025 H2 pipeline retrofitted: 2025 + urban central water pits charger: 2035 + urban central water pits discharger: 2035 + Store: + urban central water pits: 2035 costs: horizon: "mean" # "optimist", "pessimist" or "mean" @@ -279,7 +330,7 @@ costs: # docs in https://pypsa-eur.readthedocs.io/en/latest/configuration.html#sector sector: v2g: false - solar_thermal: false + solar_thermal: true district_heating: potential: 0.3 progress: @@ -312,6 +363,7 @@ sector: urban decentral: true rural: true co2_spatial: true + co2_network: false biomass_spatial: true ammonia: false methanol: @@ -324,7 +376,10 @@ sector: regional_coal_demand: true #set to true if regional CO2 constraints needed gas_network: false regional_gas_demand: true + regional_co2_sequestration_potential: + enable: false H2_retrofit: true + hydrogen_turbine: false biogas_upgrading_cc: true biomass_to_liquid: true biomass_to_liquid_cc: true @@ -431,8 +486,8 @@ industry: # docs in https://pypsa-eur.readthedocs.io/en/latest/configuration.html#solving solving: - runtime: 12h - mem_mb: 70000 #30000 is OK for 22 nodes, 365H; 140000 for 22 nodes 3H; 400000 for 44 nodes 3H + runtime: 24h + mem_mb: 190000 #30000 is OK for 22 nodes, 365H; 140000 for 22 nodes 3H; 400000 for 44 nodes 3H options: 
assign_all_duals: true load_shedding: false @@ -440,7 +495,7 @@ solving: min_iterations: 1 # settings for post-discretization: 1 max_iterations: 1 # settings for post-discretization: 1 post_discretization: - enable: false + enable: true line_unit_size: 1698 line_threshold: 0.3 link_unit_size: @@ -474,7 +529,7 @@ solving: DE: 2020: 54.5 2025: 69 - 2030: 115 # EEG2023 Ziel für 2030 + 2030: 120 # EEG2023 Ziel für 2030 + 5 GW 2035: 160 # EEG2023 Ziel für 2040 2040: 250 2045: 250 @@ -482,7 +537,7 @@ solving: offwind: DE: 2020: 7.8 - 2025: 11.3 + 2025: 12 2030: 29.3 # uba Projektionsbericht and NEP without delayed BalWin 3 2035: 50 # Planned projects until 2035 (offshore_connection_points.csv) -1.3 GW for potential delays 2040: 65 # Planned projects until 2040 -1.5 GW for potential retirments @@ -491,7 +546,7 @@ solving: solar: DE: 2020: 53.7 - 2025: 110 # EEG2023; assumes for 2026: 128 GW, assuming a fair share reached by end of 2025 + 2025: 115 # EEG2023; assumes for 2026: 128 GW, assuming a fair share reached by end of 2025 2030: 235 # PV Ziel 2030 + 20 GW 2035: 400 2040: 800 @@ -507,6 +562,9 @@ solving: 2040: 50000 2045: 80000 2050: 80000 + urban central water tanks: + DE: + 2025: 120 # GWh, https://www.hamburg-institut.com/wp-content/uploads/2023/12/Referenzblatt_SysGF-1.pdf Link: methanolisation: DE: @@ -606,8 +664,8 @@ solving: # boundary condition lower? DE: 2020: 0 - 2025: 0 - 2030: 10 + 2025: 6 + 2030: 20 2035: 105 2040: 200 2045: 300 @@ -730,7 +788,7 @@ onshore_nep_force: offshore_nep_force: cutin_year: 2025 cutout_year: 2030 - delay_years: 0 + delay_years: 2 scale_capacity: 2020: diff --git a/config/scenarios.manual.yaml b/config/scenarios.manual.yaml index eaa142a28..996a6eba1 100644 --- a/config/scenarios.manual.yaml +++ b/config/scenarios.manual.yaml @@ -2,9 +2,414 @@ # SPDX-FileCopyrightText: : 2017-2023 The PyPSA-Eur Authors # # SPDX-License-Identifier: MIT +HighDemand: + weather_year: 2012 + electricity: + transmission_limit: v1 + solving: + options: + noisy_costs: false + constraints: + decentral_heat_budgets: # UBA MWMS, Projektionsbericht 2025, Abbildung 48 + heat_pump: + DE: + 2025: 12.0 + resistive_heater: + DE: + 2025: 30.0 + biomass_boiler: + DE: + 2025: 103.0 + limits_capacity_min: + Generator: + onwind: + DE: + 2030: 115 # Wind-an-Land Law + 2035: 157 # Wind-an-Land Law + offwind: + DE: + 2035: 40 # 40 Wind-auf-See Law + solar: + DE: # EEG2023; Ziel for 2024: 88 GW and for 2026: 128 GW, + 2025: 101 # assuming at least 1/3 of buildout reached in 2025 + 2030: 215 # PV strategy + 2035: 309 + limits_capacity_max: + Generator: + onwind: + DE: + 2030: 115 # EEG2023 Ziel für 2030 + 5 GW + solar: + DE: + 2030: 215 # PV Ziel 2030 + 2035: 309 + first_technology_occurrence: + Link: + urban central water pits charger: 2040 # essentially moving pits out of the optimization horizon to forbid them + urban central water pits discharger: 2040 + Store: + urban central water pits: 2040 + +HighDemandNoFlex: + weather_year: 2012 + electricity: + transmission_limit: v1 + iiasa_database: + no_flex_lt_run: true + solving: + options: + noisy_costs: false + constraints: + decentral_heat_budgets: # UBA MWMS, Projektionsbericht 2025, Abbildung 48 + heat_pump: + DE: + 2025: 12.0 + resistive_heater: + DE: + 2025: 30.0 + biomass_boiler: + DE: + 2025: 103.0 + limits_capacity_min: + Generator: + onwind: + DE: + 2030: 115 # Wind-an-Land Law + 2035: 157 # Wind-an-Land Law + offwind: + DE: + 2035: 40 # 40 Wind-auf-See Law + solar: + DE: # EEG2023; Ziel for 2024: 88 GW and for 2026: 128 GW, + 2025: 101 # 
assuming at least 1/3 of buildout reached in 2025 + 2030: 215 # PV strategy + 2035: 309 + limits_capacity_max: + Generator: + onwind: + DE: + 2030: 115 # EEG2023 Ziel für 2030 + 5 GW + solar: + DE: + 2030: 215 # PV Ziel 2030 + 2035: 309 + first_technology_occurrence: + Link: + urban central water pits charger: 2040 # essentially moving pits out of the optimization horizon to forbid them + urban central water pits discharger: 2040 + Store: + urban central water pits: 2040 + +LowDemand: # Demand and capacities from UBA + weather_year: 2012 + electricity: + transmission_limit: v1 + iiasa_database: + uba_for_industry: + - 2025 + - 2030 + - 2035 + uba_for_mobility: + - 2025 + - 2030 + - 2035 + uba_for_rescom_electricity: + - 2030 + offshore_nep_force: + delay_years: 2 + solving: + options: + noisy_costs: false + constraints: + decentral_heat_budgets: # UBA MWMS, Projektionsbericht 2025, Abbildung 48 + heat_pump: + DE: + 2025: 12.0 + 2030: 26.0 + 2035: 38.0 + resistive_heater: + DE: + 2025: 30.0 + 2030: 23.0 + 2035: 19.0 + biomass_boiler: + DE: + 2025: 103.0 + 2030: 90.0 + 2035: 76.0 + # central_heat_pump_budgets: + # DE: # UBA MWMS, Projektionsbericht 2025, Abbildung 22 + # 2025: 7.0 + # 2030: 26.0 + # 2035: 52.0 + # Capacities from UBA are here: https://datacube.uba.de/vis?lc=de&df[ds]=dc-release&df[id]=DF_CROSS_PROJECTION_REPORT_CORE_INDICATORS_25&df[ag]=UBA&df[vs]=1.0&av=true&dq=..INSTALLIERTE_LEISTUNG_GROSSBATTERIESPEICHER%2BINSTALLIERTE_LEISTUNG_WIND_AN_LAND%2BINSTALLIERTE_LEISTUNG_WIND_AUF_SEE%2BINSTALLIERTE_LEISTUNG_PV...&pd=2025%2C&to[TIME_PERIOD]=false&pg=0&vw=tb + limits_capacity_min: + Generator: + onwind: + DE: + 2030: 103.99 + 2035: 145.41 + offwind: + DE: + 2030: 26.77 + 2035: 39.97 + solar: + DE: + 2030: 214.55 + 2035: 308.60 + Link: + H2 electrolysis: + DE: + 2030: 0 + limits_capacity_max: + Generator: + onwind: + DE: + 2030: 103.99 + 2035: 145.41 + offwind: + DE: + 2030: 26.77 + 2035: 39.97 + solar: + DE: + 2030: 214.55 + 2035: 308.60 + first_technology_occurrence: + Link: + urban central water pits charger: 2040 # essentially moving pits out of the optimization horizon to forbid them + urban central water pits discharger: 2040 + Store: + urban central water pits: 2040 + +LowDemandNoFlex: + weather_year: 2012 + electricity: + transmission_limit: v1 + iiasa_database: + no_flex_lt_run: true + uba_for_industry: + - 2025 + - 2030 + - 2035 + uba_for_mobility: + - 2025 + - 2030 + - 2035 + uba_for_rescom_electricity: + - 2030 + offshore_nep_force: + delay_years: 2 + solving: + options: + noisy_costs: false + constraints: + decentral_heat_budgets: # UBA MWMS, Projektionsbericht 2025, Abbildung 48 + heat_pump: + DE: + 2025: 12.0 + 2030: 26.0 + 2035: 38.0 + resistive_heater: + DE: + 2025: 30.0 + 2030: 23.0 + 2035: 19.0 + biomass_boiler: + DE: + 2025: 103.0 + 2030: 90.0 + 2035: 76.0 + limits_capacity_min: + Generator: + onwind: + DE: + 2030: 103.99 + 2035: 145.41 + offwind: + DE: + 2030: 26.77 + 2035: 39.97 + solar: + DE: + 2030: 214.55 + 2035: 308.60 + Link: + H2 electrolysis: + DE: + 2030: 0 + limits_capacity_max: + Generator: + onwind: + DE: + 2030: 103.99 + 2035: 145.41 + offwind: + DE: + 2030: 26.77 + 2035: 39.97 + solar: + DE: + 2030: 214.55 + 2035: 308.60 + first_technology_occurrence: + Link: + urban central water pits charger: 2040 # essentially moving pits out of the optimization horizon to forbid them + urban central water pits discharger: 2040 + Store: + urban central water pits: 2040 + +LowRES: # Demand from UBA, lower RES capacities + electricity: + transmission_limit: v1 + 
iiasa_database: + uba_for_industry: + - 2025 + - 2030 + - 2035 + uba_for_mobility: + - 2025 + - 2030 + - 2035 + uba_for_rescom_electricity: + - 2030 + offshore_nep_force: + delay_years: 2 + solving: + options: + noisy_costs: false + constraints: + decentral_heat_budgets: # UBA MWMS, Projektionsbericht 2025, Abbildung 48 + heat_pump: + DE: + 2025: 12.0 + 2030: 26.0 + 2035: 38.0 + resistive_heater: + DE: + 2025: 30.0 + 2030: 23.0 + 2035: 19.0 + biomass_boiler: + DE: + 2025: 103.0 + 2030: 90.0 + 2035: 76.0 + # central_heat_pump_budgets: + # DE: # UBA MWMS, Projektionsbericht 2025, Abbildung 22 + # 2025: 7.0 + # 2030: 26.0 + # 2035: 52.0 + limits_capacity_min: + Generator: + onwind: + DE: + 2030: 0 + 2035: 0 + offwind: + DE: + 2030: 0 + 2035: 0 + Link: + H2 electrolysis: + DE: + 2030: 0 + limits_capacity_max: + Generator: + onwind: + DE: + 2030: 100 + offwind: + DE: + 2030: 21 + solar: + DE: + 2030: 180 + first_technology_occurrence: + Link: + urban central water pits charger: 2040 # essentially moving pits out of the optimization horizon to forbid them + urban central water pits discharger: 2040 + Store: + urban central water pits: 2040 + + +LowRESNoFlex: + electricity: + transmission_limit: v1 + iiasa_database: + no_flex_lt_run: true + uba_for_industry: + - 2025 + - 2030 + - 2035 + uba_for_mobility: + - 2025 + - 2030 + - 2035 + uba_for_rescom_electricity: + - 2030 + offshore_nep_force: + delay_years: 2 + solving: + options: + noisy_costs: false + constraints: + decentral_heat_budgets: # UBA MWMS, Projektionsbericht 2025, Abbildung 48 + heat_pump: + DE: + 2025: 12.0 + 2030: 26.0 + 2035: 38.0 + resistive_heater: + DE: + 2025: 30.0 + 2030: 23.0 + 2035: 19.0 + biomass_boiler: + DE: + 2025: 103.0 + 2030: 90.0 + 2035: 76.0 + # central_heat_pump_budgets: + # DE: # UBA MWMS, Projektionsbericht 2025, Abbildung 22 + # 2025: 7.0 + # 2030: 26.0 + # 2035: 52.0 + limits_capacity_min: + Generator: + onwind: + DE: + 2030: 0 + 2035: 0 + offwind: + DE: + 2030: 0 + 2035: 0 + Link: + H2 electrolysis: + DE: + 2030: 0 + limits_capacity_max: + Generator: + onwind: + DE: + 2030: 100 + offwind: + DE: + 2030: 21 + solar: + DE: + 2030: 180 + first_technology_occurrence: + Link: + urban central water pits charger: 2040 # essentially moving pits out of the optimization horizon to forbid them + urban central water pits discharger: 2040 + Store: + urban central water pits: 2040 + ExPol: # UBA CO2 pathway instead of KSG targets -# Europen CO2 pathway fixed at 2030 for 2035-2050 +# European CO2 pathway fixed at 2030 for 2035-2050 # Kernnetz is not assumed to be built # Not forcing renewables after 2030 diff --git a/scripts/prepare_sector_network.py b/scripts/prepare_sector_network.py index 478c30191..70a58db7d 100755 --- a/scripts/prepare_sector_network.py +++ b/scripts/prepare_sector_network.py @@ -471,19 +471,22 @@ def update_wind_solar_costs( # NB: solar costs are also manipulated for rooftop # when distribution grid is inserted - n.generators.loc[n.generators.carrier == "solar", "capital_cost"] = costs.at[ - "solar-utility", "capital_cost" - ] - n.generators.loc[n.generators.carrier == "solar", "overnight_cost"] = costs.at[ - "solar-utility", "investment" - ] + carrier_cost_dict = { + "solar": "solar-utility", + "solar-hsat": "solar-hsat", + "onwind": "onwind", + } + + for carrier, cost_key in carrier_cost_dict.items(): + if carrier not in n.generators.carrier.values: + continue + n.generators.loc[n.generators.carrier == carrier, "capital_cost"] = costs.at[ + cost_key, "capital_cost" + ] + 
n.generators.loc[n.generators.carrier == carrier, "overnight_cost"] = costs.at[ + cost_key, "investment" + ] - n.generators.loc[n.generators.carrier == "onwind", "capital_cost"] = costs.at[ - "onwind", "capital_cost" - ] - n.generators.loc[n.generators.carrier == "onwind", "overnight_cost"] = costs.at[ - "onwind", "investment" - ] # for offshore wind, need to calculated connection costs for key, fn in profiles.items(): tech = key[len("profile_") :] diff --git a/scripts/pypsa-de/additional_functionality.py b/scripts/pypsa-de/additional_functionality.py index 101857d96..9f49ab9b4 100644 --- a/scripts/pypsa-de/additional_functionality.py +++ b/scripts/pypsa-de/additional_functionality.py @@ -9,7 +9,9 @@ logger = logging.getLogger(__name__) -def add_capacity_limits(n, investment_year, limits_capacity, sense="maximum"): +def add_capacity_limits( + n, investment_year, limits_capacity, snakemake, sense="maximum" +): for c in n.iterate_components(limits_capacity): logger.info(f"Adding {sense} constraints for {c.list_name}") @@ -45,7 +47,11 @@ def add_capacity_limits(n, investment_year, limits_capacity, sense="maximum"): logger.info( f"Existing {c.name} {carrier} capacity in {ct}: {existing_capacity} {units}" ) - + if extendable_index.empty: + logger.warning( + f"No extendable {c.name} {carrier} capacities found in {ct}. Skipping." + ) + continue nom = n.model[c.name + "-" + attr + "_nom"].loc[extendable_index] lhs = nom.sum() @@ -102,60 +108,104 @@ def add_power_limits(n, investment_year, limits_power_max): """ " Restricts the maximum inflow/outflow of electricity from/to a country. """ + + def add_pos_neg_aux_variables(n, idx, infix): + """ + For every snapshot in the network `n`, this function adds auxiliary variables corresponding to the positive and negative parts of the time-dependent dispatch variables of the network components specified in the index `idx`. Since `aux_pos >= max(var, 0)` and `aux_neg <= min(var, 0)` at every snapshot, summing `aux_pos` and `-aux_neg` bounds the gross positive and negative flows. The `infix` parameter is used to create unique names for the auxiliary variables and constraints. + + Parameters + ---------- + n : pypsa.Network + The PyPSA network object containing the model. + idx : pandas.Index + The index of the network component (e.g., lines or links) for which to create auxiliary variables. + infix : str + A string used to create unique names for the auxiliary variables and constraints. + """ + + var_key = f"{idx.name}-{'s' if idx.name == 'Line' else 'p'}" + var = n.model[var_key].sel({idx.name: idx}) + aux_pos = n.model.add_variables( + name=f"{var_key}-{infix}-aux-pos", + lower=0, + coords=[n.snapshots, idx], + ) + aux_neg = n.model.add_variables( + name=f"{var_key}-{infix}-aux-neg", + upper=0, + coords=[n.snapshots, idx], + ) + n.model.add_constraints( + aux_pos >= var, + name=f"{var_key}-{infix}-aux-pos-constr", + ) + n.model.add_constraints( + aux_neg <= var, + name=f"{var_key}-{infix}-aux-neg-constr", + ) + return aux_pos, aux_neg + for ct in limits_power_max: if investment_year not in limits_power_max[ct].keys(): continue - limit = 1e3 * limits_power_max[ct][investment_year] / 10 + lim = 1e3 * limits_power_max[ct][investment_year] # in MW logger.info( - f"Adding constraint on electricity import/export from/to {ct} to be < {limit} MW" + f"Adding constraint on electricity import/export from/to {ct} to be < {lim} MW" ) - incoming_line = n.lines.index[ - (n.lines.carrier == "AC") - & (n.lines.bus0.str[:2] != ct) - & (n.lines.bus1.str[:2] == ct) - ] - outgoing_line = n.lines.index[ - (n.lines.carrier == "AC") - & (n.lines.bus0.str[:2] == ct) - & (n.lines.bus1.str[:2] != ct) - ] + # identify interconnectors - incoming_link = n.links.index[ - (n.links.carrier == "DC") - & (n.links.bus0.str[:2] != ct) - & (n.links.bus1.str[:2] == ct) - ] - outgoing_link = n.links.index[ - (n.links.carrier == "DC") - & (n.links.bus0.str[:2] == ct) - & (n.links.bus1.str[:2] != ct) - ] + incoming_lines = n.lines.query( + f"not bus0.str.startswith('{ct}') and bus1.str.startswith('{ct}') and active" + ) + outgoing_lines = n.lines.query( + f"bus0.str.startswith('{ct}') and not bus1.str.startswith('{ct}') and active" + ) + incoming_links = n.links.query( + f"not bus0.str.startswith('{ct}') and bus1.str.startswith('{ct}') and carrier == 'DC' and active" + ) + outgoing_links = n.links.query( + f"bus0.str.startswith('{ct}') and not bus1.str.startswith('{ct}') and carrier == 'DC' and active" + ) - # iterate over snapshots - otherwise exporting of postnetwork fails since - # the constraints are time dependent - for t in n.snapshots: - incoming_line_p = n.model["Line-s"].loc[t, incoming_line] - outgoing_line_p = n.model["Line-s"].loc[t, outgoing_line] - incoming_link_p = n.model["Link-p"].loc[t, incoming_link] - outgoing_link_p = n.model["Link-p"].loc[t, outgoing_link] + # define auxiliary variables for positive and negative parts of line and link flows - lhs = ( - incoming_link_p.sum() - - outgoing_link_p.sum() - + incoming_line_p.sum() - - outgoing_line_p.sum() - ) / 10 - # divide by 10 to avoid numerical issues + incoming_lines_aux_pos, incoming_lines_aux_neg = add_pos_neg_aux_variables( + n, incoming_lines.index, f"incoming-{ct}" + ) - cname_upper = f"Power-import-limit-{ct}-{t}" - cname_lower = f"Power-export-limit-{ct}-{t}" + outgoing_lines_aux_pos, outgoing_lines_aux_neg = add_pos_neg_aux_variables( + n, outgoing_lines.index, f"outgoing-{ct}" + ) - n.model.add_constraints(lhs <= limit, name=cname_upper) - n.model.add_constraints(lhs >= -limit, name=cname_lower) + incoming_links_aux_pos, incoming_links_aux_neg = add_pos_neg_aux_variables( + n, incoming_links.index, f"incoming-{ct}" + ) + + outgoing_links_aux_pos, outgoing_links_aux_neg = add_pos_neg_aux_variables( + n, outgoing_links.index, f"outgoing-{ct}" + ) - # not adding to network as the shadow prices are not needed + # To constrain the absolute values of imports and exports, we have to sum the + corresponding positive and negative flows
separately, using the auxiliary variables + + import_lhs = ( + incoming_links_aux_pos.sum(dim="Link") + + incoming_lines_aux_pos.sum(dim="Line") + - outgoing_links_aux_neg.sum(dim="Link") + - outgoing_lines_aux_neg.sum(dim="Line") + ) / 10 + + export_lhs = ( + outgoing_links_aux_pos.sum(dim="Link") + + outgoing_lines_aux_pos.sum(dim="Line") + - incoming_links_aux_neg.sum(dim="Link") + - incoming_lines_aux_neg.sum(dim="Line") + ) / 10 + + n.model.add_constraints(import_lhs <= lim / 10, name=f"Power-import-limit-{ct}") + n.model.add_constraints(export_lhs <= lim / 10, name=f"Power-export-limit-{ct}") def h2_import_limits(n, investment_year, limits_volume_max): @@ -373,6 +423,7 @@ def add_national_co2_budgets(n, snakemake, national_co2_budgets, investment_year nyears = nhours / 8760 sectors = determine_emission_sectors(n.config["sector"]) + energy_totals = pd.read_csv(snakemake.input.energy_totals, index_col=[0, 1]) # convert MtCO2 to tCO2 co2_totals = 1e6 * pd.read_csv(snakemake.input.co2_totals_name, index_col=0) @@ -397,8 +448,8 @@ def add_national_co2_budgets(n, snakemake, national_co2_budgets, investment_year links = n.links.index[ (n.links.index.str[:2] == ct) & (n.links[f"bus{port}"] == "co2 atmosphere") - & ( - n.links.carrier != "kerosene for aviation" + & ~n.links.carrier.str.contains( + "shipping|aviation" ) # first exclude aviation to multiply it with a domestic factor later ] @@ -422,27 +473,68 @@ def add_national_co2_budgets(n, snakemake, national_co2_budgets, investment_year ) # Aviation demand - energy_totals = pd.read_csv(snakemake.input.energy_totals, index_col=[0, 1]) domestic_aviation = energy_totals.loc[ (ct, snakemake.params.energy_year), "total domestic aviation" ] international_aviation = energy_totals.loc[ (ct, snakemake.params.energy_year), "total international aviation" ] - domestic_factor = domestic_aviation / ( + domestic_aviation_factor = domestic_aviation / ( domestic_aviation + international_aviation ) aviation_links = n.links[ (n.links.index.str[:2] == ct) & (n.links.carrier == "kerosene for aviation") ] - lhs.append - ( - n.model["Link-p"].loc[:, aviation_links.index] - * aviation_links.efficiency2 - * n.snapshot_weightings.generators - ).sum() * domestic_factor + lhs.append( + ( + n.model["Link-p"].loc[:, aviation_links.index] + * aviation_links.efficiency2 + * n.snapshot_weightings.generators + ).sum() + * domestic_aviation_factor + ) + logger.info( + f"Adding domestic aviation emissions for {ct} with a factor of {domestic_aviation_factor}" + ) + + # Shipping oil + domestic_navigation = energy_totals.loc[ + (ct, snakemake.params.energy_year), "total domestic navigation" + ] + international_navigation = energy_totals.loc[ + (ct, snakemake.params.energy_year), "total international navigation" + ] + domestic_navigation_factor = domestic_navigation / ( + domestic_navigation + international_navigation + ) + shipping_links = n.links[ + (n.links.index.str[:2] == ct) & (n.links.carrier == "shipping oil") + ] + lhs.append( + ( + n.model["Link-p"].loc[:, shipping_links.index] + * shipping_links.efficiency2 + * n.snapshot_weightings.generators + ).sum() + * domestic_navigation_factor + ) + + # Shipping methanol + shipping_meoh_links = n.links[ + (n.links.index.str[:2] == ct) & (n.links.carrier == "shipping methanol") + ] + if not shipping_meoh_links.empty: # no shipping methanol in 2025 + lhs.append( + ( + n.model["Link-p"].loc[:, shipping_meoh_links.index] + * shipping_meoh_links.efficiency2 + * n.snapshot_weightings.generators + ).sum() + * 
domestic_navigation_factor + ) + logger.info( - f"Adding domestic aviation emissions for {ct} with a factor of {domestic_factor}" + f"Adding domestic shipping emissions for {ct} with a factor of {domestic_navigation_factor}" ) # Adding Efuel imports and exports to constraint @@ -535,53 +627,82 @@ def add_national_co2_budgets(n, snakemake, national_co2_budgets, investment_year ) -def force_boiler_profiles_existing_per_load(n): - """ - This scales the boiler dispatch to the load profile with a factor common to - all boilers at load. - """ +def add_decentral_heat_budgets(n, decentral_heat_budgets, investment_year): + carrier_dict = { + "heat_pump": [ + "rural air heat pump", + "rural ground heat pump", + "urban decentral air heat pump", + ], + "resistive_heater": [ + "rural resistive heater", + "urban decentral resistive heater", + ], + "biomass_boiler": [ + "rural biomass boiler", + "urban decentral biomass boiler", + ], + } + + for asset_type, budget_dict in decentral_heat_budgets.items(): + assets = n.links.index[n.links.carrier.isin(carrier_dict[asset_type])] + + if assets.empty: + logger.warning( + f"No {asset_type}s found in the network. Skipping decentral {asset_type} budgets." + ) + continue - logger.info("Forcing boiler profiles for existing ones") + if investment_year not in budget_dict["DE"].keys(): + logger.warning( + f"No decentral {asset_type} budget for {investment_year} found in the config file. Skipping." + ) + continue - decentral_boilers = n.links.index[ - n.links.carrier.str.contains("boiler") - & ~n.links.carrier.str.contains("urban central") - & ~n.links.p_nom_extendable - ] + logger.info(f"Adding decentral {asset_type} budgets") - if decentral_boilers.empty: - return + for ct in budget_dict: + if ct != "DE": + logger.error( + f"{asset_type.capitalize()} budget for countries other than `DE` is not yet supported. Found country {ct}. Please check the config file." + ) - boiler_loads = n.links.loc[decentral_boilers, "bus1"] - boiler_loads = boiler_loads[boiler_loads.isin(n.loads_t.p_set.columns)] - decentral_boilers = boiler_loads.index - boiler_profiles_pu = n.loads_t.p_set[boiler_loads].div( - n.loads_t.p_set[boiler_loads].max(), axis=1 - ) - boiler_profiles_pu.columns = decentral_boilers - boiler_profiles = DataArray( - boiler_profiles_pu.multiply(n.links.loc[decentral_boilers, "p_nom"], axis=1) - ) + limit = budget_dict[ct][investment_year] * 1e6 - boiler_load_index = pd.Index(boiler_loads.unique()) - boiler_load_index.name = "Load" + logger.info( + f"Limiting decentral {asset_type} energy consumption in country {ct} to {budget_dict[ct][investment_year]:.1f} TWh.", + ) + assets = assets[assets.str.startswith(ct)] - # per load scaling factor - n.model.add_variables(coords=[boiler_load_index], name="Load-profile_factor") + lhs = [] - # clumsy indicator matrix to map boilers to loads - df = pd.DataFrame(index=boiler_load_index, columns=decentral_boilers, data=0.0) - for k, v in boiler_loads.items(): - df.loc[v, k] = 1.0 + lhs.append( + ( + n.model["Link-p"].loc[:, assets] * n.snapshot_weightings.generators + ).sum() + ) - lhs = n.model["Link-p"].loc[:, decentral_boilers] - ( - boiler_profiles * DataArray(df) * n.model["Load-profile_factor"] - ).sum("Load") + lhs = sum(lhs) - n.model.add_constraints(lhs, "=", 0, "Link-fixed_profile") + cname = f"decentral_{asset_type}_limit-{ct}" + if cname in n.global_constraints.index: + logger.warning( + f"Global constraint {cname} already exists. Dropping and adding it again."
+ ) + n.global_constraints.drop(cname, inplace=True) - # hack so that PyPSA doesn't complain there is nowhere to store the variable - n.loads["profile_factor_opt"] = 0.0 + n.model.add_constraints( + lhs <= limit, + name=f"GlobalConstraint-{cname}", + ) + n.add( + "GlobalConstraint", + cname, + constant=limit, + sense="<=", + type="", + carrier_attribute="", + ) def force_boiler_profiles_existing_per_boiler(n): @@ -735,49 +856,105 @@ def adapt_nuclear_output(n): ) -def additional_functionality(n, snapshots, snakemake): - logger.info("Adding Ariadne-specific functionality") - - investment_year = int(snakemake.wildcards.planning_horizons[-4:]) - constraints = snakemake.params.solving["constraints"] - - add_capacity_limits( - n, investment_year, constraints["limits_capacity_min"], "minimum" +def add_empty_co2_atmosphere_store_constraint(n): + """ + Ensures that the CO2 atmosphere store at the last snapshot is empty. + """ + logger.info( + "Adding constraint for empty CO2 atmosphere store at the last snapshot." ) + cname = "empty_co2_atmosphere_store" - add_capacity_limits( - n, investment_year, constraints["limits_capacity_max"], "maximum" - ) + last_snapshot = n.snapshots.values[-1] + lhs = n.model["Store-e"].loc[last_snapshot, "co2 atmosphere"] - add_power_limits(n, investment_year, constraints["limits_power_max"]) + n.model.add_constraints(lhs == 0, name=cname) - if snakemake.wildcards.clusters != "1": - h2_import_limits(n, investment_year, constraints["limits_volume_max"]) - electricity_import_limits(n, investment_year, constraints["limits_volume_max"]) +def additional_functionality(n, snapshots, snakemake): + logger.info("Adding Ariadne-specific functionality") + try: + investment_year = int(snakemake.wildcards.planning_horizons[-4:]) + except AttributeError: + investment_year = int(snakemake.wildcards.eeg_sweep_year) + constraints = snakemake.params.solving["constraints"] + if snakemake.wildcards.get("eeg_sweep_year"): + eeg_sweep_year = int(snakemake.wildcards.eeg_sweep_year) + assert eeg_sweep_year == 2030, "EEG sweep implemented only for 2030 " + lvl = float(snakemake.wildcards.eeg_level) + constraints["limits_capacity_min"]["Generator"]["onwind"]["DE"][ + eeg_sweep_year + ] = constraints["limits_capacity_max"]["Generator"]["onwind"]["DE"][ + eeg_sweep_year + ] = 115 * lvl + constraints["limits_capacity_min"]["Generator"]["offwind"]["DE"][ + eeg_sweep_year + ] = constraints["limits_capacity_max"]["Generator"]["offwind"]["DE"][ + eeg_sweep_year + ] = 24 * lvl + constraints["limits_capacity_min"]["Generator"]["solar"]["DE"][ + eeg_sweep_year + ] = constraints["limits_capacity_max"]["Generator"]["solar"]["DE"][ + eeg_sweep_year + ] = 215 * lvl + if not snakemake.params.get("regret_run"): + add_capacity_limits( + n, investment_year, constraints["limits_capacity_min"], snakemake, "minimum" + ) - if investment_year >= 2025: - h2_production_limits( - n, - investment_year, - constraints["limits_volume_min"], - constraints["limits_volume_max"], + add_capacity_limits( + n, investment_year, constraints["limits_capacity_max"], snakemake, "maximum" ) - add_h2_derivate_limit(n, investment_year, constraints["limits_volume_max"]) + if snakemake.wildcards.clusters != "1": + h2_import_limits(n, investment_year, constraints["limits_volume_max"]) + + # deactivate elec import limit since it may lead to strange flows + # TODO evaluate if this is necessary + # electricity_import_limits( + # n, investment_year, constraints["limits_volume_max"] + # ) + + if investment_year >= 2025: + h2_production_limits( + 
n,
+                investment_year,
+                constraints["limits_volume_min"],
+                constraints["limits_volume_max"],
+            )
+            add_h2_derivate_limit(n, investment_year, constraints["limits_volume_max"])
+
+        if isinstance(constraints["co2_budget_national"], dict):
+            if "co2 atmosphere" not in n.generators.index:
+                add_national_co2_budgets(
+                    n,
+                    snakemake,
+                    constraints["co2_budget_national"],
+                    investment_year,
+                )
+            else:
+                logger.warning(
+                    "CO2 atmosphere generator found. Skipping national CO2 budget constraints to avoid conflicts."
+                )
+        else:
+            logger.warning("No national CO2 budget specified!")
+
+    add_power_limits(n, investment_year, constraints["limits_power_max"])

-    # force_boiler_profiles_existing_per_load(n)
     force_boiler_profiles_existing_per_boiler(n)

-    if isinstance(constraints["co2_budget_national"], dict):
-        add_national_co2_budgets(
+    if isinstance(constraints.get("decentral_heat_budgets"), dict):
+        add_decentral_heat_budgets(
             n,
-            snakemake,
-            constraints["co2_budget_national"],
+            constraints["decentral_heat_budgets"],
             investment_year,
         )
-    else:
-        logger.warning("No national CO2 budget specified!")

     if investment_year == 2020:
         adapt_nuclear_output(n)
+
+    if "co2 atmosphere" in n.generators.index:
+        logger.warning(
+            "CO2 atmosphere generator found. Adding constraint for empty CO2 atmosphere store at the last snapshot."
+        )
+        add_empty_co2_atmosphere_store_constraint(n)
diff --git a/scripts/pypsa-de/build_exogenous_mobility_data.py b/scripts/pypsa-de/build_exogenous_mobility_data.py
index 2d316baea..553b343aa 100644
--- a/scripts/pypsa-de/build_exogenous_mobility_data.py
+++ b/scripts/pypsa-de/build_exogenous_mobility_data.py
@@ -12,7 +12,7 @@ def get_mobility_data(
     year,
     non_land_liquids,
     ageb_for_mobility=True,
-    uba_for_mobility=False,
+    uba_for_mobility=(),
 ):
     """
     Retrieve the German mobility demand from the transport model.
@@ -62,7 +62,7 @@ def get_mobility_data(
         # FZ27_202101, table FZ 27.2, 1. January 2021:
         mobility_data["million_EVs"] = 0.358498 + 0.280149

-    elif year == "2025" and uba_for_mobility:
+    elif year == "2025" and int(year) in uba_for_mobility:
         # https://www.umweltbundesamt.de/sites/default/files/medien/11850/publikationen/projektionsbericht_2025.pdf, Abbildung 64 & 59,
         mobility_data = pd.Series(
             {
@@ -77,7 +77,7 @@ def get_mobility_data(
         mobility_data = mobility_data.mul(1e6)  # convert TWh to MWh
         mobility_data["million_EVs"] = 2.7 + 1.2  # BEV + PHEV

-    elif year == "2030" and uba_for_mobility:
+    elif year == "2030" and int(year) in uba_for_mobility:
         mobility_data = pd.Series(
             {
                 "Electricity": 57.0,
@@ -89,7 +89,7 @@ def get_mobility_data(
         mobility_data = mobility_data.mul(1e6)
         mobility_data["million_EVs"] = 8.7 + 1.8

-    elif year == "2035" and uba_for_mobility:
+    elif year == "2035" and int(year) in uba_for_mobility:
         mobility_data = pd.Series(
             {
                 "Electricity": 117.0,
@@ -102,7 +102,7 @@ def get_mobility_data(
         mobility_data["million_EVs"] = 18.9 + 1.8

     else:
-        if uba_for_mobility:
+        if int(year) in uba_for_mobility:
             # here year > 2035
             logger.error(
                 f"Year {year} is not supported for UBA mobility projections. Please use only 2020, 2025, 2030, 2035."
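+                # Illustrative call (hypothetical values): passing
+                # ``uba_for_mobility=[2025, 2030]`` sources the 2025 and 2030
+                # demand from the UBA Projektionsbericht branches above, while
+                # all other years use the default data path; the empty default
+                # disables the UBA projections entirely.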
) @@ -132,8 +132,8 @@ def get_mobility_data( opts="", ll="vopt", sector_opts="none", - planning_horizons="2020", - run="KN2045_Mix", + planning_horizons="2025", + run="LowDemand", ) configure_logging(snakemake) diff --git a/scripts/pypsa-de/build_scenarios.py b/scripts/pypsa-de/build_scenarios.py index 29a030937..37afc6b54 100644 --- a/scripts/pypsa-de/build_scenarios.py +++ b/scripts/pypsa-de/build_scenarios.py @@ -66,7 +66,11 @@ def get_co2_budget(df, source): ## GHG targets according to KSG initial_years_co2 = pd.Series( index=[2020, 2025, 2030], - data=[813, 643, 438], + data=[ + 813, + 629, # From UBA Projektionsbericht 2025 + 456.8, # angepasste Jahresemissionsgesamtmenge laut UBA Projektionsbericht 2025, Tabelle 1 + ], ) later_years_co2 = pd.Series( @@ -85,18 +89,8 @@ def get_co2_budget(df, source): ) else: raise ValueError("Invalid source for CO2 budget.") - ## Compute nonco2 from Ariadne-Leitmodell (REMIND) - # co2 = ( - # df.loc["Emissions|CO2 incl Bunkers","Mt CO2/yr"] - # - df.loc["Emissions|CO2|Land-Use Change","Mt CO2-equiv/yr"] - # - df.loc["Emissions|CO2|Energy|Demand|Bunkers","Mt CO2/yr"] - # ) - # ghg = ( - # df.loc["Emissions|Kyoto Gases","Mt CO2-equiv/yr"] - # - df.loc["Emissions|Kyoto Gases|Land-Use Change","Mt CO2-equiv/yr"] - # # No Kyoto Gas emissions for Bunkers recorded in Ariadne DB - # ) + ## Compute nonco2 from Ariadne-Leitmodell (REMIND) try: co2_land_use_change = df.loc["Emissions|CO2|Land-Use Change", "Mt CO2-equiv/yr"] @@ -117,14 +111,20 @@ def get_co2_budget(df, source): nonco2 = ghg - co2 + # Hard-code values for 2020 and 2025 from UBA reports / projections + # Table 1, https://reportnet.europa.eu/public/dataflow/1478, GHG - CO2 + nonco2[2020] = 733.7 - 647.9 + nonco2[2025] = 628.8 - 554.6 + ## PyPSA disregards nonco2 GHG emissions, but includes bunkers targets_pypsa = targets_co2 - nonco2 + logger.info("Non-CO2 GHG emissions assumed (in Mt CO2-equiv/yr):") + for year in nonco2.index: + logger.info(f"{year}: {nonco2.loc[year]:.1f}") + target_fractions_pypsa = targets_pypsa.loc[targets_co2.index] / baseline_pypsa - target_fractions_pypsa[2020] = ( - 0.671 # Hard-coded based on REMIND data from ariadne2-internal DB - ) return target_fractions_pypsa.round(3) diff --git a/scripts/pypsa-de/export_ariadne_variables.py b/scripts/pypsa-de/export_ariadne_variables.py index 0128698f8..f138f17d7 100644 --- a/scripts/pypsa-de/export_ariadne_variables.py +++ b/scripts/pypsa-de/export_ariadne_variables.py @@ -410,6 +410,252 @@ def fill_if_lifetime_inf(n, carrier, lifetime, component="links"): # n.links.query("carrier=='DC' and index.str.startswith('DC')")[["carrier","annuity","capital_cost","lifetime","FOM","build_year"]].sort_values("FOM") +def get_producer_rents(n, region): + carrier_dict = { + "Non-Biomass Renewables": [ + ( + "Generator", + "offwind-ac", + ), + ( + "Generator", + "offwind-dc", + ), + ( + "Generator", + "onwind", + ), + ( + "Generator", + "ror", + ), + ( + "Generator", + "solar rooftop", + ), + ("Generator", "solar-hsat"), + ( + "Link", + "hydro", + ), + ], + "Consumers": [ + ( + "Link", + "BEV charger", + ), + ( + "Link", + "DAC", + ), + ( + "Link", + "H2 Electrolysis", + ), + ( + "Link", + "H2 Fuel Cell", + ), + ( + "Link", + "methanolisation", + ), + ( + "Link", + "rural air heat pump", + ), + ( + "Link", + "rural ground heat pump", + ), + ( + "Link", + "rural resistive heater", + ), + ( + "Link", + "urban central air heat pump", + ), + ( + "Link", + "urban central resistive heater", + ), + ( + "Link", + "urban decentral air heat pump", + ), + ( 
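+            # ``carrier_dict`` is inverted into ``carrier_map`` below, so that
+            # (component, carrier) pairs map to a reporting category, e.g.
+            # carrier_map[("Generator", "onwind")] == "Non-Biomass Renewables"
+            # and carrier_map[("Link", "CCGT")] == "Conventional".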
+ "Link", + "urban decentral resistive heater", + ), + ( + "Load", + "agriculture electricity", + ), + ( + "Load", + "agriculture machinery electric", + ), + ( + "Load", + "electricity", + ), + ( + "Load", + "industry electricity", + ), + ], + "Conventional": [ + ( + "Link", + "CCGT", + ), + ( + "Link", + "OCGT", + ), + ( + "Link", + "coal", + ), + ( + "Link", + "lignite", + ), + ( + "Link", + "nuclear", + ), + ("Link", "oil"), + ("Link", "urban central coal CHP"), + ("Link", "urban central gas CHP"), + ("Link", "urban central gas CHP CC"), + ("Link", "urban central lignite CHP"), + ("Link", "urban central oil CHP"), + ], + "Transmission": [ # is this correct? + ( + "Line", + "AC", + ), + ( + "Link", + "DC", + ), + # ("Link",'H2 pipeline',), + # ("Link","H2 pipeline (Kernnetz)",), + ( + "Link", + "electricity distribution grid", + ), + ], + "Biomass": [ + ( + "Link", + "solid biomass", + ), + ( + "Link", + "urban central solid biomass CHP", + ), + ( + "Link", + "urban central solid biomass CHP CC", + ), + ], + "Stores": [ + ( + "StorageUnit", + "PHS", + ), + ( + "Link", + "home battery charger", + ), + ( + "Link", + "home battery discharger", + ), + ( + "Link", + "battery charger", + ), + ( + "Link", + "battery discharger", + ), + ], + "Waste": [ + ( + "Link", + "waste CHP", + ), + ( + "Link", + "waste CHP CC", + ), + ], + } + # Invert the dictionary to map specific carriers to their general categories + carrier_map = {val: key for key, values in carrier_dict.items() for val in values} + revenue = ( + n.statistics.revenue(groupby=["bus", "carrier"], nice_names=False) + .filter(like="DE") + .groupby(level=["component", "carrier"]) + .sum() + ) + capex = ( + n.statistics.capex(groupby=["bus", "carrier"], nice_names=False) + .filter(like="DE") + .groupby(level=["component", "carrier"]) + .sum() + ) + capex_by_year = n.statistics.capex( + groupby=["bus", "carrier", "build_year"], nice_names=False + ) + _capex20 = capex_by_year[ + capex_by_year.index.get_level_values("build_year") >= (2030 - 20) + ] + capex20 = _capex20.filter(like="DE").groupby(level=["component", "carrier"]).sum() + excapex = ( + n.statistics.expanded_capex(groupby=["bus", "carrier"], nice_names=False) + .filter(like="DE") + .groupby(level=["component", "carrier"]) + .sum() + ) + opex = ( + n.statistics.opex(groupby=["bus", "carrier"], nice_names=False) + .filter(like="DE") + .groupby(level=["component", "carrier"]) + .sum() + ) + + pr_expanded = revenue.sub(excapex, fill_value=0).sub(opex, fill_value=0) + pr_expanded = pr_expanded.groupby(pr_expanded.index.map(carrier_map)).sum() + pr_expanded.index = ( + "Producer Rent|Annual|Expanded Assets Only|Electricity|" + pr_expanded.index + ) + pr_20 = revenue.sub(capex20, fill_value=0).sub(opex, fill_value=0) + pr_20 = pr_20.groupby(pr_20.index.map(carrier_map)).sum() + pr_20.index = "Producer Rent|Annual|20-year Assets Only|Electricity|" + pr_20.index + pr = revenue.sub(capex, fill_value=0).sub(opex, fill_value=0) + pr = pr.groupby(pr.index.map(carrier_map)).sum() + pr.index = "Producer Rent|Annual|All Assets|Electricity|" + pr.index + pr_st = revenue.sub(opex, fill_value=0) + pr_st = pr_st.groupby(pr_st.index.map(carrier_map)).sum() + pr_st.index = "Producer Rent|Short-term|Electricity|" + pr_st.index + + var = pd.concat([pr_expanded, pr_20, pr, pr_st]) + + var["Electricity System Cost|CAPEX"] = ( + capex.groupby(capex.index.map(carrier_map)).sum().sum() + ) + var["Electricity System Cost|OPEX"] = ( + opex.groupby(opex.index.map(carrier_map)).sum().sum() + ) + return var + + """ 
get_system_cost(n, region) @@ -996,6 +1242,34 @@ def _get_capacities(n, region, cap_func, cap_string="Capacity|"): like="solar thermal" ).sum() + var[cap_string + "Decentral Heat|Heat Pump"] = capacities_decentral_heat.filter( + like="heat pump" + ).sum() + + var[cap_string + "Decentral Heat|Resistive Heater"] = ( + capacities_decentral_heat.filter(like="resistive heater").sum() + ) + + var[cap_string + "Decentral Heat|Biomass"] = capacities_decentral_heat.filter( + like="biomass" + ).sum() + + var[cap_string + "Decentral Heat|Gas"] = capacities_decentral_heat.filter( + like="gas" + ).sum() + + var[cap_string + "Decentral Heat|Oil"] = capacities_decentral_heat.filter( + like="oil boiler" + ).sum() + + var[cap_string + "Decentral Heat|Storage Converter"] = capacities_decentral_heat[ + capacities_decentral_heat.index.str.contains("water tanks discharger") + ].sum() + + var[cap_string + "Decentral Heat|Storage Reservoir"] = storage_capacities[ + storage_capacities.index.str.contains("(?:decentral|rural) water tanks") + ].sum() + capacities_h2 = ( cap_func( bus_carrier="H2", @@ -1027,7 +1301,9 @@ def _get_capacities(n, region, cap_func, cap_string="Capacity|"): capacities_h2.get("H2 Electrolysis", 0) + var[cap_string + "Hydrogen|Gas"] ) - var[cap_string + "Hydrogen|Reservoir"] = storage_capacities.get("H2 Store", 0) + var[cap_string + "Hydrogen|Storage Reservoir"] = var[ + cap_string + "Hydrogen|Reservoir" + ] = storage_capacities.get("H2 Store", 0) capacities_gas = ( cap_func( @@ -1513,15 +1789,26 @@ def get_secondary_energy(n, region, _industry_demand): ], ).sum() + gas_fractions = _get_fuel_fractions(n, region, "gas") + var["Secondary Energy|Electricity|Gas|Fossil"] = ( + var["Secondary Energy|Electricity|Gas"] * gas_fractions["Natural Gas"] + ) + var["Secondary Energy|Electricity|Gas|Biomass"] = ( + var["Secondary Energy|Electricity|Gas"] * gas_fractions["Biomass"] + ) + var["Secondary Energy|Electricity|Fossil"] = ( - var["Secondary Energy|Electricity|Gas"] + var["Secondary Energy|Electricity|Gas|Fossil"] + var["Secondary Energy|Electricity|Oil"] + var["Secondary Energy|Electricity|Coal"] ) - var["Secondary Energy|Electricity|Biomass|w/o CCS"] = electricity_supply.reindex( - ["urban central solid biomass CHP", "solid biomass", "biogas"] - ).sum() + var["Secondary Energy|Electricity|Biomass|w/o CCS"] = ( + electricity_supply.reindex( + ["urban central solid biomass CHP", "solid biomass", "biogas"] + ).sum() + + var["Secondary Energy|Electricity|Gas|Biomass"] + ) var["Secondary Energy|Electricity|Biomass|w/ CCS"] = electricity_supply.get( "urban central solid biomass CHP CC", 0 ) @@ -1529,8 +1816,8 @@ def get_secondary_energy(n, region, _industry_demand): like="solid biomass" ).sum() var["Secondary Energy|Electricity|Biomass|Gaseous and Liquid"] = ( - electricity_supply.get("biogas") - ) + electricity_supply.get("biogas", 0) + ) + var["Secondary Energy|Electricity|Gas|Biomass"] var["Secondary Energy|Electricity|Biomass"] = ( var["Secondary Energy|Electricity|Biomass|w/o CCS"] + var["Secondary Energy|Electricity|Biomass|w/ CCS"] @@ -1584,7 +1871,19 @@ def get_secondary_energy(n, region, _industry_demand): var["Secondary Energy|Electricity|Curtailment"] = ( n.statistics.curtailment(bus_carrier=["AC", "low voltage"], **kwargs) .filter(like=region) - .values.sum() + .groupby("carrier") + .sum() + .reindex( + [ + "offwind-ac", + "offwind-dc", + "onwind", + "solar", + "solar rooftop", + "solar-hsat", + ] + ) + .sum() ) electricity_balance = ( @@ -1611,19 +1910,29 @@ def get_secondary_energy(n, 
region, _industry_demand): ).sum() ) - # TODO Compute transmission losses via links_t - # var["Secondary Energy|Electricity|Transmission Losses"] = \ - # n.statistics.withdrawal( - # bus_carrier=["AC", "low voltage"], **kwargs - # ).filter(like=region).groupby(["carrier"]).sum().get( - # ["AC", "DC", "electricity distribution grid"] - # ).subtract( - # n.statistics.supply( - # bus_carrier=["AC", "low voltage"], **kwargs - # ).filter(like=region).groupby(["carrier"]).sum().get( - # ["AC", "DC", "electricity distribution grid"] - # ) - # ).sum() + idx = n.lines.query( + f"bus0.str.contains('{region}') and bus1.str.contains('{region}')" + ).index + acl = ( + n.lines_t.p0[idx].sum(axis=1).multiply(n.snapshot_weightings.generators).sum() + + n.lines_t.p1[idx].sum(axis=1).multiply(n.snapshot_weightings.generators).sum() + ) + idx = n.links.query( + f"bus0.str.contains('{region}') and bus1.str.contains('{region}') and carrier == 'DC'" + ).index + dcl = ( + n.links_t.p0[idx].sum(axis=1).multiply(n.snapshot_weightings.generators).sum() + + n.links_t.p1[idx].sum(axis=1).multiply(n.snapshot_weightings.generators).sum() + ) + idx = n.links.query( + f"bus0.str.contains('{region}') and bus1.str.contains('{region}') and carrier == 'electricity distribution grid'" + ).index + distril = ( + n.links_t.p0[idx].sum(axis=1).multiply(n.snapshot_weightings.generators).sum() + + n.links_t.p1[idx].sum(axis=1).multiply(n.snapshot_weightings.generators).sum() + ) + + var["Secondary Energy|Electricity|Transmission Losses"] = acl + dcl + distril # supply - withdrawal # var["Secondary Energy|Electricity|Storage"] = \ @@ -1818,9 +2127,17 @@ def get_secondary_energy(n, region, _industry_demand): axis=0, ).sum() mwh_coal_per_mwh_coke = 1.366 + coke_fraction = ( + industry_demand.get("coke") + * mwh_coal_per_mwh_coke + / ( + industry_demand.get("coke") * mwh_coal_per_mwh_coke + + industry_demand.get("coal") + ) + ) # Coke is added as a coal demand, so we need to convert back to units of coke for secondary energy var["Secondary Energy|Solids|Coal"] = var["Secondary Energy|Solids"] = ( - industry_demand.get("coke", 0) / mwh_coal_per_mwh_coke + sum_load(n, "coal for industry", region) * coke_fraction / mwh_coal_per_mwh_coke ) biomass_usage = ( @@ -1990,14 +2307,17 @@ def get_final_energy( # !: Pypsa-eur does not strictly distinguish between energy and # non-energy use - var["Final Energy|Industry|Electricity"] = industry_demand.get("electricity") - # or use: sum_load(n, "industry electricity", region) + var["Final Energy|Industry|Electricity"] = sum_load( + n, "industry electricity", region + ) # electricity is not used for non-energy purposes var["Final Energy|Industry excl Non-Energy Use|Electricity"] = var[ "Final Energy|Industry|Electricity" ] - var["Final Energy|Industry|Heat"] = industry_demand.get("low-temperature heat") + var["Final Energy|Industry|Heat"] = sum_load( + n, "low-temperature heat for industry", region + ) # heat is not used for non-energy purposes var["Final Energy|Industry excl Non-Energy Use|Heat"] = var[ "Final Energy|Industry|Heat" @@ -2009,7 +2329,7 @@ def get_final_energy( # var["Final Energy|Industry|Geothermal"] = \ # Not implemented - var["Final Energy|Industry|Gases"] = industry_demand.get("methane") + var["Final Energy|Industry|Gases"] = sum_load(n, "gas for industry", region) for gas_type in gas_fractions.index: var[f"Final Energy|Industry|Gases|{gas_type}"] = ( @@ -2031,7 +2351,7 @@ def get_final_energy( # var["Final Energy|Industry|Power2Heat"] = \ # Q: misleading description - var["Final 
Energy|Industry|Hydrogen"] = industry_demand.get("hydrogen") + var["Final Energy|Industry|Hydrogen"] = sum_load(n, "H2 for industry", region) # subtract non-energy used hydrogen from total hydrogen demand var["Final Energy|Industry excl Non-Energy Use|Hydrogen"] = ( var["Final Energy|Industry|Hydrogen"] @@ -2075,16 +2395,29 @@ def get_final_energy( # var["Final Energy|Industry|Other"] = \ - var["Final Energy|Industry|Solids|Biomass"] = industry_demand.get("solid biomass") + var["Final Energy|Industry|Solids|Biomass"] = sum_load( + n, "solid biomass for industry", region + ) var["Final Energy|Industry excl Non-Energy Use|Solids|Biomass"] = var[ "Final Energy|Industry|Solids|Biomass" ] mwh_coal_per_mwh_coke = 1.366 - # Coke is added as a coal demand, so we need to convert back to units of coke for final energy + coke_fraction = ( + industry_demand.get("coke") + * mwh_coal_per_mwh_coke + / ( + industry_demand.get("coke") * mwh_coal_per_mwh_coke + + industry_demand.get("coal") + ) + ) + # Contains coke demand, which is a coal product + # Here coke is considered a secondary energy source var["Final Energy|Industry|Solids|Coal"] = ( - industry_demand.get("coal") - + industry_demand.get("coke") / mwh_coal_per_mwh_coke + sum_load(n, "coal for industry", region) * (1 - coke_fraction) + + sum_load(n, "coal for industry", region) + * coke_fraction + / mwh_coal_per_mwh_coke ) var["Final Energy|Industry excl Non-Energy Use|Solids|Coal"] = var[ "Final Energy|Industry|Solids|Coal" @@ -2415,7 +2748,7 @@ def get_final_energy( var["Final Energy|Agriculture|Electricity"] = sum_load( n, "agriculture electricity", region - ) + ) + sum_load(n, "agriculture machinery electric", region) var["Final Energy|Agriculture|Heat"] = sum_load(n, "agriculture heat", region) var["Final Energy|Agriculture|Liquids"] = sum_load( n, "agriculture machinery oil", region @@ -2572,10 +2905,171 @@ def get_final_energy( return var * MWh2TWh -def get_emissions(n, region, _energy_totals, industry_demand): +def get_emissions(n, region, _energy_totals, _industry_demand): + def get_constraint_emissions(n, ct): + lhs = [] + + for port in [col[3:] for col in n.links if col.startswith("bus")]: + links = n.links.index[ + (n.links.index.str[:2] == ct) + & (n.links[f"bus{port}"] == "co2 atmosphere") + & ~n.links.carrier.str.contains( + "shipping|aviation" + ) # first exclude aviation to multiply it with a domestic factor later + ] + + if port == "0": + efficiency = -1.0 + elif port == "1": + efficiency = n.links.loc[links, "efficiency"] + else: + efficiency = n.links.loc[links, f"efficiency{port}"] + + variables = ( + n.links_t.p0.loc[:, links] + .mul(efficiency) + .mul(n.snapshot_weightings.generators, axis=0) + .sum() + ) + if not variables.empty: + lhs.append(variables) + + # Aviation demand + energy_totals = pd.read_csv(snakemake.input.energy_totals, index_col=[0, 1]) + domestic_aviation = energy_totals.loc[ + (ct, snakemake.params.energy_totals_year), "total domestic aviation" + ] + international_aviation = energy_totals.loc[ + (ct, snakemake.params.energy_totals_year), "total international aviation" + ] + domestic_aviation_factor = domestic_aviation / ( + domestic_aviation + international_aviation + ) + aviation_links = n.links[ + (n.links.index.str[:2] == ct) & (n.links.carrier == "kerosene for aviation") + ] + lhs.append( + ( + n.links_t.p0.loc[:, aviation_links.index] + .mul(aviation_links.efficiency2) + .mul(n.snapshot_weightings.generators, axis=0) + ).sum() + * domestic_aviation_factor + ) + + # Shipping oil + domestic_navigation = 
energy_totals.loc[ + (ct, snakemake.params.energy_totals_year), "total domestic navigation" + ] + international_navigation = energy_totals.loc[ + (ct, snakemake.params.energy_totals_year), "total international navigation" + ] + domestic_navigation_factor = domestic_navigation / ( + domestic_navigation + international_navigation + ) + shipping_links = n.links[ + (n.links.index.str[:2] == ct) & (n.links.carrier == "shipping oil") + ] + lhs.append( + ( + n.links_t.p0.loc[:, shipping_links.index].mul( + n.snapshot_weightings.generators, axis=0 + ) + * shipping_links.efficiency2 + ).sum() + * domestic_navigation_factor + ) + + # Shipping methanol + shipping_meoh_links = n.links[ + (n.links.index.str[:2] == ct) & (n.links.carrier == "shipping methanol") + ] + lhs.append( + ( + n.links_t.p0.loc[:, shipping_meoh_links.index].mul( + n.snapshot_weightings.generators, axis=0 + ) + * shipping_meoh_links.efficiency2 + ).sum() + * domestic_navigation_factor + ) + + # Adding Efuel imports and exports to constraint + incoming_oil = n.links.index[n.links.index == f"EU renewable oil -> {ct} oil"] + outgoing_oil = n.links.index[n.links.index == f"{ct} renewable oil -> EU oil"] + + lhs.append( + ( + -1 + * n.links_t.p0.loc[:, incoming_oil].mul( + n.snapshot_weightings.generators, axis=0 + ) + * 0.2571 + ).sum() + ) + lhs.append( + ( + n.links_t.p0.loc[:, outgoing_oil].mul( + n.snapshot_weightings.generators, axis=0 + ) + * 0.2571 + ).sum() + ) + + incoming_methanol = n.links.index[ + n.links.index == f"EU methanol -> {ct} methanol" + ] + outgoing_methanol = n.links.index[ + n.links.index == f"{ct} methanol -> EU methanol" + ] + + lhs.append( + ( + -1 + * n.links_t.p0.loc[:, incoming_methanol].mul( + n.snapshot_weightings.generators, axis=0 + ) + / snakemake.config["sector"]["MWh_MeOH_per_tCO2"] + ).sum() + ) + + lhs.append( + ( + n.links_t.p0.loc[:, outgoing_methanol].mul( + n.snapshot_weightings.generators, axis=0 + ) + / snakemake.config["sector"]["MWh_MeOH_per_tCO2"] + ).sum() + ) + + # Methane + incoming_CH4 = n.links.index[n.links.index == f"EU renewable gas -> {ct} gas"] + outgoing_CH4 = n.links.index[n.links.index == f"{ct} renewable gas -> EU gas"] + + lhs.append( + ( + -1 + * n.links_t.p0.loc[:, incoming_CH4].mul( + n.snapshot_weightings.generators, axis=0 + ) + * 0.198 + ).sum() + ) + + lhs.append( + ( + n.links_t.p0.loc[:, outgoing_CH4].mul( + n.snapshot_weightings.generators, axis=0 + ) + * 0.198 + ).sum() + ) + + return pd.concat(lhs).sum() * t2Mt + energy_totals = _energy_totals.loc[region[0:2]] - industry_DE = industry_demand.filter( + industry_demand = _industry_demand.filter( like=region, axis=0, ).sum() @@ -2587,6 +3081,8 @@ def get_emissions(n, region, _energy_totals, industry_demand): var = pd.Series() + var["Emissions|CO2|Model|Constraint"] = get_constraint_emissions(n, region).sum() + co2_emissions = ( n.statistics.supply(bus_carrier="co2", **kwargs) .filter(like=region) @@ -2881,8 +3377,22 @@ def get_emissions(n, region, _energy_totals, industry_demand): ) # considered 0 anyways mwh_coal_per_mwh_coke = 1.366 # from eurostat energy balance - # 0.3361 t/MWh, 1e-6 to convert to Mt - coking_emissions = industry_DE.coke * (mwh_coal_per_mwh_coke - 1) * 0.3361 * t2Mt + coke_fraction = ( + industry_demand.get("coke") + * mwh_coal_per_mwh_coke + / ( + industry_demand.get("coke") * mwh_coal_per_mwh_coke + + industry_demand.get("coal") + ) + ) + # 0.3361 t_CO2/MWh + coking_emissions = ( + sum_load(n, "coal for industry", region) + * coke_fraction + * (mwh_coal_per_mwh_coke - 1) + * 0.3361 + * t2Mt 
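+        # Worked example with hypothetical demands: 10 TWh coke and 20 TWh
+        # coal give coke_fraction = 10 * 1.366 / (10 * 1.366 + 20) ≈ 0.406,
+        # so about 40.6 % of the "coal for industry" load is attributed to
+        # coking, and only its conversion losses (factor 1.366 - 1) count as
+        # emissions here.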
+ ) var["Emissions|Gross Fossil CO2|Energy|Demand|Industry"] = ( co2_emissions.reindex( [ @@ -2966,7 +3476,78 @@ def get_emissions(n, region, _energy_totals, industry_demand): var["Emissions|CO2|Energy|Demand"] + var["Emissions|CO2|Energy|Demand|Bunkers"] ) + CHP_emissions_E = CHP_emissions.multiply(CHP_E_fraction).groupby("carrier").sum() + CHP_emissions_H = ( + CHP_emissions.multiply(1 - CHP_E_fraction).groupby("carrier").sum() + ) + CHP_negative_emissions_E = ( + CHP_negative_emissions.multiply(negative_CHP_E_fraction) + .groupby("carrier") + .sum() + ) + CHP_negative_emissions_H = ( + CHP_negative_emissions.multiply(1 - negative_CHP_E_fraction) + .groupby("carrier") + .sum() + ) + + var["Emissions|Gross Fossil CO2|Energy|Supply|Electricity|Gas"] = ( + co2_emissions.reindex( + [ + "OCGT", + "CCGT", + ], + ).sum() + + CHP_emissions_E.filter(like="gas").sum() + ) + var["Emissions|CO2|Energy|Supply|Electricity|Gas"] = ( + var["Emissions|Gross Fossil CO2|Energy|Supply|Electricity|Gas"] + - CHP_negative_emissions_E.filter(like="gas").sum() + ) + + var["Emissions|CO2|Energy|Supply|Electricity|Coal"] = var[ + "Emissions|Gross Fossil CO2|Energy|Supply|Electricity|Coal" + ] = ( + co2_emissions.reindex( + [ + "coal", + "lignite", + ], + ).sum() + + CHP_emissions_E.filter(regex="coal|lignite").sum() + ) + var["Emissions|CO2|Energy|Supply|Electricity|Oil"] = var[ + "Emissions|Gross Fossil CO2|Energy|Supply|Electricity|Oil" + ] = ( + co2_emissions.reindex( + [ + "oil", + ], + ).sum() + + CHP_emissions_E.filter(like="oil").sum() + ) + + var["Emissions|CO2|Energy|Supply|Electricity|Biomass"] = ( + CHP_negative_emissions_E.filter(like="bio").sum() + ) + + var["Emissions|Gross Fossil CO2|Energy|Supply|Electricity|Waste"] = ( + CHP_emissions_E.filter(like="waste CHP").sum() + ) + var["Emissions|CO2|Energy|Supply|Electricity|Waste"] = ( + var["Emissions|Gross Fossil CO2|Energy|Supply|Electricity|Waste"] + - CHP_negative_emissions_E.filter(like="waste").sum() + ) + var["Emissions|Gross Fossil CO2|Energy|Supply|Electricity"] = ( + var["Emissions|Gross Fossil CO2|Energy|Supply|Electricity|Gas"] + + var["Emissions|Gross Fossil CO2|Energy|Supply|Electricity|Coal"] + + var["Emissions|Gross Fossil CO2|Energy|Supply|Electricity|Oil"] + + var["Emissions|Gross Fossil CO2|Energy|Supply|Electricity|Waste"] + ) + + assert isclose( + var["Emissions|Gross Fossil CO2|Energy|Supply|Electricity"], co2_emissions.reindex( [ "OCGT", @@ -2976,22 +3557,22 @@ def get_emissions(n, region, _energy_totals, industry_demand): "oil", ], ).sum() - + CHP_emissions.multiply(CHP_E_fraction).values.sum() + + CHP_emissions_E.sum(), ) var["Emissions|CO2|Energy|Supply|Electricity"] = ( var["Emissions|Gross Fossil CO2|Energy|Supply|Electricity"] - - CHP_negative_emissions.multiply(negative_CHP_E_fraction).values.sum() + - CHP_negative_emissions_E.sum() ) var["Emissions|Gross Fossil CO2|Energy|Supply|Heat"] = ( co2_emissions.filter(like="urban central").filter(like="boiler").sum() - + CHP_emissions.multiply(1 - CHP_E_fraction).values.sum() + + CHP_emissions_H.sum() ) var["Emissions|CO2|Energy|Supply|Heat"] = ( var["Emissions|Gross Fossil CO2|Energy|Supply|Heat"] - - CHP_negative_emissions.multiply(1 - negative_CHP_E_fraction).values.sum() + - CHP_negative_emissions_H.sum() ) var["Emissions|CO2|Energy|Supply|Electricity and Heat"] = ( @@ -3150,7 +3731,7 @@ def get_nodal_supply(n, bus_carrier, query="index == index or index != index"): return result -def price_load(n, load_carrier, region): +def price_load(n, load_carrier, region, clip=None): """ 
Calculate the average price of a specific load carrier in a given region. @@ -3171,7 +3752,8 @@ def price_load(n, load_carrier, region): if n.loads_t.p[load.index].values.sum() < 1: return np.nan, 0 result = ( - n.loads_t.p[load.index] * n.buses_t.marginal_price[load.bus].values + n.loads_t.p[load.index] + * n.buses_t.marginal_price[load.bus].clip(upper=clip).values ).values.sum() result /= n.loads_t.p[load.index].values.sum() return result, n.loads_t.p[load.index].values.sum() @@ -3310,6 +3892,72 @@ def get_weighted_costs(costs, flows): return result +def get_vre_market_values(n, region): + def get_einheitspreis(n, region): + ac_buses = n.buses.query( + f"index.str.startswith('{region}') and carrier == 'AC'" + ).index + nodal_prices = n.buses_t.marginal_price[ac_buses] + + nodal_flows = ( + n.statistics.withdrawal( + bus_carrier="AC", + groupby=["name", "bus", "carrier"], + aggregate_time=False, + ) + .groupby("bus") + .sum() + .T.filter( + like="DE", + axis=1, + ) + ) + + weighted_mean_nodal_price = ( + nodal_flows.mul(nodal_prices).sum(axis=1).div(nodal_flows.sum(axis=1)) + ) + + return weighted_mean_nodal_price + + def get_vre_market_value(n, region, carriers): + idx = n.generators.query( + f"carrier in {carriers} and bus.str.contains('{region}')" + ).index + gen = n.generators_t.p[idx].sum(axis=1) + einheitspreis = get_einheitspreis(n, region) + + return einheitspreis.mul(gen).sum() / gen.sum() + + var = pd.Series() + var["Market Value|Electricity|Wind|Onshore"] = get_vre_market_value( + n, region, ["onwind"] + ) + var["Market Value|Electricity|Wind|Offshore|AC"] = get_vre_market_value( + n, region, ["offwind-ac"] + ) + var["Market Value|Electricity|Wind|Offshore|DC"] = get_vre_market_value( + n, region, ["offwind-dc"] + ) + var["Market Value|Electricity|Wind|Offshore"] = get_vre_market_value( + n, region, ["offwind-ac", "offwind-dc"] + ) + var["Market Value|Electricity|Solar|PV"] = get_vre_market_value( + n, region, ["solar", "solar-hsat"] + ) + var["Market Value|Electricity|Solar|Rooftop"] = get_vre_market_value( + n, region, ["solar rooftop"] + ) + var["Market Value|Electricity|Solar"] = get_vre_market_value( + n, region, ["solar", "solar-hsat", "solar rooftop"] + ) + var["Market Value|Electricity|VRE"] = get_vre_market_value( + n, + region, + ["onwind", "offwind-ac", "offwind-dc", "solar", "solar-hsat", "solar rooftop"], + ) + return var + + def get_prices(n, region): """ Calculate the prices of various energy sources in the Ariadne model. 
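+    Marginal prices above ``max_elec_price`` (4000 EUR/MWh) are clipped before
+    averaging to damp scarcity-price spikes; the clipped remainder is reported
+    separately as ``Subsidy|Electricity``.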
@@ -3339,8 +3987,12 @@ def get_prices(n, region): except KeyError: co2_limit_de = 0 + try: + co2_limit_eu = n.global_constraints.loc["CO2Limit", "mu"] + except KeyError: + co2_limit_eu = n.generators.loc["co2 atmosphere", "marginal_cost"] # co2 additions - co2_price = -n.global_constraints.loc["CO2Limit", "mu"] - co2_limit_de + co2_price = -co2_limit_eu - co2_limit_de # specific emissions in tons CO2/MWh according to n.links[n.links.carrier =="your_carrier].efficiency2.unique().item() specific_emissions = { "oil": 0.2571, @@ -3427,15 +4079,23 @@ def get_prices(n, region): # reported: 8/14 # Price|Secondary Energy|Electricity + max_elec_price = 4000 # to clip extreme prices during scarcity nodal_flows_ac = get_nodal_flows( n, "AC", region, query="not carrier.str.contains('gas')" ) - nodal_prices_ac = n.buses_t.marginal_price[nodal_flows_ac.columns] + nodal_prices_ac = n.buses_t.marginal_price[nodal_flows_ac.columns].clip( + upper=max_elec_price + ) + nodal_subsidies_ac = ( + n.buses_t.marginal_price[nodal_flows_ac.columns] - nodal_prices_ac + ) var["Price|Secondary Energy|Electricity"] = ( nodal_flows_ac.mul(nodal_prices_ac).values.sum() / nodal_flows_ac.values.sum() ) + var["Subsidy|Electricity"] = nodal_flows_ac.mul(nodal_subsidies_ac).values.sum() + # Price|Secondary Energy|Gases|Natural Gas var["Price|Secondary Energy|Gases|Natural Gas"] = ( costs_gen_generators(n, region, "gas")[0] + co2_cost_gas @@ -3550,7 +4210,7 @@ def get_prices(n, region): # Price|Final Energy|Transportation|Passenger|Electricity var["Price|Final Energy|Transportation|Passenger|Electricity"] = price_load( - n, "land transport EV", region + n, "land transport EV", region, clip=max_elec_price )[0] # Price|Final Energy|Transportation|Passenger|Gases # Price|Final Energy|Transportation|Passenger|Hydrogen @@ -3610,14 +4270,14 @@ def get_prices(n, region): # Price|Final Energy|Transportation|Electricity var["Price|Final Energy|Transportation|Electricity"] = price_load( - n, "land transport EV", region + n, "land transport EV", region, clip=max_elec_price )[0] # Price|Final Energy|Transportation|Electricity|Sales Margin # Price|Final Energy|Transportation|Electricity|Transport and Distribution # Price|Final Energy|Transportation|Electricity|Other Taxes # Price|Final Energy|Transportation|Liquids|Kerosene - var["Price|Final Energy|Transportation|Electricity"] = price_load( + var["Price|Final Energy|Transportation|Liquids|Kerosene"] = price_load( n, "kerosene for aviation", region )[0] # Price|Final Energy|Transportation|Liquids|Kerosene|Sales Margin @@ -3738,7 +4398,9 @@ def get_prices(n, region): "& not carrier.str.contains('industry')" "& not carrier.str.contains('urban central')", ) - nodal_prices_lv = n.buses_t.marginal_price[nodal_flows_lv.columns] + nodal_prices_lv = n.buses_t.marginal_price[nodal_flows_lv.columns].clip( + upper=max_elec_price + ) var["Price|Final Energy|Residential and Commercial|Electricity"] = ( nodal_flows_lv.mul(nodal_prices_lv).values.sum() / nodal_flows_lv.values.sum() ) @@ -3837,7 +4499,7 @@ def get_prices(n, region): # Price|Final Energy|Industry|Electricity var["Price|Final Energy|Industry|Electricity"] = price_load( - n, "industry electricity", region + n, "industry electricity", region, clip=max_elec_price )[0] # Price|Final Energy|Industry|Electricity|Sales Margin # Price|Final Energy|Industry|Electricity|Transport and Distribution @@ -4261,9 +4923,136 @@ def get_grid_investments( return var +def calculate_cfd_payments( + n, + region="DE", + vre_carriers=[ + "solar", + "solar-hsat", + 
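+        # CfD payout sketch with hypothetical numbers: the strike price is
+        # capital_cost / full-load hours. At strike = 70 EUR/MWh and market
+        # price = 50, both contract types pay the generator 20 EUR/MWh; at
+        # price = 90, a one-sided CfD (build_year < 2027) pays 0 while a
+        # two-sided CfD claws back 20.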
"solar rooftop", + "offwind-ac", + "offwind-dc", + "onwind", + ], + einheitspreis=True, + rooftop_price_factor=2, + rooftop_self_consumption=0.2, +): + build_years = [ + y for y in n.generators.build_year.unique() if y in [2025, 2030, 2035] + ] + yearly_costs = {} + + for year in build_years: + one_sided = True if year < 2027 else False + print( + f"Calculating new EEG payments for year {year} with {'one-sided' if one_sided else 'two-sided'} contracts for difference" + ) + + cost = pd.Series(dtype=float) + for carrier in vre_carriers: + # Keep in mind that, e.g., the 2025 assets represent the 2020-2025 assets + gens = n.generators.query( + f"carrier == '{carrier}' and index.str.startswith('{region}') and build_year == {year}" + ) + if gens.empty: + print( + f"No {carrier} generators found for year {year} in region {region}." + ) + cost[carrier] = 0.0 + continue + + flh = n.generators_t.p_max_pu[gens.index].mean() * 8760 + strike_price = gens.capital_cost / flh + + # Not equal to n.generators_t.p[gens.index] because of curtailment + avail_generation = n.generators_t.p_max_pu[gens.index] * gens.p_nom_opt + + if einheitspreis: + nodal_prices = n.buses_t.marginal_price[ + n.buses.query( + f"index.str.startswith('{region}') and carrier == 'AC'" + ).index + ] + + nodal_flows = ( + n.statistics.withdrawal( + bus_carrier="AC", + groupby=["name", "bus", "carrier"], + aggregate_time=False, + ) + .groupby("bus") + .sum() + .T.filter( + like=region, + axis=1, + ) + ) + + weighted_mean_nodal_price = ( + nodal_flows.mul(nodal_prices) + .sum(axis=1) + .div(nodal_flows.sum(axis=1)) + ) + + price = pd.DataFrame( + {loc: weighted_mean_nodal_price for loc in strike_price.index}, + index=weighted_mean_nodal_price.index, + ) + else: + price = n.buses_t.marginal_price[gens.bus] + price.columns = gens.index + + print("average hourly price", carrier, round(price.mean().mean(), 2)) + print("average strike price", carrier, round(strike_price.mean(), 2)) + + if carrier == "solar rooftop": + print( + "Correcting rooftop PV strike price by factor", + rooftop_price_factor, + "and accounting self-consumption with a factor", + rooftop_self_consumption, + ) + strike_price *= rooftop_price_factor + avail_generation *= 1 - rooftop_self_consumption + + if year == 2025: + if price.mean().mean() < 75: + print("price seems to be low") + + if one_sided: + remuneration = strike_price - price.clip(upper=strike_price, axis=1) + else: + remuneration = strike_price - price + + cost[carrier] = (avail_generation * remuneration).multiply( + n.snapshot_weightings.generators, axis=0 + ).values.sum() / 1e9 # in bn € + yearly_costs[year] = cost + + print("\nNew EEG payment per carrier in bn €:\n") + + return pd.DataFrame(yearly_costs) + + def get_policy(n, investment_year): var = pd.Series() + cfds = calculate_cfd_payments(n) + var["Policy|Renewable Energy Support|CfD|Solar|Rooftop"] = ( + cfds.sum(axis=1).filter(like="solar rooftop").sum() + ) + var["Policy|Renewable Energy Support|CfD|Solar|Utility"] = ( + cfds.sum(axis=1).reindex(["solar", "solar-hsat"]).sum() + ) + var["Policy|Renewable Energy Support|CfD|Wind|Onshore"] = ( + cfds.sum(axis=1).filter(like="onwind").sum() + ) + var["Policy|Renewable Energy Support|CfD|Wind|Offshore"] = ( + cfds.sum(axis=1).filter(like="offwind").sum() + ) + var["Policy|Renewable Energy Support|CfD|Total"] = cfds.sum(axis=1).sum() + # add carbon component to fossil fuels if specified if (snakemake.params.co2_price_add_on_fossils is not None) and ( investment_year in 
snakemake.params.co2_price_add_on_fossils.keys() @@ -4275,19 +5064,66 @@ def get_policy(n, investment_year): co2_limit_de = n.global_constraints.loc["co2_limit-DE", "mu"] except KeyError: co2_limit_de = 0 - var["Price|Carbon"] = ( - -n.global_constraints.loc["CO2Limit", "mu"] - co2_limit_de + co2_price_add_on - ) + try: + co2_limit_eu = n.global_constraints.loc["CO2Limit", "mu"] + except KeyError: + co2_limit_eu = n.generators.loc["co2 atmosphere", "marginal_cost"] + var["Price|Carbon"] = -co2_limit_eu - co2_limit_de + co2_price_add_on var["Price|Carbon|EU-wide Regulation All Sectors"] = ( - -n.global_constraints.loc["CO2Limit", "mu"] + co2_price_add_on + -co2_limit_eu + co2_price_add_on ) - # Price|Carbon|EU-wide Regulation Non-ETS - var["Price|Carbon|National Climate Target"] = -co2_limit_de - # Price|Carbon|National Climate Target Non-ETS + try: + var["Price|Policy|Electricity Import Independence"] = n.global_constraints.loc[ + "Electricity_import_limit-DE", "mu" + ] + except KeyError: + var["Price|Policy|Electricity Import Independence"] = 0 + + try: + var["Price|Policy|Hydrogen Import Independence"] = n.global_constraints.loc[ + "H2_import_limit-DE", "mu" + ] + except KeyError: + var["Price|Policy|Hydrogen Import Independence"] = 0 + + try: + var["Price|Policy|Methanol Import Independence"] = n.global_constraints.loc[ + "methanol_import_limit-DE", "mu" + ] + except KeyError: + var["Price|Policy|Methanol Import Independence"] = 0 + + try: + var["Price|Policy|Hydrogen Export Ban"] = n.global_constraints.loc[ + "H2_export_ban-DE", "mu" + ] + except KeyError: + var["Price|Policy|Hydrogen Export Ban"] = 0 + + try: + var["Price|Policy|Renewable Oil Import Independence"] = ( + n.global_constraints.loc["renewable_oil_import_limit-DE", "mu"] + ) + except KeyError: + var["Price|Policy|Renewable Oil Import Independence"] = 0 + + try: + var["Price|Policy|Renewable Gas Import Independence"] = ( + n.global_constraints.loc["renewable_gas_import_limit-DE", "mu"] + ) + except KeyError: + var["Price|Policy|Renewable Gas Import Independence"] = 0 + + try: + var["Price|Policy|Hydrogen Derivate Import Independence"] = ( + n.global_constraints.loc["H2_derivate_import_limit-DE", "mu"] + ) + except KeyError: + var["Price|Policy|Hydrogen Derivate Import Independence"] = 0 return var @@ -4314,8 +5150,12 @@ def get_tsc(n, country): ) # transmission_carriers = get_transmission_carriers(n).get_level_values("carrier") - transmission_lines = n.lines.carrier.isin(transmission_carriers) - transmission_links = n.links.carrier.isin(transmission_carriers) + transmission_lines = ( + n.lines.carrier.isin(transmission_carriers) & n.lines.active + ) + transmission_links = ( + n.links.carrier.isin(transmission_carriers) & n.links.active + ) # country_transmission_lines = ( (n.lines.bus0.str.contains(country)) & ~(n.lines.bus1.str.contains(country)) @@ -4385,207 +5225,178 @@ def get_tsc(n, country): return tsc - def get_link_opex(n, carriers, region, sw): - # get flow of electricity/hydrogen... 
- # multiply it with the marginal costs - supplying = n.links[ - (n.links.carrier.isin(carriers)) - & (n.links.bus0.str.startswith(region)) - & (~n.links.bus1.str.startswith(region)) - ].index - - receiving = n.links[ - (n.links.carrier.isin(carriers)) - & (~n.links.bus0.str.startswith(region)) - & (n.links.bus1.str.startswith(region)) - ].index + def get_trade_cost(n, region, carriers): + """ + Positive values mean cost for the domestic energy system (imports > exports) + Negative values mean revenue for the domestic energy system (exports > imports) + """ + export_revenue, import_cost = get_export_import(n, region, carriers, unit="€") + return import_cost - export_revenue + + var["Total Energy System Cost|Trade|Electricity"] = ( + get_trade_cost(n, region, ["AC"]) + get_trade_cost(n, region, ["DC"]) + ) / 1e9 + var["Total Energy System Cost|Trade|Efuels"] = ( + get_trade_cost(n, region, ["renewable oil", "renewable gas", "methanol"]) / 1e9 + ) + var["Total Energy System Cost|Trade|Hydrogen"] = ( + get_trade_cost( + n, + region, + ["H2 pipeline", "H2 pipeline (Kernnetz)", "H2 pipeline retrofitted"], + ) + / 1e9 + ) + var["Total Energy System Cost|Trade"] = ( + var["Total Energy System Cost|Trade|Electricity"] + + var["Total Energy System Cost|Trade|Efuels"] + + var["Total Energy System Cost|Trade|Hydrogen"] + ) + # Total Energy System Cost in billion EUR2020/yr + var["Total Energy System Cost|Non Trade"] = get_tsc(n, region).sum().sum() / 1e9 - trade_out = 0 - for index in supplying: - # price of energy in trade country - marg_price = n.buses_t.marginal_price[n.links.loc[index].bus0] - trade = n.links_t.p1[index].mul(sw) - trade_out += marg_price.mul(trade).sum() - - trade_in = 0 - for index in receiving: - # price of energy in Germany - marg_price = n.buses_t.marginal_price[n.links.loc[index].bus0] - trade = n.links_t.p1[index].mul(sw) - trade_in += marg_price.mul(trade).sum() - return abs(trade_in) - abs(trade_out) - # > 0: costs for Germany - # < 0: profit for Germany - - def get_line_opex(n, region, sw): - supplying = n.lines[ - (n.lines.carrier.isin(["AC"])) - & (n.lines.bus0.str.startswith(region)) - & (~n.lines.bus1.str.startswith(region)) - ].index - receiving = n.lines[ - (n.lines.carrier.isin(["AC"])) - & (~n.lines.bus0.str.startswith(region)) - & (n.lines.bus1.str.startswith(region)) - ].index + var["Total Energy System Cost"] = ( + var["Total Energy System Cost|Non Trade"] + + var["Total Energy System Cost|Trade"] + ) - # i have to clip the trade - net_out = 0 - for index in supplying: - trade = n.lines_t.p1[index].mul(sw) - trade_out = trade.clip(lower=0) # positive - trade_in = trade.clip(upper=0) # negative - marg_price_DE = n.buses_t.marginal_price[n.lines.loc[index].bus0] - marg_price_EU = n.buses_t.marginal_price[n.lines.loc[index].bus1] - net_out += ( - trade_out.mul(marg_price_DE).sum() + trade_in.mul(marg_price_EU).sum() - ) - # net_out > 0: Germany is exporting more electricity - # net_out < 0: Germany is importing more electricity - - net_in = 0 - for index in receiving: - trade = n.lines_t.p1[index].mul(sw) - trade_in = trade.clip(lower=0) # positive - trade_out = trade.clip(upper=0) # negative - trade_out = trade_out.clip(upper=0) - marg_price_EU = n.buses_t.marginal_price[n.lines.loc[index].bus0] - marg_price_DE = n.buses_t.marginal_price[n.lines.loc[index].bus1] - net_in += ( - trade_in.mul(marg_price_EU).sum() + trade_out.mul(marg_price_DE).sum() - ) - # net_in > 0: Germany is importing more electricity - # net_in < 0: Germany is exporting more electricity - - 
return -net_out + net_in - - trade_carriers = [ - "DC", - "H2 pipeline", - "H2 pipeline (Kernnetz)", - "H2 pipeline retrofittedrenewable oil", - "renewable gas", - "methanol", - ] + var["Total Energy System Cost|EU"] = ( + n.statistics.capex().sum() + n.statistics.opex().sum() + ) / 1e9 - sw = n.snapshot_weightings.generators - tsc = get_tsc(n, region).sum().sum() - trade_costs = get_link_opex(n, trade_carriers, region, sw) + get_line_opex( - n, region, sw + rev = n.statistics.revenue(groupby=["bus"]).loc["Load"] + var["Total Energy System Cost|Load Revenue"] = ( + rev[rev.index.str.startswith(region)].sum() / 1e9 ) - # Cost|Total Energy System Cost in billion EUR2020/yr - var["Cost|Total Energy System Cost"] = round((tsc + trade_costs) / 1e9, 4) - return var -def get_trade(n, region): - var = pd.Series() - - def get_export_import_links(n, region, carriers): - # note: links can also used bidirectional if efficiency=1 (e.g. "H2 pipeline retrofitted") - outgoing = n.links.index[ - (n.links.carrier.isin(carriers)) - & (n.links.bus0.str[:2] == region) - & (n.links.bus1.str[:2] != region) - ] - - incoming = n.links.index[ - (n.links.carrier.isin(carriers)) - & (n.links.bus0.str[:2] != region) - & (n.links.bus1.str[:2] == region) - ] - - exporting_p = ( - # if p0 > 0 (=clip(lower=0)) system is withdrawing from bus0 (DE) and feeding into bus1 (non-DE) -> export - n.links_t.p0.loc[:, outgoing] - .clip(lower=0) - .multiply(n.snapshot_weightings.generators, axis=0) - .values.sum() - + - # if p1 > 0 system is withdrawing from bus1 (DE) and feeding into bus0 (non-DE) -> export - n.links_t.p1.loc[:, incoming] - .clip(lower=0) - .multiply(n.snapshot_weightings.generators, axis=0) - .values.sum() - ) - - importing_p = ( - # if p1 < 0 (=clip(upper=0)) system is feeding into bus1 (DE) and withdrawing from bus0 (non-DE) -> import (with negative sign here) - n.links_t.p1.loc[:, incoming] - .clip(upper=0) - .multiply(n.snapshot_weightings.generators, axis=0) - .values.sum() - * -1 - + - # if p0 < 0 (=clip(upper=0)) system is feeding into bus0 (DE) and withdrawing from bus1 (non-DE) -> import (with negative sign here) - n.links_t.p0.loc[:, outgoing] - .clip(upper=0) - .multiply(n.snapshot_weightings.generators, axis=0) - .values.sum() - * -1 - ) - - return exporting_p, importing_p +def get_export_import(n, region, carriers, aggregate=True, unit="MWh"): + # note: links can also used bidirectional if efficiency=1 (e.g. "H2 pipeline retrofitted") + if "AC" not in carriers: + df = n.links + df_t = n.links_t + if "AC" in carriers: + if len(carriers) > 1: + raise NotImplementedError( + "AC lines cannot be combined with other carriers. Use carrier=['AC'] to get export_import for n.lines." 
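+            # Usage sketch (volumes in MWh, monetary values in EUR):
+            #   exp_mwh, imp_mwh = get_export_import(n, "DE", ["AC"])
+            #   exp_eur, imp_eur = get_export_import(n, "DE", ["DC"], unit="€")
+            # With unit="€", flows are valued at the marginal price of the
+            # German end of each line or link.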
+ ) + df = n.lines + df_t = n.lines_t - # Trade|Secondary Energy|Electricity|Volume - outgoing_ac = n.lines.index[ - (n.lines.carrier == "AC") - & (n.lines.bus0.str[:2] == region) - & (n.lines.bus1.str[:2] != region) + outgoing = df[ + (df.carrier.isin(carriers)) + & (df.bus0.str[:2] == region) + & (df.bus1.str[:2] != region) ] - incoming_ac = n.lines.index[ - (n.lines.carrier == "AC") - & (n.lines.bus0.str[:2] != region) - & (n.lines.bus1.str[:2] == region) + incoming = df[ + (df.carrier.isin(carriers)) + & (df.bus0.str[:2] != region) + & (df.bus1.str[:2] == region) ] - - exporting_p_ac = ( - # if p0 > 0 (=clip(lower=0)) system is withdrawing from bus0 (DE) and feeding into bus1 (non-DE) -> export - n.lines_t.p0.loc[:, outgoing_ac] + # if p0 > 0 (=clip(lower=0)) system is withdrawing from bus0 (DE) and feeding into bus1 (non-DE) -> export + export_outgoing = ( + df_t.p0.loc[:, outgoing.index] .clip(lower=0) .multiply(n.snapshot_weightings.generators, axis=0) - .values.sum() - + - # if p1 > 0 system is withdrawing from bus1 (DE) and feeding into bus0 (non-DE) -> export - n.lines_t.p1.loc[:, incoming_ac] + ) + if unit == "€": + # bus0 is DE for outgoing links + domestic_prices = pd.concat( + [ + n.buses_t.marginal_price[bus].rename(link) + for link, bus in outgoing.bus0.items() + ], + axis=1, + ) + export_outgoing *= domestic_prices + + # if p1 > 0 system is withdrawing from bus1 (DE) and feeding into bus0 (non-DE) -> export + export_incoming = ( + df_t.p1.loc[:, incoming.index] .clip(lower=0) .multiply(n.snapshot_weightings.generators, axis=0) - .values.sum() ) + if unit == "€": + # bus1 is DE for incoming links + domestic_prices = pd.concat( + [ + n.buses_t.marginal_price[bus].rename(link) + for link, bus in incoming.bus1.items() + ], + axis=1, + ) + export_incoming *= domestic_prices - importing_p_ac = ( - # if p1 < 0 (=clip(upper=0)) system is feeding into bus1 (DE) and withdrawing from bus0 (non-DE) -> import (with negative sign here) - n.lines_t.p1.loc[:, incoming_ac] + exporting_p = pd.concat([export_outgoing, export_incoming], axis=1) + if aggregate: + exporting_p = exporting_p.values.sum() + + # if p1 < 0 (=clip(upper=0)) system is feeding into bus1 (DE) and withdrawing from bus0 (non-DE) -> import (with negative sign here) + import_incoming = ( + df_t.p1.loc[:, incoming.index] .clip(upper=0) .multiply(n.snapshot_weightings.generators, axis=0) - .values.sum() * -1 - + - # if p0 < 0 (=clip(upper=0)) system is feeding into bus0 (DE) and withdrawing from bus1 (non-DE) -> import (with negative sign here) - n.lines_t.p0.loc[:, outgoing_ac] + ) + if unit == "€": + # bus1 is DE for incoming links + domestic_prices = pd.concat( + [ + n.buses_t.marginal_price[bus].rename(link) + for link, bus in incoming.bus1.items() + ], + axis=1, + ) + import_incoming *= domestic_prices + + # if p0 < 0 (=clip(upper=0)) system is feeding into bus0 (DE) and withdrawing from bus1 (non-DE) -> import (with negative sign here) + import_outgoing = ( + df_t.p0.loc[:, outgoing.index] .clip(upper=0) .multiply(n.snapshot_weightings.generators, axis=0) - .values.sum() * -1 ) + if unit == "€": + # bus0 is DE for outgoing links + domestic_prices = pd.concat( + [ + n.buses_t.marginal_price[bus].rename(link) + for link, bus in outgoing.bus0.items() + ], + axis=1, + ) + import_outgoing *= domestic_prices - exports_dc, imports_dc = get_export_import_links(n, region, ["DC"]) + importing_p = pd.concat([import_outgoing, import_incoming], axis=1) + if aggregate: + importing_p = importing_p.values.sum() - var["Trade|Secondary 
Energy|Electricity|Volume"] = ( - exporting_p_ac - importing_p_ac - ) + (exports_dc - imports_dc) + return exporting_p, importing_p + + +def get_trade(n, region): + var = pd.Series() + + # Trade|Secondary Energy|Electricity|Volume + exports_ac, imports_ac = get_export_import(n, region, ["AC"]) + + exports_dc, imports_dc = get_export_import(n, region, ["DC"]) + + var["Trade|Secondary Energy|Electricity|Volume"] = (exports_ac - imports_ac) + ( + exports_dc - imports_dc + ) var["Trade|Secondary Energy|Electricity|Gross Import|Volume"] = ( - importing_p_ac + imports_dc + imports_ac + imports_dc ) # var["Trade|Secondary Energy|Electricity|Volume|Exports"] = \ # (exporting_p_ac + exports_dc) # Trade|Secondary Energy|Hydrogen|Volume h2_carriers = ["H2 pipeline", "H2 pipeline (Kernnetz)", "H2 pipeline retrofitted"] - exports_h2, imports_h2 = get_export_import_links(n, region, h2_carriers) + exports_h2, imports_h2 = get_export_import(n, region, h2_carriers) var["Trade|Secondary Energy|Hydrogen|Volume"] = exports_h2 - imports_h2 var["Trade|Secondary Energy|Hydrogen|Gross Import|Volume"] = imports_h2 # var["Trade|Secondary Energy|Hydrogen|Volume|Exports"] = \ @@ -4618,7 +5429,7 @@ def get_export_import_links(n, region, carriers): EU_renewable_oil.filter(like="bio").sum() / EU_renewable_oil.sum() ) - exports_oil_renew, imports_oil_renew = get_export_import_links( + exports_oil_renew, imports_oil_renew = get_export_import( n, region, ["renewable oil"] ) @@ -4630,7 +5441,7 @@ def get_export_import_links(n, region, carriers): imports_oil_renew * EU_bio_fraction ) - exports_meoh, imports_meoh = get_export_import_links(n, region, ["methanol"]) + exports_meoh, imports_meoh = get_export_import(n, region, ["methanol"]) var["Trade|Secondary Energy|Liquids|Hydrogen|Volume"] = ( exports_oil_renew * (1 - DE_bio_fraction) @@ -4674,7 +5485,7 @@ def get_export_import_links(n, region, carriers): assert region == "DE" # only DE is implemented at the moment - exports_gas_renew, imports_gas_renew = get_export_import_links( + exports_gas_renew, imports_gas_renew = get_export_import( n, region, ["renewable gas"] ) var["Trade|Secondary Energy|Gases|Hydrogen|Volume"] = exports_gas_renew * ( @@ -4697,7 +5508,7 @@ def get_export_import_links(n, region, carriers): gas_fractions = _get_fuel_fractions(n, region, "gas") if "gas pipeline" in n.links.carrier.unique(): - exports_gas, imports_gas = get_export_import_links( + exports_gas, imports_gas = get_export_import( n, region, ["gas pipeline", "gas pipeline new"] ) var["Trade|Primary Energy|Gas|Volume"] = ( @@ -4714,44 +5525,33 @@ def get_export_import_links(n, region, carriers): # Biomass Trade - biomass_potential_DE = ( - n.stores.query("carrier.str.contains('solid biomass')") - .filter(like=region, axis=0) - .e_nom.sum() + biomass_primary_gens = n.generators.query( + f"index.str.startswith('{region}') and index.str.endswith('solid biomass')" + ) # use endswith to avoid the biomass transport generators + biomass_transport_gens = n.generators.query( + f"index.str.startswith('{region}') and index.str.endswith('solid biomass transported')" ) - biomass_usage_local = ( - n.stores_t.p[ - n.stores.query("carrier.str.contains('solid biomass')") - .filter(like=region, axis=0) - .index - ] - .sum() - .multiply(n.snapshot_weightings["stores"].unique().item()) - .sum() - ) + local_biomass_potential = biomass_primary_gens.e_sum_max.sum() - biomass_usage_transported = ( - n.generators_t.p[ - n.generators.query("carrier.str.contains('solid biomass')") - .filter(like=region, axis=0) - .index - 
] - .sum() - .multiply(n.snapshot_weightings["generators"].unique().item()) + local_biomass_usage = ( + ( + n.generators_t.p[ + biomass_primary_gens.index.union(biomass_transport_gens.index) + ] + ) + .sum(axis=1) + .multiply(n.snapshot_weightings.generators) .sum() ) - biomass_net_exports = ( - biomass_potential_DE - biomass_usage_local - biomass_usage_transported - ) - var["Trade|Primary Energy|Biomass|Volume"] = biomass_net_exports + biomass_imports = local_biomass_usage - local_biomass_potential + + var["Trade|Primary Energy|Biomass|Net Imports"] = biomass_imports logger.info( f"""Share of imported biomass: { - round( - -biomass_net_exports / (biomass_potential_DE + biomass_net_exports), 3 - ) + round(biomass_imports / local_biomass_usage, 3) }""" ) @@ -5031,7 +5831,11 @@ def hack_DC_projects(n, p_nom_start, p_nom_planned, model_year, snakemake, costs ) # Future projects should not have any capacity - assert isclose(n.links.loc[future_projects, "p_nom_opt"], 0).all() + try: + assert isclose(n.links.loc[future_projects, "p_nom_opt"], 0).all() + except AssertionError: + logger.warning("Future projects have non-zero p_nom_opt. Overwriting with 0.") + n.links.loc[future_projects, "p_nom_opt"] = 0 # Setting p_nom to 0 such that n.statistics does not compute negative expanded capex or capacity additions # Setting p_nom_min to 0 for the grid_expansion calculation @@ -5214,11 +6018,13 @@ def get_ariadne_var( industry_production, ), get_prices(n, region), + get_vre_market_values(n, region), get_emissions(n, region, energy_totals, industry_demand), get_policy(n, year), get_trade(n, region), get_economy(n, region), get_system_cost(n, region), + get_producer_rents(n, region), ] ) @@ -5286,6 +6092,7 @@ def get_data( var["Demand|Electricity"] = var.reindex( [ "Secondary Energy|Electricity|Storage Losses", + "Secondary Energy|Electricity|Transmission Losses", "Secondary Energy Input|Electricity|Heat", "Secondary Energy Input|Electricity|Hydrogen", "Secondary Energy Input|Electricity|Liquids", @@ -5297,6 +6104,12 @@ def get_data( ] ).sum() + var["Electricity System Cost"] = ( + var["Total Energy System Cost|Trade|Electricity"] + + var["Electricity System Cost|CAPEX"] + + var["Electricity System Cost|OPEX"] + ) + data = [] for v in var.index: try: @@ -5331,7 +6144,10 @@ def get_data( opts="", ll="vopt", sector_opts="None", - run="KN2045_Mix", + run="HighDemand", + decision="LowDemand", + sensitivity="base", + eeg_level=0.7, ) configure_logging(snakemake) config = snakemake.config @@ -5382,6 +6198,12 @@ def get_data( # Load data _networks = [pypsa.Network(fn) for fn in snakemake.input.networks] + if snakemake.input.get("eeg_sweep_networks"): + _sweep_networks = [ + pypsa.Network(fn) for fn in snakemake.input.eeg_sweep_networks + ] + N_st = len(_sweep_networks) + _networks[-N_st:] = _sweep_networks nhours = _networks[0].snapshot_weightings.generators.sum() nyears = nhours / 8760 @@ -5406,7 +6228,7 @@ def get_data( if "debug" == "debug": # For debugging var = pd.Series() - idx = 6 + idx = 0 n = networks[idx] c = costs[idx] _industry_demand = industry_demands[idx] @@ -5426,6 +6248,9 @@ def get_data( yearly_dfs = [] for i, year in enumerate(planning_horizons): print(f"Getting data for year {year}...") + decision = "" + if snakemake.wildcards.get("decision"): + decision = "_decision_" + snakemake.wildcards.decision yearly_dfs.append( get_data( networks[i], @@ -5437,7 +6262,7 @@ def get_data( "DE", year=year, version=config["version"], - scenario=snakemake.wildcards.run, + scenario=snakemake.wildcards.run + 
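+                # e.g. run = "HighDemand" with decision wildcard "LowDemand"
+                # yields the scenario label "HighDemand_decision_LowDemand"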
decision, ) ) @@ -5456,12 +6281,14 @@ def get_data( ac_projects_invest = df.query( "Variable == 'Investment|Energy Supply|Electricity|Transmission|AC|NEP|Onshore'" )[planning_horizons].values.sum() - + active_years = [ + int(year) for year in modelyears if int(year) in [2025, 2030, 2035, 2040] + ] df.loc[ df.query( "Variable == 'Investment|Energy Supply|Electricity|Transmission|AC|Übernahme|Startnetz Delta'" ).index, - [2025, 2030, 2035, 2040], + active_years, ] += (ac_startnetz - ac_projects_invest) / 4 for suffix in ["|AC|NEP", "|AC", "", " and Distribution"]: @@ -5469,7 +6296,7 @@ def get_data( df.query( f"Variable == 'Investment|Energy Supply|Electricity|Transmission{suffix}'" ).index, - [2025, 2030, 2035, 2040], + active_years, ] += (ac_startnetz - ac_projects_invest) / 4 print("Assigning mean investments of year and year + 5 to year.") diff --git a/scripts/pypsa-de/modify_industry_demand.py b/scripts/pypsa-de/modify_industry_production.py similarity index 99% rename from scripts/pypsa-de/modify_industry_demand.py rename to scripts/pypsa-de/modify_industry_production.py index 6494ce8c2..a473210d5 100644 --- a/scripts/pypsa-de/modify_industry_demand.py +++ b/scripts/pypsa-de/modify_industry_production.py @@ -23,7 +23,7 @@ if __name__ == "__main__": if "snakemake" not in globals(): snakemake = mock_snakemake( - "modify_industry_demand", + "modify_industry_production", simpl="", clusters=22, opts="", diff --git a/scripts/pypsa-de/modify_prenetwork.py b/scripts/pypsa-de/modify_prenetwork.py index 344efb76c..9d403305d 100644 --- a/scripts/pypsa-de/modify_prenetwork.py +++ b/scripts/pypsa-de/modify_prenetwork.py @@ -15,15 +15,16 @@ def first_technology_occurrence(n): """ - Sets p_nom_extendable to false for carriers with configured first - occurrence if investment year is before configured year. + Drop configured technologies before configured year. """ for c, carriers in snakemake.params.technology_occurrence.items(): for carrier, first_year in carriers.items(): if int(snakemake.wildcards.planning_horizons) < first_year: - logger.info(f"{carrier} not extendable before {first_year}.") - n.df(c).loc[n.df(c).carrier == carrier, "p_nom_extendable"] = False + to_drop = n.df(c).query(f"carrier == '{carrier}'").index + if to_drop.empty: + continue + n.remove(c, to_drop) def fix_new_boiler_profiles(n): @@ -397,7 +398,7 @@ def unravel_carbonaceous_fuels(n): carrier="renewable oil", p_nom=1e6, p_min_pu=0, - marginal_cost=0.01, + marginal_cost=1, ) if snakemake.params.efuel_export_ban: @@ -479,7 +480,7 @@ def unravel_carbonaceous_fuels(n): carrier="methanol", p_nom=1e6, p_min_pu=0, - marginal_cost=0.01, + marginal_cost=1, ) if snakemake.params.efuel_export_ban: @@ -702,7 +703,7 @@ def unravel_gasbus(n, costs): carrier="renewable gas", p_nom=1e6, p_min_pu=0, - marginal_cost=0.01, + marginal_cost=1, ) if snakemake.params.efuel_export_ban: @@ -825,6 +826,31 @@ def must_run(n, params): n.links.loc[links_i, "p_min_pu"] = p_min_pu +def modify_rescom_demand(n): + logger.info( + "Modifying residential and commercial electricity demand in Germany towards UBA Projektionsbericht 2025." 
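+    # Interpretation (assumption, not stated in the source): the numbers below
+    # are TWh, namely GHD demand (94.9) and household demand (143.5), each net
+    # of the heat-pump electricity (3.1 and 23.6) that PyPSA models
+    # endogenously. The rescaling applied further down is then simply
+    #   scale_factor = uba_rescom * (modelled hours / 8760) / current DE demand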
+ ) + # UBA Projektionsbericht 2025, Tabelle 18, Verbrauch GHD + Haushalte ohne Wärmepumpen, 2030 + + uba_rescom = (94.9 - 3.1 + 143.5 - 23.6) * 1e6 + fraction_modelyear = n.snapshot_weightings.generators.sum() / 8760 + loads_i = n.loads[ + (n.loads.carrier == "electricity") & n.loads.index.str.startswith("DE") + ] + old_demand = ( + n.loads_t.p_set.loc[:, loads_i.index] + .sum(axis=1) + .mul(n.snapshot_weightings.generators) + .sum() + ) + new_demand = uba_rescom * fraction_modelyear + scale_factor = new_demand / old_demand + logger.info( + f"Scaling residential and commercial electricity loads in Germany by {scale_factor:.2f}.\nPrevious total demand: {old_demand:.2f} MWh/a, new total demand: {new_demand:.2f} MWh/a." + ) + n.loads_t.p_set.loc[:, loads_i.index] *= scale_factor + + def modify_mobility_demand(n, mobility_data_file): """ Change loads in Germany to use exogenous data for road demand. @@ -1192,10 +1218,39 @@ def drop_duplicate_transmission_projects(n): f"Dropping transmission projects with build year <= {year}. They are likely already in the OSM base network." ) # Maybe one 2024 line is missing in the OSM base network - to_drop = n.lines.query("0 < build_year <= @year").index + to_drop = n.lines.query(f"0 < build_year <= {year}").index n.remove("Line", to_drop) + to_deactivate = n.links.query( + f"carrier == 'DC' and (0 < build_year <= {year})" + ).index + n.links.loc[to_deactivate, "active"] = False + + +def deactivate_late_transmission_projects(n, current_year): + nep_year = snakemake.params.onshore_nep_force["cutout_year"] + + cutout_year = min(nep_year, current_year) + + to_deactivate = n.links.query( + f"carrier == 'DC' and build_year > {cutout_year}" + ).index + n.links.loc[to_deactivate, "active"] = False + + to_deactivate = n.lines.query(f"build_year > {cutout_year}").index + n.lines.loc[to_deactivate, "active"] = False + + +def fix_transmission_DE(n): + to_fix = n.lines.query("bus0.str.contains('DE') or bus1.str.contains('DE')").index + n.lines.loc[to_fix, "s_nom_extendable"] = False + + to_fix = n.links.query( + "(bus0.str.contains('DE') or bus1.str.contains('DE')) and carrier=='DC'" + ).index + n.links.loc[to_fix, "p_nom_extendable"] = False + def scale_capacity(n, scaling): """ @@ -1255,6 +1310,167 @@ def scale_capacity(n, scaling): ] +def modify_industry_demand( + n, + year, + industry_energy_demand_file, + industry_production_file, + sector_ratios_file, + scale_non_energy=False, +): + logger.info("Modifying industry demand in Germany.") + + industry_production = pd.read_csv( + industry_production_file, + index_col="kton/a", + ).rename_axis("country") + + sector_ratios = pd.read_csv( + sector_ratios_file, + header=[0, 1], + index_col=0, + ).rename_axis("carrier") + + new_demand = pd.read_csv( + industry_energy_demand_file, + index_col=0, + )[str(year)].mul(1e6) + + subcategories = ["HVC", "Methanol", "Chlorine", "Ammonia"] + carrier = ["hydrogen", "methane", "naphtha"] + + ip = industry_production.loc["DE", subcategories] # kt/a + sr = sector_ratios["DE"].loc[carrier, subcategories] # MWh/tMaterial + _non_energy = sr.multiply(ip).sum(axis=1) * 1e3 + + non_energy = pd.Series( + { + "industry electricity": 0.0, + "low-temperature heat for industry": 0.0, + "solid biomass for industry": 0.0, + "H2 for industry": _non_energy["hydrogen"], + "coal for industry": 0.0, + "gas for industry": _non_energy["methane"], + "naphtha for industry": _non_energy["naphtha"], + } + ) + + _industry_loads = [ + "solid biomass for industry", + "gas for industry", + "H2 for industry", + 
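+        # (list continues below; for each of these carriers the scaling target
+        # derived later is the energy demand from the UBA file plus the
+        # reconstructed feedstock share, i.e.
+        #   target[c] = new_demand[c] + non_energy_corrected[c])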
"industry methanol", + "naphtha for industry", + "low-temperature heat for industry", + "industry electricity", + "coal for industry", + ] + industry_loads = n.loads.query( + f"carrier in {_industry_loads} and bus.str.startswith('DE')" + ) + + if scale_non_energy: + new_demand_without_non_energy = new_demand.sum() + pypsa_industry_without_non_energy = ( + industry_loads.p_set.sum() * 8760 - non_energy.sum() + ) + non_energy_scaling_factor = ( + new_demand_without_non_energy / pypsa_industry_without_non_energy + ) + logger.info( + f"Scaling non-energy use by {non_energy_scaling_factor:.2f} to match UBA data." + ) + non_energy_corrected = non_energy * non_energy_scaling_factor + else: + non_energy_corrected = non_energy + + for carrier in [ + "industry electricity", + # "H2 for industry", # skip because UBA is too optimistic on H2 + "solid biomass for industry", + "low-temperature heat for industry", + ]: + loads_i = n.loads.query( + f"carrier == '{carrier}' and bus.str.startswith('DE')" + ).index + logger.info( + f"Total load of {carrier} in DE before scaling: {n.loads.loc[loads_i, 'p_set'].sum() * 8760:.2f} MWh/a" + ) + total_load = industry_loads.p_set.loc[loads_i].sum() * 8760 + scaling_factor = ( + new_demand[carrier] + non_energy_corrected[carrier] + ) / total_load + n.loads.loc[loads_i, "p_set"] *= scaling_factor + logger.info( + f"Total load of {carrier} in DE after scaling: {n.loads.loc[loads_i, 'p_set'].sum() * 8760:.2f} MWh/a" + ) + + # Fossil fuels are aggregated in UBA MWMS but have to be scaled separately + fossil_loads = industry_loads.query("carrier.str.contains('gas|coal|naphtha')") + fossil_totals = ( + fossil_loads[["p_set", "carrier"]].groupby("carrier").p_set.sum() * 8760 + ) + fossil_energy = fossil_totals - non_energy[fossil_totals.index] + fossil_energy_corrected = fossil_energy * new_demand["fossil"] / fossil_energy.sum() + fossil_totals_corrected = ( + fossil_energy_corrected + non_energy_corrected[fossil_totals.index] + ) + for carrier in fossil_totals.index: + loads_i = fossil_loads.query( + f"carrier == '{carrier}' and bus.str.startswith('DE')" + ).index + n.loads.loc[loads_i, "p_set"] *= ( + fossil_totals_corrected[carrier] / fossil_totals[carrier] + ) + + +def remove_flexibility_options(n, current_year): + logger.info("Removing decentral TES and BEV DSM from the network.") + n.remove("Store", n.stores.query("carrier == 'EV battery'").index) + carriers_to_drop = [ + "urban decentral water tanks charger", + "urban decentral water tanks discharger", + "urban decentral water tanks", + "rural water tanks charger", + "rural water tanks discharger", + "rural water tanks", + ] + n.remove("Link", n.links.query(f"carrier in {carriers_to_drop}").index) + n.remove("Store", n.stores.query(f"carrier in {carriers_to_drop}").index) + n.remove("Bus", n.buses.query(f"carrier in {carriers_to_drop}").index) + + if current_year == 2030: + logger.info("Removing decentral TES and batteries from the network.") + carriers_to_drop = [ + "home battery charger", + "home battery discharger", + "home battery", + "battery charger", + "battery discharger", + "battery", + ] + n.remove( + "Link", + n.links.query( + f"carrier in {carriers_to_drop} and build_year == {current_year}" + ).index, + ) + n.remove( + "Store", + n.stores.query( + f"carrier in {carriers_to_drop} and build_year == {current_year}" + ).index, + ) + + +def restrict_cross_border_flows(n, s_max_pu): + logger.info( + f"Restricting cross-border flows between all countries (AC) to {s_max_pu}." 
+ ) + cross_border_lines = n.lines.index[n.lines.bus0.str[:2] != n.lines.bus1.str[:2]] + n.lines.loc[cross_border_lines, "s_max_pu"] = s_max_pu + + if __name__ == "__main__": if "snakemake" not in globals(): snakemake = mock_snakemake( @@ -1264,8 +1480,8 @@ def scale_capacity(n, scaling): opts="", ll="vopt", sector_opts="none", - planning_horizons="2025", - run="KN2045_Mix", + planning_horizons="2030", + run="LowRES", ) configure_logging(snakemake) @@ -1337,4 +1553,55 @@ def scale_capacity(n, scaling): sanitize_custom_columns(n) + if current_year in snakemake.params.uba_for_industry: + if current_year not in [2025, 2030, 2035]: + logger.error( + "The UBA for industry data is only available for 2025, 2030 and 2035. Please check your config." + ) + modify_industry_demand( + n, + current_year, + snakemake.input.new_industrial_energy_demand, + snakemake.input.industrial_production_per_country_tomorrow, + snakemake.input.industry_sector_ratios, + scale_non_energy=snakemake.params.scale_industry_non_energy, + ) + + if current_year in snakemake.params.uba_for_rescom_electricity: + if current_year != 2030: + logger.error( + "The UBA for rescom electricity data is only available for 2030. Please check your config." + ) + modify_rescom_demand(n) + # For regret runs + deactivate_late_transmission_projects(n, current_year) + + if snakemake.params.no_flex_lt_run: + logger.info("Run without flexibility options detected.") + remove_flexibility_options(n, current_year) + + fix_transmission_DE(n) + + if current_year in snakemake.params.restrict_cross_border_flows: + restrict_cross_border_flows( + n, snakemake.params.restrict_cross_border_flows[current_year] + ) + + if current_year in snakemake.params.get("force_co2_price", {}): + logger.info("Adding negative CO2 generator and dropping co2 limits.") + + n.add( + "Generator", + "co2 atmosphere", + bus="co2 atmosphere", + p_min_pu=-1, + p_max_pu=0, + p_nom_extendable=True, + carrier="co2", + marginal_cost=-snakemake.params.force_co2_price[current_year], + ) + n.global_constraints.drop("CO2Limit", inplace=True) + # Instead of removing it here, never add it in additional_functionality + # n.global_constraints.drop("co2_limit-DE", inplace=True) + n.export_to_netcdf(snakemake.output.network) diff --git a/scripts/pypsa-de/plot_ariadne_scenario_comparison.py b/scripts/pypsa-de/plot_scenario_comparison.py similarity index 55% rename from scripts/pypsa-de/plot_ariadne_scenario_comparison.py rename to scripts/pypsa-de/plot_scenario_comparison.py index d2904d8fe..9559514ce 100644 --- a/scripts/pypsa-de/plot_ariadne_scenario_comparison.py +++ b/scripts/pypsa-de/plot_scenario_comparison.py @@ -9,22 +9,22 @@ from scripts._helpers import mock_snakemake -def scenario_plot(df, var): +def scenario_plot(df, output_dir, var): unit = df._get_label_or_level_values("Unit")[0] if var.startswith("Investment"): unit = "billion EUR2020/yr" df = df.droplevel("Unit") ax = df.T.plot(xlabel="years", ylabel=str(unit), title=str(var)) - prefix = snakemake.config["run"]["prefix"] var = var.replace("|", "-").replace("\\", "-").replace(" ", "-").replace("/", "-") - ax.figure.savefig(f"results/{prefix}/ariadne_comparison/{var}", bbox_inches="tight") + ax.figure.savefig(f"{output_dir}/{var}.png", bbox_inches="tight", dpi=100) plt.close(ax.figure) if __name__ == "__main__": if "snakemake" not in globals(): snakemake = mock_snakemake( - "ariadne_all", + "plot_scenario_comparison_regrets", + sensitivity="base", # simpl="", # clusters=22, # opts="", @@ -35,7 +35,18 @@ def scenario_plot(df, var): ) 
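+    # With exactly four regret-variable files, the list below is reshuffled
+    # 0,1,2,3 -> 0,3,2,1, presumably so that matching decision/realization
+    # pairs end up adjacent in the plots; with 9 or 16 files the NoFlex
+    # results are instead filtered out by filename.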
dfs = [] - for file in snakemake.input.exported_variables: + fns = snakemake.input.exported_variables + if "regret_variables" in fns[0] and len(fns) == 4: + # reorder indices of fns as 0312 + fns = [fns[i] for i in [0, 3, 2, 1] if i < len(fns)] + if "regret_variables" in fns[0] and len(fns) in [9, 16]: + fns = [ + fn for fn in fns if "NoFlex/" not in fn + ] # !!! CAVEAT DISPATCHING ON FILENAME + if len(fns) > 16: + raise ValueError("Too many files to plot together.") + + for file in fns: _df = pd.read_excel( file, index_col=list(range(5)), sheet_name="data" ).droplevel(["Model", "Region"]) @@ -43,9 +54,10 @@ def scenario_plot(df, var): df = pd.concat(dfs, axis=0) - prefix = snakemake.config["run"]["prefix"] - if not os.path.exists(f"results/{prefix}/ariadne_comparison/"): - os.mkdir(f"results/{prefix}/ariadne_comparison/") + output_dir = snakemake.params.output_dir + + if not os.path.exists(output_dir): + os.makedirs(output_dir) for var in df._get_label_or_level_values("Variable"): - scenario_plot(df.xs(var, level="Variable"), var) + scenario_plot(df.xs(var, level="Variable"), output_dir, var) diff --git a/scripts/pypsa-de/prepare_regret_network.py b/scripts/pypsa-de/prepare_regret_network.py new file mode 100644 index 000000000..baac54480 --- /dev/null +++ b/scripts/pypsa-de/prepare_regret_network.py @@ -0,0 +1,778 @@ +# Import the function dynamically since the folder name contains a hyphen which is invalid in a module name. +import importlib.util +import logging +import pathlib + +import numpy as np +import pandas as pd +import pypsa + +from scripts._helpers import ( + configure_logging, + mock_snakemake, + set_scenario_config, + update_config_from_wildcards, +) +from scripts.solve_network import prepare_network + +_spec_path = pathlib.Path(__file__).resolve().parent / "modify_prenetwork.py" +_spec = importlib.util.spec_from_file_location( + "scripts.pypsa_de.modify_prenetwork", _spec_path +) +_modify_prenetwork = importlib.util.module_from_spec(_spec) +_spec.loader.exec_module(_modify_prenetwork) +remove_flexibility_options = _modify_prenetwork.remove_flexibility_options + +logger = logging.getLogger(__name__) + +uc_params_custom = { + "OCGT": { + "p_min_pu": 0.2, + "start_up_cost": 20, + "shut_down_cost": 20, + "min_up_time": 1, + "min_down_time": 1, + "ramp_limit_up": 1, + }, + "CCGT": { + "p_min_pu": 0.45, + "start_up_cost": 80, + "shut_down_cost": 80, + "min_up_time": 3, + "min_down_time": 2, + "ramp_limit_up": 1, + }, + "coal": { + "p_min_pu": 0.5, + "start_up_cost": 200, + "shut_down_cost": 200, + "min_up_time": 24, + "min_down_time": 24, + "ramp_limit_up": 1, + }, + "lignite": { + "p_min_pu": 0.5, + "start_up_cost": 200, + "shut_down_cost": 200, + "min_up_time": 24, + "min_down_time": 24, + "ramp_limit_up": 1, + }, + "nuclear": { + "p_min_pu": 0.5, + "start_up_cost": 100, + "shut_down_cost": 100, + "min_up_time": 8, + "min_down_time": 10, + }, + "oil": { + "p_min_pu": 0.2, + "start_up_cost": 30, + "shut_down_cost": 30, + "min_up_time": 1, + "min_down_time": 1, + "ramp_limit_up": 1, + }, + "urban central solid biomass CHP": { + "p_min_pu": 0.5, + "start_up_cost": 150, + "shut_down_cost": 150, + "min_up_time": 5, + "min_down_time": 5, + }, +} + +uc_params_optimistic = { + "OCGT": { + "p_min_pu": 0.15, + "start_up_cost": 20, + "min_up_time": 1, + "min_down_time": 1, + "ramp_limit_up": 1, + "ramp_limit_start_up": 0.3, + "ramp_limit_shut_down": 0.3, + }, + "CCGT": { + "p_min_pu": 0.4, + "start_up_cost": 100, + "min_up_time": 2, + "min_down_time": 2, + "ramp_limit_up": 1, + 
"ramp_limit_start_up": 0.5, + "ramp_limit_shut_down": 0.5, + }, + "coal": { + "p_min_pu": 0.3, + "start_up_cost": 80, + "min_up_time": 4, + "min_down_time": 4, + "ramp_limit_up": 0.8, + "ramp_limit_start_up": 0.3, + "ramp_limit_shut_down": 0.3, + }, + "lignite": { + "p_min_pu": 0.3, + "start_up_cost": 120, + "min_up_time": 5, + "min_down_time": 5, + "ramp_limit_up": 0.7, + "ramp_limit_start_up": 0.3, + "ramp_limit_shut_down": 0.3, + }, + "nuclear": { + "p_min_pu": 0.45, + "start_up_cost": 200, + "min_up_time": 6, + "min_down_time": 8, + "ramp_limit_up": 0.2, + "ramp_limit_start_up": 0.2, + "ramp_limit_shut_down": 0.2, + }, + "oil": { + "p_min_pu": 0.2, + "start_up_cost": 30, + "min_up_time": 1, + "min_down_time": 1, + "ramp_limit_up": 1, + "ramp_limit_start_up": 0.3, + "ramp_limit_shut_down": 0.3, + }, + "urban central solid biomass CHP": { + "p_min_pu": 0.35, + "start_up_cost": 50, + "min_up_time": 1, + "min_down_time": 1, + "ramp_limit_up": 0.9, + "ramp_limit_start_up": 0.4, + "ramp_limit_shut_down": 0.4, + }, +} + +uc_params_average = { + "OCGT": { + "p_min_pu": 0.2, + "start_up_cost": 40, + "min_up_time": 1, + "min_down_time": 1, + "ramp_limit_up": 1, + "ramp_limit_start_up": 0.2, + "ramp_limit_shut_down": 0.2, + }, + "CCGT": { + "p_min_pu": 0.45, + "start_up_cost": 150, + "min_up_time": 3, + "min_down_time": 2, + "ramp_limit_up": 1, + "ramp_limit_start_up": 0.45, + "ramp_limit_shut_down": 0.45, + }, + "coal": { + "p_min_pu": 0.325, + "start_up_cost": 120, + "min_up_time": 5, + "min_down_time": 6, + "ramp_limit_up": 0.7, + "ramp_limit_start_up": 0.38, + "ramp_limit_shut_down": 0.38, + }, + "lignite": { + "p_min_pu": 0.325, + "start_up_cost": 150, + "min_up_time": 7, + "min_down_time": 6, + "ramp_limit_up": 0.6, + "ramp_limit_start_up": 0.4, + "ramp_limit_shut_down": 0.4, + }, + "nuclear": { + "p_min_pu": 0.5, + "start_up_cost": 250, + "min_up_time": 6, + "min_down_time": 10, + "ramp_limit_up": 0.3, + "ramp_limit_start_up": 0.3, + "ramp_limit_shut_down": 0.3, + }, + "oil": { + "p_min_pu": 0.2, + "start_up_cost": 50, + "min_up_time": 1, + "min_down_time": 1, + "ramp_limit_up": 0.8, + "ramp_limit_start_up": 0.2, + "ramp_limit_shut_down": 0.2, + }, + "urban central solid biomass CHP": { + "p_min_pu": 0.38, + "start_up_cost": 80, + "min_up_time": 2, + "min_down_time": 2, + "ramp_limit_up": 0.7, + "ramp_limit_start_up": 0.38, + "ramp_limit_shut_down": 0.38, + }, +} + +uc_params_conservative = { + "OCGT": { + "p_min_pu": 0.25, + "start_up_cost": 60, + "min_up_time": 2, + "min_down_time": 2, + "ramp_limit_up": 0.9, + "ramp_limit_start_up": 0.15, + "ramp_limit_shut_down": 0.15, + }, + "CCGT": { + "p_min_pu": 0.5, + "start_up_cost": 200, + "min_up_time": 4, + "min_down_time": 3, + "ramp_limit_up": 0.8, + "ramp_limit_start_up": 0.35, + "ramp_limit_shut_down": 0.35, + }, + "coal": { + "p_min_pu": 0.35, + "start_up_cost": 160, + "min_up_time": 6, + "min_down_time": 8, + "ramp_limit_up": 0.5, + "ramp_limit_start_up": 0.25, + "ramp_limit_shut_down": 0.25, + }, + "lignite": { + "p_min_pu": 0.35, + "start_up_cost": 200, + "min_up_time": 8, + "min_down_time": 10, + "ramp_limit_up": 0.4, + "ramp_limit_start_up": 0.2, + "ramp_limit_shut_down": 0.2, + }, + "nuclear": { + "p_min_pu": 0.55, + "start_up_cost": 400, + "min_up_time": 10, + "min_down_time": 12, + "ramp_limit_up": 0.15, + "ramp_limit_start_up": 0.15, + "ramp_limit_shut_down": 0.15, + }, + "oil": { + "p_min_pu": 0.25, + "start_up_cost": 80, + "min_up_time": 2, + "min_down_time": 2, + "ramp_limit_up": 0.6, + "ramp_limit_start_up": 0.15, + 
"ramp_limit_shut_down": 0.15, + }, + "urban central solid biomass CHP": { + "p_min_pu": 0.4, + "start_up_cost": 120, + "min_up_time": 3, + "min_down_time": 3, + "ramp_limit_up": 0.5, + "ramp_limit_start_up": 0.3, + "ramp_limit_shut_down": 0.3, + }, +} + + +def add_unit_commitment( + n, + uc_params=uc_params_average, + carriers=["OCGT", "coal", "lignite", "urban central solid biomass CHP"], + regions=["DE"], +): + """ + Add unit commitment parameters to links in the network based on a UC parameter dictionary. + + Parameters + ---------- + n : pypsa.Network + The PyPSA network. + + uc_params : dict + Nested dict with carrier names as keys and dict of UC parameters as values. + Example: + { + "OCGT": { + "p_min_pu": 0.2, + "start_up_cost": 40, + "min_up_time": 1, + "min_down_time": 1, + "ramp_limit_up": 1, + "ramp_limit_start_up": 0.2, + "ramp_limit_shut_down": 0.2, + }, + ... + } + + carriers : list, optional + List of carriers to process (default = all carriers in uc_params). + + regions : list + List of region codes to filter buses (default = ["DE"]). + """ + + def get_filtered_links(carrier_list): + carrier_mask = n.links.carrier.isin(carrier_list) + region_mask = n.links.bus0.str.contains( + "|".join(regions), na=False + ) | n.links.bus1.str.contains("|".join(regions), na=False) + return n.links[carrier_mask & region_mask].index + + # If no carriers specified, use all available in uc_params + carriers_to_process = carriers if carriers is not None else list(uc_params.keys()) + + available_carriers = set(carriers_to_process) & set(n.links.carrier.unique()) + + for carrier in available_carriers: + links_i = get_filtered_links([carrier]) + if len(links_i) == 0: + continue + + # apply UC parameters from dict + for param, value in uc_params[carrier].items(): + if param in n.links.columns: + n.links.loc[links_i, param] = value + + # ensure committable flag + n.links.loc[links_i, "committable"] = True + + +target_caps = { + "GB": 0, + "CH": 8000, + "CZ": 5500, + "NL": 8000, + "FR": 9500, + "PL": 4500, + "NO": 2500, + "BE": 0, + "DK": 6000, + "AT": 6000, + "LU": 1500, + "SE": 1000, +} + + +def scale_transmission_capacity(n, target_capacities): + """ + Scale transmission capacities in PyPSA network to match target values. 
+ + Parameters + ---------- + n : pypsa.Network + PyPSA network object + target_capacities : pd.Series or dict + Target transmission capacities by country in MW (already rounded) + """ + + if isinstance(target_capacities, dict): + target_capacities = pd.Series(target_capacities) + + # Calculate current capacities (without reversed links) + countries_ac = ["AT", "CH", "CZ", "DK", "FR", "LU", "NL", "PL"] + countries_dc = ["BE", "FR", "GB", "DK", "SE", "NO", "CH"] + + current_capa = pd.DataFrame( + data=0, + index=list(set(countries_ac + countries_dc)), + columns=["AC_MW", "DC_MW", "Total_MW"], + ) + + # AC lines + for ct in countries_ac: + ac_lines = n.lines # No need to filter out reversed for AC lines + ind = ac_lines[ + (ac_lines.bus0.str.startswith("DE") & ac_lines.bus1.str.startswith(ct)) + | (ac_lines.bus0.str.startswith(ct) & ac_lines.bus1.str.startswith("DE")) + ].index + current_capa.loc[ct, "AC_MW"] = n.lines.loc[ind, "s_nom_opt"].sum() + + # DC links + for ct in countries_dc: + dc_links = n.links[ + n.links.carrier.isin(["DC"]) & ~n.links.index.str.contains("reversed") + ] + ind = dc_links[ + (dc_links.bus0.str.startswith("DE") & dc_links.bus1.str.startswith(ct)) + | (dc_links.bus0.str.startswith(ct) & dc_links.bus1.str.startswith("DE")) + ].index + current_capa.loc[ct, "DC_MW"] = n.links.loc[ind, "p_nom_opt"].sum() + + current_capa["Total_MW"] = current_capa["AC_MW"] + current_capa["DC_MW"] + + # Calculate scaling factors + for country in target_capacities.index: + if country in current_capa.index: + current_total = current_capa.loc[country, "Total_MW"] + target_total = target_capacities[country] + + if current_total > 0: + scaling_factor = target_total / current_total + + print( + f"{country}: {current_total:.0f} MW -> {target_total:.0f} MW (factor: {scaling_factor:.3f})" + ) + + # Scale AC lines (no reversed links for AC) + if current_capa.loc[country, "AC_MW"] > 0: + ac_lines = n.lines # No need to filter out reversed for AC lines + ac_ind = ac_lines[ + ( + ac_lines.bus0.str.startswith("DE") + & ac_lines.bus1.str.startswith(country) + ) + | ( + ac_lines.bus0.str.startswith(country) + & ac_lines.bus1.str.startswith("DE") + ) + ].index + + # Scale AC lines + n.lines.loc[ac_ind, "s_nom_opt"] *= scaling_factor + n.lines.loc[ac_ind, "s_nom"] *= scaling_factor + + # Scale DC links + if current_capa.loc[country, "DC_MW"] > 0: + dc_links = n.links[ + n.links.carrier.isin(["DC"]) + & ~n.links.index.str.contains("reversed") + ] + dc_ind = dc_links[ + ( + dc_links.bus0.str.startswith("DE") + & dc_links.bus1.str.startswith(country) + ) + | ( + dc_links.bus0.str.startswith(country) + & dc_links.bus1.str.startswith("DE") + ) + ].index + + # Scale main DC links + n.links.loc[dc_ind, "p_nom_opt"] *= scaling_factor + n.links.loc[dc_ind, "p_nom"] *= scaling_factor + + # Scale reversed DC links to match + for link_id in dc_ind: + reversed_id = link_id + "-reversed" + if reversed_id in n.links.index: + n.links.loc[reversed_id, "p_nom_opt"] = n.links.loc[ + link_id, "p_nom_opt" + ] + n.links.loc[reversed_id, "p_nom"] = n.links.loc[ + link_id, "p_nom" + ] + + elif target_total > 0: + logger.info( + f"WARNING: {country} has target capacity {target_total:.0f} MW but no current capacity to scale" + ) + else: + logger.info(f"{country}: Target is 0 MW - no scaling needed") + + +def _unfix_bottlenecks(new, deci, name, extendable_i): + if name == "links": + # Links that have 0-cost and are extendable + virtual_links = [ + "land transport oil", + "land transport fuel cell", + "solid biomass for industry", 
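+            # (list continues below; these are the zero-marginal-cost
+            # accounting links that turn bus energy into final demands; they
+            # must stay extendable so the changed loads of the realization
+            # network can still be served)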
+ "gas for industry", + "industry methanol", + "naphtha for industry", + "process emissions", + "coal for industry", + "H2 for industry", + "shipping methanol", + "shipping oil", + "kerosene for aviation", + "agriculture machinery oil", + "co2 sequestered", + ] + + _idx = new.loc[new.carrier.isin(virtual_links)].index.intersection(extendable_i) + new.loc[_idx, "p_nom_extendable"] = True + + # Bottleneck links can be extended, but not reduced to fix infeasibilities due to numerical inconsistencies + bottleneck_links = [ + "electricity distribution grid", + "HVC to air", # waste CHP would get used as a flexible energy source otherwise + "SMR", + # Boilers create bottlenecks AND should be extendable for fixed_profile_scaling constraints to be applied correctly + "rural gas boiler", + "urban decentral gas boiler", + # Biomass for 2035 when gas is banned + "rural biomass boiler", + "urban decentral biomass boiler", + ] + _idx = new.loc[new.carrier.isin(bottleneck_links)].index.intersection( + extendable_i + ) + new.loc[_idx, "p_nom_extendable"] = True + new.loc[_idx, "p_nom_min"] = deci.loc[_idx, "p_nom_opt"] + # OCGT as last resort to avoid load shedding + # allowed only in DE + # (previously the model sometimes expanded waste CHPs) + _idx = new.loc[ + (new.carrier == "OCGT") & (new.index.str.startswith("DE")) + ].index.intersection(extendable_i) + new.loc[_idx, "p_nom_extendable"] = True + new.loc[_idx, "p_nom_min"] = deci.loc[_idx, "p_nom_opt"] + + if name == "generators": + fuels = [ + "lignite", + "coal", + "oil primary", + "uranium", + "gas primary", + ] + vents = [ + "urban central heat vent", + "rural heat vent", + "urban decentral heat vent", + ] + _idx = new.loc[new.carrier.isin(fuels + vents)].index.intersection(extendable_i) + new.loc[_idx, "p_nom_extendable"] = True + + return + + +def fix_capacities(realization, decision, scope="DE", strict=False, no_flex=False): + logger.info(f"Fixing all capacities for scope: {scope}") + if scope == "EU": + scope = "" + if not strict: + logger.info("Freeing virtual links, bottlenecks and fossil generators.") + + # Copy all existing assets from the decision network + n = decision.copy() + + # The constraints and loads are taken from the realization network + n.global_constraints = realization.global_constraints.copy() + n.loads = realization.loads.copy() + # Copy the whole realization network, because copying loads_t directly is not type stable + n.loads_t = realization.copy().loads_t + + nominal_attrs = { + "generators": "p_nom", + "lines": "s_nom", + "links": "p_nom", + "stores": "e_nom", + } + + for name, attr in nominal_attrs.items(): + new = getattr(n, name) + deci = getattr(decision, name) + real = getattr(realization, name) + + # Scenario specific assets are taken from the realization network + _idx = real.query("carrier in ['BEV charger', 'V2G', 'EV battery']").index + new.loc[_idx, attr] = real.loc[_idx, attr] + + # Start with fixing everything... 
+ extendable_i = new.query( + f"{attr}_extendable and index.str.startswith('{scope}')" + ).index + new.loc[extendable_i, attr + "_extendable"] = False + new.loc[extendable_i, attr] = new.loc[extendable_i, attr + "_opt"] + + # Some links should be extendable to avoid infeasibilities or allow burning more fossil fuels + if not strict: + _unfix_bottlenecks(new, deci, name, extendable_i) + + # The CO2 constraints on atmosphere and sequestration need extendable stores to work correctly + if name == "stores": + logger.info("Freeing co2 atmosphere and sequestered stores.") + # there is only one co2 atmosphere store which should always be extendable, hence no intersection with extendable_i needed + _idx = new.query("carrier == 'co2'").index + new.loc[_idx, "e_nom_extendable"] = True + # co2 sequestered stores from previous planning horizons should not be extendable + _idx = new.query("carrier == 'co2 sequestered'").index.intersection( + extendable_i + ) + new.loc[_idx, "e_nom_extendable"] = True + + # Above several assets are switched to extendable again, for these the p_nom value is restored to the value from the decision network + + _idx = new.query(f"{attr}_extendable").index + + new.loc[_idx, attr] = deci.loc[_idx, attr] + + if no_flex: + logger.info("Realization network is from a run without flexibility.") + remove_flexibility_options(n) + return n + + +if __name__ == "__main__": + if "snakemake" not in globals(): + snakemake = mock_snakemake( + "prepare_regret_network", + clusters=27, + opts="", + sector_opts="none", + planning_horizons="2030", + decision="LowDemand", + run="HighDemand", + ) + + configure_logging(snakemake) + set_scenario_config(snakemake) + update_config_from_wildcards(snakemake.config, snakemake.wildcards) + + # Touch output file to ensure it exists + pathlib.Path(snakemake.output.regret_prenetwork).touch() + + realization = pypsa.Network(snakemake.input.realization) + decision = pypsa.Network(snakemake.input.decision) + + planning_horizons = snakemake.wildcards.get("planning_horizons", None) + logging_frequency = snakemake.config.get("solving", {}).get( + "mem_logging_frequency", 30 + ) + solve_opts = snakemake.params.solving["options"] + assert solve_opts["noisy_costs"] == False, ( + "Noisy costs should not be used in regret runs." + ) + np.random.seed(solve_opts.get("seed", 123)) + + strict = snakemake.params["strict"] + scope_to_fix = snakemake.params["scope_to_fix"] + h2_vent = snakemake.params["h2_vent"] + + n = fix_capacities( + realization, + decision, + scope=scope_to_fix, + strict=strict, + no_flex=decision.meta.get("iiasa_database").get("no_flex_lt_run", False), + ) + + unit_commitment = snakemake.params.get("unit_commitment") + + if unit_commitment["enable"]: + logger.info( + f"Add unit commitment in {unit_commitment['regions']} for carriers {unit_commitment['carriers']} with parameter set '{unit_commitment['params']}' to the network." 
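+            # Expected config shape, inferred from the keys read here (not
+            # documented in the source):
+            #   unit_commitment:
+            #     enable: true
+            #     params: average   # one of custom | optimistic | average | conservative
+            #     carriers: [OCGT, CCGT, coal, lignite]
+            #     regions: [DE]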
+ ) + + uc_params_str = unit_commitment["params"] + if uc_params_str == "custom": + uc_params = uc_params_custom + elif uc_params_str == "optimistic": + uc_params = uc_params_optimistic + elif uc_params_str == "conservative": + uc_params = uc_params_conservative + elif uc_params_str == "average": + uc_params = uc_params_average + else: + raise ValueError( + f"Unknown unit commitment parameter set '{uc_params_str}'." + ) + + add_unit_commitment( + n, + uc_params, + carriers=unit_commitment["carriers"], + regions=unit_commitment["regions"], + ) + + scale_cross_border_elec_capa = snakemake.params.get( + "scale_cross_border_elec_capa", False + ) + + if scale_cross_border_elec_capa: + logger.info("Scaling cross-border electricity capacities to target values.") + scale_transmission_capacity(n, target_caps) + + if strict: + logger.info( + "Strict regret run chosen. No capacities are extendable. Activating load shedding to prevent infeasibilities." + ) + snakemake.params.solving["options"]["load_shedding"] = True + + if h2_vent: + logger.info("H2 venting activated for regret run.") + n.add("Carrier", "H2 vent", color="#dd2e23", nice_name="H2 vent") + + n.add( + "Generator", + n.buses.query("carrier=='H2'").index, + " vent", + bus=n.buses.query("carrier=='H2'").index, + carrier="H2 vent", + sign=-1, + marginal_cost=1, + p_nom=1e6, + ) + + # Manipulating the global constraints + to_keep = [ + "biomass limit", + "unsustainable biomass limit", + "co2_sequestration_limit", + "CO2Limit", + "co2_limit-DE", + ] + + to_drop = n.global_constraints.index.difference(to_keep) + + logger.info("Regret run detected. Dropping the following constraints:") + logger.info(to_drop) + + n.global_constraints.drop(to_drop, inplace=True) + + # If running with post-discretization the last lines of optimize_transmission_expansion_iteratively have to be undone for the operational run + if solve_opts["post_discretization"].get("enable") and not solve_opts.get( + "skip_iterations" + ): + n.lines.s_nom_extendable = False + n.lines.s_nom = n.lines.s_nom_opt + + discretized_links = n.links.query( + f"carrier in {list(solve_opts['post_discretization'].get('link_unit_size').keys())}" + ).index + n.links.loc[discretized_links, "p_nom_extendable"] = False + n.links.loc[discretized_links, "p_nom"] = n.links.loc[ + discretized_links, "p_nom_opt" + ] + + prepare_network( + n, + solve_opts=snakemake.params.solving["options"], + foresight=snakemake.params.foresight, + planning_horizons=planning_horizons, + co2_sequestration_potential=snakemake.params["co2_sequestration_potential"], + limit_max_growth=snakemake.params.get("sector", {}).get("limit_max_growth"), + regret_run=True, + ) + + # These constraints have to be changed AFTER prepare_network + + if scope_to_fix == "EU": + logger.info( + f"Fixing Scope 'EU' chosen.
Setting the EU CO2 price to the sum of the EU and DE CO2 prices from the realization network: {realization.global_constraints.loc['CO2Limit', 'mu'] + realization.global_constraints.loc['co2_limit-DE', 'mu']} €/t_CO2" + ) + n.add( + "Generator", + "co2 atmosphere", + bus="co2 atmosphere", + p_min_pu=-1, + p_max_pu=0, + p_nom_extendable=True, + carrier="co2", + marginal_cost=( + realization.global_constraints.loc["CO2Limit", "mu"] + + realization.global_constraints.loc["co2_limit-DE", "mu"] + ), + ) + logger.info("Adding negative CO2 generator and dropping co2 limits.") + n.global_constraints.drop("CO2Limit", inplace=True) + n.global_constraints.drop("co2_limit-DE", inplace=True) + + n.export_to_netcdf(snakemake.output.regret_prenetwork) diff --git a/scripts/pypsa-de/prepare_st_low_res_network.py b/scripts/pypsa-de/prepare_st_low_res_network.py new file mode 100644 index 000000000..c01ead06b --- /dev/null +++ b/scripts/pypsa-de/prepare_st_low_res_network.py @@ -0,0 +1,231 @@ +import importlib.util +import logging +import pathlib + +import numpy as np +import pypsa + +from scripts._helpers import ( + configure_logging, + mock_snakemake, + set_scenario_config, + update_config_from_wildcards, +) +from scripts.solve_network import prepare_network + + +def _load_attr_from_file(filename: str, attr_name: str) -> object: + """ + Load attribute attr_name from a local python file given by filename (including '.py'). + """ + if not filename.endswith(".py"): + raise ValueError("filename must include the '.py' extension") + module_stem = pathlib.Path(filename).stem + _spec_path = pathlib.Path(__file__).resolve().parent / filename + _spec = importlib.util.spec_from_file_location( + f"scripts.pypsa_de.{module_stem}", _spec_path + ) + _mod = importlib.util.module_from_spec(_spec) + assert _spec is not None and _spec.loader is not None + _spec.loader.exec_module(_mod) + return getattr(_mod, attr_name) + + +_unfix_bottlenecks = _load_attr_from_file( + "prepare_regret_network.py", "_unfix_bottlenecks" +) +remove_flexibility_options = _load_attr_from_file( + "modify_prenetwork.py", "remove_flexibility_options" +) + + +logger = logging.getLogger(__name__) + +eeg_targets = { + 2030: { + "solar": 215000, + "onwind": 115000, + "offwind": 30000, + } +} + +co2_prices = { + 2030: 220, +} + + +def scale_new_res_to_target(n, targets, year, ratio=1.0): + for tech, target in targets[year].items(): + logger.info( + f"Scaling installed capacity of {tech} in DE to target of {target * ratio} MW." 
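+        # Only capacity with build_year == year is rescaled; earlier builds stay:
+        #   scale_factor = (target * ratio - existing_cap) / new_cap
+        # Implicit assumptions: new_cap > 0 and existing_cap <= target * ratio,
+        # otherwise the factor is undefined or negative.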
+ ) + gens = n.generators.query( + f"carrier.str.contains('{tech}') and not carrier.str.contains('solar thermal') and index.str.startswith('DE')" + ) + existing_gens = gens[gens.build_year < year] + new_gens = gens[gens.build_year == year] + existing_cap = existing_gens.p_nom_opt.sum() + new_cap = new_gens.p_nom_opt.sum() + assert np.isclose(existing_cap + new_cap, gens.p_nom_opt.sum()) + + scale_factor = (target * ratio - existing_cap) / new_cap + + n.generators.loc[new_gens.index, "p_nom_opt"] *= scale_factor + n.generators.loc[new_gens.index, "p_nom"] *= scale_factor + + +def fix_capacities(n_lt, no_flex=False): + n = n_lt.copy() + + nominal_attrs = { + "generators": "p_nom", + "lines": "s_nom", + "links": "p_nom", + "stores": "e_nom", + } + + for name, attr in nominal_attrs.items(): + new = getattr(n, name) + lt = getattr(n_lt, name) + + extendable_i = new.query(f"{attr}_extendable").index + + new.loc[extendable_i, attr + "_extendable"] = False + new.loc[extendable_i, attr] = new.loc[extendable_i, attr + "_opt"] + + _unfix_bottlenecks(new, lt, name, extendable_i) + + # The CO2 constraints on atmosphere and sequestration need extendable stores to work correctly + if name == "stores": + logger.info("Freeing co2 atmosphere and sequestered stores.") + # there is only one co2 atmosphere store which should always be extendable, hence no intersection with extendable_i needed + _idx = new.query("carrier == 'co2'").index + new.loc[_idx, "e_nom_extendable"] = True + # co2 sequestered stores from previous planning horizons should not be extendable + _idx = new.query("carrier == 'co2 sequestered'").index.intersection( + extendable_i + ) + new.loc[_idx, "e_nom_extendable"] = True + + # Above several assets are switched to extendable again; for these the p_nom value is restored to the value from the long-term network + + _idx = new.query(f"{attr}_extendable").index + + new.loc[_idx, attr] = lt.loc[_idx, attr] + + if no_flex: + logger.info("Realization network is from a run without flexibility.") + remove_flexibility_options(n) + return n + + +if __name__ == "__main__": + if "snakemake" not in globals(): + snakemake = mock_snakemake( + "prepare_st_low_res_network", + clusters=27, + opts="", + sector_opts="none", + eeg_sweep_year="2030", + run="HighDemand", + eeg_level=0.7, + ) + + configure_logging(snakemake) + set_scenario_config(snakemake) + update_config_from_wildcards(snakemake.config, snakemake.wildcards) + + n_lt = pypsa.Network(snakemake.input.network) + eeg_sweep_year = snakemake.wildcards.get("eeg_sweep_year", None) + logging_frequency = snakemake.config.get("solving", {}).get( + "mem_logging_frequency", 30 + ) + solve_opts = snakemake.params.solving["options"] + assert solve_opts["noisy_costs"] == False, ( + "Noisy costs should not be used in regret runs."
+ ) + np.random.seed(solve_opts.get("seed", 123)) + + strict = snakemake.params["strict"] + scope_to_fix = snakemake.params["scope_to_fix"] + h2_vent = snakemake.params["h2_vent"] + + n = fix_capacities(n_lt, snakemake.params.get("no_flex_lt_run", False)) + + scale_new_res_to_target( + n, eeg_targets, int(eeg_sweep_year), ratio=float(snakemake.wildcards.eeg_level) + ) + + if h2_vent: + logger.info("H2 venting activated for short-term run.") + n.add("Carrier", "H2 vent", color="#dd2e23", nice_name="H2 vent") + + n.add( + "Generator", + n.buses.query("carrier=='H2'").index, + " vent", + bus=n.buses.query("carrier=='H2'").index, + carrier="H2 vent", + sign=-1, + marginal_cost=1, + p_nom=1e6, + ) + + # Manipulating the global constraints + to_keep = [ + "biomass limit", + "unsustainable biomass limit", + "co2_sequestration_limit", + "CO2Limit", + "co2_limit-DE", + ] + + to_drop = n.global_constraints.index.difference(to_keep) + logger.info("Short-term run detected. Dropping the following constraints:") + logger.info(to_drop) + + n.global_constraints.drop(to_drop, inplace=True) + + # If running with post-discretization the last lines of optimize_transmission_expansion_iteratively have to be undone for the operational run + if solve_opts["post_discretization"].get("enable") and not solve_opts.get( + "skip_iterations" + ): + n.lines.s_nom_extendable = False + n.lines.s_nom = n.lines.s_nom_opt + + discretized_links = n.links.query( + f"carrier in {list(solve_opts['post_discretization'].get('link_unit_size').keys())}" + ).index + n.links.loc[discretized_links, "p_nom_extendable"] = False + n.links.loc[discretized_links, "p_nom"] = n.links.loc[ + discretized_links, "p_nom_opt" + ] + + prepare_network( + n, + solve_opts=snakemake.params.solving["options"], + foresight=snakemake.params.foresight, + planning_horizons=eeg_sweep_year, + co2_sequestration_potential=snakemake.params["co2_sequestration_potential"], + limit_max_growth=snakemake.params.get("sector", {}).get("limit_max_growth"), + regret_run=True, + ) + + logger.info("Adding negative CO2 generator and dropping co2 limits.") + + n.add( + "Generator", + "co2 atmosphere", + bus="co2 atmosphere", + p_min_pu=-1, + p_max_pu=0, + p_nom_extendable=True, + carrier="co2", + marginal_cost=-co2_prices[int(eeg_sweep_year)], + ) + n.global_constraints.drop("CO2Limit", inplace=True) + n.global_constraints.drop("co2_limit-DE", inplace=True) + + n.export_to_netcdf(snakemake.output.st_low_res_prenetwork) diff --git a/scripts/pypsa-de/regret_plots.py b/scripts/pypsa-de/regret_plots.py new file mode 100644 index 000000000..6b21f22b8 --- /dev/null +++ b/scripts/pypsa-de/regret_plots.py @@ -0,0 +1,253 @@ +import os +import sys + +sys.path.append(os.path.abspath(os.path.dirname(__file__))) +sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))) + +import collections +import itertools +import os +import re + +import matplotlib.pyplot as plt +import numpy as np +import pandas as pd +import pypsa +from _helpers import configure_logging, mock_snakemake + +groups = { + "gas": ["gas CHP", "OCGT", "CCGT", "gas"], + "heat vent": ["heat vent"], + "water tanks": ["water tank", "water pit"], + "heat pump": ["heat pump"], + "resistive heater": ["resistive heater"], + "biomass": ["biomass"], + "lignite": ["lignite"], + "coal": ["coal"], + "oil": ["oil"], + "waste": ["waste"], + "solar": ["solar"], + "offwind": ["offwind"], +} + + +def aggregate_by_keywords(opex_comp_agg, groups): + """ + Aggregate rows in opex_comp_agg according to keyword groups. 
+ + Parameters + ---------- + opex_comp_agg : pd.DataFrame + DataFrame with row index as technology names. + groups : dict + Keys = new aggregated name, + Values = list of substrings to match in the index. + + Returns + ------- + pd.DataFrame + """ + df_out = opex_comp_agg.copy() + for new_name, keywords in groups.items(): + mask = df_out.index.to_series().str.contains("|".join(keywords)) + if mask.any(): + summed = df_out.loc[mask].sum() + df_out = df_out.drop(df_out.index[mask]) + df_out.loc[new_name] = summed + return df_out + + +if __name__ == "__main__": + if "snakemake" not in globals(): + import os + import sys + + from _helpers import mock_snakemake + + snakemake = mock_snakemake( + "regret_plots", + sensitivity="gas_price_60", + ) + + configure_logging(snakemake) + config = snakemake.config + planning_horizons = snakemake.params.planning_horizons + scenarios = snakemake.params.scenarios + decisions = ["decision_" + d for d in scenarios] + tech_colors = snakemake.params.plotting["tech_colors"] + + # Nested dict: networks[year][scenario][decision] = Network + networks = collections.defaultdict(lambda: collections.defaultdict(dict)) + + for fn in snakemake.input.regret_networks: + parts = fn.split(os.sep) + + # scenario is the folder name 2 levels up + scenario = parts[-4] + if scenario not in scenarios: + raise ValueError( + f"Unexpected scenario '{scenario}' in {fn}. Allowed: {scenarios}" + ) + + # extract year (4 digits before .nc) + m = re.search(r"_(\d{4})\.nc$", fn) + if not m: + raise ValueError(f"Could not parse year from {fn}") + year = int(m.group(1)) + + # extract decision_* (string until the 2nd underscore in filename) + filename = parts[-1] + decision = "_".join(filename.split("_")[:2]) + if not decision.startswith("decision"): + raise ValueError(f"Unexpected decision string in {filename}") + + # load and store + # print(f"Loading {fn} ...") + # print(f" scenario: {scenario}, year: {year}, decision: {decision}") + networks[year][scenario][decision] = pypsa.Network(fn) + + # ensure output directory exist + if not os.path.exists(snakemake.params.output_dir): + os.makedirs(snakemake.params.output_dir) + + # Plot electricity price duration curves + + fig, ax = plt.subplots( + figsize=(4 * len(scenarios), 5 * len(planning_horizons)), + nrows=len(planning_horizons), + ncols=1, + ) + ax = ax.flatten() + + for i, year in enumerate(planning_horizons): + for scenario, decision in itertools.product(scenarios, decisions): + n = networks[year][scenario][decision] + lmps = n.buses_t.marginal_price.loc[ + :, (n.buses.carrier == "AC") & (n.buses.index.str.startswith("DE")) + ] + lmps_sorted = pd.DataFrame( + lmps.values.flatten(), columns=["lmp"] + ).sort_values(by="lmp", ascending=False) + lmps_sorted["percentage"] = ( + np.arange(len(lmps_sorted)) / len(lmps_sorted) * 100 + ) + + ax[i].plot( + lmps_sorted["percentage"], + lmps_sorted["lmp"], + label=f"{scenario}_{decision} (avg: {lmps_sorted['lmp'].mean():.2f})", + ) + + ax[i].set_ylim(-50, 300) + ax[i].legend() + ax[i].set_xlabel("Percentage of time") + ax[i].set_ylabel("€/MWh") + ax[i].set_title(f"Price duration curves {year}") + + plt.tight_layout() + plt.savefig(snakemake.output.elec_price_comp_de, bbox_inches="tight") + plt.close() + + # Print CO2 prices + + # for i, year in enumerate(years): + # for scenario, decision in itertools.product(scenarios, decisions): + + # n = networks[year][scenario][decision] + + # print(f"CO2 price for {year}, {scenario}, {decision}: {n.global_constraints.loc["CO2Limit", "mu"] + 
n.global_constraints.loc["co2_limit-DE", "mu"]}") + + # Plot OPEX + + kwargs = { + "groupby": ["bus", "carrier"], + "at_port": True, + "nice_names": False, + } + + fig, axes = plt.subplots( + nrows=len(planning_horizons), + ncols=1, + figsize=(6 * len(scenarios), 6 * len(planning_horizons)), + ) + axes = axes.flatten() + + for i, year in enumerate(planning_horizons): + opex_comp = pd.DataFrame( + columns=["_".join(tup) for tup in itertools.product(scenarios, decisions)] + ) + + # Collect OPEX for all scenario-decision combinations + for scenario, decision in itertools.product(scenarios, decisions): + n = networks[year][scenario][decision] + + opex = ( + n.statistics.opex(**kwargs) + .filter(like="DE") + .groupby("carrier") + .sum() + .multiply(1e-9) # to billion € + ) + opex_comp[f"{scenario}_{decision}"] = opex + + # Aggregate cost components with less than 0.1 (100 Mio €) as "Other" + opex_comp_agg = aggregate_by_keywords(opex_comp, groups) + small_rows = opex_comp_agg.abs().max(axis=1) < 0.1 + other_row = opex_comp_agg[small_rows].sum(axis=0) + opex_comp_agg = opex_comp_agg.loc[~small_rows] + opex_comp_agg.loc["Other"] = other_row + + # Prepare labels with line breaks + labels = [col.replace("_", "\n") for col in opex_comp_agg.columns] + + # Plot stacked bar + ax = axes[i] + bottom = np.zeros(len(opex_comp_agg.columns)) + + for tech in opex_comp_agg.index: + values = opex_comp_agg.loc[tech].values + ax.bar( + labels, + values, + bottom=bottom, + color=tech_colors.get(tech, "#333333"), + label=tech, + ) + + # Add numbers in the middle, except for 'Other' + if tech != "Other": + for j, val in enumerate(values): + if val > 0: # only if positive + ax.text( + j, + bottom[j] + val / 2, # middle of the segment + f"{val:.2f}", + ha="center", + va="center", + fontsize=8, + color="white", + ) + + bottom += values + + # Add total sum labels on top of bars + totals = opex_comp_agg.sum(axis=0) + for j, total in enumerate(totals): + ax.text( + j, + total + total * 0.02, + f"{total:.2f}", + ha="center", + va="bottom", + fontsize=10, + ) + + # Adjust y-limit + ax.set_ylim(0, max(totals) * 1.08) + ax.set_ylabel("OPEX [billion €]") + ax.set_title(f"Stacked OPEX composition by technology, {year}") + + # Legend outside + axes[-1].legend(loc="upper left", bbox_to_anchor=(1, 1)) + plt.savefig(snakemake.params.output_dir + "/opex_comp_de.png", bbox_inches="tight") + plt.close() diff --git a/scripts/pypsa-de/regret_plots_lt.py b/scripts/pypsa-de/regret_plots_lt.py new file mode 100644 index 000000000..4911a66b5 --- /dev/null +++ b/scripts/pypsa-de/regret_plots_lt.py @@ -0,0 +1,613 @@ +import os +import sys + +sys.path.append(os.path.abspath(os.path.dirname(__file__))) # Adds 'scripts/' to path +sys.path.append( + os.path.abspath(os.path.join(os.path.dirname(__file__), "..")) +) # Adds repo root + +import re +from collections import defaultdict + +import matplotlib.patches as mpatches +import matplotlib.pyplot as plt +import numpy as np +import pandas as pd +import pypsa +from _helpers import configure_logging, mock_snakemake +from matplotlib.patches import Rectangle + +groups = { + "gas (+CHP)": ["gas CHP", "OCGT", "CCGT"], + "heat pump": ["heat pump"], + "resistive heater": ["resistive heater"], + "biomass (+ CHP)": ["biomass"], + "coal (+ CHP)": ["coal"], + "oil (+ CHP)": ["oil"], + "waste CHP": ["waste"], + "solar": ["solar"], + "offwind": ["offwind"], +} + + +def aggregate_by_keywords(df, groups): + """ + Aggregate rows in df according to keyword groups. 
+ + Parameters + ---------- + df : pd.DataFrame + DataFrame with row index as technology names. + groups : dict + Keys = new aggregated name, + Values = list of substrings to match in the index. + + Returns + ------- + pd.DataFrame + """ + df_out = df.copy() + for new_name, keywords in groups.items(): + mask = df_out.index.to_series().str.contains("|".join(keywords)) + if mask.any(): + summed = df_out.loc[mask].sum() + df_out = df_out.drop(df_out.index[mask]) + df_out.loc[new_name] = summed + return df_out + + +def plot_capacity_comparison( + df, + scenarios=("HighDemand", "LowDemand"), + tech_colors=None, + plot_diff=False, + title="Electricity capacities", + ylabel="GW", + save_path=None, + figsize=(12, 6), + hatch_second="//", # hatch for second scenario in non-diff plot + show_dummy_legend=True, # legend with empty + hatched boxes +): + """ + Plot electricity capacities by carrier for multiple scenarios, or their difference. + + Parameters + ---------- + df : pd.DataFrame # index: carriers, columns: scenario names + scenarios : tuple(str, str) # (base, compare) + tech_colors : dict # carrier -> color + plot_diff : bool # if True, plot (|base| - |compare|) as signed bars + """ + if tech_colors is None: + tech_colors = {} + + base_name, cmp_name = scenarios + df = df.copy() + + # --- Establish supply/demand membership AND ORDER from base scenario only --- + base = df[base_name] + supply_order = base[base >= 0].sort_values(ascending=False).index + demand_order = ( + base[base < 0].abs().sort_values(ascending=False).index + ) # sort by |capacity| + + # Build plotting table + if plot_diff: + # signed difference (base - compare) + diff = (df[base_name].abs() - df[cmp_name].abs()).rename("Diff").to_frame() + df_plot_supply = diff.loc[supply_order.intersection(diff.index)] + df_plot_demand = diff.loc[demand_order.intersection(diff.index)] + plot_columns = ["Diff"] + else: + # normal comparison: keep both scenarios + df_plot_supply = df[[base_name, cmp_name]].loc[ + supply_order.intersection(df.index) + ] + df_plot_demand = df[[base_name, cmp_name]].loc[ + demand_order.intersection(df.index) + ] + # For the demand side, show magnitudes (upward bars). Clip to avoid negative heights.
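+        # Sign convention recap: supply/demand membership follows the sign in
+        # the base scenario; in diff mode a bar shows |base| - |compare|, so a
+        # positive bar means the base scenario holds more capacity.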
+ df_plot_demand = df_plot_demand.apply(lambda s: (-s).clip(lower=0)) + plot_columns = [base_name, cmp_name] + + # Positions + n_supply = len(df_plot_supply) + n_demand = len(df_plot_demand) + pos_supply = np.arange(n_supply) + pos_demand = np.arange(n_demand) + n_supply + 1 # +1 gap + bar_width = 0.35 + + # Plot + fig, ax = plt.subplots(figsize=figsize) + + # Supply bars + for j, col in enumerate(plot_columns): + if n_supply: + vals = df_plot_supply[col].values + ax.bar( + pos_supply + (j - 0.5) * bar_width + if len(plot_columns) > 1 + else pos_supply, + vals, + width=bar_width if len(plot_columns) > 1 else 0.8, + color=[tech_colors.get(t, "#1f77b4") for t in df_plot_supply.index], + hatch=( + "" + if (not plot_diff and col == base_name) + else (hatch_second if not plot_diff else "") + ), + alpha=0.9, + linewidth=0.0, + ) + + # Demand bars + for j, col in enumerate(plot_columns): + if n_demand: + vals = ( + df_plot_demand[col].values + if not plot_diff + else (df_plot_demand[col].values) + ) + # In diff mode we plot signed values; in normal mode they are already positive magnitudes + ax.bar( + pos_demand + (j - 0.5) * bar_width + if len(plot_columns) > 1 + else pos_demand, + vals, + width=bar_width if len(plot_columns) > 1 else 0.8, + color=[tech_colors.get(t, "#1f77b4") for t in df_plot_demand.index], + hatch=( + "" + if (not plot_diff and col == base_name) + else (hatch_second if not plot_diff else "") + ), + alpha=0.9, + linewidth=0.0, + ) + + # Divider line between supply and demand + if n_supply: + ax.axvline(x=n_supply - 0.5, color="black", linestyle="--", linewidth=1.0) + + # Baseline for signed diffs + if plot_diff: + ax.axhline(0, color="black", linewidth=1.0) + + # X labels + tech_labels = list(df_plot_supply.index) + list(df_plot_demand.index) + all_positions = np.concatenate([pos_supply, pos_demand]) + ax.set_xticks(all_positions) + ax.set_xticklabels(tech_labels, rotation=45, ha="right") + + ax.set_ylabel(ylabel) + ax.set_title(title) + ax.grid(True, axis="y", alpha=0.3) + + # Legend + if plot_diff: + handles = [ + mpatches.Patch( + facecolor="white", + edgecolor="black", + label=f"{base_name} - {cmp_name}", + ) + ] + ax.legend(handles=handles, title="Difference") + elif show_dummy_legend: + handles = [ + mpatches.Patch( + facecolor="white", edgecolor="black", hatch="", label=base_name + ), + mpatches.Patch( + facecolor="white", edgecolor="black", hatch=hatch_second, label=cmp_name + ), + ] + ax.legend(handles=handles, title="Scenario") + + plt.tight_layout() + if save_path: + plt.savefig(save_path, bbox_inches="tight") + plt.close() + + +if __name__ == "__main__": + if "snakemake" not in globals(): + import os + import sys + + from _helpers import mock_snakemake + + snakemake = mock_snakemake( + "regret_plots_lt", + ) + + configure_logging(snakemake) + config = snakemake.config + planning_horizons = snakemake.params.planning_horizons + scenarios = ["HighDemand", "LowDemand"] # config["run"]["name"] + tech_colors = snakemake.params.plotting["tech_colors"] + + # Load networks + networks = defaultdict(dict) + + for fn in snakemake.input.networks: + scenario = fn.split(os.sep)[-4] + year = int(re.search(r"_(\d{4})\.nc$", fn).group(1)) + networks[scenario][year] = pypsa.Network(fn) + + # Load variables + vars_dict = {} + + for fn in snakemake.input.regret_variables: + df = ( + pd.read_excel( + fn, + index_col=list(range(5)), + # index_col=["Model", "Scenario", "Region", "Variable", "Unit"], + sheet_name="data", + ) + .groupby(["Variable", "Unit"], dropna=False) + .sum() + 
+
+        if "HighDemand" in fn:
+            vars_dict["HighDemand"] = df
+        elif "LowDemand" in fn:
+            vars_dict["LowDemand"] = df
+
+    # ensure the output directory exists
+    os.makedirs(snakemake.params.output_dir, exist_ok=True)
+
+    # Capacity plot DE
+
+    tech_colors["gas (+CHP)"] = tech_colors["OCGT"]
+    tech_colors["biomass (+ CHP)"] = tech_colors["biomass"]
+    tech_colors["coal (+ CHP)"] = tech_colors["coal"]
+    tech_colors["oil (+ CHP)"] = tech_colors["oil"]
+    tech_colors["waste CHP"] = tech_colors["waste"]
+
+    kwargs = {
+        "groupby": ["bus", "carrier"],
+        "at_port": True,
+        "nice_names": False,
+    }
+
+    for year in planning_horizons:
+        # collect one Series per scenario; concat keeps the union of carriers
+        capa = {}
+        for scenario in scenarios:
+            n = networks[scenario][year]
+
+            capa[scenario] = (
+                n.statistics.optimal_capacity(
+                    bus_carrier=["AC", "low voltage"],
+                    **kwargs,
+                )
+                .filter(like="DE")
+                .groupby("carrier")
+                .sum()
+                .drop(
+                    ["AC", "DC", "electricity distribution grid"],
+                    errors="ignore",
+                )
+                .multiply(1e-3)  # MW → GW
+            )
+
+        capa_comp = pd.concat(capa, axis=1).fillna(0)
+
+        capa_comp_agg = aggregate_by_keywords(capa_comp, groups)
+        # drop carriers with less than 100 MW (0.1 GW) in both scenarios
+        capa_comp_agg = capa_comp_agg[(capa_comp_agg.abs() >= 0.1).any(axis=1)]
+
+        plot_capacity_comparison(
+            df=capa_comp_agg,
+            scenarios=["HighDemand", "LowDemand"],
+            tech_colors=tech_colors,
+            plot_diff=False,
+            title=f"Electricity capacities in DE: {year}",
+            save_path=snakemake.output.elec_capa_comp_de_2025
+            if year == 2025
+            else snakemake.params.output_dir + f"/elec_capa_comp_de_{year}.png",
+        )
+
+        plot_capacity_comparison(
+            df=capa_comp_agg,
+            scenarios=["HighDemand", "LowDemand"],
+            tech_colors=tech_colors,
+            plot_diff=True,
+            title=f"Difference of electricity capacities in DE: {year}",
+            save_path=snakemake.params.output_dir + f"/elec_capa_diff_de_{year}.png",
+        )
+
+    # Capacity plot outside DE
+
+    for year in planning_horizons:
+        capa = {}
+        for scenario in scenarios:
+            n = networks[scenario][year]
+
+            capa[scenario] = (
+                n.statistics.optimal_capacity(
+                    bus_carrier=["AC", "low voltage"],
+                    **kwargs,
+                )
+                .loc[
+                    lambda df: ~df.index.get_level_values("bus").str.contains(
+                        "DE", regex=False
+                    )
+                ]
+                .groupby("carrier")
+                .sum()
+                .drop(["AC", "DC", "electricity distribution grid"], errors="ignore")
+                .multiply(1e-3)  # MW → GW
+            )
+
+        capa_comp = pd.concat(capa, axis=1).fillna(0)
+
+        capa_comp_agg = aggregate_by_keywords(capa_comp, groups)
+        # drop carriers with less than 100 MW (0.1 GW) in both scenarios
+        capa_comp_agg = capa_comp_agg[(capa_comp_agg.abs() >= 0.1).any(axis=1)]
+
+        plot_capacity_comparison(
+            df=capa_comp_agg,
+            scenarios=["HighDemand", "LowDemand"],
+            tech_colors=tech_colors,
+            plot_diff=False,
+            title=f"Electricity capacities in EU (outside DE): {year}",
+            save_path=snakemake.params.output_dir + f"/elec_capa_comp_eu_{year}.png",
+        )
+
+        plot_capacity_comparison(
+            df=capa_comp_agg,
+            scenarios=["HighDemand", "LowDemand"],
+            tech_colors=tech_colors,
+            plot_diff=True,
+            title=f"Difference of electricity capacities in EU (outside DE): {year}",
+            save_path=snakemake.params.output_dir + f"/elec_capa_diff_eu_{year}.png",
+        )
+
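+    # A possible cross-check of the capacity plots above (hypothetical, not
+    # part of the workflow): the per-carrier energy balance, e.g.
+    #   n.statistics.energy_balance(bus_carrier="AC").groupby("carrier").sum()
+    # should mirror the supply/demand split used in plot_capacity_comparison.
+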
+    # Electricity demand as bar plot
+
+    for year in planning_horizons:
+        demand = {}
+        for scenario in scenarios:
+            n = networks[scenario][year]
+
+            demand[scenario] = (
+                n.statistics.withdrawal(bus_carrier=["low voltage", "AC"], **kwargs)
+                .filter(like="DE")
+                .groupby(["carrier"])
+                .sum()
+                .multiply(1e-6)  # MWh → TWh
+            )
+
+        demand_comp = pd.concat(demand, axis=1).fillna(0)
+
+        demand_comp_agg = aggregate_by_keywords(demand_comp, groups)
+        # drop carriers with less than 0.1 TWh demand in both scenarios
+        demand_comp_agg = demand_comp_agg[(demand_comp_agg.abs() >= 0.1).any(axis=1)]
+
+        plot_capacity_comparison(
+            df=demand_comp_agg,
+            scenarios=["HighDemand", "LowDemand"],
+            tech_colors=tech_colors,
+            plot_diff=False,
+            title=f"Electricity demand in DE: {year}",
+            ylabel="TWh",
+            save_path=snakemake.params.output_dir + f"/elec_demand_comp_de_{year}.png",
+        )
+
+        plot_capacity_comparison(
+            df=demand_comp_agg,
+            scenarios=["HighDemand", "LowDemand"],
+            tech_colors=tech_colors,
+            plot_diff=True,
+            title=f"Difference of electricity demand in DE: {year}",
+            ylabel="TWh",
+            save_path=snakemake.params.output_dir + f"/elec_demand_diff_de_{year}.png",
+        )
+
+    # ToDo
+
+    # Electricity demand temporal (+ import)
+    # Split electricity demand of distribution grid
+
+    # System cost CAPEX
+    capex_data = {}
+
+    for scenario in scenarios:
+        # Extract CAPEX data
+        df = vars_dict[scenario]
+        capex = df[
+            df.index.get_level_values("Variable").str.startswith("System Cost|CAPEX")
+        ]
+        # keep only top-level categories: variables such as
+        # "System Cost|CAPEX|Electricity" contain exactly two separators
+        capex_top = capex[
+            capex.index.get_level_values("Variable").str.count(r"\|") == 2
+        ]
+
+        # Reset index and prepare data
+        capex_reset = capex_top.reset_index()
+        capex_reset["Category"] = capex_reset["Variable"].str.split("|").str[2]
+
+        # Get year columns on first iteration
+        if scenario == scenarios[0]:
+            year_cols = [
+                col
+                for col in capex_reset.columns
+                if str(col) in ["2025", "2030", "2035"]
+            ]
+
+        # Store processed data
+        capex_data[scenario] = capex_reset.set_index("Category")[year_cols].fillna(0)
+
+    # Extract for easier access
+    capex_low_plot = capex_data["LowDemand"]
+    capex_high_plot = capex_data["HighDemand"]
+
+    # Set up the plot
+    fig, ax = plt.subplots(figsize=(12, 8))
+
+    # Define years and categories
+    years = [str(col) for col in year_cols]  # Convert to strings for labels
+    categories = capex_low_plot.index.tolist()
+
+    # Define colors for each category using tech_colors
+    category_color_map = {
+        "Electricity": "gold",
+        "Gases": tech_colors["gas"],
+        "Heat": tech_colors["heat"],
+        "Hydrogen": tech_colors["H2"],
+        "Liquids": tech_colors["oil"],
+        "Methanol": tech_colors["methanol"],
+    }
+
+    # Create colors list based on categories
+    colors = [category_color_map.get(cat, "#808080") for cat in categories]
+
+    # Set up bar positions
+    x = np.arange(len(years))
+    width = 0.35
+
+    # Create stacked bars
+    bottom_low = np.zeros(len(years))
+    bottom_high = np.zeros(len(years))
+
+    for i, category in enumerate(categories):
+        # Get values for each year
+        low_values = [capex_low_plot.loc[category, col] for col in year_cols]
+        high_values = [capex_high_plot.loc[category, col] for col in year_cols]
+
+        # Plot bars - LowDemand with hatching, HighDemand without
+        ax.bar(
+            x - width / 2,
+            low_values,
+            width,
+            bottom=bottom_low,
+            label=category,
+            color=colors[i],
+            alpha=0.8,
+            hatch="///",
+        )
+        ax.bar(
+            x + width / 2,
+            high_values,
+            width,
+            bottom=bottom_high,
+            color=colors[i],
+            alpha=0.8,
+        )
+
+        # Add category values inside bars (only above a readability threshold)
+        for j in range(len(years)):
+            # LowDemand category values
+            if low_values[j] > 0.5:
+                ax.text(
+                    j - width / 2,
+                    bottom_low[j] + low_values[j] / 2,
+                    f"{low_values[j]:.1f}",
+                    ha="center",
+                    va="center",
+                    fontsize=8,
+                    fontweight="bold",
+                    color="white",
+                )
+
+            # HighDemand category values
+            if high_values[j] > 0.5:
+                ax.text(
+                    j + width / 2,
+                    bottom_high[j] + high_values[j] / 2,
+                    f"{high_values[j]:.1f}",
+                    ha="center",
+                    va="center",
+                    fontsize=8,
+                    fontweight="bold",
+                    color="white",
+                )
+
+        # Update bottoms for stacking
+        bottom_low += low_values
+        bottom_high += high_values
+
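+    # Note on the loop above: matplotlib stacks bars manually by passing the
+    # running totals as `bottom`, i.e. for two categories a and b:
+    #   ax.bar(x, a, bottom=0); ax.bar(x, b, bottom=a)
+    # which is what the bottom_low/bottom_high accumulators implement.
+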
+    # Customize the plot
+    ax.set_xlabel("Year", fontsize=12)
+    ax.set_ylabel("billion €", fontsize=12)
+    ax.set_title(
+        "System Cost CAPEX Comparison: LowDemand vs HighDemand",
+        fontsize=14,
+        fontweight="bold",
+    )
+    ax.set_xticks(x)
+    ax.set_xticklabels(years)
+
+    # Category legend elements
+    category_elements = [
+        Rectangle((0, 0), 1, 1, facecolor=colors[i], alpha=0.8, label=cat)
+        for i, cat in enumerate(categories)
+    ]
+
+    # Scenario legend elements
+    scenario_elements = [
+        Rectangle(
+            (0, 0),
+            1,
+            1,
+            facecolor="gray",
+            alpha=0.8,
+            hatch="///",
+            label="LowDemand",
+        ),
+        Rectangle((0, 0), 1, 1, facecolor="gray", alpha=0.8, label="HighDemand"),
+    ]
+
+    # Create separate legends; the second ax.legend() call replaces the first,
+    # so the category legend has to be re-attached via ax.add_artist below.
+    category_legend = ax.legend(
+        handles=category_elements,
+        loc="upper left",
+        bbox_to_anchor=(1.05, 1),
+        title="Categories",
+    )
+    ax.legend(
+        handles=scenario_elements,
+        loc="upper left",
+        bbox_to_anchor=(1.05, 0.6),
+        title="Scenarios",
+    )
+    ax.add_artist(category_legend)
+
+    # Add total CAPEX values on top of bars
+    for i, year in enumerate(years):
+        ax.text(
+            i - width / 2,
+            bottom_low[i] + max(max(bottom_low), max(bottom_high)) * 0.02,
+            f"{bottom_low[i]:.1f}",
+            ha="center",
+            fontweight="bold",
+            fontsize=9,
+        )
+        ax.text(
+            i + width / 2,
+            bottom_high[i] + max(max(bottom_low), max(bottom_high)) * 0.02,
+            f"{bottom_high[i]:.1f}",
+            ha="center",
+            fontweight="bold",
+            fontsize=9,
+        )
+
+    # Add grid
+    ax.grid(True, alpha=0.3, axis="y")
+    ax.set_axisbelow(True)
+
+    # Adjust y-axis limits to accommodate the top labels
+    y_max = max(max(bottom_low), max(bottom_high))
+    ax.set_ylim(0, y_max * 1.1)
+    plt.savefig(snakemake.params.output_dir + "/capex_de.png", bbox_inches="tight")
+    plt.close()
diff --git a/scripts/pypsa-de/solve_regret_network.py b/scripts/pypsa-de/solve_regret_network.py
new file mode 100644
index 000000000..fa5f2ca1a
--- /dev/null
+++ b/scripts/pypsa-de/solve_regret_network.py
@@ -0,0 +1,90 @@
+import importlib.util
+import logging
+import pathlib
+import re
+
+import numpy as np
+import pypsa
+
+from scripts._benchmark import memory_logger
+from scripts._helpers import (
+    configure_logging,
+    mock_snakemake,
+    set_scenario_config,
+    update_config_from_wildcards,
+)
+from scripts.solve_network import solve_network
+
+_spec_path = pathlib.Path(__file__).resolve().parent / "modify_prenetwork.py"
+_spec = importlib.util.spec_from_file_location(
+    "scripts.pypsa_de.modify_prenetwork", _spec_path
+)
+_modify_prenetwork = importlib.util.module_from_spec(_spec)
+_spec.loader.exec_module(_modify_prenetwork)
+remove_flexibility_options = _modify_prenetwork.remove_flexibility_options
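+# Note: modify_prenetwork.py is loaded by file path because the "pypsa-de"
+# directory name contains a hyphen and cannot be imported as a regular
+# package; "scripts.pypsa_de.modify_prenetwork" is only a synthetic module
+# name for the loaded spec.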
+
+
+logger = logging.getLogger(__name__)
+
+if __name__ == "__main__":
+    if "snakemake" not in globals():
+        snakemake = mock_snakemake(
+            "solve_regret_network",
+            clusters=27,
+            opts="",
+            sector_opts="none",
+            planning_horizons="2030",
+            decision="LowDemand",
+            run="HighDemand",
+            sensitivity="base",
+        )
+
+    configure_logging(snakemake)
+    set_scenario_config(snakemake)
+    update_config_from_wildcards(snakemake.config, snakemake.wildcards)
+
+    n = pypsa.Network(snakemake.input.regret_prenetwork)
+
+    # Touch the output file to ensure it exists
+    pathlib.Path(snakemake.output.regret_network).touch()
+
+    planning_horizons = snakemake.wildcards.get("planning_horizons", None)
+    logging_frequency = snakemake.config.get("solving", {}).get(
+        "mem_logging_frequency", 30
+    )
+    np.random.seed(snakemake.params.solving["options"].get("seed", 123))
+
+    if "no_flex" in snakemake.params.st_sensitivity:
+        logger.info(
+            "Running sensitivity of the short-term model with fewer flexibility options."
+        )
+        remove_flexibility_options(n)
+
+    gas_price = re.findall(r"gas_price_(\d{2,3})", snakemake.params.st_sensitivity)
+    if gas_price:
+        gas_price = int(gas_price[0])
+        logger.info(
+            f"Running sensitivity of the short-term model with the gas price set to {gas_price} €/MWh."
+        )
+        n.generators.loc[n.generators.carrier == "gas primary", "marginal_cost"] = (
+            gas_price
+        )
+
+    with memory_logger(
+        filename=getattr(snakemake.log, "memory", None), interval=logging_frequency
+    ) as mem:
+        solve_network(
+            n,
+            config=snakemake.config,
+            params=snakemake.params,
+            solving=snakemake.params.solving,
+            planning_horizons=planning_horizons,
+            rule_name=snakemake.rule,
+            log_fn=snakemake.log.solver,
+            snakemake=snakemake,
+        )
+        logger.info(f"Maximum memory usage: {mem.mem_usage}")
+
+    n.meta = dict(snakemake.config, wildcards=dict(snakemake.wildcards))
+
+    n.export_to_netcdf(snakemake.output.regret_network)
diff --git a/scripts/pypsa-de/solve_st_low_res_network.py b/scripts/pypsa-de/solve_st_low_res_network.py
new file mode 100644
index 000000000..a2e12f0b9
--- /dev/null
+++ b/scripts/pypsa-de/solve_st_low_res_network.py
@@ -0,0 +1,87 @@
+import importlib.util
+import logging
+import pathlib
+import re
+
+import numpy as np
+import pypsa
+
+from scripts._benchmark import memory_logger
+from scripts._helpers import (
+    configure_logging,
+    mock_snakemake,
+    set_scenario_config,
+    update_config_from_wildcards,
+)
+from scripts.solve_network import solve_network
+
+# (modify_prenetwork.py is loaded by file path for the same reason as in
+# solve_regret_network.py)
+_spec_path = pathlib.Path(__file__).resolve().parent / "modify_prenetwork.py"
+_spec = importlib.util.spec_from_file_location(
+    "scripts.pypsa_de.modify_prenetwork", _spec_path
+)
+_modify_prenetwork = importlib.util.module_from_spec(_spec)
+_spec.loader.exec_module(_modify_prenetwork)
+remove_flexibility_options = _modify_prenetwork.remove_flexibility_options
+
+
+logger = logging.getLogger(__name__)
+
+if __name__ == "__main__":
+    if "snakemake" not in globals():
+        snakemake = mock_snakemake(
+            "solve_st_low_res_network",
+            clusters=27,
+            opts="",
+            sector_opts="none",
+            planning_horizons="2030",
+            decision="LowDemand",
+            run="HighDemand",
+            sensitivity="base",
+        )
+
+    configure_logging(snakemake)
+    set_scenario_config(snakemake)
+    update_config_from_wildcards(snakemake.config, snakemake.wildcards)
+
+    n = pypsa.Network(snakemake.input.st_low_res_prenetwork)
+
+    planning_horizons = snakemake.wildcards.get("planning_horizons", None)
+    logging_frequency = snakemake.config.get("solving", {}).get(
+        "mem_logging_frequency", 30
+    )
+    np.random.seed(snakemake.params.solving["options"].get("seed", 123))
+
+    if "no_flex" in snakemake.params.st_sensitivity:
+        logger.info(
+            "Running sensitivity of the short-term model with fewer flexibility options."
+        )
+        remove_flexibility_options(n)
+
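+    # The sensitivity wildcard encodes the gas price in its name: e.g. a
+    # (hypothetical) sensitivity "gas_price_120" sets the marginal cost of
+    # all "gas primary" generators to 120 €/MWh below.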
+    gas_price = re.findall(r"gas_price_(\d{2,3})", snakemake.params.st_sensitivity)
+    if gas_price:
+        gas_price = int(gas_price[0])
+        logger.info(
+            f"Running sensitivity of the short-term model with the gas price set to {gas_price} €/MWh."
+        )
+        n.generators.loc[n.generators.carrier == "gas primary", "marginal_cost"] = (
+            gas_price
+        )
+
+    with memory_logger(
+        filename=getattr(snakemake.log, "memory", None), interval=logging_frequency
+    ) as mem:
+        solve_network(
+            n,
+            config=snakemake.config,
+            params=snakemake.params,
+            solving=snakemake.params.solving,
+            planning_horizons=planning_horizons,
+            rule_name=snakemake.rule,
+            log_fn=snakemake.log.solver,
+            snakemake=snakemake,
+        )
+        logger.info(f"Maximum memory usage: {mem.mem_usage}")
+
+    n.meta = dict(snakemake.config, wildcards=dict(snakemake.wildcards))
+
+    n.export_to_netcdf(snakemake.output.st_low_res_network)
diff --git a/scripts/solve_network.py b/scripts/solve_network.py
index 55871d5f3..15e9ab519 100644
--- a/scripts/solve_network.py
+++ b/scripts/solve_network.py
@@ -26,7 +26,9 @@
 based on the rule :mod:`solve_network`.
 """
 
+import contextlib
 import importlib
+import io
 import logging
 import os
 import pathlib
@@ -43,6 +45,7 @@
 import yaml
 from pypsa.descriptors import get_activity_mask
 from pypsa.descriptors import get_switchable_as_dense as get_as_dense
+from snakemake.script import Snakemake
 
 from scripts._benchmark import memory_logger
 from scripts._helpers import (
@@ -137,7 +140,9 @@ def check_p_min_p_max(p_nom_max):
             n.buses.loc[bus, name] = df_carrier.p_nom_max.values
 
 
-def add_land_use_constraint(n: pypsa.Network, planning_horizons: str) -> None:
+def add_land_use_constraint(
+    n: pypsa.Network, planning_horizons: str, regret_run: bool = False
+) -> None:
     """
     Add land use constraints for renewable energy potential.
 
@@ -164,6 +169,11 @@ def add_land_use_constraint(n: pypsa.Network, planning_horizons: str) -> None:
         "offwind-dc",
         "offwind-float",
     ]:
+        if regret_run:
+            logger.info(
+                f"Skipping land use constraint adjustment for {carrier} in planning horizon {planning_horizons} because this is a regret run."
+            )
+            continue
         ext_i = (n.generators.carrier == carrier) & ~n.generators.p_nom_extendable
         grouper = n.generators.loc[ext_i].index.str.replace(
             f" {carrier}.*$", "", regex=True
@@ -188,7 +198,9 @@
     n.generators["p_nom_max"] = n.generators["p_nom_max"].clip(lower=0)
 
 
-def add_solar_potential_constraints(n: pypsa.Network, config: dict) -> None:
+def add_solar_potential_constraints(
+    n: pypsa.Network, config: dict, regret_run: bool = False
+) -> None:
     """
     Add constraint to make sure the sum capacity of all solar technologies
     (fixed, tracking, ets. ) is below the region potential.
@@ -206,6 +218,12 @@
     rename = {} if PYPSA_V1 else {"Generator-ext": "Generator"}
 
     solar_carriers = ["solar", "solar-hsat"]
+
+    if regret_run:
+        logger.info(
+            "Skipping solar potential constraint adjustment because this is a regret run."
+        )
+        return
     solar = n.generators[
         n.generators.carrier.isin(solar_carriers) & n.generators.p_nom_extendable
     ].index
@@ -427,6 +445,7 @@ def prepare_network(
     planning_horizons: str | None,
     co2_sequestration_potential: dict[str, float],
     limit_max_growth: dict[str, Any] | None = None,
+    regret_run: bool = False,
 ) -> None:
     """
     Prepare network with various constraints and modifications.
@@ -480,6 +499,14 @@ def prepare_network(
             p_nom=1e9,  # kW
         )
 
+    n.generators.loc[
+        (n.generators.carrier == "load")
+        & (
+            n.generators.index.str.contains("non-sequestered HVC|process emissions")
+        ),
+        "sign",
+    ] *= -1
+
     if solve_opts.get("curtailment_mode"):
         n.add("Carrier", "curtailment", color="#fedfed", nice_name="Curtailment")
         n.generators_t.p_min_pu = n.generators_t.p_max_pu
@@ -516,7 +543,7 @@
         n.snapshot_weightings[:] = 8760.0 / nhours
 
     if foresight == "myopic":
-        add_land_use_constraint(n, planning_horizons)
+        add_land_use_constraint(n, planning_horizons, regret_run)
 
     if foresight == "perfect":
         add_land_use_constraint_perfect(n)
@@ -876,7 +903,7 @@ def add_TES_energy_to_power_ratio_constraints(n: pypsa.Network) -> None:
 
     if indices_charger_p_nom_extendable.empty or indices_stores_e_nom_extendable.empty:
         logger.warning(
-            "No valid extendable charger links or stores found for TES energy-to-power constraints.Not enforcing TES energy-to-power ratio constraints!"
+            "No valid extendable charger links or stores found for TES energy-to-power constraints. Not enforcing TES energy-to-power ratio constraints!"
        )
        return
 
@@ -1166,7 +1193,10 @@ def add_co2_atmosphere_constraint(n, snapshots):
 
 
 def extra_functionality(
-    n: pypsa.Network, snapshots: pd.DatetimeIndex, planning_horizons: str | None = None
+    n: pypsa.Network,
+    snapshots: pd.DatetimeIndex,
+    snakemake: Snakemake,
+    planning_horizons: str | None = None,
 ) -> None:
     """
     Add custom constraints and functionality.
@@ -1177,6 +1207,8 @@ def extra_functionality(
         The PyPSA network instance with config and params attributes
     snapshots : pd.DatetimeIndex
         Simulation timesteps
+    snakemake : snakemake.script.Snakemake
+        Snakemake instance for accessing workflow parameters
    planning_horizons : str, optional
        The current planning horizon year or None in perfect foresight
 
@@ -1208,7 +1240,9 @@ def extra_functionality(
     ) and {"solar-hsat", "solar"}.issubset(
         config["electricity"]["extendable_carriers"]["Generator"]
     ):
-        add_solar_potential_constraints(n, config)
+        add_solar_potential_constraints(
+            n, config, snakemake.params.get("regret_run", False)
+        )
 
     if n.config.get("sector", {}).get("tes", False):
         if n.buses.index.str.contains(
@@ -1278,6 +1312,7 @@ def solve_network(
     config: dict,
     params: dict,
     solving: dict,
+    snakemake: Snakemake,
     rule_name: str | None = None,
     planning_horizons: str | None = None,
     **kwargs,
@@ -1295,6 +1330,8 @@
         Dictionary of solving parameters
     solving : Dict
         Dictionary of solving options and configuration
+    snakemake : snakemake.script.Snakemake
+        Snakemake instance for accessing workflow parameters
     rule_name : str, optional
         Name of the snakemake rule being executed
     planning_horizons : str, optional
@@ -1327,7 +1365,7 @@
     )
     kwargs["solver_name"] = solving["solver"]["name"]
     kwargs["extra_functionality"] = partial(
-        extra_functionality, planning_horizons=planning_horizons
+        extra_functionality, snakemake=snakemake, planning_horizons=planning_horizons
     )
     kwargs["transmission_losses"] = cf_solving.get("transmission_losses", False)
     kwargs["linearized_unit_commitment"] = cf_solving.get(
@@ -1381,9 +1419,11 @@
         raise RuntimeError("Solving status 'warning'. Discarding solution.")
 
     if "infeasible" in condition:
-        labels = n.model.compute_infeasibilities()
-        logger.info(f"Labels:\n{labels}")
-        n.model.print_infeasibilities()
+        # print_infeasibilities() writes to stdout; capture it into the log
+        buf = io.StringIO()
+        with contextlib.redirect_stdout(buf):
+            n.model.print_infeasibilities()
+        logger.info(buf.getvalue())
         raise RuntimeError("Solving status 'infeasible'. Infeasibilities computed.")
 
     if status == "warning":
@@ -1399,11 +1440,12 @@
         from scripts._helpers import mock_snakemake
 
         snakemake = mock_snakemake(
-            "solve_sector_network",
+            "solve_sector_network_myopic",
+            simpl="",
+            clusters=27,
             opts="",
-            clusters="5",
-            configfiles="config/test/config.overnight.yaml",
-            sector_opts="",
+            sector_opts="none",
+            run="LowDemand",
             planning_horizons="2030",
         )
     configure_logging(snakemake)
@@ -1415,7 +1457,10 @@
     np.random.seed(solve_opts.get("seed", 123))
 
     n = pypsa.Network(snakemake.input.network)
-    planning_horizons = snakemake.wildcards.get("planning_horizons", None)
+    # EEG sweep runs label networks by eeg_sweep_year instead of planning_horizons
+    planning_horizons = snakemake.wildcards.get(
+        "planning_horizons", snakemake.wildcards.get("eeg_sweep_year", None)
+    )
 
     prepare_network(
         n,
@@ -1437,6 +1481,7 @@
         config=snakemake.config,
         params=snakemake.params,
         solving=snakemake.params.solving,
+        snakemake=snakemake,
         planning_horizons=planning_horizons,
         rule_name=snakemake.rule,
         log_fn=snakemake.log.solver,