diff --git a/.flake8 b/.flake8 new file mode 100644 index 00000000..c981be61 --- /dev/null +++ b/.flake8 @@ -0,0 +1,7 @@ +[flake8] +extend-exclude = docs +max-line-length = 88 +extend-ignore = E203, F841 +count = true +statistics = true +show-source = true diff --git a/.isort.cfg b/.isort.cfg new file mode 100644 index 00000000..cc5b9a8a --- /dev/null +++ b/.isort.cfg @@ -0,0 +1,4 @@ +[settings] +profile = black +multi_line_output = 3 +lines_between_types = 1 diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml new file mode 100644 index 00000000..cc65366d --- /dev/null +++ b/.pre-commit-config.yaml @@ -0,0 +1,24 @@ +repos: +- repo: https://github.com/pre-commit/pre-commit-hooks + rev: v4.3.0 + hooks: + - id: check-yaml + - id: end-of-file-fixer + - id: trailing-whitespace +- repo: https://github.com/psf/black + rev: 22.8.0 + hooks: + - id: black +- repo: https://github.com/pycqa/isort + rev: 5.10.1 + hooks: + - id: isort + name: isort (python) +- repo: https://github.com/asottile/pyupgrade + rev: v2.38.0 + hooks: + - id: pyupgrade +- repo: https://github.com/pycqa/flake8 + rev: 5.0.4 + hooks: + - id: flake8 diff --git a/README.rst b/README.rst index 3aa23ff0..5931403a 100644 --- a/README.rst +++ b/README.rst @@ -4,7 +4,7 @@ .. image:: https://openegoproject.files.wordpress.com/2017/02/open_ego_logo_breit.png?w=400 - + *A cross-grid-level electricity grid and storage optimization tool* | `openegoproject.wordpress.com `_ @@ -16,8 +16,8 @@ eGo Integrated optimization of flexibility options and grid extension measures for power grids based on `eTraGo `_ and -`eDisGo `_. The Documentation of the eGo tool -can be found on +`eDisGo `_. The Documentation of the eGo tool +can be found on `openego.readthedocs.io `_ . .. contents:: @@ -35,7 +35,7 @@ In case of installation errors of pypsa-fork use: .. code-block:: - $ pip3 install -e git+https://github.com/openego/PyPSA@master#egg=0.11.0fork + $ pip3 install -e git+https://github.com/openego/PyPSA@master#egg=0.11.0fork ---------------------------- @@ -46,10 +46,12 @@ Create a virtualenvironment and activate it: .. code-block:: - $ virtualenv venv --clear -p python3.5 + $ virtualenv venv --clear -p python3.8 $ source venv/bin/activate - $ cd venv - $ pip3 install -e git+https://github.com/openego/eGo@dev#egg=eGo --process-dependency-links + $ cd path/to/eGo + $ python -m pip install -e .[full] + $ pre-commit install # install pre-commit hooks + ------- License @@ -84,6 +86,6 @@ this program. If not, see https://www.gnu.org/licenses/. .. |readthedocs| image:: https://readthedocs.org/projects/openego/badge/?version=master :target: http://openego.readthedocs.io/en/latest/?badge=master :alt: Documentation Status - + .. |zenodo| image:: https://zenodo.org/badge/87306120.svg :target: https://zenodo.org/badge/latestdoi/87306120 diff --git a/doc/_static/ego_example_iplot_map.html b/doc/_static/ego_example_iplot_map.html index 16f51970..34c5d0ab 100644 --- a/doc/_static/ego_example_iplot_map.html +++ b/doc/_static/ego_example_iplot_map.html @@ -1,5 +1,5 @@ - + @@ -14,7 +14,7 @@ - + - + - - + + @@ -92,7 +92,7 @@ - +
[The remaining hunks of doc/_static/ego_example_iplot_map.html are whitespace-only cleanups of the embedded map-legend CSS and the float_image template markup; the minified HTML fragments carry no further information.]
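One remark on the new ``.flake8`` above: ``extend-ignore = E203`` is needed because black and flake8 disagree about whitespace before ``:`` in slices, so E203 must be silenced for black-formatted code to pass. A minimal illustration in plain Python (assuming the black/flake8 versions pinned in ``.pre-commit-config.yaml``):

.. code-block:: python

    # black formats slices with complex bounds like this; flake8's E203
    # ("whitespace before ':'") would flag the space before the colon.
    ham = list(range(10))
    lower, upper, offset = 2, 8, 1
    print(ham[lower + offset : upper + offset])  # -> [3, 4, 5, 6, 7, 8]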
diff --git a/doc/api/modules.rst b/doc/api/modules.rst index 6570597a..ed06c8c5 100644 --- a/doc/api/modules.rst +++ b/doc/api/modules.rst @@ -12,12 +12,12 @@ Overview of modules :maxdepth: 7 ego.tools - + scenario_settings.json ====================== -With the ``scenario_settings.json`` file you set up your calcualtion. -The file can be found on +With the ``scenario_settings.json`` file you set up your calculation. +The file can be found on `github `_. .. json:object:: scenario_setting.json :property json global: Global parameters of your eGo calculation :property json eTraGo: eTraGo's parameters (see eTraGo section) :property json eDisGo: eDisGo's parameters (see eDisGo section) .. json:object:: global :property bool eTraGo: Decide if you want to run the eTraGo tool (HV/EHV grid optimization). :property bool eDisGo: Decide if you want to run the eDisGo tool (MV grid optimization). Please note: eDisGo requires eTraGo = ``true``. - :property string csv_import_eTraGo: ``false`` or path to previously calculated eTraGo results (in order to reload the results instead of performing a new run). - :property string csv_import_eDisGo: ``false`` or path to previously calculated eDisGo results (in order to reload the results instead of performing a new run). + :property string csv_import_eTraGo: ``false`` or path to previously calculated eTraGo results (in order to reload the results instead of performing a new run). + :property string csv_import_eDisGo: ``false`` or path to previously calculated eDisGo results (in order to reload the results instead of performing a new run). + - .. json:object:: eTraGo This section of :json:object:`scenario_setting.json` contains all input parameters for the eTraGo tool. A description of the parameters can be found `here. `_ .. json:object:: eDisGo This section of :json:object:`scenario_setting.json` contains all input parameters for the eDisGo tool and the clustering of MV grids. - :property string db: Name of your database (e.g.``''oedb''``). eDisGo queries generator data from this database. Please note that this parameters is automatically overwritten in eDisGo's configuration files. - :property string gridversion: ``null`` or *open_eGo* dataset version (e.g. ``''v0.4.5''``). If ``null``, *open_eGo*'s model_draft is used. Please note that this parameters is automatically overwritten in eDisGo's configuration files. - :property string ding0_files: Path to the MV grid files (created by `ding0 `_) (e.g. ``''data/MV_grids/20180713110719''``) - :property string choice_mode: Mode that eGo uses to chose MV grids out of the files in **ding0_files** (e.g. ``''manual''``, ``''cluster''`` or ``''all''``). If ``''manual''`` is chosen, the parameter **manual_grids** must contain a list of the desired grids. If ``''cluster''`` is chosen, **no_grids** must specify the desired number of clusters and **cluster_attributes** must specify the applied cluster attributes. If ``''all''`` is chosen, all MV grids from **ding0_files** are calculated. - :property list cluster_attributes: List of strings containing the desired cluster attributes. Available attributes are: ``''farthest_node''``, ``''wind_cap''``, ``''solar_cap''`` and ``''extended_storage''``, thus an exemplary list looks like ``["farthest_node", "wind_cap", "solar_cap", "extended_storage"]``. ``''farthest_node''`` represents the longest path within each grid, ``''wind_cap''`` the installed wind capacity within each grid, ``''solar_cap''`` the installed solar capacity within each grid and ``''extended_storage''`` the installed storage units (as calculated by eTraGo).
Please note that ``''extended_storage''`` is only available in combination with eTraGo datasets that optimized storage extension. Otherwise this attribute is ignored. + :property string gridversion: This parameter is currently not used. + :property string grid_path: Path to the MV grid files (created by `ding0 `_) (e.g. ``''data/MV_grids/20180713110719''``) + :property string choice_mode: Mode that eGo uses to choose MV grids out of the files in **grid_path** (e.g. ``''manual''``, ``''cluster''`` or ``''all''``). If ``''manual''`` is chosen, the parameter **manual_grids** must contain a list of the desired grids. If ``''cluster''`` is chosen, **n_clusters** must specify the desired number of clusters and **cluster_attributes** must specify the applied cluster attributes. If ``''all''`` is chosen, all MV grids from **grid_path** are calculated. + :property list cluster_attributes: List of strings containing the desired cluster attributes. Available attributes are all attributes returned from :py:func:`~ego.mv_clustering.mv_clustering.get_cluster_attributes`. :property bool only_cluster: If ``true``, eGo only identifies cluster results, but performs no eDisGo run. Please note that for **only_cluster** an eTraGo run or dataset must be provided. - :property list manual_grids: List of MV grid ID's (*open_eGo* HV/MV substation ID's) is case of **choice_mode** = ``''manual''`` (e.g. ``[1718,1719]``). Ohterwise this parameter is ignored. - :property int no_grids: Number of MV grid clusters (from all files in **ding0_files**, a specified number of representative clusters is calculated) in case of **choice_mode** = ``''cluster''``. Otherwise this parameter is ignored. + :property list manual_grids: List of MV grid ID's in case of **choice_mode** = ``''manual''`` (e.g. ``[1718,1719]``). Otherwise this parameter is ignored. + :property int n_clusters: Number of MV grid clusters (from all grids in **grid_path**, a specified number of representative clusters is calculated) in case of **choice_mode** = ``''cluster''``. Otherwise this parameter is ignored. :property bool parallelization: If ``false``, eDisGo is used in a consecutive way (this may take a very long time). In order to increase the performance of MV grid simulations, ``true`` allows the parallel calculation of MV grids. If **parallelization** = ``true``, **max_calc_time** and **max_workers** must be specified. :property float max_calc_time: Maximum calculation time in hours for eDisGo simulations. The calculation is terminated after this time and all costs are extrapolated based on the unfinished simulation. Please note that this parameter is only used if **parallelization** = ``true``. :property int max_workers: Number of workers (CPUs) that are allocated to the simulation. If the given value exceeds the number of available workers, it is reduced to the number of available workers. Please note that this parameter is only used if **parallelization** = ``true``. - :property bool initial_reinforcement: This parameter must be set ``true``. - :property bool apply_curtailment: If ``true``, eDisGo applies and optimizes the curtailment (as calculated by eTraGo) within each MV grid. - :property float curtailment_voltage_threshold: p.u. overvoltage limit (e.g. ``0.05``). If this p.u. overvoltage is exceeded at any bus, curtailment is applied. - :property bool storage_distribution: If ``true``, eDisGo attempts to integrate battery storages (as calculated by eTraGo) into MV grids in order to reduce grid reinforcement. - :property float max_cos_phi_renewable: Maximum power factor for wind and solar generators in MV grids (e.g. ``0.9``). If the reactive power (as calculated by eTraGo) exceeds this power factor, the reactive power is reduced in order to reach the power factor conditions. :property string solver: Solver eDisGo uses to optimize the curtailment and storage integration (e.g. ``''gurobi''``). - :property string timesteps_pfa: Method eDisGo uses for the storage integration (e.g. ``''snapshot_analysis''``). :property string results: Path to folder where eDisGo's results will be saved. - + :property list tasks: List of strings defining the tasks to run. The eDisGo calculation for each MV grid can be divided into separate tasks, which is helpful in case one task fails and calculations do not need to be restarted from the beginning. The following tasks exist: ``''1_setup_grid''``, ``''2_specs_overlying_grid''``, ``''3_temporal_complexity_reduction''``, ``''4_optimisation''``, ``''5_grid_reinforcement''``. + appl.py
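For orientation, an eDisGo block assembled from the parameters documented above could look as follows. This is a hypothetical sketch, not a shipped default: only ``grid_path``, ``manual_grids``, ``solver`` and the task names reuse examples from the property descriptions; all other values are invented.

.. code-block:: python

    # Hypothetical eDisGo section of scenario_setting.json, written as a
    # Python dict for illustration; every value is an example, not a default.
    edisgo_settings = {
        "gridversion": None,  # currently not used
        "grid_path": "data/MV_grids/20180713110719",
        "choice_mode": "cluster",  # "manual", "cluster" or "all"
        "cluster_attributes": [],  # fill from get_cluster_attributes()
        "only_cluster": False,
        "manual_grids": [1718, 1719],  # only read if choice_mode == "manual"
        "n_clusters": 5,  # only read if choice_mode == "cluster"
        "parallelization": True,
        "max_calc_time": 10,  # hours; only read if parallelization is true
        "max_workers": 2,
        "solver": "gurobi",
        "results": "results/run_1",
        "tasks": [
            "1_setup_grid",
            "2_specs_overlying_grid",
            "3_temporal_complexity_reduction",
            "4_optimisation",
            "5_grid_reinforcement",
        ],
    }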
diff --git a/doc/conf.py b/doc/conf.py index a64f5fd5..3a6bd854 100644 --- a/doc/conf.py +++ b/doc/conf.py @@ -6,7 +6,11 @@ The documentation is available on RTD: https://openego.readthedocs.io""" -__copyright__ = "Flensburg University of Applied Sciences, Europa-Universität Flensburg, Centre for Sustainable Energy Systems, DLR-Institute for Networked Energy Systems" +__copyright__ = ( + "Flensburg University of Applied Sciences, Europa-Universität " + "Flensburg, Centre for Sustainable Energy Systems, DLR-Institute " + "for Networked Energy Systems" +) __license__ = "GNU Affero General Public License Version 3 (AGPL-3.0)" __author__ = "wolf_bunke" @@ -25,43 +29,44 @@ # All configuration values have a default; values that are commented out # serve to show the default. -import sys import os -import shlex +import sys + from unittest.mock import MagicMock -#from mock import Mock as MagicMock + +import sphinx_rtd_theme + +# from mock import Mock as MagicMock # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. -#sys.path.insert(0, os.path.abspath('.')) -sys.path.insert(0, os.path.abspath('../')) -sys.path.insert(0, os.path.abspath('../..')) +# sys.path.insert(0, os.path.abspath('.')) +sys.path.insert(0, os.path.abspath("../")) +sys.path.insert(0, os.path.abspath("../..")) # -- General configuration ------------------------------------------------ # If your documentation needs a minimal Sphinx version, state it here. -#needs_sphinx = '1.0' +# needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones.
extensions = [ - 'sphinx.ext.autodoc', - 'sphinx.ext.intersphinx', - 'sphinx.ext.todo', - 'sphinx.ext.coverage', - 'sphinx.ext.imgmath' , - 'sphinx.ext.viewcode', - 'sphinx.ext.autosummary', -# 'sphinxcontrib.napoleon',#enable Napoleon interpreter of docstrings Sphinx v<=1.2 - 'sphinx.ext.napoleon', #enable Napoleon Sphinx v>1.3 -# 'sphinx_paramlinks',#to have links to the types of the parameters of the functions - 'numpydoc', - 'sphinxcontrib.httpdomain', # for restfull API - 'sphinxcontrib.autohttp.flask', - 'sphinx.ext.extlinks', # enables external links with a key - 'sphinxjsondomain' + "sphinx.ext.autodoc", + "sphinx.ext.intersphinx", + "sphinx.ext.todo", + "sphinx.ext.coverage", + "sphinx.ext.imgmath", + "sphinx.ext.viewcode", + "sphinx.ext.autosummary", + "sphinx.ext.napoleon", # enable Napoleon Sphinx v>1.3 + "numpydoc", + "sphinxcontrib.httpdomain", # for restfull API + "sphinxcontrib.autohttp.flask", + "sphinx.ext.extlinks", # enables external links with a key + "sphinxjsondomain", ] @@ -84,13 +89,15 @@ # Dictionary of external links -extlinks = {'pandas':('http://pandas.pydata.org/pandas-docs/stable/api.html#%s', - 'pandas.'), - 'sqlalchemy':('http://docs.sqlalchemy.org/en/latest/orm/session_basics.html%s', - 'SQLAlchemy session object'), - 'shapely':('http://toblerity.org/shapely/manual.html#%s', - 'Shapely object') - } +extlinks = { + "pandas": ("http://pandas.pydata.org/pandas-docs/stable/api.html#%s", "pandas."), + "sqlalchemy": ( + "http://docs.sqlalchemy.org/en/latest/orm/session_basics.html%s", + "SQLAlchemy session object", + ), + "shapely": ("http://toblerity.org/shapely/manual.html#%s", "Shapely object"), +} + # test oedb implementation def rstjinja(app, docname, source): @@ -98,61 +105,60 @@ def rstjinja(app, docname, source): Render our pages as a jinja template for fancy templating goodness. """ # Make sure we're outputting HTML - if app.builder.format != 'html': + if app.builder.format != "html": return src = source[0] - rendered = app.builder.templates.render_string( - src, app.config.html_context - ) + rendered = app.builder.templates.render_string(src, app.config.html_context) source[0] = rendered + def setup(app): app.connect("source-read", rstjinja) -#import requests -#oep_url= 'http://oep.iks.cs.ovgu.de/' +# import requests + +# oep_url= 'http://oep.iks.cs.ovgu.de/' # get data from oedb test -#power_class = requests.get(oep_url+'/api/v0/schema/model_draft/tables/ego_power_class/rows/', ).json() -#import json -#path = os.getcwd() -#json_file = '../ego/scenario_setting.json' +# import json +# path = os.getcwd() +# json_file = '../ego/scenario_setting.json' -#with open(path +'/'+json_file) as f: +# with open(path +'/'+json_file) as f: # scn_set = json.load(f) # -#json_global = list(scn_set['eTraGo']) +# json_global = list(scn_set['eTraGo']) # -#html_context = { +# html_context = { # 'power_class': power_class, # 'scn_setting': scn_set -#} +# } # add RestFull API -httpexample_scheme = 'https' +httpexample_scheme = "https" # Add any paths that contain templates here, relative to this directory. -templates_path = ['_templates'] +templates_path = ["_templates"] # The suffix(es) of source filenames. # You can specify multiple suffix as a list of string: # source_suffix = ['.rst', '.md'] -source_suffix = '.rst' +source_suffix = ".rst" # The encoding of source files. -#source_encoding = 'utf-8-sig' +# source_encoding = 'utf-8-sig' # The master toctree document. -master_doc = 'index' +master_doc = "index" # General information about the project. 
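# Editorial aside, not part of conf.py: with sphinx.ext.extlinks enabled,
# the ``extlinks`` mapping above turns a role such as
#
#     :pandas:`DataFrame.loc`
#
# into a link whose URL is the template with ``%s`` substituted, i.e.
# http://pandas.pydata.org/pandas-docs/stable/api.html#DataFrame.loc,
# rendered with the caption prefix as "pandas.DataFrame.loc".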
-project = u'eGo' -copyright = u'2015-2018, open_eGo-Team' -author = u'open_eGo-Team' +project = "eGo" +copyright = "2015-2018, open_eGo-Team" +author = "open_eGo-Team" # The version info for the project you're documenting, acts as replacement for @@ -160,9 +166,9 @@ def setup(app): # built documents. # # The short X.Y version. -version = '0.3.4' +version = "0.3.4" # The full version, including alpha/beta/rc tags. -release = '0.3.4' +release = "0.3.4" # The language for content autogenerated by Sphinx. Refer to documentation @@ -170,62 +176,73 @@ def setup(app): # # This is also used if you do content translation via gettext catalogs. # Usually you set "language" from the command line for these cases. -language = None +language = "en" # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: -#today = '' +# today = '' # Else, today_fmt is used as the format for a strftime call. -#today_fmt = '%B %d, %Y' +# today_fmt = '%B %d, %Y' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. -exclude_patterns = ['_build', 'whatsnew', '_static'] +exclude_patterns = ["_build", "whatsnew", "_static"] # The reST default role (used for this markup: `text`) to use for all # documents. -#default_role = None +# default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. -#add_function_parentheses = True +# add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). -#add_module_names = True +# add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. -#show_authors = False +# show_authors = False # The name of the Pygments (syntax highlighting) style to use. -pygments_style = 'sphinx' +pygments_style = "sphinx" # A list of ignored prefixes for module index sorting. -#modindex_common_prefix = [] +# modindex_common_prefix = [] # If true, keep warnings as "system message" paragraphs in the built documents. -#keep_warnings = False +# keep_warnings = False # If true, `todo` and `todoList` produce output, else they produce nothing. todo_include_todos = True -# Fix import error of modules which depend on C modules (mock out the imports for these modules) -# see http://read-the-docs.readthedocs.io/en/latest/faq.html#i-get-import-errors-on-libraries-that-depend-on-c-modules +# Fix import error of modules which depend on C modules (mock out the imports for +# these modules) +# see http://read-the-docs.readthedocs.io/en/latest/faq.html#i-get-import- +# errors-on-libraries-that-depend-on-c-modules + +if "READTHEDOCS" in os.environ: -if 'READTHEDOCS' in os.environ: class Mock(MagicMock): @classmethod def __getattr__(cls, name): - return MagicMock() + return MagicMock() - MOCK_MODULES = ['ding0', 'ding0.results', 'shapely'] + MOCK_MODULES = ["ding0", "ding0.results", "shapely"] sys.modules.update((mod_name, Mock()) for mod_name in MOCK_MODULES) - MOCK_MODULES = ['libgeos', 'geos', 'libgeos_c', 'geos_c','libgeos_c.so.1', - 'libgeos_c.so', 'shapely', 'geoalchemy2', 'geoalchemy2.shape '] - + MOCK_MODULES = [ + "libgeos", + "geos", + "libgeos_c", + "geos_c", + "libgeos_c.so.1", + "libgeos_c.so", + "shapely", + "geoalchemy2", + "geoalchemy2.shape", + ] # -- Options for HTML output ---------------------------------------------- @@ -234,158 +251,150 @@ def __getattr__(cls, name): # a list of builtin themes.
# html_theme = 'alabaster' -import sphinx_rtd_theme html_theme_path = [sphinx_rtd_theme.get_html_theme_path()] -html_theme = 'sphinx_rtd_theme' +html_theme = "sphinx_rtd_theme" # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. -#html_theme_options = {} +# html_theme_options = {} # Add any paths that contain custom themes here, relative to this directory. -#html_theme_path = [] +# html_theme_path = [] # The name for this set of Sphinx documents. If None, it defaults to # " v documentation". -#html_title = None +# html_title = None # A shorter title for the navigation bar. Default is the same as html_title. -#html_short_title = None +# html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. -#html_logo = None +# html_logo = None # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. -#html_favicon = None +# html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". -html_static_path = ['_static'] +html_static_path = ["_static"] # Add any extra paths that contain custom files (such as robots.txt or # .htaccess) here, relative to this directory. These files are copied # directly to the root of the documentation. -#html_extra_path = [] +# html_extra_path = [] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. -#html_last_updated_fmt = '%b %d, %Y' +# html_last_updated_fmt = '%b %d, %Y' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. -#html_use_smartypants = True +# html_use_smartypants = True # Custom sidebar templates, maps document names to template names. -#html_sidebars = {} +# html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. -#html_additional_pages = {} +# html_additional_pages = {} # If false, no module index is generated. -#html_domain_indices = True +# html_domain_indices = True # If false, no index is generated. -#html_use_index = True +# html_use_index = True # If true, the index is split into individual pages for each letter. -#html_split_index = False +# html_split_index = False # If true, links to the reST sources are added to the pages. -#html_show_sourcelink = True +# html_show_sourcelink = True # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. -#html_show_sphinx = True +# html_show_sphinx = True # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. -#html_show_copyright = True +# html_show_copyright = True # If true, an OpenSearch description file will be output, and all pages will # contain a tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. -#html_use_opensearch = '' +# html_use_opensearch = '' # This is the file name suffix for HTML files (e.g. ".xhtml"). -#html_file_suffix = None +# html_file_suffix = None # Language to be used for generating the HTML full-text search index. 
# Sphinx supports the following languages: # 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja' # 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr' -#html_search_language = 'en' +# html_search_language = 'en' # A dictionary with options for the search language support, empty by default. # Now only 'ja' uses this config value -#html_search_options = {'type': 'default'} +# html_search_options = {'type': 'default'} # The name of a javascript file (relative to the configuration directory) that # implements a search results scorer. If empty, the default will be used. -#html_search_scorer = 'scorer.js' +# html_search_scorer = 'scorer.js' # Output file base name for HTML help builder. -htmlhelp_basename = 'eGodoc' +htmlhelp_basename = "eGodoc" # -- Options for LaTeX output --------------------------------------------- latex_elements = { -# The paper size ('letterpaper' or 'a4paper'). -#'papersize': 'letterpaper', - -# The font size ('10pt', '11pt' or '12pt'). -#'pointsize': '10pt', - -# Additional stuff for the LaTeX preamble. -#'preamble': '', - -# Latex figure (float) alignment -#'figure_align': 'htbp', + # The paper size ('letterpaper' or 'a4paper'). + # 'papersize': 'letterpaper', + # The font size ('10pt', '11pt' or '12pt'). + # 'pointsize': '10pt', + # Additional stuff for the LaTeX preamble. + # 'preamble': '', + # Latex figure (float) alignment + # 'figure_align': 'htbp', } # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, # author, documentclass [howto, manual, or own class]). latex_documents = [ - (master_doc, 'eGo.tex', u'eGo Documentation', - u'open\_eGo-Team', 'manual'), + (master_doc, "eGo.tex", "eGo Documentation", r"open\_eGo-Team", "manual"), ] # The name of an image file (relative to this directory) to place at the top of # the title page. -#latex_logo = None +# latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. -#latex_use_parts = False +# latex_use_parts = False # If true, show page references after internal links. -#latex_show_pagerefs = False +# latex_show_pagerefs = False # If true, show URL addresses after external links. -#latex_show_urls = False +# latex_show_urls = False # Documents to append as an appendix to all manuals. -#latex_appendices = [] +# latex_appendices = [] # If false, no module index is generated. -#latex_domain_indices = True +# latex_domain_indices = True # -- Options for manual page output --------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). -man_pages = [ - (master_doc, 'eGo', u'eGo Documentation', - [author], 1) -] +man_pages = [(master_doc, "eGo", "eGo Documentation", [author], 1)] # If true, show URL addresses after external links. -#man_show_urls = False +# man_show_urls = False # -- Options for Texinfo output ------------------------------------------- @@ -394,36 +403,33 @@ def __getattr__(cls, name): # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ - (master_doc, 'eGo', u'eGo Documentation', - author, 'eGo', 'Titel', - 'Miscellaneous'), + (master_doc, "eGo", "eGo Documentation", author, "eGo", "Titel", "Miscellaneous"), ] # Documents to append as an appendix to all manuals. -#texinfo_appendices = [] +# texinfo_appendices = [] # If false, no module index is generated. 
-#texinfo_domain_indices = True +# texinfo_domain_indices = True # How to display URL addresses: 'footnote', 'no', or 'inline'. -#texinfo_show_urls = 'footnote' +# texinfo_show_urls = 'footnote' # If true, do not generate a @detailmenu in the "Top" node's menu. -#texinfo_no_detailmenu = False +# texinfo_no_detailmenu = False # Example configuration for intersphinx: refer to the Python standard library. -intersphinx_mapping = {'python': ('https://docs.python.org/3', None), - 'etrago': ('https://etrago.readthedocs.io/en/latest', - None), - 'edisgo': ('http://edisgo.readthedocs.io/en/dev',None), - 'ding0': ('https://dingo.readthedocs.io/en/dev',None), - 'pypsa': ('https://pypsa.org/doc/',None), - 'sqlalchemy': ('https://docs.sqlalchemy.org/en/latest/', - None), - } +intersphinx_mapping = { + "python": ("https://docs.python.org/3", None), + "etrago": ("https://etrago.readthedocs.io/en/latest", None), + "edisgo": ("http://edisgo.readthedocs.io/en/dev", None), + "ding0": ("https://dingo.readthedocs.io/en/dev", None), + "pypsa": ("https://pypsa.org/doc/", None), + "sqlalchemy": ("https://docs.sqlalchemy.org/en/latest/", None), +} # Numbered figures numfig = True -autodoc_member_order = 'bysource' +autodoc_member_order = "bysource" diff --git a/doc/developer.rst b/doc/developer.rst index 88019905..adad24f5 100644 --- a/doc/developer.rst +++ b/doc/developer.rst @@ -9,8 +9,8 @@ Installation .. note:: Installation is only tested on Linux (Ubuntu 16.04). -Please read the Installation Guideline :ref:`ego.doc.installation`. - +Please read the Installation Guideline :ref:`ego.doc.installation`. + 1. Use virtual environment -------------------------- @@ -58,17 +58,17 @@ script, which can be found under ``ding0/ding0/examples/``. `Learn more about Dingo `_. Before you run the script, also check the configs of Dingo and eDisGo in order to use the right database version. You find these files under -``ding0/ding0/config/config_db_tables.cfg`` and +``ding0/ding0/config/config_db_tables.cfg`` and ``~.edisgo/config/config_db_tables.cfg``. Your created ding0 grids are stored in -``~.ding0/..``. - +``~.ding0/..``. + eDisGo and eTraGo ----------------- -Please read the Developer notes of -`eDisGo `_ and +Please read the Developer notes of +`eDisGo `_ and `eTraGo `_. @@ -79,17 +79,17 @@ Error handling ``pip install --upgrade pip==18.1`` 2. Installation Error of eTraGo, eDisGo, Pypsa fork or ding0. - If you have problems with one of those packages please clone it from + If you have problems with one of those packages, please clone it from *github.com* and install it from the master or dev branch. For example ``pip3 install -e git+https://github.com/openego/PyPSA.git@master#egg=pypsafork`` 3. Matplotlib error on servers and a few other systems. Please change your settings - in ``matplotlibrc`` from ``backend : TkAgg`` to ``backend : PDF``. You can + in ``matplotlibrc`` from ``backend : TkAgg`` to ``backend : PDF``. You can find the file for example in a virtual environment under ``~/env/lib/python3.5/site-packages/matplotlib/mpl-data$ vim matplotlibrc``. `Learn more here. `_. 4. Geopandas error caused by Rtree ``Could not find libspatialindex_c library`` - Please reinstall Rtree with ``sudo pip3 install Rtree`` or install + Please reinstall Rtree with ``sudo pip3 install Rtree`` or install ``libspatialindex_c`` via ``sudo apt install python3-rtree``. On Windows or macOS you may have to install ``libspatialindex_c`` straight from source.
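For item 3 of the error handling list above, the backend can also be selected per script instead of editing ``matplotlibrc``; a minimal sketch using the standard matplotlib API (nothing eGo-specific):

.. code-block:: python

    # Choose a non-interactive backend before pyplot is imported; this has
    # the same effect as setting "backend : PDF" in matplotlibrc.
    import matplotlib

    matplotlib.use("PDF")

    import matplotlib.pyplot as plt  # import only after the backend is set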
diff --git a/doc/getting_started.rst b/doc/getting_started.rst index 256ca7e5..40c211e6 100644 --- a/doc/getting_started.rst +++ b/doc/getting_started.rst @@ -28,11 +28,11 @@ Steps to run eGo ``eGo/ego`` and ``>>> python3 appl.py``. You can also use any other Python terminal, Jupyter Notebook or editor. - + How to use eGo? =============== - + Start and use eGo from the terminal. .. code-block:: bash @@ -71,13 +71,13 @@ on `jupyter.org `_. `Workshop open_eGo Session eGo (in German) `_ `Workshop open_eGo Session eTraGo (in German) `_ - + `Workshop open_eGo Session DinGo (in German) `_ `Workshop open_eGo Session eDisGo (in German) `_ - + `OpenMod eTraGo Tutorial (in English) `_ - + @@ -89,7 +89,7 @@ A small example of the eGo results is displayed below. The full page can be foun .. raw:: html - + @@ -101,4 +101,3 @@ The plot is created by the eGo function: .. code-block:: python ego.iplot - diff --git a/doc/images/open_ego_icon.svg b/doc/images/open_ego_icon.svg index dcdf778b..f65c3e13 100644 --- a/doc/images/open_ego_icon.svg +++ b/doc/images/open_ego_icon.svg @@ -108,4 +108,4 @@ id="path4581-6" cx="-194.7018" cy="-183.20657" - r="67.5" /> \ No newline at end of file + r="67.5" /> diff --git a/doc/installation.rst b/doc/installation.rst index 3d741d7c..e98548fc 100644 --- a/doc/installation.rst +++ b/doc/installation.rst @@ -12,13 +12,13 @@ command in order to install eGo: $ pip3 install eGo --process-dependency-links Please ensure that you are using pip version 18.1. -Use ``pip install --upgrade pip==18.1`` to get the right pip version. +Use ``pip install --upgrade pip==18.1`` to get the right pip version. In case of problems with the installation and the ``dependency_links`` of the PyPSA fork, please install PyPSA from the github.com/openego repository. .. code-block:: bash - $ pip3 install -e git+https://github.com/openego/PyPSA@master#egg=0.11.0fork + $ pip3 install -e git+https://github.com/openego/PyPSA@master#egg=0.11.0fork Using virtual environment @@ -117,7 +117,7 @@ Old developer connection host = oe2.iws.cs.ovgu.de port = 5432 pw = YourOEDBPassword - - - + + + Please find more information on *Developer notes*. diff --git a/doc/theoretical_background.rst b/doc/theoretical_background.rst index 2f3d5dcb..f0e5141a 100644 --- a/doc/theoretical_background.rst +++ b/doc/theoretical_background.rst @@ -46,26 +46,26 @@ Subsequent to the MV grid simulations with the reduced number of representative Economic calculation ==================== -The tool *eGo* unites the extra high (ehv) and high voltage (hv) models with the -medium (mv) and low voltage (lv) models to ascertain the costs per selected -measure and scenario. This results in a cross-grid-level economic result of +The tool *eGo* unites the extra high (ehv) and high voltage (hv) models with the +medium (mv) and low voltage (lv) models to ascertain the costs per selected +measure and scenario. This results in a cross-grid-level economic result of the electrical grid and storage optimisation. Overnight costs --------------- -The *overnight costs* represents the investment costs of the components or -construction project without any interest, as if the project was completed +The *overnight costs* represent the investment costs of the components or +construction project without any interest, as if the project was completed "overnight". The overnight costs (:math:`C_{\text{Overnight}}`) of the grid measures (lines and transformers) are calculated as:
.. math:: - C_{Line~extension} = S_{Extension}~[MVA] * C_{assumtion}~[\frac{EUR}{MVA}] * L_{Line~length}~[km] + C_{Line~extension} = S_{Extension}~[MVA] * C_{assumption}~[\frac{EUR}{MVA}] * L_{Line~length}~[km] .. math:: - C_{Transformer~extension} = S_{Extension}~[MVA] * C_{assumtion}~[\frac{EUR}{MVA}] + C_{Transformer~extension} = S_{Extension}~[MVA] * C_{assumption}~[\frac{EUR}{MVA}] The total overnight grid extension costs are given by: @@ -85,13 +85,13 @@ Annuity costs ------------- The *annuity costs* represent project investment costs with an interest as present -value of an annuity. The investment years *T* and the interest rate *p* are +value of an annuity. The investment years *T* and the interest rate *p* are defined as default in *eGo* with an interest rate (:math:`p`) of ``0.05`` -and a number of investment years ( :math:`T` ) of ``40 years``. The values are +and a number of investment years (:math:`T`) of ``40 years``. The values are based on the [StromNEV_A1]_ for the grid investment regulation in Germany. The present value of an annuity (PVA) is calculated as: - + .. math:: PVA = \frac{1}{p}- \frac{1}{\left ( p*\left (1 + p \right )^T \right )}
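As a quick numerical check of the PVA formula (not part of the eGo code base; this just evaluates the equation above with eGo's stated defaults):

.. code-block:: python

    # Evaluate the present value of an annuity with p = 0.05 and T = 40.
    p = 0.05  # interest rate
    T = 40  # number of investment years

    pva = 1 / p - 1 / (p * (1 + p) ** T)
    print(round(pva, 2))  # -> 17.16

    # A hypothetical overnight cost of 1,000,000 EUR spread over the
    # annuity period then corresponds to about 58,278 EUR per year:
    print(round(1_000_000 / pva))  # -> 58278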
@@ -115,14 +115,14 @@ Investment costs ehv/hv ----------------------- The investment costs of the grid and storage expansion are taken from the studies -[NEP2015a]_ for the extra and high voltage components and the [Dena]_. The -given costs are transformed in respect to PyPSA *[€/MVA]* format [PyPSA]_ +[NEP2015a]_ for the extra and high voltage components and the [Dena]_. The +given costs are transformed with respect to the PyPSA *[€/MVA]* format [PyPSA]_ components for the optimisation. - + **Overview of grid cost assumptions:** -The table displays the transformer and line costs which are used for the +The table displays the transformer and line costs which are used for the calculation with *eTraGo*. .. csv-table:: Overview of grid cost assumptions :header-rows: 1 The *eTraGo* calculation of the annuity costs per simulation period is defined -in :func:`~etrago.tools.utilities.set_line_costs` and -:func:`~etrago.tools.utilities.set_trafo_costs`. +in :func:`~etrago.tools.utilities.set_line_costs` and +:func:`~etrago.tools.utilities.set_trafo_costs`. **Overview of storage cost assumptions:** @@ -143,10 +143,10 @@ in :func:`~etrago.tools.utilities.set_line_costs` and Investment costs mv/lv ---------------------- -The tool *eDisGO* is calculating all grid expansion measures as capital or -*overnight* costs. In order to get the annuity costs of eDisGo's optimisation +The tool *eDisGo* calculates all grid expansion measures as capital or +*overnight* costs. In order to get the annuity costs of eDisGo's optimisation results the function :func:`~ego.tools.economics.edisgo_convert_capital_costs` -is used. The cost assumption of [eDisGo]_ are taken from the [Dena]_ +is used. The cost assumptions of [eDisGo]_ are taken from the [Dena]_ and [CONSENTEC]_ studies. Based on the component, the costs including earthwork costs can depend on population density according to [Dena]_. @@ -157,7 +157,7 @@ References .. [NEP2015a] Übertragungsnetzbetreiber Deutschland. (2015). - *Netzentwicklungsplan Strom 2025 - Kostenschaetzungen*, Version 2015, + *Netzentwicklungsplan Strom 2025 - Kostenschaetzungen*, Version 2015, 1. Entwurf, 2015. (``_) @@ -176,20 +176,17 @@ References (``_) .. [Overnight cost] Wikipedia (2018). - *Definition of overnight cost*. + *Definition of overnight cost*. (``_) .. [eDisGo] eDisGo - grid expansion costs (2018). - *Cost assumption on mv and lv grid components*. + *Cost assumption on mv and lv grid components*. (``_) .. [CONSENTEC] CONSENTEC et al. (2006). *Untersuchung der Voraussetzungen und möglicher Anwendung analytischer* - *Kostenmodelle in der deutschen Energiewirtschaft *. + *Kostenmodelle in der deutschen Energiewirtschaft*. (``_) - - - diff --git a/doc/welcome.rst b/doc/welcome.rst index ab439808..0460e992 100644 --- a/doc/welcome.rst +++ b/doc/welcome.rst @@ -28,7 +28,7 @@ in order to use eGo a registration on the OpenEnergy Platform is required. For m information see `openenergy-platform `_ and login. -The OpenEnergy platform mainly addresses students, researchers and scientists in +The OpenEnergy platform mainly addresses students, researchers and scientists in the field of energy modelling and analytics, but also welcomes all other interested parties. The platform provides great tools to make your energy system modelling process transparent. Data of the open_eGo project are stored on @@ -73,7 +73,7 @@ grid data for whole Germany. `Learn more here `_. Dataprocessing -------------- -For the open_eGo project several python packages are developed which are feeded +For the open_eGo project several python packages are developed which are fed by the input data of the data processing. The dataprocessing is written in SQL and Python. `Learn more here `_. @@ -115,7 +115,7 @@ License .. image:: images/open_ego_icon_web.png :scale: 100% :align: right - + © Copyright 2015-2018 Flensburg University of Applied Sciences, @@ -150,7 +150,7 @@ Partner :alt: ZNES Flensburg :target: http://www.znes-flensburg.de/project/150?language=en :align: right - + .. image:: https://i0.wp.com/reiner-lemoine-institut.de/wp-content/uploads/2015/08/RLI_Logo.png :scale: 90% :width: 180px @@ -176,4 +176,3 @@ Partner :alt: Uni Magdeburg :target: http://iks.cs.ovgu.de/IKS.html :align: left - diff --git a/doc/whatsnew.rst b/doc/whatsnew.rst index a5bb64e2..f1308877 100644 --- a/doc/whatsnew.rst +++ b/doc/whatsnew.rst @@ -7,9 +7,9 @@ What's new :local: :backlinks: top -.. include:: whatsnew/v0-3-4.rst -.. include:: whatsnew/v0-3-3.rst -.. include:: whatsnew/v0-3-2.rst +.. include:: whatsnew/v0-3-4.rst +.. include:: whatsnew/v0-3-3.rst +.. include:: whatsnew/v0-3-2.rst .. include:: whatsnew/v0-3-1.rst .. include:: whatsnew/v0-3-0.rst .. include:: whatsnew/v0-2-0.rst diff --git a/doc/whatsnew/v0-2-0.rst b/doc/whatsnew/v0-2-0.rst index 13e9f8d8..f9b5bd01 100644 --- a/doc/whatsnew/v0-2-0.rst +++ b/doc/whatsnew/v0-2-0.rst @@ -1,15 +1,15 @@ Release v0.2.0 (July 18, 2018) ++++++++++++++++++++++++++++++ -Fundamental structural changes of the eGo tool are included in this release. -A new feature is the integration of the MV grid power flow simulations, -performed by the tool `eDisGo. `_. -Thereby, eGo can be used to perform power flow simulations and optimizations +Fundamental structural changes of the eGo tool are included in this release. +A new feature is the integration of the MV grid power flow simulations, +performed by the tool `eDisGo `_. +Thereby, eGo can be used to perform power flow simulations and optimizations for EHV, HV (*eTraGo*) and MV (*eDisGo*) grids. -Moreover, the use of the Dataprocessing versions -``''v0.4.1''`` and ``''v0.4.2''`` is supported.
Please note that this release +is still under construction and only recommended for developers of the *open_eGo* project. Furthermore, overall cost aggregation functions are available. @@ -42,8 +42,8 @@ Notes ----- * As an external user you need to have an account on the `openenergy-platform.org/login `_ -* In future versions, all MV grids (*ding0* grids) will be queried from your - database. However, in this version all MV grids have to be generated with - the tool `ding0 `_ and stored in *eGo*'s +* In future versions, all MV grids (*ding0* grids) will be queried from your + database. However, in this version all MV grids have to be generated with + the tool `ding0 `_ and stored in *eGo*'s *data* folder. * Total operational costs are missing in this release diff --git a/doc/whatsnew/v0-3-0.rst b/doc/whatsnew/v0-3-0.rst index 91c53cf0..65ce2499 100644 --- a/doc/whatsnew/v0-3-0.rst +++ b/doc/whatsnew/v0-3-0.rst @@ -1,10 +1,10 @@ Release v0.3.0 (September 07, 2018) +++++++++++++++++++++++++++++++++++ -Power Flow and Clustering. eGo is now using eTraGo non-linear power flows based -on optimization results and its disaggregation of clustered results -to an original spatial complexities. With the release of eDisGo speed-up options, -a new storage integration methodology and more are now available. +Power Flow and Clustering. eGo is now using eTraGo non-linear power flows based +on optimization results and its disaggregation of clustered results +to the original spatial complexity. With the release of eDisGo speed-up options, +a new storage integration methodology and more are now available. Added features -------------- * Implementing of Ding0 grid parallelization * Redesign of scenario settings and API simplifications * Adding and using the Power Flow of eTraGo in eGo -* Testing and using new dataprocessing Version v0.4.3, v0.4.4 and v0.4.5 +* Testing and using new dataprocessing versions v0.4.3, v0.4.4 and v0.4.5 * make eGo installable from pip via ``pip3 install eGo --process-dependency-links`` * Implementing eDisGo's storage distribution for MV and LV grids * Improved logging and the creation of status files @@ -23,7 +23,4 @@ Added features * Storage-related investment costs are also allocated to MV grids * Update of cluster plots * Plot of investment costs per line and bus -* Update of ``ego.iplot`` for an interactive visualization - - - +* Update of ``ego.iplot`` for an interactive visualization diff --git a/doc/whatsnew/v0-3-1.rst b/doc/whatsnew/v0-3-1.rst index 03231c53..8761126d 100644 --- a/doc/whatsnew/v0-3-1.rst +++ b/doc/whatsnew/v0-3-1.rst @@ -1,7 +1,7 @@ Release v0.3.1 (October 27, 2018) +++++++++++++++++++++++++++++++++ -This release contains documentation and bug fixes for the new features +This release contains documentation and bug fixes for the new features introduced in 0.3.0.
Added features @@ -14,10 +14,10 @@ Added features * Change and update of API file scenario_setting.json * Improved cluster plot of ``ego.plot_edisgo_cluster()`` * Improved cost differentiation -* Add jupyter notebook eGo tutorials +* Add jupyter notebook eGo tutorials Fixes ----- * Fix installation problems of the pypsa 0.11.0 fork (use pip 18.1) -* Fix parallel calculation of mv results +* Fix parallel calculation of mv results diff --git a/doc/whatsnew/v0-3-2.rst b/doc/whatsnew/v0-3-2.rst index b5c5a5b7..262b3f45 100644 --- a/doc/whatsnew/v0-3-2.rst +++ b/doc/whatsnew/v0-3-2.rst @@ -7,4 +7,3 @@ Added features -------------- * Registration at zenodo.org - diff --git a/doc/whatsnew/v0-3-3.rst b/doc/whatsnew/v0-3-3.rst index c103d9ac..c1e895c7 100644 --- a/doc/whatsnew/v0-3-3.rst +++ b/doc/whatsnew/v0-3-3.rst @@ -13,6 +13,5 @@ Fixes ----- * Fix bug of period calculation -* removed duplicate matplotlib from setup.py +* Removed duplicate matplotlib from setup.py * fixed csv import - diff --git a/doc/whatsnew/v0-3-4.rst b/doc/whatsnew/v0-3-4.rst index d20cef9c..00680e50 100644 --- a/doc/whatsnew/v0-3-4.rst +++ b/doc/whatsnew/v0-3-4.rst @@ -7,4 +7,3 @@ Added features -------------- * Update eDisGo version from 0.0.8 to 0.0.9 - diff --git a/ego/__init__.py b/ego/__init__.py index c87bd1d7..ee083176 100644 --- a/ego/__init__.py +++ b/ego/__init__.py @@ -11,11 +11,13 @@ # along with this program. If not, see . __version__ = "0.3.4" -__copyright__ = ("Europa-Universität Flensburg, " - " Centre for Sustainable Energy Systems") +__copyright__ = ( + "Europa-Universität Flensburg, " "Centre for Sustainable Energy Systems" +) __license__ = "GNU Affero General Public License Version 3 (AGPL-3.0)" __author__ = "wolf_bunke" import logging + logging.basicConfig(level=logging.INFO) diff --git a/ego/appl.py b/ego/appl.py index 627d7d99..0539ba3d 100644 --- a/ego/appl.py +++ b/ego/appl.py @@ -29,23 +29,26 @@ import os -if not 'READTHEDOCS' in os.environ: +if "READTHEDOCS" not in os.environ: from tools.io import eGo from tools.utilities import define_logging - logger = define_logging(name='ego') -__copyright__ = ("Flensburg University of Applied Sciences, " - "Europa-Universität Flensburg, " - "Centre for Sustainable Energy Systems") + logger = define_logging(name="ego") + +__copyright__ = ( + "Flensburg University of Applied Sciences, " + "Europa-Universität Flensburg, " + "Centre for Sustainable Energy Systems" +) __license__ = "GNU Affero General Public License Version 3 (AGPL-3.0)" __author__ = "wolf_bunke, maltesc" -if __name__ == '__main__': +if __name__ == "__main__": - logger.info('Start calculation') + logger.info("Start calculation") - ego = eGo(jsonpath='scenario_setting.json') + ego = eGo(jsonpath="scenario_setting.json") # logger.info('Print results') # ego.etrago_line_loading() # print(ego.etrago.generator) diff --git a/ego/data/__init__.py b/ego/data/__init__.py deleted file mode 100644 index c28ee935..00000000 --- a/ego/data/__init__.py +++ /dev/null @@ -1,6 +0,0 @@ -""" -""" - -__copyright__ = "Europa-Universität Flensburg, Centre for Sustainable Energy Systems" -__license__ = "GNU Affero General Public License Version 3 (AGPL-3.0)" -__author__ = "wolf_bunke" diff --git a/ego/data/investment_costs.csv b/ego/data/investment_costs.csv deleted file mode 100644 index 3793547b..00000000 --- a/ego/data/investment_costs.csv +++ /dev/null @@ -1,18 +0,0 @@ -carriers,Status Quo,NEP 2035,eGo 100,source -gas,1384,1322,1280,
https://www.diw.de/documents/publikationen/73/diw_01.c.424566.de/diw_datadoc_2013-068.pdf page 75 -lignite,2862,2718,2620, https://www.diw.de/documents/publikationen/73/diw_01.c.424566.de/diw_datadoc_2013-068.pdf page 75 -waste,1800,1800,1800, https://www.diw.de/documents/publikationen/73/diw_01.c.424566.de/diw_datadoc_2013-068.pdf page 75 -oil,400,400,400, https://www.diw.de/documents/publikationen/73/diw_01.c.424566.de/diw_datadoc_2013-068.pdf page 75 -uranium,6000,6000,6000, https://www.diw.de/documents/publikationen/73/diw_01.c.424566.de/diw_datadoc_2013-068.pdf page 75 -biomass,2424,2141,1951, https://www.diw.de/documents/publikationen/73/diw_01.c.424566.de/diw_datadoc_2013-068.pdf page 75 -eeg_gas,2424,2141,1951, https://www.diw.de/documents/publikationen/73/diw_01.c.424566.de/diw_datadoc_2013-068.pdf page 75 -coal,1800,1800,1800, https://www.diw.de/documents/publikationen/73/diw_01.c.424566.de/diw_datadoc_2013-068.pdf page 75 -run_of_river,3000,3000,3000, https://www.diw.de/documents/publikationen/73/diw_01.c.424566.de/diw_datadoc_2013-068.pdf page 75 -reservoir,2000,2000,2000, https://www.diw.de/documents/publikationen/73/diw_01.c.424566.de/diw_datadoc_2013-068.pdf page 75 -pumped_storage,2000,2000,2000, https://www.diw.de/documents/publikationen/73/diw_01.c.424566.de/diw_datadoc_2013-068.pdf page 75 -solar,950,555,425, https://www.diw.de/documents/publikationen/73/diw_01.c.424566.de/diw_datadoc_2013-068.pdf page 75 -wind_onshore,1269,1154,1075, https://www.diw.de/documents/publikationen/73/diw_01.c.424566.de/diw_datadoc_2013-068.pdf page 75 -geothermal,3982,3216,2740, https://www.diw.de/documents/publikationen/73/diw_01.c.424566.de/diw_datadoc_2013-068.pdf page 75 -other_non_renewable,3982,3216,2740, https://www.diw.de/documents/publikationen/73/diw_01.c.424566.de/diw_datadoc_2013-068.pdf page 75 -extendable_storage,100000,100000,100000, https://www.diw.de/documents/publikationen/73/diw_01.c.424566.de/diw_datadoc_2013-068.pdf page 75 -wind_offshore,2868,2396,2093, https://www.diw.de/documents/publikationen/73/diw_01.c.424566.de/diw_datadoc_2013-068.pdf page 75 diff --git a/ego/data/investment_costs_of_grid_ measures.csv b/ego/data/investment_costs_of_grid_ measures.csv deleted file mode 100644 index 825aecb4..00000000 --- a/ego/data/investment_costs_of_grid_ measures.csv +++ /dev/null @@ -1,34 +0,0 @@ -id,Spannungsebene,Anlage/Anlagenteil,Maßnahme," -Investionskosten ",Einheit,Bemerkung,Literatur,Source -1,220,AC-Freileitungen,"220-kV-Stromkreisauflage/ -Umbeseilung","0,15",Mio. €/km,auf Bestandsleitung pro Stromkreis,NEP 2025,https://www.netzentwicklungsplan.de/sites/default/files/paragraphs-files/kostenschaetzungen_nep_2025_1_entwurf.pdf -2,380,AC-Freileitungen,"380-kV-Stromkreisauflage/ -Umbeseilung","0,20",Mio. €/km,auf Bestandsleitung pro Stromkreis,NEP 2025,https://www.netzentwicklungsplan.de/sites/default/files/paragraphs-files/kostenschaetzungen_nep_2025_1_entwurf.pdf -3,380,AC-Freileitungen,"380-kV-Neubau in bestehender -Trasse Doppelleitung","1,6",Mio. €/km,inkl. Rückbau der bestehenden Trasse,NEP 2025,https://www.netzentwicklungsplan.de/sites/default/files/paragraphs-files/kostenschaetzungen_nep_2025_1_entwurf.pdf -4,380,AC-Freileitungen,380-kV-Neubau in Doppelleitung,"1,5",Mio. €/km,"Neubautrasse, Hochstrom",NEP 2025,https://www.netzentwicklungsplan.de/sites/default/files/paragraphs-files/kostenschaetzungen_nep_2025_1_entwurf.pdf -5,,DC-Freileitungen,Neubau DC-Freileitung*,"1,5",Mio. 
€/km,Neubautrasse mit bis zu 4 GW,NEP 2025,https://www.netzentwicklungsplan.de/sites/default/files/paragraphs-files/kostenschaetzungen_nep_2025_1_entwurf.pdf -6,,DC-Freileitungen,Umstellung Freileitung AC → DC,"0,20",Mio. €/km,"AC-Bestandsleitung, Stromkreisauflage DC -(Nachbeseilung), Kosten pro Stromkreis",NEP 2025,https://www.netzentwicklungsplan.de/sites/default/files/paragraphs-files/kostenschaetzungen_nep_2025_1_entwurf.pdf -7,,DC-Erdkabel,Neubau DC-Erdkabel,"4,00",Mio. €/km,"Neubautrasse mit 2 GW bei durchschnittlichen -Gegebenheiten",NEP 2025,https://www.netzentwicklungsplan.de/sites/default/files/paragraphs-files/kostenschaetzungen_nep_2025_1_entwurf.pdf -8,,DC-Erdkabel,Neubau DC-Erdkabel,"8,00",Mio. €/km,"Neubautrasse mit 2 x 2 GW bei durchschnittlichen -Gegebenheiten",NEP 2025,https://www.netzentwicklungsplan.de/sites/default/files/paragraphs-files/kostenschaetzungen_nep_2025_1_entwurf.pdf -9,380,AC-Stationen,380-kV-Schaltfeld,"4,00",Mio. €/SF,inkl. anlagenanteiliger Infrastruktur,NEP 2025,https://www.netzentwicklungsplan.de/sites/default/files/paragraphs-files/kostenschaetzungen_nep_2025_1_entwurf.pdf -10,,DC-Stationen,DC-Konverterstation,"0,20",Mio. €/MW,"pro Konverterstation inkl. Kosten des/der -AC-Anschluss-SF, Kosten für VSC-Umrichter, -die ausschließlich zum Einsatz kommen -sollen (vorher Kosten für LCC-Umrichter)",NEP 2025,https://www.netzentwicklungsplan.de/sites/default/files/paragraphs-files/kostenschaetzungen_nep_2025_1_entwurf.pdf -11,380,"Kompensations -anlagen",380-kV-MSCDN,"1,4",Mio. €/Stück,100 Mvar schaltbarere Kondensator (ohne SF),NEP 2025,https://www.netzentwicklungsplan.de/sites/default/files/paragraphs-files/kostenschaetzungen_nep_2025_1_entwurf.pdf -12,380,"Kompensations -anlagen",380-kV-SVC,"4,0",Mio. €/Stück,"100 Mvar regelbare Kompensation: -inkl. Anpasstransformator (ohne SF)",NEP 2025,https://www.netzentwicklungsplan.de/sites/default/files/paragraphs-files/kostenschaetzungen_nep_2025_1_entwurf.pdf -13,380,"Kompensations -anlagen",380-kV-Kompensationsspule,"1,5",Mio. €/Stück,100 Mvar Drosselspule (ohne SF),NEP 2025,https://www.netzentwicklungsplan.de/sites/default/files/paragraphs-files/kostenschaetzungen_nep_2025_1_entwurf.pdf -14,380/110,380/110-kV-Transformatoren,300 MVA,"5,2",Mio. €/Stück,"inkl. 110-kV-Schaltfeld und Kabelableitung -(ohne 380-kV-Schaltfeld)",NEP 2025,https://www.netzentwicklungsplan.de/sites/default/files/paragraphs-files/kostenschaetzungen_nep_2025_1_entwurf.pdf -15,380/110,"380/220-kV- -Transformatoren",600 MVA,"8,5",Mio. €/Stück,inkl. Nebenanlagen (ohne 380-kV-Schaltfeld),NEP 2025,https://www.netzentwicklungsplan.de/sites/default/files/paragraphs-files/kostenschaetzungen_nep_2025_1_entwurf.pdf -16,110,"AC-Freileitungen",110-kV-Stromkreisauflage/Umbeseilung,"0,06",Mio. €/km,auf Bestandsleitung pro Stromkreis, dena Verteilnetzstudie 2030 S.146 ,https://shop.dena.de/sortiment/detail/produkt/dena-verteilnetzstudie-ausbau-und-innovationsbedarf-der-stromverteilnetze-in-deutschland-bis-2030/ -17,110,"AC-Freileitungen",110-kV-Neubau in bestehender Trasse Doppelleitung,"0,52",Mio. €/km,inkl. 
Rückbau der bestehenden Trasse,dena Verteilnetzstudie 2030 S.146 ,https://shop.dena.de/sortiment/detail/produkt/dena-verteilnetzstudie-ausbau-und-innovationsbedarf-der-stromverteilnetze-in-deutschland-bis-2030/ diff --git a/ego/examples/__init__.py b/ego/examples/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/ego/examples/tutorials/README.md b/ego/examples/tutorials/README.md deleted file mode 100644 index 2d30992a..00000000 --- a/ego/examples/tutorials/README.md +++ /dev/null @@ -1,98 +0,0 @@ -# eGo Tutorials - - -## eDisGo -The python package eDisGo provides a toolbox for the analysis and optimization of distribution grids. This software lives in the context of the research project open_eGo. It is closely related to the python project Ding0, which is currently the single data source for eDisGo, providing synthetic grid data for the whole of Germany. - - -Learn more about: -* [eDisGo – Optimization of flexibility options and grid expansion for distribution grids based on PyPSA](http://edisgo.readthedocs.io/en/dev/start_page.html) - - -## eTraGo -Optimization of flexibility options for transmission grids based on PyPSA. - -A speciality in this context is that transmission grids are described by the 380, 220 and 110 kV voltage levels in Germany. Conventionally, the 110 kV grid is part of the distribution grid. The integration of the transmission and ‘upper’ distribution grid is part of eTraGo. - -The optimization focuses on flexibility options, with a special emphasis on energy storage and grid expansion measures. - - -The python tool eTraGo can be used in several ways: from a terminal as an executable program, from integrated development environments (IDEs) such as [Spyder](https://anaconda.org/anaconda/spyder), from [Jupyter notebooks](http://jupyter.org/install), and more. - -A general description of how to install and work with eTraGo can also be found [here](http://etrago.readthedocs.io/en/latest/getting_started.html). - - -# Notebook installation - -#### with Anaconda - -Download and install the Python 3.x version of Anaconda [here](https://www.anaconda.com/download/). The full documentation can be found [on this page](https://docs.anaconda.com/anaconda/install/). - -We use Anaconda with a dedicated environment in order to reduce problems with packages and different versions on our system. Learn more about [Anaconda environments](https://conda.io/docs/user-guide/tasks/manage-environments.html). Remove the environment with _'conda env remove -n openMod_Zuerich2018'_. - - - - -##### Quick start - steps to do: - -0. Sign in on [openenergy-platform.org](http://openenergy-platform.org/login/) -1. Install Anaconda -2. Get the eGo repository from GitHub -3. Create the environment -4. Activate your environment -5. Install your notebook requirements -6. Make a few settings for your notebook -7. Start your notebook and check that it is running - - - -##### Get the eGo repository and install it with an environment -```desktop - -$ git clone -b features/tutorial https://git@github.com/openego/eGo.git -$ cd eGo/ego/examples/tutorials/ -$ conda env create --file requirements.yml -``` - -##### Activate your environment and run your notebooks -```desktop - -$ source activate openMod_Zuerich2018 -$ jupyter notebook -$ source deactivate -``` - -##### Fixes and workarounds: - -* Error in the function plot_stacked_gen() due to data name changes.
Fix the error in ../eGo/ego/examples/tutorials/src/etrago/etrago/tools/plot.py by adding 'wind_offshore':'skyblue', 'wind_onshore':'skyblue' to the colour dictionary instead of 'wind'; then restart the kernel and rerun plot_stacked_gen(network, resolution="MW") (a sketch of this change is shown near the end of this README). - - -##### API and ego.io settings - -Your API settings will be saved in the file config.ini in the folder .egoio. - - -```desktop -[oedb] -dialect = oedialect -username = -database = oedb -host = openenergy-platform.org -port = 80 -password = -``` - - -### Start your notebook - -```desktop -$ jupyter notebook -``` - -For more information, see [how to run your jupyter notebook](https://jupyter.readthedocs.io/en/latest/running.html#running). - -
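For the plot_stacked_gen() workaround listed under "Fixes and workarounds" above, the change boils down to replacing the old 'wind' entry in the carrier colour mapping in plot.py. A minimal sketch of the relevant dictionary fragment; the non-wind entries are illustrative placeholders, only the two wind keys are the actual fix:

```python
# Sketch of the carrier-to-colour mapping used by plot_stacked_gen() in
# .../src/etrago/etrago/tools/plot.py (path as cloned by this tutorial).
# Entries other than the two wind keys are placeholders, not the file's
# actual contents.
colors = {
    "solar": "gold",             # placeholder entry
    "gas": "orange",             # placeholder entry
    "wind_offshore": "skyblue",  # replaces the former 'wind' entry
    "wind_onshore": "skyblue",
}
```

After saving the change, restart the notebook kernel and rerun plot_stacked_gen(network, resolution="MW").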
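Once config.ini is filled in, you can verify the database connection before starting the notebooks. A minimal sketch, assuming your installed ego.io version exposes egoio.tools.db.connection() with a section argument that reads the [oedb] section shown above (it may prompt for missing credentials on first use):

```python
# Quick connection check against the [oedb] section of ~/.egoio/config.ini.
# Assumes egoio.tools.db.connection(section=...) is available in your egoio
# version; it returns an SQLAlchemy engine for the OpenEnergy Platform.
from egoio.tools import db

engine = db.connection(section="oedb")
print(engine)  # should print an Engine bound to openenergy-platform.org
```

If this fails, double-check the username and password entries in config.ini and your account on openenergy-platform.org.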

Note:

- -The installation is only tested on Ubuntu 16.04 and Windows 10 with [Anaconda](https://www.anaconda.com/download/) diff --git a/ego/examples/tutorials/edisgo_simple_example.ipynb b/ego/examples/tutorials/edisgo_simple_example.ipynb deleted file mode 100644 index 0581729a..00000000 --- a/ego/examples/tutorials/edisgo_simple_example.ipynb +++ /dev/null @@ -1,1495 +0,0 @@ -{ - "cells": [ - { - "cell_type": "code", - "execution_count": 1, - "metadata": {}, - "outputs": [], - "source": [ - "__copyright__ = \"Reiner Lemoine Institut gGmbH\"\n", - "__license__ = \"GNU Affero General Public License Version 3 (AGPL-3.0)\"\n", - "__url__ = \"https://github.com/openego/eDisGo/blob/master/LICENSE\"\n", - "__author__ = \"gplssm, birgits\"" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# Welcome to the eDisGo example\n", - "\n", - "eDisGo is a **python toolbox for the analysis of distribution networks** (low and medium voltage) that can be used to investigate economically viable **network expansion** scenarios, considering alternative flexibility options such as **storage or redispatch**. \n", - "\n", - "eDisGo is developed in the [open_eGo research project](https://openegoproject.wordpress.com/). It is based on [PyPSA](https://pypsa.org/), a toolbox for simulation and optimization of power networks, and closely related to the [ding0](https://dingo.readthedocs.io/en/dev/) project. ding0 stands for distribution network generator and is a tool to generate synthetic status quo medium and low voltage power distribution networks based on open (or at least accessible) data. It is currently the single data source for eDisGo, providing synthetic grid data for the whole of Germany.\n", - "\n", - "**! eDisGo is work in progress !** Please be aware that some of its features may still be buggy and not yet very sophisticated. We welcome any bug reports, hints, etc. you may have for us.\n", - "\n", - "### Learn more about eDisGo\n", - "\n", - "* __[eDisGo Source Code](https://github.com/openego/eDisGo)__\n", - "* __[eDisGo Documentation](http://edisgo.readthedocs.io/en/dev/)__\n", - "\n", - "### Table of Contents\n", - "\n", - "* [The eDisGo API](#settings)\n", - "* [The eDisGo data container and grid data structure](#network)\n", - "* [Future generator capacities](#generator_scenario)\n", - "* [Grid reinforcement](#grid_reinforcement)\n", - "* [Evaluate results](#evaluation)\n", - "* [References](#references)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## About the example\n", - "\n", - "This example shows the general usage of eDisGo. Grid expansion costs for an example distribution grid (see image below) are calculated assuming additional renewable energy generators as stated in the open_eGo 'NEP 2035' scenario (based on the scenario framework of the German grid development plan (Netzentwicklungsplan) for the year 2035) and conducting a worst-case analysis. Moreover, the eDisGo network data structure and how to access the results are introduced. At the end of the example, grid expansion costs for a different scenario are calculated and compared to the grid expansion costs in the 'NEP 2035' scenario.\n", - "\n", - "\n", - "\n", - "**Let's get started!** First of all we have to import some packages."
- ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "#### Import packages" - ] - }, - { - "cell_type": "code", - "execution_count": 2, - "metadata": {}, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "/home/Birgit.Schachler/virtualenvs/open_ego_notebook/lib/python3.6/site-packages/psycopg2/__init__.py:144: UserWarning: The psycopg2 wheel package will be renamed from release 2.8; in order to keep installing from binary please use \"pip install psycopg2-binary\" instead. For details see: .\n", - " \"\"\")\n", - "INFO:keyring.backend:Loading SecretService\n", - "INFO:keyring.backend:Loading kwallet\n", - "INFO:keyring.backend:Loading macOS\n", - "INFO:keyring.backend:Loading windows\n", - "INFO:keyring.backend:Loading Gnome\n", - "INFO:keyring.backend:Loading Google\n", - "INFO:keyring.backend:Loading Windows (alt)\n", - "INFO:keyring.backend:Loading file\n", - "INFO:keyring.backend:Loading keyczar\n", - "INFO:keyring.backend:Loading multi\n", - "INFO:keyring.backend:Loading pyfs\n" - ] - } - ], - "source": [ - "import os\n", - "import sys\n", - "import pandas as pd\n", - "\n", - "from edisgo import EDisGo\n", - "\n", - "import logging\n", - "logging.basicConfig(level=logging.ERROR)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## The eDisGo API \n", - "\n", - "The top-level API for setting up your scenario, invoking grid expansion and flexibility measures, etc. is provided by the **EDisGo class** (see [class documentation](http://edisgo.readthedocs.io/en/dev/api/edisgo.grid.html#edisgo.grid.network.EDisGo) for more information).\n", - "\n", - "In this example we simply want to do a worst-case analysis of a ding0 grid. For this, we only have to provide a grid and set the 'worst_case_analysis' parameter. \n", - "\n", - "#### Specifying the ding0 grid\n", - "\n", - "The ding0 grid is specified through the input parameter 'ding0_grid'. The following assumes you have a file of a ding0 grid named “ding0_grids_239_DPv0.4.0.pkl” (the file used in the next cell) in the current working directory.\n" - ] - }, - { - "cell_type": "code", - "execution_count": 3, - "metadata": {}, - "outputs": [], - "source": [ - "ding0_grid = os.path.join(sys.path[0], \"ding0_grids_239_DPv0.4.0.pkl\")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "#### Specifying worst-case analysis\n", - "\n", - "For the worst-case analysis you can either analyse just the feed-in case or just the load case, or do a combined analysis. Choose between the following options:\n", - "\n", - "* **’worst-case-feedin’** \n", - " \n", - " Feed-in and demand for the worst-case scenario \"feed-in case\" are generated. Demand is set to 15% of maximum demand for loads connected to the MV grid and 10% for loads connected to the LV grid. Feed-in of all generators is set to nominal power of the generator, except for PV systems where it is set to 85% of the nominal power.\n", - "\n", - " \n", - "* **’worst-case-load’**\n", - "\n", - " Feed-in and demand for the worst-case scenario \"load case\" are generated. Demand of all loads is set to maximum demand. Feed-in of all generators is set to zero.\n", - "\n", - "\n", - "* **’worst-case’**\n", - " \n", - " Feed-in and demand for the two worst-case scenarios \"feed-in case\" and \"load case\" are generated.\n", - "\n", - "Instead of doing a worst-case analysis you can also provide your own time series for demand and feed-in and use those in the network analysis. EDisGo also offers methods to generate load and feed-in time series.
Check out the [EDisGo class documentation](http://edisgo.readthedocs.io/en/dev/api/edisgo.grid.html#edisgo.grid.network.EDisGo) for more information." - ] - }, - { - "cell_type": "code", - "execution_count": 4, - "metadata": {}, - "outputs": [], - "source": [ - "worst_case_analysis = 'worst-case-feedin'" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Now we are ready to initialize the EDisGo API object." - ] - }, - { - "cell_type": "code", - "execution_count": 5, - "metadata": {}, - "outputs": [], - "source": [ - "edisgo = EDisGo(ding0_grid=ding0_grid,\n", - " worst_case_analysis=worst_case_analysis)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## The eDisGo data container and grid data structure \n", - "\n", - "The last line, besides a couple of other things, initialized the [Network class](http://edisgo.readthedocs.io/en/dev/api/edisgo.grid.html#edisgo.grid.network.Network) which serves as an overall data container in eDisGo holding the grid data for the [MV grid](http://edisgo.readthedocs.io/en/dev/api/edisgo.grid.html#edisgo.grid.grids.MVGrid) and [LV grids](http://edisgo.readthedocs.io/en/dev/api/edisgo.grid.html#edisgo.grid.grids.LVGrid), [config data](http://edisgo.readthedocs.io/en/dev/api/edisgo.grid.html#edisgo.grid.network.Config), [results](http://edisgo.readthedocs.io/en/dev/api/edisgo.grid.html#edisgo.grid.network.Results), [timeseries](http://edisgo.readthedocs.io/en/dev/api/edisgo.grid.html#edisgo.grid.network.TimeSeries), etc. It is linked from multiple locations and provides hierarchical access to all data. Network itself can be accessed via the EDisGo API object as follows:\n", - "\n", - "```python\n", - "edisgo.network\n", - "```\n", - "\n", - "As mentioned, *Network* holds the MV grid and LV grids. The grid topology is represented by separate undirected graphs for the MV grid and each of the LV grids. Each of these graphs is an eDisGo [Graph](http://edisgo.readthedocs.io/en/dev/_modules/edisgo/grid/grids.html#Graph), which is subclassed from networkx.Graph and extended with extra functionality. Lines represent edges in the graph. Other equipment is represented by a node. Let's have a look into the graph.\n", - "\n", - "First we take a look at all the **lines**."
- ] - }, - { - "cell_type": "code", - "execution_count": 6, - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "{GeneratorFluctuating_839361: {GeneratorFluctuating_878867: {'type': 'line',\n", - " 'line': Line_2390003},\n", - " BranchTee_MVGrid_239_86: {'type': 'line', 'line': Line_2390004}},\n", - " GeneratorFluctuating_839362: {BranchTee_MVGrid_239_87: {'type': 'line',\n", - " 'line': Line_2390005}},\n", - " GeneratorFluctuating_839363: {BranchTee_MVGrid_239_88: {'type': 'line',\n", - " 'line': Line_2390006}},\n", - " GeneratorFluctuating_839364: {LVStation_119612: {'type': 'line',\n", - " 'line': Line_2390007}},\n", - " GeneratorFluctuating_878450: {BranchTee_MVGrid_239_89: {'type': 'line',\n", - " 'line': Line_2390008}},\n", - " GeneratorFluctuating_878583: {BranchTee_MVGrid_239_84: {'type': 'line',\n", - " 'line': Line_2390001}},\n", - " GeneratorFluctuating_878609: {MVStation_239: {'type': 'line',\n", - " 'line': Line_2390009}},\n", - " GeneratorFluctuating_878611: {MVStation_239: {'type': 'line',\n", - " 'line': Line_2390010}},\n", - " GeneratorFluctuating_878614: {MVStation_239: {'type': 'line',\n", - " 'line': Line_2390011}},\n", - " GeneratorFluctuating_878615: {MVStation_239: {'type': 'line',\n", - " 'line': Line_2390012}},\n", - " GeneratorFluctuating_878862: {BranchTee_MVGrid_239_90: {'type': 'line',\n", - " 'line': Line_2390013}},\n", - " GeneratorFluctuating_878863: {BranchTee_MVGrid_239_91: {'type': 'line',\n", - " 'line': Line_2390014}},\n", - " GeneratorFluctuating_878864: {BranchTee_MVGrid_239_92: {'type': 'line',\n", - " 'line': Line_2390015}},\n", - " GeneratorFluctuating_878865: {BranchTee_MVGrid_239_93: {'type': 'line',\n", - " 'line': Line_2390016}},\n", - " GeneratorFluctuating_878866: {BranchTee_MVGrid_239_94: {'type': 'line',\n", - " 'line': Line_2390017}},\n", - " GeneratorFluctuating_878867: {GeneratorFluctuating_839361: {'type': 'line',\n", - " 'line': Line_2390003}},\n", - " GeneratorFluctuating_878875: {MVStation_239: {'type': 'line',\n", - " 'line': Line_2390018}},\n", - " GeneratorFluctuating_878950: {MVStation_239: {'type': 'line',\n", - " 'line': Line_2390019}},\n", - " GeneratorFluctuating_878963: {BranchTee_MVGrid_239_85: {'type': 'line',\n", - " 'line': Line_2390002}},\n", - " BranchTee_MVGrid_239_1: {LVStation_119897: {'type': 'line',\n", - " 'line': Line_2390050},\n", - " MVDisconnectingPoint_1: {'line': Line_2390252, 'type': 'line'}},\n", - " BranchTee_MVGrid_239_2: {MVStation_239: {'type': 'line',\n", - " 'line': Line_2390266},\n", - " MVDisconnectingPoint_2: {'line': Line_2390265, 'type': 'line'}},\n", - " BranchTee_MVGrid_239_3: {LVStation_125269: {'type': 'line',\n", - " 'line': Line_2390181},\n", - " BranchTee_MVGrid_239_88: {'type': 'line', 'line': Line_2390273},\n", - " BranchTee_MVGrid_239_89: {'type': 'line', 'line': Line_2390274}},\n", - " BranchTee_MVGrid_239_4: {LVStation_119904: {'type': 'line',\n", - " 'line': Line_2390055},\n", - " BranchTee_MVGrid_239_18: {'type': 'line', 'line': Line_2390251},\n", - " BranchTee_MVGrid_239_20: {'type': 'line', 'line': Line_2390254}},\n", - " BranchTee_MVGrid_239_5: {LVStation_120736: {'type': 'line',\n", - " 'line': Line_2390071},\n", - " BranchTee_MVGrid_239_6: {'type': 'line', 'line': Line_2390277},\n", - " BranchTee_MVGrid_239_8: {'type': 'line', 'line': Line_2390278}},\n", - " BranchTee_MVGrid_239_6: {LVStation_120411: {'type': 'line',\n", - " 'line': Line_2390064},\n", - " BranchTee_MVGrid_239_25: {'type': 'line', 'line': Line_2390260},\n", - " BranchTee_MVGrid_239_5: {'type': 
'line', 'line': Line_2390277}},\n", - " BranchTee_MVGrid_239_7: {LVStation_120470: {'type': 'line',\n", - " 'line': Line_2390065},\n", - " LVStation_419726: {'type': 'line', 'line': Line_2390224},\n", - " BranchTee_MVGrid_239_8: {'type': 'line', 'line': Line_2390285}},\n", - " BranchTee_MVGrid_239_8: {LVStation_419795: {'type': 'line',\n", - " 'line': Line_2390225},\n", - " BranchTee_MVGrid_239_5: {'type': 'line', 'line': Line_2390278},\n", - " BranchTee_MVGrid_239_7: {'type': 'line', 'line': Line_2390285}},\n", - " BranchTee_MVGrid_239_9: {LVStation_120555: {'type': 'line',\n", - " 'line': Line_2390067},\n", - " LVStation_120585: {'type': 'line', 'line': Line_2390069},\n", - " BranchTee_MVGrid_239_10: {'type': 'line', 'line': Line_2390242}},\n", - " BranchTee_MVGrid_239_10: {LVStation_511325: {'type': 'line',\n", - " 'line': Line_2390238},\n", - " BranchTee_MVGrid_239_13: {'type': 'line', 'line': Line_2390241},\n", - " BranchTee_MVGrid_239_9: {'type': 'line', 'line': Line_2390242}},\n", - " BranchTee_MVGrid_239_11: {LVStation_120898: {'type': 'line',\n", - " 'line': Line_2390076},\n", - " LVStation_418244: {'type': 'line', 'line': Line_2390219},\n", - " MVStation_239: {'type': 'line', 'line': Line_2390243}},\n", - " BranchTee_MVGrid_239_12: {LVStation_120943: {'type': 'line',\n", - " 'line': Line_2390080},\n", - " LVStation_419885: {'type': 'line', 'line': Line_2390226},\n", - " BranchTee_MVGrid_239_84: {'type': 'line', 'line': Line_2390244}},\n", - " BranchTee_MVGrid_239_13: {LVStation_121317: {'type': 'line',\n", - " 'line': Line_2390087},\n", - " LVStation_416441: {'type': 'line', 'line': Line_2390204},\n", - " BranchTee_MVGrid_239_10: {'type': 'line', 'line': Line_2390241}},\n", - " BranchTee_MVGrid_239_14: {LVStation_121289: {'type': 'line',\n", - " 'line': Line_2390086},\n", - " LVStation_417530: {'type': 'line', 'line': Line_2390211},\n", - " MVStation_239: {'type': 'line', 'line': Line_2390245}},\n", - " BranchTee_MVGrid_239_15: {LVStation_121776: {'type': 'line',\n", - " 'line': Line_2390096},\n", - " BranchTee_MVGrid_239_29: {'type': 'line', 'line': Line_2390246},\n", - " BranchTee_MVGrid_239_55: {'type': 'line', 'line': Line_2390247}},\n", - " BranchTee_MVGrid_239_16: {LVStation_417276: {'type': 'line',\n", - " 'line': Line_2390210},\n", - " BranchTee_MVGrid_239_19: {'type': 'line', 'line': Line_2390248},\n", - " MVStation_239: {'type': 'line', 'line': Line_2390249}},\n", - " BranchTee_MVGrid_239_17: {LVStation_124110: {'type': 'line',\n", - " 'line': Line_2390148},\n", - " LVStation_416815: {'type': 'line', 'line': Line_2390207},\n", - " LVStation_416983: {'type': 'line', 'line': Line_2390209}},\n", - " BranchTee_MVGrid_239_18: {LVStation_121940: {'type': 'line',\n", - " 'line': Line_2390110},\n", - " BranchTee_MVGrid_239_29: {'type': 'line', 'line': Line_2390250},\n", - " BranchTee_MVGrid_239_4: {'type': 'line', 'line': Line_2390251}},\n", - " BranchTee_MVGrid_239_19: {LVStation_121919: {'type': 'line',\n", - " 'line': Line_2390108},\n", - " LVStation_416589: {'type': 'line', 'line': Line_2390206},\n", - " BranchTee_MVGrid_239_16: {'type': 'line', 'line': Line_2390248}},\n", - " BranchTee_MVGrid_239_20: {LVStation_419079: {'type': 'line',\n", - " 'line': Line_2390223},\n", - " BranchTee_MVGrid_239_35: {'type': 'line', 'line': Line_2390253},\n", - " BranchTee_MVGrid_239_4: {'type': 'line', 'line': Line_2390254}},\n", - " BranchTee_MVGrid_239_21: {LVStation_121879: {'type': 'line',\n", - " 'line': Line_2390099},\n", - " LVStation_122400: {'type': 'line', 'line': 
Line_2390128},\n", - " BranchTee_MVGrid_239_34: {'type': 'line', 'line': Line_2390255}},\n", - " BranchTee_MVGrid_239_22: {LVStation_122077: {'type': 'line',\n", - " 'line': Line_2390115},\n", - " LVStation_122078: {'type': 'line', 'line': Line_2390117},\n", - " LVStation_418237: {'type': 'line', 'line': Line_2390218}},\n", - " BranchTee_MVGrid_239_23: {LVStation_124085: {'type': 'line',\n", - " 'line': Line_2390144},\n", - " BranchTee_MVGrid_239_43: {'type': 'line', 'line': Line_2390256},\n", - " MVStation_239: {'type': 'line', 'line': Line_2390257}},\n", - " BranchTee_MVGrid_239_24: {LVStation_416449: {'type': 'line',\n", - " 'line': Line_2390205},\n", - " BranchTee_MVGrid_239_70: {'type': 'line', 'line': Line_2390258},\n", - " BranchTee_MVGrid_239_85: {'type': 'line', 'line': Line_2390259}},\n", - " BranchTee_MVGrid_239_25: {LVStation_503036: {'type': 'line',\n", - " 'line': Line_2390237},\n", - " BranchTee_MVGrid_239_6: {'type': 'line', 'line': Line_2390260},\n", - " BranchTee_MVGrid_239_76: {'type': 'line', 'line': Line_2390261}},\n", - " BranchTee_MVGrid_239_26: {LVStation_417550: {'type': 'line',\n", - " 'line': Line_2390213},\n", - " LVStation_417936: {'type': 'line', 'line': Line_2390216},\n", - " BranchTee_MVGrid_239_64: {'type': 'line', 'line': Line_2390262}},\n", - " BranchTee_MVGrid_239_27: {LVStation_417909: {'type': 'line',\n", - " 'line': Line_2390215},\n", - " LVStation_417987: {'type': 'line', 'line': Line_2390217},\n", - " BranchTee_MVGrid_239_78: {'type': 'line', 'line': Line_2390263}},\n", - " BranchTee_MVGrid_239_28: {LVStation_122077: {'type': 'line',\n", - " 'line': Line_2390116},\n", - " LVStation_122426: {'type': 'line', 'line': Line_2390132},\n", - " BranchTee_MVGrid_239_54: {'type': 'line', 'line': Line_2390264}},\n", - " BranchTee_MVGrid_239_29: {LVStation_122480: {'type': 'line',\n", - " 'line': Line_2390133},\n", - " BranchTee_MVGrid_239_15: {'type': 'line', 'line': Line_2390246},\n", - " BranchTee_MVGrid_239_18: {'type': 'line', 'line': Line_2390250}},\n", - " BranchTee_MVGrid_239_30: {BranchTee_MVGrid_239_32: {'type': 'line',\n", - " 'line': Line_2390267},\n", - " BranchTee_MVGrid_239_35: {'type': 'line', 'line': Line_2390268},\n", - " BranchTee_MVGrid_239_91: {'type': 'line', 'line': Line_2390269}},\n", - " BranchTee_MVGrid_239_31: {LVStation_139149: {'type': 'line',\n", - " 'line': Line_2390189},\n", - " LVStation_139186: {'type': 'line', 'line': Line_2390198},\n", - " BranchTee_MVGrid_239_34: {'type': 'line', 'line': Line_2390270}},\n", - " BranchTee_MVGrid_239_32: {LVStation_139104: {'type': 'line',\n", - " 'line': Line_2390184},\n", - " BranchTee_MVGrid_239_30: {'type': 'line', 'line': Line_2390267},\n", - " BranchTee_MVGrid_239_33: {'type': 'line', 'line': Line_2390271}},\n", - " BranchTee_MVGrid_239_33: {LVStation_419885: {'type': 'line',\n", - " 'line': Line_2390227},\n", - " BranchTee_MVGrid_239_32: {'type': 'line', 'line': Line_2390271},\n", - " BranchTee_MVGrid_239_86: {'type': 'line', 'line': Line_2390272}},\n", - " BranchTee_MVGrid_239_34: {LVStation_139150: {'type': 'line',\n", - " 'line': Line_2390191},\n", - " BranchTee_MVGrid_239_21: {'type': 'line', 'line': Line_2390255},\n", - " BranchTee_MVGrid_239_31: {'type': 'line', 'line': Line_2390270}},\n", - " BranchTee_MVGrid_239_35: {LVStation_122231: {'type': 'line',\n", - " 'line': Line_2390127},\n", - " BranchTee_MVGrid_239_20: {'type': 'line', 'line': Line_2390253},\n", - " BranchTee_MVGrid_239_30: {'type': 'line', 'line': Line_2390268}},\n", - " BranchTee_MVGrid_239_36: {LVStation_118322: 
{'type': 'line',\n", - " 'line': Line_2390020},\n", - " LVStation_118323: {'type': 'line', 'line': Line_2390021},\n", - " LVStation_124111: {'type': 'line', 'line': Line_2390150}},\n", - " BranchTee_MVGrid_239_37: {LVStation_118324: {'type': 'line',\n", - " 'line': Line_2390023},\n", - " LVStation_515314: {'type': 'line', 'line': Line_2390240}},\n", - " BranchTee_MVGrid_239_38: {LVStation_119612: {'type': 'line',\n", - " 'line': Line_2390024},\n", - " LVStation_119613: {'type': 'line', 'line': Line_2390026},\n", - " LVStation_417898: {'type': 'line', 'line': Line_2390214}},\n", - " BranchTee_MVGrid_239_39: {LVStation_119698: {'type': 'line',\n", - " 'line': Line_2390029}},\n", - " BranchTee_MVGrid_239_40: {LVStation_119701: {'type': 'line',\n", - " 'line': Line_2390032},\n", - " LVStation_119704: {'type': 'line', 'line': Line_2390035}},\n", - " BranchTee_MVGrid_239_41: {LVStation_119891: {'type': 'line',\n", - " 'line': Line_2390040},\n", - " LVStation_119894: {'type': 'line', 'line': Line_2390044},\n", - " LVStation_119895: {'type': 'line', 'line': Line_2390046}},\n", - " BranchTee_MVGrid_239_42: {LVStation_119896: {'type': 'line',\n", - " 'line': Line_2390048},\n", - " LVStation_119900: {'type': 'line', 'line': Line_2390052},\n", - " BranchTee_MVGrid_239_43: {'type': 'line', 'line': Line_2390275}},\n", - " BranchTee_MVGrid_239_43: {LVStation_119901: {'type': 'line',\n", - " 'line': Line_2390053},\n", - " BranchTee_MVGrid_239_23: {'type': 'line', 'line': Line_2390256},\n", - " BranchTee_MVGrid_239_42: {'type': 'line', 'line': Line_2390275}},\n", - " BranchTee_MVGrid_239_44: {LVStation_119892: {'type': 'line',\n", - " 'line': Line_2390041},\n", - " LVStation_119893: {'type': 'line', 'line': Line_2390042},\n", - " LVStation_119895: {'type': 'line', 'line': Line_2390047},\n", - " LVStation_119896: {'type': 'line', 'line': Line_2390049},\n", - " BranchTee_MVGrid_239_93: {'type': 'line', 'line': Line_2390276}},\n", - " BranchTee_MVGrid_239_45: {LVStation_119903: {'type': 'line',\n", - " 'line': Line_2390054},\n", - " LVStation_119904: {'type': 'line', 'line': Line_2390056}},\n", - " BranchTee_MVGrid_239_46: {LVStation_120387: {'type': 'line',\n", - " 'line': Line_2390059},\n", - " LVStation_120390: {'type': 'line', 'line': Line_2390063}},\n", - " BranchTee_MVGrid_239_47: {LVStation_120555: {'type': 'line',\n", - " 'line': Line_2390066},\n", - " LVStation_500916: {'type': 'line', 'line': Line_2390235}},\n", - " BranchTee_MVGrid_239_48: {LVStation_120737: {'type': 'line',\n", - " 'line': Line_2390072},\n", - " LVStation_120738: {'type': 'line', 'line': Line_2390073}},\n", - " BranchTee_MVGrid_239_49: {LVStation_120853: {'type': 'line',\n", - " 'line': Line_2390075},\n", - " LVStation_511325: {'type': 'line', 'line': Line_2390239}},\n", - " BranchTee_MVGrid_239_50: {LVStation_120898: {'type': 'line',\n", - " 'line': Line_2390077},\n", - " LVStation_120899: {'type': 'line', 'line': Line_2390078}},\n", - " BranchTee_MVGrid_239_51: {LVStation_120942: {'type': 'line',\n", - " 'line': Line_2390079},\n", - " LVStation_120943: {'type': 'line', 'line': Line_2390081}},\n", - " BranchTee_MVGrid_239_52: {LVStation_121286: {'type': 'line',\n", - " 'line': Line_2390082},\n", - " LVStation_121287: {'type': 'line', 'line': Line_2390083},\n", - " LVStation_121288: {'type': 'line', 'line': Line_2390085}},\n", - " BranchTee_MVGrid_239_53: {LVStation_121317: {'type': 'line',\n", - " 'line': Line_2390088},\n", - " LVStation_121318: {'type': 'line', 'line': Line_2390090}},\n", - " BranchTee_MVGrid_239_54: 
{LVStation_121742: {'type': 'line',\n", - " 'line': Line_2390093},\n", - " LVStation_121743: {'type': 'line', 'line': Line_2390095},\n", - " BranchTee_MVGrid_239_28: {'type': 'line', 'line': Line_2390264}},\n", - " BranchTee_MVGrid_239_55: {LVStation_121741: {'type': 'line',\n", - " 'line': Line_2390092},\n", - " LVStation_121742: {'type': 'line', 'line': Line_2390094},\n", - " LVStation_122230: {'type': 'line', 'line': Line_2390124},\n", - " LVStation_496409: {'type': 'line', 'line': Line_2390233},\n", - " BranchTee_MVGrid_239_15: {'type': 'line', 'line': Line_2390247}},\n", - " BranchTee_MVGrid_239_56: {LVStation_121879: {'type': 'line',\n", - " 'line': Line_2390100},\n", - " LVStation_121880: {'type': 'line', 'line': Line_2390102}},\n", - " BranchTee_MVGrid_239_57: {LVStation_121915: {'type': 'line',\n", - " 'line': Line_2390105},\n", - " LVStation_121916: {'type': 'line', 'line': Line_2390107},\n", - " LVStation_121919: {'type': 'line', 'line': Line_2390109}},\n", - " BranchTee_MVGrid_239_58: {LVStation_121940: {'type': 'line',\n", - " 'line': Line_2390111},\n", - " LVStation_121941: {'type': 'line', 'line': Line_2390112}},\n", - " BranchTee_MVGrid_239_59: {LVStation_122076: {'type': 'line',\n", - " 'line': Line_2390113},\n", - " LVStation_122078: {'type': 'line', 'line': Line_2390118}},\n", - " BranchTee_MVGrid_239_60: {LVStation_122123: {'type': 'line',\n", - " 'line': Line_2390120},\n", - " LVStation_122124: {'type': 'line', 'line': Line_2390121},\n", - " LVStation_122125: {'type': 'line', 'line': Line_2390123}},\n", - " BranchTee_MVGrid_239_61: {LVStation_122230: {'type': 'line',\n", - " 'line': Line_2390125}},\n", - " BranchTee_MVGrid_239_62: {LVStation_122400: {'type': 'line',\n", - " 'line': Line_2390129},\n", - " LVStation_122401: {'type': 'line', 'line': Line_2390130}},\n", - " BranchTee_MVGrid_239_63: {LVStation_122408: {'type': 'line',\n", - " 'line': Line_2390131},\n", - " LVStation_485974: {'type': 'line', 'line': Line_2390228}},\n", - " BranchTee_MVGrid_239_64: {LVStation_122696: {'type': 'line',\n", - " 'line': Line_2390135},\n", - " BranchTee_MVGrid_239_26: {'type': 'line', 'line': Line_2390262},\n", - " BranchTee_MVGrid_239_66: {'type': 'line', 'line': Line_2390279}},\n", - " BranchTee_MVGrid_239_65: {LVStation_122698: {'type': 'line',\n", - " 'line': Line_2390138},\n", - " LVStation_122699: {'type': 'line', 'line': Line_2390139},\n", - " BranchTee_MVGrid_239_66: {'type': 'line', 'line': Line_2390280}},\n", - " BranchTee_MVGrid_239_66: {LVStation_122697: {'type': 'line',\n", - " 'line': Line_2390136},\n", - " LVStation_418254: {'type': 'line', 'line': Line_2390220},\n", - " BranchTee_MVGrid_239_64: {'type': 'line', 'line': Line_2390279},\n", - " BranchTee_MVGrid_239_65: {'type': 'line', 'line': Line_2390280},\n", - " BranchTee_MVGrid_239_90: {'type': 'line', 'line': Line_2390281}},\n", - " BranchTee_MVGrid_239_67: {LVStation_124010: {'type': 'line',\n", - " 'line': Line_2390141},\n", - " LVStation_124011: {'type': 'line', 'line': Line_2390143}},\n", - " BranchTee_MVGrid_239_68: {LVStation_124085: {'type': 'line',\n", - " 'line': Line_2390145},\n", - " LVStation_124086: {'type': 'line', 'line': Line_2390146}},\n", - " BranchTee_MVGrid_239_69: {LVStation_124109: {'type': 'line',\n", - " 'line': Line_2390147},\n", - " LVStation_124110: {'type': 'line', 'line': Line_2390149},\n", - " LVStation_124111: {'type': 'line', 'line': Line_2390151}},\n", - " BranchTee_MVGrid_239_70: {LVStation_498758: {'type': 'line',\n", - " 'line': Line_2390234},\n", - " BranchTee_MVGrid_239_24: 
{'type': 'line', 'line': Line_2390258},\n", - " BranchTee_MVGrid_239_71: {'type': 'line', 'line': Line_2390282}},\n", - " BranchTee_MVGrid_239_71: {LVStation_124582: {'type': 'line',\n", - " 'line': Line_2390154},\n", - " LVStation_124583: {'type': 'line', 'line': Line_2390156},\n", - " BranchTee_MVGrid_239_70: {'type': 'line', 'line': Line_2390282}},\n", - " BranchTee_MVGrid_239_72: {LVStation_124911: {'type': 'line',\n", - " 'line': Line_2390160}},\n", - " BranchTee_MVGrid_239_73: {LVStation_125016: {'type': 'line',\n", - " 'line': Line_2390163},\n", - " LVStation_500931: {'type': 'line', 'line': Line_2390236},\n", - " BranchTee_MVGrid_239_74: {'type': 'line', 'line': Line_2390283}},\n", - " BranchTee_MVGrid_239_74: {LVStation_125015: {'type': 'line',\n", - " 'line': Line_2390162},\n", - " LVStation_125017: {'type': 'line', 'line': Line_2390165},\n", - " BranchTee_MVGrid_239_73: {'type': 'line', 'line': Line_2390283}},\n", - " BranchTee_MVGrid_239_75: {LVStation_125210: {'type': 'line',\n", - " 'line': Line_2390166},\n", - " LVStation_125211: {'type': 'line', 'line': Line_2390169},\n", - " MVDisconnectingPoint_4: {'line': Line_2390284, 'type': 'line'}},\n", - " BranchTee_MVGrid_239_76: {LVStation_125214: {'type': 'line',\n", - " 'line': Line_2390172},\n", - " LVStation_125215: {'type': 'line', 'line': Line_2390174},\n", - " BranchTee_MVGrid_239_25: {'type': 'line', 'line': Line_2390261}},\n", - " BranchTee_MVGrid_239_77: {LVStation_124911: {'type': 'line',\n", - " 'line': Line_2390161},\n", - " LVStation_125213: {'type': 'line', 'line': Line_2390171},\n", - " LVStation_125214: {'type': 'line', 'line': Line_2390173}},\n", - " BranchTee_MVGrid_239_78: {LVStation_125267: {'type': 'line',\n", - " 'line': Line_2390178},\n", - " LVStation_125268: {'type': 'line', 'line': Line_2390179},\n", - " BranchTee_MVGrid_239_27: {'type': 'line', 'line': Line_2390263}},\n", - " BranchTee_MVGrid_239_79: {LVStation_125268: {'type': 'line',\n", - " 'line': Line_2390180},\n", - " LVStation_125269: {'type': 'line', 'line': Line_2390182}},\n", - " BranchTee_MVGrid_239_80: {LVStation_139104: {'type': 'line',\n", - " 'line': Line_2390185},\n", - " LVStation_139105: {'type': 'line', 'line': Line_2390186},\n", - " LVStation_139106: {'type': 'line', 'line': Line_2390187}},\n", - " BranchTee_MVGrid_239_81: {LVStation_139150: {'type': 'line',\n", - " 'line': Line_2390192}},\n", - " BranchTee_MVGrid_239_82: {LVStation_139183: {'type': 'line',\n", - " 'line': Line_2390195},\n", - " LVStation_488816: {'type': 'line', 'line': Line_2390231},\n", - " LVStation_490253: {'type': 'line', 'line': Line_2390232}},\n", - " BranchTee_MVGrid_239_83: {LVStation_139186: {'type': 'line',\n", - " 'line': Line_2390199},\n", - " LVStation_139187: {'type': 'line', 'line': Line_2390201},\n", - " MVDisconnectingPoint_5: {'line': Line_2390197, 'type': 'line'}},\n", - " BranchTee_MVGrid_239_84: {GeneratorFluctuating_878583: {'type': 'line',\n", - " 'line': Line_2390001},\n", - " LVStation_120738: {'type': 'line', 'line': Line_2390074},\n", - " BranchTee_MVGrid_239_12: {'type': 'line', 'line': Line_2390244}},\n", - " BranchTee_MVGrid_239_85: {GeneratorFluctuating_878963: {'type': 'line',\n", - " 'line': Line_2390002},\n", - " LVStation_416815: {'type': 'line', 'line': Line_2390208},\n", - " BranchTee_MVGrid_239_24: {'type': 'line', 'line': Line_2390259}},\n", - " BranchTee_MVGrid_239_86: {GeneratorFluctuating_839361: {'type': 'line',\n", - " 'line': Line_2390004},\n", - " LVStation_139107: {'type': 'line', 'line': Line_2390188},\n", - " 
BranchTee_MVGrid_239_33: {'type': 'line', 'line': Line_2390272}},\n", - " BranchTee_MVGrid_239_87: {GeneratorFluctuating_839362: {'type': 'line',\n", - " 'line': Line_2390005},\n", - " LVStation_119698: {'type': 'line', 'line': Line_2390030},\n", - " LVStation_119889: {'type': 'line', 'line': Line_2390037}},\n", - " BranchTee_MVGrid_239_88: {GeneratorFluctuating_839363: {'type': 'line',\n", - " 'line': Line_2390006},\n", - " LVStation_125210: {'type': 'line', 'line': Line_2390167},\n", - " BranchTee_MVGrid_239_3: {'type': 'line', 'line': Line_2390273}},\n", - " BranchTee_MVGrid_239_89: {GeneratorFluctuating_878450: {'type': 'line',\n", - " 'line': Line_2390008},\n", - " LVStation_418546: {'type': 'line', 'line': Line_2390221},\n", - " BranchTee_MVGrid_239_3: {'type': 'line', 'line': Line_2390274}},\n", - " BranchTee_MVGrid_239_90: {GeneratorFluctuating_878862: {'type': 'line',\n", - " 'line': Line_2390013},\n", - " LVStation_123655: {'type': 'line', 'line': Line_2390140},\n", - " BranchTee_MVGrid_239_66: {'type': 'line', 'line': Line_2390281}},\n", - " BranchTee_MVGrid_239_91: {GeneratorFluctuating_878863: {'type': 'line',\n", - " 'line': Line_2390014},\n", - " LVStation_122520: {'type': 'line', 'line': Line_2390134},\n", - " BranchTee_MVGrid_239_30: {'type': 'line', 'line': Line_2390269}},\n", - " BranchTee_MVGrid_239_92: {GeneratorFluctuating_878864: {'type': 'line',\n", - " 'line': Line_2390015},\n", - " LVStation_119897: {'type': 'line', 'line': Line_2390051},\n", - " MVStation_239: {'type': 'line', 'line': Line_2390286}},\n", - " BranchTee_MVGrid_239_93: {GeneratorFluctuating_878865: {'type': 'line',\n", - " 'line': Line_2390016},\n", - " LVStation_120038: {'type': 'line', 'line': Line_2390057},\n", - " BranchTee_MVGrid_239_44: {'type': 'line', 'line': Line_2390276}},\n", - " BranchTee_MVGrid_239_94: {GeneratorFluctuating_878866: {'type': 'line',\n", - " 'line': Line_2390017},\n", - " LVStation_124910: {'type': 'line', 'line': Line_2390159},\n", - " LVStation_125216: {'type': 'line', 'line': Line_2390175}},\n", - " LVStation_122408: {BranchTee_MVGrid_239_63: {'type': 'line',\n", - " 'line': Line_2390131}},\n", - " LVStation_485974: {BranchTee_MVGrid_239_63: {'type': 'line',\n", - " 'line': Line_2390228},\n", - " MVStation_239: {'type': 'line', 'line': Line_2390229}},\n", - " LVStation_138585: {LVStation_139149: {'type': 'line', 'line': Line_2390183}},\n", - " LVStation_119895: {LVStation_119899: {'type': 'line', 'line': Line_2390045},\n", - " BranchTee_MVGrid_239_41: {'type': 'line', 'line': Line_2390046},\n", - " BranchTee_MVGrid_239_44: {'type': 'line', 'line': Line_2390047}},\n", - " LVStation_119896: {BranchTee_MVGrid_239_42: {'type': 'line',\n", - " 'line': Line_2390048},\n", - " BranchTee_MVGrid_239_44: {'type': 'line', 'line': Line_2390049}},\n", - " LVStation_119889: {LVStation_119891: {'type': 'line', 'line': Line_2390036},\n", - " BranchTee_MVGrid_239_87: {'type': 'line', 'line': Line_2390037}},\n", - " LVStation_119890: {LVStation_119893: {'type': 'line', 'line': Line_2390038},\n", - " LVStation_416175: {'type': 'line', 'line': Line_2390039}},\n", - " LVStation_119891: {LVStation_119889: {'type': 'line', 'line': Line_2390036},\n", - " BranchTee_MVGrid_239_41: {'type': 'line', 'line': Line_2390040}},\n", - " LVStation_119892: {LVStation_119697: {'type': 'line', 'line': Line_2390027},\n", - " BranchTee_MVGrid_239_44: {'type': 'line', 'line': Line_2390041}},\n", - " LVStation_119893: {LVStation_119890: {'type': 'line', 'line': Line_2390038},\n", - " BranchTee_MVGrid_239_44: 
{'type': 'line', 'line': Line_2390042}},\n", - " LVStation_119894: {LVStation_119898: {'type': 'line', 'line': Line_2390043},\n", - " BranchTee_MVGrid_239_41: {'type': 'line', 'line': Line_2390044}},\n", - " LVStation_119897: {BranchTee_MVGrid_239_1: {'type': 'line',\n", - " 'line': Line_2390050},\n", - " BranchTee_MVGrid_239_92: {'type': 'line', 'line': Line_2390051}},\n", - " LVStation_119898: {LVStation_119894: {'type': 'line', 'line': Line_2390043}},\n", - " LVStation_119899: {LVStation_119895: {'type': 'line', 'line': Line_2390045}},\n", - " LVStation_119900: {BranchTee_MVGrid_239_42: {'type': 'line',\n", - " 'line': Line_2390052}},\n", - " LVStation_119901: {BranchTee_MVGrid_239_43: {'type': 'line',\n", - " 'line': Line_2390053}},\n", - " LVStation_417530: {BranchTee_MVGrid_239_14: {'type': 'line',\n", - " 'line': Line_2390211}},\n", - " LVStation_419885: {BranchTee_MVGrid_239_12: {'type': 'line',\n", - " 'line': Line_2390226},\n", - " BranchTee_MVGrid_239_33: {'type': 'line', 'line': Line_2390227}},\n", - " LVStation_121940: {BranchTee_MVGrid_239_18: {'type': 'line',\n", - " 'line': Line_2390110},\n", - " BranchTee_MVGrid_239_58: {'type': 'line', 'line': Line_2390111}},\n", - " LVStation_121941: {BranchTee_MVGrid_239_58: {'type': 'line',\n", - " 'line': Line_2390112}},\n", - " LVStation_122426: {BranchTee_MVGrid_239_28: {'type': 'line',\n", - " 'line': Line_2390132}},\n", - " LVStation_122480: {BranchTee_MVGrid_239_29: {'type': 'line',\n", - " 'line': Line_2390133}},\n", - " LVStation_418254: {LVStation_124543: {'type': 'line', 'line': Line_2390152},\n", - " BranchTee_MVGrid_239_66: {'type': 'line', 'line': Line_2390220}},\n", - " LVStation_419605: {LVStation_125217: {'type': 'line', 'line': Line_2390176}},\n", - " LVStation_416441: {BranchTee_MVGrid_239_13: {'type': 'line',\n", - " 'line': Line_2390204}},\n", - " LVStation_418546: {BranchTee_MVGrid_239_89: {'type': 'line',\n", - " 'line': Line_2390221}},\n", - " LVStation_416244: {LVStation_119697: {'type': 'line', 'line': Line_2390028}},\n", - " LVStation_417898: {LVStation_417550: {'type': 'line', 'line': Line_2390212},\n", - " BranchTee_MVGrid_239_38: {'type': 'line', 'line': Line_2390214}},\n", - " LVStation_419795: {BranchTee_MVGrid_239_8: {'type': 'line',\n", - " 'line': Line_2390225}},\n", - " LVStation_120737: {LVStation_120736: {'type': 'line', 'line': Line_2390070},\n", - " BranchTee_MVGrid_239_48: {'type': 'line', 'line': Line_2390072}},\n", - " LVStation_120736: {LVStation_120737: {'type': 'line', 'line': Line_2390070},\n", - " BranchTee_MVGrid_239_5: {'type': 'line', 'line': Line_2390071}},\n", - " LVStation_120738: {BranchTee_MVGrid_239_48: {'type': 'line',\n", - " 'line': Line_2390073},\n", - " BranchTee_MVGrid_239_84: {'type': 'line', 'line': Line_2390074}},\n", - " LVStation_120942: {BranchTee_MVGrid_239_51: {'type': 'line',\n", - " 'line': Line_2390079}},\n", - " LVStation_120943: {BranchTee_MVGrid_239_12: {'type': 'line',\n", - " 'line': Line_2390080},\n", - " BranchTee_MVGrid_239_51: {'type': 'line', 'line': Line_2390081}},\n", - " LVStation_122230: {BranchTee_MVGrid_239_55: {'type': 'line',\n", - " 'line': Line_2390124},\n", - " BranchTee_MVGrid_239_61: {'type': 'line', 'line': Line_2390125}},\n", - " LVStation_122231: {LVStation_139192: {'type': 'line', 'line': Line_2390126},\n", - " BranchTee_MVGrid_239_35: {'type': 'line', 'line': Line_2390127}},\n", - " LVStation_418237: {BranchTee_MVGrid_239_22: {'type': 'line',\n", - " 'line': Line_2390218}},\n", - " LVStation_416449: {BranchTee_MVGrid_239_24: {'type': 
'line',\n", - " 'line': Line_2390205}},\n", - " LVStation_417550: {LVStation_417898: {'type': 'line', 'line': Line_2390212},\n", - " BranchTee_MVGrid_239_26: {'type': 'line', 'line': Line_2390213}},\n", - " LVStation_139107: {BranchTee_MVGrid_239_86: {'type': 'line',\n", - " 'line': Line_2390188}},\n", - " LVStation_120585: {LVStation_124583: {'type': 'line', 'line': Line_2390068},\n", - " BranchTee_MVGrid_239_9: {'type': 'line', 'line': Line_2390069}},\n", - " LVStation_417276: {BranchTee_MVGrid_239_16: {'type': 'line',\n", - " 'line': Line_2390210}},\n", - " LVStation_122520: {BranchTee_MVGrid_239_91: {'type': 'line',\n", - " 'line': Line_2390134}},\n", - " LVStation_419726: {BranchTee_MVGrid_239_7: {'type': 'line',\n", - " 'line': Line_2390224}},\n", - " LVStation_121776: {BranchTee_MVGrid_239_15: {'type': 'line',\n", - " 'line': Line_2390096}},\n", - " LVStation_419327: {LVStation_124910: {'type': 'line', 'line': Line_2390158}},\n", - " LVStation_417734: {LVStation_139189: {'type': 'line', 'line': Line_2390203}},\n", - " LVStation_125015: {BranchTee_MVGrid_239_74: {'type': 'line',\n", - " 'line': Line_2390162}},\n", - " LVStation_125016: {LVStation_120389: {'type': 'line', 'line': Line_2390062},\n", - " BranchTee_MVGrid_239_73: {'type': 'line', 'line': Line_2390163}},\n", - " LVStation_125017: {LVStation_417909: {'type': 'line', 'line': Line_2390164},\n", - " BranchTee_MVGrid_239_74: {'type': 'line', 'line': Line_2390165}},\n", - " LVStation_500931: {BranchTee_MVGrid_239_73: {'type': 'line',\n", - " 'line': Line_2390236}},\n", - " LVStation_418244: {LVStation_122698: {'type': 'line', 'line': Line_2390137},\n", - " BranchTee_MVGrid_239_11: {'type': 'line', 'line': Line_2390219}},\n", - " LVStation_120411: {BranchTee_MVGrid_239_6: {'type': 'line',\n", - " 'line': Line_2390064}},\n", - " LVStation_121317: {BranchTee_MVGrid_239_13: {'type': 'line',\n", - " 'line': Line_2390087},\n", - " BranchTee_MVGrid_239_53: {'type': 'line', 'line': Line_2390088}},\n", - " LVStation_121318: {LVStation_121918: {'type': 'line', 'line': Line_2390089},\n", - " BranchTee_MVGrid_239_53: {'type': 'line', 'line': Line_2390090}},\n", - " LVStation_416815: {BranchTee_MVGrid_239_17: {'type': 'line',\n", - " 'line': Line_2390207},\n", - " BranchTee_MVGrid_239_85: {'type': 'line', 'line': Line_2390208}},\n", - " LVStation_139104: {BranchTee_MVGrid_239_32: {'type': 'line',\n", - " 'line': Line_2390184},\n", - " BranchTee_MVGrid_239_80: {'type': 'line', 'line': Line_2390185}},\n", - " LVStation_139105: {BranchTee_MVGrid_239_80: {'type': 'line',\n", - " 'line': Line_2390186}},\n", - " LVStation_139106: {BranchTee_MVGrid_239_80: {'type': 'line',\n", - " 'line': Line_2390187}},\n", - " LVStation_139192: {LVStation_122231: {'type': 'line', 'line': Line_2390126}},\n", - " LVStation_119903: {BranchTee_MVGrid_239_45: {'type': 'line',\n", - " 'line': Line_2390054}},\n", - " LVStation_119904: {BranchTee_MVGrid_239_4: {'type': 'line',\n", - " 'line': Line_2390055},\n", - " BranchTee_MVGrid_239_45: {'type': 'line', 'line': Line_2390056}},\n", - " LVStation_418449: {LVStation_121741: {'type': 'line', 'line': Line_2390091}},\n", - " LVStation_419079: {BranchTee_MVGrid_239_20: {'type': 'line',\n", - " 'line': Line_2390223}},\n", - " LVStation_119697: {LVStation_119892: {'type': 'line', 'line': Line_2390027},\n", - " LVStation_416244: {'type': 'line', 'line': Line_2390028}},\n", - " LVStation_119698: {BranchTee_MVGrid_239_39: {'type': 'line',\n", - " 'line': Line_2390029},\n", - " BranchTee_MVGrid_239_87: {'type': 'line', 'line': 
Line_2390030}},\n", - " LVStation_122076: {BranchTee_MVGrid_239_59: {'type': 'line',\n", - " 'line': Line_2390113},\n", - " MVStation_239: {'type': 'line', 'line': Line_2390114}},\n", - " LVStation_122077: {BranchTee_MVGrid_239_22: {'type': 'line',\n", - " 'line': Line_2390115},\n", - " BranchTee_MVGrid_239_28: {'type': 'line', 'line': Line_2390116}},\n", - " LVStation_122078: {BranchTee_MVGrid_239_22: {'type': 'line',\n", - " 'line': Line_2390117},\n", - " BranchTee_MVGrid_239_59: {'type': 'line', 'line': Line_2390118}},\n", - " LVStation_122124: {LVStation_121878: {'type': 'line', 'line': Line_2390097},\n", - " BranchTee_MVGrid_239_60: {'type': 'line', 'line': Line_2390121}},\n", - " LVStation_122125: {LVStation_490253: {'type': 'line', 'line': Line_2390122},\n", - " BranchTee_MVGrid_239_60: {'type': 'line', 'line': Line_2390123}},\n", - " LVStation_122122: {LVStation_122123: {'type': 'line', 'line': Line_2390119}},\n", - " LVStation_122123: {LVStation_122122: {'type': 'line', 'line': Line_2390119},\n", - " BranchTee_MVGrid_239_60: {'type': 'line', 'line': Line_2390120}},\n", - " LVStation_124543: {LVStation_418254: {'type': 'line', 'line': Line_2390152}},\n", - " LVStation_124911: {BranchTee_MVGrid_239_72: {'type': 'line',\n", - " 'line': Line_2390160},\n", - " BranchTee_MVGrid_239_77: {'type': 'line', 'line': Line_2390161}},\n", - " LVStation_124910: {LVStation_419327: {'type': 'line', 'line': Line_2390158},\n", - " BranchTee_MVGrid_239_94: {'type': 'line', 'line': Line_2390159}},\n", - " LVStation_139183: {LVStation_139184: {'type': 'line', 'line': Line_2390194},\n", - " BranchTee_MVGrid_239_82: {'type': 'line', 'line': Line_2390195}},\n", - " LVStation_139184: {LVStation_139183: {'type': 'line', 'line': Line_2390194},\n", - " LVStation_139185: {'type': 'line', 'line': Line_2390196}},\n", - " LVStation_139185: {LVStation_139184: {'type': 'line', 'line': Line_2390196}},\n", - " LVStation_139186: {BranchTee_MVGrid_239_31: {'type': 'line',\n", - " 'line': Line_2390198},\n", - " BranchTee_MVGrid_239_83: {'type': 'line', 'line': Line_2390199}},\n", - " LVStation_139187: {LVStation_139188: {'type': 'line', 'line': Line_2390200},\n", - " BranchTee_MVGrid_239_83: {'type': 'line', 'line': Line_2390201}},\n", - " LVStation_139188: {LVStation_139187: {'type': 'line', 'line': Line_2390200},\n", - " LVStation_139189: {'type': 'line', 'line': Line_2390202}},\n", - " LVStation_139189: {LVStation_139188: {'type': 'line', 'line': Line_2390202},\n", - " LVStation_417734: {'type': 'line', 'line': Line_2390203}},\n", - " LVStation_488816: {LVStation_490252: {'type': 'line', 'line': Line_2390230},\n", - " BranchTee_MVGrid_239_82: {'type': 'line', 'line': Line_2390231}},\n", - " LVStation_490252: {LVStation_488816: {'type': 'line', 'line': Line_2390230}},\n", - " LVStation_490253: {LVStation_122125: {'type': 'line', 'line': Line_2390122},\n", - " BranchTee_MVGrid_239_82: {'type': 'line', 'line': Line_2390232}},\n", - " LVStation_118322: {BranchTee_MVGrid_239_36: {'type': 'line',\n", - " 'line': Line_2390020}},\n", - " LVStation_118323: {BranchTee_MVGrid_239_36: {'type': 'line',\n", - " 'line': Line_2390021},\n", - " MVDisconnectingPoint_3: {'line': Line_2390022, 'type': 'line'}},\n", - " LVStation_118324: {BranchTee_MVGrid_239_37: {'type': 'line',\n", - " 'line': Line_2390023}},\n", - " LVStation_515314: {LVStation_119702: {'type': 'line', 'line': Line_2390033},\n", - " BranchTee_MVGrid_239_37: {'type': 'line', 'line': Line_2390240}},\n", - " LVStation_120387: {LVStation_120388: {'type': 'line', 'line': 
Line_2390058},\n", - " BranchTee_MVGrid_239_46: {'type': 'line', 'line': Line_2390059}},\n", - " LVStation_120388: {LVStation_120387: {'type': 'line', 'line': Line_2390058},\n", - " LVStation_121287: {'type': 'line', 'line': Line_2390060}},\n", - " LVStation_120389: {LVStation_120390: {'type': 'line', 'line': Line_2390061},\n", - " LVStation_125016: {'type': 'line', 'line': Line_2390062}},\n", - " LVStation_120390: {LVStation_120389: {'type': 'line', 'line': Line_2390061},\n", - " BranchTee_MVGrid_239_46: {'type': 'line', 'line': Line_2390063}},\n", - " LVStation_120853: {BranchTee_MVGrid_239_49: {'type': 'line',\n", - " 'line': Line_2390075}},\n", - " LVStation_511325: {BranchTee_MVGrid_239_10: {'type': 'line',\n", - " 'line': Line_2390238},\n", - " BranchTee_MVGrid_239_49: {'type': 'line', 'line': Line_2390239}},\n", - " LVStation_120470: {BranchTee_MVGrid_239_7: {'type': 'line',\n", - " 'line': Line_2390065}},\n", - " LVStation_417987: {BranchTee_MVGrid_239_27: {'type': 'line',\n", - " 'line': Line_2390217}},\n", - " LVStation_119612: {GeneratorFluctuating_839364: {'type': 'line',\n", - " 'line': Line_2390007},\n", - " BranchTee_MVGrid_239_38: {'type': 'line', 'line': Line_2390024}},\n", - " LVStation_119613: {LVStation_119703: {'type': 'line', 'line': Line_2390025},\n", - " BranchTee_MVGrid_239_38: {'type': 'line', 'line': Line_2390026}},\n", - " LVStation_119701: {LVStation_119702: {'type': 'line', 'line': Line_2390031},\n", - " BranchTee_MVGrid_239_40: {'type': 'line', 'line': Line_2390032}},\n", - " LVStation_119702: {LVStation_119701: {'type': 'line', 'line': Line_2390031},\n", - " LVStation_515314: {'type': 'line', 'line': Line_2390033}},\n", - " LVStation_119703: {LVStation_119613: {'type': 'line', 'line': Line_2390025},\n", - " LVStation_119704: {'type': 'line', 'line': Line_2390034}},\n", - " LVStation_119704: {LVStation_119703: {'type': 'line', 'line': Line_2390034},\n", - " BranchTee_MVGrid_239_40: {'type': 'line', 'line': Line_2390035}},\n", - " LVStation_120038: {BranchTee_MVGrid_239_93: {'type': 'line',\n", - " 'line': Line_2390057}},\n", - " LVStation_120555: {BranchTee_MVGrid_239_47: {'type': 'line',\n", - " 'line': Line_2390066},\n", - " BranchTee_MVGrid_239_9: {'type': 'line', 'line': Line_2390067}},\n", - " LVStation_500916: {BranchTee_MVGrid_239_47: {'type': 'line',\n", - " 'line': Line_2390235}},\n", - " LVStation_418547: {LVStation_496409: {'type': 'line', 'line': Line_2390222}},\n", - " LVStation_121286: {BranchTee_MVGrid_239_52: {'type': 'line',\n", - " 'line': Line_2390082}},\n", - " LVStation_121287: {LVStation_120388: {'type': 'line', 'line': Line_2390060},\n", - " BranchTee_MVGrid_239_52: {'type': 'line', 'line': Line_2390083}},\n", - " LVStation_121288: {LVStation_121289: {'type': 'line', 'line': Line_2390084},\n", - " BranchTee_MVGrid_239_52: {'type': 'line', 'line': Line_2390085}},\n", - " LVStation_121289: {LVStation_121288: {'type': 'line', 'line': Line_2390084},\n", - " BranchTee_MVGrid_239_14: {'type': 'line', 'line': Line_2390086}},\n", - " LVStation_121741: {LVStation_418449: {'type': 'line', 'line': Line_2390091},\n", - " BranchTee_MVGrid_239_55: {'type': 'line', 'line': Line_2390092}},\n", - " LVStation_121742: {BranchTee_MVGrid_239_54: {'type': 'line',\n", - " 'line': Line_2390093},\n", - " BranchTee_MVGrid_239_55: {'type': 'line', 'line': Line_2390094}},\n", - " LVStation_121743: {BranchTee_MVGrid_239_54: {'type': 'line',\n", - " 'line': Line_2390095}},\n", - " LVStation_496409: {LVStation_418547: {'type': 'line', 'line': Line_2390222},\n", - " 
BranchTee_MVGrid_239_55: {'type': 'line', 'line': Line_2390233}},\n", - " LVStation_416983: {BranchTee_MVGrid_239_17: {'type': 'line',\n", - " 'line': Line_2390209}},\n", - " LVStation_121878: {LVStation_122124: {'type': 'line', 'line': Line_2390097},\n", - " MVStation_239: {'type': 'line', 'line': Line_2390098}},\n", - " LVStation_121879: {BranchTee_MVGrid_239_21: {'type': 'line',\n", - " 'line': Line_2390099},\n", - " BranchTee_MVGrid_239_56: {'type': 'line', 'line': Line_2390100}},\n", - " LVStation_121880: {LVStation_496410: {'type': 'line', 'line': Line_2390101},\n", - " BranchTee_MVGrid_239_56: {'type': 'line', 'line': Line_2390102},\n", - " MVStation_239: {'type': 'line', 'line': Line_2390103}},\n", - " LVStation_496410: {LVStation_121880: {'type': 'line', 'line': Line_2390101}},\n", - " LVStation_121915: {LVStation_121918: {'type': 'line', 'line': Line_2390104},\n", - " BranchTee_MVGrid_239_57: {'type': 'line', 'line': Line_2390105}},\n", - " LVStation_121916: {LVStation_121917: {'type': 'line', 'line': Line_2390106},\n", - " BranchTee_MVGrid_239_57: {'type': 'line', 'line': Line_2390107}},\n", - " LVStation_121917: {LVStation_121916: {'type': 'line', 'line': Line_2390106}},\n", - " LVStation_121918: {LVStation_121318: {'type': 'line', 'line': Line_2390089},\n", - " LVStation_121915: {'type': 'line', 'line': Line_2390104}},\n", - " LVStation_121919: {BranchTee_MVGrid_239_19: {'type': 'line',\n", - " 'line': Line_2390108},\n", - " BranchTee_MVGrid_239_57: {'type': 'line', 'line': Line_2390109}},\n", - " LVStation_416589: {BranchTee_MVGrid_239_19: {'type': 'line',\n", - " 'line': Line_2390206}},\n", - " LVStation_122400: {BranchTee_MVGrid_239_21: {'type': 'line',\n", - " 'line': Line_2390128},\n", - " BranchTee_MVGrid_239_62: {'type': 'line', 'line': Line_2390129}},\n", - " LVStation_122401: {BranchTee_MVGrid_239_62: {'type': 'line',\n", - " 'line': Line_2390130}},\n", - " LVStation_122696: {BranchTee_MVGrid_239_64: {'type': 'line',\n", - " 'line': Line_2390135}},\n", - " LVStation_122697: {BranchTee_MVGrid_239_66: {'type': 'line',\n", - " 'line': Line_2390136}},\n", - " LVStation_122698: {LVStation_418244: {'type': 'line', 'line': Line_2390137},\n", - " BranchTee_MVGrid_239_65: {'type': 'line', 'line': Line_2390138}},\n", - " LVStation_122699: {BranchTee_MVGrid_239_65: {'type': 'line',\n", - " 'line': Line_2390139}},\n", - " LVStation_123655: {BranchTee_MVGrid_239_90: {'type': 'line',\n", - " 'line': Line_2390140}},\n", - " LVStation_124010: {BranchTee_MVGrid_239_67: {'type': 'line',\n", - " 'line': Line_2390141}},\n", - " LVStation_124011: {LVStation_124109: {'type': 'line', 'line': Line_2390142},\n", - " BranchTee_MVGrid_239_67: {'type': 'line', 'line': Line_2390143}},\n", - " LVStation_124109: {LVStation_124011: {'type': 'line', 'line': Line_2390142},\n", - " BranchTee_MVGrid_239_69: {'type': 'line', 'line': Line_2390147}},\n", - " LVStation_124110: {BranchTee_MVGrid_239_17: {'type': 'line',\n", - " 'line': Line_2390148},\n", - " BranchTee_MVGrid_239_69: {'type': 'line', 'line': Line_2390149}},\n", - " LVStation_124111: {BranchTee_MVGrid_239_36: {'type': 'line',\n", - " 'line': Line_2390150},\n", - " BranchTee_MVGrid_239_69: {'type': 'line', 'line': Line_2390151}},\n", - " LVStation_417936: {LVStation_124902: {'type': 'line', 'line': Line_2390157},\n", - " BranchTee_MVGrid_239_26: {'type': 'line', 'line': Line_2390216}},\n", - " LVStation_124902: {LVStation_417936: {'type': 'line', 'line': Line_2390157}},\n", - " LVStation_416175: {LVStation_119890: {'type': 'line', 'line': 
Line_2390039}},\n", - " LVStation_125210: {BranchTee_MVGrid_239_75: {'type': 'line',\n", - " 'line': Line_2390166},\n", - " BranchTee_MVGrid_239_88: {'type': 'line', 'line': Line_2390167}},\n", - " LVStation_125211: {LVStation_125212: {'type': 'line', 'line': Line_2390168},\n", - " BranchTee_MVGrid_239_75: {'type': 'line', 'line': Line_2390169}},\n", - " LVStation_125212: {LVStation_125211: {'type': 'line', 'line': Line_2390168}},\n", - " LVStation_125213: {LVStation_125216: {'type': 'line', 'line': Line_2390170},\n", - " BranchTee_MVGrid_239_77: {'type': 'line', 'line': Line_2390171}},\n", - " LVStation_125214: {BranchTee_MVGrid_239_76: {'type': 'line',\n", - " 'line': Line_2390172},\n", - " BranchTee_MVGrid_239_77: {'type': 'line', 'line': Line_2390173}},\n", - " LVStation_125215: {BranchTee_MVGrid_239_76: {'type': 'line',\n", - " 'line': Line_2390174}},\n", - " LVStation_125216: {LVStation_125213: {'type': 'line', 'line': Line_2390170},\n", - " BranchTee_MVGrid_239_94: {'type': 'line', 'line': Line_2390175}},\n", - " LVStation_125217: {LVStation_419605: {'type': 'line', 'line': Line_2390176},\n", - " LVStation_503036: {'type': 'line', 'line': Line_2390177}},\n", - " LVStation_503036: {LVStation_125217: {'type': 'line', 'line': Line_2390177},\n", - " BranchTee_MVGrid_239_25: {'type': 'line', 'line': Line_2390237}},\n", - " LVStation_125269: {BranchTee_MVGrid_239_3: {'type': 'line',\n", - " 'line': Line_2390181},\n", - " BranchTee_MVGrid_239_79: {'type': 'line', 'line': Line_2390182}},\n", - " LVStation_125267: {BranchTee_MVGrid_239_78: {'type': 'line',\n", - " 'line': Line_2390178}},\n", - " LVStation_125268: {BranchTee_MVGrid_239_78: {'type': 'line',\n", - " 'line': Line_2390179},\n", - " BranchTee_MVGrid_239_79: {'type': 'line', 'line': Line_2390180}},\n", - " LVStation_120898: {BranchTee_MVGrid_239_11: {'type': 'line',\n", - " 'line': Line_2390076},\n", - " BranchTee_MVGrid_239_50: {'type': 'line', 'line': Line_2390077}},\n", - " LVStation_120899: {BranchTee_MVGrid_239_50: {'type': 'line',\n", - " 'line': Line_2390078}},\n", - " LVStation_139149: {LVStation_138585: {'type': 'line', 'line': Line_2390183},\n", - " BranchTee_MVGrid_239_31: {'type': 'line', 'line': Line_2390189}},\n", - " LVStation_139150: {LVStation_139151: {'type': 'line', 'line': Line_2390190},\n", - " BranchTee_MVGrid_239_34: {'type': 'line', 'line': Line_2390191},\n", - " BranchTee_MVGrid_239_81: {'type': 'line', 'line': Line_2390192}},\n", - " LVStation_139151: {LVStation_139150: {'type': 'line', 'line': Line_2390190},\n", - " LVStation_139152: {'type': 'line', 'line': Line_2390193}},\n", - " LVStation_139152: {LVStation_139151: {'type': 'line', 'line': Line_2390193}},\n", - " LVStation_417909: {LVStation_125017: {'type': 'line', 'line': Line_2390164},\n", - " BranchTee_MVGrid_239_27: {'type': 'line', 'line': Line_2390215}},\n", - " LVStation_124085: {BranchTee_MVGrid_239_23: {'type': 'line',\n", - " 'line': Line_2390144},\n", - " BranchTee_MVGrid_239_68: {'type': 'line', 'line': Line_2390145}},\n", - " LVStation_124086: {BranchTee_MVGrid_239_68: {'type': 'line',\n", - " 'line': Line_2390146}},\n", - " LVStation_124581: {LVStation_124582: {'type': 'line', 'line': Line_2390153}},\n", - " LVStation_124582: {LVStation_124581: {'type': 'line', 'line': Line_2390153},\n", - " BranchTee_MVGrid_239_71: {'type': 'line', 'line': Line_2390154}},\n", - " LVStation_124583: {LVStation_120585: {'type': 'line', 'line': Line_2390068},\n", - " LVStation_124584: {'type': 'line', 'line': Line_2390155},\n", - " BranchTee_MVGrid_239_71: 
{'type': 'line', 'line': Line_2390156}},\n", - " LVStation_124584: {LVStation_124583: {'type': 'line', 'line': Line_2390155}},\n", - " LVStation_498758: {BranchTee_MVGrid_239_70: {'type': 'line',\n", - " 'line': Line_2390234}},\n", - " MVStation_239: {GeneratorFluctuating_878609: {'type': 'line',\n", - " 'line': Line_2390009},\n", - " GeneratorFluctuating_878611: {'type': 'line', 'line': Line_2390010},\n", - " GeneratorFluctuating_878614: {'type': 'line', 'line': Line_2390011},\n", - " GeneratorFluctuating_878615: {'type': 'line', 'line': Line_2390012},\n", - " GeneratorFluctuating_878875: {'type': 'line', 'line': Line_2390018},\n", - " GeneratorFluctuating_878950: {'type': 'line', 'line': Line_2390019},\n", - " LVStation_121878: {'type': 'line', 'line': Line_2390098},\n", - " LVStation_121880: {'type': 'line', 'line': Line_2390103},\n", - " LVStation_122076: {'type': 'line', 'line': Line_2390114},\n", - " LVStation_485974: {'type': 'line', 'line': Line_2390229},\n", - " BranchTee_MVGrid_239_11: {'type': 'line', 'line': Line_2390243},\n", - " BranchTee_MVGrid_239_14: {'type': 'line', 'line': Line_2390245},\n", - " BranchTee_MVGrid_239_16: {'type': 'line', 'line': Line_2390249},\n", - " BranchTee_MVGrid_239_23: {'type': 'line', 'line': Line_2390257},\n", - " BranchTee_MVGrid_239_2: {'type': 'line', 'line': Line_2390266},\n", - " BranchTee_MVGrid_239_92: {'type': 'line', 'line': Line_2390286}},\n", - " MVDisconnectingPoint_1: {BranchTee_MVGrid_239_1: {'line': Line_2390252,\n", - " 'type': 'line'}},\n", - " MVDisconnectingPoint_2: {BranchTee_MVGrid_239_2: {'line': Line_2390265,\n", - " 'type': 'line'}},\n", - " MVDisconnectingPoint_3: {LVStation_118323: {'line': Line_2390022,\n", - " 'type': 'line'}},\n", - " MVDisconnectingPoint_4: {BranchTee_MVGrid_239_75: {'line': Line_2390284,\n", - " 'type': 'line'}},\n", - " MVDisconnectingPoint_5: {BranchTee_MVGrid_239_83: {'line': Line_2390197,\n", - " 'type': 'line'}}}" - ] - }, - "execution_count": 6, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "# get a dictionary of all lines in the mv grid\n", - "edisgo.network.mv_grid.graph.edge" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "The dictionary you got should look something like this:\n", - "```python\n", - "{Generator_x: {BranchTee_y: {'type': 'line', 'line': Line_1}},\n", - " BranchTee_y: {\n", - " Generator_x: {'type': 'line', 'line': Line_1},\n", - " BranchTee_z: {'type': 'line', 'line': Line_2}}}\n", - "```\n", - "\n", - "That means that Generator_x is connected to BranchTee_y by Line_1 and BranchTee_y is also connected to BranchTee_z by Line_2. Line_1 and Line_2 are [Line](http://edisgo.readthedocs.io/en/dev/api/edisgo.grid.html#edisgo.grid.components.Line) objects containing all important information about the line, such as length, equipment type, and geometry. Accessing this information can, for example, be done as follows:" - ] - }, - { - "cell_type": "code", - "execution_count": 7, - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "0.3681789122707058" - ] - }, - "execution_count": 7, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "edge_dictionary = edisgo.network.mv_grid.graph.edge\n", - "# get an arbitrary line\n", - "line = edge_dictionary.popitem()[1].popitem()[1]['line']\n", - "# get line length\n", - "line.length" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Now let's have a look at all the **nodes**."
- ] - }, - { - "cell_type": "code", - "execution_count": 8, - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "[GeneratorFluctuating_839361,\n", - " GeneratorFluctuating_839362,\n", - " GeneratorFluctuating_839363,\n", - " GeneratorFluctuating_839364,\n", - " GeneratorFluctuating_878450,\n", - " GeneratorFluctuating_878583,\n", - " GeneratorFluctuating_878609,\n", - " GeneratorFluctuating_878611,\n", - " GeneratorFluctuating_878614,\n", - " GeneratorFluctuating_878615]" - ] - }, - "execution_count": 8, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "# get a list of all nodes (stations, generators, loads, branch tees)\n", - "# here, only the first 10 nodes are displayed\n", - "edisgo.network.mv_grid.graph.nodes()[:10]" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "You can also filter for certain kinds of nodes, e.g. generators..." - ] - }, - { - "cell_type": "code", - "execution_count": 9, - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "[GeneratorFluctuating_839361,\n", - " GeneratorFluctuating_839362,\n", - " GeneratorFluctuating_839363,\n", - " GeneratorFluctuating_839364,\n", - " GeneratorFluctuating_878450,\n", - " GeneratorFluctuating_878583,\n", - " GeneratorFluctuating_878609,\n", - " GeneratorFluctuating_878611,\n", - " GeneratorFluctuating_878614,\n", - " GeneratorFluctuating_878615,\n", - " GeneratorFluctuating_878862,\n", - " GeneratorFluctuating_878863,\n", - " GeneratorFluctuating_878864,\n", - " GeneratorFluctuating_878865,\n", - " GeneratorFluctuating_878866,\n", - " GeneratorFluctuating_878867,\n", - " GeneratorFluctuating_878875,\n", - " GeneratorFluctuating_878950,\n", - " GeneratorFluctuating_878963]" - ] - }, - "execution_count": 9, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "# get a list of all generators in the mv grid\n", - "edisgo.network.mv_grid.graph.nodes_by_attribute('generator')" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "... or get a list of all lv grids." - ] - }, - { - "cell_type": "code", - "execution_count": 10, - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "[LVGrid_122408,\n", - " LVGrid_485974,\n", - " LVGrid_138585,\n", - " LVGrid_119895,\n", - " LVGrid_119896,\n", - " LVGrid_119889,\n", - " LVGrid_119890,\n", - " LVGrid_119891,\n", - " LVGrid_119892,\n", - " LVGrid_119893]" - ] - }, - "execution_count": 10, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "# get a list of all lv grids\n", - "# here, only the first 10 lv grids are displayed\n", - "list(edisgo.network.mv_grid.lv_grids)[:10]" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Future generator capacities \n", - "\n", - "In the open_eGo project we developed two future scenarios, the 'NEP 2035' and the 'ego 100' scenario. The 'NEP 2035' scenario closely follows the B2-Scenario 2035 from the German network development plan (Netzentwicklungsplan NEP) 2015. The share of renewables is 65.8%; the electricity demand is assumed to stay the same as in the status quo. The 'ego 100' scenario is based on the e-Highway 2050 scenario X-7 and assumes a share of renewables of 100% and, again, the same electricity demand as in the status quo.\n", - "\n", - "As mentioned earlier, ding0 grids represent status quo networks and generator capacities. In order to analyse future scenarios, the future generator park has to be imported.\n",
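- "\n", - "To quantify the effect of the import, one idea is to sum up the installed MV generator capacity before and after it. This is only a sketch: it assumes that each generator object exposes its rated power as nominal_capacity (in MW), which is not shown in this tutorial:\n", - "```python\n", - "# sum the installed capacity of all MV generators (nominal_capacity is an assumed attribute)\n", - "gens = edisgo.network.mv_grid.graph.nodes_by_attribute('generator')\n", - "capacity_before_import = sum(g.nominal_capacity for g in gens)\n", - "```"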
- ] - }, - { - "cell_type": "code", - "execution_count": 11, - "metadata": {}, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "WARNING:root:Right now only solar and wind generators can be imported from the oedb.\n" - ] - }, - { - "ename": "ProgrammingError", - "evalue": "(psycopg2.ProgrammingError) relation \"model_draft.ego_supply_res_powerplant_nep2035_mview\" does not exist\nLINE 2: FROM model_draft.ego_supply_res_powerplant_nep2035_mview \n ^\n [SQL: 'SELECT model_draft.ego_supply_res_powerplant_nep2035_mview.id, model_draft.ego_supply_res_powerplant_nep2035_mview.subst_id, model_draft.ego_supply_res_powerplant_nep2035_mview.la_id, model_draft.ego_supply_res_powerplant_nep2035_mview.mvlv_subst_id, model_draft.ego_supply_res_powerplant_nep2035_mview.electrical_capacity, model_draft.ego_supply_res_powerplant_nep2035_mview.generation_type, model_draft.ego_supply_res_powerplant_nep2035_mview.generation_subtype, model_draft.ego_supply_res_powerplant_nep2035_mview.voltage_level, ST_AsText(ST_Transform(model_draft.ego_supply_res_powerplant_nep2035_mview.rea_geom_new, %(ST_Transform_1)s)) AS geom, ST_AsText(ST_Transform(model_draft.ego_supply_res_powerplant_nep2035_mview.geom, %(ST_Transform_2)s)) AS geom_em \\nFROM model_draft.ego_supply_res_powerplant_nep2035_mview \\nWHERE model_draft.ego_supply_res_powerplant_nep2035_mview.subst_id = %(subst_id_1)s AND model_draft.ego_supply_res_powerplant_nep2035_mview.generation_type IN (%(generation_type_1)s, %(generation_type_2)s) AND model_draft.ego_supply_res_powerplant_nep2035_mview.voltage_level IN (%(voltage_level_1)s, %(voltage_level_2)s)'] [parameters: {'ST_Transform_1': 4326, 'ST_Transform_2': 4326, 'subst_id_1': 239, 'generation_type_1': 'solar', 'generation_type_2': 'wind', 'voltage_level_1': 4, 'voltage_level_2': 5}] (Background on this error at: http://sqlalche.me/e/f405)", - "output_type": "error", - "traceback": [ - "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", - "\u001b[0;31mProgrammingError\u001b[0m Traceback (most recent call last)", - "\u001b[0;32m~/virtualenvs/open_ego_notebook/lib/python3.6/site-packages/sqlalchemy/engine/base.py\u001b[0m in \u001b[0;36m_execute_context\u001b[0;34m(self, dialect, constructor, statement, parameters, *args)\u001b[0m\n\u001b[1;32m 1192\u001b[0m \u001b[0mparameters\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 1193\u001b[0;31m context)\n\u001b[0m\u001b[1;32m 1194\u001b[0m \u001b[0;32mexcept\u001b[0m \u001b[0mBaseException\u001b[0m \u001b[0;32mas\u001b[0m \u001b[0me\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;32m~/virtualenvs/open_ego_notebook/lib/python3.6/site-packages/sqlalchemy/engine/default.py\u001b[0m in \u001b[0;36mdo_execute\u001b[0;34m(self, cursor, statement, parameters, context)\u001b[0m\n\u001b[1;32m 506\u001b[0m \u001b[0;32mdef\u001b[0m \u001b[0mdo_execute\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mcursor\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mstatement\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mparameters\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mcontext\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;32mNone\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 507\u001b[0;31m \u001b[0mcursor\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mexecute\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mstatement\u001b[0m\u001b[0;34m,\u001b[0m 
\u001b[0mparameters\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 508\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;31mProgrammingError\u001b[0m: relation \"model_draft.ego_supply_res_powerplant_nep2035_mview\" does not exist\nLINE 2: FROM model_draft.ego_supply_res_powerplant_nep2035_mview \n ^\n", - "\nThe above exception was the direct cause of the following exception:\n", - "\u001b[0;31mProgrammingError\u001b[0m Traceback (most recent call last)", - "\u001b[0;32m\u001b[0m in \u001b[0;36m\u001b[0;34m()\u001b[0m\n\u001b[1;32m 1\u001b[0m \u001b[0;31m# Import generators\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 2\u001b[0m \u001b[0mscenario\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;34m'nep2035'\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m----> 3\u001b[0;31m \u001b[0medisgo\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mimport_generators\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mgenerator_scenario\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mscenario\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m", - "\u001b[0;32m~/virtualenvs/open_ego_notebook/git_repos/eDisGo/edisgo/grid/network.py\u001b[0m in \u001b[0;36mimport_generators\u001b[0;34m(self, generator_scenario)\u001b[0m\n\u001b[1;32m 326\u001b[0m \u001b[0mdata_source\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;34m'oedb'\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 327\u001b[0m import_generators(network=self.network,\n\u001b[0;32m--> 328\u001b[0;31m data_source=data_source)\n\u001b[0m\u001b[1;32m 329\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 330\u001b[0m \u001b[0;32mdef\u001b[0m \u001b[0manalyze\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mmode\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;32mNone\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;32m~/virtualenvs/open_ego_notebook/git_repos/eDisGo/edisgo/data/import_data.py\u001b[0m in \u001b[0;36mimport_generators\u001b[0;34m(network, data_source, file)\u001b[0m\n\u001b[1;32m 984\u001b[0m logging.warning('Right now only solar and wind generators can be '\n\u001b[1;32m 985\u001b[0m 'imported from the oedb.')\n\u001b[0;32m--> 986\u001b[0;31m \u001b[0m_import_genos_from_oedb\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mnetwork\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mnetwork\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 987\u001b[0m \u001b[0;32melif\u001b[0m \u001b[0mdata_source\u001b[0m \u001b[0;34m==\u001b[0m \u001b[0;34m'pypsa'\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 988\u001b[0m \u001b[0m_import_genos_from_pypsa\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mnetwork\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mnetwork\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mfile\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mfile\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;32m~/virtualenvs/open_ego_notebook/git_repos/eDisGo/edisgo/data/import_data.py\u001b[0m in \u001b[0;36m_import_genos_from_oedb\u001b[0;34m(network)\u001b[0m\n\u001b[1;32m 1844\u001b[0m \u001b[0;31m#generators_conv_mv = _import_conv_generators()\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1845\u001b[0m generators_res_mv, generators_res_lv = _import_res_generators(\n\u001b[0;32m-> 1846\u001b[0;31m types_condition)\n\u001b[0m\u001b[1;32m 1847\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1848\u001b[0m \u001b[0;31m#generators_mv = 
generators_conv_mv.append(generators_res_mv)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;32m~/virtualenvs/open_ego_notebook/git_repos/eDisGo/edisgo/data/import_data.py\u001b[0m in \u001b[0;36m_import_res_generators\u001b[0;34m(types_filter)\u001b[0m\n\u001b[1;32m 1089\u001b[0m generators_mv = pd.read_sql_query(generators_mv_sqla.statement,\n\u001b[1;32m 1090\u001b[0m \u001b[0msession\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mbind\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 1091\u001b[0;31m index_col='id')\n\u001b[0m\u001b[1;32m 1092\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1093\u001b[0m \u001b[0;31m# define generators with unknown subtype as 'unknown'\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;32m~/virtualenvs/open_ego_notebook/lib/python3.6/site-packages/pandas/io/sql.py\u001b[0m in \u001b[0;36mread_sql_query\u001b[0;34m(sql, con, index_col, coerce_float, params, parse_dates, chunksize)\u001b[0m\n\u001b[1;32m 330\u001b[0m return pandas_sql.read_query(\n\u001b[1;32m 331\u001b[0m \u001b[0msql\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mindex_col\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mindex_col\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mparams\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mparams\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mcoerce_float\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mcoerce_float\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 332\u001b[0;31m parse_dates=parse_dates, chunksize=chunksize)\n\u001b[0m\u001b[1;32m 333\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 334\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;32m~/virtualenvs/open_ego_notebook/lib/python3.6/site-packages/pandas/io/sql.py\u001b[0m in \u001b[0;36mread_query\u001b[0;34m(self, sql, index_col, coerce_float, parse_dates, params, chunksize)\u001b[0m\n\u001b[1;32m 1085\u001b[0m \u001b[0margs\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0m_convert_params\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0msql\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mparams\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1086\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 1087\u001b[0;31m \u001b[0mresult\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mexecute\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 1088\u001b[0m \u001b[0mcolumns\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mresult\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mkeys\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1089\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;32m~/virtualenvs/open_ego_notebook/lib/python3.6/site-packages/pandas/io/sql.py\u001b[0m in \u001b[0;36mexecute\u001b[0;34m(self, *args, **kwargs)\u001b[0m\n\u001b[1;32m 976\u001b[0m \u001b[0;32mdef\u001b[0m \u001b[0mexecute\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 977\u001b[0m \u001b[0;34m\"\"\"Simple passthrough to SQLAlchemy connectable\"\"\"\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 978\u001b[0;31m \u001b[0;32mreturn\u001b[0m 
\u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mconnectable\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mexecute\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 979\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 980\u001b[0m def read_table(self, table_name, index_col=None, coerce_float=True,\n", - "\u001b[0;32m~/virtualenvs/open_ego_notebook/lib/python3.6/site-packages/sqlalchemy/engine/base.py\u001b[0m in \u001b[0;36mexecute\u001b[0;34m(self, statement, *multiparams, **params)\u001b[0m\n\u001b[1;32m 2073\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 2074\u001b[0m \u001b[0mconnection\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mcontextual_connect\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mclose_with_result\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;32mTrue\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 2075\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mconnection\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mexecute\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mstatement\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m*\u001b[0m\u001b[0mmultiparams\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mparams\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 2076\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 2077\u001b[0m \u001b[0;32mdef\u001b[0m \u001b[0mscalar\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mstatement\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m*\u001b[0m\u001b[0mmultiparams\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mparams\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;32m~/virtualenvs/open_ego_notebook/lib/python3.6/site-packages/sqlalchemy/engine/base.py\u001b[0m in \u001b[0;36mexecute\u001b[0;34m(self, object, *multiparams, **params)\u001b[0m\n\u001b[1;32m 946\u001b[0m \u001b[0;32mraise\u001b[0m \u001b[0mexc\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mObjectNotExecutableError\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mobject\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 947\u001b[0m \u001b[0;32melse\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 948\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mmeth\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mmultiparams\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mparams\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 949\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 950\u001b[0m \u001b[0;32mdef\u001b[0m \u001b[0m_execute_function\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mfunc\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mmultiparams\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mparams\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;32m~/virtualenvs/open_ego_notebook/lib/python3.6/site-packages/sqlalchemy/sql/elements.py\u001b[0m in \u001b[0;36m_execute_on_connection\u001b[0;34m(self, connection, multiparams, params)\u001b[0m\n\u001b[1;32m 267\u001b[0m \u001b[0;32mdef\u001b[0m \u001b[0m_execute_on_connection\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mconnection\u001b[0m\u001b[0;34m,\u001b[0m 
\u001b[0mmultiparams\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mparams\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 268\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0msupports_execution\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 269\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mconnection\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_execute_clauseelement\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mmultiparams\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mparams\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 270\u001b[0m \u001b[0;32melse\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 271\u001b[0m \u001b[0;32mraise\u001b[0m \u001b[0mexc\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mObjectNotExecutableError\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;32m~/virtualenvs/open_ego_notebook/lib/python3.6/site-packages/sqlalchemy/engine/base.py\u001b[0m in \u001b[0;36m_execute_clauseelement\u001b[0;34m(self, elem, multiparams, params)\u001b[0m\n\u001b[1;32m 1058\u001b[0m \u001b[0mcompiled_sql\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1059\u001b[0m \u001b[0mdistilled_params\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 1060\u001b[0;31m \u001b[0mcompiled_sql\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mdistilled_params\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 1061\u001b[0m )\n\u001b[1;32m 1062\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_has_events\u001b[0m \u001b[0;32mor\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mengine\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_has_events\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;32m~/virtualenvs/open_ego_notebook/lib/python3.6/site-packages/sqlalchemy/engine/base.py\u001b[0m in \u001b[0;36m_execute_context\u001b[0;34m(self, dialect, constructor, statement, parameters, *args)\u001b[0m\n\u001b[1;32m 1198\u001b[0m \u001b[0mparameters\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1199\u001b[0m \u001b[0mcursor\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 1200\u001b[0;31m context)\n\u001b[0m\u001b[1;32m 1201\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1202\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_has_events\u001b[0m \u001b[0;32mor\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mengine\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_has_events\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;32m~/virtualenvs/open_ego_notebook/lib/python3.6/site-packages/sqlalchemy/engine/base.py\u001b[0m in \u001b[0;36m_handle_dbapi_exception\u001b[0;34m(self, e, statement, parameters, cursor, context)\u001b[0m\n\u001b[1;32m 1411\u001b[0m util.raise_from_cause(\n\u001b[1;32m 1412\u001b[0m \u001b[0msqlalchemy_exception\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 1413\u001b[0;31m \u001b[0mexc_info\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 1414\u001b[0m )\n\u001b[1;32m 1415\u001b[0m \u001b[0;32melse\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", - 
"\u001b[0;32m~/virtualenvs/open_ego_notebook/lib/python3.6/site-packages/sqlalchemy/util/compat.py\u001b[0m in \u001b[0;36mraise_from_cause\u001b[0;34m(exception, exc_info)\u001b[0m\n\u001b[1;32m 201\u001b[0m \u001b[0mexc_type\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mexc_value\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mexc_tb\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mexc_info\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 202\u001b[0m \u001b[0mcause\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mexc_value\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mexc_value\u001b[0m \u001b[0;32mis\u001b[0m \u001b[0;32mnot\u001b[0m \u001b[0mexception\u001b[0m \u001b[0;32melse\u001b[0m \u001b[0;32mNone\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 203\u001b[0;31m \u001b[0mreraise\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mtype\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mexception\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mexception\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mtb\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mexc_tb\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mcause\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mcause\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 204\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 205\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mpy3k\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;32m~/virtualenvs/open_ego_notebook/lib/python3.6/site-packages/sqlalchemy/util/compat.py\u001b[0m in \u001b[0;36mreraise\u001b[0;34m(tp, value, tb, cause)\u001b[0m\n\u001b[1;32m 184\u001b[0m \u001b[0mvalue\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m__cause__\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mcause\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 185\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mvalue\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m__traceback__\u001b[0m \u001b[0;32mis\u001b[0m \u001b[0;32mnot\u001b[0m \u001b[0mtb\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 186\u001b[0;31m \u001b[0;32mraise\u001b[0m \u001b[0mvalue\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mwith_traceback\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mtb\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 187\u001b[0m \u001b[0;32mraise\u001b[0m \u001b[0mvalue\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 188\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;32m~/virtualenvs/open_ego_notebook/lib/python3.6/site-packages/sqlalchemy/engine/base.py\u001b[0m in \u001b[0;36m_execute_context\u001b[0;34m(self, dialect, constructor, statement, parameters, *args)\u001b[0m\n\u001b[1;32m 1191\u001b[0m \u001b[0mstatement\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1192\u001b[0m \u001b[0mparameters\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 1193\u001b[0;31m context)\n\u001b[0m\u001b[1;32m 1194\u001b[0m \u001b[0;32mexcept\u001b[0m \u001b[0mBaseException\u001b[0m \u001b[0;32mas\u001b[0m \u001b[0me\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1195\u001b[0m self._handle_dbapi_exception(\n", - "\u001b[0;32m~/virtualenvs/open_ego_notebook/lib/python3.6/site-packages/sqlalchemy/engine/default.py\u001b[0m in \u001b[0;36mdo_execute\u001b[0;34m(self, cursor, statement, parameters, context)\u001b[0m\n\u001b[1;32m 505\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 506\u001b[0m \u001b[0;32mdef\u001b[0m \u001b[0mdo_execute\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m,\u001b[0m 
\u001b[0mcursor\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mstatement\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mparameters\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mcontext\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;32mNone\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 507\u001b[0;31m \u001b[0mcursor\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mexecute\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mstatement\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mparameters\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 508\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 509\u001b[0m \u001b[0;32mdef\u001b[0m \u001b[0mdo_execute_no_params\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mcursor\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mstatement\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mcontext\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;32mNone\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;31mProgrammingError\u001b[0m: (psycopg2.ProgrammingError) relation \"model_draft.ego_supply_res_powerplant_nep2035_mview\" does not exist\nLINE 2: FROM model_draft.ego_supply_res_powerplant_nep2035_mview \n ^\n [SQL: 'SELECT model_draft.ego_supply_res_powerplant_nep2035_mview.id, model_draft.ego_supply_res_powerplant_nep2035_mview.subst_id, model_draft.ego_supply_res_powerplant_nep2035_mview.la_id, model_draft.ego_supply_res_powerplant_nep2035_mview.mvlv_subst_id, model_draft.ego_supply_res_powerplant_nep2035_mview.electrical_capacity, model_draft.ego_supply_res_powerplant_nep2035_mview.generation_type, model_draft.ego_supply_res_powerplant_nep2035_mview.generation_subtype, model_draft.ego_supply_res_powerplant_nep2035_mview.voltage_level, ST_AsText(ST_Transform(model_draft.ego_supply_res_powerplant_nep2035_mview.rea_geom_new, %(ST_Transform_1)s)) AS geom, ST_AsText(ST_Transform(model_draft.ego_supply_res_powerplant_nep2035_mview.geom, %(ST_Transform_2)s)) AS geom_em \\nFROM model_draft.ego_supply_res_powerplant_nep2035_mview \\nWHERE model_draft.ego_supply_res_powerplant_nep2035_mview.subst_id = %(subst_id_1)s AND model_draft.ego_supply_res_powerplant_nep2035_mview.generation_type IN (%(generation_type_1)s, %(generation_type_2)s) AND model_draft.ego_supply_res_powerplant_nep2035_mview.voltage_level IN (%(voltage_level_1)s, %(voltage_level_2)s)'] [parameters: {'ST_Transform_1': 4326, 'ST_Transform_2': 4326, 'subst_id_1': 239, 'generation_type_1': 'solar', 'generation_type_2': 'wind', 'voltage_level_1': 4, 'voltage_level_2': 5}] (Background on this error at: http://sqlalche.me/e/f405)" - ] - } - ], - "source": [ - "# Import generators\n", - "scenario = 'nep2035'\n", - "edisgo.import_generators(generator_scenario=scenario)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "You can have a look at all generators again and compare it to the list of generators created earlier before the import of new generators." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "edisgo.network.mv_grid.graph.nodes_by_attribute('generator')" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Grid reinforcement \n", - "\n", - "Now we can finally calculate grid expansion costs.\n", - "\n", - "The grid expansion methodology is based on the distribution grid study of dena [[1]](#[1]) and Baden-Wuerttemberg [[2]](#[2]). For now only a combined analysis of MV and LV grids is possible. 
The order in which grid expansion measures are conducted is as follows:\n", - "\n", - "* Reinforce transformers and lines due to over-loading issues\n", - "* Reinforce lines in MV grid due to over-voltage issues\n", - "* Reinforce lines in LV grid due to over-voltage issues\n", - "* Reinforce transformers and lines due to over-loading issues\n", - "\n", - "Reinforcement of transformers and lines due to over-loading issues is performed twice, once in the beginning and again after fixing over-voltage problems, because the changed power flows after reinforcing the grid may lead to new over-loading issues. (For further explanation see the [documentation](http://edisgo.readthedocs.io/en/dev/features_in_detail.html#automatic-grid-expansion).)\n", - "\n", - "After each reinforcement step a non-linear power flow analysis is conducted using PyPSA. Let's do a power flow analysis before the reinforcement to see how many over-voltage issues there are." - ] - }, - { - "cell_type": "code", - "execution_count": 12, - "metadata": {}, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "INFO:pypsa.pf:Slack bus for sub-network 0 is Bus_MVStation_239\n", - "INFO:pypsa.pf:Performing non-linear load-flow on AC sub-network SubNetwork 0 for snapshots DatetimeIndex(['1970-01-01'], dtype='datetime64[ns]', freq='H')\n", - "INFO:pypsa.pf:Newton-Raphson solved in 3 iterations with error of 0.000001 in 0.519885 seconds\n" - ] - } - ], - "source": [ - "# Do non-linear power flow analysis with PyPSA\n", - "edisgo.analyze()" - ] - }, - { - "cell_type": "code", - "execution_count": 13, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Voltage levels for ['GeneratorFluctuating_839361', 'GeneratorFluctuating_839362', 'GeneratorFluctuating_839363', 'GeneratorFluctuating_839364', 'GeneratorFluctuating_878450', 'GeneratorFluctuating_878583', 'GeneratorFluctuating_878609', 'GeneratorFluctuating_878611', 'GeneratorFluctuating_878614', 'GeneratorFluctuating_878615', 'GeneratorFluctuating_878862', 'GeneratorFluctuating_878863', 'GeneratorFluctuating_878864', 'GeneratorFluctuating_878865', 'GeneratorFluctuating_878866', 'GeneratorFluctuating_878867', 'GeneratorFluctuating_878875', 'GeneratorFluctuating_878950', 'GeneratorFluctuating_878963', 'BranchTee_MVGrid_239_1', 'BranchTee_MVGrid_239_2', 'BranchTee_MVGrid_239_3', 'BranchTee_MVGrid_239_4', 'BranchTee_MVGrid_239_5', 'BranchTee_MVGrid_239_6', 'BranchTee_MVGrid_239_7', 'BranchTee_MVGrid_239_8', 'BranchTee_MVGrid_239_9', 'BranchTee_MVGrid_239_10', 'BranchTee_MVGrid_239_11', 'BranchTee_MVGrid_239_12', 'BranchTee_MVGrid_239_13', 'BranchTee_MVGrid_239_14', 'BranchTee_MVGrid_239_15', 'BranchTee_MVGrid_239_16', 'BranchTee_MVGrid_239_17', 'BranchTee_MVGrid_239_18', 'BranchTee_MVGrid_239_19', 'BranchTee_MVGrid_239_20', 'BranchTee_MVGrid_239_21', 'BranchTee_MVGrid_239_22', 'BranchTee_MVGrid_239_23', 'BranchTee_MVGrid_239_24', 'BranchTee_MVGrid_239_25', 'BranchTee_MVGrid_239_26', 'BranchTee_MVGrid_239_27', 'BranchTee_MVGrid_239_28', 'BranchTee_MVGrid_239_29', 'BranchTee_MVGrid_239_30', 'BranchTee_MVGrid_239_31', 'BranchTee_MVGrid_239_32', 'BranchTee_MVGrid_239_33', 'BranchTee_MVGrid_239_34', 'BranchTee_MVGrid_239_35', 'BranchTee_MVGrid_239_36', 'BranchTee_MVGrid_239_37', 'BranchTee_MVGrid_239_38', 'BranchTee_MVGrid_239_39', 'BranchTee_MVGrid_239_40', 'BranchTee_MVGrid_239_41', 'BranchTee_MVGrid_239_42', 'BranchTee_MVGrid_239_43', 'BranchTee_MVGrid_239_44', 'BranchTee_MVGrid_239_45', 'BranchTee_MVGrid_239_46', 
'BranchTee_MVGrid_239_47', 'BranchTee_MVGrid_239_48', 'BranchTee_MVGrid_239_49', 'BranchTee_MVGrid_239_50', 'BranchTee_MVGrid_239_51', 'BranchTee_MVGrid_239_52', 'BranchTee_MVGrid_239_53', 'BranchTee_MVGrid_239_54', 'BranchTee_MVGrid_239_55', 'BranchTee_MVGrid_239_56', 'BranchTee_MVGrid_239_57', 'BranchTee_MVGrid_239_58', 'BranchTee_MVGrid_239_59', 'BranchTee_MVGrid_239_60', 'BranchTee_MVGrid_239_61', 'BranchTee_MVGrid_239_62', 'BranchTee_MVGrid_239_63', 'BranchTee_MVGrid_239_64', 'BranchTee_MVGrid_239_65', 'BranchTee_MVGrid_239_66', 'BranchTee_MVGrid_239_67', 'BranchTee_MVGrid_239_68', 'BranchTee_MVGrid_239_69', 'BranchTee_MVGrid_239_70', 'BranchTee_MVGrid_239_71', 'BranchTee_MVGrid_239_72', 'BranchTee_MVGrid_239_73', 'BranchTee_MVGrid_239_74', 'BranchTee_MVGrid_239_75', 'BranchTee_MVGrid_239_76', 'BranchTee_MVGrid_239_77', 'BranchTee_MVGrid_239_78', 'BranchTee_MVGrid_239_79', 'BranchTee_MVGrid_239_80', 'BranchTee_MVGrid_239_81', 'BranchTee_MVGrid_239_82', 'BranchTee_MVGrid_239_83', 'BranchTee_MVGrid_239_84', 'BranchTee_MVGrid_239_85', 'BranchTee_MVGrid_239_86', 'BranchTee_MVGrid_239_87', 'BranchTee_MVGrid_239_88', 'BranchTee_MVGrid_239_89', 'BranchTee_MVGrid_239_90', 'BranchTee_MVGrid_239_91', 'BranchTee_MVGrid_239_92', 'BranchTee_MVGrid_239_93', 'BranchTee_MVGrid_239_94', 'LVStation_122408', 'LVStation_485974', 'LVStation_138585', 'LVStation_119895', 'LVStation_119896', 'LVStation_119889', 'LVStation_119890', 'LVStation_119891', 'LVStation_119892', 'LVStation_119893', 'LVStation_119894', 'LVStation_119897', 'LVStation_119898', 'LVStation_119899', 'LVStation_119900', 'LVStation_119901', 'LVStation_417530', 'LVStation_419885', 'LVStation_121940', 'LVStation_121941', 'LVStation_122426', 'LVStation_122480', 'LVStation_418254', 'LVStation_419605', 'LVStation_416441', 'LVStation_418546', 'LVStation_416244', 'LVStation_417898', 'LVStation_419795', 'LVStation_120737', 'LVStation_120736', 'LVStation_120738', 'LVStation_120942', 'LVStation_120943', 'LVStation_122230', 'LVStation_122231', 'LVStation_418237', 'LVStation_416449', 'LVStation_417550', 'LVStation_139107', 'LVStation_120585', 'LVStation_417276', 'LVStation_122520', 'LVStation_419726', 'LVStation_121776', 'LVStation_419327', 'LVStation_417734', 'LVStation_125015', 'LVStation_125016', 'LVStation_125017', 'LVStation_500931', 'LVStation_418244', 'LVStation_120411', 'LVStation_121317', 'LVStation_121318', 'LVStation_416815', 'LVStation_139104', 'LVStation_139105', 'LVStation_139106', 'LVStation_139192', 'LVStation_119903', 'LVStation_119904', 'LVStation_418449', 'LVStation_419079', 'LVStation_119697', 'LVStation_119698', 'LVStation_122076', 'LVStation_122077', 'LVStation_122078', 'LVStation_122124', 'LVStation_122125', 'LVStation_122122', 'LVStation_122123', 'LVStation_124543', 'LVStation_124911', 'LVStation_124910', 'LVStation_139183', 'LVStation_139184', 'LVStation_139185', 'LVStation_139186', 'LVStation_139187', 'LVStation_139188', 'LVStation_139189', 'LVStation_488816', 'LVStation_490252', 'LVStation_490253', 'LVStation_118322', 'LVStation_118323', 'LVStation_118324', 'LVStation_515314', 'LVStation_120387', 'LVStation_120388', 'LVStation_120389', 'LVStation_120390', 'LVStation_120853', 'LVStation_511325', 'LVStation_120470', 'LVStation_417987', 'LVStation_119612', 'LVStation_119613', 'LVStation_119701', 'LVStation_119702', 'LVStation_119703', 'LVStation_119704', 'LVStation_120038', 'LVStation_120555', 'LVStation_500916', 'LVStation_418547', 'LVStation_121286', 'LVStation_121287', 'LVStation_121288', 'LVStation_121289', 
'LVStation_121741', 'LVStation_121742', 'LVStation_121743', 'LVStation_496409', 'LVStation_416983', 'LVStation_121878', 'LVStation_121879', 'LVStation_121880', 'LVStation_496410', 'LVStation_121915', 'LVStation_121916', 'LVStation_121917', 'LVStation_121918', 'LVStation_121919', 'LVStation_416589', 'LVStation_122400', 'LVStation_122401', 'LVStation_122696', 'LVStation_122697', 'LVStation_122698', 'LVStation_122699', 'LVStation_123655', 'LVStation_124010', 'LVStation_124011', 'LVStation_124109', 'LVStation_124110', 'LVStation_124111', 'LVStation_417936', 'LVStation_124902', 'LVStation_416175', 'LVStation_125210', 'LVStation_125211', 'LVStation_125212', 'LVStation_125213', 'LVStation_125214', 'LVStation_125215', 'LVStation_125216', 'LVStation_125217', 'LVStation_503036', 'LVStation_125269', 'LVStation_125267', 'LVStation_125268', 'LVStation_120898', 'LVStation_120899', 'LVStation_139149', 'LVStation_139150', 'LVStation_139151', 'LVStation_139152', 'LVStation_417909', 'LVStation_124085', 'LVStation_124086', 'LVStation_124581', 'LVStation_124582', 'LVStation_124583', 'LVStation_124584', 'LVStation_498758', 'MVStation_239', 'MVDisconnectingPoint_1', 'MVDisconnectingPoint_2', 'MVDisconnectingPoint_3', 'MVDisconnectingPoint_4', 'MVDisconnectingPoint_5'] are not returned from PFA\n" - ] - }, - { - "data": { - "text/html": [ - "
\n", - "\n", - "\n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - "
1970-01-01
\n", - "
" - ], - "text/plain": [ - "Empty DataFrame\n", - "Columns: []\n", - "Index: [1970-01-01 00:00:00]" - ] - }, - "execution_count": 13, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "# get voltage at each node from power-flow analysis results\n", - "v_mag_pu_pfa = edisgo.network.results.v_res(nodes=edisgo.network.mv_grid.graph.nodes())\n", - "# set maximum allowed voltage deviation to 10%\n", - "max_v_dev = 0.1\n", - "# find all nodes with a node voltage deviation greater the allowed voltage deviation\n", - "v_mag_pu_pfa[(v_mag_pu_pfa > (1 + max_v_dev))] - 1" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Reinforcement is invoked doing the following:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# Do grid reinforcement\n", - "edisgo.reinforce()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Let's check for over-voltage issues again:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# get voltage at each node from power-flow analysis results\n", - "v_mag_pu_pfa = edisgo.network.results.v_res(nodes=edisgo.network.mv_grid.graph.nodes())\n", - "# set maximum allowed voltage deviation to 10%\n", - "max_v_dev = 0.1\n", - "# find all nodes with a node voltage deviation greater the allowed voltage deviation\n", - "v_mag_pu_pfa[(v_mag_pu_pfa > (1 + max_v_dev))] - 1" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Evaluate results \n", - "\n", - "Above we already saw how to access voltage results from the power flow analysis. All results are stored in the [Results](http://edisgo.readthedocs.io/en/dev/api/edisgo.grid.html#edisgo.grid.network.Results) object and can be accessed through\n", - "```python\n", - "edisgo.network.results\n", - "```\n", - "\n", - "All changes in the grid conducted during the grid reinforcement, such as removed and new lines and new transformers, can be viewed as follows:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "edisgo.network.results.equipment_changes" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "You can also retrieve grid expansion costs through:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "costs = edisgo.network.results.grid_expansion_costs" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "If you are interested in accumulated costs you could group them like that:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# group costs by type\n", - "costs_grouped = costs.groupby(['type']).sum()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "An overview of the assumptions used to calculate grid expansion costs can be found in the [documentation]( http://edisgo.readthedocs.io/en/dev/features_in_detail.html#grid-expansion-costs)." - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Now let's compare the grid expansion costs for the 'NEP 2035' scenario with grid expansion costs for the 'ego 100' scenario. Therefore, we first have to setup the new scenario and calculate grid expansion costs." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# initialize new EDisGo object with 'ego 100' scenario\n", - "edisgo_ego100 = EDisGo(ding0_grid=ding0_grid,\n", - " worst_case_analysis=worst_case_analysis,\n", - " generator_scenario='ego100')\n", - "# conduct grid reinforcement\n", - "edisgo_ego100.reinforce()\n", - "# get grouped costs\n", - "costs_grouped_ego100 = edisgo_ego100.network.results.grid_expansion_costs.groupby(['type']).sum()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# plot" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## References \n", - "\n", - " [1] A.C. Agricola et al.: dena-Verteilnetzstudie: Ausbau- und Innovationsbedarf der Stromverteilnetze in Deutschland bis 2030. 2012.\n", - "\n", - " [2] C. Rehtanz et al.: Verteilnetzstudie für das Land Baden-Württemberg, ef.Ruhr GmbH, 2017." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.6.3" - } - }, - "nbformat": 4, - "nbformat_minor": 2 -} diff --git a/ego/examples/tutorials/etrago_OpenMod_Zuerich18.ipynb b/ego/examples/tutorials/etrago_OpenMod_Zuerich18.ipynb deleted file mode 100644 index 0c7f115f..00000000 --- a/ego/examples/tutorials/etrago_OpenMod_Zuerich18.ipynb +++ /dev/null @@ -1,706 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\"EUF\"\n", - "\"HSF\"\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": true - }, - "outputs": [], - "source": [ - "__copyright__ = \"Zentrum für nachhaltige Energiesysteme Flensburg\"\n", - "__license__ = \"GNU Affero General Public License Version 3 (AGPL-3.0)\"\n", - "__url__ = \"https://github.com/openego/data_processing/blob/master/LICENSE\"\n", - "__author__ = \"wolfbunke, ulfmueller\"" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "#
OpenMod Workshop Zürich 2018
\n", - "\n", - "## Open source tools for cross-grid-level electricity grid optimization developed in the open_eGo project - Learn-a-thon\n", - "\n", - "****\n", - "\n", - "### Learn more about\n", - "\n", - "\n", - "* __[open_eGo Project Webpage](https://openegoproject.wordpress.com/)__\n", - "* __[eTraGo Tool Documentation](http://etrago.readthedocs.io/en/latest/index.html)__ \n", - "* __[oedb Tutorials](http://oep.iks.cs.ovgu.de/dataedit/)__ How to use the OpenEnergy Database\n", - "* __[OpenMod Forum](https://forum.openmod-initiative.org/t/learn-a-thon-using-tools-for-cross-grid-level-electricity-grid-optimization-developed-in-the-open-ego-project/856)__ " - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\"HSF\"\n", - "## Table of Contents \n", - "\n", - "\n", - "* [Getting started with eTraGo](#started)\n", - "* [LOPF Calculation of Germany and neighbours with 10 notes](#d-kmean10)\n", - "* [LOPF Calculation of Schleswig-Holstein](#shcalc)\n", - "* [Using snapshot clustering](#snapshot)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# Getting started with eTraGo \n", - "\n", - "\n", - "The python package eTraGo provides a optimization of flexibility options for transmission grids based on PyPSA. A speciality in this context is that transmission grids are described by the 380, 220 and 110 kV in Germany. Conventionally the 110kV grid is part of the distribution grid. The integration of the transmission and ‘upper’ distribution grid is part of eTraGo.\n", - "\n", - "The focus of optimization are flexibility options with a special focus on energy storages and grid expansion measures.\n", - "\n", - "\n", - "\n", - "\n", - "## Installation \n", - "\n", - "Please, find more information on the [README.md](https://github.com/openego/eGo/tree/features/tutorial/ego/examples/tutorials#etrago). \n", - "\n", - "\n", - "## Import eTraGo packages\n", - "\n", - "We are importing the [main function](https://github.com/openego/eTraGo/blob/dev/etrago/appl.py) of eTraGo and its database and plotting functions. 
\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "import numpy as np\n", - "import pandas as pd\n", - "from numpy import genfromtxt\n", - "np.random.seed()\n", - "import time\n", - "import oedialect\n", - "\n", - "import os\n", - "\n", - "if not 'READTHEDOCS' in os.environ:\n", - " # Sphinx does not run this code.\n", - " # Do not import internal packages directly \n", - " from etrago.tools.io import NetworkScenario, results_to_oedb\n", - " from etrago.tools.plot import (plot_line_loading, plot_stacked_gen,\n", - " add_coordinates, curtailment, gen_dist,\n", - " storage_distribution,storage_expansion)\n", - " from etrago.tools.utilities import (load_shedding, data_manipulation_sh,\n", - " results_to_csv, parallelisation, pf_post_lopf, \n", - " loading_minimization, calc_line_losses, group_parallel_lines)\n", - " from etrago.cluster.networkclustering import busmap_from_psql, cluster_on_extra_high_voltage, kmean_clustering\n", - " from egoio.tools import db\n", - " from sqlalchemy.orm import sessionmaker\n", - " from etrago.appl import etrago\n", - " \n", - " " - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": true - }, - "outputs": [], - "source": [ - "# enable jupyter interactiv plotting\n", - "%matplotlib notebook\n", - "from ipywidgets import *\n", - "import matplotlib.pyplot as plt" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# LOPF Calculation of Germany and neighbours with 30 nodes \n", - "\n", - "\n", - "
\n", - "[top](#toc)
\n", - "\n", - "In this section we start our eTraGo calulation with an __Status Quo__ scenario of Germany an its electrical neighbours. For time and performents reasons we are useing the [k-mean](https://de.wikipedia.org/wiki/K-Means-Algorithmus) clustering [functionality of eTraGo](http://etrago.readthedocs.io/en/latest/api/etrago.cluster.html#etrago.cluster.networkclustering.kmean_clustering) and use $k=30$ nodes. For the same reason we choose the time period of __start_snapshot__ and __end_snapshot__ for a day with 24 hours of the scenario year. \n", - "\n", - "\n", - "### Make your calulation settings\n", - "\n", - "A detailed discription of the args python dictionary can be found under . \n", - "\n", - "\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": true - }, - "outputs": [], - "source": [ - "args_k10 = {# Setup and Configuration:\n", - " 'db': 'oedb', # db session\n", - " 'gridversion': \"v0.2.11\", # None for model_draft or Version number (e.g. v0.2.11) for grid schema\n", - " 'method': 'lopf', # lopf or pf\n", - " 'pf_post_lopf': False, # state whether you want to perform a pf after a lopf simulation\n", - " 'start_snapshot': 4393, # 2.07.\n", - " 'end_snapshot' : 4400,\n", - " 'scn_name': 'NEP 2035', # state which scenario you want to run: Status Quo, NEP 2035, eGo100\n", - " 'solver': 'glpk', # glpk, cplex or gurobi\n", - " # Export options:\n", - " 'lpfile': False, # state if and where you want to save pyomo's lp file: False or /path/tofolder\n", - " 'results': False, # state if and where you want to save results as csv: False or /path/tofolder\n", - " 'export': False, # state if you want to export the results back to the database\n", - " # Settings: \n", - " 'storage_extendable':True, # state if you want storages to be installed at each node if necessary.\n", - " 'generator_noise':True, # state if you want to apply a small generator noise \n", - " 'reproduce_noise': False, # state if you want to use a predefined set of random noise for the given scenario. \n", - " # if so, provide path, e.g. 'noise_values.csv'\n", - " 'minimize_loading':False,\n", - " # Clustering:\n", - " 'k_mean_clustering': 30, # state if you want to perform a k-means clustering on the given network. \n", - " # State False or the value k (e.g. 
20).\n", - " 'network_clustering': False, # state if you want to perform a clustering of HV buses to EHV buses.\n", - " # Simplifications:\n", - " 'parallelisation':False, # state if you want to run snapshots parallely.\n", - " 'skip_snapshots':False,\n", - " 'line_grouping': False, # state if you want to group lines running between the same buses.\n", - " 'branch_capacity_factor': 0.7, # globally extend or lower branch capacities\n", - " 'load_shedding':False, # meet the demand at very high cost; for debugging purposes.\n", - " 'comments':None }" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "# Start eTraGo calculation with args setting\n", - "# create network object which incluedes all input and output data\n", - "network = etrago(args_k10)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "# plot generation distripution\n", - "gen_dist(network, techs=None, snapshot=1,n_cols=3,gen_size=0.02)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "# plot stacked sum of nominal power for each generator type and timestep\n", - "#fig.set_size_inches(14,14)\n", - "# fix error in .../eGo/ego/examples/tutorials/src/etrago/etrago/tools/plot.py\n", - "# 'wind_offshore':'skyblue', wind_onshore':'skyblue',\n", - "plot_stacked_gen(network, resolution=\"MW\")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "# make a line loading plot\n", - "# basemade einbauen für hintergrund länder\n", - "fig,ax = plt.subplots(1,1)\n", - "fig.set_size_inches(8,8)\n", - "plot_line_loading(network)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "# plot to show extendable storages, if expanded\n", - "storage_expansion(network)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "# get storage sizes in MW\n", - "network.storage_units.p_nom_opt.groupby(network.storage_units.carrier, axis=0).sum()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### Marginal price per bus node" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": true - }, - "outputs": [], - "source": [ - "# get snapshot\n", - "now = network.snapshots[2]\n", - "\n", - "fig,ax = plt.subplots(1,1)\n", - "fig.set_size_inches(6,4)\n", - "\n", - "network.plot(ax=ax,line_widths=pd.Series(0.5,network.lines.index))\n", - "plt.hexbin(network.buses.x, network.buses.y, \n", - " gridsize=20,\n", - " C=network.buses_t.marginal_price.loc[now],\n", - " cmap=plt.cm.jet)\n", - "\n", - "#for some reason the colorbar only works with graphs plt.plot\n", - "#and must be attached plt.colorbar\n", - "\n", - "cb = plt.colorbar()\n", - "cb.set_label('Locational Marginal Price (EUR/MWh)') " - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### Change the scnario \n", - "\n", - "* Set 'scn_name': to 'NEP 2035' and recalculate. 
\n", - "\n", - "\n", - "\n", - "****\n", - "\n", - "\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": true - }, - "outputs": [], - "source": [ - "# delete eTraGo object\n", - "#del network" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# LOPF Calculation of Schleswig-Holstein \n", - "\n", - "\n", - "
\n", - "[top](#toc)
\n", - "\n", - "\n", - "### Case Schleswig-Holstein\n", - "\n", - "The data set of Schleswing-Holstein is an extract of the main data set and works as an island. The power production and flows of the adjacent network areas are neglected. Therefore, the installed capacity and power production is very high.\n", - "\n", - "For our analysis we used serveral plotting options of eTraGo of [etrago.tools.plot](http://etrago.readthedocs.io/en/latest/api/etrago.tools.html#module-etrago.tools.plot).\n", - "\n", - "\n", - "### Make your settings\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": true - }, - "outputs": [], - "source": [ - "args_sh = {# Setup and Configuration:\n", - " 'db': 'oedb', # db session\n", - " 'gridversion': \"v0.2.11\", # None for model_draft or Version number (e.g. v0.2.11) for grid schema\n", - " 'method': 'lopf', # lopf or pf\n", - " 'pf_post_lopf': False, # state whether you want to perform a pf after a lopf simulation\n", - " 'start_snapshot': 4393, # 6552\n", - " 'end_snapshot' : 4394,\n", - " 'scn_name': 'SH NEP 2035', # state which scenario you want to run: Status Quo, NEP 2035, eGo100\n", - " 'solver': 'glpk', # glpk, cplex or gurobi\n", - " # Export options:\n", - " 'lpfile': False, # state if and where you want to save pyomo's lp file: False or /path/tofolder\n", - " 'results': False, # state if and where you want to save results as csv: False or /path/tofolder\n", - " 'export': False, # state if you want to export the results back to the database\n", - " # Settings: \n", - " 'storage_extendable':False, # state if you want storages to be installed at each node if necessary.\n", - " 'generator_noise':True, # state if you want to apply a small generator noise \n", - " 'reproduce_noise': False, # state if you want to use a predefined set of random noise for the given.\n", - " # scenario. if so, provide path, e.g. 'noise_values.csv'\n", - " 'minimize_loading':False,\n", - " # Clustering:\n", - " 'k_mean_clustering': False, # state if you want to perform a k-means clustering on the given network. \n", - " # State False or the value k (e.g. 
20).\n", - " 'network_clustering': False, # state if you want to perform a clustering of HV buses to EHV buses.\n", - " # Simplifications:\n", - " 'parallelisation':False, # state if you want to run snapshots parallely.\n", - " 'skip_snapshots':False,\n", - " 'line_grouping': False, # state if you want to group lines running between the same buses.\n", - " 'branch_capacity_factor': 0.7, # globally extend or lower branch capacities\n", - " 'load_shedding':False, # meet the demand at very high cost; for debugging purposes.\n", - " 'comments':None }" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": true - }, - "outputs": [], - "source": [ - "# Start eTraGo calculation with args setting\n", - "# create network object \n", - "network = etrago(args_sh)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": true - }, - "outputs": [], - "source": [ - "# plot generation distripution\n", - "gen_dist(network, techs=None, snapshot=1,n_cols=3,gen_size=0.02)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": true - }, - "outputs": [], - "source": [ - "# plot stacked sum of nominal power for each generator type and timestep\n", - "plot_stacked_gen(network, resolution=\"MW\")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": true - }, - "outputs": [], - "source": [ - "# plots\n", - "# make a line loading plot\n", - "fig,ax = plt.subplots(1,1)\n", - "fig.set_size_inches(12,10)\n", - "plot_line_loading(network)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": true - }, - "outputs": [], - "source": [ - "# plot to show extendable storages\n", - "storage_expansion(network)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": true - }, - "outputs": [], - "source": [ - "# curtailment per carrier / energy source\n", - "curtailment(network, carrier='wind_onshore')" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": true - }, - "outputs": [], - "source": [ - "# delete network object\n", - "del network" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# Using pf after lopf Calculation of Germany and neighbours with 30 nodes \n", - "\n", - "
\n", - "[top](#toc)
\n", - "\n", - "\n", - "In order to compute the grid losses we add an power flow calculation after our liniar opf calculation by setting *pf_post_lopf = True*. \n", - "\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": true - }, - "outputs": [], - "source": [ - "args_lopf_pf = {# Setup and Configuration:\n", - " 'db': 'oedb', # db session\n", - " 'gridversion': \"v0.2.11\", # None for model_draft or Version number (e.g. v0.2.11) for grid schema\n", - " 'method': 'lopf', # lopf or pf\n", - " 'pf_post_lopf': True, # state whether you want to perform a pf after a lopf simulation\n", - " 'start_snapshot': 4393, \n", - " 'end_snapshot' : 4417,\n", - " 'scn_name': 'NEP 2035', # state which scenario you want to run: Status Quo, NEP 2035, eGo100\n", - " 'solver': 'glpk', # glpk, cplex or gurobi\n", - " # Export options:\n", - " 'lpfile': False, # state if and where you want to save pyomo's lp file: False or /path/tofolder\n", - " 'results': False, # state if and where you want to save results as csv: False or /path/tofolder\n", - " 'export': False, # state if you want to export the results back to the database\n", - " # Settings: \n", - " 'storage_extendable':False, # state if you want storages to be installed at each node if necessary.\n", - " 'generator_noise':True, # state if you want to apply a small generator noise \n", - " 'reproduce_noise': False, # state if you want to use a predefined set of random noise for the given.\n", - " # scenario. if so, provide path, e.g. 'noise_values.csv'\n", - " 'minimize_loading':False,\n", - " # Clustering:\n", - " 'k_mean_clustering': 30, # state if you want to perform a k-means clustering on the given network. \n", - " # State False or the value k (e.g. 20).\n", - " 'network_clustering': False, # state if you want to perform a clustering of HV buses to EHV buses.\n", - " # Simplifications:\n", - " 'parallelisation':False, # state if you want to run snapshots parallely.\n", - " 'skip_snapshots':False,\n", - " 'line_grouping': False, # state if you want to group lines running between the same buses.\n", - " 'branch_capacity_factor': 0.7, # globally extend or lower branch capacities\n", - " 'load_shedding':False, # meet the demand at very high cost; for debugging purposes.\n", - " 'comments':None }" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": true - }, - "outputs": [], - "source": [ - "# Start eTraGo calculation with args setting\n", - "# create network object\n", - "network = etrago(args_lopf_pf)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": true - }, - "outputs": [], - "source": [ - "# plot stacked sum of nominal power for each generator type and timestep\n", - "plot_stacked_gen(network, resolution=\"MW\")\n", - "#plt.close()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": true - }, - "outputs": [], - "source": [ - "# plots\n", - "# make a line loading plot\n", - "fig,ax = plt.subplots(1,1)\n", - "fig.set_size_inches(8,8)\n", - "\n", - "plot_line_loading(network)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### Calculate grid losses\n", - "\n", - "We are using the function [calc_line_losses(network)](http://etrago.readthedocs.io/en/latest/_modules/etrago/tools/utilities.html#calc_line_losses)." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": true - }, - "outputs": [], - "source": [ - "\n", - "# calcualte total grid losses\n", - "calc_line_losses(network)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### Plot line costs" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": true - }, - "outputs": [], - "source": [ - "# Line losses\n", - "# calculate apparent power S = sqrt(p² + q²) [in MW]\n", - "s0_lines = ((network.lines_t.p0**2 + network.lines_t.q0**2).\\\n", - " apply(np.sqrt)) \n", - "\n", - "# calculate current I = S / U [in A]\n", - "i0_lines = np.multiply(s0_lines, 1000000) / np.multiply(network.lines.v_nom, 1000) \n", - "\n", - "# calculate losses per line and timestep network.lines_t.line_losses = I² * R [in MW]\n", - "network.lines_t.losses = np.divide(i0_lines**2 * network.lines.r, 1000000)\n", - "\n", - "# calculate total losses per line [in MW]\n", - "network.lines = network.lines.assign(losses=np.sum(network.lines_t.losses).values)\n", - "\n", - "# prepare plotting\n", - "timestep =1\n", - "cmap = plt.cm.jet\n", - "\n", - "fig,ax = plt.subplots(1,1)\n", - "fig.set_size_inches(6,4)\n", - "\n", - "# do the plotting\n", - "lc= network.plot(line_colors=network.lines.losses, line_cmap=cmap,\n", - " title=\"Line loading\", line_widths=0.55)\n", - "\n", - "cb = plt.colorbar(lc[1])\n", - "cb.set_label('Locational line losses in (EUR/MWh)')\n", - "plt.show()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### Plot the reactive power" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": true - }, - "outputs": [], - "source": [ - "# get snapshot\n", - "now = network.snapshots[2]\n", - "#plot the reactive power\n", - "fig,ax = plt.subplots(1,1)\n", - "\n", - "fig.set_size_inches(6,6)\n", - "\n", - "q = network.buses_t.q.sum()#.loc[now]\n", - "\n", - "bus_colors = pd.Series(\"r\",network.buses.index)\n", - "bus_colors[q< 0.] 
= \"b\"\n", - "\n", - "\n", - "network.plot(bus_sizes=abs(q)*0.005,ax=ax,bus_colors=bus_colors,title=\"Reactive power feed-in (red=+ve, blue=-ve)\")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Have a look into the Programm on Github\n", - "\n", - "* " - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.5.5" - } - }, - "nbformat": 4, - "nbformat_minor": 2 -} diff --git a/ego/examples/tutorials/grid_1476.png b/ego/examples/tutorials/grid_1476.png deleted file mode 100644 index cbef8115..00000000 Binary files a/ego/examples/tutorials/grid_1476.png and /dev/null differ diff --git a/ego/examples/tutorials/requirements.yml b/ego/examples/tutorials/requirements.yml deleted file mode 100644 index a672c934..00000000 --- a/ego/examples/tutorials/requirements.yml +++ /dev/null @@ -1,28 +0,0 @@ -name: openMod_Zuerich2018 - -channels: - - conda-forge - - anaconda - -dependencies: - - python=3.5 - - notebook # jupyter notebook - - numpy=1.14.3 - - pandas=0.20.3 - - pyyaml=3.12 - - requests - - sqlalchemy - - Rtree=0.8.3 - - scikit-learn - - scipy - - ipywidgets - - pip: - - ding0==0.1.4 - - "--editable=git+git@github.com:openego/eGo.git@dev#egg=eGo --process-dependency-links" - - "--editable=git+git@github.com:openego/oedialect.git@master#egg=oedialect" - - "--editable=git+git@github.com:openego/ego.io.git@v0.4.0#egg=egoio" - - "--editable=git+git@github.com:openego/PyPSA.git@dev#egg=pypsa" - - "--editable=git+git@github.com:openego/eTraGo.git@0.5.1#egg=etrago" # eTraGo==0.5.1 - - "--editable=git+git@github.com:openego/tsam.git@master#egg=tsam" - - "--editable=git+git@github.com:openego/eDisGo.git@dev#egg=edisgo" - diff --git a/ego/mv_clustering/__init__.py b/ego/mv_clustering/__init__.py new file mode 100644 index 00000000..593130ae --- /dev/null +++ b/ego/mv_clustering/__init__.py @@ -0,0 +1 @@ +from ego.mv_clustering.mv_clustering import cluster_workflow # noqa: F401 diff --git a/ego/mv_clustering/database.py b/ego/mv_clustering/database.py new file mode 100644 index 00000000..92cec635 --- /dev/null +++ b/ego/mv_clustering/database.py @@ -0,0 +1,112 @@ +import logging +import subprocess +import sys +import time + +from contextlib import contextmanager +from functools import wraps + +import saio + +from sqlalchemy import create_engine +from sqlalchemy.orm import sessionmaker + +logger = logging.getLogger(__name__) + + +def get_engine(config=None): + config = config["database"] + engine = create_engine( + f"postgresql+psycopg2://{config['user']}:" + f"{config['password']}@{config['host']}:" + f"{int(config['port'])}/{config['database_name']}", + echo=False, + ) + logger.info(f"Created engine: {engine}.") + return engine + + +@contextmanager +def sshtunnel(config=None): + ssh_config = config["ssh"] + if ssh_config["enabled"]: + try: + logger.info("Open ssh tunnel.") + proc = subprocess.Popen( + [ + "ssh", + "-N", + "-L", + f"{ssh_config['local_port']}" + f":{ssh_config['local_address']}" + f":{ssh_config['port']}", + f"{ssh_config['user']}@{ssh_config['ip']}", + ], + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + ) + time.sleep(2) + yield proc + finally: + logger.info("Close ssh tunnel.") + proc.kill() + outs, errs = proc.communicate() + logger.info( 
+ f"SSH process output STDOUT:{outs.decode('utf-8')}, " + f"STDERR:{errs.decode('utf-8')}" + ) + else: + try: + logger.info("Don't use an ssh tunnel.") + yield None + finally: + logger.info("Close contextmanager.") + + +@contextmanager +def session_scope(engine): + Session = sessionmaker(bind=engine) + session = Session() + try: + yield session + session.commit() + except: # noqa: E722 + session.rollback() + raise + finally: + session.close() + + +def session_decorator(f): + @wraps(f) + def wrapper(*args, **kwargs): + with session_scope(kwargs["engine"]) as session: + kwargs["session"] = session + kwargs.pop("engine") + logger.info(f"Calling {f.__name__}") + return f(*args, **kwargs) + + return wrapper + + +def register_tables_in_saio(engine): + db_tables = { + "egon_mv_grid_district": "grid.egon_mv_grid_district", + "generators_pv_status_quo": "supply.egon_power_plants_pv", + "generators_pv_rooftop": "supply.egon_power_plants_pv_roof_building", + "generators_wind_status_quo": "supply.egon_power_plants_wind", + "generators": "supply.egon_power_plants", + "etrago_load": "grid.egon_etrago_load", + "etrago_load_timeseries": "grid.egon_etrago_load_timeseries", + "heat_pump_capacity_individual": "supply.egon_individual_heating", + "pth_capacity_district_heating": "grid.egon_etrago_link", + } + orm = {} + + for name, table_str in db_tables.items(): + table_list = table_str.split(".") + table_schema = table_list[0] + table_name = table_list[1] + saio.register_schema(table_schema, engine) + orm[name] = sys.modules[f"saio.{table_schema}"].__getattr__(table_name) + return orm diff --git a/ego/mv_clustering/egon_data_io.py b/ego/mv_clustering/egon_data_io.py new file mode 100644 index 00000000..a98e7e69 --- /dev/null +++ b/ego/mv_clustering/egon_data_io.py @@ -0,0 +1,352 @@ +import logging + +import pandas as pd + +from sqlalchemy import func + +from ego.mv_clustering.database import session_decorator + +logger = logging.getLogger(__name__) + + +def func_within(geom_a, geom_b, srid=3035): + """ + Checks if geometry a is completely within geometry b. + + Parameters + ---------- + geom_a : Geometry + Geometry within `geom_b`. + geom_b : Geometry + Geometry containing `geom_a`. + srid : int + SRID geometries are transformed to in order to use the same SRID for both + geometries. + + """ + return func.ST_Within( + func.ST_Transform( + geom_a, + srid, + ), + func.ST_Transform( + geom_b, + srid, + ), + ) + + +@session_decorator +def get_grid_ids(orm=None, session=None): + """ + Gets all MV grid IDs and the area of each grid in m^2. + + Parameters + ----------- + orm : dict + Dictionary with tables to retrieve data from. + + Returns + ------- + pandas.DataFrame + Dataframe with grid ID in index and corresponding area in m^2 in column + "area_m2". + + """ + query = session.query( + orm["egon_mv_grid_district"].bus_id, + orm["egon_mv_grid_district"].area.label("area_m2"), + ) + return pd.read_sql_query(query.statement, session.bind, index_col="bus_id") + + +@session_decorator +def get_solar_capacity(scenario, grid_ids, orm=None, session=None): + """ + Gets PV capacity (rooftop and ground mounted) in MW per grid in specified scenario. + + Parameters + ----------- + scenario : str + Scenario to obtain data for. Possible options are "status_quo", "eGon2035", + and "eGon100RE". + grid_ids : list(int) + List of grid IDs to obtain data for. + orm : dict + Dictionary with tables to retrieve data from. 
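+    session : sqlalchemy.orm.Session
+        Database session. Callers do not pass this directly; the
+        ``session_decorator`` creates and injects it when the function is
+        called with an ``engine`` keyword argument instead.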
+ + Returns + ------- + pandas.DataFrame + DataFrame with grid ID in index and corresponding PV capacity in MW in column + "pv_capacity_mw". + + """ + # get PV ground mounted capacity per grid + if scenario == "status_quo": + query = ( + session.query( + orm["generators_pv_status_quo"].bus_id, + func.sum(orm["generators_pv_status_quo"].capacity).label("p_openspace"), + ) + .filter( + orm["generators_pv_status_quo"].bus_id.in_(grid_ids), + orm["generators_pv_status_quo"].site_type == "Freifläche", + orm["generators_pv_status_quo"].status == "InBetrieb", + orm["generators_pv_status_quo"].capacity <= 20, + orm["generators_pv_status_quo"].voltage_level.in_([4, 5, 6, 7]), + ) + .group_by( + orm["generators_pv_status_quo"].bus_id, + ) + ) + cap_open_space_df = pd.read_sql( + sql=query.statement, con=session.bind, index_col="bus_id" + ) + else: + query = ( + session.query( + orm["generators"].bus_id, + func.sum(orm["generators"].el_capacity).label("p_openspace"), + ) + .filter( + orm["generators"].scenario == scenario, + orm["generators"].bus_id.in_(grid_ids), + orm["generators"].voltage_level >= 4, + orm["generators"].el_capacity <= 20, + orm["generators"].carrier == "solar", + ) + .group_by( + orm["generators"].bus_id, + ) + ) + cap_open_space_df = pd.read_sql( + sql=query.statement, con=session.bind, index_col="bus_id" + ) + # get PV rooftop capacity per grid + query = ( + session.query( + orm["generators_pv_rooftop"].bus_id, + func.sum(orm["generators_pv_rooftop"].capacity).label("p_rooftop"), + ) + .filter( + orm["generators_pv_rooftop"].bus_id.in_(grid_ids), + orm["generators_pv_rooftop"].scenario == scenario, + orm["generators_pv_rooftop"].capacity <= 20, + orm["generators_pv_rooftop"].voltage_level.in_([4, 5, 6, 7]), + ) + .group_by( + orm["generators_pv_rooftop"].bus_id, + ) + ) + cap_rooftop_df = pd.read_sql( + sql=query.statement, con=session.bind, index_col="bus_id" + ) + + return ( + cap_open_space_df.join(cap_rooftop_df, how="outer") + .fillna(value=0) + .sum(axis="columns") + .to_frame("pv_capacity_mw") + ) + + +@session_decorator +def get_wind_capacity(scenario, grid_ids, orm=None, session=None): + """ + Gets wind onshore capacity in MW per grid in specified scenario. + + Parameters + ----------- + scenario : str + Scenario to obtain data for. Possible options are "status_quo", "eGon2035", + and "eGon100RE". + grid_ids : list(int) + List of grid IDs to obtain data for. + orm : dict + Dictionary with tables to retrieve data from. + + Returns + ------- + pandas.DataFrame + DataFrame with grid ID in index and corresponding Wind capacity in MW in + column "wind_capacity_mw". 
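+
+    Examples
+    --------
+    A minimal sketch, assuming a ``config`` dict with filled ``database`` and
+    ``ssh`` sections as used throughout :mod:`ego.mv_clustering` (the grid IDs
+    are purely illustrative):
+
+    >>> from ego.mv_clustering.database import (
+    ...     get_engine, register_tables_in_saio, sshtunnel)
+    >>> with sshtunnel(config=config):
+    ...     engine = get_engine(config=config)
+    ...     orm = register_tables_in_saio(engine)
+    ...     cap = get_wind_capacity("eGon2035", [1, 5], orm, engine=engine)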
+ + """ + if scenario == "status_quo": + query = ( + session.query( + orm["generators_wind_status_quo"].bus_id, + func.sum(orm["generators_wind_status_quo"].capacity).label( + "wind_capacity_mw" + ), + ) + .filter( + orm["generators_wind_status_quo"].bus_id.in_(grid_ids), + orm["generators_wind_status_quo"].site_type == "Windkraft an Land", + orm["generators_wind_status_quo"].status == "InBetrieb", + orm["generators_wind_status_quo"].capacity <= 20, + orm["generators_wind_status_quo"].voltage_level.in_([4, 5, 6, 7]), + ) + .group_by( + orm["generators_wind_status_quo"].bus_id, + ) + ) + cap_wind_df = pd.read_sql( + sql=query.statement, con=session.bind, index_col="bus_id" + ) + else: + query = ( + session.query( + orm["generators"].bus_id, + func.sum(orm["generators"].el_capacity).label("wind_capacity_mw"), + ) + .filter( + orm["generators"].scenario == scenario, + orm["generators"].bus_id.in_(grid_ids), + orm["generators"].voltage_level >= 4, + orm["generators"].el_capacity <= 20, + orm["generators"].carrier == "wind_onshore", + ) + .group_by( + orm["generators"].bus_id, + ) + ) + cap_wind_df = pd.read_sql( + sql=query.statement, con=session.bind, index_col="bus_id" + ) + return cap_wind_df + + +@session_decorator +def get_electromobility_maximum_load(scenario, grid_ids, orm=None, session=None): + """ + Parameters + ----------- + scenario : str + Scenario to obtain data for. Possible options are "status_quo", "eGon2035", + and "eGon100RE". + grid_ids : list(int) + List of grid IDs to obtain data for. + orm : dict + Dictionary with tables to retrieve data from. + + Returns + ------- + pandas.DataFrame + DataFrame with grid ID in index and corresponding maximum electromobility load + in MW in column "electromobility_max_load_mw". + + """ + if scenario == "status_quo": + return pd.DataFrame(columns=["electromobility_max_load_mw"]) + else: + load_timeseries_nested = ( + session.query( + orm["etrago_load"].bus.label("bus_id"), + orm["etrago_load_timeseries"].p_set, + ) + .join( + orm["etrago_load_timeseries"], + orm["etrago_load_timeseries"].load_id == orm["etrago_load"].load_id, + ) + .filter( + orm["etrago_load"].scn_name == f"{scenario}_lowflex", + orm["etrago_load"].carrier == "land_transport_EV", + orm["etrago_load"].bus.in_(grid_ids), + ) + ).subquery(name="load_timeseries_nested") + load_timeseries_unnested = ( + session.query( + load_timeseries_nested.c.bus_id, + func.unnest(load_timeseries_nested.c.p_set).label("p_set"), + ) + ).subquery(name="load_timeseries_unnested") + load_timeseries_maximal = ( + session.query( + load_timeseries_unnested.c.bus_id, + func.max(load_timeseries_unnested.c.p_set).label("p_set_max"), + ).group_by( + load_timeseries_unnested.c.bus_id, + ) + ).subquery(name="load_timeseries_maximal") + load_p_nom = session.query( + load_timeseries_maximal.c.bus_id, + load_timeseries_maximal.c.p_set_max.label("electromobility_max_load_mw"), + ) + return pd.read_sql( + sql=load_p_nom.statement, con=session.bind, index_col="bus_id" + ) + + +@session_decorator +def get_pth_capacity(scenario, grid_ids, orm=None, session=None): + """ + Gets PtH capacity (individual heating and district heating) in MW per grid + in specified scenario. + + Parameters + ----------- + scenario : str + Scenario to obtain data for. Possible options are "status_quo", "eGon2035", + and "eGon100RE". + grid_ids : list(int) + List of grid IDs to obtain data for. + orm : dict + Dictionary with tables to retrieve data from. 
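+    session : sqlalchemy.orm.Session
+        Database session, injected by the ``session_decorator`` in place of
+        the ``engine`` keyword argument.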
+ + Returns + ------- + pandas.DataFrame + DataFrame with grid ID in index and corresponding PtH capacity in MW in + column "pth_capacity_mw". + + """ + if scenario == "status_quo": + return pd.DataFrame(columns=["pth_capacity_mw"]) + else: + # get individual heat pump capacity + query = ( + session.query( + orm["heat_pump_capacity_individual"].mv_grid_id.label("bus_id"), + func.sum(orm["heat_pump_capacity_individual"].capacity).label( + "cap_individual" + ), + ) + .filter( + orm["heat_pump_capacity_individual"].mv_grid_id.in_(grid_ids), + orm["heat_pump_capacity_individual"].carrier == "heat_pump", + orm["heat_pump_capacity_individual"].scenario == scenario, + ) + .group_by( + orm["heat_pump_capacity_individual"].mv_grid_id, + ) + ) + cap_individual_df = pd.read_sql( + sql=query.statement, con=session.bind, index_col="bus_id" + ) + # get central heat pump and resistive heater capacity + query = ( + session.query( + orm["pth_capacity_district_heating"].bus0, + func.sum(orm["pth_capacity_district_heating"].p_nom).label("p_set"), + ) + .filter( + orm["pth_capacity_district_heating"].bus0.in_(grid_ids), + orm["pth_capacity_district_heating"].scn_name == scenario, + orm["pth_capacity_district_heating"].carrier.in_( + ["central_heat_pump", "central_resistive_heater"] + ), + orm["pth_capacity_district_heating"].p_nom <= 20.0, + ) + .group_by( + orm["pth_capacity_district_heating"].bus0, + ) + ) + cap_dh_df = pd.read_sql(sql=query.statement, con=session.bind, index_col="bus0") + return ( + cap_individual_df.join(cap_dh_df, how="outer") + .fillna(value=0) + .sum(axis="columns") + .to_frame("pth_capacity_mw") + ) diff --git a/ego/mv_clustering/mv_clustering.py b/ego/mv_clustering/mv_clustering.py new file mode 100644 index 00000000..18cd63a0 --- /dev/null +++ b/ego/mv_clustering/mv_clustering.py @@ -0,0 +1,387 @@ +# -*- coding: utf-8 -*- +# Copyright 2016-2018 Europa-Universität Flensburg, +# Flensburg University of Applied Sciences, +# Centre for Sustainable Energy Systems +# +# This program is free software; you can redistribute it and/or +# modify it under the terms of the GNU Affero General Public License as +# published by the Free Software Foundation; either version 3 of the +# License, or (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Affero General Public License for more details. +# +# You should have received a copy of the GNU Affero General Public License +# along with this program. If not, see . + +# File description +""" +This file contains all functions regarding the clustering of MV grids +""" +__copyright__ = ( + "Flensburg University of Applied Sciences, " + "Europa-Universität Flensburg, " + "Centre for Sustainable Energy Systems" +) +__license__ = "GNU Affero General Public License Version 3 (AGPL-3.0)" +__author__ = "wolf_bunke, maltesc, mltja" + +import logging +import os + +if "READTHEDOCS" not in os.environ: + import numpy as np + import pandas as pd + + from sklearn.cluster import KMeans + + import ego.mv_clustering.egon_data_io as db_io + + from ego.mv_clustering.database import ( + get_engine, + register_tables_in_saio, + sshtunnel, + ) + +logger = logging.getLogger(__name__) + + +def get_cluster_attributes(attributes_path, scenario, config=None): + """ + Determines attributes to cluster MV grids by. 
+ + Considered attributes are PV, wind onshore and PtH capacity, as well as + maximum load of EVs (in case of uncoordinated charging). All attributes are given + in MW as well as in MW per km^2. + + Data is written to csv file and returned. + + Parameters + ---------- + attributes_path : str + Path to save attributes csv to, including the file name. + scenario : str + Scenario to determine attributes for. Possible options are "status_quo", + "eGon2035", and "eGon100RE". + config : dict + Config dict. + + Returns + ------- + pandas.DataFrame + DataFrame with grid ID in index and corresponding attributes in columns: + * "area" : area of MV grid in m^2 + * "pv_capacity_mw" : PV capacity in MW + * "pv_capacity_mw_per_km2" : PV capacity in MW per km^2 + * "pv_capacity_expansion_mw" : PV expansion from status quo to given + scenario in MW + * "pv_capacity_expansion_mw_per_km2" : PV expansion from status quo to given + scenario in MW per km^2 + * "wind_capacity_mw" : wind onshore capacity in MW + * "wind_capacity_mw_per_km2" : wind onshore capacity in MW per km^2 + * "wind_capacity_expansion_mw" : wind onshore expansion from status quo to given + scenario in MW + * "wind_capacity_expansion_mw_per_km2" : wind onshore expansion from status quo + to given scenario in MW per km^2 + * "electromobility_max_load_mw" : maximum load of EVs (in case of + uncoordinated charging) in MW + * "electromobility_max_load_mw_per_km2" : maximum load of EVs (in case of + uncoordinated charging) in MW per km^2 + * "electromobility_max_load_expansion_mw" : increase in maximum load of EVs + from status quo to given scenario (in case of uncoordinated charging) in MW + * "electromobility_max_load_expansion_mw_per_km2" : increase in maximum load of + EVs from status quo to given scenario (in case of uncoordinated charging) + in MW per km^2 + * "pth_capacity_mw" : PtH capacity (for individual and district + heating) in MW + * "pth_capacity_mw_per_km2" : PtH capacity (for individual and + district heating) in MW per km^2 + * "pth_capacity_expansion_mw" : increase in PtH capacity (for individual and + district heating) from status quo to given scenario in MW + * "pth_capacity_expansion_mw_per_km2" : increase in PtH capacity (for individual + and district heating) from status quo to given scenario in MW per km^2 + + """ + # get attributes from database + with sshtunnel(config=config): + engine = get_engine(config=config) + orm = register_tables_in_saio(engine) + + grid_ids_df = db_io.get_grid_ids(engine=engine, orm=orm) + solar_capacity_df = db_io.get_solar_capacity( + scenario, grid_ids_df.index, orm, engine=engine + ) + if scenario == "status_quo": + solar_capacity_sq_df = solar_capacity_df + else: + solar_capacity_sq_df = db_io.get_solar_capacity( + "status_quo", grid_ids_df.index, orm, engine=engine + ) + wind_capacity_df = db_io.get_wind_capacity( + scenario, grid_ids_df.index, orm, engine=engine + ) + if scenario == "status_quo": + wind_capacity_sq_df = wind_capacity_df + else: + wind_capacity_sq_df = db_io.get_wind_capacity( + "status_quo", grid_ids_df.index, orm, engine=engine + ) + emob_capacity_df = db_io.get_electromobility_maximum_load( + scenario, grid_ids_df.index, orm, engine=engine + ) + if scenario == "status_quo": + emob_capacity_sq_df = emob_capacity_df + else: + emob_capacity_sq_df = db_io.get_electromobility_maximum_load( + "status_quo", grid_ids_df.index, orm, engine=engine + ) + pth_capacity_df = db_io.get_pth_capacity( + scenario, grid_ids_df.index, orm, engine=engine + ) + if scenario == "status_quo": 
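+            # for the status quo scenario the reference equals the scenario
+            # itself, so the *_expansion columns computed below are zero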
+ pth_capacity_sq_df = pth_capacity_df + else: + pth_capacity_sq_df = db_io.get_pth_capacity( + "status_quo", grid_ids_df.index, orm, engine=engine + ) + emob_rename_col = "electromobility_max_load_expansion_mw" + df = pd.concat( + [ + grid_ids_df, + solar_capacity_df, + wind_capacity_df, + emob_capacity_df, + pth_capacity_df, + solar_capacity_sq_df.rename( + columns={"pv_capacity_mw": "pv_capacity_expansion_mw"} + ), + wind_capacity_sq_df.rename( + columns={"wind_capacity_mw": "wind_capacity_expansion_mw"} + ), + emob_capacity_sq_df.rename( + columns={"electromobility_max_load_mw": emob_rename_col} + ), + pth_capacity_sq_df.rename( + columns={"pth_capacity_mw": "pth_capacity_expansion_mw"} + ), + ], + axis="columns", + ).fillna(0) + + # calculate expansion values + df["pv_capacity_expansion_mw"] = ( + df["pv_capacity_mw"] - df["pv_capacity_expansion_mw"] + ) + df["wind_capacity_expansion_mw"] = ( + df["wind_capacity_mw"] - df["wind_capacity_expansion_mw"] + ) + df["electromobility_max_load_expansion_mw"] = ( + df["electromobility_max_load_mw"] - df["electromobility_max_load_expansion_mw"] + ) + df["pth_capacity_expansion_mw"] = ( + df["pth_capacity_mw"] - df["pth_capacity_expansion_mw"] + ) + + # calculate relative values + df["pv_capacity_mw_per_km2"] = df["pv_capacity_mw"] / (df["area_m2"] / 1e6) + df["wind_capacity_mw_per_km2"] = df["wind_capacity_mw"] / (df["area_m2"] / 1e6) + df["electromobility_max_load_mw_per_km2"] = df["electromobility_max_load_mw"] / ( + df["area_m2"] / 1e6 + ) + df["pth_capacity_mw_per_km2"] = df["pth_capacity_mw"] / (df["area_m2"] / 1e6) + df["pv_capacity_expansion_mw_per_km2"] = df["pv_capacity_expansion_mw"] / ( + df["area_m2"] / 1e6 + ) + df["wind_capacity_expansion_mw_per_km2"] = df["wind_capacity_expansion_mw"] / ( + df["area_m2"] / 1e6 + ) + df["electromobility_max_load_expansion_mw_per_km2"] = df[ + "electromobility_max_load_expansion_mw" + ] / (df["area_m2"] / 1e6) + df["pth_capacity_expansion_mw_per_km2"] = df["pth_capacity_expansion_mw"] / ( + df["area_m2"] / 1e6 + ) + + # write to csv + df.to_csv(attributes_path) + return df + + +def mv_grid_clustering(cluster_attributes_df, working_grids=None, config=None): + """ + Clusters the MV grids based on the attributes, for a given number of MV grids. + + Parameters + ---------- + cluster_attributes_df : pandas.DataFrame + Dataframe with data to cluster grids by. Columns contain the attributes to + cluster and index contains the MV grid IDs. + working_grids : pandas.DataFrame + DataFrame with information on whether MV grid can be used for calculations. + Index of the dataframe contains the MV grid ID and boolean value in column + "working" specifies whether respective grid can be used. + config : dict + Config dict. 
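+        The values read here are ``config["eGo"]["random_seed"]`` and
+        ``config["eDisGo"]["n_clusters"]``.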
+ + Returns + ------- + pandas.DataFrame + Dataframe containing the clustered MV grids and their weightings + + """ + random_seed = config["eGo"]["random_seed"] + n_clusters = config["eDisGo"]["n_clusters"] + + # Norm attributes + for attribute in cluster_attributes_df: + attribute_max = cluster_attributes_df[attribute].max() + cluster_attributes_df[attribute] = ( + cluster_attributes_df[attribute] / attribute_max + ) + + # Starting KMeans clustering + logger.info( + f"Used clustering attributes: {cluster_attributes_df.columns.to_list()}" + ) + kmeans = KMeans(n_clusters=n_clusters, random_state=random_seed) + data_array = cluster_attributes_df.to_numpy() + labels = kmeans.fit_predict(data_array) + centroids = kmeans.cluster_centers_ + + result_df = pd.DataFrame(index=cluster_attributes_df.index) + result_df["label"] = labels + # For each sample, calculate the distance to its assigned centroid. + result_df["centroid_distance"] = np.linalg.norm( + data_array - centroids[labels], axis=1 + ) + result_df["representative"] = False + + if working_grids is None: + result_df["working"] = True + else: + result_df["working"] = result_df.join(working_grids).fillna(False)["working"] + + failing_labels = [] + for label in np.unique(labels): + try: + rep = result_df.loc[ + result_df["working"] & (result_df["label"] == label), + "centroid_distance", + ].idxmin() + rep_orig = result_df.loc[ + result_df["label"] == label, "centroid_distance" + ].idxmin() + result_df.loc[rep, "representative"] = True + result_df.loc[rep, "representative_orig"] = rep_orig + except ValueError: + failing_labels.append(label) + + if len(failing_labels) > 0: + logger.warning( + f"There are {len(failing_labels)} clusters for which no representative " + f"could be determined." + ) + + n_grids = result_df.shape[0] + df_data = [] + columns = [ + "representative", + "n_grids_per_cluster", + "relative_representation", + "represented_grids", + "representative_orig", + ] + for label in np.unique(labels): + represented_grids = result_df[result_df["label"] == label].index.to_list() + n_grids_per_cluster = len(represented_grids) + relative_representation = (n_grids_per_cluster / n_grids) * 100 + try: + representative = result_df[ + result_df["representative"] & (result_df["label"] == label) + ].index.values[0] + except IndexError: + representative = False + try: + representative_orig = result_df[ + result_df["representative"] & (result_df["label"] == label) + ].representative_orig.values[0] + representative_orig = ( + True if representative == representative_orig else False + ) + except IndexError: + representative_orig = False + + row = [ + representative, + n_grids_per_cluster, + relative_representation, + represented_grids, + representative_orig, + ] + df_data.append(row) + + cluster_df = pd.DataFrame(df_data, index=np.unique(labels), columns=columns) + cluster_df.index.name = "cluster_id" + + return cluster_df.sort_values("n_grids_per_cluster", ascending=False) + + +def cluster_workflow(config=None): + """ + Get cluster attributes per grid if needed and conduct MV grid clustering. + + Parameters + ---------- + config : dict + Config dict from config json. Can be obtained by calling + ego.tools.utilities.get_scenario_setting(jsonpath=config_path). + + Returns + -------- + pandas.DataFrame + DataFrame with clustering results. 
Columns are "representative" containing + the grid ID of the representative grid, "n_grids_per_cluster" containing the + number of grids that are represented, "relative_representation" containing the + percentage of grids represented, "represented_grids" containing a list of + grid IDs of all represented grids and "representative_orig" containing + information on whether the representative is the actual cluster center (in which + case this value is True) or chosen because the grid in the cluster center is + not a working grid. + + """ + # determine cluster attributes + logger.info("Determine cluster attributes.") + attributes_path = os.path.join( + config["eDisGo"]["results"], "mv_grid_cluster_attributes.csv" + ) + if not os.path.exists(config["eDisGo"]["results"]): + os.makedirs(config["eDisGo"]["results"]) + scenario = config["eTraGo"]["scn_name"] + cluster_attributes_df = get_cluster_attributes( + attributes_path=attributes_path, scenario=scenario, config=config + ) + + # select attributes to cluster by + cluster_attributes_df = cluster_attributes_df[ + config["eDisGo"]["cluster_attributes"] + ] + working_grids_path = os.path.join( + config["eDisGo"]["grid_path"], "working_grids.csv" + ) + if os.path.isfile(working_grids_path): + working_grids = pd.read_csv(working_grids_path, index_col=0) + else: + raise FileNotFoundError( + "working_grids.csv is missing. Cannot conduct MV grid clustering." + ) + # conduct MV grid clustering + cluster_df = mv_grid_clustering( + cluster_attributes_df, working_grids=working_grids, config=config + ) + cluster_results_path = os.path.join( + config["eDisGo"]["results"], "mv_grid_cluster_results_new.csv" + ) + cluster_df.to_csv(cluster_results_path) + return cluster_df diff --git a/ego/run_test.py b/ego/run_test.py index 23c020f3..c9d016c5 100644 --- a/ego/run_test.py +++ b/ego/run_test.py @@ -1,32 +1,37 @@ # -*- coding: utf-8 -*- -from datetime import datetime -from tools.io import eGo +import os import sys -from pycallgraph import PyCallGraph -from pycallgraph.output import GraphvizOutput -from pycallgraph import Config -import pandas as pd + +from datetime import datetime + import matplotlib.pyplot as plt -import os +import pandas as pd + +from pycallgraph import Config, PyCallGraph +from pycallgraph.output import GraphvizOutput +from tools.io import eGo from tools.utilities import define_logging -logger = define_logging(name='ego') + +logger = define_logging(name="ego") def ego_testing(ego): - """ Call and test all ego Funktion - """ + """Call and test all ego Funktion""" # full networks try: logger.info("ego.etrago.network: {} ".format(ego.etrago.network)) - logger.info("ego.etrago.disaggregated_network: {} ".format( - ego.etrago.disaggregated_network)) + logger.info( + "ego.etrago.disaggregated_network: {} ".format( + ego.etrago.disaggregated_network + ) + ) # aggregated results logger.info("Testing of aggregated results ego.etrago. 
") - logger.info("storage_investment_costs: {} ".format( - ego.etrago.storage_investment_costs)) - logger.info("storage_charges: {} ".format( - ego.etrago.storage_charges)) + logger.info( + "storage_investment_costs: {} ".format(ego.etrago.storage_investment_costs) + ) + logger.info("storage_charges: {} ".format(ego.etrago.storage_charges)) ego.etrago.operating_costs ego.etrago.generator @@ -50,39 +55,39 @@ def ego_testing(ego): logger.info("eTraGo failed testing") # eDisGo try: - logger.info("ego.edisgo: {} ".format( - ego.edisgo)) + logger.info("ego.edisgo: {} ".format(ego.edisgo)) except: logger.info("ego.ego.edisgo failed testing") try: - logger.info("ego.edisgo.network: {} ".format( - ego.edisgo.network)) + logger.info("ego.edisgo.network: {} ".format(ego.edisgo.network)) except: logger.info("ego.edisgo.network failed testing") try: - logger.info("ego.edisgo.grid_investment_costs: {} ".format( - ego.edisgo.grid_investment_costs)) + logger.info( + "ego.edisgo.grid_investment_costs: {} ".format( + ego.edisgo.grid_investment_costs + ) + ) except: logger.info("ego.edisgo.grid_investment_costs failed testing") try: - logger.info("ego.edisgo.grid_choice: {} ".format( - ego.edisgo.grid_choice)) + logger.info("ego.edisgo.grid_choice: {} ".format(ego.edisgo.grid_choice)) except: logger.info("ego.edisgo.grid_choice failed testing") try: - logger.info("ego.edisgo.successfull_grids: {} ".format( - ego.edisgo.successfull_grids)) + logger.info( + "ego.edisgo.successfull_grids: {} ".format(ego.edisgo.successfull_grids) + ) except: logger.info("ego.edisgo.successfull_grids failed testing") # eGo - logger.info("ego.total_investment_costs: {} ".format( - ego.total_investment_costs)) - logger.info("ego.total_operation_costs: {} ".format( - ego.total_operation_costs)) + logger.info("ego.total_investment_costs: {} ".format(ego.total_investment_costs)) + logger.info("ego.total_operation_costs: {} ".format(ego.total_operation_costs)) # ego plot functions try: ego.plot_total_investment_costs( - filename="results/plot_total_investment_costs.pdf") + filename="results/plot_total_investment_costs.pdf" + ) except: logger.info("ego.plot_total_investment_costs failed testing") try: @@ -102,39 +107,43 @@ def ego_testing(ego): except: logger.info(" plot_edisgo_cluster failed testing") try: - ego.plot_line_expansion(column='investment_costs', - filename="results/investment_costs.pdf") + ego.plot_line_expansion( + column="investment_costs", filename="results/investment_costs.pdf" + ) except: logger.info(" plot_line_expansion failed testing") try: - ego.plot_line_expansion(column='overnight_costs', - filename="results/overnight_costs.pdf") + ego.plot_line_expansion( + column="overnight_costs", filename="results/overnight_costs.pdf" + ) except: logger.info(" plot_line_expansion failed testing") try: - ego.plot_line_expansion(column='s_nom_expansion', - filename="results/s_nom_expansion.pdf") + ego.plot_line_expansion( + column="s_nom_expansion", filename="results/s_nom_expansion.pdf" + ) except: logger.info(" plot_line_expansion failed testing") try: - ego.plot_storage_expansion(column='overnight_costs', - filename="results/storage_capital_investment.pdf") + ego.plot_storage_expansion( + column="overnight_costs", filename="results/storage_capital_investment.pdf" + ) except: logger.info(" plot_storage_expansion failed testing") def main(): - logger.info('Start calculation') + logger.info("Start calculation") graphviz = GraphvizOutput() date = str(datetime.now().strftime("%Y-%m-%d-%H-%M-%S")) - graphviz.output_file = 
'results/'+str(date)+'_basic_process_plot.png' + graphviz.output_file = "results/" + str(date) + "_basic_process_plot.png" logger.info("Time: {} ".format(date)) with PyCallGraph(output=graphviz, config=Config(groups=True)): - ego = eGo(jsonpath='scenario_setting_local.json') - logger.info('Start testing') + ego = eGo(jsonpath="scenario_setting_local.json") + logger.info("Start testing") ego_testing(ego) # object size @@ -143,5 +152,5 @@ def main(): logger.info("Time: {} ".format(str(datetime.now()))) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ego/scenario_setting.json b/ego/scenario_setting.json index a996e105..768fea97 100644 --- a/ego/scenario_setting.json +++ b/ego/scenario_setting.json @@ -3,60 +3,158 @@ "eTraGo": true, "eDisGo": true, "csv_import_eTraGo": false, - "csv_import_eDisGo": false + "csv_import_eDisGo": false, + "random_seed": 42 }, "eTraGo": { - "db": "oedb", - "gridversion": "v0.4.5", - "method": "lopf", - "pf_post_lopf": true, - "start_snapshot": 1000, - "end_snapshot" : 1005, + "db": "egon-data", + "gridversion": null, + "method": { + "type": "lopf", + "n_iter": 4, + "pyomo": true + }, + "pf_post_lopf": { + "active": true, + "add_foreign_lopf": true, + "q_allocation": "p_nom" + }, + "start_snapshot": 1, + "end_snapshot": 2, "solver": "gurobi", - "solver_options":{}, - "scn_name": "eGo 100", + "solver_options": { + "BarConvTol": 1e-05, + "FeasibilityTol": 1e-05, + "method": 2, + "crossover": 0, + "logFile": "solver_etragos.log", + "threads": 4 + }, + "model_formulation": "kirchhoff", + "scn_name": "eGon2035", "scn_extension": null, "scn_decommissioning": null, - "lpfile": false, - "csv_export": "results/your_results", - "db_export": false, - "extendable": ["storage", "network"], + "lpfile": false, + "csv_export": "test", + "extendable": { + "extendable_components": [ + "as_in_db" + ], + "upper_bounds_grid": { + "grid_max_D": null, + "grid_max_abs_D": { + "380": { + "i": 1020, + "wires": 4, + "circuits": 4 + }, + "220": { + "i": 1020, + "wires": 4, + "circuits": 4 + }, + "110": { + "i": 1020, + "wires": 4, + "circuits": 2 + }, + "dc": 0 + }, + "grid_max_foreign": 4, + "grid_max_abs_foreign": null + } + }, "generator_noise": 789456, - "minimize_loading": false, - "ramp_limits": false, - "extra_functionality": null, - "network_clustering_kmeans": 10, - "load_cluster": false, + "extra_functionality": {}, + "network_clustering": { + "random_state": 42, + "active": true, + "method": "kmedoids-dijkstra", + "n_clusters_AC": 30, + "cluster_foreign_AC": false, + "method_gas": "kmedoids-dijkstra", + "n_clusters_gas": 20, + "cluster_foreign_gas": false, + "k_busmap": false, + "kmeans_gas_busmap": false, + "line_length_factor": 1, + "remove_stubs": false, + "use_reduced_coordinates": false, + "bus_weight_tocsv": null, + "bus_weight_fromcsv": null, + "gas_weight_tocsv": null, + "gas_weight_fromcsv": null, + "n_init": 10, + "max_iter": 100, + "tol": 1e-06, + "CPU_cores": 4 + }, + "sector_coupled_clustering": { + "active": true, + "carrier_data": { + "central_heat": { + "base": [ + "CH4", + "AC" + ], + "strategy": "simultaneous" + } + } + }, "network_clustering_ehv": false, "disaggregation": "uniform", - "snapshot_clustering": false, - "parallelisation": false, + "snapshot_clustering": { + "active": false, + "method": "segmentation", + "extreme_periods": null, + "how": "daily", + "storage_constraints": "soc_constraints", + "n_clusters": 5, + "n_segments": 5 + }, "skip_snapshots": false, - "line_grouping": false, - "branch_capacity_factor": {"HV": 0.5, 
"eHV" : 0.7}, + "dispatch_disaggregation": false, + "branch_capacity_factor": { + "HV": 0.5, + "eHV": 0.7 + }, "load_shedding": false, - "foreign_lines" :{"carrier": "AC", "capacity": "osmTGmod"}, - "comments": "" - }, + "foreign_lines": { + "carrier": "AC", + "capacity": "osmTGmod" + }, + "comments": null +}, "eDisGo": { - "db": "oedb", - "gridversion": "v0.4.5", - "ding0_files": "/path/to_your/.dingo/grids", + "grid_path": "/path/to_your/.dingo/grids", "choice_mode": "cluster", - "cluster_attributes":["farthest_node", "wind_cap", "solar_cap", "extended_storage"], + "cluster_attributes":["pv_capacity_expansion_mw_per_km2", "wind_capacity_expansion_mw_per_km2", "electromobility_max_load_expansion_mw_per_km2", "pth_capacity_expansion_mw_per_km2"], "only_cluster": false, "manual_grids": [], - "no_grids": 2, + "n_clusters": 2, "parallelization":true, "max_calc_time": 0.5, "max_workers":2, - "initial_reinforcement":true, - "apply_curtailment":true, - "curtailment_voltage_threshold": 0, - "storage_distribution":true, "max_cos_phi_renewable": 0.9, "results": "results/another_result", "solver": "gurobi", - "timesteps_pfa": "snapshot_analysis" - } + "tasks": ["1_setup_grid", "2_specs_overlying_grid", "3_temporal_complexity_reduction", "4_optimisation", "5_grid_reinforcement"], + "gridversion": "v0.4.5" + }, + "database": { + "database_name": "", + "host": "127.0.0.1", + "port": "59700", + "user": "", + "password": "" + }, + "ssh": { + "enabled": true, + "user": "", + "ip": "", + "port": "", + "local_address": "127.0.0.1", + "local_port": "59700" + }, + "external_config": "~/.ego/secondary_ego_config.json" } diff --git a/ego/tools/economics.py b/ego/tools/economics.py index b430b386..a04e57d9 100644 --- a/ego/tools/economics.py +++ b/ego/tools/economics.py @@ -22,26 +22,31 @@ """ import io -import pkgutil -import os import logging -logger = logging.getLogger('ego') +import os +import pkgutil -if not 'READTHEDOCS' in os.environ: - import pandas as pd +logger = logging.getLogger("ego") + +if not "READTHEDOCS" in os.environ: import numpy as np - from ego.tools.utilities import get_time_steps + import pandas as pd + from etrago.tools.utilities import geolocation_buses -__copyright__ = "Flensburg University of Applied Sciences, Europa-Universität"\ + from ego.tools.utilities import get_time_steps + +__copyright__ = ( + "Flensburg University of Applied Sciences, Europa-Universität" "Flensburg, Centre for Sustainable Energy Systems" +) __license__ = "GNU Affero General Public License Version 3 (AGPL-3.0)" __author__ = "wolfbunke" # calculate annuity per time step or periode def annuity_per_period(capex, n, wacc, t, p): - """ Calculate per given period + """Calculate per given period Parameters ---------- @@ -63,7 +68,7 @@ def annuity_per_period(capex, n, wacc, t, p): def edisgo_convert_capital_costs(overnight_cost, t, p, json_file): - """ Get scenario and calculation specific annuity cost by given capital + """Get scenario and calculation specific annuity cost by given capital costs and lifetime. 
@@ -97,21 +102,20 @@ def edisgo_convert_capital_costs(overnight_cost, t, p, json_file): # https://github.com/openego/eTraGo/blob/dev/etrago/tools/utilities.py#L651 # Calculate present value of an annuity (PVA) - PVA = (1 / p) - (1 / (p*(1 + p) ** t)) + PVA = (1 / p) - (1 / (p * (1 + p) ** t)) year = 8760 # get period of calculation - period = (json_file['eTraGo']['end_snapshot'] - - json_file['eTraGo']['start_snapshot']) + period = json_file["eTraGo"]["end_snapshot"] - json_file["eTraGo"]["start_snapshot"] # calculation of capital_cost - annuity_cost = (overnight_cost / (PVA * (year/(period+1)))) + annuity_cost = overnight_cost / (PVA * (year / (period + 1))) return annuity_cost def etrago_convert_overnight_cost(annuity_cost, json_file, t=40, p=0.05): - """ Get annuity cost of simulation and calculation total + """Get annuity cost of simulation and calculation total ``overnight_costs`` by given capital costs and lifetime. Parameters @@ -146,21 +150,20 @@ def etrago_convert_overnight_cost(annuity_cost, json_file, t=40, p=0.05): # https://github.com/openego/eTraGo/blob/dev/etrago/tools/utilities.py#L651 # Calculate present value of an annuity (PVA) - PVA = (1 / p) - (1 / (p*(1 + p) ** t)) + PVA = (1 / p) - (1 / (p * (1 + p) ** t)) year = 8760 # get period of calculation - period = (json_file['eTraGo']['end_snapshot'] - - json_file['eTraGo']['start_snapshot']) + period = json_file["eTraGo"]["end_snapshot"] - json_file["eTraGo"]["start_snapshot"] # calculation of overnight_cost - overnight_cost = annuity_cost*(PVA * (year/(period+1))) + overnight_cost = annuity_cost * (PVA * (year / (period + 1))) return overnight_cost def etrago_operating_costs(network): - """ Function to get all operating costs of eTraGo. + """Function to get all operating costs of eTraGo. 
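+
+    Costs are aggregated per carrier and by voltage level ("ehv"/"hv");
+    transformer and line losses are only included when a power flow has been
+    run after the LOPF (``pf_post_lopf``).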
Parameters ---------- @@ -197,77 +200,88 @@ def etrago_operating_costs(network): etg = network # get v_nom - _bus = pd.DataFrame(etg.buses['v_nom']) + _bus = pd.DataFrame(etg.buses["v_nom"]) _bus.index.name = "name" _bus.reset_index(level=0, inplace=True) # Add voltage level idx = etg.generators.index - etg.generators = pd.merge(etg.generators, _bus, - left_on='bus', right_on='name') + etg.generators = pd.merge(etg.generators, _bus, left_on="bus", right_on="name") etg.generators.index = idx - etg.generators['voltage_level'] = 'unknown' + etg.generators["voltage_level"] = "unknown" # add ehv - ix_ehv = etg.generators[etg.generators['v_nom'] >= 380].index - etg.generators.set_value(ix_ehv, 'voltage_level', 'ehv') + ix_ehv = etg.generators[etg.generators["v_nom"] >= 380].index + etg.generators.set_value(ix_ehv, "voltage_level", "ehv") # add hv - ix_hv = etg.generators[(etg.generators['v_nom'] <= 220) & - (etg.generators['v_nom'] >= 110)].index - etg.generators.set_value(ix_hv, 'voltage_level', 'hv') + ix_hv = etg.generators[ + (etg.generators["v_nom"] <= 220) & (etg.generators["v_nom"] >= 110) + ].index + etg.generators.set_value(ix_hv, "voltage_level", "hv") # get voltage_level index - ix_by_ehv = etg.generators[etg.generators.voltage_level == 'ehv'].index - ix_by_hv = etg.generators[etg.generators.voltage_level == 'hv'].index - ix_slack = etg.generators[etg.generators.control != 'Slack'].index + ix_by_ehv = etg.generators[etg.generators.voltage_level == "ehv"].index + ix_by_hv = etg.generators[etg.generators.voltage_level == "hv"].index + ix_slack = etg.generators[etg.generators.control != "Slack"].index - ix_by_ehv = ix_slack.join(ix_by_ehv, how='left', level=None, - return_indexers=False, sort=False) - ix_by_hv = ix_slack.join(ix_by_hv, how='right', level=None, - return_indexers=False, sort=False) + ix_by_ehv = ix_slack.join( + ix_by_ehv, how="left", level=None, return_indexers=False, sort=False + ) + ix_by_hv = ix_slack.join( + ix_by_hv, how="right", level=None, return_indexers=False, sort=False + ) # groupby v_nom ehv - operating_costs_ehv = (etg.generators_t.p[ix_by_ehv] * - etg.generators. marginal_cost[ix_by_ehv]) - operating_costs_ehv = operating_costs_ehv.groupby( - etg.generators.carrier, axis=1).sum().sum() + operating_costs_ehv = ( + etg.generators_t.p[ix_by_ehv] * etg.generators.marginal_cost[ix_by_ehv] + ) + operating_costs_ehv = ( + operating_costs_ehv.groupby(etg.generators.carrier, axis=1).sum().sum() + ) operating_costs = pd.DataFrame(operating_costs_ehv) - operating_costs.columns = ['operation_costs'] - operating_costs['voltage_level'] = 'ehv' + operating_costs.columns = ["operation_costs"] + operating_costs["voltage_level"] = "ehv" # groupby v_nom ehv - operating_costs_hv = (etg.generators_t.p[ix_by_hv] * - etg.generators. 
marginal_cost[ix_by_hv])
-    operating_costs_hv = operating_costs_hv.groupby(
-        etg.generators.carrier, axis=1).sum().sum()
+    operating_costs_hv = (
+        etg.generators_t.p[ix_by_hv] * etg.generators.marginal_cost[ix_by_hv]
+    )
+    operating_costs_hv = (
+        operating_costs_hv.groupby(etg.generators.carrier, axis=1).sum().sum()
+    )
 
     opt_costs_hv = pd.DataFrame(operating_costs_hv)
-    opt_costs_hv.columns = ['operation_costs']
-    opt_costs_hv['voltage_level'] = 'hv'
+    opt_costs_hv.columns = ["operation_costs"]
+    opt_costs_hv["voltage_level"] = "hv"
 
     # add df
     operating_costs = operating_costs.append(opt_costs_hv)
 
-    tpc_ehv = pd.DataFrame(operating_costs_ehv.sum(),
-                           columns=['operation_costs'],
-                           index=['total_power_costs'])
-    tpc_ehv['voltage_level'] = 'ehv'
+    tpc_ehv = pd.DataFrame(
+        operating_costs_ehv.sum(),
+        columns=["operation_costs"],
+        index=["total_power_costs"],
+    )
+    tpc_ehv["voltage_level"] = "ehv"
 
     operating_costs = operating_costs.append(tpc_ehv)
 
-    tpc_hv = pd.DataFrame(operating_costs_hv.sum(),
-                          columns=['operation_costs'],
-                          index=['total_power_costs'])
-    tpc_hv['voltage_level'] = 'hv'
+    tpc_hv = pd.DataFrame(
+        operating_costs_hv.sum(),
+        columns=["operation_costs"],
+        index=["total_power_costs"],
+    )
+    tpc_hv["voltage_level"] = "hv"
 
     operating_costs = operating_costs.append(tpc_hv)
 
     # add Grid and Transform Costs
     try:
-        etg.lines['voltage_level'] = 'unknown'
-        ix_ehv = etg.lines[etg.lines['v_nom'] >= 380].index
-        etg.lines.set_value(ix_ehv, 'voltage_level', 'ehv')
-        ix_hv = etg.lines[(etg.lines['v_nom'] <= 220) &
-                          (etg.lines['v_nom'] >= 110)].index
-        etg.lines.set_value(ix_hv, 'voltage_level', 'hv')
+        etg.lines["voltage_level"] = "unknown"
+        ix_ehv = etg.lines[etg.lines["v_nom"] >= 380].index
+        etg.lines.set_value(ix_ehv, "voltage_level", "ehv")
+        ix_hv = etg.lines[
+            (etg.lines["v_nom"] <= 220) & (etg.lines["v_nom"] >= 110)
+        ].index
+        etg.lines.set_value(ix_hv, "voltage_level", "hv")
 
         losses_total = sum(etg.lines.losses) + sum(etg.transformers.losses)
         losses_costs = losses_total * np.average(etg.buses_t.marginal_price)
 
@@ -277,24 +291,26 @@ def etrago_operating_costs(network):
 #                 axis=0).sum().reset_index()
 
     except AttributeError:
-        logger.info("No Transform and Line losses are calcualted! \n"
-                    "Use eTraGo pf_post_lopf method")
+        logger.info(
+            "No Transform and Line losses are calculated! \n"
+            "Use eTraGo pf_post_lopf method"
+        )
 
         losses_total = 0
         losses_costs = 0
 
     # total grid losses costs
-    tgc = pd.DataFrame(losses_costs,
-                       columns=['operation_costs'],
-                       index=['total_grid_losses'])
-    tgc['voltage_level'] = 'ehv/hv'
+    tgc = pd.DataFrame(
+        losses_costs, columns=["operation_costs"], index=["total_grid_losses"]
+    )
+    tgc["voltage_level"] = "ehv/hv"
 
     operating_costs = operating_costs.append(tgc)
 
-    #power_price = power_price.T.iloc[0]
+    # power_price = power_price.T.iloc[0]
 
     return operating_costs
 
 
 def etrago_grid_investment(network, json_file, session):
-    """ Function to get grid expantion costs from eTraGo
+    """Function to get grid expansion costs from eTraGo
 
     Parameters
     ----------
@@ -329,53 +345,60 @@
     """
 
     # check settings for extendable
-    if 'network' not in json_file['eTraGo']['extendable']:
-        logger.info("The optimizition was not using parameter"
-                    " 'extendable': network \n"
-                    "No grid expantion costs from etrago")
+    if "network" not in json_file["eTraGo"]["extendable"]:
+        logger.info(
+            "The optimization was not using parameter"
+            " 'extendable': network \n"
+            "No grid expansion costs from etrago"
+        )
 
-    if 'network' in json_file['eTraGo']['extendable']:
+    if "network" in json_file["eTraGo"]["extendable"]:
         network = geolocation_buses(network, session)
 
         # differentiation by country_code
-        network.lines['differentiation'] = 'none'
-
-        network.lines['bus0_c'] = network.lines.bus0.map(
-            network.buses.country_code)
-        network.lines['bus1_c'] = network.lines.bus1.map(
-            network.buses.country_code)
+        network.lines["differentiation"] = "none"
+
+        network.lines["bus0_c"] = network.lines.bus0.map(network.buses.country_code)
+        network.lines["bus1_c"] = network.lines.bus1.map(network.buses.country_code)
 
         for idx, val in network.lines.iterrows():
 
-            check = val['bus0_c'] + val['bus1_c']
+            check = val["bus0_c"] + val["bus1_c"]
 
             if "DE" in check:
-                network.lines['differentiation'][idx] = 'cross-border'
+                network.lines["differentiation"][idx] = "cross-border"
 
             if "DEDE" in check:
-                network.lines['differentiation'][idx] = 'domestic'
+                network.lines["differentiation"][idx] = "domestic"
 
             if "DE" not in check:
-                network.lines['differentiation'][idx] = 'foreign'
-
-        lines = network.lines[['v_nom', 'capital_cost', 's_nom',
-                               's_nom_min', 's_nom_opt', 'differentiation']
-                              ].reset_index()
-
-        lines['s_nom_expansion'] = lines.s_nom_opt.subtract(
-            lines.s_nom, axis='index')
-        lines['capital_cost'] = lines.s_nom_expansion.multiply(
-            lines.capital_cost, axis='index')
-        lines['number_of_expansion'] = lines.s_nom_expansion > 0.0
-        lines['time_step'] = get_time_steps(json_file)
+                network.lines["differentiation"][idx] = "foreign"
+
+        lines = network.lines[
+            [
+                "v_nom",
+                "capital_cost",
+                "s_nom",
+                "s_nom_min",
+                "s_nom_opt",
+                "differentiation",
+            ]
+        ].reset_index()
+
+        lines["s_nom_expansion"] = lines.s_nom_opt.subtract(lines.s_nom, axis="index")
+        lines["capital_cost"] = lines.s_nom_expansion.multiply(
+            lines.capital_cost, axis="index"
+        )
+        lines["number_of_expansion"] = lines.s_nom_expansion > 0.0
+        lines["time_step"] = get_time_steps(json_file)
 
         # add v_level
-        lines['voltage_level'] = 'unknown'
+        lines["voltage_level"] = "unknown"
 
-        ix_ehv = lines[lines['v_nom'] >= 380].index
-        lines.set_value(ix_ehv, 'voltage_level', 'ehv')
+        ix_ehv = lines[lines["v_nom"] >= 380].index
+        lines.set_value(ix_ehv, "voltage_level", "ehv")
 
-        ix_hv = lines[(lines['v_nom'] <= 220) & (lines['v_nom'] >= 110)].index
-        lines.set_value(ix_hv, 'voltage_level', 'hv')
+        ix_hv 
= lines[(lines["v_nom"] <= 220) & (lines["v_nom"] >= 110)].index + lines.set_value(ix_hv, "voltage_level", "hv") # based on eTraGo Function: # https://github.com/openego/eTraGo/blob/dev/etrago/tools/utilities.py#L651 @@ -383,48 +406,58 @@ def etrago_grid_investment(network, json_file, session): trafo = pd.DataFrame() # get costs of transfomers - if json_file['eTraGo']['network_clustering_kmeans'] == False: + if json_file["eTraGo"]["network_clustering_kmeans"] == False: - network.transformers['differentiation'] = 'none' + network.transformers["differentiation"] = "none" - trafos = network.transformers[['v_nom0', 'v_nom1', 'capital_cost', - 's_nom_extendable', 's_nom', - 's_nom_opt']] + trafos = network.transformers[ + [ + "v_nom0", + "v_nom1", + "capital_cost", + "s_nom_extendable", + "s_nom", + "s_nom_opt", + ] + ] trafos.columns.name = "" trafos.index.name = "" trafos.reset_index() - trafos['s_nom_extendable'] = trafos.s_nom_opt.subtract( - trafos.s_nom, axis='index') + trafos["s_nom_extendable"] = trafos.s_nom_opt.subtract( + trafos.s_nom, axis="index" + ) - trafos['capital_cost'] = trafos.s_nom_extendable.multiply( - trafos.capital_cost, axis='index') - trafos['number_of_expansion'] = trafos.s_nom_extendable > 0.0 - trafos['time_step'] = get_time_steps(json_file) + trafos["capital_cost"] = trafos.s_nom_extendable.multiply( + trafos.capital_cost, axis="index" + ) + trafos["number_of_expansion"] = trafos.s_nom_extendable > 0.0 + trafos["time_step"] = get_time_steps(json_file) # add v_level - trafos['voltage_level'] = 'unknown' + trafos["voltage_level"] = "unknown" # TODO check - ix_ehv = trafos[trafos['v_nom0'] >= 380].index - trafos.set_value(ix_ehv, 'voltage_level', 'ehv') + ix_ehv = trafos[trafos["v_nom0"] >= 380].index + trafos.set_value(ix_ehv, "voltage_level", "ehv") - ix_hv = trafos[(trafos['v_nom0'] <= 220) & - (trafos['v_nom0'] >= 110)].index - trafos.set_value(ix_hv, 'voltage_level', 'hv') + ix_hv = trafos[(trafos["v_nom0"] <= 220) & (trafos["v_nom0"] >= 110)].index + trafos.set_value(ix_hv, "voltage_level", "hv") # aggregate trafo - trafo = trafos[['voltage_level', - 'capital_cost', - 'differentiation']].groupby(['differentiation', - 'voltage_level'] - ).sum().reset_index() + trafo = ( + trafos[["voltage_level", "capital_cost", "differentiation"]] + .groupby(["differentiation", "voltage_level"]) + .sum() + .reset_index() + ) # aggregate lines - line = lines[['voltage_level', - 'capital_cost', - 'differentiation']].groupby(['differentiation', - 'voltage_level'] - ).sum().reset_index() + line = ( + lines[["voltage_level", "capital_cost", "differentiation"]] + .groupby(["differentiation", "voltage_level"]) + .sum() + .reset_index() + ) # merge trafos and line frames = [line, trafo] @@ -455,117 +488,113 @@ def edisgo_grid_investment(edisgo, json_file): t = 40 p = 0.05 - logger.info('For all components T={} and p={} is used'.format(t, p)) + logger.info("For all components T={} and p={} is used".format(t, p)) - costs = pd.DataFrame( - columns=['voltage_level', 'annuity_costs', 'overnight_costs']) + costs = pd.DataFrame(columns=["voltage_level", "annuity_costs", "overnight_costs"]) # Loop through all calculated eDisGo grids for key, value in edisgo.network.items(): - if not hasattr(value, 'network'): - logger.warning('No results available for grid {}'.format(key)) + if not hasattr(value, "network"): + logger.warning("No results available for grid {}".format(key)) continue # eDisGo results (overnight costs) for this grid costs_single = value.network.results.grid_expansion_costs - 
costs_single.rename( - columns={'total_costs': 'overnight_costs'}, - inplace=True) + costs_single.rename(columns={"total_costs": "overnight_costs"}, inplace=True) # continue if this grid was not reinforced - if (costs_single['overnight_costs'].sum() == 0.): - logger.info('No expansion costs for grid {}'.format(key)) + if costs_single["overnight_costs"].sum() == 0.0: + logger.info("No expansion costs for grid {}".format(key)) continue # Overnight cost translated in annuity costs - costs_single['capital_cost'] = edisgo_convert_capital_costs( - costs_single['overnight_costs'], - t=t, - p=p, - json_file=json_file) + costs_single["capital_cost"] = edisgo_convert_capital_costs( + costs_single["overnight_costs"], t=t, p=p, json_file=json_file + ) # Weighting (retrieves the singe (absolute) weighting for this grid) choice = edisgo.grid_choice - weighting = choice.loc[ - choice['the_selected_network_id'] == key - ][ - 'no_of_points_per_cluster' + weighting = choice.loc[choice["the_selected_network_id"] == key][ + "no_of_points_per_cluster" ].values[0] - costs_single[['capital_cost', 'overnight_costs']] = ( - costs_single[['capital_cost', 'overnight_costs']] - * weighting) + costs_single[["capital_cost", "overnight_costs"]] = ( + costs_single[["capital_cost", "overnight_costs"]] * weighting + ) # Append costs of this grid costs = costs.append( - costs_single[[ - 'voltage_level', - 'capital_cost', - 'overnight_costs']], ignore_index=True) + costs_single[["voltage_level", "capital_cost", "overnight_costs"]], + ignore_index=True, + ) if len(costs) == 0: - logger.info('No expansion costs in any MV grid') + logger.info("No expansion costs in any MV grid") return None else: - aggr_costs = costs.groupby( - ['voltage_level']).sum().reset_index() + aggr_costs = costs.groupby(["voltage_level"]).sum().reset_index() # In eDisGo all costs are in kEuro (eGo only takes Euro) - aggr_costs[['capital_cost', 'overnight_costs']] = ( - aggr_costs[['capital_cost', 'overnight_costs']] - * 1000) + aggr_costs[["capital_cost", "overnight_costs"]] = ( + aggr_costs[["capital_cost", "overnight_costs"]] * 1000 + ) successfull_grids = edisgo.successfull_grids if successfull_grids < 1: logger.warning( - 'Only {} % of the grids were calculated.\n'.format( - "{0:,.2f}".format(successfull_grids * 100) - ) + 'Costs are extrapolated...') + "Only {} % of the grids were calculated.\n".format( + "{:,.2f}".format(successfull_grids * 100) + ) + + "Costs are extrapolated..." + ) - aggr_costs[['capital_cost', 'overnight_costs']] = ( - aggr_costs[['capital_cost', 'overnight_costs']] - / successfull_grids) + aggr_costs[["capital_cost", "overnight_costs"]] = ( + aggr_costs[["capital_cost", "overnight_costs"]] / successfull_grids + ) return aggr_costs def get_generator_investment(network, scn_name): - """ Get investment costs per carrier/ generator. 
- - """ + """Get investment costs per carrier/ generator.""" etg = network try: - data = pkgutil.get_data('ego', 'data/investment_costs.csv') - invest = pd.read_csv(io.BytesIO(data), - encoding='utf8', sep=",", - index_col="carriers") + data = pkgutil.get_data("ego", "data/investment_costs.csv") + invest = pd.read_csv( + io.BytesIO(data), encoding="utf8", sep=",", index_col="carriers" + ) except FileNotFoundError: path = os.getcwd() - filename = 'investment_costs.csv' - invest = pd.DataFrame.from_csv(path + '/data/'+filename) - - if scn_name in ['SH Status Quo', 'Status Quo']: - invest_scn = 'Status Quo' - - if scn_name in ['SH NEP 2035', 'NEP 2035']: - invest_scn = 'NEP 2035' - - if scn_name in ['SH eGo 100', 'eGo 100']: - invest_scn = 'eGo 100' - - gen_invest = pd.concat([invest[invest_scn], - etg.generators.groupby('carrier')['p_nom'].sum()], - axis=1, join='inner') - - gen_invest = pd.concat([invest[invest_scn], etg.generators.groupby( - 'carrier') - ['p_nom'].sum()], axis=1, join='inner') - gen_invest['carrier_costs'] = gen_invest[invest_scn] * \ - gen_invest['p_nom'] * 1000 # in MW + filename = "investment_costs.csv" + invest = pd.DataFrame.from_csv(path + "/data/" + filename) + + if scn_name in ["SH Status Quo", "Status Quo"]: + invest_scn = "Status Quo" + + if scn_name in ["SH NEP 2035", "NEP 2035"]: + invest_scn = "NEP 2035" + + if scn_name in ["SH eGo 100", "eGo 100"]: + invest_scn = "eGo 100" + + gen_invest = pd.concat( + [invest[invest_scn], etg.generators.groupby("carrier")["p_nom"].sum()], + axis=1, + join="inner", + ) + + gen_invest = pd.concat( + [invest[invest_scn], etg.generators.groupby("carrier")["p_nom"].sum()], + axis=1, + join="inner", + ) + gen_invest["carrier_costs"] = ( + gen_invest[invest_scn] * gen_invest["p_nom"] * 1000 + ) # in MW return gen_invest diff --git a/ego/tools/edisgo_integration.py b/ego/tools/edisgo_integration.py index b8d512bc..f30b9cf6 100644 --- a/ego/tools/edisgo_integration.py +++ b/ego/tools/edisgo_integration.py @@ -18,57 +18,64 @@ # File description """ -This file is part of the the eGo toolbox. +This file is part of the eGo toolbox. It contains the class definition for multiple eDisGo networks. 
""" -__copyright__ = ("Flensburg University of Applied Sciences, " - "Europa-Universität Flensburg, " - "Centre for Sustainable Energy Systems") +__copyright__ = ( + "Flensburg University of Applied Sciences, " + "Europa-Universität Flensburg, " + "Centre for Sustainable Energy Systems" +) __license__ = "GNU Affero General Public License Version 3 (AGPL-3.0)" -__author__ = "wolf_bunke, maltesc" +__author__ = "wolf_bunke, maltesc, mltja" -# Import -from traceback import TracebackException +import json +import logging import os import pickle -import logging -import traceback -import pypsa -import csv -import dill -import pandas as pd + +from copy import deepcopy +from datetime import datetime +from datetime import timedelta as td from time import localtime, sleep, strftime -from datetime import datetime, timedelta as td -import json -from sqlalchemy.orm import sessionmaker -from sqlalchemy.orm import scoped_session -import multiprocess as mp2 -if not 'READTHEDOCS' in os.environ: +# Import +from traceback import TracebackException - from egoio.db_tables import model_draft, grid - from egoio.tools import db +import dill +import multiprocess as mp2 +import pandas as pd - from edisgo.grid.network import Results, TimeSeriesControl - from edisgo.grid import tools +if "READTHEDOCS" not in os.environ: + from edisgo.edisgo import import_edisgo_from_files + from edisgo.flex_opt.reinforce_grid import enhanced_reinforce_grid + from edisgo.network.overlying_grid import distribute_overlying_grid_requirements + from edisgo.tools.config import Config + from edisgo.tools.logger import setup_logger from edisgo.tools.plots import mv_grid_topology - from edisgo.grid.network import EDisGo + from edisgo.tools.temporal_complexity_reduction import ( + get_most_critical_time_intervals, + ) + from edisgo.tools.tools import ( + aggregate_district_heating_components, + reduce_timeseries_data_to_given_timeindex, + ) - from ego.tools.specs import ( - get_etragospecs_direct + from ego.mv_clustering import cluster_workflow, database + from ego.tools.economics import edisgo_grid_investment + from ego.tools.interface import ( + ETraGoMinimalData, + get_etrago_results_per_bus, + map_etrago_heat_bus_to_district_heating_id, + rename_generator_carriers_edisgo, ) - from ego.tools.mv_cluster import ( - analyze_attributes, - cluster_mv_grids) - from ego.tools.economics import ( - edisgo_grid_investment) # Logging logger = logging.getLogger(__name__) pickle.DEFAULT_PROTOCOL = 4 -dill.settings['protocol'] = 4 +dill.settings["protocol"] = 4 class EDisGoNetworks: @@ -91,12 +98,9 @@ def __init__(self, json_file, etrago_network): self._set_scenario_settings() # Create reduced eTraGo network - self._etrago_network = _ETraGoData(etrago_network) + self._etrago_network = ETraGoMinimalData(etrago_network) del etrago_network - # eDisGo specific naming - self._edisgo_scenario_translation() - # Program information self._run_finished = False @@ -104,12 +108,9 @@ def __init__(self, json_file, etrago_network): self._edisgo_grids = {} if self._csv_import: - self._laod_edisgo_results() - self._successfull_grids = self._successfull_grids() - self._grid_investment_costs = edisgo_grid_investment( - self, - self._json_file - ) + self._load_edisgo_results() + self._successful_grids = self._successful_grids() + self._grid_investment_costs = edisgo_grid_investment(self, self._json_file) else: # Only clustering results @@ -127,22 +128,21 @@ def __init__(self, json_file, etrago_network): if self._results: self._save_edisgo_results() - self._successfull_grids 
= self._successfull_grids() + self._successful_grids = self._successful_grids() self._grid_investment_costs = edisgo_grid_investment( - self, - self._json_file + self, self._json_file ) @property def network(self): """ - Container for eDisGo grids, including all results + Container for EDisGo objects, including all results Returns ------- - :obj:`dict` of :class:`edisgo.grid.network.EDisGo` - Dictionary of eDisGo objects, keyed by MV grid ID + dict[int, :class:`edisgo.EDisGo`] + Dictionary of EDisGo objects, keyed by MV grid ID """ return self._edisgo_grids @@ -156,12 +156,13 @@ def grid_choice(self): ------- :pandas:`pandas.DataFrame` Dataframe containing the chosen grids and their weightings + 'no_of_points_per_cluster', 'the_selected_network_id', 'represented_grids' """ return self._grid_choice @property - def successfull_grids(self): + def successful_grids(self): """ Relative number of successfully calculated MV grids (Includes clustering weighting) @@ -172,7 +173,7 @@ def successfull_grids(self): Relative number of grids """ - return self._successfull_grids + return self._successful_grids @property def grid_investment_costs(self): @@ -187,60 +188,6 @@ def grid_investment_costs(self): """ return self._grid_investment_costs - def get_mv_grid_from_bus_id(self, bus_id): - """ - Queries the MV grid ID for a given eTraGo bus - - Parameters - ---------- - bus_id : int - eTraGo bus ID - - Returns - ------- - int - MV grid (ding0) ID - - """ - - conn = db.connection(section=self._db_section) - session_factory = sessionmaker(bind=conn) - Session = scoped_session(session_factory) - session = Session() - - mv_grid_id = self._get_mv_grid_from_bus_id(session, bus_id) - - Session.remove() - - return mv_grid_id - - def get_bus_id_from_mv_grid(self, subst_id): - """ - Queries the eTraGo bus ID for given MV grid (ding0) ID - - Parameters - ---------- - subst_id : int - MV grid (ding0) ID - - Returns - ------- - int - eTraGo bus ID - - """ - - conn = db.connection(section=self._db_section) - session_factory = sessionmaker(bind=conn) - Session = scoped_session(session_factory) - session = Session() - - bus_id = self._get_bus_id_from_mv_grid(session, subst_id) - - Session.remove() - - return bus_id - def plot_storage_integration(self, mv_grid_id, **kwargs): """ Plots storage position in MV grid of integrated storages. @@ -249,14 +196,16 @@ def plot_storage_integration(self, mv_grid_id, **kwargs): mv_grid_topology( self._edisgo_grids[mv_grid_id].network.pypsa, self._edisgo_grids[mv_grid_id].network.config, - node_color=kwargs.get('storage_integration', None), - filename=kwargs.get('filename', None), - grid_district_geom=kwargs.get('grid_district_geom', True), - background_map=kwargs.get('background_map', True), - xlim=kwargs.get('xlim', None), ylim=kwargs.get('ylim', None), - title=kwargs.get('title', '')) - - def plot_grid_expansion_costs(self, mv_grid_id, ** kwargs): + node_color=kwargs.get("storage_integration", None), + filename=kwargs.get("filename", None), + grid_district_geom=kwargs.get("grid_district_geom", True), + background_map=kwargs.get("background_map", True), + xlim=kwargs.get("xlim", None), + ylim=kwargs.get("ylim", None), + title=kwargs.get("title", ""), + ) + + def plot_grid_expansion_costs(self, mv_grid_id, **kwargs): """ Plots costs per MV line. For more information see :func:`edisgo.tools.plots.mv_grid_topology`. 
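The plot wrappers above and below only forward keyword arguments to `edisgo.tools.plots.mv_grid_topology`. A hypothetical call, assuming `networks` is an `EDisGoNetworks` instance that holds results for MV grid 176:

```python
# Hypothetical usage; grid ID 176 and the file names are illustrative only.
networks.plot_line_loading(
    176,
    filename="mv_176_loading.png",  # save the figure instead of showing it
    title="MV grid 176: line loading",
)
networks.plot_grid_expansion_costs(176, filename="mv_176_costs.png")
```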
@@ -265,18 +214,23 @@ def plot_grid_expansion_costs(self, mv_grid_id, ** kwargs): mv_grid_topology( self._edisgo_grids[mv_grid_id].network.pypsa, self._edisgo_grids[mv_grid_id].network.config, - line_color='expansion_costs', + line_color="expansion_costs", grid_expansion_costs=( - self._edisgo_grids[mv_grid_id].network. - results.grid_expansion_costs.rename(columns={ - "overnight_costs": "total_costs"})), - filename=kwargs.get('filename', None), - grid_district_geom=kwargs.get('grid_district_geom', True), - background_map=kwargs.get('background_map', True), - limits_cb_lines=kwargs.get('limits_cb_lines', None), - xlim=kwargs.get('xlim', None), ylim=kwargs.get('ylim', None), - lines_cmap=kwargs.get('lines_cmap', 'inferno_r'), - title=kwargs.get('title', '')) + self._edisgo_grids[ + mv_grid_id + ].network.results.grid_expansion_costs.rename( + columns={"overnight_costs": "total_costs"} + ) + ), + filename=kwargs.get("filename", None), + grid_district_geom=kwargs.get("grid_district_geom", True), + background_map=kwargs.get("background_map", True), + limits_cb_lines=kwargs.get("limits_cb_lines", None), + xlim=kwargs.get("xlim", None), + ylim=kwargs.get("ylim", None), + lines_cmap=kwargs.get("lines_cmap", "inferno_r"), + title=kwargs.get("title", ""), + ) def plot_line_loading(self, mv_grid_id, **kwargs): """ @@ -288,20 +242,22 @@ def plot_line_loading(self, mv_grid_id, **kwargs): mv_grid_topology( self._edisgo_grids[mv_grid_id].network.pypsa, self._edisgo_grids[mv_grid_id].network.config, - timestep=kwargs.get('timestep', None), - line_color='loading', - node_color=kwargs.get('node_color', None), + timestep=kwargs.get("timestep", None), + line_color="loading", + node_color=kwargs.get("node_color", None), line_load=self._edisgo_grids[mv_grid_id].network.results.s_res(), - filename=kwargs.get('filename', None), - arrows=kwargs.get('arrows', None), - grid_district_geom=kwargs.get('grid_district_geom', True), - background_map=kwargs.get('background_map', True), + filename=kwargs.get("filename", None), + arrows=kwargs.get("arrows", None), + grid_district_geom=kwargs.get("grid_district_geom", True), + background_map=kwargs.get("background_map", True), voltage=None, # change API - limits_cb_lines=kwargs.get('limits_cb_lines', None), - limits_cb_nodes=kwargs.get('limits_cb_nodes', None), - xlim=kwargs.get('xlim', None), ylim=kwargs.get('ylim', None), - lines_cmap=kwargs.get('lines_cmap', 'inferno_r'), - title=kwargs.get('title', '')) + limits_cb_lines=kwargs.get("limits_cb_lines", None), + limits_cb_nodes=kwargs.get("limits_cb_nodes", None), + xlim=kwargs.get("xlim", None), + ylim=kwargs.get("ylim", None), + lines_cmap=kwargs.get("lines_cmap", "inferno_r"), + title=kwargs.get("title", ""), + ) def plot_mv_grid_topology(self, mv_grid_id, **kwargs): """ @@ -309,214 +265,215 @@ def plot_mv_grid_topology(self, mv_grid_id, **kwargs): For more information see :func:`edisgo.tools.plots.mv_grid_topology`. 
""" - mv_grid_topology(self._edisgo_grids[mv_grid_id].network.pypsa, - self._edisgo_grids[mv_grid_id].network.config, - filename=kwargs.get('filename', None), - grid_district_geom=kwargs.get( - 'grid_district_geom', True), - background_map=kwargs.get('background_map', True), - xlim=kwargs.get('xlim', None), - ylim=kwargs.get('ylim', None), - title=kwargs.get('title', '')) + mv_grid_topology( + self._edisgo_grids[mv_grid_id].network.pypsa, + self._edisgo_grids[mv_grid_id].network.config, + filename=kwargs.get("filename", None), + grid_district_geom=kwargs.get("grid_district_geom", True), + background_map=kwargs.get("background_map", True), + xlim=kwargs.get("xlim", None), + ylim=kwargs.get("ylim", None), + title=kwargs.get("title", ""), + ) def _init_status(self): """ - Creates a Status file where all eDisGo statuses are tracked... + Creates a status csv file where statuses of MV grid calculations are tracked. + + The file is saved to the directory 'status'. Filename indicates date and time + the file was created. + + File contains the following information: + + * 'MV grid id' (index) + * 'cluster_perc' - percentage of grids represented by this grid + * 'start_time' - start time of calculation + * 'end_time' - end time of calculation + """ - self._status_dir = 'status' + self._status_dir = os.path.join(self._json_file["eDisGo"]["results"], "status") if not os.path.exists(self._status_dir): os.makedirs(self._status_dir) - self._status_file = 'eGo_' + strftime("%Y-%m-%d_%H%M%S", localtime()) + self._status_file_name = "eGo_" + strftime("%Y-%m-%d_%H%M%S", localtime()) status = self._grid_choice.copy() - status = status.set_index('the_selected_network_id') - status.index.names = ['MV grid'] - - tot_reprs = self._grid_choice['no_of_points_per_cluster'].sum() + status = status.set_index("the_selected_network_id") + status.index.names = ["MV grid id"] - status['cluster_perc'] = status['no_of_points_per_cluster'] / tot_reprs + status["cluster_perc"] = ( + status["no_of_points_per_cluster"] + / self._grid_choice["no_of_points_per_cluster"].sum() + ) - status['start_time'] = 'Not started yet' - status['end_time'] = 'Not finished yet' + status["start_time"] = "Not started yet" + status["end_time"] = "Not finished yet" status.drop( - ['no_of_points_per_cluster', 'represented_grids'], - axis=1, - inplace=True) + ["no_of_points_per_cluster", "represented_grids"], axis=1, inplace=True + ) - self._status_path = os.path.join( - self._status_dir, - self._status_file + '.csv') + self._status_file_path = os.path.join( + self._status_dir, self._status_file_name + ".csv" + ) - status.to_csv(self._status_path) + status.to_csv(self._status_file_path) def _status_update(self, mv_grid_id, time, message=None, show=True): """ - Updtaed eDisGo's status files + Updates status csv file where statuses of MV grid calculations are tracked. + + Parameters + ---------- + mv_grid_id : int + MV grid ID of the ding0 grid. + time : str + Can be either 'start' to set information on when the calculation started + or 'end' to set information on when the calculation ended. In case a + message is provided through parameter `message`, the message instead of the + time is set. + message : str or None (optional) + Message to set for 'start_time' or 'end_time'. If None, the current time + is set. Default: None. + show : bool (optional) + If True, shows a logging message with the status information. Default: True. 
+ """ - status = pd.read_csv( - self._status_path, - index_col=0) + status = pd.read_csv(self._status_file_path, index_col=0) - status['start_time'] = status['start_time'].astype(str) - status['end_time'] = status['end_time'].astype(str) + status["start_time"] = status["start_time"].astype(str) + status["end_time"] = status["end_time"].astype(str) if message: now = message else: now = strftime("%Y-%m-%d_%H:%M", localtime()) - if time == 'start': - status.at[mv_grid_id, 'start_time'] = now - elif time == 'end': - status.at[mv_grid_id, 'end_time'] = now + if time == "start": + status.at[mv_grid_id, "start_time"] = now + elif time == "end": + status.at[mv_grid_id, "end_time"] = now if show: - logger.info("\n\neDisGo Status: \n\n" - + status.to_string() - + "\n\n") + logger.info("\n\neDisGo status: \n\n" + status.to_string() + "\n\n") - status.to_csv(self._status_path) + status.to_csv(self._status_file_path) def _update_edisgo_configs(self, edisgo_grid): """ This function overwrites some eDisGo configurations with eGo settings. + + The overwritten configs are: + + * config['db_connection']['section'] + * config['data_source']['oedb_data_source'] + * config['versioned']['version'] + """ # Info and Warning handling - if not hasattr(self, '_suppress_log'): + if not hasattr(self, "_suppress_log"): self._suppress_log = False # Only in the first run warnings and # info get thrown - # Database section - ego_db = self._db_section - edisgo_db = edisgo_grid.network.config['db_connection']['section'] - - if not ego_db == edisgo_db: - if not self._suppress_log: - logger.warning( - ("eDisGo database configuration (db: '{}') " - + "will be overwritten with database configuration " - + "from eGo's scenario settings (db: '{}')").format( - edisgo_db, - ego_db)) - edisgo_grid.network.config['db_connection']['section'] = ego_db - # Versioned ego_gridversion = self._grid_version - if ego_gridversion == None: - ego_versioned = 'model_draft' + if ego_gridversion is None: + ego_versioned = "model_draft" if not self._suppress_log: - logger.info("eGo's grid_version == None is " - + "evaluated as data source: model_draft") + logger.info( + "eGo's grid_version == None is " + + "evaluated as data source: model_draft" + ) else: - ego_versioned = 'versioned' + ego_versioned = "versioned" if not self._suppress_log: - logger.info(("eGo's grid_version == '{}' is " - + "evaluated as data source: versioned").format( - ego_gridversion)) + logger.info( + ( + "eGo's grid_version == '{}' is " + + "evaluated as data source: versioned" + ).format(ego_gridversion) + ) - edisgo_versioned = edisgo_grid.network.config[ - 'data_source']['oedb_data_source'] + edisgo_versioned = edisgo_grid.network.config["data_source"]["oedb_data_source"] if not ego_versioned == edisgo_versioned: if not self._suppress_log: logger.warning( - ("eDisGo data source configuration ('{}') " - + "will be overwritten with data source config. from " - + "eGo's scenario settings (data source: '{}')" - ).format( - edisgo_versioned, - ego_versioned)) - edisgo_grid.network.config[ - 'data_source']['oedb_data_source'] = ego_versioned + ( + "eDisGo data source configuration ('{}') " + + "will be overwritten with data source config. 
from " + + "eGo's scenario settings (data source: '{}')" + ).format(edisgo_versioned, ego_versioned) + ) + edisgo_grid.network.config["data_source"][ + "oedb_data_source" + ] = ego_versioned # Gridversion ego_gridversion = self._grid_version - edisgo_gridversion = edisgo_grid.network.config[ - 'versioned']['version'] + edisgo_gridversion = edisgo_grid.network.config["versioned"]["version"] if not ego_gridversion == edisgo_gridversion: if not self._suppress_log: logger.warning( - ("eDisGo version configuration (version: '{}') " - + "will be overwritten with version configuration " - + "from eGo's scenario settings (version: '{}')" - ).format( - edisgo_gridversion, - ego_gridversion)) - edisgo_grid.network.config[ - 'versioned']['version'] = ego_gridversion + ( + "eDisGo version configuration (version: '{}') " + + "will be overwritten with version configuration " + + "from eGo's scenario settings (version: '{}')" + ).format(edisgo_gridversion, ego_gridversion) + ) + edisgo_grid.network.config["versioned"]["version"] = ego_gridversion self._suppress_log = True def _set_scenario_settings(self): - self._csv_import = self._json_file['eGo']['csv_import_eDisGo'] + self._csv_import = self._json_file["eGo"]["csv_import_eDisGo"] # eTraGo args - self._etrago_args = self._json_file['eTraGo'] - self._scn_name = self._etrago_args['scn_name'] - self._ext_storage = ( - 'storage' in self._etrago_args['extendable'] - ) + self._etrago_args = self._json_file["eTraGo"] + self._scn_name = self._etrago_args["scn_name"] + self._ext_storage = "storage" in self._etrago_args["extendable"] if self._ext_storage: logger.info("eTraGo Dataset used extendable storage") - self._pf_post_lopf = self._etrago_args['pf_post_lopf'] + self._pf_post_lopf = self._etrago_args["pf_post_lopf"] # eDisGo args import if self._csv_import: # raise NotImplementedError - with open(os.path.join( - self._csv_import, - 'edisgo_args.json')) as f: + with open(os.path.join(self._csv_import, "edisgo_args.json")) as f: edisgo_args = json.load(f) - self._json_file['eDisGo'] = edisgo_args - logger.info("All eDisGo settings are taken from CSV folder" - + "(scenario settings are ignored)") + self._json_file["eDisGo"] = edisgo_args + logger.info( + "All eDisGo settings are taken from CSV folder" + + "(scenario settings are ignored)" + ) # This overwrites the original object... 
# Imported or directly from the Settings # eDisGo section of the settings - self._edisgo_args = self._json_file['eDisGo'] + self._edisgo_args = self._json_file["eDisGo"] # Reading all eDisGo settings # TODO: Integrate into a for-loop - self._db_section = self._edisgo_args['db'] - self._grid_version = self._edisgo_args['gridversion'] - self._timesteps_pfa = self._edisgo_args['timesteps_pfa'] - self._solver = self._edisgo_args['solver'] - self._curtailment_voltage_threshold = self._edisgo_args[ - 'curtailment_voltage_threshold'] - self._ding0_files = self._edisgo_args['ding0_files'] - self._choice_mode = self._edisgo_args['choice_mode'] - self._parallelization = self._edisgo_args['parallelization'] - self._initial_reinforcement = self._edisgo_args[ - 'initial_reinforcement'] - self._storage_distribution = self._edisgo_args['storage_distribution'] - self._apply_curtailment = self._edisgo_args['apply_curtailment'] - self._cluster_attributes = self._edisgo_args['cluster_attributes'] - self._only_cluster = self._edisgo_args['only_cluster'] - self._max_workers = self._edisgo_args['max_workers'] - self._max_cos_phi_renewable = self._edisgo_args[ - 'max_cos_phi_renewable'] - self._results = self._edisgo_args['results'] - self._max_calc_time = self._edisgo_args['max_calc_time'] + self._grid_version = self._edisgo_args["gridversion"] + self._solver = self._edisgo_args["solver"] + self._grid_path = self._edisgo_args["grid_path"] + self._choice_mode = self._edisgo_args["choice_mode"] + self._parallelization = self._edisgo_args["parallelization"] + self._cluster_attributes = self._edisgo_args["cluster_attributes"] + self._only_cluster = self._edisgo_args["only_cluster"] + self._max_workers = self._edisgo_args["max_workers"] + self._max_cos_phi_renewable = self._edisgo_args["max_cos_phi_renewable"] + self._results = self._edisgo_args["results"] + self._max_calc_time = self._edisgo_args["max_calc_time"] # Some basic checks - if (self._storage_distribution is True) & (self._ext_storage is False): - logger.warning("Storage distribution (MV grids) is active, " - + "but eTraGo dataset has no extendable storages") - if not self._initial_reinforcement: - raise NotImplementedError( - "Skipping the initial reinforcement is not yet implemented" - ) if self._only_cluster: - logger.warning( - "\n\nThis eDisGo run only returns cluster results\n\n") + logger.warning("\n\nThis eDisGo run only returns cluster results\n\n") # Versioning if self._grid_version is not None: @@ -524,17 +481,7 @@ def _set_scenario_settings(self): else: self._versioned = False - def _edisgo_scenario_translation(self): - - # Scenario translation - if self._scn_name == 'Status Quo': - self._generator_scn = None - elif self._scn_name == 'NEP 2035': - self._generator_scn = 'nep2035' - elif self._scn_name == 'eGo 100': - self._generator_scn = 'ego100' - - def _successfull_grids(self): + def _successful_grids(self): """ Calculates the relative number of successfully calculated grids, including the cluster weightings @@ -544,143 +491,60 @@ def _successfull_grids(self): for key, value in self._edisgo_grids.items(): weight = self._grid_choice.loc[ - self._grid_choice['the_selected_network_id'] == key - ]['no_of_points_per_cluster'].values[0] + self._grid_choice["the_selected_network_id"] == key + ]["no_of_points_per_cluster"].values[0] total += weight - if hasattr(value, 'network'): + if hasattr(value, "network"): success += weight else: fail += weight - return success/total + return success / total - def _analyze_cluster_attributes(self): - """ - 
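`_successful_grids` weights every representative grid by its cluster size, so a failed representative of a large cluster lowers the share more than a small one. A tiny illustration with hypothetical weights:

```python
# Three representative grids with cluster weights; one timed out.
weights = {176: 10, 1056: 5, 2342: 5}
finished = {176: True, 1056: True, 2342: False}

success = sum(w for g, w in weights.items() if finished[g])
total = sum(weights.values())
print(success / total)  # 0.75, the share used to extrapolate grid costs
```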
Analyses the attributes wind and solar capacity and farthest node - for clustering. - These are considered the "standard" attributes for the MV grid - clustering. - """ - analyze_attributes(self._ding0_files) - - def _cluster_mv_grids( - self, - no_grids): + def _cluster_mv_grids(self): """ Clusters the MV grids based on the attributes, for a given number of MV grids - Parameters - ---------- - no_grids : int - Desired number of clusters (of MV grids) - Returns ------- :pandas:`pandas.DataFrame` Dataframe containing the clustered MV grids and their weightings """ - - # TODO: This first dataframe contains the standard attributes... - # ...Create an Interface in order to use attributes more flexibly. - # Make this function more generic. - attributes_path = self._ding0_files + '/attributes.csv' - - if not os.path.isfile(attributes_path): - logger.info('Attributes file is missing') - logger.info('Attributes will be calculated') - self._analyze_cluster_attributes() - - df = pd.read_csv(self._ding0_files + '/attributes.csv') - df = df.set_index('id') - df.drop(['Unnamed: 0'], inplace=True, axis=1) - df.rename( - columns={ - "Solar_cumulative_capacity": "solar_cap", - "Wind_cumulative_capacity": "wind_cap", - "The_Farthest_node": "farthest_node"}, - inplace=True) - - if 'extended_storage' in self._cluster_attributes: - if self._ext_storage: - storages = self._identify_extended_storages() - if not (storages.max().values[0] == 0.): - df = pd.concat([df, storages], axis=1) - df.rename( - columns={"storage_p_nom": "extended_storage"}, - inplace=True) - else: - logger.warning('Extended storages all 0. \ - Therefore, extended storages \ - are excluded from clustering') - - found_atts = [ - i for i in self._cluster_attributes if i in df.columns - ] - missing_atts = [ - i for i in self._cluster_attributes if i not in df.columns - ] - - logger.info( - 'Available attributes are: {}'.format(df.columns.tolist()) - ) - logger.info( - 'Chosen/found attributes are: {}'.format(found_atts) - ) - - if len(missing_atts) > 0: - logger.warning( - 'Missing attributes: {}'.format(missing_atts) - ) - if 'extended_storage' in missing_atts: - logger.info('Hint: eTraGo dataset must contain ' - 'extendable storage in order to include ' - 'storage extension in MV grid clustering.') - - return cluster_mv_grids( - no_grids, - cluster_base=df) + cluster_df = cluster_workflow(config=self._json_file) + # Filter for clusters with representatives. + cluster_df = cluster_df[cluster_df["representative"].astype(bool)] + return cluster_df def _identify_extended_storages(self): - conn = db.connection(section=self._db_section) - session_factory = sessionmaker(bind=conn) - Session = scoped_session(session_factory) - session = Session() - all_mv_grids = self._check_available_mv_grids() - storages = pd.DataFrame( - index=all_mv_grids, - columns=['storage_p_nom']) + storages = pd.DataFrame(index=all_mv_grids, columns=["storage_p_nom"]) - logger.info('Identifying extended storage') + logger.info("Identifying extended storage") for mv_grid in all_mv_grids: - bus_id = self._get_bus_id_from_mv_grid(session, mv_grid) min_extended = 0.3 stor_p_nom = self._etrago_network.storage_units.loc[ - (self._etrago_network.storage_units['bus'] == str(bus_id)) - & (self._etrago_network.storage_units[ - 'p_nom_extendable' - ] == True) - & (self._etrago_network.storage_units[ - 'p_nom_opt' - ] > min_extended) - & (self._etrago_network.storage_units['max_hours'] <= 20.) 
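`_identify_extended_storages` (spanning the lines above and below) marks a grid as storage-extended when eTraGo placed a noticeably extended (> 0.3 MW), battery-like (≤ 20 full-load hours) unit at the grid's bus. A standalone sketch of that boolean mask with made-up data:

```python
import pandas as pd

# Made-up eTraGo storage units at two buses.
storage_units = pd.DataFrame(
    {
        "bus": ["176", "1056"],
        "p_nom_extendable": [True, True],
        "p_nom_opt": [0.8, 0.1],
        "max_hours": [6.0, 168.0],
    }
)

min_extended = 0.3
mask = (
    (storage_units["bus"] == "176")
    & storage_units["p_nom_extendable"]
    & (storage_units["p_nom_opt"] > min_extended)
    & (storage_units["max_hours"] <= 20.0)  # exclude seasonal storage
)
print(storage_units.loc[mask, "p_nom_opt"])  # 0.8 MW -> counts as extended
```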
- ]['p_nom_opt'] +                (self._etrago_network.storage_units["bus"] == str(mv_grid)) +                & ( +                    self._etrago_network.storage_units["p_nom_extendable"] +                    == True  # noqa: E712 +                ) +                & (self._etrago_network.storage_units["p_nom_opt"] > min_extended) +                & (self._etrago_network.storage_units["max_hours"] <= 20.0) +            ]["p_nom_opt"] if len(stor_p_nom) == 1: stor_p_nom = stor_p_nom.values[0] elif len(stor_p_nom) == 0: -                stor_p_nom = 0. +                stor_p_nom = 0.0 else: raise IndexError -            storages.at[mv_grid, 'storage_p_nom'] = stor_p_nom - -        Session.remove() +            storages.at[mv_grid, "storage_p_nom"] = stor_p_nom return storages @@ -695,13 +559,9 @@ def _check_available_mv_grids(self): """ mv_grids = [] -        for file in os.listdir(self._ding0_files): -            if file.endswith('.pkl'): -                mv_grids.append( -                    int(file.replace( -                        'ding0_grids__', '' -                    ).replace('.pkl', ''))) - +        for file in os.listdir(self._grid_path): +            if os.path.isdir(os.path.join(self._grid_path, file)): +                mv_grids.append(int(file)) return mv_grids def _set_grid_choice(self): @@ -712,57 +572,53 @@ choice_df = pd.DataFrame( columns=[ -                'no_of_points_per_cluster', -                'the_selected_network_id', -                'represented_grids']) - -        if self._choice_mode == 'cluster': -            no_grids = self._edisgo_args['no_grids'] -            logger.info('Clustering to {} MV grids'.format(no_grids)) - -            cluster_df = self._cluster_mv_grids(no_grids) -            choice_df[ -                'the_selected_network_id' -            ] = cluster_df['the_selected_network_id'] -            choice_df[ -                'no_of_points_per_cluster' -            ] = cluster_df['no_of_points_per_cluster'] -            choice_df[ -                'represented_grids' -            ] = cluster_df['represented_grids'] - -        elif self._choice_mode == 'manual': -            man_grids = self._edisgo_args['manual_grids'] - -            choice_df['the_selected_network_id'] = man_grids -            choice_df['no_of_points_per_cluster'] = 1 -            choice_df['represented_grids'] = [ -                [mv_grid_id] -                for mv_grid_id -                in choice_df['the_selected_network_id']] +                "no_of_points_per_cluster", +                "the_selected_network_id", +                "represented_grids", +            ] +        ) -            logger.info( -                'Calculating manually chosen MV grids {}'.format(man_grids) -            ) +        if self._choice_mode == "cluster": +            cluster_df = self._cluster_mv_grids() + +            n_clusters = self._json_file["eDisGo"]["n_clusters"] +            n_clusters_found = cluster_df.shape[0] +            if n_clusters == n_clusters_found: +                logger.info(f"Clustering to {n_clusters} MV grids") +            else: +                logger.warning( +                    f"Requested {n_clusters} clusters, but representative " +                    f"grids were only found for {n_clusters_found} clusters."
+                ) + +            choice_df["the_selected_network_id"] = cluster_df["representative"] +            choice_df["no_of_points_per_cluster"] = cluster_df["n_grids_per_cluster"] +            choice_df["represented_grids"] = cluster_df["represented_grids"] -        elif self._choice_mode == 'all': +        elif self._choice_mode == "manual": +            man_grids = self._edisgo_args["manual_grids"] + +            choice_df["the_selected_network_id"] = man_grids +            choice_df["no_of_points_per_cluster"] = 1 +            choice_df["represented_grids"] = [ +                [mv_grid_id] for mv_grid_id in choice_df["the_selected_network_id"] +            ] + +            logger.info("Calculating manually chosen MV grids {}".format(man_grids)) + +        elif self._choice_mode == "all": mv_grids = self._check_available_mv_grids() -            choice_df['the_selected_network_id'] = mv_grids -            choice_df['no_of_points_per_cluster'] = 1 -            choice_df['represented_grids'] = [ -                [mv_grid_id] -                for mv_grid_id -                in choice_df['the_selected_network_id']] +            choice_df["the_selected_network_id"] = mv_grids +            choice_df["no_of_points_per_cluster"] = 1 +            choice_df["represented_grids"] = [ +                [mv_grid_id] for mv_grid_id in choice_df["the_selected_network_id"] +            ] no_grids = len(mv_grids) -            logger.info( -                'Calculating all available {} MV grids'.format(no_grids) -            ) +            logger.info("Calculating all available {} MV grids".format(no_grids)) -        choice_df = choice_df.sort_values( -            'no_of_points_per_cluster', -            ascending=False) +        choice_df = choice_df.sort_values("no_of_points_per_cluster", ascending=False) self._grid_choice = choice_df @@ -773,66 +629,57 @@ def _run_edisgo_pool(self): """ parallelization = self._parallelization -        if not os.path.exists(self._results): -            os.makedirs(self._results) +        results_dir = self._results +        if not os.path.exists(results_dir): +            os.makedirs(results_dir) if parallelization is True: -            logger.info('Run eDisGo parallel') +            logger.info("Run eDisGo parallel") mv_grids = self._grid_choice["the_selected_network_id"].tolist() no_cpu = mp2.cpu_count() if no_cpu > self._max_workers: no_cpu = self._max_workers logger.info( -                    'Number of workers limited to {} by user'.format( -                        self._max_workers -                    )) +                    "Number of workers limited to {} by user".format(self._max_workers) +                ) self._edisgo_grids = set(mv_grids) self._edisgo_grids = parallelizer( mv_grids, -                lambda *xs: xs[1]._run_edisgo(xs[0]), +                lambda *xs: xs[1].run_edisgo(xs[0]), (self,), self._max_calc_time, -                workers=no_cpu) +                workers=no_cpu, +            ) for g in mv_grids: -                if not g in self._edisgo_grids: -                    self._edisgo_grids[g] = 'Timeout' +                if g not in self._edisgo_grids: +                    self._edisgo_grids[g] = "Timeout" else: -            logger.info('Run eDisGo sequencial') +            logger.info("Run eDisGo sequential") no_grids = len(self._grid_choice) count = 0 for idx, row in self._grid_choice.iterrows(): -                prog = '%.1f' % (count / no_grids * 100) -                logger.info( -                    '{} % Calculated by eDisGo'.format(prog) -                ) +                prog = "%.1f" % (count / no_grids * 100) +                logger.info("{} % Calculated by eDisGo".format(prog)) -                mv_grid_id = int(row['the_selected_network_id']) -                logger.info( -                    'MV grid {}'.format(mv_grid_id) -                ) +                mv_grid_id = int(row["the_selected_network_id"]) +                logger.info("MV grid {}".format(mv_grid_id)) try: -                    edisgo_grid = self._run_edisgo(mv_grid_id) -                    self._edisgo_grids[ -                        mv_grid_id -                    ] = edisgo_grid +                    edisgo_grid = self.run_edisgo(mv_grid_id) +                    self._edisgo_grids[mv_grid_id] = edisgo_grid except Exception as e: self._edisgo_grids[mv_grid_id] = e -                    logger.exception( -                        'MV grid {} failed: \n'.format(mv_grid_id) -                    ) +                    logger.exception("MV grid {} 
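`parallelizer` is an eGo helper that is not part of this diff; it fans the per-grid runs out over a worker pool and marks grids exceeding `max_calc_time` with the `'Timeout'` sentinel seen above. A rough sketch of that pattern, here using the standard library's `multiprocessing` (eGo itself uses the dill-based `multiprocess` fork, imported as `mp2`):

```python
import multiprocessing as mp


def run_one(grid_id):
    # Stand-in for EDisGoNetworks.run_edisgo(grid_id).
    return {grid_id: f"results/{grid_id}"}


def run_pool(grid_ids, max_calc_time_h=0.01, workers=2):
    results = {}
    with mp.Pool(processes=workers) as pool:
        jobs = {g: pool.apply_async(run_one, (g,)) for g in grid_ids}
        for g, job in jobs.items():
            try:
                results[g] = job.get(timeout=max_calc_time_h * 3600)
            except mp.TimeoutError:
                results[g] = "Timeout"  # sentinel, as in _run_edisgo_pool
    return results


if __name__ == "__main__":
    print(run_pool([176, 1056]))
```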
failed: \n".format(mv_grid_id)) count += 1 - self._csv_import = self._json_file['eDisGo']['results'] + self._csv_import = self._json_file["eDisGo"]["results"] self._save_edisgo_results() - self._laod_edisgo_results() + self._load_edisgo_results() self._run_finished = True - def _run_edisgo( - self, - mv_grid_id): + def run_edisgo(self, mv_grid_id): """ Performs a single eDisGo run @@ -843,498 +690,1286 @@ def _run_edisgo( Returns ------- - :class:`edisgo.grid.network.EDisGo` + :class:`edisgo.EDisGo` Returns the complete eDisGo container, also including results + """ - self._status_update(mv_grid_id, 'start', show=False) + self._status_update(mv_grid_id, "start", show=False) - storage_integration = self._storage_distribution - apply_curtailment = self._apply_curtailment + # ##################### general settings #################### + config = self._json_file + scenario = config["eTraGo"]["scn_name"] + engine = database.get_engine(config=config) - logger.info( - 'MV grid {}: Calculating interface values'.format(mv_grid_id)) + # results directory + results_dir = os.path.join(self._results, str(mv_grid_id)) + if not os.path.exists(results_dir): + os.makedirs(results_dir) - conn = db.connection(section=self._db_section) - session_factory = sessionmaker(bind=conn) - Session = scoped_session(session_factory) - session = Session() + # logger + if self._parallelization: + stream_level = None + else: + stream_level = "debug" + setup_logger( + loggers=[ + # {"name": "root", "file_level": None, "stream_level": None}, + # {"name": "ego", "file_level": None, "stream_level": None}, + {"name": "edisgo", "file_level": "debug", "stream_level": stream_level}, + ], + file_name=f"run_edisgo_{mv_grid_id}.log", + log_dir=results_dir, + ) + # use edisgo logger in order to have all logging information for one grid go + # to the same file + logger = logging.getLogger("edisgo.external.ego._run_edisgo") + + edisgo_grid = None + time_intervals = None + + # ################### task: setup grid ################## + if "1_setup_grid" in config["eDisGo"]["tasks"]: + # data is always imported for the full flex scenario, wherefore in case + # a low-flex scenario is given, the lowflex-extension is dropped for the + # data import + if scenario.split("_")[-1] == "lowflex": + scn = scenario.split("_")[0] + else: + scn = scenario + edisgo_grid = self._run_edisgo_task_setup_grid( + mv_grid_id, scn, logger, config, engine + ) + edisgo_grid.save( + directory=os.path.join(results_dir, "grid_data"), + save_topology=True, + save_timeseries=True, + save_results=True, + save_electromobility=True, + save_dsm=True, + save_heatpump=True, + save_overlying_grid=False, + reduce_memory=True, + archive=True, + archive_type="zip", + parameters={"grid_expansion_results": ["equipment_changes"]}, + ) + if "2_specs_overlying_grid" not in config["eDisGo"]["tasks"]: + return {edisgo_grid.topology.id: results_dir} + + # ################### task: specs overlying grid ################## + if "2_specs_overlying_grid" in config["eDisGo"]["tasks"]: + if edisgo_grid is None: + grid_path = os.path.join(results_dir, "grid_data.zip") + edisgo_grid = import_edisgo_from_files( + edisgo_path=grid_path, + import_topology=True, + import_timeseries=True, + import_results=True, + import_electromobility=True, + import_heat_pump=True, + import_dsm=True, + import_overlying_grid=False, + from_zip_archive=True, + ) + edisgo_grid.legacy_grids = False + edisgo_grid = self._run_edisgo_task_specs_overlying_grid( + edisgo_grid, scenario, logger, config, engine + ) + zip_name = 
"grid_data_overlying_grid" + if scenario in ["eGon2035_lowflex", "eGon100RE_lowflex"]: + zip_name += "_lowflex" + edisgo_grid.save( + directory=os.path.join(results_dir, zip_name), + save_topology=True, + save_timeseries=True, + save_results=True, + save_electromobility=True, + save_dsm=True, + save_heatpump=True, + save_overlying_grid=True, + reduce_memory=True, + archive=True, + archive_type="zip", + parameters={"grid_expansion_results": ["equipment_changes"]}, + ) - # Query bus ID for this MV grid - bus_id = self._get_bus_id_from_mv_grid(session, mv_grid_id) + # ################### task: temporal complexity reduction ################## + # task temporal complexity reduction is optional + if "3_temporal_complexity_reduction" in config["eDisGo"]["tasks"]: + if edisgo_grid is None: + if scenario in ["eGon2035", "eGon100RE"]: + zip_name = "grid_data_overlying_grid.zip" + else: + zip_name = "grid_data_overlying_grid_lowflex.zip" + grid_path = os.path.join(results_dir, zip_name) + edisgo_grid = import_edisgo_from_files( + edisgo_path=grid_path, + import_topology=True, + import_timeseries=True, + import_results=True, + import_electromobility=True, + import_heat_pump=True, + import_dsm=True, + import_overlying_grid=True, + from_zip_archive=True, + ) + edisgo_grid.legacy_grids = False + time_intervals = self._run_edisgo_task_temporal_complexity_reduction( + edisgo_grid, logger, config + ) - # Calculate Interface values for this MV grid - specs = get_etragospecs_direct( - session, - bus_id, - self._etrago_network, - self._scn_name, - self._grid_version, - self._pf_post_lopf, - self._max_cos_phi_renewable) - Session.remove() - - # Get ding0 (MV grid) form folder - ding0_filepath = ( - self._ding0_files - + '/ding0_grids__' - + str(mv_grid_id) - + '.pkl') - - if not os.path.isfile(ding0_filepath): - msg = 'No MV grid file for MV grid {}'.format(mv_grid_id) - logger.error(msg) - raise Exception(msg) + # determine whether work flow ends here or continues, and if it continues + # whether time intervals need to be loaded + if "4_optimisation" not in config["eDisGo"]["tasks"]: + return {edisgo_grid.topology.id: results_dir} - # Initalize eDisGo with this MV grid - logger.info(("MV grid {}: Initialize MV grid").format(mv_grid_id)) - - edisgo_grid = EDisGo(ding0_grid=ding0_filepath, - worst_case_analysis='worst-case') - - logger.info(("MV grid {}: Changing eDisGo's voltage configurations " - + "for initial reinforcement").format(mv_grid_id)) - - edisgo_grid.network.config[ - 'grid_expansion_allowed_voltage_deviations'] = { - 'hv_mv_trafo_offset': 0.04, - 'hv_mv_trafo_control_deviation': 0.0, - 'mv_load_case_max_v_deviation': 0.055, - 'mv_feedin_case_max_v_deviation': 0.02, - 'lv_load_case_max_v_deviation': 0.065, - 'lv_feedin_case_max_v_deviation': 0.03, - 'mv_lv_station_load_case_max_v_deviation': 0.02, - 'mv_lv_station_feedin_case_max_v_deviation': 0.01 - } - - # Inital grid reinforcements - logger.info(("MV grid {}: Initial MV grid reinforcement " - + "(worst-case anaylsis)").format(mv_grid_id)) - - edisgo_grid.reinforce() - - # Get costs for initial reinforcement - # TODO: Implement a separate cost function - costs_grouped = \ - edisgo_grid.network.results.grid_expansion_costs.groupby( - ['type']).sum() - costs = pd.DataFrame( - costs_grouped.values, - columns=costs_grouped.columns, - index=[[edisgo_grid.network.id] * len(costs_grouped), - costs_grouped.index]).reset_index() - costs.rename(columns={'level_0': 'grid'}, inplace=True) - - costs_before = costs - - total_costs_before_EUR = 
costs_before['total_costs'].sum() * 1000 - logger.info( - ("MV grid {}: Costs for initial " - + "reinforcement: EUR {}").format( - mv_grid_id, - "{0:,.2f}".format(total_costs_before_EUR))) - - logger.info(( - "MV grid {}: Resetting grid after initial reinforcement" - ).format(mv_grid_id)) - edisgo_grid.network.results = Results(edisgo_grid.network) - # Reload the (original) eDisGo configs - edisgo_grid.network.config = None - - # eTraGo case begins here - logger.info("MV grid {}: eTraGo feed-in case".format(mv_grid_id)) - - # Update eDisGo settings (from config files) with scenario settings - logger.info("MV grid {}: Updating eDisgo configuration".format( - mv_grid_id)) - # Update configs with eGo's scenario settings - self._update_edisgo_configs(edisgo_grid) - - # Generator import for NEP 2035 and eGo 100 scenarios - if self._generator_scn: - logger.info( - 'Importing generators for scenario {}'.format( - self._scn_name) + # ########################## task: optimisation ########################## + if "4_optimisation" in config["eDisGo"]["tasks"]: + if edisgo_grid is None: + if scenario in ["eGon2035", "eGon100RE"]: + zip_name = "grid_data_overlying_grid.zip" + else: + zip_name = "grid_data_overlying_grid_lowflex.zip" + grid_path = os.path.join(results_dir, zip_name) + edisgo_grid = import_edisgo_from_files( + edisgo_path=grid_path, + import_topology=True, + import_timeseries=True, + import_results=True, + import_electromobility=True, + import_heat_pump=True, + import_dsm=True, + import_overlying_grid=True, + from_zip_archive=True, + ) + edisgo_grid.legacy_grids = False + if time_intervals is None: + # load time intervals + time_intervals = pd.read_csv( + os.path.join(results_dir, "selected_time_intervals.csv"), + index_col=0, + ) + for ti in time_intervals.index: + time_steps = time_intervals.at[ti, "time_steps"] + if time_steps is not None: + time_intervals.at[ti, "time_steps"] = pd.date_range( + start=time_steps.split("'")[1], + periods=int(time_steps.split("=")[-2].split(",")[0]), + freq="H", + ) + edisgo_grid = self._run_edisgo_task_optimisation( + edisgo_grid, scenario, logger, time_intervals, results_dir ) - edisgo_grid.import_generators( - generator_scenario=self._generator_scn) - else: - logger.info( - 'No generators imported for scenario {}'.format( - self._scn_name) + zip_name = "grid_data_optimisation" + if scenario in ["eGon2035_lowflex", "eGon100RE_lowflex"]: + zip_name += "_lowflex" + edisgo_grid.save( + directory=os.path.join(results_dir, zip_name), + save_topology=True, + save_timeseries=True, + save_results=True, + save_opf_results=True, + save_electromobility=False, + save_dsm=False, + save_heatpump=False, + save_overlying_grid=False, + reduce_memory=True, + archive=True, + archive_type="zip", + parameters={"grid_expansion_results": ["equipment_changes"]}, + ) + if "5_grid_reinforcement" not in config["eDisGo"]["tasks"]: + return {edisgo_grid.topology.id: results_dir} + + # ########################## reinforcement ########################## + if "5_grid_reinforcement" in config["eDisGo"]["tasks"]: + if edisgo_grid is None: + if scenario in ["eGon2035", "eGon100RE"]: + zip_name = "grid_data_optimisation.zip" + else: + zip_name = "grid_data_optimisation_lowflex.zip" + grid_path = os.path.join(results_dir, zip_name) + edisgo_grid = import_edisgo_from_files( + edisgo_path=grid_path, + import_topology=True, + import_timeseries=True, + import_results=True, + import_electromobility=False, + import_heat_pump=False, + import_dsm=False, + import_overlying_grid=False, + 
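Rebuilding the `pandas.DatetimeIndex` objects from `selected_time_intervals.csv` relies on the stored strings containing `start='...'`, `periods=...` and `freq='H'` fragments; the exact on-disk format is an assumption here. A round trip consistent with the parsing above:

```python
import pandas as pd

# Serialise a time interval the way the parser above expects it.
index = pd.date_range(start="2011-01-17 00:00:00", periods=48, freq="H")
time_steps = f"start='{index[0]}', periods={len(index)}, freq='H'"

# Parse it back, mirroring the split logic used for the CSV column.
restored = pd.date_range(
    start=time_steps.split("'")[1],
    periods=int(time_steps.split("=")[-2].split(",")[0]),
    freq="H",
)
assert (restored == index).all()
```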
from_zip_archive=True, + ) + edisgo_grid.legacy_grids = False + edisgo_grid = self._run_edisgo_task_grid_reinforcement(edisgo_grid, logger) + edisgo_grid.save( + directory=os.path.join( + results_dir, f"grid_data_reinforcement_{scenario}" + ), + save_topology=True, + save_timeseries=True, + save_results=True, + save_electromobility=False, + save_dsm=False, + save_heatpump=False, + save_overlying_grid=False, + reduce_memory=True, + archive=True, + archive_type="zip", ) - edisgo_grid.network.pypsa = None - # Time Series from eTraGo - logger.info('Updating eDisGo timeseries with eTraGo values') - if self._pf_post_lopf: - logger.info('(Including reactive power)') - edisgo_grid.network.timeseries = TimeSeriesControl( - network=edisgo_grid.network, - timeseries_generation_fluctuating=specs['ren_potential'], - timeseries_generation_dispatchable=specs['conv_dispatch'], - timeseries_generation_reactive_power=specs['reactive_power'], - timeseries_load='demandlib', - timeindex=specs['conv_dispatch'].index).timeseries - else: - logger.info('(Only active power)') - edisgo_grid.network.timeseries = TimeSeriesControl( - network=edisgo_grid.network, - timeseries_generation_fluctuating=specs['ren_potential'], - timeseries_generation_dispatchable=specs['conv_dispatch'], - timeseries_load='demandlib', - timeindex=specs['conv_dispatch'].index).timeseries - - # Curtailment - if apply_curtailment: - logger.info('Including Curtailment') - - gens_df = tools.get_gen_info(edisgo_grid.network) - solar_wind_capacities = gens_df.groupby( - by=['type', 'weather_cell_id'] - )['nominal_capacity'].sum() - - curt_cols = [ - i for i in specs['ren_curtailment'].columns - if i in solar_wind_capacities.index - ] + self._status_update(mv_grid_id, "end") - if not curt_cols: - raise ImportError( - ("MV grid {}: Data doesn't match").format(mv_grid_id)) + return {edisgo_grid.topology.id: results_dir} - curt_abs = pd.DataFrame( - columns=pd.MultiIndex.from_tuples(curt_cols)) + def _run_edisgo_task_setup_grid(self, mv_grid_id, scenario, logger, config, engine): + """ + Sets up EDisGo object for future scenario (without specifications from overlying + grid). - for col in curt_abs: - curt_abs[col] = ( - specs['ren_curtailment'][col] - * solar_wind_capacities[col]) + The following data is set up: - edisgo_grid.curtail( - curtailment_timeseries=curt_abs, - methodology='voltage-based', - solver=self._solver, - voltage_threshold=self._curtailment_voltage_threshold) - else: - logger.info('No curtailment applied') - - # Storage Integration - costs_without_storage = None - if storage_integration: - if self._ext_storage: - if not specs['battery_p_series'] is None: - logger.info('Integrating storages in MV grid') - edisgo_grid.integrate_storage( - timeseries=specs['battery_p_series'], - position='distribute_storages_mv', - timeseries_reactive_power=specs[ - 'battery_q_series' - ]) # None if no pf_post_lopf - costs_without_storage = ( - edisgo_grid.network.results.storages_costs_reduction[ - 'grid_expansion_costs_initial'].values[0]) - else: - logger.info('No storage integration') + * load time series of conventional loads + * generator park + * home storage units + * DSM data + * heat pumps including heat demand and COP time series per heat pump + * charging points with standing times, etc. 
as well as charging time series for + uncontrolled charging (done so that public charging points have a charging + time series) and flexibility bands for home and work charging points - logger.info("MV grid {}: eDisGo grid analysis".format(mv_grid_id)) + A dummy time index is set that is later on overwritten by the time index used + in eTraGo. - edisgo_grid.reinforce(timesteps_pfa=self._timesteps_pfa) + Parameters + ---------- + mv_grid_id : int + MV grid ID of the ding0 grid. + scenario : str + Name of scenario to import data for. Possible options are "eGon2035" + and "eGon100RE". + logger : logger handler + config : dict + Dictionary with configuration data. + engine : :sqlalchemy:`sqlalchemy.Engine` + Database engine. - if costs_without_storage is not None: - costs_with_storage = ( - edisgo_grid.network.results.grid_expansion_costs[ - 'total_costs'].sum()) - if costs_with_storage >= costs_without_storage: - logger.warning( - "Storage did not benefit MV grid {}".format( - mv_grid_id)) - st = edisgo_grid.network.mv_grid.graph.nodes_by_attribute( - 'storage') - for storage in st: - tools.disconnect_storage(edisgo_grid.network, storage) + Returns + ------- + :class:`edisgo.EDisGo` - self._status_update(mv_grid_id, 'end') + """ + logger.info(f"MV grid {mv_grid_id}: Start task 'setup_grid'.") - path = os.path.join(self._results, str(mv_grid_id)) - edisgo_grid.network.results.save(path) + logger.info(f"MV grid {mv_grid_id}: Initialize MV grid.") + grid_path = os.path.join( + config["eDisGo"]["grid_path"], + str(mv_grid_id), + ) + if not os.path.isdir(grid_path): + msg = f"MV grid {mv_grid_id}: No grid data found." + logger.error(msg) + raise Exception(msg) - return {edisgo_grid.network.id: path} + edisgo_grid = import_edisgo_from_files(edisgo_path=grid_path) + edisgo_grid.legacy_grids = False + # overwrite configs + edisgo_grid._config = Config() + edisgo_grid.set_timeindex(pd.date_range("1/1/2011", periods=8760, freq="H")) - def _save_edisgo_results(self): + logger.info("Set up load time series of conventional loads.") + edisgo_grid.set_time_series_active_power_predefined( + conventional_loads_ts="oedb", engine=engine, scenario=scenario + ) + edisgo_grid.set_time_series_reactive_power_control( + control="fixed_cosphi", + generators_parametrisation=None, + loads_parametrisation="default", + storage_units_parametrisation=None, + ) + # overwrite p_set of conventional loads as it changes from scenario to scenario + edisgo_grid.topology.loads_df[ + "p_set" + ] = edisgo_grid.timeseries.loads_active_power.max() - if not os.path.exists(self._results): - os.makedirs(self._results) + logger.info("Set up generator park.") + edisgo_grid.import_generators(generator_scenario=scenario, engine=engine) - with open( - os.path.join(self._results, 'edisgo_args.json'), - 'w') as fp: - json.dump(self._edisgo_args, fp) + logger.info("Set up home storage units.") + edisgo_grid.import_home_batteries(scenario=scenario, engine=engine) - self._grid_choice.to_csv(self._results + '/grid_choice.csv') + logger.info("Set up DSM data.") + edisgo_grid.import_dsm(scenario=scenario, engine=engine) - def _laod_edisgo_results(self): + logger.info("Set up heat supply and demand data.") + edisgo_grid.import_heat_pumps(scenario=scenario, engine=engine) - # Load the grid choice form CSV - self._grid_choice = pd.read_csv( - os.path.join(self._csv_import, 'grid_choice.csv'), - index_col=0) - self._grid_choice['represented_grids'] = self._grid_choice.apply( - lambda x: eval(x['represented_grids']), axis=1) + logger.info("Set up 
electromobility data.") + edisgo_grid.import_electromobility( + data_source="oedb", scenario=scenario, engine=engine + ) + # apply charging strategy so that public charging points have a charging + # time series + edisgo_grid.apply_charging_strategy(strategy="dumb") + # get flexibility bands for home and work charging points + edisgo_grid.electromobility.get_flexibility_bands( + edisgo_obj=edisgo_grid, use_case=["home", "work"] + ) - for idx, row in self._grid_choice.iterrows(): - mv_grid_id = int(row['the_selected_network_id']) + logger.info("Run integrity checks.") + edisgo_grid.topology.check_integrity() + edisgo_grid.electromobility.check_integrity() + edisgo_grid.heat_pump.check_integrity() + edisgo_grid.dsm.check_integrity() - try: - # Grid expansion costs - file_path = os.path.join( - self._csv_import, - str(mv_grid_id), - 'grid_expansion_results', - 'grid_expansion_costs.csv') - - grid_expansion_costs = pd.read_csv( - file_path, - index_col=0) - - # powerflow results - pf_path = os.path.join( - self._csv_import, - str(mv_grid_id), - 'powerflow_results', - 'apparent_powers.csv') - - s_res = pd.read_csv( - pf_path, - index_col=0, - parse_dates=True) - - # Configs - config_path = os.path.join( - self._csv_import, - str(mv_grid_id), - 'configs.csv') - - edisgo_config = {} - with open(config_path, 'r') as f: - reader = csv.reader(f) - for row in reader: - a = iter(row[1:]) - edisgo_config[row[0]] = dict(zip(a, a)) - - # PyPSA network - pypsa_path = os.path.join( - self._csv_import, - str(mv_grid_id), - 'pypsa_network') - - imported_pypsa = pypsa.Network() - imported_pypsa.import_from_csv_folder(pypsa_path) - - # Storages - storage_path = os.path.join( - self._csv_import, - str(mv_grid_id), - 'storage_integration_results', - 'storages.csv') - - if os.path.exists(storage_path): - storages = pd.read_csv( - storage_path, - index_col=0) - else: - storages = pd.DataFrame( - columns=['nominal_power', 'voltage_level']) + return edisgo_grid - edisgo_grid = _EDisGoImported( - grid_expansion_costs, - s_res, - storages, - imported_pypsa, - edisgo_config) + def _run_edisgo_task_specs_overlying_grid( + self, edisgo_grid, scenario, logger, config, engine + ): + """ + Gets specifications from overlying grid and integrates them into the EDisGo + object. - self._edisgo_grids[ - mv_grid_id - ] = edisgo_grid + The following data is set up: - logger.info("Imported MV grid {}".format(mv_grid_id)) - except: - self._edisgo_grids[ - mv_grid_id - ] = "This grid failed to reimport" + * set generator time series + * set up thermal storage units + * requirements overlying grid on total renewables curtailment, DSM dispatch, + electromobility charging, heat pump dispatch, + + A dummy time index is set that is later on overwritten by the time index used + in eTraGo + + Parameters + ---------- + edisgo_grid : :class:`edisgo.EDisGo` + EDisGo object. + scenario : str + Name of scenario to import data for. Possible options are "eGon2035" + and "eGon100RE". + logger : logger handler + config : dict + Dictionary with configuration data. + engine : :sqlalchemy:`sqlalchemy.Engine` + Database engine. 
+ + Returns + ------- + :class:`edisgo.EDisGo` + Returns the complete eDisGo container, also including results + + """ + logger.info("Start task 'specs_overlying_grid'.") + + logger.info("Get specifications from eTraGo.") + specs = get_etrago_results_per_bus( + edisgo_grid.topology.id, + self._etrago_network, + self._pf_post_lopf, + self._max_cos_phi_renewable, + ) + snapshots = specs["timeindex"] + # get time steps that don't converge in overlying grid + try: + convergence = pd.read_csv( + os.path.join(config["eGo"]["csv_import_eTraGo"], "pf_solution.csv"), + index_col=0, + parse_dates=True, + ) + ts_not_converged = convergence[~convergence.converged].index + except FileNotFoundError: + logger.info( + "No info on converged time steps, wherefore it is assumed that all " + "converged." + ) + ts_not_converged = pd.Index([]) + except Exception: + raise + + # overwrite previously set dummy time index if year that was used differs from + # year used in etrago + edisgo_year = edisgo_grid.timeseries.timeindex[0].year + etrago_year = snapshots[0].year + if edisgo_year != etrago_year: + timeindex_new_full = pd.date_range( + f"1/1/{etrago_year}", periods=8760, freq="H" + ) + # conventional loads + edisgo_grid.timeseries.loads_active_power.index = timeindex_new_full + edisgo_grid.timeseries.loads_reactive_power.index = timeindex_new_full + # DSM + edisgo_grid.dsm.e_max.index = timeindex_new_full + edisgo_grid.dsm.e_min.index = timeindex_new_full + edisgo_grid.dsm.p_max.index = timeindex_new_full + edisgo_grid.dsm.p_min.index = timeindex_new_full + # COP and heat demand + edisgo_grid.heat_pump.cop_df.index = timeindex_new_full + edisgo_grid.heat_pump.heat_demand_df.index = timeindex_new_full + # flexibility bands + edisgo_grid.electromobility.flexibility_bands[ + "upper_power" + ].index = timeindex_new_full + edisgo_grid.electromobility.flexibility_bands[ + "upper_energy" + ].index = timeindex_new_full + edisgo_grid.electromobility.flexibility_bands[ + "lower_energy" + ].index = timeindex_new_full + # TimeSeries.timeindex + edisgo_grid.timeseries.timeindex = snapshots + + logger.info("Set generator time series.") + # rename carrier to match with carrier names in overlying grid + rename_generator_carriers_edisgo(edisgo_grid) + # active power + edisgo_grid.set_time_series_active_power_predefined( + dispatchable_generators_ts=specs["dispatchable_generators_active_power"], + fluctuating_generators_ts=specs["renewables_potential"], + ) + # reactive power + if self._pf_post_lopf: + # ToDo (low priority) Use eTraGo time series to set reactive power + # (scale by nominal power) + edisgo_grid.set_time_series_manual( + generators_q=specs["generators_reactive_power"].loc[:, []], + ) + pass + else: + edisgo_grid.set_time_series_reactive_power_control( + control="fixed_cosphi", + generators_parametrisation="default", + loads_parametrisation=None, + storage_units_parametrisation=None, + ) + + # ToDo (medium priority) for now additional optimised storage capacity is + # ignored as capacities are very small and optimisation does not offer storage + # positioning + # if specs["storage_units_p_nom"] > 0.3: + # logger.info("Set up large battery storage units.") + # edisgo_grid.add_component( + # comp_type="storage_unit", + # bus=edisgo_grid.topology.mv_grid.station.index[0], + # p_nom=specs["storage_units_p_nom"], + # max_hours=specs["storage_units_max_hours"], + # type="large_storage", + # ) + + logger.info("Set up thermal storage units.") + # decentral + hp_decentral = edisgo_grid.topology.loads_df[ + 
+            edisgo_grid.topology.loads_df.sector == "individual_heating"
+        ]
+        if hp_decentral.empty and specs["thermal_storage_rural_capacity"] > 0:
+            logger.warning(
+                "There are thermal storage units for individual heating but no "
+                "heat pumps."
+            )
+        if not hp_decentral.empty and specs["thermal_storage_rural_capacity"] > 0:
+            tes_cap_min_cumsum = (
+                edisgo_grid.topology.loads_df.loc[hp_decentral.index, "p_set"]
+                .sort_index()
+                .cumsum()
+            )
+            hps_selected = tes_cap_min_cumsum[
+                tes_cap_min_cumsum <= specs["thermal_storage_rural_capacity"]
+            ].index
+
+            # distribute thermal storage capacity to all selected heat pumps depending
+            # on heat pump size
+            tes_cap = (
+                edisgo_grid.topology.loads_df.loc[hps_selected, "p_set"]
+                * specs["thermal_storage_rural_capacity"]
+                / edisgo_grid.topology.loads_df.loc[hps_selected, "p_set"].sum()
+            )
+            edisgo_grid.heat_pump.thermal_storage_units_df = pd.DataFrame(
+                data={
+                    "capacity": tes_cap,
+                    "efficiency": specs["thermal_storage_rural_efficiency"],
+                }
+            )
+        # district heating
+        hp_dh = edisgo_grid.topology.loads_df[
+            edisgo_grid.topology.loads_df.sector.isin(
+                ["district_heating", "district_heating_resistive_heater"]
+            )
+        ]
+        # check if there are as many district heating systems in eTraGo as in eDisGo
+        if hp_dh.empty:
+            if len(specs["feedin_district_heating"].columns) != 0:
+                logger.warning(
+                    f"There are {len(hp_dh.area_id.unique())} district heating "
+                    f"systems in eDisGo and "
+                    f"{len(specs['feedin_district_heating'].columns)} in eTraGo."
+                )
+        else:
+            if len(hp_dh.area_id.unique()) != len(
+                specs["feedin_district_heating"].columns
+            ):
+                logger.warning(
+                    f"There are {len(hp_dh.area_id.unique())} district heating "
+                    f"systems in eDisGo and "
+                    f"{len(specs['feedin_district_heating'].columns)} in eTraGo."
+                )
+        # check that the installed PtH capacity is the same in eTraGo and eDisGo
+        if abs(hp_dh.p_set.sum() - specs["heat_pump_central_p_nom"]) > 1e-3:
             logger.warning(
-                "MV grid {} could not be loaded".format(mv_grid_id))
+                f"Installed capacity of PtH units in district heating differs "
+                f"between eTraGo ({specs['heat_pump_central_p_nom']} MW) and "
+                f"eDisGo ({hp_dh.p_set.sum()} MW)."
+ ) + + if not specs["feedin_district_heating"].empty: + + # map district heating ID to heat bus ID from eTraGo + if scenario.split("_")[-1] == "lowflex": + scn = scenario.split("_")[0] + else: + scn = scenario + map_etrago_heat_bus_to_district_heating_id(specs, scn, engine) + + for dh_id in hp_dh.district_heating_id.unique(): + if dh_id in specs["thermal_storage_central_capacity"].index: + if specs["thermal_storage_central_capacity"].at[dh_id] > 0: + # get PtH unit name to allocate thermal storage unit to + comp_name = hp_dh[hp_dh.district_heating_id == dh_id].index[ + 0 + ] + edisgo_grid.heat_pump.thermal_storage_units_df = pd.concat( + [ + edisgo_grid.heat_pump.thermal_storage_units_df, + pd.DataFrame( + data={ + "capacity": specs[ + "thermal_storage_central_capacity" + ].at[dh_id], + "efficiency": specs[ + "thermal_storage_central_efficiency" + ], + }, + index=[comp_name], + ), + ] + ) + + logger.info("Set requirements from overlying grid.") + # all time series from overlying grid are also kept for low flex scenarios + # in order to afterwards check difference in dispatch between eTraGo and eDisGo + + # curtailment + # scale curtailment by ratio of nominal power in eDisGo and eTraGo + for carrier in specs["renewables_curtailment"].columns: + p_nom_total = specs["renewables_p_nom"][carrier] + p_nom_mv_lv = edisgo_grid.topology.generators_df[ + edisgo_grid.topology.generators_df["type"] == carrier + ].p_nom.sum() + specs["renewables_curtailment"][carrier] *= p_nom_mv_lv / p_nom_total + # check that curtailment does not exceed feed-in (for all converged time steps) + vres_gens = edisgo_grid.topology.generators_df[ + edisgo_grid.topology.generators_df["type"].isin( + specs["renewables_curtailment"].columns + ) + ].index + pot_vres_gens = edisgo_grid.timeseries.generators_active_power.loc[ + :, vres_gens + ].sum(axis=1) + pot_vres_gens.loc[ts_not_converged] = 0.0 + total_curtailment = specs["renewables_curtailment"].loc[:].sum(axis=1) + total_curtailment.loc[ts_not_converged] = 0.0 + diff = pot_vres_gens - total_curtailment + if (diff < 0).any(): + # if curtailment is much larger than feed-in, throw an error + if (diff < -1e-3).any(): + raise ValueError("Curtailment exceeds feed-in!") + # if curtailment is only slightly larger than feed-in, this is due to + # numerical errors and therefore corrected + else: + ts_neg_curtailment = diff[(diff < 0)].index + total_curtailment.loc[ts_neg_curtailment] += diff.loc[ + ts_neg_curtailment + ] + edisgo_grid.overlying_grid.renewables_curtailment = total_curtailment + + # battery storage + # scale storage time series by ratio of nominal power in eDisGo and eTraGo + p_nom_total = specs["storage_units_p_nom"] + p_nom_mv_lv = edisgo_grid.topology.storage_units_df.p_nom.sum() + edisgo_grid.overlying_grid.storage_units_active_power = ( + specs["storage_units_active_power"] * p_nom_mv_lv / p_nom_total + ) + edisgo_grid.overlying_grid.storage_units_soc = specs["storage_units_soc"] + + # DSM + edisgo_grid.overlying_grid.dsm_active_power = specs["dsm_active_power"] + + # BEV + edisgo_grid.overlying_grid.electromobility_active_power = specs[ + "electromobility_active_power" + ] + + # PtH + # scale heat pump time series by ratio of nominal power in eDisGo and eTraGo + p_nom_total = specs["heat_pump_rural_p_nom"] + p_nom_mv_lv = edisgo_grid.topology.loads_df[ + edisgo_grid.topology.loads_df.sector.isin( + ["individual_heating", "individual_heating_resistive_heater"] + ) + ].p_set.sum() + edisgo_grid.overlying_grid.heat_pump_decentral_active_power = ( + 
specs["heat_pump_rural_active_power"] * p_nom_mv_lv / p_nom_total + ) + p_nom_total = specs["heat_pump_central_p_nom"] + p_nom_mv_lv = edisgo_grid.topology.loads_df[ + edisgo_grid.topology.loads_df.sector.isin( + ["district_heating", "district_heating_resistive_heater"] + ) + ].p_set.sum() + edisgo_grid.overlying_grid.heat_pump_central_active_power = ( + specs["heat_pump_central_active_power"] * p_nom_mv_lv / p_nom_total + ) + + # Other feed-in into district heating + edisgo_grid.overlying_grid.feedin_district_heating = specs[ + "feedin_district_heating" + ] + + # Thermal storage units SoC + edisgo_grid.overlying_grid.thermal_storage_units_decentral_soc = specs[ + "thermal_storage_rural_soc" + ] + edisgo_grid.overlying_grid.thermal_storage_units_central_soc = specs[ + "thermal_storage_central_soc" + ] + + # Delete some flex data in case of low flex scenario + if scenario in ["eGon2035_lowflex", "eGon100RE_flex"]: + # delete DSM and flexibility bands to save disk space + edisgo_grid.dsm = edisgo_grid.dsm.__class__() + edisgo_grid.electromobility.flexibility_bands = { + "upper_power": pd.DataFrame(), + "lower_energy": pd.DataFrame(), + "upper_energy": pd.DataFrame(), + } + + logger.info("Run integrity check.") + edisgo_grid.check_integrity() + + return edisgo_grid - def _get_mv_grid_from_bus_id(self, session, bus_id): + def _run_edisgo_task_temporal_complexity_reduction( + self, edisgo_grid, logger, config + ): """ - Queries the MV grid ID for a given eTraGo bus + Runs the temporal complexity reduction to select most critical time periods. Parameters ---------- - bus_id : int - eTraGo bus ID + edisgo_grid : :class:`edisgo.EDisGo` + EDisGo object. + logger : logger handler + config : dict + Dictionary with configuration data. + engine : :sqlalchemy:`sqlalchemy.Engine` + Database engine. Returns ------- - int - MV grid (ding0) ID + :class:`edisgo.EDisGo` """ + logger.info("Start task 'temporal complexity reduction'.") - if self._versioned is True: - ormclass_hvmv_subst = grid.__getattribute__( - 'EgoDpHvmvSubstation' + # get non-converging time steps + try: + convergence = pd.read_csv( + os.path.join(config["eGo"]["csv_import_eTraGo"], "pf_solution.csv"), + index_col=0, + parse_dates=True, + ) + ts_not_converged = convergence[~convergence.converged].index + except FileNotFoundError: + logger.info( + "No info on converged time steps, wherefore it is assumed that all " + "converged." ) - subst_id = session.query( - ormclass_hvmv_subst.subst_id - ).filter( - ormclass_hvmv_subst.otg_id == bus_id, - ormclass_hvmv_subst.version == self._grid_version - ).scalar() - - if self._versioned is False: - ormclass_hvmv_subst = model_draft.__getattribute__( - 'EgoGridHvmvSubstation' + ts_not_converged = [] + except Exception: + raise + + # set time series data at time steps with non-convergence issues to zero + if len(ts_not_converged) > 0: + logger.info( + f"{len(ts_not_converged)} time steps did not converge in overlying " + f"grid. Time series data at time steps with non-convergence issues is " + f"set to zero." 
+            )
-            subst_id = session.query(
-                ormclass_hvmv_subst.subst_id
-            ).filter(
-                ormclass_hvmv_subst.otg_id == bus_id
-            ).scalar()
+            # set data in TimeSeries object to zero
+            attributes = edisgo_grid.timeseries._attributes
+            for attr in attributes:
+                ts = getattr(edisgo_grid.timeseries, attr)
+                if not ts.empty:
+                    ts.loc[ts_not_converged, :] = 0
+                    setattr(edisgo_grid.timeseries, attr, ts)
+            # set data in OverlyingGrid object to zero
+            attributes = edisgo_grid.overlying_grid._attributes
+            for attr in attributes:
+                ts = getattr(edisgo_grid.overlying_grid, attr)
+                if not ts.empty and "soc" not in attr:
+                    if isinstance(ts, pd.Series):
+                        ts.loc[ts_not_converged] = 0
+                    else:
+                        ts.loc[ts_not_converged, :] = 0
+                    setattr(edisgo_grid.overlying_grid, attr, ts)
+
+        # distribute overlying grid data
+        logger.info("Distribute overlying grid data.")
+        edisgo_grid = distribute_overlying_grid_requirements(edisgo_grid)
+
+        # get critical time intervals
+        results_dir = os.path.join(self._results, str(edisgo_grid.topology.id))
+        time_intervals = get_most_critical_time_intervals(
+            edisgo_grid,
+            percentage=1.0,
+            time_steps_per_time_interval=168,
+            time_step_day_start=4,
+            save_steps=True,
+            path=results_dir,
+            use_troubleshooting_mode=True,
+            overloading_factor=0.95,
+            voltage_deviation_factor=0.95,
+        )

-        return subst_id
+        # drop time intervals with non-converging time steps
+        if len(ts_not_converged) > 0:
+
+            # check overloading time intervals
+            for ti in time_intervals.index:
+                # check if any time step in the time interval did not converge
+                non_converged_ts_in_ti = [
+                    _
+                    for _ in ts_not_converged
+                    if _ in time_intervals.at[ti, "time_steps_overloading"]
+                ]
+                if len(non_converged_ts_in_ti) > 0:
+                    # if any time step did not converge, set time steps to None
+                    time_intervals.at[ti, "time_steps_overloading"] = None
+
+            # check voltage issues time intervals
+            for ti in time_intervals.index:
+                # check if any time step in the time interval did not converge
+                non_converged_ts_in_ti = [
+                    _
+                    for _ in ts_not_converged
+                    if _ in time_intervals.at[ti, "time_steps_voltage_issues"]
+                ]
+                if len(non_converged_ts_in_ti) > 0:
+                    # if any time step did not converge, set time steps to None
+                    time_intervals.at[ti, "time_steps_voltage_issues"] = None
+
+        # select time intervals
+        if not time_intervals.loc[:, "time_steps_overloading"].dropna().empty:
+            tmp = time_intervals.loc[:, "time_steps_overloading"].dropna()
+            time_interval_1 = tmp.iloc[0]
+            time_interval_1_ind = tmp.index[0]
+        else:
+            time_interval_1 = pd.Index([])
+            time_interval_1_ind = None
+        if not time_intervals.loc[:, "time_steps_voltage_issues"].dropna().empty:
+            tmp = time_intervals.loc[:, "time_steps_voltage_issues"].dropna()
+            time_interval_2 = tmp.iloc[0]
+            time_interval_2_ind = tmp.index[0]
+        else:
+            time_interval_2 = pd.Index([])
+            time_interval_2_ind = None

-    def _get_bus_id_from_mv_grid(self, session, subst_id):
+        # check if time intervals overlap
+        overlap = [_ for _ in time_interval_1 if _ in time_interval_2]
+        if len(overlap) > 0:
+            logger.info(
+                "Selected time intervals overlap. Trying to find another "
+                "time interval in voltage_issues intervals."
+            )
+            # check if time interval without overlap can be found
+            for ti in time_intervals.loc[:, "time_steps_voltage_issues"].dropna().index:
+                overlap = [
+                    _
+                    for _ in time_interval_1
+                    if _ in time_intervals.at[ti, "time_steps_voltage_issues"]
+                ]
+                if len(overlap) == 0:
+                    time_interval_2 = time_intervals.at[ti, "time_steps_voltage_issues"]
+                    time_interval_2_ind = ti
+                    break
+        overlap = [_ for _ in time_interval_1 if _ in time_interval_2]
+        if len(overlap) > 0:
+            logger.info(
+                "Selected time intervals overlap. Trying to find another "
+                "time interval in overloading intervals."
+            )
+            # check if time interval without overlap can be found
+            for ti in time_intervals.loc[:, "time_steps_overloading"].dropna().index:
+                overlap = [
+                    _
+                    for _ in time_interval_2
+                    if _ in time_intervals.at[ti, "time_steps_overloading"]
+                ]
+                if len(overlap) == 0:
+                    time_interval_1 = time_intervals.at[ti, "time_steps_overloading"]
+                    time_interval_1_ind = ti
+                    break
+
+        overlap = [_ for _ in time_interval_1 if _ in time_interval_2]
+        if len(overlap) > 0:
+            logger.info(
+                "Overlap of selected time intervals cannot be avoided. "
+                "Time intervals are therefore concatenated."
+            )
+            time_interval_1 = (
+                time_interval_1.append(time_interval_2).unique().sort_values()
+            )
+            time_interval_2 = None
+
+        # save to csv
+        percentage = pd.Series(dtype="object")
+        percentage["time_interval_1"] = (
+            None
+            if time_interval_1_ind is None
+            else time_intervals.at[
+                time_interval_1_ind, "percentage_max_overloaded_components"
+            ]
+        )
+        percentage["time_interval_2"] = (
+            None
+            if time_interval_2_ind is None
+            else time_intervals.at[
+                time_interval_2_ind, "percentage_buses_max_voltage_deviation"
+            ]
+        )
+        pd.DataFrame(
+            {
+                "time_steps": [time_interval_1, time_interval_2],
+                "percentage": percentage,
+            },
+            index=["time_interval_1", "time_interval_2"],
+        ).to_csv(os.path.join(results_dir, "selected_time_intervals.csv"))
+
+        return time_interval_1, time_interval_2
+
+    def _run_edisgo_task_optimisation(
+        self,
+        edisgo_grid,
+        scenario,
+        logger,
+        time_intervals,
+        results_dir,
+        reduction_factor=0.3,
+    ):
         """
-        Queries the eTraGo bus ID for given MV grid (ding0) ID
+        Runs the dispatch optimisation.

         Parameters
         ----------
-        subst_id : int
-            MV grid (ding0) ID
+        edisgo_grid : :class:`edisgo.EDisGo`
+            EDisGo object.
+        scenario : str
+            Name of scenario to define flexible components. Possible options are
+            "eGon2035", "eGon2035_lowflex", "eGon100RE", and "eGon100RE_lowflex".
+        logger : logger handler
+        time_intervals : pd.DataFrame
+            Dataframe with information on the time intervals to consider in the
+            optimisation in column "time_steps".
+        results_dir : str
+            Directory where to store OPF results.
+        reduction_factor : float
+            Reduction factor to use in spatial complexity reduction. By default,
+            this is set to 0.3.
Returns ------- - int - eTraGo bus ID + :class:`edisgo.EDisGo` """ + logger.info("Start task 'optimisation'.") - if self._versioned is True: - ormclass_hvmv_subst = grid.__getattribute__( - 'EgoDpHvmvSubstation' - ) - bus_id = session.query( - ormclass_hvmv_subst.otg_id - ).filter( - ormclass_hvmv_subst.subst_id == subst_id, - ormclass_hvmv_subst.version == self._grid_version - ).scalar() - - if self._versioned is False: - ormclass_hvmv_subst = model_draft.__getattribute__( - 'EgoGridHvmvSubstation' + # prepare district heating data + # make sure district heating ID is string of integer not float + columns_rename = [ + str(int(float(_))) + for _ in edisgo_grid.overlying_grid.feedin_district_heating.columns + ] + if len(columns_rename) > 0: + edisgo_grid.overlying_grid.feedin_district_heating.columns = columns_rename + cols = edisgo_grid.overlying_grid.thermal_storage_units_central_soc.columns + columns_rename = [str(int(float(_))) for _ in cols] + if len(columns_rename) > 0: + edisgo_grid.overlying_grid.thermal_storage_units_central_soc.columns = ( + columns_rename ) - bus_id = session.query( - ormclass_hvmv_subst.otg_id - ).filter( - ormclass_hvmv_subst.subst_id == subst_id - ).scalar() + # aggregate PtH units in same district heating network and subtract feed-in + # from other heat sources from heat demand in district heating network + aggregate_district_heating_components( + edisgo_grid, + feedin_district_heating=edisgo_grid.overlying_grid.feedin_district_heating, + ) + # apply operating strategy so that inflexible heat pumps (without heat + # storage units) have a time series + edisgo_grid.apply_heat_pump_operating_strategy() + + timeindex = pd.Index([]) + for ti in time_intervals.index: + time_steps = time_intervals.at[ti, "time_steps"] + if time_steps is None: + continue + else: + timeindex = timeindex.append(pd.Index(time_steps)) + # copy edisgo object + edisgo_copy = deepcopy(edisgo_grid) + # temporal complexity reduction + reduce_timeseries_data_to_given_timeindex(edisgo_copy, time_steps) + + # spatial complexity reduction + edisgo_copy.spatial_complexity_reduction( + mode="kmeansdijkstra", + cluster_area="feeder", + reduction_factor=reduction_factor, + reduction_factor_not_focused=False, + ) - return bus_id + # OPF + # flexibilities in full flex: DSM, decentral and central PtH units, + # curtailment, EVs, storage units + # flexibilities in low flex: curtailment, storage units + psa_net = edisgo_copy.to_pypsa() + if scenario in ["eGon2035", "eGon100RE"]: + flexible_loads = edisgo_copy.dsm.p_max.columns + # flexible_hps = ( + # edisgo_copy.heat_pump.thermal_storage_units_df.index.values + # ) + flexible_cps = psa_net.loads.loc[ + psa_net.loads.index.str.contains("home") + | (psa_net.loads.index.str.contains("work")) + ].index.values + else: + flexible_loads = [] + # flexible_hps = [] + flexible_cps = [] + flexible_hps = edisgo_copy.heat_pump.heat_demand_df.columns.values + flexible_storage_units = ( + edisgo_copy.topology.storage_units_df.index.values + ) + edisgo_copy.pm_optimize( + flexible_cps=flexible_cps, + flexible_hps=flexible_hps, + flexible_loads=flexible_loads, + flexible_storage_units=flexible_storage_units, + s_base=1, + opf_version=4, + silence_moi=False, + method="soc", + ) -class _ETraGoData: - """ - Container for minimal eTraGo network. This minimal network is required - for the parallelization of eDisGo. 
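+            # pm_optimize is expected to write the optimised dispatch back into
+            # the time series of the reduced copy; the copy is archived below
+            # and the dispatch is afterwards transferred to the unreduced
+            # edisgo object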
+ # save OPF results + zip_name = f"opf_results_{ti}" + if scenario in ["eGon2035_lowflex", "eGon100RE_lowflex"]: + zip_name += "_lowflex" + edisgo_copy.save( + directory=os.path.join(results_dir, zip_name), + save_topology=True, + save_timeseries=False, + save_results=False, + save_opf_results=True, + reduce_memory=True, + archive=True, + archive_type="zip", + ) - """ + # write flexibility dispatch results to spatially unreduced edisgo + # object + edisgo_grid.timeseries._loads_active_power.loc[ + time_steps, : + ] = edisgo_copy.timeseries.loads_active_power + edisgo_grid.timeseries._loads_reactive_power.loc[ + time_steps, : + ] = edisgo_copy.timeseries.loads_reactive_power + edisgo_grid.timeseries._generators_active_power.loc[ + time_steps, : + ] = edisgo_copy.timeseries.generators_active_power + edisgo_grid.timeseries._generators_reactive_power.loc[ + time_steps, : + ] = edisgo_copy.timeseries.generators_reactive_power - def __init__(self, etrago_network): + try: + edisgo_grid.timeseries._storage_units_active_power + except AttributeError: + edisgo_grid.timeseries.storage_units_active_power = pd.DataFrame( + index=edisgo_grid.timeseries.timeindex + ) + edisgo_grid.timeseries._storage_units_active_power.loc[ + time_steps, + edisgo_copy.timeseries.storage_units_active_power.columns, + ] = edisgo_copy.timeseries.storage_units_active_power + try: + edisgo_grid.timeseries._storage_units_reactive_power + except AttributeError: + edisgo_grid.timeseries.storage_units_reactive_power = pd.DataFrame( + index=edisgo_grid.timeseries.timeindex + ) + edisgo_grid.timeseries._storage_units_reactive_power.loc[ + time_steps, + edisgo_copy.timeseries.storage_units_reactive_power.columns, + ] = edisgo_copy.timeseries.storage_units_reactive_power + + # write OPF results back + edisgo_grid.opf_results.overlying_grid = pd.concat( + [ + edisgo_grid.opf_results.overlying_grid, + edisgo_copy.opf_results.overlying_grid, + ] + ) + edisgo_grid.opf_results.battery_storage_t.p = pd.concat( + [ + edisgo_grid.opf_results.battery_storage_t.p, + edisgo_copy.opf_results.battery_storage_t.p, + ] + ) + edisgo_grid.opf_results.battery_storage_t.e = pd.concat( + [ + edisgo_grid.opf_results.battery_storage_t.e, + edisgo_copy.opf_results.battery_storage_t.e, + ] + ) - self.snapshots = getattr( - etrago_network, "snapshots") - self.storage_units = getattr( - etrago_network, "storage_units") - self.storage_units_t = getattr( - etrago_network, "storage_units_t") - self.generators = getattr( - etrago_network, "generators") - self.generators_t = getattr( - etrago_network, "generators_t") + edisgo_grid.timeseries.timeindex = timeindex + return edisgo_grid + def _run_edisgo_task_grid_reinforcement(self, edisgo_grid, logger): + """ + Runs the grid reinforcement. -class _EDisGoImported: - """ - Imported (reduced) eDisGo class. - This class allows the import reduction to only the attributes used in eGo - """ + Parameters + ---------- + edisgo_grid : :class:`edisgo.EDisGo` + EDisGo object. 
+        logger : logger handler

-    def __init__(
-            self,
-            grid_expansion_costs,
-            s_res,
-            storages,
-            pypsa,
-            edisgo_config):
+        Returns
+        -------
+        :class:`edisgo.EDisGo`

-        self.network = _NetworkImported(
-            grid_expansion_costs,
-            s_res,
-            storages,
-            pypsa,
-            edisgo_config)
+        """
+        logger.info("Start task 'grid_reinforcement'.")

+        # overwrite configs with new configs
+        edisgo_grid._config = Config()

-class _NetworkImported:
-    """
-    Reduced eDisG network class, used of eGo's reimport
-    """
+        edisgo_grid = enhanced_reinforce_grid(
+            edisgo_grid,
+            activate_cost_results_disturbing_mode=True,
+            separate_lv_grids=True,
+            separation_threshold=2,
+            copy_grid=False,
+        )
+        return edisgo_grid
+
+    def _save_edisgo_results(self):
+        results_dir = self._results
+        if not os.path.exists(results_dir):
+            os.makedirs(results_dir)
+
+        with open(os.path.join(results_dir, "edisgo_args.json"), "w") as fp:
+            json.dump(self._edisgo_args, fp)

-    def __init__(
-            self,
-            grid_expansion_costs,
-            s_res,
-            storages,
-            pypsa,
-            edisgo_config):
+        self._grid_choice.to_csv(os.path.join(results_dir, "grid_choice.csv"))

-        self.results = _ResultsImported(
-            grid_expansion_costs,
-            s_res,
-            storages)
+    def _load_edisgo_results(self):
+        """
+        Loads eDisGo results for all specified grids into ``self._edisgo_grids``.

-        self.pypsa = pypsa
-        self.config = edisgo_config
+        """

-class _ResultsImported:
+        # Load the grid choice from CSV
+        results_dir = self._results
+        self._grid_choice = pd.read_csv(
+            os.path.join(results_dir, "grid_choice.csv"), index_col=0
+        )
+        self._grid_choice["represented_grids"] = self._grid_choice.apply(
+            lambda x: eval(x["represented_grids"]), axis=1
+        )
+
+        for idx, row in self._grid_choice.iterrows():
+            mv_grid_id = int(row["the_selected_network_id"])
+
+            try:
+                edisgo_grid = import_edisgo_from_files(
+                    edisgo_path=os.path.join(self._csv_import, str(mv_grid_id)),
+                    import_topology=True,
+                    import_timeseries=False,
+                    import_results=True,
+                    import_electromobility=False,
+                    from_zip_archive=True,
+                    dtype="float32",
+                    parameters={
+                        "powerflow_results": ["pfa_p", "pfa_q"],
+                        "grid_expansion_results": ["grid_expansion_costs"],
+                    },
+                )
+
+                self._edisgo_grids[mv_grid_id] = edisgo_grid
+
+                logger.info("Imported MV grid {}".format(mv_grid_id))
+            except:  # noqa: E722
+                self._edisgo_grids[mv_grid_id] = "This grid failed to reimport"
+
+                logger.warning("MV grid {} could not be loaded".format(mv_grid_id))
+
+
+class _ETraGoData:
     """
-    Reduced eDisG results class, used of eGo's reimport
+    Container for minimal eTraGo network.
+
+    This minimal network only contains information relevant for eDisGo.
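+
+    Kept are the BEV charger and DSM links, the rural and central heat links
+    and heat stores, the central gas CHP links, all generators, and the
+    battery storage units, in each case filtered by carrier and reduced to
+    the columns and time series used by eGo.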
+
+    Parameters
+    ----------
+    etrago_network : :pypsa:`PyPSA.Network`
+
+    """

-    def __init__(
-            self,
-            grid_expansion_costs,
-            s_res,
-            storages):
+    def __init__(self, etrago_network):
+        def filter_by_carrier(
+            etrago_network_obj, component, carrier, like=True, timeseries=True
+        ):
+            def filter_df_by_carrier(df):
+                if isinstance(carrier, str):
+                    if like:
+                        return df[df.carrier.str.contains(carrier)]
+                    else:
+                        return df[df.carrier == carrier]
+                elif isinstance(carrier, list):
+                    return df[df.carrier.isin(carrier)]
+                elif carrier is None:
+                    return df
+
+            if timeseries:
+                attribute_to_save = {
+                    "links": "p0",
+                    "generators": "p",
+                    "stores": "p",
+                    "storage_units": "p",
+                }
+                attribute_to_save = attribute_to_save[component]
+
+                df_to_filter = getattr(
+                    getattr(etrago_network_obj, component + "_t"), attribute_to_save
+                )
+                df = df_to_filter.loc[
+                    :,
+                    filter_df_by_carrier(getattr(etrago_network_obj, component)).index,
+                ]
+            else:
+                columns_to_save = {
+                    "links": ["carrier", "p_nom"],
+                    "generators": ["carrier", "p_nom"],
+                    "stores": ["carrier", "e_nom"],
+                    "storage_units": ["carrier", "p_nom", "max_hours"],
+                }
+                columns_to_save = columns_to_save[component]
+
+                df_to_filter = getattr(etrago_network_obj, component)
+                df = filter_df_by_carrier(df_to_filter)
+                df = df[columns_to_save]
+
+            unique_carriers = filter_df_by_carrier(
+                getattr(etrago_network_obj, component)
+            ).carrier.unique()
+            logger.debug(
+                f"{component}, {carrier}, {timeseries}, {df.shape}, {unique_carriers}"
+            )
+
+            return df
+
+        logger.debug(
+            f"Carriers in links " f"{etrago_network.network.links.carrier.unique()}"
+        )
+        logger.debug(
+            f"Carriers in generators "
+            f"{etrago_network.network.generators.carrier.unique()}"
+        )
+        logger.debug(
+            f"Carriers in stores " f"{etrago_network.network.stores.carrier.unique()}"
+        )
+        logger.debug(
+            f"Carriers in storage_units "
+            f"{etrago_network.network.storage_units.carrier.unique()}"
+        )
+
+        self.snapshots = etrago_network.network.snapshots
+
+        self.bev_charger = filter_by_carrier(
+            etrago_network.network, "links", "BEV", timeseries=False
+        )
+        self.bev_charger_t = filter_by_carrier(
+            etrago_network.network, "links", "BEV", timeseries=True
+        )
+        self.dsm = filter_by_carrier(
+            etrago_network.network, "links", "dsm", timeseries=False
+        )
+        self.dsm_t = filter_by_carrier(
+            etrago_network.network, "links", "dsm", timeseries=True
+        )
+
+        self.rural_heat_t = filter_by_carrier(
+            etrago_network.network, "links", "rural_heat_pump", timeseries=True
+        )
+        self.rural_heat_store = filter_by_carrier(
+            etrago_network.network, "stores", "rural_heat_store", timeseries=False
+        )

-        self.grid_expansion_costs = grid_expansion_costs
-        self.storages = storages
-        self._s_res = s_res
+        self.central_heat_t = filter_by_carrier(
+            etrago_network.network,
+            "links",
+            ["central_heat_pump", "central_resistive_heater"],
+            timeseries=True,
+        )
+        self.central_heat_store = filter_by_carrier(
+            etrago_network.network, "stores", "central_heat_store", timeseries=False
+        )

-    def s_res(self):
-        return self._s_res
+        self.central_gas_chp_t = filter_by_carrier(
+            etrago_network.network, "links", ["central_gas_CHP"], timeseries=True
+        )
+
+        self.generators = filter_by_carrier(
+            etrago_network.network, "generators", None, timeseries=False
+        )
+        self.generators_t = filter_by_carrier(
+            etrago_network.network, "generators", None, timeseries=True
+        )
+
+        self.battery_storage_units = filter_by_carrier(
+            etrago_network.network, "storage_units", "battery", timeseries=False
+        )
+        self.battery_storage_units_t = 
filter_by_carrier( + etrago_network.network, "storage_units", "battery", timeseries=True + ) def parallelizer( - ding0_id_list, - func, - func_arguments, - max_calc_time, - workers=mp2.cpu_count(), - worker_lifetime=1): + ding0_id_list, + func, + func_arguments, + max_calc_time, + workers=mp2.cpu_count(), + worker_lifetime=1, +): """ Use python multiprocessing toolbox for parallelization @@ -1369,6 +2004,7 @@ def parallelizer( containers : dict of :class:`~.edisgo.EDisGo` Dict of EDisGo instances keyed by its ID """ + def collect_pool_results(result): """ Store results from parallelized calculation in structured manner @@ -1390,14 +2026,13 @@ def error_callback(key): def initializer(): import pickle + pickle.DEFAULT_PROTOCOL = 4 import dill - dill.settings['protocol'] = 4 - pool = mp2.Pool( - workers, - initializer=initializer, - maxtasksperchild=worker_lifetime) + dill.settings["protocol"] = 4 + + pool = mp2.Pool(workers, initializer=initializer, maxtasksperchild=worker_lifetime) result_objects = {} for ding0_id in ding0_id_list: @@ -1407,34 +2042,32 @@ def initializer(): func=func, args=edisgo_args, callback=collect_pool_results, - error_callback=error_callback(ding0_id)) + error_callback=error_callback(ding0_id), + ) errors = {} successes = {} start = datetime.now() - end = (start + td(hours=max_calc_time)).isoformat(' ') - logger.info( - "Jobs started. They will time out at {}." - .format(end[:end.index('.')])) + end = (start + td(hours=max_calc_time)).isoformat(" ") + logger.info("Jobs started. They will time out at {}.".format(end[: end.index(".")])) current = datetime.now() time_spent = 0 - while (result_objects and - ((current - start).seconds <= max_calc_time_seconds)): + while result_objects and ((current - start).seconds <= max_calc_time_seconds): done = [] tick = (current - start).seconds * 100 / max_calc_time_seconds if tick - time_spent >= 1 or tick > 100: hours_to_go = (current - start).seconds / 3600 - logger.info("{:.2f}% ({:.2f}/{}h) spent" - .format(tick, hours_to_go, max_calc_time)) - logger.info("Jobs time out in {:.2f}h." - .format(max_calc_time - hours_to_go)) + logger.info( + "{:.2f}% ({:.2f}/{}h) spent".format(tick, hours_to_go, max_calc_time) + ) + logger.info("Jobs time out in {:.2f}h.".format(max_calc_time - hours_to_go)) time_spent = tick - for grid, result in result_objects.items(): + for grid_id, result in result_objects.items(): if result.ready(): logger.info( - "MV grid {} ready. Trying to `get` the result." - .format(grid)) - done.append(grid) + "MV grid {} ready. Trying to `get` the result.".format(grid_id) + ) + done.append(grid_id) if not result.successful(): try: # We already know that this was not successful, so the @@ -1443,18 +2076,17 @@ def initializer(): result.get() except Exception as e: logger.warning( - "MV grid {} failed due to {e!r}: '{e}'." - .format(grid, e=e)) - errors[grid] = e + "MV grid {} failed due to {e!r}: '{e}'.".format( + grid_id, e=e + ) + ) + errors[grid_id] = e else: - logger.info( - "MV grid {} calculated successfully.".format(grid)) - successes[grid] = result.get() - logger.info( - "Done `get`ting the result for MV grid {}." 
-                    .format(grid))
-            for grid in done:
-                del result_objects[grid]
+                    logger.info("MV grid {} calculated successfully.".format(grid_id))
+                    successes[grid_id] = result.get()
+                logger.info("Done `get`ting the result for MV grid {}.".format(grid_id))
+        for grid_id in done:
+            del result_objects[grid_id]

         sleep(1)
         current = datetime.now()
@@ -1471,26 +2103,26 @@ def initializer():
     end = datetime.now()
     delta = end - start

-    logger.info("Execution finished after {:.2f} hours".format(
-        delta.seconds / 3600))
+    logger.info("Execution finished after {:.2f} hours".format(delta.seconds / 3600))

     done = []
-    for grid, result in result_objects.items():
-        done.append(grid)
+    for grid_id, result in result_objects.items():
+        done.append(grid_id)
         try:
-            successes[grid] = result.get(timeout=0)
-            logger.info("MV grid {} calculated successfully.".format(grid))
+            successes[grid_id] = result.get(timeout=0)
+            logger.info("MV grid {} calculated successfully.".format(grid_id))
         except Exception as e:
             logger.warning(
-                "MV grid {} failed due to {e!r}: '{e}'.".format(grid, e=e))
-            errors[grid] = e
-    for grid in done:
-        del result_objects[grid]
+                "MV grid {} failed due to {e!r}: '{e}'.".format(grid_id, e=e)
+            )
+            errors[grid_id] = e
+    for grid_id in done:
+        del result_objects[grid_id]

     if errors:
         logger.info("MV grid calculation error details:")
-        for grid, error in errors.items():
-            logger.info("  {}".format(grid))
+        for grid_id, error in errors.items():
+            logger.info("  {}".format(grid_id))
             strings = TracebackException.from_exception(error).format()
             lines = [line for string in strings for line in string.split("\n")]
             for line in lines:
diff --git a/ego/tools/interface.py b/ego/tools/interface.py
new file mode 100644
index 00000000..e5781087
--- /dev/null
+++ b/ego/tools/interface.py
@@ -0,0 +1,847 @@
+# -*- coding: utf-8 -*-
+# Copyright 2016-2018 Europa-Universität Flensburg,
+# Flensburg University of Applied Sciences,
+# Centre for Sustainable Energy Systems
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Affero General Public License as
+# published by the Free Software Foundation; either version 3 of the
+# License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Affero General Public License for more details.
+#
+# You should have received a copy of the GNU Affero General Public License
+# along with this program. If not, see .
+
+# File description
+"""
+This file contains all eGo interface functions
+"""
+
+__copyright__ = "Europa-Universität Flensburg, " "Centre for Sustainable Energy Systems"
+__license__ = "GNU Affero General Public License Version 3 (AGPL-3.0)"
+__author__ = "wolf_bunke,maltesc,mltja"
+
+import logging
+import math
+import os
+import time
+
+import pandas as pd
+
+if "READTHEDOCS" not in os.environ:
+    from sqlalchemy import func
+
+    from ego.mv_clustering import database
+
+logger = logging.getLogger(__name__)
+
+
+class ETraGoMinimalData:
+    """
+    Container for minimal eTraGo network.
+
+    This minimal network only contains information relevant for eDisGo.
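+
+    Only the components ``storage_units``, ``stores``, ``generators``,
+    ``links`` and ``loads`` are kept, each reduced to the columns and time
+    series attributes used by the interface functions in this module. A
+    minimal usage sketch (assuming ``etrago_network`` is the solved PyPSA
+    network)::
+
+        etrago_min = ETraGoMinimalData(etrago_network)
+        dispatch = etrago_min.generators_t["p"]  # filtered dispatch time series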
+
+    Parameters
+    ----------
+    etrago_network : :pypsa:`PyPSA.Network`
+
+    """
+
+    def __init__(self, etrago_network):
+        def set_filtered_attribute(etrago_network_obj, component):
+
+            # filter components
+            columns_to_save = {
+                "links": [
+                    "bus0",
+                    "bus1",
+                    "carrier",
+                    "p_nom",
+                    "p_nom_opt",
+                    "efficiency",
+                ],
+                "generators": ["bus", "carrier", "p_nom", "p_nom_opt"],
+                "stores": ["bus", "carrier", "e_nom", "e_nom_opt"],
+                "storage_units": [
+                    "bus",
+                    "carrier",
+                    "p_nom_opt",
+                    "p_nom_min",
+                    "p_nom_extendable",
+                    "max_hours",
+                ],
+                "loads": ["bus", "p_set"],
+            }
+            columns_to_save = columns_to_save[component]
+
+            df = getattr(etrago_network_obj, component)
+
+            logger.info(
+                f"Component: {component} has unique carriers: {df.carrier.unique()}"
+            )
+
+            setattr(self, component, df[columns_to_save])
+
+            # filter components timeseries
+            attribute_to_save = {
+                "links": ["p0", "p1"],
+                "generators": ["p", "p_max_pu", "q"],
+                "stores": ["p", "e"],
+                "storage_units": ["p", "q", "state_of_charge"],
+                "loads": ["p"],
+            }
+            attribute_to_save = attribute_to_save[component]
+
+            component_timeseries_dict = getattr(etrago_network_obj, component + "_t")
+
+            new_component_timeseries_dict = {
+                attribute: component_timeseries_dict[attribute]
+                for attribute in attribute_to_save
+            }
+
+            setattr(self, component + "_t", new_component_timeseries_dict)
+
+        t_start = time.perf_counter()
+
+        self.snapshots = etrago_network.snapshots
+
+        components = ["storage_units", "stores", "generators", "links", "loads"]
+        for selected_component in components:
+            set_filtered_attribute(etrago_network, selected_component)
+
+        logger.info(f"Data selection time {time.perf_counter() - t_start}")
+
+
+def get_etrago_results_per_bus(bus_id, etrago_obj, pf_post_lopf, max_cos_phi_ren):
+    """
+    Extracts the eTraGo results for the given bus and returns the interface
+    values as a dictionary of corresponding dataframes.
+
+    Parameters
+    ----------
+    bus_id : int
+        ID of the corresponding HV bus
+    etrago_obj: :class:`etrago.tools.io.NetworkScenario`
+        eTraGo network object compiled by :meth:`etrago.appl.etrago`
+    pf_post_lopf : bool
+        Indicates whether a power flow was run after the LOPF.
+    max_cos_phi_ren : float or None
+        If not None, the maximum reactive power is set by the given power factor
+        according to the dispatched active power.
+
+    Returns
+    -------
+    dict(str: :pandas:`pandas.DataFrame`)
+        Dataframes used as eDisGo inputs.
+
+        * 'timeindex'
+            Time index of the eTraGo object.
+            Type: pd.DatetimeIndex
+
+        * 'dispatchable_generators_active_power'
+            Normalised active power dispatch of dispatchable generators per
+            technology in p.u. at the given bus.
+            Type: pd.DataFrame
+            Columns: Carrier
+            Unit: pu
+
+        * 'dispatchable_generators_reactive_power'
+            Normalised reactive power dispatch of dispatchable generators per
+            technology in p.u. at the given bus.
+            Type: pd.DataFrame
+            Columns: Carrier
+            Unit: pu
+
+        * 'renewables_potential'
+            Normalised weather dependent feed-in potential of fluctuating generators
+            per technology (solar / wind) in p.u. at the given bus.
+            Type: pd.DataFrame
+            Columns: Carrier
+            Unit: pu
+
+        * 'renewables_curtailment'
+            Curtailment of fluctuating generators per
+            technology (solar / wind) in MW at the given bus. This curtailment can also
+            include curtailment of plants at the HV side of the HV/MV station and
+            therefore needs to be scaled using the quotient of installed power at the
+            MV side and installed power at the HV side.
+            Type: pd.DataFrame
+            Columns: Carrier
+            Unit: MW
+
+        * 'renewables_dispatch_reactive_power'
+            Normalised reactive power time series of fluctuating generators per
+            technology in p.u. at the given bus.
+            Type: pd.DataFrame
+            Columns: Carrier
+            Unit: pu
+
+        * 'renewables_p_nom'
+            Installed capacity of fluctuating generators per
+            technology (solar / wind) at the given bus.
+            Type: pd.Series with carrier in index
+            Unit: MW
+
+        * 'storage_units_p_nom'
+            Storage unit nominal power.
+            Type: float
+            Unit: MW
+
+        * 'storage_units_max_hours'
+            Maximum number of hours the storage units can discharge at p_nom,
+            starting from an SoC of 1.
+            Type: float
+            Unit: h
+
+        * 'storage_units_active_power'
+            Active power time series of battery storage units at the given bus.
+            Type: pd.Series
+            Unit: MW
+
+        * 'storage_units_reactive_power'
+            Reactive power time series of battery storage units at the given bus.
+            Type: pd.Series
+            Unit: MVar
+
+        * 'storage_units_soc'
+            State of charge in p.u. of battery storage units at the given bus.
+            Type: pd.Series
+            Unit: pu
+
+        * 'dsm_active_power'
+            Active power time series of DSM units at the given bus.
+            Type: pd.Series
+            Unit: MW
+
+        * 'heat_pump_rural_active_power'
+            Active power time series of PtH units for individual heating at the given
+            bus.
+            Type: pd.Series
+            Unit: MW
+
+        * 'heat_pump_rural_reactive_power'
+            Reactive power time series of PtH units for individual heating at the given
+            bus.
+            Type: pd.Series
+            Unit: MVar
+
+        * 'heat_pump_rural_p_nom'
+            Nominal power of all PtH units for individual heating at the given bus.
+            Type: float
+            Unit: MW
+
+        * 'thermal_storage_rural_capacity'
+            Capacity of thermal storage units in individual heating.
+            Type: float
+            Unit: MWh
+
+        * 'thermal_storage_rural_efficiency'
+            Charging and discharging efficiency of thermal storage units in individual
+            heating.
+            Type: float
+            Unit: p.u.
+
+        * 'thermal_storage_rural_soc'
+            SoC of thermal storage units in individual heating.
+            Type: pd.Series
+            Unit: p.u.
+
+        * 'heat_pump_central_active_power'
+            Active power time series of central PtH units at the given bus.
+            Type: pd.Series
+            Unit: MW
+
+        * 'heat_pump_central_reactive_power'
+            Reactive power time series of central PtH units at the given bus.
+            Type: pd.Series
+            Unit: MVar
+
+        * 'heat_pump_central_p_nom'
+            Nominal power of all central PtH units at the given bus.
+            Type: float
+            Unit: MW
+
+        * 'thermal_storage_central_capacity'
+            Capacity of central thermal storage units.
+            Type: pd.Series with eTraGo heat bus ID in index
+            Unit: MWh
+
+        * 'thermal_storage_central_efficiency'
+            Charging and discharging efficiency of central thermal storage units.
+            Type: float
+            Unit: p.u.
+
+        * 'thermal_storage_central_soc'
+            SoC of central thermal storage units.
+            Type: pd.DataFrame
+            Columns: eTraGo heat bus ID
+            Unit: p.u.
+
+        * 'feedin_district_heating'
+            Time series of other thermal feed-in from e.g. gas boilers or geothermal
+            units at the heat bus.
+            Type: pd.DataFrame
+            Columns: eTraGo heat bus ID
+            Unit: MW
+
+        * 'electromobility_active_power'
+            Active power charging time series at the given bus.
+            Type: pd.Series
+            Unit: MW
+
+        * 'electromobility_reactive_power'
+            Reactive power charging time series at the given bus.
+            Type: pd.Series
+            Unit: MVar
+
+    """
+
+    def dispatchable_gens():
+        dispatchable_gens_df_p = pd.DataFrame(index=timeseries_index)
+        dispatchable_gens_df_q = pd.DataFrame(index=timeseries_index)
+
+        dispatchable_gens_carriers = [
+            _
+            for _ in generators_df["carrier"].unique()
+            if "solar" not in _ and "wind" not in _
+        ]
+        # Filter generators_df for selected carriers (copy to avoid mutating the
+        # original dataframe when renaming carriers below).
+        dispatchable_gens_df = generators_df[
+            generators_df["carrier"].isin(dispatchable_gens_carriers)
+        ].copy()
+        # Rename carriers to match with carrier names in eDisGo
+        gens = dispatchable_gens_df[
+            dispatchable_gens_df.carrier.isin(["central_gas_CHP", "industrial_gas_CHP"])
+        ]
+        dispatchable_gens_df.loc[gens.index, "carrier"] = "gas_CHP"
+        gens = dispatchable_gens_df[
+            dispatchable_gens_df.carrier.isin(
+                ["central_biomass_CHP", "industrial_biomass_CHP"]
+            )
+        ]
+        dispatchable_gens_df.loc[gens.index, "carrier"] = "biomass_CHP"
+        gens = dispatchable_gens_df[dispatchable_gens_df.carrier.isin(["reservoir"])]
+        dispatchable_gens_df.loc[gens.index, "carrier"] = "run_of_river"
+        for carrier in dispatchable_gens_df.carrier.unique():
+            p_nom = dispatchable_gens_df.loc[
+                dispatchable_gens_df["carrier"] == carrier, "p_nom"
+            ].sum()
+            columns_to_aggregate = dispatchable_gens_df[
+                dispatchable_gens_df["carrier"] == carrier
+            ].index
+            dispatchable_gens_df_p[carrier] = (
+                etrago_obj.generators_t["p"][columns_to_aggregate].sum(axis="columns")
+                / p_nom
+            )
+            if pf_post_lopf:
+                dispatchable_gens_df_q[carrier] = (
+                    etrago_obj.generators_t["q"][columns_to_aggregate].sum(
+                        axis="columns"
+                    )
+                    / p_nom
+                )
+            else:
+                dispatchable_gens_df_q[carrier] = pd.Series(
+                    data=0, index=timeseries_index, dtype=float
+                )
+
+        # Add CHP to conventional generators (only needed in case pf_post_lopf is False,
+        # otherwise it is already included above)
+        if pf_post_lopf is False:
+            chp_df = links_df[
+                links_df["carrier"].isin(
+                    [
+                        "central_gas_CHP",
+                        "industrial_gas_CHP",
+                        "central_biomass_CHP",
+                        "industrial_biomass_CHP",
+                    ]
+                )
+            ].copy()
+            if not chp_df.empty:
+                # Rename CHP carrier to match with carrier names in eDisGo
+                gens_gas_chp = chp_df[
+                    chp_df.carrier.isin(["central_gas_CHP", "industrial_gas_CHP"])
+                ]
+                chp_df.loc[gens_gas_chp.index, "carrier"] = "gas_CHP"
+                gens_biomass_chp = chp_df[
+                    chp_df.carrier.isin(
+                        ["central_biomass_CHP", "industrial_biomass_CHP"]
+                    )
+                ]
+                chp_df.loc[gens_biomass_chp.index, "carrier"] = "biomass_CHP"
+
+                for carrier in chp_df.carrier.unique():
+                    p_nom = chp_df.loc[chp_df["carrier"] == carrier, "p_nom"].sum()
+                    columns_to_aggregate = chp_df[chp_df["carrier"] == carrier].index
+                    dispatchable_gens_df_p[carrier] = abs(
+                        etrago_obj.links_t["p1"][columns_to_aggregate].sum(
+                            axis="columns"
+                        )
+                        / p_nom
+                    )
+                    dispatchable_gens_df_q[carrier] = pd.Series(
+                        data=0, index=timeseries_index, dtype=float
+                    )
+
+        if (dispatchable_gens_df_p < -1e-3).any().any():
+            logger.warning("Dispatchable generator feed-in values smaller than -1 kW.")
+        results["dispatchable_generators_active_power"] = dispatchable_gens_df_p
+        results["dispatchable_generators_reactive_power"] = dispatchable_gens_df_q
+
+    def renewable_generators():
+
+        weather_dep_gens = ["solar", "solar_rooftop", "wind_onshore"]
+        renaming_carrier_dict = {
+            "solar": ["solar", "solar_rooftop"],
+            "wind": ["wind_onshore"],
+        }
+        weather_dep_gens_df = generators_df[
+            generators_df.carrier.isin(weather_dep_gens)
+        ].copy()
+
+        # Rename carriers for aggregation
+        for new_carrier_name, item in renaming_carrier_dict.items():
+            for carrier in item:
+                weather_dep_gens_df.loc[
weather_dep_gens_df["carrier"] == carrier, "carrier" + ] = new_carrier_name + + # Aggregation of p_nom + agg_weather_dep_gens_df = ( + weather_dep_gens_df.groupby(["carrier"]).agg({"p_nom": "sum"}).reset_index() + ) + + # Initialize dfs + # potential + weather_dep_gens_df_pot_p = pd.DataFrame( + 0.0, + index=timeseries_index, + columns=agg_weather_dep_gens_df.carrier.unique(), + ) + # reactive power + weather_dep_gens_df_dis_q = pd.DataFrame( + 0.0, + index=timeseries_index, + columns=agg_weather_dep_gens_df.carrier.unique(), + ) + # curtailment + weather_dep_gens_df_curt_p = pd.DataFrame( + 0.0, + index=timeseries_index, + columns=agg_weather_dep_gens_df.carrier.unique(), + ) + + for index, carrier, p_nom in weather_dep_gens_df[ + ["carrier", "p_nom"] + ].itertuples(): + # get index in aggregated dataframe to determine total installed capacity + # of the respective carrier + agg_idx = agg_weather_dep_gens_df[ + agg_weather_dep_gens_df["carrier"] == carrier + ].index.values[0] + p_nom_agg = agg_weather_dep_gens_df.loc[agg_idx, "p_nom"] + + p_series = etrago_obj.generators_t["p"][index] + p_max_pu_series = etrago_obj.generators_t["p_max_pu"][index] + p_max_pu_normed_series = p_max_pu_series * p_nom / p_nom_agg + + if pf_post_lopf: + q_series = etrago_obj.generators_t["q"][index] + # If set limit maximum reactive power + if max_cos_phi_ren: + logger.info( + "Applying Q limit (max cos(phi)={})".format(max_cos_phi_ren) + ) + phi = math.acos(max_cos_phi_ren) + for timestep in timeseries_index: + p = p_series[timestep] + q = q_series[timestep] + q_max = p * math.tan(phi) + q_min = -p * math.tan(phi) + if q > q_max: + q = q_max + elif q < q_min: + q = q_min + q_series[timestep] = q + q_normed_series = q_series / p_nom_agg + else: + q_normed_series = pd.Series(0.0, index=timeseries_index) + + weather_dep_gens_df_pot_p[carrier] += p_max_pu_normed_series + weather_dep_gens_df_dis_q[carrier] += q_normed_series + weather_dep_gens_df_curt_p[carrier] += p_max_pu_series * p_nom - p_series + + if (weather_dep_gens_df_curt_p.min() < -1e-3).any(): + logger.warning("Curtailment values smaller -1 kW.") + + results["renewables_potential"] = weather_dep_gens_df_pot_p + results["renewables_curtailment"] = weather_dep_gens_df_curt_p + results["renewables_dispatch_reactive_power"] = weather_dep_gens_df_dis_q + results["renewables_p_nom"] = agg_weather_dep_gens_df.set_index("carrier").p_nom + + def storages(): + # Filter batteries + storages_df = etrago_obj.storage_units.loc[ + (etrago_obj.storage_units["carrier"] == "battery") + & (etrago_obj.storage_units["bus"] == str(bus_id)) + ] + if not storages_df.empty: + # p_nom - p_nom_opt can always be used, if extendable is True or False + storages_df_p_nom = storages_df["p_nom_opt"].sum() + # Capacity + storages_df_max_hours = (storages_df["max_hours"]).values[0] + storages_cap = storages_df_p_nom * storages_df_max_hours + # p and q + storages_df_p = etrago_obj.storage_units_t["p"][storages_df.index].sum( + axis=1 + ) + if pf_post_lopf: + storages_df_q = etrago_obj.storage_units_t["q"][storages_df.index].sum( + axis=1 + ) + else: + storages_df_q = pd.Series(0.0, index=timeseries_index) + storages_df_soc = ( + etrago_obj.storage_units_t["state_of_charge"][storages_df.index].sum( + axis=1 + ) + / storages_cap + ) + + else: + storages_df_p_nom = 0 + storages_df_max_hours = 0 + storages_df_p = pd.Series(0.0, index=timeseries_index) + storages_df_q = pd.Series(0.0, index=timeseries_index) + storages_df_soc = pd.Series(0.0, index=timeseries_index) + 
results["storage_units_p_nom"] = storages_df_p_nom + results["storage_units_max_hours"] = storages_df_max_hours + results["storage_units_active_power"] = storages_df_p + results["storage_units_reactive_power"] = storages_df_q + results["storage_units_soc"] = storages_df_soc + + def dsm(): + # not needed in eDisGo in low flex scenario (dsm_df will be empty in that case) + # DSM + dsm_df = links_df.loc[ + (links_df["carrier"] == "dsm") & (links_df["bus0"] == str(bus_id)) + ] + if not dsm_df.empty: + dsm_df_p = etrago_obj.links_t["p0"][dsm_df.index].sum(axis=1) + else: + dsm_df_p = pd.Series(0.0, index=timeseries_index) + results["dsm_active_power"] = dsm_df_p + + def central_heat(): + + central_heat_carriers = ["central_heat_pump", "central_resistive_heater"] + central_heat_df = links_df.loc[ + (links_df["carrier"].isin(central_heat_carriers)) + & (links_df["bus0"] == str(bus_id)) + & (links_df["p_nom"] <= 20) + ] + if not central_heat_df.empty: + # Timeseries + central_heat_df_p = etrago_obj.links_t["p0"][central_heat_df.index].sum( + axis=1 + ) + central_heat_df_q = pd.Series(0.0, index=timeseries_index) + + # Nominal power of PtH units + p_nom = central_heat_df.p_nom.sum() + + # Stores + central_heat_buses = central_heat_df["bus1"].unique() + # find all heat stores connected to heat buses + central_heat_store_links_df = etrago_obj.links.loc[ + etrago_obj.links["bus0"].isin(central_heat_buses) + ] + if central_heat_store_links_df.empty: + central_heat_store_capacity = pd.Series() + central_heat_store_efficiency = 0 + soc_ts = pd.DataFrame() + else: + central_heat_store_df = etrago_obj.stores.loc[ + (etrago_obj.stores["carrier"] == "central_heat_store") + & ( + etrago_obj.stores["bus"].isin( + central_heat_store_links_df.bus1.values + ) + ) + ].reset_index(names="store_name") + central_heat_store_merge_links_df = pd.merge( + central_heat_store_links_df, + central_heat_store_df, + left_on="bus1", + right_on="bus", + ) + # capacity + central_heat_store_capacity = ( + central_heat_store_merge_links_df.set_index("bus0").e_nom_opt + ) + # efficiency + central_heat_store_efficiency = ( + central_heat_store_links_df.efficiency.values[0] + ) + # SoC + soc_ts = etrago_obj.stores_t["e"][ + central_heat_store_df.store_name.values + ].rename( + columns=central_heat_store_merge_links_df.set_index( + "store_name" + ).bus0 + ) + soc_ts = soc_ts / central_heat_store_capacity + + # Other feed-in + dh_feedin_df = pd.DataFrame() + for heat_bus in central_heat_buses: + # get feed-in from generators + heat_gens = etrago_obj.generators[ + (etrago_obj.generators["bus"] == heat_bus) + & (etrago_obj.generators["carrier"] != "load shedding") + ] + if not heat_gens.empty: + feedin_df_gens = etrago_obj.generators_t["p"][heat_gens.index].sum( + axis=1 + ) + else: + feedin_df_gens = pd.Series(0.0, index=timeseries_index) + # get feed-in from links + # get all links feeding into heat bus (except heat store) + heat_links_all = etrago_obj.links[ + (etrago_obj.links["bus1"] == heat_bus) + & ( + etrago_obj.links["carrier"].isin( + [ + "central_gas_boiler", + "central_gas_CHP_heat", + "central_heat_pump", + "central_resistive_heater", + ] + ) + ) + ] + # filter out PtH units that are already considered in PtH dispatch + # above + heat_links = heat_links_all.drop( + index=central_heat_df.index, errors="ignore" + ) + if not heat_links.empty: + feedin_df_links = abs( + etrago_obj.links_t["p1"][heat_links.index].sum(axis=1) + ) + else: + feedin_df_links = pd.Series(0.0, index=timeseries_index) + dh_feedin_df[heat_bus] = 
feedin_df_gens + feedin_df_links + else: + central_heat_df_p = pd.Series(0.0, index=timeseries_index) + central_heat_df_q = pd.Series(0.0, index=timeseries_index) + p_nom = 0 + central_heat_store_capacity = pd.Series() + central_heat_store_efficiency = 0 + soc_ts = pd.DataFrame() + dh_feedin_df = pd.DataFrame() + + results["heat_pump_central_active_power"] = central_heat_df_p + results["heat_pump_central_reactive_power"] = central_heat_df_q + results["heat_pump_central_p_nom"] = p_nom + results["thermal_storage_central_capacity"] = central_heat_store_capacity + results["thermal_storage_central_efficiency"] = central_heat_store_efficiency + results["thermal_storage_central_soc"] = soc_ts + results["feedin_district_heating"] = dh_feedin_df + + def rural_heat(): + # not needed in eDisGo in low flex scenario, but obtained anyway + # ToDo (low priority) add resistive heaters (they only exist in eGon100RE) + rural_heat_carriers = ["rural_heat_pump"] + rural_heat_df = links_df.loc[ + links_df["carrier"].isin(rural_heat_carriers) + & (links_df["bus0"] == str(bus_id)) + ] + if not rural_heat_df.empty: + # Timeseries + rural_heat_df_p = etrago_obj.links_t["p0"][rural_heat_df.index].sum(axis=1) + rural_heat_df_q = pd.Series(0.0, index=timeseries_index) + # p_nom + rural_heat_p_nom = rural_heat_df.p_nom.sum() + # Store + # capacity + rural_heat_bus = rural_heat_df["bus1"].values[0] + rural_heat_store_link_df = etrago_obj.links.loc[ + etrago_obj.links["bus0"] == rural_heat_bus + ] + if rural_heat_store_link_df.empty: + rural_heat_store_capacity = 0 + heat_store_efficiency = 0 + soc_ts = pd.Series(0.0, index=timeseries_index) + else: + rural_heat_store_df = etrago_obj.stores.loc[ + (etrago_obj.stores["carrier"] == "rural_heat_store") + & ( + etrago_obj.stores["bus"] + == rural_heat_store_link_df.bus1.values[0] + ) + ] + rural_heat_store_capacity = rural_heat_store_df.e_nom_opt.values[0] + # efficiency + heat_store_efficiency = rural_heat_store_link_df.efficiency.values[0] + # SoC + if rural_heat_store_capacity > 0: + soc_ts = etrago_obj.stores_t["e"][rural_heat_store_df.index[0]] + soc_ts = soc_ts / rural_heat_store_capacity + else: + soc_ts = pd.Series(0.0, index=timeseries_index) + else: + rural_heat_df_p = pd.Series(0.0, index=timeseries_index) + rural_heat_df_q = pd.Series(0.0, index=timeseries_index) + rural_heat_p_nom = 0 + rural_heat_store_capacity = 0 + heat_store_efficiency = 0 + soc_ts = pd.Series(0.0, index=timeseries_index) + + results["heat_pump_rural_active_power"] = rural_heat_df_p + results["heat_pump_rural_reactive_power"] = rural_heat_df_q + results["heat_pump_rural_p_nom"] = rural_heat_p_nom + results["thermal_storage_rural_capacity"] = rural_heat_store_capacity + results["thermal_storage_rural_efficiency"] = heat_store_efficiency + results["thermal_storage_rural_soc"] = soc_ts + + def bev_charger(): + # not needed in eDisGo in low flex scenario (bev_charger_df will be empty in + # that case) + # BEV charger + bev_charger_df = links_df.loc[ + (links_df["carrier"] == "BEV_charger") & (links_df["bus0"] == str(bus_id)) + ] + if not bev_charger_df.empty: + bev_charger_df_p = etrago_obj.links_t["p0"][bev_charger_df.index].sum( + axis=1 + ) + bev_charger_df_q = pd.Series(0.0, index=timeseries_index) + else: + bev_charger_df_p = pd.Series(0.0, index=timeseries_index) + bev_charger_df_q = pd.Series(0.0, index=timeseries_index) + + results["electromobility_active_power"] = bev_charger_df_p + results["electromobility_reactive_power"] = bev_charger_df_q + + # Function part + t_start = 
time.perf_counter()
+
+    logger.info("Specs for bus {}".format(bus_id))
+    if pf_post_lopf:
+        logger.info("Active and reactive power interface")
+    else:
+        logger.info("Only active power interface")
+
+    results = {}
+
+    timeseries_index = etrago_obj.snapshots
+    results["timeindex"] = timeseries_index
+
+    # Filter dataframes by bus_id
+    # Generators
+    generators_df = etrago_obj.generators[etrago_obj.generators["bus"] == str(bus_id)]
+    # Links
+    links_df = etrago_obj.links[
+        (etrago_obj.links["bus0"] == str(bus_id))
+        | (etrago_obj.links["bus1"] == str(bus_id))
+    ]
+
+    # Fill results
+    dispatchable_gens()
+    renewable_generators()
+    storages()
+    dsm()
+    central_heat()
+    rural_heat()
+    bev_charger()
+    logger.info(f"Overall time: {time.perf_counter() - t_start}")
+
+    return results
+
+
+def rename_generator_carriers_edisgo(edisgo_grid):
+    """
+    Helper function to rename carriers so that they match carrier names in eTraGo.
+
+    """
+    generators_df = edisgo_grid.topology.generators_df
+    if "p_nom_th" in generators_df.columns:
+        gens_rename = generators_df[
+            (generators_df["type"].isin(["gas", "gas extended", "oil", "others"]))
+            & (~generators_df["p_nom_th"].isna())
+        ]
+        generators_df.loc[gens_rename.index, "type"] = "gas_CHP"
+        gens_rename = generators_df[
+            (generators_df["type"].isin(["biomass"]))
+            & (~generators_df["p_nom_th"].isna())
+        ]
+        generators_df.loc[gens_rename.index, "type"] = "biomass_CHP"
+    gens_rename = generators_df[generators_df["type"].isin(["water"])]
+    generators_df.loc[gens_rename.index, "type"] = "run_of_river"
+    gens_rename = generators_df[generators_df["type"].isin(["conventional"])]
+    generators_df.loc[gens_rename.index, "type"] = "others"
+
+
+def map_etrago_heat_bus_to_district_heating_id(specs, scenario, engine):
+    """
+    Helper function to rename the eTraGo heat bus IDs to the district heating IDs
+    used in eDisGo. The renaming is applied to the specifications from the
+    overlying grid on district heating feed-in as well as on district heating
+    storage SoC and capacity.
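+
+    Parameters
+    ----------
+    specs : dict
+        Dictionary with eTraGo results per bus as returned by
+        :func:`get_etrago_results_per_bus`. The renaming is done in place.
+    scenario : str
+        Scenario name, e.g. "eGon2035".
+    engine : :sqlalchemy:`sqlalchemy.Engine`
+        Database engine.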
+
+    """
+    # map district heating ID to heat bus ID from eTraGo
+    orm = database.register_tables_in_saio(engine)
+    heat_buses = [int(_) for _ in specs["feedin_district_heating"].columns]
+    with database.session_scope(engine) as session:
+        # get srid of etrago_bus table
+        query = session.query(func.ST_SRID(orm["etrago_bus"].geom)).limit(1)
+        srid_etrago_bus = query.scalar()
+        # get district heating ID corresponding to heat bus ID by geo join
+        query = (
+            session.query(
+                orm["etrago_bus"].bus_id.label("heat_bus_id"),
+                orm["district_heating_areas"].id.label("district_heating_id"),
+            )
+            .filter(
+                orm["etrago_bus"].scn_name == scenario,
+                orm["district_heating_areas"].scenario == scenario,
+                orm["etrago_bus"].bus_id.in_(heat_buses),
+            )
+            .outerjoin(  # join to obtain district heating ID
+                orm["district_heating_areas"],
+                func.ST_Transform(
+                    func.ST_Centroid(orm["district_heating_areas"].geom_polygon),
+                    srid_etrago_bus,
+                )
+                == orm["etrago_bus"].geom,
+            )
+        )
+        mapping_heat_bus_dh_id = pd.read_sql(
+            query.statement,
+            engine,
+            index_col="heat_bus_id",
+        )
+    # convert heat bus ID to string
+    mapping_heat_bus_dh_id.index = mapping_heat_bus_dh_id.index.map(str)
+    # rename heat bus to district heating ID
+    specs["feedin_district_heating"].rename(
+        columns=mapping_heat_bus_dh_id.district_heating_id,
+        inplace=True,
+    )
+    specs["thermal_storage_central_soc"].rename(
+        columns=mapping_heat_bus_dh_id.district_heating_id,
+        inplace=True,
+    )
+    specs["thermal_storage_central_capacity"].rename(
+        index=mapping_heat_bus_dh_id.district_heating_id,
+        inplace=True,
+    )
diff --git a/ego/tools/io.py b/ego/tools/io.py
index 84b3cba2..e45f9e43 100644
--- a/ego/tools/io.py
+++ b/ego/tools/io.py
@@ -20,58 +20,43 @@
 """This file contains the eGo main class as well as
 input & output functions of eGo in order to build the eGo application
 container.
""" -import sys -import os -import json import logging -logger = logging.getLogger('ego') +import os + import pandas as pd -import numpy as np -import json -if not 'READTHEDOCS' in os.environ: - import pyproj as proj - from shapely.geometry import Polygon, Point, MultiPolygon - from sqlalchemy import MetaData, create_engine, and_, func - from sqlalchemy.orm import sessionmaker - import oedialect - from geoalchemy2 import * +if "READTHEDOCS" not in os.environ: + import re + from importlib import import_module + + import pypsa + + from egoio.db_tables.model_draft import EgoGridPfHvSource as Source + from egoio.db_tables.model_draft import EgoGridPfHvTempResolution as TempResolution from egoio.tools import db + from etrago import Etrago + from etrago.appl import run_etrago from etrago.tools.io import load_config_file - from egoio.db_tables.model_draft import EgoGridPfHvSource as Source,\ - EgoGridPfHvTempResolution as TempResolution - from ego.tools.results import (create_etrago_results) - from ego.tools.storages import (etrago_storages_investment, etrago_storages) - from ego.tools.economics import ( - etrago_operating_costs, - etrago_grid_investment, - get_generator_investment, - etrago_convert_overnight_cost) - from ego.tools.utilities import (get_scenario_setting, - get_time_steps, fix_leading_separator) + from sqlalchemy import and_ + from sqlalchemy.orm import sessionmaker + + from ego.tools.economics import etrago_convert_overnight_cost from ego.tools.edisgo_integration import EDisGoNetworks - from egoio.db_tables.model_draft import RenpassGisParameterRegion - from egoio.db_tables import model_draft, grid - from etrago.tools.plot import (plot_line_loading, plot_stacked_gen, - curtailment, gen_dist, storage_distribution, - plot_voltage, plot_residual_load, - plot_line_loading_diff, full_load_hours, - nodal_gen_dispatch, plot_q_flows, - max_load, storage_expansion, - nodal_production_balance, gen_dist_diff) - from etrago.appl import etrago - from importlib import import_module - import pypsa - import re - from ego.tools.plots import (plot_grid_storage_investment, - power_price_plot, plot_storage_use, igeoplot, - plot_edisgo_cluster, - plot_line_expansion, - plot_storage_expansion) - -__copyright__ = ("Europa-Universität Flensburg, " - "Centre for Sustainable Energy Systems") + from ego.tools.plots import ( + igeoplot, + plot_edisgo_cluster, + plot_grid_storage_investment, + plot_line_expansion, + plot_storage_expansion, + plot_storage_use, + power_price_plot, + ) + from ego.tools.utilities import get_scenario_setting + +logger = logging.getLogger("ego") + +__copyright__ = "Europa-Universität Flensburg, " "Centre for Sustainable Energy Systems" __license__ = "GNU Affero General Public License Version 3 (AGPL-3.0)" __author__ = "wolf_bunke,maltesc" @@ -97,8 +82,7 @@ class egoBasic(object): """ def __init__(self, *args, **kwargs): - """ - """ + """ """ logger.info("Using scenario setting: {}".format(self.jsonpath)) @@ -110,15 +94,15 @@ def __init__(self, *args, **kwargs): # Database connection from json_file try: - conn = db.connection(section=self.json_file['eTraGo']['db']) + conn = db.connection(section=self.json_file["eTraGo"]["db"]) Session = sessionmaker(bind=conn) self.session = Session() - logger.info('Connected to Database') - except: - logger.error('Failed connection to Database', exc_info=True) + logger.info("Connected to Database") + except: # noqa: E722 + logger.error("Failed connection to Database", exc_info=True) # get scn_name - self.scn_name = 
self.json_file['eTraGo']['scn_name'] + self.scn_name = self.json_file["eTraGo"]["scn_name"] class eTraGoResults(egoBasic): @@ -134,307 +118,76 @@ class eTraGoResults(egoBasic): """ def __init__(self, *args, **kwargs): - """ - """ + """ """ super(eTraGoResults, self).__init__(self, *args, **kwargs) self.etrago = None - self._etrago_network = None - self._etrago_disaggregated_network = None - logger.info('eTraGo section started') + logger.info("eTraGo section started") - if self.json_file['eGo']['result_id'] != None: + if self.json_file["eGo"]["result_id"] is not None: # Delete arguments from scenario_setting - logger.info('Remove given eTraGo settings from scenario_setting') + logger.info("Remove given eTraGo settings from scenario_setting") try: - self.json_file['eGo']['eTraGo'] = False + self.json_file["eGo"]["eTraGo"] = False - for key in self.json_file['eTraGo'].keys(): + for key in self.json_file["eTraGo"].keys(): - self.json_file['eTraGo'][key] = 'removed by DB recover' + self.json_file["eTraGo"][key] = "removed by DB recover" # ToDo add scenario_setting for results - self.json_file['eTraGo']['db'] = self.json_file['eTraGo']['db'] - logger.info( - 'Add eTraGo scenario_setting from oedb result') + self.json_file["eTraGo"]["db"] = self.json_file["eTraGo"]["db"] + logger.info("Add eTraGo scenario_setting from oedb result") # To do .... - _prefix = 'EgoGridPfHvResult' - schema = 'model_draft' - packagename = 'egoio.db_tables' - _pkg = import_module(packagename + '.' + schema) + _prefix = "EgoGridPfHvResult" + schema = "model_draft" + packagename = "egoio.db_tables" + _pkg = import_module(packagename + "." + schema) # get metadata - orm_meta = getattr(_pkg, _prefix + 'Meta') - self.jsonpath = recover_resultsettings(self.session, - self.json_file, - orm_meta, - self.json_file['eGo'] - ['result_id']) + orm_meta = getattr(_pkg, _prefix + "Meta") + self.jsonpath = recover_resultsettings( + self.session, + self.json_file, + orm_meta, + self.json_file["eGo"]["result_id"], + ) # add etrago_disaggregated_network from DB logger.info( "Recovered eTraGo network uses kmeans: {}".format( - self.json_file['eTraGo']['network_clustering_kmeans'])) + self.json_file["eTraGo"]["network_clustering_kmeans"] + ) + ) except KeyError: pass - logger.info('Create eTraGo network from oedb result') - self._etrago_network = etrago_from_oedb( - self.session, self.json_file) + logger.info("Create eTraGo network from oedb result") + self._etrago_network = etrago_from_oedb(self.session, self.json_file) - if self.json_file['eTraGo']['disaggregation'] != False: + if self.json_file["eTraGo"]["disaggregation"] is not False: self._etrago_disaggregated_network = self._etrago_network else: - logger.warning('No disaggregated network found in DB') + logger.warning("No disaggregated network found in DB") self._etrago_disaggregated_network = None # create eTraGo NetworkScenario - if self.json_file['eGo']['eTraGo'] is True: - - if self.json_file['eGo'].get('csv_import_eTraGo') != False: - - logger.info('Caution, import disaggregation ' - 'data of former Cluster') - - # get pathway - pathway = self.json_file['eGo'].get('csv_import_eTraGo') - - try: - # create Network from csv - self._etrago_network = pypsa.Network() - self._etrago_network.import_from_csv_folder(pathway) - logger.info('Create eTraGo network from CSV result') - - # get disaggregation - self._etrago_disaggregated_network = pypsa.Network() - self._etrago_disaggregated_network.\ - import_from_csv_folder(pathway+'/disaggregated') - logger.info('Create eTraGo disaggregated 
network ' - 'from CSV result') - - except TypeError: - file_path = "disaggregated/network.csv" - fix_leading_separator(pathway+"/"+file_path) - - file_path = "network.csv" - fix_leading_separator(pathway+"/"+file_path) - - self._etrago_network = pypsa.Network() - self._etrago_network.import_from_csv_folder(pathway) - logger.info('Create eTraGo network from CSV result') - - # get disaggregation - self._etrago_disaggregated_network = pypsa.Network() - self._etrago_disaggregated_network.\ - import_from_csv_folder(pathway+'/disaggregated') - logger.info('Create eTraGo disaggregated network' - 'from CSV result') - - args_name = "args.json" - with open(pathway+'/'+args_name) as f: - etrago_args = json.load(f) - logger.info('Using argument file') - - if etrago_args.get('extendable') == ['network', 'storages']: - etrago_args.update( - {'extendable': ['network', 'storage']}) - logger.info( - 'Changed naming of storages to storage of args') - - if etrago_args.get('extendable') == ['storages']: - etrago_args.update({'extendable': ['storage']}) - logger.info( - 'Changed naming of storages to storage of args') - - for key in self.json_file['eTraGo'].keys(): - try: - self.json_file['eTraGo'][key] = etrago_args[key] - except KeyError: - pass + if self.json_file["eGo"]["eTraGo"] is True: + + if self.json_file["eGo"].get("csv_import_eTraGo") is not False: + + logger.info("Import eTraGo network from csv files") + + self.etrago = Etrago( + csv_folder_name=self.json_file["eGo"].get("csv_import_eTraGo") + ) else: - logger.info('Create eTraGo network calcualted by eGo') - - if self.json_file['eTraGo']['disaggregation'] != False: - - etrago_network, etrago_disaggregated_network = etrago( - self.json_file['eTraGo']) - - self._etrago_network = etrago_network - self._etrago_disaggregated_network = ( - etrago_disaggregated_network) - else: - logger.warning("Only one network is used.") - - etrago_network, etrago_disaggregated_network = etrago( - self.json_file['eTraGo']) - - self._etrago_network = etrago_network - self._etrago_disaggregated_network = ( - etrago_disaggregated_network) - - # Add selected results to results container - # ----------------------------------------- - - self.etrago = pd.DataFrame() - self.etrago.network = self._etrago_network - self.etrago.disaggregated_network = self._etrago_disaggregated_network - - # Add function - self.etrago.storage_investment_costs = etrago_storages_investment( - self.etrago.network, self.json_file, self.session) - self.etrago.storage_charges = etrago_storages(self.etrago.network) - - self.etrago.operating_costs = etrago_operating_costs( - self.etrago.network) - self.etrago.generator = create_etrago_results(self.etrago.network, - self.scn_name) - self.etrago.grid_investment_costs = \ - etrago_grid_investment(self.etrago.network, - self.json_file, self.session) - - # add functions direct - # self._etrago_network.etrago_line_loading = etrago_line_loading - self.etrago.plot_line_loading = self._line_loading - self.etrago.plot_stacked_gen = self._stacked_gen - self.etrago.plot_curtailment = self._curtailment - self.etrago.plot_gen_dist = self._gen_dist - self.etrago.plot_storage_distribution = self._storage_distribution - self.etrago.plot_line_loading_diff = self._line_loading_diff - self.etrago.plot_residual_load = self._residual_load - self.etrago.plot_voltage = self._voltage - self.etrago.plot_nodal_gen_dispatch = \ - self._nodal_gen_dispatch - self.etrago.plot_full_load_hours = self._full_load_hours - self.etrago.plot_q_flows = self._plot_q_flows - 
self.etrago.plot_max_load = self._max_load - self.etrago.plot_storage_expansion = self._storage_expansion - self.etrago.plot_nodal_production_balance = ( - self._nodal_production_balance) - self.etrago.plot_gen_dist_diff = self._gen_dist_diff - - if not 'READTHEDOCS' in os.environ: - # include eTraGo functions and methods - def _gen_dist_diff(self, **kwargs): - """ - Integrate and use function from eTraGo. - For more information see: - """ - - return gen_dist_diff(networkA=self.etrago.network, - **kwargs) - - def _nodal_production_balance(self, **kwargs): - """ - Integrate and use function from eTraGo. - For more information see: - """ - - return nodal_production_balance(network=self.etrago.network, - **kwargs) - - def _storage_expansion(self, **kwargs): - """ - Integrate and use function from eTraGo. - For more information see: - """ - - return storage_expansion(network=self.etrago.network, - **kwargs) - - def _max_load(self, **kwargs): - """ - Integrate and use function from eTraGo. - For more information see: - """ - - return max_load(network=self.etrago.network, - **kwargs) - - def _plot_q_flows(self): - """ - Integrate and use function from eTraGo. - For more information see: - """ - - return plot_q_flows(network=self.etrago.network) - - def _line_loading(self, **kwargs): - """ - Integrate and use function from eTraGo. - For more information see: - """ - # add if time_step <1 -> plot - return plot_line_loading(network=self.etrago.network, **kwargs) - - def _stacked_gen(self, **kwargs): - """ - Integrate function from eTraGo. - For more information see: - """ - return plot_stacked_gen(network=self.etrago.network, **kwargs) - - def _curtailment(self, **kwargs): - """ - Integrate function from eTraGo. - For more information see: - """ - return curtailment(network=self.etrago.network, **kwargs) - - def _gen_dist(self, **kwargs): - """ - Integrate function from eTraGo. - For more information see: - """ - return gen_dist(network=self.etrago.network, **kwargs) - - def _storage_distribution(self, scaling=1, **kwargs): - """ - Integrate function from eTraGo. - For more information see: - """ - return storage_distribution(network=self.etrago.network, - scaling=1, **kwargs) - - def _voltage(self, **kwargs): - """ - Integrate function from eTraGo. - For more information see: - """ - return plot_voltage(network=self.etrago.network, **kwargs) - - def _residual_load(self, **kwargs): - """ - Integrate function from eTraGo. - For more information see: - """ - return plot_residual_load(network=self.etrago.network, **kwargs) - - def _line_loading_diff(self, **kwargs): - """ - Integrate function from eTraGo. - For more information see: - """ - return plot_line_loading_diff(networkA=self.etrago.network, - **kwargs) - - def _nodal_gen_dispatch(self, **kwargs): - """ - Integrate function from eTraGo. - For more information see: - """ - return nodal_gen_dispatch(network=self.etrago.network, - **kwargs) - - def _full_load_hours(self, **kwargs): - """ - Integrate function from eTraGo. 
-            For more information see:
-            """
-            return full_load_hours(network=self.etrago.network, **kwargs)
+            logger.info("Create eTraGo network calculated by eGo")
+
+            run_etrago(args=self.json_file["eTraGo"], json_path=None)
 
 
 class eDisGoResults(eTraGoResults):
@@ -446,15 +199,16 @@ class eDisGoResults(eTraGoResults):
 
     def __init__(self, *args, **kwargs):
         super(eDisGoResults, self).__init__(self, *args, **kwargs)
-        if self.json_file['eGo']['eDisGo'] is True:
-            logger.info('Create eDisGo network')
+        if self.json_file["eGo"]["eDisGo"] is True:
+            logger.info("Create eDisGo network")
 
             self._edisgo = EDisGoNetworks(
                 json_file=self.json_file,
-                etrago_network=self.etrago.disaggregated_network)
+                etrago_network=self.etrago.disaggregated_network,
+            )
 
         else:
             self._edisgo = None
-            logger.info('No eDisGo network')
+            logger.info("No eDisGo network")
 
     @property
     def edisgo(self):
@@ -489,7 +243,7 @@ class eGo(eDisGoResults):
 
     def __init__(self, jsonpath, *args, **kwargs):
 
         self.jsonpath = jsonpath
-        super(eGo, self).__init__(self, *args, **kwargs)
+        super(eGo, self).__init__(self, *args, **kwargs)
 
         # add total results here
         self._total_investment_costs = None
@@ -499,49 +253,49 @@ def __init__(self, jsonpath, *args, **kwargs):
         self._ehv_grid_costs = None
         self._mv_grid_costs = None
 
-    def _calculate_investment_cost(
-            self,
-            storage_mv_integration=True):
-        """ Get total investment costs of all voltage level for storages
+    def _calculate_investment_cost(self, storage_mv_integration=True):
+        """Get total investment costs of all voltage levels for storages
         and grid expansion
         """
-        self._total_inv_cost = pd.DataFrame(columns=['component',
-                                                     'voltage_level',
-                                                     'capital_cost'
-                                                     ])
+        self._total_inv_cost = pd.DataFrame(
+            columns=["component", "voltage_level", "capital_cost"]
+        )
 
         _grid_ehv = None
-        if 'network' in self.json_file['eTraGo']['extendable']:
+        if "network" in self.json_file["eTraGo"]["extendable"]:
            _grid_ehv = self.etrago.grid_investment_costs
-            _grid_ehv['component'] = 'grid'
+            _grid_ehv["component"] = "grid"
 
-            self._total_inv_cost = self._total_inv_cost.\
-                append(_grid_ehv, ignore_index=True)
+            self._total_inv_cost = self._total_inv_cost.append(
+                _grid_ehv, ignore_index=True
+            )
 
         _storage = None
-        if 'storage' in self.json_file['eTraGo']['extendable']:
+        if "storage" in self.json_file["eTraGo"]["extendable"]:
            _storage = self.etrago.storage_investment_costs
-            _storage['component'] = 'storage'
+            _storage["component"] = "storage"
 
-            self._total_inv_cost = self._total_inv_cost.\
-                append(_storage, ignore_index=True)
+            self._total_inv_cost = self._total_inv_cost.append(
+                _storage, ignore_index=True
+            )
 
         _grid_mv_lv = None
-        if self.json_file['eGo']['eDisGo'] is True:
+        if self.json_file["eGo"]["eDisGo"] is True:
            _grid_mv_lv = self.edisgo.grid_investment_costs
 
            if _grid_mv_lv is not None:
-                _grid_mv_lv['component'] = 'grid'
-                _grid_mv_lv['differentiation'] = 'domestic'
+                _grid_mv_lv["component"] = "grid"
+                _grid_mv_lv["differentiation"] = "domestic"
 
-                self._total_inv_cost = self._total_inv_cost.\
-                    append(_grid_mv_lv, ignore_index=True)
+                self._total_inv_cost = self._total_inv_cost.append(
+                    _grid_mv_lv, ignore_index=True
+                )
 
         # add overnight costs
         self._total_investment_costs = self._total_inv_cost
-        self._total_investment_costs[
-            'overnight_costs'] = etrago_convert_overnight_cost(
-            self._total_investment_costs['capital_cost'], self.json_file)
+        self._total_investment_costs["overnight_costs"] = etrago_convert_overnight_cost(
+            self._total_investment_costs["capital_cost"], self.json_file
+        )
 
         # Include MV storages into the _total_investment_costs dataframe
         if storage_mv_integration is True:
@@ -549,11 +303,13 @@ def _calculate_investment_cost(
 
             self._integrate_mv_storage_investment()
 
         # sort values
-        self._total_investment_costs['voltage_level'] = pd.Categorical(
-            self._total_investment_costs['voltage_level'], ['ehv', 'hv', 'mv',
-                                                            'lv', 'mv/lv'])
-        self._total_investment_costs = (
-            self._total_investment_costs.sort_values('voltage_level'))
+        self._total_investment_costs["voltage_level"] = pd.Categorical(
+            self._total_investment_costs["voltage_level"],
+            ["ehv", "hv", "mv", "lv", "mv/lv"],
+        )
+        self._total_investment_costs = self._total_investment_costs.sort_values(
+            "voltage_level"
+        )
 
         self._storage_costs = _storage
         self._ehv_grid_costs = _grid_ehv
@@ -577,44 +333,39 @@ def _integrate_mv_storage_investment(self):
 
             if integrated_share > 0:
 
                 ehv_stor_idx = costs_df.index[
-                    (costs_df['component'] == 'storage')
-                    & (costs_df['voltage_level'] == 'ehv')][0]
-
-                int_capital_costs = costs_df.loc[ehv_stor_idx][
-                    'capital_cost'
-                ] * integrated_share
-                int_overnight_costs = costs_df.loc[ehv_stor_idx][
-                    'overnight_costs'
-                ] * integrated_share
-
-                costs_df.at[
-                    ehv_stor_idx,
-                    'capital_cost'
-                ] = (
-                    costs_df.loc[ehv_stor_idx]['capital_cost']
-                    - int_capital_costs)
-
-                costs_df.at[
-                    ehv_stor_idx,
-                    'overnight_costs'
-                ] = (
-                    costs_df.loc[ehv_stor_idx]['overnight_costs']
-                    - int_overnight_costs)
+                    (costs_df["component"] == "storage")
+                    & (costs_df["voltage_level"] == "ehv")
+                ][0]
+
+                int_capital_costs = (
+                    costs_df.loc[ehv_stor_idx]["capital_cost"] * integrated_share
+                )
+                int_overnight_costs = (
+                    costs_df.loc[ehv_stor_idx]["overnight_costs"] * integrated_share
+                )
+
+                costs_df.at[ehv_stor_idx, "capital_cost"] = (
+                    costs_df.loc[ehv_stor_idx]["capital_cost"] - int_capital_costs
+                )
+
+                costs_df.at[ehv_stor_idx, "overnight_costs"] = (
+                    costs_df.loc[ehv_stor_idx]["overnight_costs"] - int_overnight_costs
+                )
 
                 new_storage_row = {
-                    'component': ['storage'],
-                    'voltage_level': ['mv'],
-                    'differentiation': ['domestic'],
-                    'capital_cost': [int_capital_costs],
-                    'overnight_costs': [int_overnight_costs]}
+                    "component": ["storage"],
+                    "voltage_level": ["mv"],
+                    "differentiation": ["domestic"],
+                    "capital_cost": [int_capital_costs],
+                    "overnight_costs": [int_overnight_costs],
+                }
 
                 new_storage_row = pd.DataFrame(new_storage_row)
                 costs_df = costs_df.append(new_storage_row)
 
                 self._total_investment_costs = costs_df
 
-        except:
-            logger.info(
-                'Something went wrong with the MV storage distribution.')
+        except:  # noqa: E722
+            logger.info("Something went wrong with the MV storage distribution.")
 
     def _calculate_all_extended_storages(self):
         """
@@ -623,11 +374,12 @@ def _calculate_all_extended_storages(self):
         etrago_network = self._etrago_disaggregated_network
 
         stor_df = etrago_network.storage_units.loc[
-            (etrago_network.storage_units['p_nom_extendable'] == True)]
+            # use the boolean column directly; ``is True`` on a Series is always False
+            (etrago_network.storage_units["p_nom_extendable"])
+        ]
 
-        stor_df = stor_df[['bus', 'p_nom_opt']]
+        stor_df = stor_df[["bus", "p_nom_opt"]]
 
-        all_extended_storages = stor_df['p_nom_opt'].sum()
+        all_extended_storages = stor_df["p_nom_opt"].sum()
 
         return all_extended_storages
 
@@ -639,46 +391,49 @@ def _calculate_mv_storage(self):
 
         min_extended = 0.3
 
         stor_df = etrago_network.storage_units.loc[
-            (etrago_network.storage_units['p_nom_extendable'] == True)
-            & (etrago_network.storage_units['p_nom_opt'] > min_extended)
-            & (etrago_network.storage_units['max_hours'] <= 20.)]
+            (etrago_network.storage_units["p_nom_extendable"])
+            & 
(etrago_network.storage_units["p_nom_opt"] > min_extended) + & (etrago_network.storage_units["max_hours"] <= 20.0) + ] - stor_df = stor_df[['bus', 'p_nom_opt']] + stor_df = stor_df[["bus", "p_nom_opt"]] - integrated_storage = .0 # Storage integrated in MV grids + integrated_storage = 0.0 # Storage integrated in MV grids for idx, row in stor_df.iterrows(): - bus_id = row['bus'] - p_nom_opt = row['p_nom_opt'] - - mv_grid_id = self.edisgo.get_mv_grid_from_bus_id(bus_id) + mv_grid_id = row["bus"] + p_nom_opt = row["p_nom_opt"] if not mv_grid_id: continue - logger.info("Checking storage integration for MV grid {}".format( - mv_grid_id)) + logger.info( + "Checking storage integration for MV grid {}".format(mv_grid_id) + ) grid_choice = self.edisgo.grid_choice cluster = grid_choice.loc[ - [mv_grid_id in repr_grids for repr_grids in grid_choice[ - 'represented_grids']]] + [ + mv_grid_id in repr_grids + for repr_grids in grid_choice["represented_grids"] + ] + ] if len(cluster) == 0: continue else: - representative_grid = cluster[ - 'the_selected_network_id'].values[0] + representative_grid = cluster["the_selected_network_id"].values[0] - if hasattr(self.edisgo.network[representative_grid], 'network'): + if hasattr(self.edisgo.network[representative_grid], "network"): integration_df = self.edisgo.network[ - representative_grid].network.results.storages + representative_grid + ].network.results.storages - integrated_power = integration_df['nominal_power'].sum() / 1000 + integrated_power = integration_df["nominal_power"].sum() / 1000 else: - integrated_power = 0. + integrated_power = 0.0 if integrated_power > p_nom_opt: integrated_power = p_nom_opt @@ -715,25 +470,19 @@ def total_operation_costs(self): return self._total_operation_costs - def plot_total_investment_costs(self, - filename=None, - display=False, **kwargs): - """ Plot total investment costs - """ + def plot_total_investment_costs(self, filename=None, display=False, **kwargs): + """Plot total investment costs""" if filename is None: filename = "results/plot_total_investment_costs.pdf" display = True return plot_grid_storage_investment( - self._total_investment_costs, - filename=filename, - display=display, - **kwargs) + self._total_investment_costs, filename=filename, display=display, **kwargs + ) def plot_power_price(self, filename=None, display=False): - """ Plot power prices per carrier of calculation - """ + """Plot power prices per carrier of calculation""" if filename is None: filename = "results/plot_power_price.pdf" display = True @@ -741,45 +490,38 @@ def plot_power_price(self, filename=None, display=False): return power_price_plot(self, filename=filename, display=display) def plot_storage_usage(self, filename=None, display=False): - """ Plot storage usage by charge and discharge - """ + """Plot storage usage by charge and discharge""" if filename is None: filename = "results/plot_storage_usage.pdf" display = True return plot_storage_use(self, filename=filename, display=display) - def plot_edisgo_cluster(self, filename=None, display=False, - **kwargs): - """ Plot the Clustering of selected Dingo networks - """ + def plot_edisgo_cluster(self, filename=None, display=False, **kwargs): + """Plot the Clustering of selected Dingo networks""" if filename is None: filename = "results/plot_edisgo_cluster.pdf" display = True - return plot_edisgo_cluster(self, filename=filename, display=display, - **kwargs) + return plot_edisgo_cluster(self, filename=filename, display=display, **kwargs) def plot_line_expansion(self, **kwargs): - """Plot line 
expantion per line
-        """
+        """Plot line expansion per line"""
 
         return plot_line_expansion(self, **kwargs)
 
     def plot_storage_expansion(self, **kwargs):
-        """Plot storage expantion per bus
-        """
+        """Plot storage expansion per bus"""
 
         return plot_storage_expansion(self, **kwargs)
 
     @property
     def iplot(self):
-        """ Get iplot of results as html
-        """
+        """Get iplot of results as html"""
         return igeoplot(self)
 
     # write_results_to_db():
-    logging.info('Initialisation of eGo Results')
+    logging.info("Initialisation of eGo Results")
 
 
 def results_to_excel(ego):
@@ -789,12 +531,12 @@ def results_to_excel(ego):
     # Write the results as xlsx file
     # ToDo add time of calculation to file name
     # add xlsxwriter to setup
-    writer = pd.ExcelWriter('open_ego_results.xlsx', engine='xlsxwriter')
+    writer = pd.ExcelWriter("open_ego_results.xlsx", engine="xlsxwriter")
 
     # write results of installed Capacity by fuels
-    ego.total_investment_costs.to_excel(writer,
-                                        index=False,
-                                        sheet_name='Total Calculation')
+    ego.total_investment_costs.to_excel(
+        writer, index=False, sheet_name="Total Calculation"
+    )
 
     # Close the Pandas Excel writer and output the Excel file.
     writer.save()
@@ -819,7 +561,7 @@ def etrago_from_oedb(session, json_file):
 
     """
-    result_id = json_file['eGo']['result_id']
+    result_id = json_file["eGo"]["result_id"]
 
     # functions
     def map_ormclass(name):
@@ -830,7 +572,7 @@ def map_ormclass(name):
             _mapped[name] = getattr(_pkg, _prefix + name)
 
         except AttributeError:
-            logger.warning('Relation %s does not exist.' % name)
+            logger.warning("Relation %s does not exist." % name)
 
         return _mapped
@@ -854,18 +596,16 @@ def dataframe_results(name, session, result_id, ormclass):
 
         query = session.query(ormclass).filter(ormclass.result_id == result_id)
 
-        if name == 'Transformer':
-            name = 'Trafo'
+        if name == "Transformer":
+            name = "Trafo"
 
-        df = pd.read_sql(query.statement,
-                         session.bind,
-                         index_col=name.lower() + '_id')
+        df = pd.read_sql(query.statement, session.bind, index_col=name.lower() + "_id")
 
-        if name == 'Link':
-            df['bus0'] = df.bus0.astype(int)
-            df['bus1'] = df.bus1.astype(int)
+        if name == "Link":
+            df["bus0"] = df.bus0.astype(int)
+            df["bus1"] = df.bus1.astype(int)
 
-        if 'source' in df:
+        if "source" in df:
 
             source_orm = Source
@@ -873,7 +613,7 @@ def dataframe_results(name, session, result_id, ormclass):
 
             df.source = df.source.map(id_to_source(source_query))
 
-        if str(ormclass)[:-2].endswith('T'):
+        if str(ormclass)[:-2].endswith("T"):
             df = pd.Dataframe()
 
         return df
@@ -884,27 +624,23 @@ def series_results(name, column, session, result_id, ormclass):
 
        Parameters
        ----------
-        session: : sqlalchemy: `sqlalchemy.orm.session.Session < orm/session_basics.html >`
+        session: : sqlalchemy: `sqlalchemy.orm.session.Session`
            SQLAlchemy session to the OEDB
 
        """
        # TODO - check index of bus_t and soon is wrong!
        # TODO: pls make more robust
 
-        id_column = re.findall(r'[A-Z][^A-Z]*', name)[0] + '_' + 'id'
+        id_column = re.findall(r"[A-Z][^A-Z]*", name)[0] + "_" + "id"
        id_column = id_column.lower()
 
        query = session.query(
-            getattr(ormclass, id_column),
-            getattr(ormclass, column).
label(column)).filter(and_(
-                ormclass.result_id == result_id
-            ))
+            getattr(ormclass, id_column), getattr(ormclass, column).label(column)
+        ).filter(and_(ormclass.result_id == result_id))
 
-        df = pd.io.sql.read_sql(query.statement,
-                                session.bind,
-                                columns=[column],
-                                index_col=id_column)
+        df = pd.io.sql.read_sql(
+            query.statement, session.bind, columns=[column], index_col=id_column
+        )
 
        df.index = df.index.astype(str)
 
@@ -915,37 +651,37 @@ def series_results(name, column, session, result_id, ormclass):
            assert not df.empty
            df.index = timeindex
        except AssertionError:
-            logger.warning("No data for %s in column %s." % (name, column))
+            logger.warning("No data for {} in column {}.".format(name, column))
 
        return df
 
    # create config for results
    path = os.getcwd()
    # add meta_args with args of results
-    config = load_config_file(path+'/tools/config.json')['results']
+    config = load_config_file(path + "/tools/config.json")["results"]
 
    # map and Database settings of etrago_from_oedb()
-    _prefix = 'EgoGridPfHvResult'
-    schema = 'model_draft'
-    packagename = 'egoio.db_tables'
-    _pkg = import_module(packagename + '.' + schema)
-    temp_ormclass = 'TempResolution'
-    carr_ormclass = 'Source'
+    _prefix = "EgoGridPfHvResult"
+    schema = "model_draft"
+    packagename = "egoio.db_tables"
+    _pkg = import_module(packagename + "." + schema)
+    temp_ormclass = "TempResolution"
+    carr_ormclass = "Source"
    _mapped = {}
 
    # get metadata
-    orm_meta = getattr(_pkg, _prefix + 'Meta')
+    orm_meta = getattr(_pkg, _prefix + "Meta")
 
    # check result_id
-    result_id_in = session.query(
-        orm_meta.result_id).filter(orm_meta.
-                                   result_id == result_id).all()
+    result_id_in = (
+        session.query(orm_meta.result_id).filter(orm_meta.result_id == result_id).all()
+    )
    if result_id_in:
-        logger.info('Choosen result_id %s found in DB', result_id)
+        logger.info("Chosen result_id %s found in DB", result_id)
    else:
-        logger.info('Error: result_id not found in DB')
+        # a missing result_id is an error condition, so log it as one
+        logger.error("result_id not found in DB")
 
    # get meta data as args
    meta_args = recover_resultsettings(session, json_file, orm_meta, result_id)
@@ -953,15 +689,17 @@
    # get TempResolution
    temp = TempResolution
 
-    tr = session.query(temp.temp_id, temp.timesteps,
-                       temp.resolution, temp.start_time).one()
+    tr = session.query(
+        temp.temp_id, temp.timesteps, temp.resolution, temp.start_time
+    ).one()
 
-    timeindex = pd.DatetimeIndex(start=tr.start_time,
-                                 periods=tr.timesteps,
-                                 freq=tr.resolution)
+    # pd.DatetimeIndex(start=..., periods=..., freq=...) was removed in pandas 1.0;
+    # pd.date_range builds the same index
+    timeindex = pd.date_range(
+        start=tr.start_time, periods=tr.timesteps, freq=tr.resolution
+    )
 
-    timeindex = timeindex[meta_args['eTraGo']['start_snapshot'] -
-                          1: meta_args['eTraGo']['end_snapshot']]
+    timeindex = timeindex[
+        meta_args["eTraGo"]["start_snapshot"] - 1 : meta_args["eTraGo"]["end_snapshot"]
+    ]
 
    # create df for PyPSA network
 
@@ -970,37 +708,42 @@
    timevarying_override = False
 
-    if pypsa.__version__ == '0.11.0':
-        old_to_new_name = {'Generator':
-                           {'p_min_pu_fixed': 'p_min_pu',
-                            'p_max_pu_fixed': 'p_max_pu',
-                            'source': 'carrier',
-                            'dispatch': 'former_dispatch'},
-                           'Bus':
-                           {'current_type': 'carrier'},
-                           'Transformer':
-                           {'trafo_id': 'transformer_id'},
-                           'Storage':
-                           {'p_min_pu_fixed': 'p_min_pu',
-                            'p_max_pu_fixed': 'p_max_pu',
-                            'soc_cyclic': 'cyclic_state_of_charge',
-                            'soc_initial': 'state_of_charge_initial',
-                            'source': 'carrier'}}
+    if pypsa.__version__ == "0.11.0":
+        old_to_new_name = {
+            "Generator": {
+                "p_min_pu_fixed": "p_min_pu",
+
"p_max_pu_fixed": "p_max_pu", + "source": "carrier", + "dispatch": "former_dispatch", + }, + "Bus": {"current_type": "carrier"}, + "Transformer": {"trafo_id": "transformer_id"}, + "Storage": { + "p_min_pu_fixed": "p_min_pu", + "p_max_pu_fixed": "p_max_pu", + "soc_cyclic": "cyclic_state_of_charge", + "soc_initial": "state_of_charge_initial", + "source": "carrier", + }, + } timevarying_override = True else: - old_to_new_name = {'Storage': - {'soc_cyclic': 'cyclic_state_of_charge', - 'soc_initial': 'state_of_charge_initial'}} + old_to_new_name = { + "Storage": { + "soc_cyclic": "cyclic_state_of_charge", + "soc_initial": "state_of_charge_initial", + } + } # get data into dataframes - logger.info('Start building eTraGo results network') + logger.info("Start building eTraGo results network") for comp, comp_t_dict in config.items(): orm_dict = map_ormclass(comp) - pypsa_comp_name = 'StorageUnit' if comp == 'Storage' else comp + pypsa_comp_name = "StorageUnit" if comp == "Storage" else comp ormclass = orm_dict[comp] if not comp_t_dict: @@ -1019,77 +762,78 @@ def series_results(name, column, session, result_id, ormclass): name = name[:-1] pypsa_comp_name = name - if name == 'Storage': - pypsa_comp_name = 'StorageUnit' - if name == 'Transformer': - name = 'Trafo' + if name == "Storage": + pypsa_comp_name = "StorageUnit" + if name == "Transformer": + name = "Trafo" for col in columns: - df_series = series_results( - name, col, session, result_id, ormclass) + df_series = series_results(name, col, session, result_id, ormclass) # TODO: VMagPuSet? - if timevarying_override and comp == 'Generator': - idx = df[df.former_dispatch == 'flexible'].index + if timevarying_override and comp == "Generator": + idx = df[df.former_dispatch == "flexible"].index idx = [i for i in idx if i in df_series.columns] df_series.drop(idx, axis=1, inplace=True) try: pypsa.io.import_series_from_dataframe( - network, - df_series, - pypsa_comp_name, - col) + network, df_series, pypsa_comp_name, col + ) except (ValueError, AttributeError): - logger.warning("Series %s of component %s could not be" - " imported" % (col, pypsa_comp_name)) + logger.warning( + "Series %s of component %s could not be" + " imported" % (col, pypsa_comp_name) + ) - logger.info('Imported eTraGo results of id = %s ', result_id) + logger.info("Imported eTraGo results of id = %s ", result_id) return network def recover_resultsettings(session, json_file, orm_meta, result_id): - """ Recover scenario_setting from database - """ + """Recover scenario_setting from database""" # check result_id - result_id_in = session.query( - orm_meta.result_id).filter(orm_meta. 
- result_id == result_id).all() + result_id_in = ( + session.query(orm_meta.result_id).filter(orm_meta.result_id == result_id).all() + ) # get meta data as json_file - meta = session.query(orm_meta.result_id, orm_meta.scn_name, - orm_meta.calc_date, - orm_meta.user_name, orm_meta.method, - orm_meta.start_snapshot, - orm_meta.end_snapshot, orm_meta.solver, - orm_meta.settings - ).filter(orm_meta.result_id == result_id) - - meta_df = pd.read_sql( - meta.statement, meta.session.bind, index_col='result_id') + meta = session.query( + orm_meta.result_id, + orm_meta.scn_name, + orm_meta.calc_date, + orm_meta.user_name, + orm_meta.method, + orm_meta.start_snapshot, + orm_meta.end_snapshot, + orm_meta.solver, + orm_meta.settings, + ).filter(orm_meta.result_id == result_id) + + meta_df = pd.read_sql(meta.statement, meta.session.bind, index_col="result_id") # update json_file with main data by result_id - json_file['eTraGo']['scn_name'] = meta_df.scn_name[result_id] - json_file['eTraGo']['method'] = meta_df.method[result_id] - json_file['eTraGo']['start_snapshot'] = meta_df.start_snapshot[result_id] - json_file['eTraGo']['end_snapshot'] = meta_df.end_snapshot[result_id] - json_file['eTraGo']['solver'] = meta_df.solver[result_id] + json_file["eTraGo"]["scn_name"] = meta_df.scn_name[result_id] + json_file["eTraGo"]["method"] = meta_df.method[result_id] + json_file["eTraGo"]["start_snapshot"] = meta_df.start_snapshot[result_id] + json_file["eTraGo"]["end_snapshot"] = meta_df.end_snapshot[result_id] + json_file["eTraGo"]["solver"] = meta_df.solver[result_id] # update json_file with specific data by result_id meta_set = dict(meta_df.settings[result_id]) - for key in json_file['eTraGo'].keys(): + for key in json_file["eTraGo"].keys(): try: - json_file['eTraGo'][key] = meta_set[key] + json_file["eTraGo"][key] = meta_set[key] except KeyError: pass return json_file -if __name__ == '__main__': +if __name__ == "__main__": pass diff --git a/ego/tools/mv_cluster.py b/ego/tools/mv_cluster.py deleted file mode 100644 index 0e8da824..00000000 --- a/ego/tools/mv_cluster.py +++ /dev/null @@ -1,287 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2016-2018 Europa-Universität Flensburg, -# Flensburg University of Applied Sciences, -# Centre for Sustainable Energy Systems -# -# This program is free software; you can redistribute it and/or -# modify it under the terms of the GNU Affero General Public License as -# published by the Free Software Foundation; either version 3 of the -# License, or (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Affero General Public License for more details. -# -# You should have received a copy of the GNU Affero General Public License -# along with this program. If not, see . 
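This hunk deletes ego/tools/mv_cluster.py outright, including `analyze_attributes` and `cluster_mv_grids`. For anyone who still needs the selection step, the core technique (k-means over normalized grid attributes, then picking the grid closest to each cluster centroid) is easy to reproduce standalone. The following is a minimal sketch, not part of eGo: a hypothetical `select_representative_grids` that mirrors the removed logic under the assumption that scikit-learn is available and the per-grid attributes are already collected in a DataFrame, down to the fixed `random_state=1808` the module used.

```python
import numpy as np
import pandas as pd

from sklearn.cluster import KMeans


def select_representative_grids(attributes: pd.DataFrame, n_clusters: int) -> pd.DataFrame:
    """Cluster MV grids on normalized attributes and pick the grid
    closest to each centroid (illustrative stand-in for the deleted code)."""
    # scale every attribute to [0, 1] so no single attribute dominates the distance
    X = (attributes / attributes.max()).to_numpy()
    # same fixed seed the removed module used
    km = KMeans(n_clusters=n_clusters, random_state=1808).fit(X)
    rows = []
    for label in range(n_clusters):
        members = np.flatnonzero(km.labels_ == label)
        # Euclidean distance of each member grid to its cluster centre
        dists = np.linalg.norm(X[members] - km.cluster_centers_[label], axis=1)
        rows.append(
            {
                "no_of_points_per_cluster": len(members),
                "cluster_percentage": round(100 * len(members) / len(X), 2),
                "the_selected_network_id": attributes.index[members[np.argmin(dists)]],
                "represented_grids": list(attributes.index[members]),
            }
        )
    out = pd.DataFrame(rows)
    out.index.name = "cluster_id"
    return out
```

Called as, e.g., `select_representative_grids(attrs[["Solar_cumulative_capacity", "Wind_cumulative_capacity", "The_Farthest_node"]], no_grids)`, it returns one representative grid per cluster plus the list of grids it stands for, analogous to the `grid_choice` table used elsewhere in eGo.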
- -# File description -""" -This file contains all functions regarding the clustering of MV grids -""" -__copyright__ = ("Flensburg University of Applied Sciences, " - "Europa-Universität Flensburg, " - "Centre for Sustainable Energy Systems") -__license__ = "GNU Affero General Public License Version 3 (AGPL-3.0)" -__author__ = "wolf_bunke, maltesc" - -# Import -#from __future__ import print_function -import os -import logging - -if not 'READTHEDOCS' in os.environ: - import pickle - - import pandas as pd - - from sklearn.cluster import KMeans - import numpy as np - -logger = logging.getLogger(__name__) - -def analyze_attributes(ding0_files): - """ - Calculates the attributes wind and solar capacity and farthest node - for all files in ding0_files. Results are written to ding0_files - - Parameters - ---------- - ding0_files : :obj:`str` - Path to ding0 files - - """ - base_path = ding0_files - - not_found = [] - tccs = [] # Total Cumulative Capacity of Solar - tccw = [] # Total Cumulative Capacity of Wind - fnlvmv = [] # the Farthest Node in both networks (lv and mv) - MV_id_list = [] # Distrct id list - - for district_number in list(range(1, 4000)): - - try: - pickle_name = 'ding0_grids__{}.pkl'.format( - district_number) - nd = pickle.load(open(os.path.join(base_path, pickle_name), 'rb')) - print('District no.', district_number, 'found!') - except: - not_found.append(district_number) - continue - - MV_id = 0 - MV_id = nd._mv_grid_districts[0].id_db - - mv_cum_solar_MV = 0 # Solar cumulative capacity in MV - mv_cum_wind_MV = 0 # Solar cumulative capacity in MV - - # cumulative capacity of solar and wind in MV - for geno in nd._mv_grid_districts[0].mv_grid.generators(): - if geno.type == 'solar': - mv_cum_solar_MV += geno.capacity - if geno.type == 'wind': - mv_cum_wind_MV += geno.capacity - - lvg = 0 - mv_cum_solar_LV = 0 - mv_cum_wind_LV = 0 - - # cumulative capacity of solar and wind in LV - for lvgs in nd._mv_grid_districts[0].lv_load_areas(): - for lvgs1 in lvgs.lv_grid_districts(): - lvg += len(list(lvgs1.lv_grid.generators())) - for deno in lvgs1.lv_grid.generators(): - if deno.type == 'solar': - mv_cum_solar_LV += deno.capacity - if deno.type == 'wind': - mv_cum_wind_LV += deno.capacity - - # Total solar cumulative capacity in lv and mv - total_cum_solar = mv_cum_solar_MV + mv_cum_solar_LV - # Total wind cumulative capacity in lv and mv - total_cum_wind = mv_cum_wind_MV + mv_cum_wind_LV - - # append to lists - tccs.append(total_cum_solar) - tccw.append(total_cum_wind) - - # The farthest node length from MV substation - from ding0.core.network.stations import LVStationDing0 - - tot_dist = [] - max_length = 0 - max_length_list = [] - max_of_max = 0 - - # make CB open (normal operation case) - nd.control_circuit_breakers(mode='open') - # setting the root to measure the path from - root_mv = nd._mv_grid_districts[0].mv_grid.station() - # 1st from MV substation to LV station node - # Iteration through nodes - for node2 in nd._mv_grid_districts[0].mv_grid._graph.nodes(): - # select only LV station nodes - if isinstance( - node2, - LVStationDing0) and not node2.lv_load_area.is_aggregated: - - length_from_MV_to_LV_station = 0 - # Distance from MV substation to LV station node - length_from_MV_to_LV_station = nd._mv_grid_districts[ - 0 - ].mv_grid.graph_path_length( - node_source=node2, node_target=root_mv) / 1000 - - # Iteration through lv load areas - for lvgs in nd._mv_grid_districts[0].lv_load_areas(): - for lvgs1 in lvgs.lv_grid_districts(): - if lvgs1.lv_grid._station == node2: - 
root_lv = node2 # setting a new root - for node1 in lvgs1.lv_grid._graph.nodes(): - - length_from_LV_staion_to_LV_node = 0 - - # Distance from LV station to LV nodes - length_from_LV_staion_to_LV_node = ( - lvgs1.lv_grid.graph_path_length( - node_source=node1, - node_target=root_lv) / 1000) - - length_from_LV_node_to_MV_substation = 0 - - # total distances in both grids MV and LV - length_from_LV_node_to_MV_substation = ( - length_from_MV_to_LV_station - + length_from_LV_staion_to_LV_node) - - # append the total distance to a list - tot_dist.append( - length_from_LV_node_to_MV_substation) - if any(tot_dist): - max_length = max(tot_dist) - - # append max lengths of all grids to a list - max_length_list.append(max_length) - if any(max_length_list): - # to pick up max of max - max_of_max = max(max_length_list) - - fnlvmv.append(max_of_max) # append to a new list - MV_id_list.append(MV_id) # append the network id to a new list - - # export results to dataframes - d = {'id': MV_id_list, 'Solar_cumulative_capacity': tccs, - 'Wind_cumulative_capacity': tccw, - 'The_Farthest_node': fnlvmv} # assign lists to columns - # not founded networks - are_not_found = {'District_files_that_are_not_found': not_found} - - df = pd.DataFrame(d) # dataframe for results - - # dataframe for not found files id - df_are_not_found = pd.DataFrame(are_not_found) - - # Exporting dataframe to CSV files - df.to_csv(base_path + '/' + 'attributes.csv', sep=',') - df_are_not_found.to_csv(base_path + '/' + 'Not_found_grids.csv', sep=',') - - -def cluster_mv_grids( - no_grids, - cluster_base): - """ - Clusters the MV grids based on the attributes, for a given number - of MV grids - - Parameters - ---------- - ding0_files : :obj:`str` - Path to ding0 files - no_grids : int - Desired number of clusters (of MV grids) - - Returns - ------- - :pandas:`pandas.DataFrame` - Dataframe containing the clustered MV grids and their weightings - - """ - cluster_base_pu = pd.DataFrame() - - for attribute in cluster_base: - attribute_max = cluster_base[attribute].max() - cluster_base_pu[attribute] = cluster_base[attribute] / attribute_max - - id_ = [] - m = [] - for idx, row in cluster_base_pu.iterrows(): - id_.append(idx) - f = [] - for attribute in row: - f.append(attribute) - - m.append(f) - - X = np.array(m) - - logger.info( - 'Used Clustering Attributes: \n {}'.format( - list(cluster_base.columns))) - - no_clusters = no_grids - - ran_state = 1808 - - # Starting KMeans clustering - kmeans = KMeans(n_clusters=no_clusters, random_state=ran_state) - - # Return a label for each point - cluster_labels = kmeans.fit_predict(X) - - # Centers of clusters - centroids = kmeans.cluster_centers_ - - id_clus_dist = {} - - # Iterate through each point in dataset array X - for i in range(len(X)): - clus = cluster_labels[i] # point's cluster id - cent = centroids[cluster_labels[i]] # Cluster's center coordinates - - # Distance from that point to cluster's center (3d coordinates) - dist = ( - (X[i][0] - centroids[clus][0]) ** 2 - + (X[i][1] - centroids[clus][1]) ** 2 - + (X[i][2] - centroids[clus][2]) ** 2) ** (1 / 2) - - id_clus_dist.setdefault(clus, []).append({id_[i]: dist}) - - cluster_df = pd.DataFrame( - columns=[ - 'no_of_points_per_cluster', - 'cluster_percentage', - 'the_selected_network_id', - 'represented_grids']) - cluster_df.index.name = 'cluster_id' - - for key, value in id_clus_dist.items(): - no_points_clus = sum(1 for v in value if v) - # percentage of points per cluster - clus_perc = (no_points_clus / len(X)) * 100 - - id_dist = {} - for 
value_1 in value: - id_dist.update(value_1) - - # returns the shortest distance point (selected network) - short_dist_net_id_dist = min(id_dist.items(), key=lambda x: x[1]) - - cluster_df.loc[key] = [ - no_points_clus, - round(clus_perc, 2), - short_dist_net_id_dist[0], - list(id_dist.keys())] - - return cluster_df diff --git a/ego/tools/plots.py b/ego/tools/plots.py index cb4018d5..2a4acdd4 100644 --- a/ego/tools/plots.py +++ b/ego/tools/plots.py @@ -21,54 +21,62 @@ eGo results. """ +import os + import numpy as np import pandas as pd -import os + geopandas = True -if not 'READTHEDOCS' in os.environ: - from etrago.tools.plot import (plot_line_loading, plot_stacked_gen, - add_coordinates, curtailment, gen_dist, - storage_distribution, - plot_voltage, plot_residual_load, coloring) - from ego.tools.economics import etrago_convert_overnight_cost - from ego.tools.utilities import open_oedb_session - from pypsa import Network as PyPSANetwork +if not "READTHEDOCS" in os.environ: + from math import log10, sqrt + import pyproj as proj - from math import sqrt, log10 - from shapely.geometry import Polygon, Point, MultiPolygon + from geoalchemy2 import * + from pypsa import Network as PyPSANetwork + from shapely.geometry import MultiPolygon, Point, Polygon + + from ego.tools.economics import etrago_convert_overnight_cost + from ego.tools.utilities import open_oedb_session + try: - import geopandas as gpd + import branca.colormap as cm import folium + import geopandas as gpd + from folium import plugins - from folium.plugins import FloatImage from folium.features import CustomIcon - import branca.colormap as cm + from folium.plugins import FloatImage except: geopandas = False import oedialect import webbrowser import subprocess from egoio.db_tables.model_draft import ( - EgoGridMvGriddistrict, RenpassGisParameterRegion) + EgoGridMvGriddistrict, + RenpassGisParameterRegion, + ) from egoio.db_tables.grid import EgoDpMvGriddistrict import matplotlib.pyplot as plt import matplotlib as mpl import matplotlib.colors as mcolors import logging -logger = logging.getLogger('ego') -__copyright__ = "Flensburg University of Applied Sciences, Europa-Universität"\ +logger = logging.getLogger("ego") + +__copyright__ = ( + "Flensburg University of Applied Sciences, Europa-Universität" "Flensburg, Centre for Sustainable Energy Systems" +) __license__ = "GNU Affero General Public License Version 3 (AGPL-3.0)" __author__ = "wolfbunke" # plot colore of Carriers def carriers_colore(): - """ Return matplotlib colore set per carrier (technologies of + """Return matplotlib colore set per carrier (technologies of generators) of eTraGo. 
Returns
    -------
    colors :  :obj:`dict`
        List of carriers and matplotlib colores
 
    """
-    colors = {'biomass': 'green',
-              'coal': 'k',
-              'gas': 'orange',
-              'eeg_gas': 'olive',
-              'geothermal': 'purple',
-              'lignite': 'brown',
-              'oil': 'darkgrey',
-              'other_non_renewable': 'pink',
-              'reservoir': 'navy',
-              'run_of_river': 'aqua',
-              'pumped_storage': 'steelblue',
-              'solar': 'yellow',
-              'uranium': 'lime',
-              'waste': 'sienna',
-              'wind': 'skyblue',
-              'slack': 'pink',
-              'load shedding': 'red',
-              'nan': 'm',
-              'imports': 'salmon',
-              '': 'm'}
+    colors = {
+        "biomass": "green",
+        "coal": "k",
+        "gas": "orange",
+        "eeg_gas": "olive",
+        "geothermal": "purple",
+        "lignite": "brown",
+        "oil": "darkgrey",
+        "other_non_renewable": "pink",
+        "reservoir": "navy",
+        "run_of_river": "aqua",
+        "pumped_storage": "steelblue",
+        "solar": "yellow",
+        "uranium": "lime",
+        "waste": "sienna",
+        "wind": "skyblue",
+        "slack": "pink",
+        "load shedding": "red",
+        "nan": "m",
+        "imports": "salmon",
+        "": "m",
+    }
 
    return colors
 
 
 def ego_colore():
-    """ Get the four eGo colores
+    """Get the four eGo colors
 
    Returns
    -------
    colors :  :obj:`dict`
        List of eGo matplotlib hex colores
    """
-    colors = {'egoblue1': '#1F567D',
-              'egoblue2': '#84A2B8',
-              'egoblue3': '#A3B9C9',
-              'egoblue4': '#C7D5DE'
-              }
+    colors = {
+        "egoblue1": "#1F567D",
+        "egoblue2": "#84A2B8",
+        "egoblue3": "#A3B9C9",
+        "egoblue4": "#C7D5DE",
+    }
 
    return colors
 
 
-def plot_storage_expansion(ego, filename=None, dpi=300,
-                           column='overnight_costs',
-                           scaling=1):
-    """ Plot line expantion
+def plot_storage_expansion(
+    ego, filename=None, dpi=300, column="overnight_costs", scaling=1
+):
+    """Plot storage expansion
 
    Parameters
    ----------
@@ -148,38 +159,45 @@ def plot_storage_expansion(ego, filename=None, dpi=300,
 
    json_file = ego.json_file
 
    # get storage values
-    if 'storage' in ego.json_file['eTraGo']['extendable']:
-        storage_inv = network.storage_units[network.storage_units.
-                                            capital_cost > 0.]
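Both the removed and the rewritten version of this block follow the same pandas pattern: select the optimized storage units with a boolean mask on `capital_cost`, then derive cost columns from `p_nom_opt`. A self-contained toy illustration of that pattern (all numbers invented; the constant `annuity_factor` is only a stand-in for what `etrago_convert_overnight_cost` derives from scenario_settings.json):

```python
import pandas as pd

# toy stand-in for network.storage_units after the optimization
storage_units = pd.DataFrame(
    {"capital_cost": [0.0, 42.0, 17.5], "p_nom_opt": [10.0, 5.0, 8.0]},
    index=["pumped_hydro", "battery_1", "battery_2"],
)

# the boolean mask keeps only units the solver was allowed to extend
storage_inv = storage_units[storage_units.capital_cost > 0.0].copy()

# annualized investment per unit
storage_inv["investment_costs"] = storage_inv.capital_cost * storage_inv.p_nom_opt

# hypothetical annuity factor; the real conversion is etrago_convert_overnight_cost()
annuity_factor = 0.07
storage_inv["overnight_costs"] = storage_inv["investment_costs"] / annuity_factor

print(storage_inv[["investment_costs", "overnight_costs"]])
```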
-    storage_inv['investment_costs'] = (storage_inv.capital_cost *
-                                       storage_inv.p_nom_opt)
-    storage_inv['overnight_costs'] = etrago_convert_overnight_cost(
-        storage_inv['investment_costs'], json_file)
+    if "storage" in ego.json_file["eTraGo"]["extendable"]:
+        storage_inv = network.storage_units[network.storage_units.capital_cost > 0.0]
+        storage_inv["investment_costs"] = (
+            storage_inv.capital_cost * storage_inv.p_nom_opt
+        )
+        storage_inv["overnight_costs"] = etrago_convert_overnight_cost(
+            storage_inv["investment_costs"], json_file
+        )
 
    msd_max = storage_inv[column].max()
    msd_median = storage_inv[column].median()
    msd_min = storage_inv[column].min()
 
-    if (msd_max - msd_min) > 1.e+5:
+    if (msd_max - msd_min) > 1.0e5:
 
        if msd_max != 0:
            LabelVal = int(log10(msd_max))
        else:
            LabelVal = 0
        if LabelVal < 0:
-            LabelUnit = '€'
-            msd_max, msd_median, msd_min = msd_max * \
-                1000, msd_median * 1000, msd_min * 1000
+            LabelUnit = "€"
+            msd_max, msd_median, msd_min = (
+                msd_max * 1000,
+                msd_median * 1000,
+                msd_min * 1000,
+            )
            storage_inv[column] = storage_inv[column] * 1000
        elif LabelVal < 3:
-            LabelUnit = 'k €'
+            LabelUnit = "k €"
        else:
-            LabelUnit = 'M €'
-            msd_max, msd_median, msd_min = msd_max / \
-                1000, msd_median / 1000, msd_min / 1000
+            LabelUnit = "M €"
+            msd_max, msd_median, msd_min = (
+                msd_max / 1000,
+                msd_median / 1000,
+                msd_min / 1000,
+            )
            storage_inv[column] = storage_inv[column] / 1000
    else:
-        LabelUnit = '€'
+        LabelUnit = "€"
 
    # start plotting
    figsize = 6, 6
@@ -187,48 +205,53 @@
 
    bus_sizes = storage_inv[column] * scaling
 
-    if column == 'investment_costs':
-        title = 'Annualized Storage costs per timestep'
-        ltitel = 'Storage costs'
-    if column == 'overnight_costs':
-        title = 'Total Expansion Costs Overnight'
-        ltitel = 'Storage costs'
-    if column == 'p_nom_opt':
-        title = 'Storage Expansion in MVA'
-        ltitel = 'Storage size'
-        LabelUnit = 'kW'
-    if column not in ['investment_costs', 'overnight_costs', 'p_nom_opt']:
-        title = 'unknown'
-        ltitel = 'unknown'
-        LabelUnit = 'unknown'
+    if column == "investment_costs":
+        title = "Annualized Storage costs per timestep"
+        ltitel = "Storage costs"
+    if column == "overnight_costs":
+        title = "Total Expansion Costs Overnight"
+        ltitel = "Storage costs"
+    if column == "p_nom_opt":
+        title = "Storage Expansion in MVA"
+        ltitel = "Storage size"
+        LabelUnit = "kW"
+    if column not in ["investment_costs", "overnight_costs", "p_nom_opt"]:
+        title = "unknown"
+        ltitel = "unknown"
+        LabelUnit = "unknown"
 
    if sum(storage_inv[column]) == 0:
-        sc = network.plot(bus_sizes=0,
-                          ax=ax,
-                          title="No storage expantion")
+        sc = network.plot(bus_sizes=0, ax=ax, title="No storage expansion")
    else:
        sc = network.plot(
            bus_sizes=bus_sizes,
-            bus_colors='g',
+            bus_colors="g",
            # bus_cmap=
            # line_colors='gray',
            title=title,
-            line_widths=0.3
+            line_widths=0.3,
        )
 
    ax.set_alpha(0.4)
 
    # add legend
    for area in [msd_max, msd_median, msd_min]:
-        plt.scatter([], [], c='white', s=area * scaling,
-                    label='= ' + str(round(area, 0)) + LabelUnit + ' ')
+        plt.scatter(
+            [],
+            [],
+            c="white",
+            s=area * scaling,
+            label="= " + str(round(area, 0)) + LabelUnit + " ",
+        )
 
-    plt.legend(scatterpoints=1,
-               labelspacing=1,
-               title=ltitel,
-               loc='upper left',
-               shadow=True,
-               fontsize='x-large')
+    plt.legend(
+        scatterpoints=1,
+        labelspacing=1,
+        title=ltitel,
+        loc="upper left",
+        shadow=True,
+        fontsize="x-large",
+    )
 
    ax.autoscale(tight=True)
 
@@ -237,12 +260,12 @@
    else:
        fig = 
ax.get_figure() fig.set_size_inches(10, 8, forward=True) - fig.savefig(filename, dpi=dpi) + fig.savefig(filename, dpi=dpi) plt.close() -def plot_line_expansion(ego, filename=None, dpi=300, column='overnight_costs'): - """ Plot line expantion +def plot_line_expansion(ego, filename=None, dpi=300, column="overnight_costs"): + """Plot line expantion Parameters ---------- @@ -267,18 +290,21 @@ def plot_line_expansion(ego, filename=None, dpi=300, column='overnight_costs'): json_file = ego.json_file # get values - if 'network' in ego.json_file['eTraGo']['extendable']: - network.lines['s_nom_expansion'] = network.lines.s_nom_opt.subtract( - network.lines.s_nom, axis='index') - network.lines['investment_costs'] = network.lines.s_nom_expansion.\ - multiply(network.lines.capital_cost, axis='index') - network.lines['overnight_costs'] = etrago_convert_overnight_cost( - network.lines['investment_costs'], json_file) + if "network" in ego.json_file["eTraGo"]["extendable"]: + network.lines["s_nom_expansion"] = network.lines.s_nom_opt.subtract( + network.lines.s_nom, axis="index" + ) + network.lines["investment_costs"] = network.lines.s_nom_expansion.multiply( + network.lines.capital_cost, axis="index" + ) + network.lines["overnight_costs"] = etrago_convert_overnight_cost( + network.lines["investment_costs"], json_file + ) else: - network.lines['s_nom_expansion'] = None - network.lines['investment_costs'] = None - network.lines['overnight_costs'] = None + network.lines["s_nom_expansion"] = None + network.lines["investment_costs"] = None + network.lines["overnight_costs"] = None # start plotting figsize = 10, 8 @@ -286,40 +312,40 @@ def plot_line_expansion(ego, filename=None, dpi=300, column='overnight_costs'): cmap = plt.cm.jet - if column == 's_nom_expansion': + if column == "s_nom_expansion": line_value = network.lines[column] title = "Line expansion in MVA" - if column == 'overnight_costs': + if column == "overnight_costs": line_value = network.lines[column] title = "Total Expansion Costs in € per line" - if column == 'investment_costs': + if column == "investment_costs": line_value = network.lines[column] title = "Annualized Expansion Costs in € per line and time step" - line_widths = (line_value/line_value.max()) + line_widths = line_value / line_value.max() - lc = network.plot(ax=ax, line_colors=line_value, - line_cmap=cmap, - title=title, - line_widths=line_widths) + lc = network.plot( + ax=ax, + line_colors=line_value, + line_cmap=cmap, + title=title, + line_widths=line_widths, + ) - boundaries = [min(line_value), - max(line_value)] + boundaries = [min(line_value), max(line_value)] v = np.linspace(boundaries[0], boundaries[1], 101) print(v.dtype.name) # colorbar - cb = plt.colorbar(lc[1], boundaries=v, - ticks=v[0:101:10], - ax=ax) + cb = plt.colorbar(lc[1], boundaries=v, ticks=v[0:101:10], ax=ax) cb.set_clim(vmin=boundaries[0], vmax=boundaries[1]) - if column == 's_nom_expansion': - cb.set_label('Expansion in MVA per line') - if column == 'overnight_costs': - cb.set_label('Total Expansion Costs in € per line') - if column == 'investment_costs': - cb.set_label('Annualized Expansion Costs in € per line') + if column == "s_nom_expansion": + cb.set_label("Expansion in MVA per line") + if column == "overnight_costs": + cb.set_label("Total Expansion Costs in € per line") + if column == "investment_costs": + cb.set_label("Annualized Expansion Costs in € per line") ax.autoscale(tight=True) @@ -328,7 +354,7 @@ def plot_line_expansion(ego, filename=None, dpi=300, column='overnight_costs'): else: fig = 
ax.get_figure() fig.set_size_inches(10, 8, forward=True) - fig.savefig(filename, dpi=dpi) + fig.savefig(filename, dpi=dpi) plt.close() @@ -358,51 +384,44 @@ def plot_grid_storage_investment(costs_df, filename, display, var=None): bar_width = 0.35 opacity = 0.4 - if var == 'overnight_cost': - tic = costs_df[['component', - 'overnight_costs', - 'voltage_level', - 'differentiation']] - tic.set_index(['voltage_level', 'component', - 'differentiation'], inplace=True) - ax = tic.unstack().plot(kind='bar', - stacked=False, - - rot=0, - color=([colors.get(key) - for key in - ['egoblue1', - 'egoblue2', - 'egoblue4']]), - legend=False) + if var == "overnight_cost": + tic = costs_df[ + ["component", "overnight_costs", "voltage_level", "differentiation"] + ] + tic.set_index(["voltage_level", "component", "differentiation"], inplace=True) + ax = tic.unstack().plot( + kind="bar", + stacked=False, + rot=0, + color=([colors.get(key) for key in ["egoblue1", "egoblue2", "egoblue4"]]), + legend=False, + ) ax.set_ylabel("Overnight costs of simulation") - ax.set_title("Total costs of simulation, " - "voltage level and component", y=1.08) + ax.set_title( + "Total costs of simulation, " "voltage level and component", y=1.08 + ) else: - tic = costs_df[['component', - 'capital_cost', - 'voltage_level', - 'differentiation']] - tic.set_index(['voltage_level', 'component', - 'differentiation'], inplace=True) - ax = tic.unstack().plot(kind='bar', - rot=0, - stacked=False, - - color=([colors.get(key) - for key in - ['egoblue1', - 'egoblue2', - 'egoblue3']]), - legend=False) + tic = costs_df[ + ["component", "capital_cost", "voltage_level", "differentiation"] + ] + tic.set_index(["voltage_level", "component", "differentiation"], inplace=True) + ax = tic.unstack().plot( + kind="bar", + rot=0, + stacked=False, + color=([colors.get(key) for key in ["egoblue1", "egoblue2", "egoblue3"]]), + legend=False, + ) ax.set_ylabel("Annualized costs per simulation periods") - ax.set_title("Annualized costs per simulation periods, " - "voltage level and component", y=1.08) + ax.set_title( + "Annualized costs per simulation periods, " "voltage level and component", + y=1.08, + ) - ax.set_xlabel('Voltage level and component') + ax.set_xlabel("Voltage level and component") ax.set_yscale("symlog") - ax.legend(('cross-border', 'domestic', 'foreign')) + ax.legend(("cross-border", "domestic", "foreign")) ax.autoscale() if display is True: @@ -410,7 +429,7 @@ def plot_grid_storage_investment(costs_df, filename, display, var=None): else: fig = ax.get_figure() fig.set_size_inches(10, 8, forward=True) - fig.savefig(filename, dpi=100) + fig.savefig(filename, dpi=100) plt.close() @@ -433,29 +452,29 @@ def power_price_plot(ego, filename, display): https://matplotlib.org/api/pyplot_api.html#matplotlib.pyplot.show """ plt.rcdefaults() -# colors = ego_colore() + # colors = ego_colore() carrier_colors = coloring() fig, ax = plt.subplots() # plot power_price - prc = ego.etrago.generator['power_price'] + prc = ego.etrago.generator["power_price"] bar_width = 0.35 opacity = 0.4 - ind = np.arange(len(prc.index)) # the x locations for the groups - width = 0.35 # the width of the bars: can also be len(x) sequence + ind = np.arange(len(prc.index)) # the x locations for the groups + width = 0.35 # the width of the bars: can also be len(x) sequence plt_colors = [carrier_colors[carrier] for carrier in prc.index] -# plt_colors = colors['egoblue1'] + # plt_colors = colors['egoblue1'] - ax.barh(ind, prc, align='center', color=plt_colors) + ax.barh(ind, prc, 
align="center", color=plt_colors) ax.set_yticks(ind) ax.set_yticklabels(prc.index) ax.invert_yaxis() - ax.set_xlabel('Power price in €/MWh') - ax.set_title('Power Costs per Carrier') + ax.set_xlabel("Power price in €/MWh") + ax.set_title("Power Costs per Carrier") ax.autoscale(tight=True) @@ -464,7 +483,7 @@ def power_price_plot(ego, filename, display): else: fig = ax.get_figure() fig.set_size_inches(10, 8, forward=True) - fig.savefig(filename, dpi=100) + fig.savefig(filename, dpi=100) def plot_storage_use(ego, filename, display): @@ -486,18 +505,15 @@ def plot_storage_use(ego, filename, display): """ colors = ego_colore() - ax = ego.etrago.\ - storage_charges[['charge', 'discharge']].plot(kind='bar', - title="Storage usage", - stacked=True, - color=([colors.get(key) - for key in - ['egoblue1', - 'egoblue2']]), - figsize=( - 15, 10), - legend=True, - fontsize=12) + ax = ego.etrago.storage_charges[["charge", "discharge"]].plot( + kind="bar", + title="Storage usage", + stacked=True, + color=([colors.get(key) for key in ["egoblue1", "egoblue2"]]), + figsize=(15, 10), + legend=True, + fontsize=12, + ) ax.set_xlabel("Kind of Storage", fontsize=12) ax.set_ylabel("Charge and Discharge in MWh", fontsize=12) ax.autoscale(tight=False) @@ -508,7 +524,7 @@ def plot_storage_use(ego, filename, display): fig = ax.get_figure() fig.set_size_inches(10, 8, forward=True) fig.subplots_adjust(bottom=0.25) - fig.savefig(filename, dpi=100) + fig.savefig(filename, dpi=100) def get_country(session, region=None): @@ -530,35 +546,38 @@ def get_country(session, region=None): if region is None: # Define regions 'FR', - region = ['DE', 'DK', 'BE', 'LU', - 'NO', 'PL', 'CH', 'CZ', 'SE', 'NL'] + region = ["DE", "DK", "BE", "LU", "NO", "PL", "CH", "CZ", "SE", "NL"] else: region # get database tabel - query = session.query(RenpassGisParameterRegion.gid, - RenpassGisParameterRegion.stat_level, - RenpassGisParameterRegion.u_region_id, - RenpassGisParameterRegion.geom, - RenpassGisParameterRegion.geom_point) + query = session.query( + RenpassGisParameterRegion.gid, + RenpassGisParameterRegion.stat_level, + RenpassGisParameterRegion.u_region_id, + RenpassGisParameterRegion.geom, + RenpassGisParameterRegion.geom_point, + ) # get regions by query and filter - Regions = [(gid, u_region_id, stat_level, - shape.to_shape(geom), - shape.to_shape(geom_point)) for gid, u_region_id, stat_level, - geom, geom_point in query.filter( - RenpassGisParameterRegion.u_region_id. - in_(region)).all()] + Regions = [ + (gid, u_region_id, stat_level, shape.to_shape(geom), shape.to_shape(geom_point)) + for gid, u_region_id, stat_level, geom, geom_point in query.filter( + RenpassGisParameterRegion.u_region_id.in_(region) + ).all() + ] # define SRID - crs = {'init': 'epsg:4326'} + crs = {"init": "epsg:4326"} country = gpd.GeoDataFrame( - Regions, columns=['gid', 'stat_level', 'u_region_id', - 'geometry', 'point_geom'], crs=crs) + Regions, + columns=["gid", "stat_level", "u_region_id", "geometry", "point_geom"], + crs=crs, + ) return country def prepareGD(session, subst_id=None, version=None): - """ Get MV grid districts for plotting form oedb. + """Get MV grid districts for plotting form oedb. 
Parameters ---------- @@ -577,45 +596,66 @@ def prepareGD(session, subst_id=None, version=None): if version: - query = session.query(EgoDpMvGriddistrict.subst_id, - EgoDpMvGriddistrict.geom) + query = session.query(EgoDpMvGriddistrict.subst_id, EgoDpMvGriddistrict.geom) if isinstance(subst_id, list): - Regions = [(subst_id, shape.to_shape(geom)) for subst_id, geom in - query.filter(EgoDpMvGriddistrict.version == version, - EgoDpMvGriddistrict.subst_id.in_( - subst_id)).all()] + Regions = [ + (subst_id, shape.to_shape(geom)) + for subst_id, geom in query.filter( + EgoDpMvGriddistrict.version == version, + EgoDpMvGriddistrict.subst_id.in_(subst_id), + ).all() + ] elif subst_id == "all": - Regions = [(subst_id, shape.to_shape(geom)) for subst_id, geom in - query.filter(EgoDpMvGriddistrict.version == - version).all()] + Regions = [ + (subst_id, shape.to_shape(geom)) + for subst_id, geom in query.filter( + EgoDpMvGriddistrict.version == version + ).all() + ] else: # ToDo query doesn't looks stable - Regions = [(subst_id, shape.to_shape(geom)) for subst_id, geom in - query.filter(EgoDpMvGriddistrict.version == - version).all()] + Regions = [ + (subst_id, shape.to_shape(geom)) + for subst_id, geom in query.filter( + EgoDpMvGriddistrict.version == version + ).all() + ] # toDo add values of sub_id etc. to popup else: # from model_draft - query = session.query(EgoGridMvGriddistrict.subst_id, - EgoGridMvGriddistrict.geom) - Regions = [(subst_id, shape.to_shape(geom)) for subst_id, geom in - query.filter(EgoGridMvGriddistrict.subst_id.in_( - subst_id)).all()] - - crs = {'init': 'epsg:3035'} - region = gpd.GeoDataFrame( - Regions, columns=['subst_id', 'geometry'], crs=crs) - region = region.to_crs({'init': 'epsg:4326'}) + query = session.query( + EgoGridMvGriddistrict.subst_id, EgoGridMvGriddistrict.geom + ) + Regions = [ + (subst_id, shape.to_shape(geom)) + for subst_id, geom in query.filter( + EgoGridMvGriddistrict.subst_id.in_(subst_id) + ).all() + ] + + crs = {"init": "epsg:3035"} + region = gpd.GeoDataFrame(Regions, columns=["subst_id", "geometry"], crs=crs) + region = region.to_crs({"init": "epsg:4326"}) return region -def plot_edisgo_cluster(ego, filename, region=['DE'], display=False, dpi=150, - add_ehv_storage=False, grid_choice=None, title="", - cmap="jet", labelsize=10, fontsize=10): +def plot_edisgo_cluster( + ego, + filename, + region=["DE"], + display=False, + dpi=150, + add_ehv_storage=False, + grid_choice=None, + title="", + cmap="jet", + labelsize=10, + fontsize=10, +): """Plot the Clustering of selected Dingo networks Parameters @@ -645,47 +685,47 @@ def plot_edisgo_cluster(ego, filename, region=['DE'], display=False, dpi=150, """ session = ego.session - version = ego.json_file['eTraGo']['gridversion'] + version = ego.json_file["eTraGo"]["gridversion"] # get cluster if grid_choice: cluster = pd.read_csv(grid_choice, index_col=0) - cluster['represented_grids'] = cluster.apply( - lambda x: eval(x['represented_grids']), axis=1) + cluster["represented_grids"] = cluster.apply( + lambda x: eval(x["represented_grids"]), axis=1 + ) else: cluster = ego.edisgo.grid_choice - cluster = cluster.rename( - columns={"the_selected_network_id": "subst_id"}) + cluster = cluster.rename(columns={"the_selected_network_id": "subst_id"}) cluster_id = list(cluster.subst_id) # get country Polygon cnty = get_country(session, region=region) # get grid districts singel - if ego.json_file['eGo']['eDisGo'] is True: + if ego.json_file["eGo"]["eDisGo"] is True: gridcluster = prepareGD(session, cluster_id, version) - 
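The grid_choice CSV read here stores represented_grids as a stringified Python list, which the code rehydrates with eval. A sketch of the same parse using ast.literal_eval, which accepts only literals rather than arbitrary expressions; the column names follow the surrounding hunk, the values are invented:

    import ast

    import pandas as pd

    # grid_choice.csv keeps represented_grids as text, e.g. "[176, 201, 305]".
    cluster = pd.DataFrame(
        {
            "the_selected_network_id": [176, 1729],
            "no_of_points_per_cluster": [12, 30],
            "represented_grids": ["[176, 201, 305]", "[1729, 88]"],
        }
    )

    # literal_eval parses the list without executing anything else from the file.
    cluster["represented_grids"] = cluster["represented_grids"].apply(ast.literal_eval)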
gridcluster = gridcluster.merge(cluster, on='subst_id') + gridcluster = gridcluster.merge(cluster, on="subst_id") # add percentage of grid representation - gridcluster['percentage'] = ((gridcluster.no_of_points_per_cluster / - gridcluster.no_of_points_per_cluster.sum())*100) - gridcluster['percentage'] = gridcluster['percentage'].astype( - float).round(2) + gridcluster["percentage"] = ( + gridcluster.no_of_points_per_cluster + / gridcluster.no_of_points_per_cluster.sum() + ) * 100 + gridcluster["percentage"] = gridcluster["percentage"].astype(float).round(2) # get represented grids - repre_grids = pd.DataFrame(columns=['subst_id', - 'geometry', - 'cluster_id', - 'style']) + repre_grids = pd.DataFrame( + columns=["subst_id", "geometry", "cluster_id", "style"] + ) for cluster in gridcluster.index: rep_id = gridcluster.represented_grids[cluster] # represented_grids repre_grid = prepareGD(session, rep_id, version) - repre_grid['cluster_id'] = gridcluster.subst_id[cluster] + repre_grid["cluster_id"] = gridcluster.subst_id[cluster] repre_grids = repre_grids.append(repre_grid, ignore_index=True) # add common SRID - crs = {'init': 'epsg:4326'} + crs = {"init": "epsg:4326"} repre_grids = gpd.GeoDataFrame(repre_grids, crs=crs) # get all MV grids @@ -696,29 +736,35 @@ def plot_edisgo_cluster(ego, filename, region=['DE'], display=False, dpi=150, figsize = 5, 5 fig, ax = plt.subplots(1, 1, figsize=(figsize)) - cnty.plot(ax=ax, color='white', - edgecolor='whitesmoke', alpha=0.5, linewidth=0.1) - mvgrids.plot(ax=ax, color='white', alpha=0.1, linewidth=0.1) + cnty.plot(ax=ax, color="white", edgecolor="whitesmoke", alpha=0.5, linewidth=0.1) + mvgrids.plot(ax=ax, color="white", alpha=0.1, linewidth=0.1) - if ego.json_file['eGo']['eDisGo'] is True: + if ego.json_file["eGo"]["eDisGo"] is True: - repre_grids.plot(ax=ax, column='cluster_id', - cmap=cmap, - edgecolor='whitesmoke', - linewidth=0.005, - alpha=1, - legend=False) + repre_grids.plot( + ax=ax, + column="cluster_id", + cmap=cmap, + edgecolor="whitesmoke", + linewidth=0.005, + alpha=1, + legend=False, + ) # subplot - gridcluster.plot(ax=ax, column='percentage', - cmap=cmap, - edgecolor='black', - linewidth=1, - legend=True) + gridcluster.plot( + ax=ax, + column="percentage", + cmap=cmap, + edgecolor="black", + linewidth=1, + legend=True, + ) # add storage distribution if add_ehv_storage: - _storage_distribution(ego.etrago.network, scaling=1, filename=None, - ax=ax, fig=fig) + _storage_distribution( + ego.etrago.network, scaling=1, filename=None, ax=ax, fig=fig + ) ax.set_title(title) # ax.legend(title="id of cluster representative") @@ -727,8 +773,7 @@ def plot_edisgo_cluster(ego, filename, region=['DE'], display=False, dpi=150, # cb = plt.colorbar(ax) # cb.ax.tick_params(labelsize=17) - ax.set_ylabel("weighting of MV grid cluster in %", - fontsize=fontsize, rotation=270) + ax.set_ylabel("weighting of MV grid cluster in %", fontsize=fontsize, rotation=270) ax.yaxis.set_label_coords(1.2, 0.5) ax.autoscale(tight=True) @@ -738,7 +783,7 @@ def plot_edisgo_cluster(ego, filename, region=['DE'], display=False, dpi=150, else: fig = ax.get_figure() fig.set_size_inches(10, 8, forward=True) - fig.savefig(filename, dpi=dpi) + fig.savefig(filename, dpi=dpi) plt.close() @@ -762,43 +807,43 @@ def igeoplot(ego, tiles=None, geoloc=None, save_image=False): ------- plot: html HTML file with .js plot - """ + """ network = ego.etrago.network session = open_oedb_session(ego) # get scenario name from args - scn_name = ego.json_file['eTraGo']['scn_name'] - version = 
ego.json_file['eTraGo']['gridversion'] + scn_name = ego.json_file["eTraGo"]["scn_name"] + version = ego.json_file["eTraGo"]["gridversion"] # define SRID - crs = {'init': 'epsg:4326'} + crs = {"init": "epsg:4326"} if geoloc is None: geoloc = [network.buses.y.mean(), network.buses.x.mean()] - mp = folium.Map(tiles=None, location=geoloc, - control_scale=True, zoom_start=6) + mp = folium.Map(tiles=None, location=geoloc, control_scale=True, zoom_start=6) # add Nasa light background - if tiles == 'Nasa': - tiles = ("https://map1.vis.earthdata.nasa.gov/wmts-webmerc/" + - "VIIRS_CityLights_2012/default/GoogleMapsCompatible_" + - "Level8/{z}/{y}/{x}.jpg") - attr = ('© OpenStreetMap contributors, © CartoDB') + if tiles == "Nasa": + tiles = ( + "https://map1.vis.earthdata.nasa.gov/wmts-webmerc/" + + "VIIRS_CityLights_2012/default/GoogleMapsCompatible_" + + "Level8/{z}/{y}/{x}.jpg" + ) + attr = '© OpenStreetMap contributors, © CartoDB' folium.raster_layers.TileLayer(tiles=tiles, attr=attr).add_to(mp) else: - attr = ('© OpenStreetMap contributors, © OpenEnergy-Platform') + attr = '© OpenStreetMap contributors, © OpenEnergy-Platform' - folium.raster_layers.TileLayer('OpenStreetMap', attr=attr).add_to(mp) + folium.raster_layers.TileLayer("OpenStreetMap", attr=attr).add_to(mp) # Legend name - bus_group = folium.FeatureGroup( - name='Bus information (ehv/hv)') # , show=True + bus_group = folium.FeatureGroup(name="Bus information (ehv/hv)") # , show=True # create icon - #url = 'https://raw.githubusercontent.com/openego/eGo/master/doc/images/{}'.format - #icon_image = url('trafo.png') + # url = 'https://raw.githubusercontent.com/openego/eGo/master/doc/images/{}'.format + # icon_image = url('trafo.png') # bus_icon = CustomIcon(icon_image, # icon_size=(27, 47)) @@ -818,36 +863,43 @@ def igeoplot(ego, tiles=None, geoloc=None, save_image=False): v_mag_pu_max: {}
sub_network: {}
Version: {}
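The map scaffold these popups hang off is compact. A minimal, self-contained version of the folium setup used in igeoplot, with arbitrary coordinates and only the OpenStreetMap layer (no eGo objects needed):

    import folium

    # Start with no base tiles so every layer is added, and toggled, explicitly.
    mp = folium.Map(tiles=None, location=[51.0, 10.3], control_scale=True, zoom_start=6)
    attr = "© OpenStreetMap contributors, © OpenEnergy-Platform"
    folium.raster_layers.TileLayer("OpenStreetMap", attr=attr).add_to(mp)
    folium.LayerControl().add_to(mp)
    mp.save("minimal_map.html")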
- """.format(row.name, scn_name, row['carrier'], - row['control'], row['type'], row['v_nom'], - row['v_mag_pu_set'], - row['v_mag_pu_min'], row['v_mag_pu_max'], - row['sub_network'], version) + """.format( + row.name, + scn_name, + row["carrier"], + row["control"], + row["type"], + row["v_nom"], + row["v_mag_pu_set"], + row["v_mag_pu_min"], + row["v_mag_pu_max"], + row["sub_network"], + version, + ) # add Popup values use HTML for formating - folium.Marker([row["y"], row["x"]], popup=popup - ).add_to(bus_group) # icon=bus_icon + folium.Marker([row["y"], row["x"]], popup=popup).add_to( + bus_group + ) # icon=bus_icon - logger.info('Added Busses') + logger.info("Added Busses") def convert_to_hex(rgba_color): - """Convert rgba colors to hex - """ - red = str(hex(int(rgba_color[0]*255)))[2:].capitalize() - green = str(hex(int(rgba_color[1]*255)))[2:].capitalize() - blue = str(hex(int(rgba_color[2]*255)))[2:].capitalize() + """Convert rgba colors to hex""" + red = str(hex(int(rgba_color[0] * 255)))[2:].capitalize() + green = str(hex(int(rgba_color[1] * 255)))[2:].capitalize() + blue = str(hex(int(rgba_color[2] * 255)))[2:].capitalize() - if blue == '0': - blue = '00' - if red == '0': - red = '00' - if green == '0': - green = '00' + if blue == "0": + blue = "00" + if red == "0": + red = "00" + if green == "0": + green = "00" - return '#' + red + green + blue + return "#" + red + green + blue # Prepare lines - line_group = folium.FeatureGroup( - name='Line Loading (ehv/hv)') # , show=False + line_group = folium.FeatureGroup(name="Line Loading (ehv/hv)") # , show=False # get line Coordinates x0 = network.lines.bus0.map(network.buses.x) @@ -861,54 +913,60 @@ def convert_to_hex(rgba_color): cols = list(network.lines.columns) # color map lines - colormap = cm.linear.YlOrRd_09.scale( - lines.s_nom.min(), lines.s_nom.max()).to_step(6) + colormap = cm.linear.YlOrRd_09.scale(lines.s_nom.min(), lines.s_nom.max()).to_step( + 6 + ) # add parameter for line in network.lines.index: popup = """ Line: {}
- version: {}
""".format(line, version) + version: {}
""".format( + line, version + ) for col in cols: popup += """ {}: {}
""".format(col, lines[col][line]) # change colore function - l_color = colormapper_lines( - colormap, lines, line, column="s_nom") + l_color = colormapper_lines(colormap, lines, line, column="s_nom") # ToDo make it more generic - folium.PolyLine(([y0[line], x0[line]], [y1[line], x1[line]]), - popup=popup, color=convert_to_hex(l_color)).\ - add_to(line_group) + folium.PolyLine( + ([y0[line], x0[line]], [y1[line], x1[line]]), + popup=popup, + color=convert_to_hex(l_color), + ).add_to(line_group) # Add results # add expansion costs per line lines = network.lines - if 'network' in ego.json_file['eTraGo']['extendable']: - lines['s_nom_expansion'] = lines.s_nom_opt.subtract( - lines.s_nom, axis='index') - lines['annuity'] = lines.s_nom_expansion.multiply( - lines.capital_cost, axis='index') - lines['overnight_cost'] = etrago_convert_overnight_cost( - lines['annuity'], - ego.json_file, t=40, p=0.05) - lines['overnight_cost'] = lines['overnight_cost'].astype(float).round(0) + if "network" in ego.json_file["eTraGo"]["extendable"]: + lines["s_nom_expansion"] = lines.s_nom_opt.subtract(lines.s_nom, axis="index") + lines["annuity"] = lines.s_nom_expansion.multiply( + lines.capital_cost, axis="index" + ) + lines["overnight_cost"] = etrago_convert_overnight_cost( + lines["annuity"], ego.json_file, t=40, p=0.05 + ) + lines["overnight_cost"] = lines["overnight_cost"].astype(float).round(0) else: - lines['s_nom_expansion'] = 0. - lines['annuity'] = 0. - lines['overnight_cost'] = 0. + lines["s_nom_expansion"] = 0.0 + lines["annuity"] = 0.0 + lines["overnight_cost"] = 0.0 # Prepare lines line_results_group = folium.FeatureGroup( - name='Line costs by annuity costs (ehv/hv)') + name="Line costs by annuity costs (ehv/hv)" + ) # color map lines colormap2 = cm.linear.YlGn_09.scale( - lines.annuity.min(), lines.annuity.max()).to_step(4) + lines.annuity.min(), lines.annuity.max() + ).to_step(4) # add parameter cols = list(ego.etrago.network.lines.columns) - res = ('overnight_cost', 's_nom_expansion', 'annuity') - unit = ('EUR', 'MVA', 'EUR') + res = ("overnight_cost", "s_nom_expansion", "annuity") + unit = ("EUR", "MVA", "EUR") cols = [x for x in cols if x not in res] for line in network.lines.index: @@ -916,7 +974,9 @@ def convert_to_hex(rgba_color): popup = """ Line: {}
version: {}

- Line parameter:
""".format(line, version) + Line parameter:
""".format( + line, version + ) for col in cols: popup += """ {}: {}
""".format(col, lines[col][line]) @@ -924,39 +984,49 @@ def convert_to_hex(rgba_color): popup += """
Results:
""" for idx, val in enumerate(res): - popup += """{}: {:,} in {}
""".format(val, - lines[val][line], - unit[idx]) + popup += """{}: {:,} in {}
""".format(val, lines[val][line], unit[idx]) # change colore function - lr_color = colormapper_lines( - colormap2, lines, line, column="annuity") + lr_color = colormapper_lines(colormap2, lines, line, column="annuity") # ToDo make it more generic - folium.PolyLine(([y0[line], x0[line]], [y1[line], x1[line]]), - popup=popup, - color=convert_to_hex(lr_color) - ).add_to(line_results_group) + folium.PolyLine( + ([y0[line], x0[line]], [y1[line], x1[line]]), + popup=popup, + color=convert_to_hex(lr_color), + ).add_to(line_results_group) - logger.info('Added Lines') + logger.info("Added Lines") # Create ehv/hv storage expantion plot - store_group = folium.FeatureGroup( - name='Storage expantion (ehv/hv)') # , show=True + store_group = folium.FeatureGroup(name="Storage expantion (ehv/hv)") # , show=True - stores = network.storage_units[network.storage_units.carrier == - 'extendable_storage'] + stores = network.storage_units[ + network.storage_units.carrier == "extendable_storage" + ] # differentiation of storage units batteries = stores[stores.max_hours == 6] hydrogen = stores[stores.max_hours == 168] # sum by type and bus - storage_distribution = network.storage_units.p_nom_opt[stores.index].groupby( - network.storage_units.bus).sum().reindex(network.buses.index, fill_value=0.) - battery_distribution = network.storage_units.p_nom_opt[batteries.index].groupby( - network.storage_units.bus).sum().reindex(network.buses.index, fill_value=0.) - hydrogen_distribution = network.storage_units.p_nom_opt[hydrogen.index].groupby( - network.storage_units.bus).sum().reindex(network.buses.index, fill_value=0.) + storage_distribution = ( + network.storage_units.p_nom_opt[stores.index] + .groupby(network.storage_units.bus) + .sum() + .reindex(network.buses.index, fill_value=0.0) + ) + battery_distribution = ( + network.storage_units.p_nom_opt[batteries.index] + .groupby(network.storage_units.bus) + .sum() + .reindex(network.buses.index, fill_value=0.0) + ) + hydrogen_distribution = ( + network.storage_units.p_nom_opt[hydrogen.index] + .groupby(network.storage_units.bus) + .sum() + .reindex(network.buses.index, fill_value=0.0) + ) # add Coordinates sto_x = stores.bus.map(network.buses.x) @@ -970,44 +1040,49 @@ def convert_to_hex(rgba_color): popup = """ Storage: {}
version: {}

- Parameter:
""".format(store, version) + Parameter:
""".format( + store, version + ) for col in cols: popup += """ {}: {}
""".format(col, stores[col][store]) # get storage radius by p_nom_opt (MW) if lager as 1 KW - if ((stores['p_nom_opt'][store] > 7.4e-04) & - (stores['capital_cost'][store] > 10)): + if (stores["p_nom_opt"][store] > 7.4e-04) & ( + stores["capital_cost"][store] > 10 + ): - radius = (3**(1+stores['p_nom_opt'][store]/sto_max)) + radius = 3 ** (1 + stores["p_nom_opt"][store] / sto_max) # add singel storage folium.CircleMarker( location=([sto_y[store], sto_x[store]]), radius=radius, popup=popup, - color='#3186cc', + color="#3186cc", fill=True, - fill_color='#3186cc', - weight=1).add_to(store_group) + fill_color="#3186cc", + weight=1, + ).add_to(store_group) - logger.info('Added storages') + logger.info("Added storages") ###################### # add MV line loading # add grid districs - if ego.json_file['eGo']['eDisGo'] is True: + if ego.json_file["eGo"]["eDisGo"] is True: grid_group = folium.FeatureGroup( - name='Represented MV Grid district') # , show=False + name="Represented MV Grid district" + ) # , show=False subst_id = list(ego.edisgo.grid_choice.the_selected_network_id) district = prepareGD(session, subst_id, version) # Add for loop - crs = {'init': 'epsg:4326'} + crs = {"init": "epsg:4326"} for name, row in district.iterrows(): - mv_grid_id = row['subst_id'] + mv_grid_id = row["subst_id"] if not isinstance(ego.edisgo.network[mv_grid_id], str): lv, mv = _get_mv_plot_res(ego, mv_grid_id) @@ -1017,50 +1092,57 @@ def convert_to_hex(rgba_color): pop = """Grid district: {}

MV results:
- """.format(row['subst_id']) + """.format( + row["subst_id"] + ) for idxs in mv.index: pop += """ {} : {} €
- """.format(idxs, mv[0][idxs].astype( - float).round(2)) + """.format( + idxs, mv[0][idxs].astype(float).round(2) + ) pop += """LV results:
""" for idxs in lv.index: pop += """ {} : {} €
- """.format(idxs, lv[0][idxs].astype( - float).round(2)) + """.format( + idxs, lv[0][idxs].astype(float).round(2) + ) else: pop = """Grid district: {}

- """.format(row['subst_id']) + """.format( + row["subst_id"] + ) # folium.GeoJson(row['geometry']).add_to( # grid_group).add_child(folium.Popup(pop)) - geojson = folium.GeoJson(row['geometry']) + geojson = folium.GeoJson(row["geometry"]) popup = folium.Popup(pop) popup.add_to(geojson) geojson.add_to(grid_group) # Add cluster grids repgrid_group = folium.FeatureGroup( - name='Represented MV Grids per Cluster') # , show=False + name="Represented MV Grids per Cluster" + ) # , show=False cluster = ego.edisgo.grid_choice - cluster = cluster.rename( - columns={"the_selected_network_id": "subst_id"}) + cluster = cluster.rename(columns={"the_selected_network_id": "subst_id"}) - repre_grids = pd.DataFrame(columns=['subst_id', - 'geometry', - 'cluster_id', - 'color']) + repre_grids = pd.DataFrame( + columns=["subst_id", "geometry", "cluster_id", "color"] + ) - style_function = (lambda x: { - 'fillColor': x['properties']['color'], - 'weight': 0.5, 'color': 'black'}) + style_function = lambda x: { + "fillColor": x["properties"]["color"], + "weight": 0.5, + "color": "black", + } # simplify MultiPolygon tolerance = 0.002 @@ -1069,22 +1151,24 @@ def convert_to_hex(rgba_color): cluster_id = list(cluster.represented_grids[idx]) # represented_grids repre_grid = prepareGD(session, cluster_id, version) - repre_grid['cluster_id'] = cluster.subst_id[idx] + repre_grid["cluster_id"] = cluster.subst_id[idx] repre_grids = repre_grids.append(repre_grid, ignore_index=True) # prepare cluster colore - normal = mpl.colors.Normalize(vmin=repre_grids.cluster_id.min(), - vmax=repre_grids.cluster_id.max(), - clip=True) + normal = mpl.colors.Normalize( + vmin=repre_grids.cluster_id.min(), + vmax=repre_grids.cluster_id.max(), + clip=True, + ) mapper = plt.cm.ScalarMappable(norm=normal, cmap=plt.cm.viridis) # add colors to column - repre_grids['color'] = repre_grids['cluster_id'].apply( - lambda x: mcolors.to_hex(mapper.to_rgba(x))) + repre_grids["color"] = repre_grids["cluster_id"].apply( + lambda x: mcolors.to_hex(mapper.to_rgba(x)) + ) - repre_grids = gpd.GeoDataFrame( - repre_grids, geometry='geometry', crs=crs) + repre_grids = gpd.GeoDataFrame(repre_grids, geometry="geometry", crs=crs) # simplify Polygon geometry repre_grids.geometry = repre_grids.geometry.simplify(tolerance) @@ -1092,20 +1176,18 @@ def convert_to_hex(rgba_color): # add popup for name, row in repre_grids.iterrows(): - pops = """Represented Grid: {}
""".format( - row['cluster_id']) + pops = """Represented Grid: {}
""".format(row["cluster_id"]) - folium.GeoJson(repre_grids[name:name+1], - style_function=style_function, - name='represented grids' - ).add_to(repgrid_group - ).add_child(folium.Popup(pops)) + folium.GeoJson( + repre_grids[name : name + 1], + style_function=style_function, + name="represented grids", + ).add_to(repgrid_group).add_child(folium.Popup(pops)) - logger.info('Added MV Grids') + logger.info("Added MV Grids") # Prepare MV lines - mv_line_group = folium.FeatureGroup( - name='MV Grids (>=10kV)') # show=False + mv_line_group = folium.FeatureGroup(name="MV Grids (>=10kV)") # show=False mv_list = ego.edisgo.grid_choice.the_selected_network_id @@ -1119,66 +1201,73 @@ def convert_to_hex(rgba_color): # get line Coordinates x0 = mv_network.lines.bus0.loc[mv_network.lines.v_nom >= 10].map( - mv_network.buses.x) + mv_network.buses.x + ) x1 = mv_network.lines.bus1.loc[mv_network.lines.v_nom >= 10].map( - mv_network.buses.x) + mv_network.buses.x + ) y0 = mv_network.lines.bus0.loc[mv_network.lines.v_nom >= 10].map( - mv_network.buses.y) + mv_network.buses.y + ) y1 = mv_network.lines.bus1.loc[mv_network.lines.v_nom >= 10].map( - mv_network.buses.y) + mv_network.buses.y + ) # get content grid_expansion_costs = ego.edisgo.network[ - mv_grid_id].network.results.grid_expansion_costs - lines = pd.concat([mv_network.lines, - grid_expansion_costs], - axis=1, - join_axes=[mv_network.lines.index]) + mv_grid_id + ].network.results.grid_expansion_costs + lines = pd.concat( + [mv_network.lines, grid_expansion_costs], + axis=1, + join_axes=[mv_network.lines.index], + ) lines = lines.loc[mv_network.lines.v_nom >= 10] lines = lines.reindex() cols = list(lines.columns) - res_mv = ('overnight_costs', 'capital_cost') - unit = ('EUR', 'EUR/time step') + res_mv = ("overnight_costs", "capital_cost") + unit = ("EUR", "EUR/time step") cols = [x for x in cols if x not in res_mv] # save results as csv csv_print = False if csv_print == True: - geo_lines2 = pd.concat([y0, x0, y1, x1], - axis=1, - join_axes=[y0.index]) + geo_lines2 = pd.concat( + [y0, x0, y1, x1], axis=1, join_axes=[y0.index] + ) - line_export = pd.concat([lines, geo_lines2], - axis=1, - join_axes=[lines.index]) + line_export = pd.concat( + [lines, geo_lines2], axis=1, join_axes=[lines.index] + ) - line_export.to_csv("results/mv_line_results_" + - str(mv_grid_id)+".csv") + line_export.to_csv( + "results/mv_line_results_" + str(mv_grid_id) + ".csv" + ) # color map lines try: mv_colormap = cm.linear.YlGnBu_09.scale( - lines.overnight_costs.min(), - lines.overnight_costs.max()).to_step(6) + lines.overnight_costs.min(), lines.overnight_costs.max() + ).to_step(6) except: - mv_colormap = cm.linear.YlGnBu_09.scale( - 0, 0).to_step(6) + mv_colormap = cm.linear.YlGnBu_09.scale(0, 0).to_step(6) - mv_colormap.caption = 'Line investment of overnight cost (mv)' + mv_colormap.caption = "Line investment of overnight cost (mv)" # add parameter for line in lines.index: popup = """ Line: {}
- version: {}

""".format(line, version) + version: {}

""".format( + line, version + ) popup += """MV line parameter:
""" for col in cols: try: - popup += """ {}: {}
""".format(col, - lines[col][line]) + popup += """ {}: {}
""".format(col, lines[col][line]) except: popup += """ """ @@ -1186,34 +1275,34 @@ def convert_to_hex(rgba_color): for idx, val in enumerate(res_mv): try: - popup += """{}: {} in {}
""".format(val, - lines[val][line], - unit[idx]) + popup += """{}: {} in {}
""".format( + val, lines[val][line], unit[idx] + ) except: popup += """ """ # change colore function mv_color = colormapper_lines( - mv_colormap, lines, line, column="overnight_costs") + mv_colormap, lines, line, column="overnight_costs" + ) # ToDo make it more generic try: - folium.PolyLine(([y0[line], x0[line]], - [y1[line], x1[line]]), - popup=popup, color=convert_to_hex( - mv_color) + folium.PolyLine( + ([y0[line], x0[line]], [y1[line], x1[line]]), + popup=popup, + color=convert_to_hex(mv_color), ).add_to(mv_line_group) except: logger.disabled = True - logger.info('Cound not find a geometry') + logger.info("Cound not find a geometry") logger.disabled = False else: - logger.info(str(mv_grid_id)+" " + - str(ego.edisgo.network[mv_grid_id])) + logger.info(str(mv_grid_id) + " " + str(ego.edisgo.network[mv_grid_id])) mp.add_child(mv_colormap) # Add MV Storage # Legend name - mv_sto_group = folium.FeatureGroup(name='MV storages') # ,show=False + mv_sto_group = folium.FeatureGroup(name="MV storages") # ,show=False # add mv storages mv_grid_id = list(ego.edisgo.grid_choice.the_selected_network_id) @@ -1225,7 +1314,9 @@ def convert_to_hex(rgba_color): # create pypsa network only containing MV buses and lines pypsa_plot = PyPSANetwork() - pypsa_plot.buses = pypsa_network.buses.loc[pypsa_network.buses.v_nom >= 10] + pypsa_plot.buses = pypsa_network.buses.loc[ + pypsa_network.buses.v_nom >= 10 + ] # add Coordinates sto_x = pypsa_plot.storage_units.bus.map(pypsa_plot.buses.x) @@ -1238,32 +1329,36 @@ def convert_to_hex(rgba_color): for store in pypsa_plot.storage_units.index: popup = """ Storage: {}

- Parameter:
""".format(store,) + Parameter:
""".format( + store, + ) for col in sto_cols: popup += """ {}: {}
- """.format(col, - pypsa_plot.storage_units[col][store]) + """.format( + col, pypsa_plot.storage_units[col][store] + ) folium.CircleMarker( location=([sto_y[store], sto_x[store]]), - radius=pypsa_plot.storage_units['p_nom'], + radius=pypsa_plot.storage_units["p_nom"], popup=popup, - color='#3186cc', + color="#3186cc", fill=True, - fill_color='#3186cc', - weight=1).add_to(mv_sto_group) + fill_color="#3186cc", + weight=1, + ).add_to(mv_sto_group) - logger.info('Added MV stores') + logger.info("Added MV stores") # add layers and others - colormap.caption = 'Line loading s_nom (ehv/hv)' - colormap2.caption = 'Line investment of annuity costs (ehv/hv)' + colormap.caption = "Line loading s_nom (ehv/hv)" + colormap2.caption = "Line investment of annuity costs (ehv/hv)" mp.add_child(colormap) mp.add_child(colormap2) # add legend # add layer groups - if ego.json_file['eGo']['eDisGo'] is True: + if ego.json_file["eGo"]["eDisGo"] is True: repgrid_group.add_to(mp) grid_group.add_to(mp) @@ -1278,22 +1373,22 @@ def convert_to_hex(rgba_color): folium.LayerControl().add_to(mp) plugins.Fullscreen( - position='topright', - title='Fullscreen', - title_cancel='Exit me', - force_separate_button=True).add_to(mp) + position="topright", + title="Fullscreen", + title_cancel="Exit me", + force_separate_button=True, + ).add_to(mp) - url = ('https://openego.readthedocs.io/en/master/_images/open_ego_icon_web.png') + url = "https://openego.readthedocs.io/en/master/_images/open_ego_icon_web.png" FloatImage(url, bottom=0, left=5).add_to(mp) - if ego.json_file['eGo']['eDisGo'] is True: - mp = iplot_griddistrict_legend( - mp=mp, repre_grids=repre_grids, start=True) + if ego.json_file["eGo"]["eDisGo"] is True: + mp = iplot_griddistrict_legend(mp=mp, repre_grids=repre_grids, start=True) mp = iplot_totalresults_legend(mp=mp, ego=ego, start=True) # Save Map - html_dir = 'results/html' + html_dir = "results/html" if not os.path.exists(html_dir): os.makedirs(html_dir) mp.save("results/html/iplot_map.html") @@ -1309,16 +1404,16 @@ def convert_to_hex(rgba_color): if save_image: url2 = "file://{}/{}".format(os.getcwd(), url) outfn = os.path.join(html_dir, "outfig.png") - subprocess.check_call(["cutycapt", "--url={}".format(url2), - "--out={}".format(outfn)]) + subprocess.check_call( + ["cutycapt", "--url={}".format(url2), "--out={}".format(outfn)] + ) # close oedb session.close() - logger.info('Done') + logger.info("Done") def colormapper_lines(colormap, lines, line, column="s_nom"): - """ Make Colore Map for lines. - """ + """Make Colore Map for lines.""" # TODO: make it more generic l_color = [] @@ -1336,7 +1431,7 @@ def colormapper_lines(colormap, lines, line, column="s_nom"): elif colormap.index[1] >= lines[column][line] >= colormap.index[0]: l_color = colormap.colors[0] else: - l_color = (0., 0., 0., 1.) + l_color = (0.0, 0.0, 0.0, 1.0) if len(colormap.index) == 5: if colormap.index[4] >= lines[column][line] > colormap.index[3]: @@ -1348,7 +1443,7 @@ def colormapper_lines(colormap, lines, line, column="s_nom"): elif colormap.index[1] >= lines[column][line] >= colormap.index[0]: l_color = colormap.colors[0] else: - l_color = (0., 0., 0., 1.) + l_color = (0.0, 0.0, 0.0, 1.0) return l_color @@ -1367,9 +1462,12 @@ def _storage_distribution(network, ax, fig, scaling=1, filename=None): """ stores = network.storage_units - storage_distribution = network.storage_units.p_nom_opt[stores.index]\ - .groupby(network.storage_units.bus)\ - .sum().reindex(network.buses.index, fill_value=0.) 
+ storage_distribution = ( + network.storage_units.p_nom_opt[stores.index] + .groupby(network.storage_units.bus) + .sum() + .reindex(network.buses.index, fill_value=0.0) + ) msd_max = storage_distribution.max() msd_median = storage_distribution[storage_distribution != 0].median() @@ -1380,45 +1478,41 @@ def _storage_distribution(network, ax, fig, scaling=1, filename=None): else: LabelVal = 0 if LabelVal < 0: - LabelUnit = 'kW' - msd_max, msd_median, msd_min = msd_max * \ - 1000, msd_median * 1000, msd_min * 1000 + LabelUnit = "kW" + msd_max, msd_median, msd_min = msd_max * 1000, msd_median * 1000, msd_min * 1000 storage_distribution = storage_distribution * 1000 elif LabelVal < 3: - LabelUnit = 'MW' + LabelUnit = "MW" else: - LabelUnit = 'GW' - msd_max, msd_median, msd_min = msd_max / \ - 1000, msd_median / 1000, msd_min / 1000 + LabelUnit = "GW" + msd_max, msd_median, msd_min = msd_max / 1000, msd_median / 1000, msd_min / 1000 storage_distribution = storage_distribution / 1000 if sum(storage_distribution) == 0: network.plot(bus_sizes=0, ax=ax) else: - network.plot( - bus_sizes=storage_distribution * scaling, - ax=ax, - line_widths=0.3 - ) + network.plot(bus_sizes=storage_distribution * scaling, ax=ax, line_widths=0.3) def iplot_griddistrict_legend(mp, repre_grids, start=False): - """Add legend to iplot function of mv grids. - - """ + """Add legend to iplot function of mv grids.""" # from branca.element import Template, MacroElement from string import Template if start: legends = [] - for name, row in repre_grids.groupby(['cluster_id', 'color']).count().iterrows(): + for name, row in ( + repre_grids.groupby(["cluster_id", "color"]).count().iterrows() + ): color = name[1] grid_no = name[0] entry = """
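The storage_distribution chain above is a small groupby idiom: sum p_nom_opt per bus, then reindex against the full bus list so buses without storage contribute an explicit zero instead of dropping out of the plot. In isolation, with toy data:

    import pandas as pd

    # p_nom_opt per storage unit, and the bus each unit sits on.
    p_nom_opt = pd.Series({"st1": 12.0, "st2": 3.0, "st3": 7.5})
    bus_of_unit = pd.Series({"st1": "bus_a", "st2": "bus_a", "st3": "bus_c"})
    buses = pd.Index(["bus_a", "bus_b", "bus_c"], name="bus")

    # Sum per bus, then zero-fill the buses that have no storage at all.
    storage_distribution = (
        p_nom_opt.groupby(bus_of_unit).sum().reindex(buses, fill_value=0.0)
    )
    print(storage_distribution)  # bus_a 15.0, bus_b 0.0, bus_c 7.5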
  • - Represented by Grid {}
  • """.format(color, grid_no) + Represented by Grid {} """.format( + color, grid_no + ) legends.append(entry) @@ -1612,7 +1706,7 @@ def iplot_griddistrict_legend(mp, repre_grids, start=False): t = Template(temp_2) temp_2 = t.substitute(legend=legend) - temps = temp_1+temp_2+temp_3 + temps = temp_1 + temp_2 + temp_3 # macro = MacroElement(**leg) # macro._template = Template(template) @@ -1621,28 +1715,36 @@ def iplot_griddistrict_legend(mp, repre_grids, start=False): def iplot_totalresults_legend(mp, ego, start=False): - """ Add total results as legend to iplot function. - """ + """Add total results as legend to iplot function.""" from string import Template if start: # get data total = ego.total_investment_costs.rename( - columns={"capital_cost": "annuity_costs"}) + columns={"capital_cost": "annuity_costs"} + ) # change format - total['overnight_costs'] = ( - total['overnight_costs']/1000000).map('M€ {:,.2f}'.format) + total["overnight_costs"] = (total["overnight_costs"] / 1000000).map( + "M€ {:,.2f}".format + ) - total['annuity_costs'] = (total['annuity_costs'] / - 1000).map('T€ {:,.2f}'.format) + total["annuity_costs"] = (total["annuity_costs"] / 1000).map( + "T€ {:,.2f}".format + ) - total = total[['component', 'voltage_level', - 'differentiation', 'overnight_costs', - 'annuity_costs']].to_html(index=False) + total = total[ + [ + "component", + "voltage_level", + "differentiation", + "overnight_costs", + "annuity_costs", + ] + ].to_html(index=False) # inclued grafic - html_dir = 'results/html' + html_dir = "results/html" if not os.path.exists(html_dir): os.makedirs(html_dir) @@ -1697,8 +1799,7 @@ def iplot_totalresults_legend(mp, ego, start=False): def _get_mv_plot_res(ego, mv_grid_id): - """ Prepare mv results. - """ + """Prepare mv results.""" logger.disabled = True pypsa_network = ego.edisgo.network[mv_grid_id].network.pypsa @@ -1707,39 +1808,49 @@ def _get_mv_plot_res(ego, mv_grid_id): pypsa_plot = PyPSANetwork() pypsa_plot.buses = pypsa_network.buses.loc[pypsa_network.buses.v_nom >= 10] # filter buses of aggregated loads and generators - pypsa_plot.buses = pypsa_plot.buses[ - ~pypsa_plot.buses.index.str.contains("agg")] + pypsa_plot.buses = pypsa_plot.buses[~pypsa_plot.buses.index.str.contains("agg")] pypsa_plot.lines = pypsa_network.lines[ - pypsa_network.lines.bus0.isin(pypsa_plot.buses.index)][ - pypsa_network.lines.bus1.isin(pypsa_plot.buses.index)] + pypsa_network.lines.bus0.isin(pypsa_plot.buses.index) + ][pypsa_network.lines.bus1.isin(pypsa_plot.buses.index)] - grid_expansion_costs = ego.edisgo.network[mv_grid_id].network.results.grid_expansion_costs + grid_expansion_costs = ego.edisgo.network[ + mv_grid_id + ].network.results.grid_expansion_costs - bus_cost = pd.concat([pypsa_plot.buses, grid_expansion_costs], axis=1, - join_axes=[pypsa_plot.buses.index]) + bus_cost = pd.concat( + [pypsa_plot.buses, grid_expansion_costs], + axis=1, + join_axes=[pypsa_plot.buses.index], + ) costs_lv_stations = grid_expansion_costs[ - grid_expansion_costs.index.str.contains("LVStation")] - costs_lv_stations['station'] = \ - costs_lv_stations.reset_index()['index'].apply( - lambda _: '_'.join(_.split('_')[0:2])).values - costs_lv_stations = costs_lv_stations.groupby('station').sum() + grid_expansion_costs.index.str.contains("LVStation") + ] + costs_lv_stations["station"] = ( + costs_lv_stations.reset_index()["index"] + .apply(lambda _: "_".join(_.split("_")[0:2])) + .values + ) + costs_lv_stations = costs_lv_stations.groupby("station").sum() costs_mv_station = grid_expansion_costs[ - 
grid_expansion_costs.index.str.contains("MVStation")] - costs_mv_station['station'] = \ - costs_mv_station.reset_index()['index'].apply( - lambda _: '_'.join(_.split('_')[0:2])).values - costs_mv_station = costs_mv_station.groupby('station').sum() + grid_expansion_costs.index.str.contains("MVStation") + ] + costs_mv_station["station"] = ( + costs_mv_station.reset_index()["index"] + .apply(lambda _: "_".join(_.split("_")[0:2])) + .values + ) + costs_mv_station = costs_mv_station.groupby("station").sum() - costs_lv_stations_total = costs_lv_stations[['overnight_costs', - 'capital_cost']].sum() + costs_lv_stations_total = costs_lv_stations[ + ["overnight_costs", "capital_cost"] + ].sum() - costs_mv_station_total = costs_mv_station[['overnight_costs', - 'capital_cost']].sum() + costs_mv_station_total = costs_mv_station[["overnight_costs", "capital_cost"]].sum() costs_lv_stations_total = pd.DataFrame(costs_lv_stations_total) costs_mv_station_total = pd.DataFrame(costs_mv_station_total) logger.disabled = False - return costs_lv_stations_total, costs_mv_station_total + return costs_lv_stations_total, costs_mv_station_total diff --git a/ego/tools/results.py b/ego/tools/results.py index d41eec13..e0164191 100644 --- a/ego/tools/results.py +++ b/ego/tools/results.py @@ -23,17 +23,21 @@ # TODO - write results to database import io -import os import logging -logger = logging.getLogger('ego') +import os -if not 'READTHEDOCS' in os.environ: - import pandas as pd +logger = logging.getLogger("ego") + +if not "READTHEDOCS" in os.environ: import numpy as np + import pandas as pd + from ego.tools.economics import get_generator_investment -__copyright__ = "Flensburg University of Applied Sciences, Europa-Universität"\ +__copyright__ = ( + "Flensburg University of Applied Sciences, Europa-Universität" "Flensburg, Centre for Sustainable Energy Systems" +) __license__ = "GNU Affero General Public License Version 3 (AGPL-3.0)" __author__ = "wolfbunke" @@ -62,44 +66,50 @@ def create_etrago_results(network, scn_name): # rename function etg = network etrago = pd.DataFrame() - etrago['p_nom'] = etg.generators.groupby('carrier')['p_nom'].sum() # in MW - etrago['p_nom_opt'] = etg.generators.groupby('carrier')[ - 'p_nom_opt'].sum() # in MW + etrago["p_nom"] = etg.generators.groupby("carrier")["p_nom"].sum() # in MW + etrago["p_nom_opt"] = etg.generators.groupby("carrier")["p_nom_opt"].sum() # in MW # power price - etrago['marginal_cost'] = etg.generators.groupby('carrier' - )['marginal_cost'].mean() + etrago["marginal_cost"] = etg.generators.groupby("carrier")["marginal_cost"].mean() # in in [EUR] # get power price by production MWh _t.p * marginal_cost - power_price = etg.generators_t.p[etg.generators[etg.generators. - control != 'Slack'] - .index] * etg.generators.\ - marginal_cost[etg.generators[etg.generators. - control != 'Slack'].index] # without Slack + power_price = ( + etg.generators_t.p[etg.generators[etg.generators.control != "Slack"].index] + * etg.generators.marginal_cost[ + etg.generators[etg.generators.control != "Slack"].index + ] + ) # without Slack - power_price = power_price.groupby( - etg.generators.carrier, axis=1).sum().sum() - etrago['power_price'] = power_price + power_price = power_price.groupby(etg.generators.carrier, axis=1).sum().sum() + etrago["power_price"] = power_price # use country code - p_by_carrier = pd.concat([etg.generators_t.p - [etg.generators[etg.generators.control != - 'Slack'].index], - etg.generators_t.p[etg.generators[ - etg. 
- generators.control == 'Slack'].index - ].iloc[:, 0]. - apply(lambda x: x if x > 0 else 0)], axis=1).\ - groupby(etg.generators.carrier, axis=1).sum() # in MWh - - etrago['p'] = p_by_carrier.sum() + p_by_carrier = ( + pd.concat( + [ + etg.generators_t.p[ + etg.generators[etg.generators.control != "Slack"].index + ], + etg.generators_t.p[ + etg.generators[etg.generators.control == "Slack"].index + ] + .iloc[:, 0] + .apply(lambda x: x if x > 0 else 0), + ], + axis=1, + ) + .groupby(etg.generators.carrier, axis=1) + .sum() + ) # in MWh + + etrago["p"] = p_by_carrier.sum() # add invetment result_invest = get_generator_investment(network, scn_name) - etrago = etrago.assign(investment_costs=result_invest['carrier_costs']) + etrago = etrago.assign(investment_costs=result_invest["carrier_costs"]) return etrago -if __name__ == '__main__': +if __name__ == "__main__": pass diff --git a/ego/tools/specs.py b/ego/tools/specs.py deleted file mode 100644 index 1b7d2958..00000000 --- a/ego/tools/specs.py +++ /dev/null @@ -1,366 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2016-2018 Europa-Universität Flensburg, -# Flensburg University of Applied Sciences, -# Centre for Sustainable Energy Systems -# -# This program is free software; you can redistribute it and/or -# modify it under the terms of the GNU Affero General Public License as -# published by the Free Software Foundation; either version 3 of the -# License, or (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Affero General Public License for more details. -# -# You should have received a copy of the GNU Affero General Public License -# along with this program. If not, see . - -# File description -""" -This files contains all eGo interface functions -""" - -__copyright__ = ("Europa-Universität Flensburg, " - "Centre for Sustainable Energy Systems") -__license__ = "GNU Affero General Public License Version 3 (AGPL-3.0)" -__author__ = "wolf_bunke,maltesc" - -# Import -# General Packages -import os -import pandas as pd -import time -if not 'READTHEDOCS' in os.environ: - from egoio.db_tables import model_draft - from egoio.db_tables import supply - import math - -import logging -logger = logging.getLogger(__name__) - - -# Functions - -def get_etragospecs_direct(session, - bus_id, - etrago_network, - scn_name, - grid_version, - pf_post_lopf, - max_cos_phi_renewable): - """ - Reads eTraGo Results from Database and returns and returns - the interface values as a dictionary of corresponding dataframes - - Parameters - ---------- - session : sqlalchemy.orm.session.Session - Handles conversations with the database. 
- bus_id : int - ID of the corresponding HV bus - etrago_network: :class:`etrago.tools.io.NetworkScenario` - eTraGo network object compiled by :meth:`etrago.appl.etrago` - scn_name : str - Name of used scenario 'Status Quo', 'NEP 2035' or 'eGo 100' - - - Returns - ------- - :obj:`dict` of :pandas:`pandas.DataFrame` - Dataframes used as eDisGo inputs - - """ - logger.info('Specs for bus {}'.format(bus_id)) - if pf_post_lopf: - logger.info('Active and reactive power interface') - else: - logger.info('Only active power interface') - - specs_meta_data = {} - performance = {} - - specs_meta_data.update({'TG Bus ID': bus_id}) - - if grid_version is None: - logger.warning('Weather_id taken from model_draft (not tested)') - - ormclass_gen_single = model_draft.__getattribute__( - 'EgoSupplyPfGeneratorSingle') - else: - ormclass_aggr_w = supply.__getattribute__( - 'EgoAggrWeather') - - snap_idx = etrago_network.snapshots - - # Generators - t0 = time.perf_counter() - - weather_dpdnt = ['wind', 'solar', 'wind_onshore', 'wind_offshore'] - - # DF procesing - all_gens_df = etrago_network.generators[ - etrago_network.generators['bus'] == str(bus_id) - ] - all_gens_df.index.name = 'generator_id' - - all_gens_df.reset_index(inplace=True) - - all_gens_df = all_gens_df[[ - 'generator_id', - 'p_nom', - 'p_nom_opt', - 'carrier']] - - all_gens_df = all_gens_df.rename(columns={"carrier": "name"}) - - all_gens_df = all_gens_df[all_gens_df['name'] != 'wind_offshore'] - - for index, row in all_gens_df.iterrows(): - name = row['name'] - if name == 'wind_onshore': - all_gens_df.at[index, 'name'] = 'wind' - - # Conventionals - t1 = time.perf_counter() - performance.update({'Generator Data Processing': t1-t0}) - - conv_df = all_gens_df[~all_gens_df.name.isin(weather_dpdnt)] - - conv_dsptch = pd.DataFrame(0.0, - index=snap_idx, - columns=list(set(conv_df['name']))) - conv_reactive_power = pd.DataFrame(0.0, - index=snap_idx, - columns=list(set(conv_df['name']))) - - if not conv_df.empty: - conventionals = True - conv_cap = conv_df[['p_nom', 'name']].groupby('name').sum().T - - for index, row in conv_df.iterrows(): - generator_id = row['generator_id'] - source = row['name'] - p = etrago_network.generators_t.p[str(generator_id)] - p_norm = p / conv_cap[source]['p_nom'] - conv_dsptch[source] = conv_dsptch[source] + p_norm - if pf_post_lopf: - q = etrago_network.generators_t.q[str(generator_id)] - # q normalized with p_nom - q_norm = q / conv_cap[source]['p_nom'] - conv_reactive_power[source] = ( - conv_reactive_power[source] - + q_norm) - - if pf_post_lopf: - new_columns = [ - (col, '') for col in conv_reactive_power.columns - ] - conv_reactive_power.columns = pd.MultiIndex.from_tuples( - new_columns) - - else: - conventionals = False - logger.warning('No conventional generators at bus {}'.format(bus_id)) - - # Renewables - t2 = time.perf_counter() - performance.update({'Conventional Dispatch': t2-t1}) - # Capacities - ren_df = all_gens_df[all_gens_df.name.isin(weather_dpdnt)] - if ren_df.empty: - logger.warning('No renewable generators at bus {}'.format(bus_id)) - - for index, row in ren_df.iterrows(): - aggr_id = row['generator_id'] - if grid_version is None: - w_id = session.query( - ormclass_gen_single.w_id - ).filter( - ormclass_gen_single.aggr_id == aggr_id, - ormclass_gen_single.scn_name == scn_name - ).limit(1).scalar() - else: - w_id = session.query( - ormclass_aggr_w.w_id - ).filter( - ormclass_aggr_w.aggr_id == aggr_id, - #ormclass_aggr_w.scn_name == scn_name, - ormclass_aggr_w.version == grid_version - 
).limit(1).scalar() - - ren_df.at[index, 'w_id'] = w_id - - ren_df.dropna(inplace=True) - - aggr_gens = ren_df.groupby([ - 'name', - 'w_id' - ]).agg({'p_nom': 'sum'}).reset_index() - - aggr_gens.rename(columns={'p_nom': 'p_nom_aggr'}, inplace=True) - - aggr_gens['ren_id'] = aggr_gens.index - - ### Dispatch and Curteilment - potential = pd.DataFrame(0.0, - index=snap_idx, - columns=aggr_gens['ren_id']) - dispatch = pd.DataFrame(0.0, - index=snap_idx, - columns=aggr_gens['ren_id']) - curtailment = pd.DataFrame(0.0, - index=snap_idx, - columns=aggr_gens['ren_id']) - if pf_post_lopf: - reactive_power = pd.DataFrame(0.0, - index=snap_idx, - columns=aggr_gens['ren_id']) - - for index, row in ren_df.iterrows(): - gen_id = row['generator_id'] - name = row['name'] - w_id = row['w_id'] - ren_id = int(aggr_gens[ - (aggr_gens['name'] == name) & - (aggr_gens['w_id'] == w_id)]['ren_id']) - - p_nom_aggr = float( - aggr_gens[aggr_gens['ren_id'] == ren_id]['p_nom_aggr']) - p_nom = row['p_nom'] - - p_series = etrago_network.generators_t.p[str(gen_id)] - p_norm_tot_series = p_series / p_nom_aggr - - p_max_pu_series = etrago_network.generators_t.p_max_pu[str(gen_id)] - p_max_norm_tot_series = p_max_pu_series * p_nom / p_nom_aggr - - potential[ren_id] = potential[ren_id] + p_max_norm_tot_series - dispatch[ren_id] = dispatch[ren_id] + p_norm_tot_series - - if pf_post_lopf: - q_series = etrago_network.generators_t.q[str(gen_id)] - q_norm_tot_series = q_series / p_nom_aggr - reactive_power[ren_id] = ( - reactive_power[ren_id] - + q_norm_tot_series) - - curtailment = potential.sub(dispatch) - - new_columns = [ - (aggr_gens[aggr_gens.ren_id == col].name.iloc[0], - aggr_gens[aggr_gens.ren_id == col].w_id.iloc[0]) - for col in potential.columns] - potential.columns = pd.MultiIndex.from_tuples(new_columns) - - new_columns = [ - (aggr_gens[aggr_gens.ren_id == col].name.iloc[0], - aggr_gens[aggr_gens.ren_id == col].w_id.iloc[0]) - for col in dispatch.columns] - dispatch.columns = pd.MultiIndex.from_tuples(new_columns) - - new_columns = [ - (aggr_gens[aggr_gens.ren_id == col].name.iloc[0], - aggr_gens[aggr_gens.ren_id == col].w_id.iloc[0]) - for col in curtailment.columns] - curtailment.columns = pd.MultiIndex.from_tuples(new_columns) - - if pf_post_lopf: - new_columns = [ - (aggr_gens[aggr_gens.ren_id == col].name.iloc[0], - aggr_gens[aggr_gens.ren_id == col].w_id.iloc[0]) - for col in reactive_power.columns] - reactive_power.columns = pd.MultiIndex.from_tuples(new_columns) - - # Q limit calculation - if max_cos_phi_renewable: - logger.info('Applying Q limit (max cos(phi)={})'.format( - max_cos_phi_renewable)) - - phi = math.acos(max_cos_phi_renewable) - - for col in reactive_power: - for idx in reactive_power.index: - p = dispatch.loc[idx][col] - q = reactive_power.loc[idx][col] - - q_max, q_min = p * math.tan(phi), -p * math.tan(phi) - - if q > q_max: - q = q_max - elif q < q_min: - q = q_min - - reactive_power.at[idx, col] = q - - # Reactive Power concat - if conventionals: - all_reactive_power = pd.concat([ - conv_reactive_power, - reactive_power], axis=1) - else: - all_reactive_power = reactive_power - - # Storage - t3 = time.perf_counter() - performance.update({'Renewable Dispatch and Curt.': t3-t2}) - # Capactiy - min_extended = 0.3 - stor_df = etrago_network.storage_units.loc[ - (etrago_network.storage_units['bus'] == str(bus_id)) - & (etrago_network.storage_units['p_nom_extendable'] == True) - & (etrago_network.storage_units['p_nom_opt'] > min_extended) - & (etrago_network.storage_units['max_hours'] <= 20.)] # 
Only batteries - - logger.warning('Minimum storage of {} MW'.format(min_extended)) - - ext_found = False - if len(stor_df) == 1: - logger.info('Extendable storage unit found') - ext_found = True - - stor_id = stor_df.index[0] - - stor_p_series_kW = etrago_network.storage_units_t.p[ - str(stor_id)] * 1000 - - if pf_post_lopf: - try: - stor_q_series_kvar = etrago_network.storage_units_t.q[ - str(stor_id)] * 1000 - except: - logger.warning("No Q series found for storage unit {}".format( - stor_id)) - stor_q_series_kvar = etrago_network.storage_units_t.p[ - str(stor_id)] * 0 - - if ext_found == False: - logger.info( - "No extendable storage unit found at bus {}".format(bus_id)) - - t4 = time.perf_counter() - performance.update({'Storage Data Processing and Dispatch': t4-t3}) - - specs = { - 'conv_dispatch': conv_dsptch, - 'ren_dispatch': dispatch, - 'ren_potential': potential, - 'ren_curtailment': curtailment - } - - if ext_found: - specs['battery_p_series'] = stor_p_series_kW - - if pf_post_lopf: - specs['battery_q_series'] = stor_q_series_kvar - - else: - specs['battery_p_series'] = specs['battery_q_series'] = None - - if pf_post_lopf: - specs['reactive_power'] = all_reactive_power - - t5 = time.perf_counter() - performance.update({'Overall time': t5-t0}) - - return specs diff --git a/ego/tools/storages.py b/ego/tools/storages.py index b8323a95..c49509a0 100644 --- a/ego/tools/storages.py +++ b/ego/tools/storages.py @@ -21,17 +21,18 @@ """ import io -import os import logging -logger = logging.getLogger('ego') +import os -if not 'READTHEDOCS' in os.environ: - import pandas as pd +logger = logging.getLogger("ego") + +if not "READTHEDOCS" in os.environ: import numpy as np + import pandas as pd + from etrago.tools.utilities import geolocation_buses -__copyright__ = ("Europa-Universität Flensburg, " - "Centre for Sustainable Energy Systems") +__copyright__ = "Europa-Universität Flensburg, " "Centre for Sustainable Energy Systems" __license__ = "GNU Affero General Public License Version 3 (AGPL-3.0)" __author__ = "wolf_bunke,maltesc" @@ -66,36 +67,56 @@ def etrago_storages(network): Sum of optimal installed power capacity """ if len(network.storage_units_t.p.sum()) > 0: - charge = network.storage_units_t.\ - p[network.storage_units_t.p[network. - storage_units[network.storage_units. - p_nom_opt > 0].index]. - values > 0.].groupby(network.storage_units. - carrier, axis=1).sum().sum() - - discharge = network.storage_units_t.p[network.storage_units_t. - p[network. - storage_units[ - network.storage_units. - p_nom_opt > 0]. - index].values < 0.].\ - groupby(network.storage_units.carrier, axis=1).sum().sum() - - count = network.storage_units.bus[network.storage_units.p_nom_opt > 0].\ - groupby(network.storage_units.carrier, axis=0).count() - - p_nom_sum = network.storage_units.p_nom.groupby(network.storage_units. 
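etrago_storages buckets the storage time series by sign before summing per carrier. A toy version of the masking idiom, with neutral names; the sign convention assumed here is PyPSA's, where positive p is feed-in to the grid:

    import pandas as pd

    # Dispatch series for two units (positive = feed-in, negative = uptake).
    p = pd.DataFrame({"st1": [5.0, -3.0, 2.0], "st2": [-1.0, -1.0, 4.0]})
    carrier = pd.Series({"st1": "battery", "st2": "battery"})

    # groupby(axis=1) mirrors the code above; pandas >= 2.0 would transpose instead.
    feed_in = p[p > 0].groupby(carrier, axis=1).sum().sum()
    uptake = p[p < 0].groupby(carrier, axis=1).sum().sum()
    print(feed_in["battery"], uptake["battery"])  # 11.0 -5.0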
-                                                        carrier, axis=0).sum()
+        p_nom_sum = network.storage_units.p_nom.groupby(
+            network.storage_units.carrier, axis=0
+        ).sum()

        p_nom_o_sum = network.storage_units.p_nom_opt.groupby(
-            network.storage_units.
-            carrier, axis=0).sum()
+            network.storage_units.carrier, axis=0
+        ).sum()

        p_nom_o = p_nom_sum - p_nom_o_sum  # Zubau

-        results = pd.concat([charge.rename('charge'),
-                             discharge.rename('discharge'),
-                             p_nom_sum, count.rename('total_units'), p_nom_o
-                             .rename('extension'), ], axis=1, join='outer')
+        results = pd.concat(
+            [
+                charge.rename("charge"),
+                discharge.rename("discharge"),
+                p_nom_sum,
+                count.rename("total_units"),
+                p_nom_o.rename("extension"),
+            ],
+            axis=1,
+            join="outer",
+        )

    else:
        logger.info("No timeseries p for storages!")
@@ -121,64 +142,67 @@
    """
    # check spelling of storages and storage
-    logger.info(json_file['eTraGo']['extendable'])
+    logger.info(json_file["eTraGo"]["extendable"])

-    stos = 'storage'
+    stos = "storage"
    # check settings for extendable
-    if stos not in json_file['eTraGo']['extendable']:
-        logger.info("The optimizition was not using parameter "
-                    " 'extendable': storage"
-                    "No storage expantion costs from etrago")
+    if stos not in json_file["eTraGo"]["extendable"]:
+        logger.info(
+            "The optimization was not using parameter "
+            "'extendable': storage. "
+            "No storage expansion costs from eTraGo."
+        )

-    if stos in json_file['eTraGo']['extendable']:
+    if stos in json_file["eTraGo"]["extendable"]:
        network = geolocation_buses(network, session)

        # get v_nom
-        _bus = pd.DataFrame(network.buses[['v_nom', 'country_code']])
+        _bus = pd.DataFrame(network.buses[["v_nom", "country_code"]])
        _bus.index.name = "name"
        _bus.reset_index(level=0, inplace=True)

-        _storage = network.storage_units[
-            network.storage_units.p_nom_extendable == True]
+        _storage = network.storage_units[network.storage_units.p_nom_extendable == True]
        _storage.reset_index(level=0, inplace=True)

        # provide storage installation costs per voltage level
-        installed_storages = \
-            pd.merge(_storage, _bus, left_on='bus', right_on='name')
+        installed_storages = pd.merge(_storage, _bus, left_on="bus", right_on="name")

-        installed_storages['investment_costs'] = (installed_storages.
@@ -121,64 +142,67 @@ def etrago_storages_investment(network, json_file, session):
     """
     # check spelling of storages and storage
-    logger.info(json_file['eTraGo']['extendable'])
+    logger.info(json_file["eTraGo"]["extendable"])

-    stos = 'storage'
+    stos = "storage"
     # check settings for extendable
-    if stos not in json_file['eTraGo']['extendable']:
-        logger.info("The optimizition was not using parameter "
-                    " 'extendable': storage"
-                    "No storage expantion costs from etrago")
+    if stos not in json_file["eTraGo"]["extendable"]:
+        logger.info(
+            "The optimization was not using the parameter "
+            "'extendable': storage. "
+            "No storage expansion costs from eTraGo."
+        )

-    if stos in json_file['eTraGo']['extendable']:
+    if stos in json_file["eTraGo"]["extendable"]:
         network = geolocation_buses(network, session)

         # get v_nom
-        _bus = pd.DataFrame(network.buses[['v_nom', 'country_code']])
+        _bus = pd.DataFrame(network.buses[["v_nom", "country_code"]])
         _bus.index.name = "name"
         _bus.reset_index(level=0, inplace=True)

-        _storage = network.storage_units[
-            network.storage_units.p_nom_extendable == True]
+        _storage = network.storage_units[network.storage_units.p_nom_extendable == True]
         _storage.reset_index(level=0, inplace=True)

         # provide storage installation costs per voltage level
-        installed_storages = \
-            pd.merge(_storage, _bus, left_on='bus', right_on='name')
+        installed_storages = pd.merge(_storage, _bus, left_on="bus", right_on="name")

-        installed_storages['investment_costs'] = (installed_storages.
-                                                  capital_cost *
-                                                  installed_storages.p_nom_opt)
+        installed_storages["investment_costs"] = (
+            installed_storages.capital_cost * installed_storages.p_nom_opt
+        )

         # add voltage_level
-        installed_storages['voltage_level'] = 'unknown'
+        installed_storages["voltage_level"] = "unknown"

-        ix_ehv = installed_storages[installed_storages['v_nom'] >= 380].index
-        installed_storages.set_value(ix_ehv, 'voltage_level', 'ehv')
+        ix_ehv = installed_storages[installed_storages["v_nom"] >= 380].index
+        installed_storages.set_value(ix_ehv, "voltage_level", "ehv")

-        ix_hv = installed_storages[(installed_storages['v_nom'] <= 220) &
-                                   (installed_storages['v_nom'] >= 110)].index
-        installed_storages.set_value(ix_hv, 'voltage_level', 'hv')
+        ix_hv = installed_storages[
+            (installed_storages["v_nom"] <= 220) & (installed_storages["v_nom"] >= 110)
+        ].index
+        installed_storages.set_value(ix_hv, "voltage_level", "hv")

         # add country differentiation
-        installed_storages['differentiation'] = 'none'
+        installed_storages["differentiation"] = "none"

         for idx, val in installed_storages.iterrows():
-            check = val['country_code']
+            check = val["country_code"]
             if "DE" in check:
-                installed_storages['differentiation'][idx] = 'domestic'
+                installed_storages["differentiation"][idx] = "domestic"
             if "DE" not in check:
-                installed_storages['differentiation'][idx] = 'foreign'
-
-        storages_investment = installed_storages[
-            ['voltage_level', 'investment_costs',
-             'differentiation']].groupby(['differentiation',
-                                          'voltage_level']
-                                         ).sum().reset_index()
-
-        storages_investment = storages_investment.\
-            rename(columns={'investment_costs': 'capital_cost'})
+                installed_storages["differentiation"][idx] = "foreign"
+
+        storages_investment = (
+            installed_storages[["voltage_level", "investment_costs", "differentiation"]]
+            .groupby(["differentiation", "voltage_level"])
+            .sum()
+            .reset_index()
+        )
+
+        storages_investment = storages_investment.rename(
+            columns={"investment_costs": "capital_cost"}
+        )

     return storages_investment
diff --git a/ego/tools/utilities.py b/ego/tools/utilities.py
index f4030f5c..289ba5a8 100644
--- a/ego/tools/utilities.py
+++ b/ego/tools/utilities.py
@@ -20,25 +20,26 @@
 """This module contains utility functions for the eGo application.
""" import csv -import os -import pandas as pd import json -import csv -import sys import logging -logger = logging.getLogger(__name__) +import os +import sys from time import localtime, strftime -if not 'READTHEDOCS' in os.environ: - from egoio.db_tables import model_draft, grid +from sqlalchemy.orm import scoped_session, sessionmaker + +if "READTHEDOCS" not in os.environ: from egoio.tools import db -from sqlalchemy.orm import sessionmaker -from sqlalchemy.orm import scoped_session -__copyright__ = ("Flensburg University of Applied Sciences, " - "Europa-Universität Flensburg, " - "Centre for Sustainable Energy Systems") +logger = logging.getLogger(__name__) + + +__copyright__ = ( + "Flensburg University of Applied Sciences, " + "Europa-Universität Flensburg, " + "Centre for Sustainable Energy Systems" +) __license__ = "GNU Affero General Public License Version 3 (AGPL-3.0)" __author__ = "wolf_bunke" @@ -61,24 +62,23 @@ def define_logging(name): # add pypsa and other logger INFO to ego.log now = strftime("%Y-%m-%d_%H%M%S", localtime()) - log_dir = 'logs' + log_dir = "logs" if not os.path.exists(log_dir): os.makedirs(log_dir) # Logging - logging.basicConfig(stream=sys.stdout, - format='%(asctime)s %(message)s', - level=logging.INFO) + logging.basicConfig( + stream=sys.stdout, format="%(asctime)s %(message)s", level=logging.INFO + ) logger = logging.getLogger(name) formatter = logging.Formatter( - '%(asctime)s - %(name)s - %(levelname)s - %(message)s' + "%(asctime)s - %(name)s - %(levelname)s - %(message)s" ) -# logger = logging.FileHandler(log_name, mode='w') - fh = logging.FileHandler( - log_dir + '/' + name + '_' + now + '.log', mode='w') + # logger = logging.FileHandler(log_name, mode='w') + fh = logging.FileHandler(log_dir + "/" + name + "_" + now + ".log", mode="w") fh.setLevel(logging.INFO) fh.setFormatter(formatter) logger.addHandler(fh) @@ -86,7 +86,7 @@ def define_logging(name): return logger -def get_scenario_setting(jsonpath='scenario_setting.json'): +def get_scenario_setting(jsonpath=None): """Get and open json file with scenaio settings of eGo. The settings incluede eGo, eTraGo and eDisGo specific settings of arguments and parameters for a reproducible @@ -103,87 +103,115 @@ def get_scenario_setting(jsonpath='scenario_setting.json'): json_file : dict Dictionary of json file """ - path = os.getcwd() - # add try ego/ - logger.info("Your path is: {}".format(path)) + if jsonpath is None: + path = os.getcwd() + # add try ego/ + logger.info("Your path is: {}".format(path)) + jsonpath = os.path.join(path, "scenario_setting.json") - with open(path + '/' + jsonpath) as f: + with open(jsonpath) as f: json_file = json.load(f) # fix remove result_id - json_file['eGo'].update({'result_id': None}) + json_file["eGo"].update({"result_id": None}) # check settings - if (json_file['eGo']['eTraGo'] is False and json_file['eGo']['eDisGo'] - is False): - logger.warning("Something went wrong! \n" - "Please contoll your settings and restart. \n" - "Set at least eTraGo = true") + if json_file["eGo"]["eTraGo"] is False and json_file["eGo"]["eDisGo"] is False: + logger.warning( + "Something went wrong! \n" + "Please contoll your settings and restart. \n" + "Set at least eTraGo = true" + ) return - if (json_file['eGo']['eTraGo'] is None and json_file['eGo']['eDisGo'] - is None): - logger.warning("Something went wrong! \n" - "Please contoll your settings and restart. 
\n" - "Set at least eTraGo = true") + if json_file["eGo"]["eTraGo"] is None and json_file["eGo"]["eDisGo"] is None: + logger.warning( + "Something went wrong! \n" + "Please contoll your settings and restart. \n" + "Set at least eTraGo = true" + ) return - if json_file['eGo']['result_id'] and json_file['eGo']['csv_import_eTraGo']: + if json_file["eGo"]["result_id"] and json_file["eGo"]["csv_import_eTraGo"]: logger.warning( "You set a DB result_id and a csv import path! \n" - "Please remove on of this settings") + "Please remove on of this settings" + ) return # or ? json_file['eGo']['result_id'] = None - if json_file['eGo']['eTraGo'] is None and json_file['eGo']['eDisGo']: - logger.info( - "eDisGo needs eTraGo results. Please change your settings!\n") + if json_file["eGo"]["eTraGo"] is None and json_file["eGo"]["eDisGo"]: + logger.info("eDisGo needs eTraGo results. Please change your settings!\n") return - if json_file['eGo']['eTraGo'] is False and json_file['eGo']['eDisGo']: - logger.info( - "eDisGo needs eTraGo results. Please change your settings!\n") + if json_file["eGo"]["eTraGo"] is False and json_file["eGo"]["eDisGo"]: + logger.info("eDisGo needs eTraGo results. Please change your settings!\n") return - if (json_file['eGo']['result_id'] is None and - json_file['eGo']['csv_import_eTraGo'] is None): + if ( + json_file["eGo"]["result_id"] is None + and json_file["eGo"]["csv_import_eTraGo"] is None + ): logger.info( - "No data import from results is set \n" - "eGo runs by given settings") + "No data import from results is set \n" "eGo runs by given settings" + ) - if (json_file['eGo']['csv_import_eTraGo'] and - json_file['eGo']['csv_import_eDisGo']): - logger.info( - "eDisGo and eTraGo results will be imported from csv\n") + if json_file["eGo"]["csv_import_eTraGo"] and json_file["eGo"]["csv_import_eDisGo"]: + logger.info("eDisGo and eTraGo results will be imported from csv\n") - if json_file['eGo'].get('eTraGo') == True: + if json_file["eGo"].get("eTraGo") is True: - logger.info('Using and importing eTraGo settings') + logger.info("Using and importing eTraGo settings") # special case of SH and model_draft # TODO: check and maybe remove this part sh_scen = ["SH Status Quo", "SH NEP 2035", "SH eGo 100"] - if json_file['eTraGo'].get('scn_name') in sh_scen and json_file['eTraGo'].\ - get('gridversion') is not None: - json_file['eTraGo']['gridversion'] = None - - if json_file['eTraGo'].get('extendable') == "['network', 'storages']": - json_file['eTraGo'].update({'extendable': ['network', 'storage']}) - - if json_file['eTraGo'].get('extendable') == "['network', 'storage']": - json_file['eTraGo'].update({'extendable': ['network', 'storage']}) - - if json_file['eTraGo'].get('extendable') == "['network']": - json_file['eTraGo'].update({'extendable': ['network']}) - - if json_file['eTraGo'].get('extendable') == "['storages']": - json_file['eTraGo'].update({'extendable': ['storage']}) - - if json_file['eTraGo'].get('extendable') == "['storage']": - json_file['eTraGo'].update({'extendable': ['storage']}) - - if json_file['eGo'].get('eDisGo') == True: - logger.info('Using and importing eDisGo settings') + if ( + json_file["eTraGo"].get("scn_name") in sh_scen + and json_file["eTraGo"].get("gridversion") is not None + ): + json_file["eTraGo"]["gridversion"] = None + + if json_file["eTraGo"].get("extendable") == "['network', 'storages']": + json_file["eTraGo"].update({"extendable": ["network", "storage"]}) + + if json_file["eTraGo"].get("extendable") == "['network', 'storage']": + 
json_file["eTraGo"].update({"extendable": ["network", "storage"]}) + + if json_file["eTraGo"].get("extendable") == "['network']": + json_file["eTraGo"].update({"extendable": ["network"]}) + + if json_file["eTraGo"].get("extendable") == "['storages']": + json_file["eTraGo"].update({"extendable": ["storage"]}) + + if json_file["eTraGo"].get("extendable") == "['storage']": + json_file["eTraGo"].update({"extendable": ["storage"]}) + + if json_file["eGo"].get("eDisGo") is True: + logger.info("Using and importing eDisGo settings") + + if isinstance(json_file["external_config"], str): + path_external_config = os.path.expanduser(json_file["external_config"]) + logger.info(f"Load external config with path: {path_external_config}") + with open(path_external_config) as f: + external_config = json.load(f) + for key in external_config.keys(): + try: + json_file[key].update(external_config[key]) + except KeyError: + json_file[key] = external_config[key] + else: + logger.info("Don't load external config.") + + # Serializing json + json_object = json.dumps(json_file, indent=4) + + # Writing to sample.json + results_dir = os.path.join(json_file["eDisGo"]["results"]) + if not os.path.exists(results_dir): + os.makedirs(results_dir) + with open(os.path.join(results_dir, "config.json"), "w") as outfile: + outfile.write(json_object) return json_file @@ -194,26 +222,26 @@ def fix_leading_separator(csv_file, **kwargs): separator in its header, this field is deleted. If this is done the second field of every row is removed, too. """ - with open(csv_file, 'r') as f: + with open(csv_file, "r") as f: lines = csv.reader(f, **kwargs) if not lines: - raise Exception('File %s contained no data' % csv_file) + raise Exception("File %s contained no data" % csv_file) first_line = next(lines) - if first_line[0] == '': + if first_line[0] == "": path, fname = os.path.split(csv_file) - tmp_file = os.path.join(path, 'tmp_' + fname) - with open(tmp_file, 'w+') as out: + tmp_file = os.path.join(path, "tmp_" + fname) + with open(tmp_file, "w+") as out: writer = csv.writer(out, **kwargs) writer.writerow(first_line[1:]) for line in lines: - l = line[2:] - l.insert(0, line[0]) - writer.writerow(l, **kwargs) + line_selection = line[2:] + line_selection.insert(0, line[0]) + writer.writerow(line_selection, **kwargs) os.rename(tmp_file, csv_file) def get_time_steps(json_file): - """ Get time step of calculation by scenario settings. + """Get time step of calculation by scenario settings. Parameters ---------- @@ -226,16 +254,15 @@ def get_time_steps(json_file): Number of timesteps of the calculation. 
""" - end = json_file['eTraGo'].get('end_snapshot') - start = json_file['eTraGo'].get('start_snapshot') + end = json_file["eTraGo"].get("end_snapshot") + start = json_file["eTraGo"].get("start_snapshot") time_step = end - start return time_step def open_oedb_session(ego): - """ - """ + """ """ _db_section = ego.json_file["eTraGo"]["db"] conn = db.connection(section=_db_section) session_factory = sessionmaker(bind=conn) diff --git a/pytest.ini b/pytest.ini new file mode 100644 index 00000000..174a20ae --- /dev/null +++ b/pytest.ini @@ -0,0 +1,6 @@ +# pytest.ini +[pytest] +log_cli = True +log_level = INFO +testpaths = + tests diff --git a/requirements.txt b/requirements.txt index 98d8ba1d..4e199d06 100644 --- a/requirements.txt +++ b/requirements.txt @@ -2,8 +2,6 @@ # Using single requirments for docs, see: # https://github.com/rtfd/readthedocs.org/issues/2070 sphinx_rtd_theme -pandas >=0.20.3, <=0.20.3 -pypsa >= 0.11.0, <= 0.11.0 numpy numpydoc sphinxcontrib-httpdomain diff --git a/setup.py b/setup.py index 4fba29e4..4a984284 100644 --- a/setup.py +++ b/setup.py @@ -1,69 +1,60 @@ # -*- coding: utf-8 -*- + +# flake8: noqa: F401, F601 import os -from setuptools import find_packages, setup + from pip._internal.req import parse_requirements +from setuptools import find_packages, setup -__copyright__ = ("Flensburg University of Applied Sciences, " - "Europa-Universität Flensburg, " - "Centre for Sustainable Energy Systems") +__copyright__ = ( + "Flensburg University of Applied Sciences, " + "Europa-Universität Flensburg, " + "Centre for Sustainable Energy Systems" +) __license__ = "GNU Affero General Public License Version 3 (AGPL-3.0)" __author__ = "wolf_bunke, maltesc" + def read(fname): return open(os.path.join(os.path.dirname(__file__), fname)).read() - + + +req = [] + +dev_req = [ + "pre-commit", + "black", + "isort", + "pyupgrade", + "flake8", +] + +doc_req = ["numpydoc", "sphinxcontrib.httpdomain", "sphinx-jsondomain"] + +full_req = list(set(dev_req + doc_req)) + +extras = { + "dev": dev_req, + "doc": doc_req, + "full": full_req, +} + setup( - name='eGo', - version='0.3.3', - author='wolfbunke, maltesc', - author_email='wolf-dieter.bunke@uni-flensburg.de', - description=("A cross-grid-level electricity grid and storage " - "optimization tool "), - long_description= read('README.rst'), - url='https://github.com/openego/eGo', + name="eGo", + version="0.3.4", + author="wolfbunke, maltesc", + author_email="wolf-dieter.bunke@uni-flensburg.de", + description=("A cross-grid-level electricity grid and storage optimization tool."), + long_description=read("README.rst"), + url="https://github.com/openego/eGo", license="GNU Affero General Public License Version 3 (AGPL-3.0)", packages=find_packages(), - package_dir={'ego': 'ego'}, + package_dir={"ego": "ego"}, include_package_data=True, - install_requires=['egoio == 0.4.5', - 'ding0 == v0.1.9', - 'pycallgraph', - 'eDisGo == v0.0.9', - 'eTraGo == 0.7.1', - 'scikit-learn == 0.19.0', - 'pandas ==0.20.3', - 'pypsa==0.11.0fork', - 'sqlalchemy == 1.2.0', - 'geoalchemy2 >= 0.3.0, <=0.4.0', - 'tsam==0.9.9', - 'geopandas', - 'matplotlib == 3.0.0', - 'Rtree', - 'descartes', - 'pyproj', - 'plotly==2.2.3', - 'shapely', - 'multiprocess', - 'folium', - 'oedialect' - ], - dependency_links=[ - ('git+https://git@github.com/openego/PyPSA.git' - '@master#egg=pypsa-0.11.0fork')], - extras_require={ - 'doc': [ - 'sphinx >= 1.4', - 'sphinx_rtd_theme', - 'sphinxcontrib-httpdomain', - 'numpydoc == 0.7.0', - 'aiohttp_jinja2', - 'sphinx-jsondomain']}, + install_requires=req, 
diff --git a/requirements.txt b/requirements.txt
index 98d8ba1d..4e199d06 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -2,8 +2,6 @@
 # Using single requirements file for docs, see:
 # https://github.com/rtfd/readthedocs.org/issues/2070
 sphinx_rtd_theme
-pandas >=0.20.3, <=0.20.3
-pypsa >= 0.11.0, <= 0.11.0
 numpy
 numpydoc
 sphinxcontrib-httpdomain
diff --git a/setup.py b/setup.py
index 4fba29e4..4a984284 100644
--- a/setup.py
+++ b/setup.py
@@ -1,69 +1,60 @@
 # -*- coding: utf-8 -*-
+
+# flake8: noqa: F401, F601
 import os
-from setuptools import find_packages, setup
+
 from pip._internal.req import parse_requirements
+from setuptools import find_packages, setup

-__copyright__ = ("Flensburg University of Applied Sciences, "
-                 "Europa-Universität Flensburg, "
-                 "Centre for Sustainable Energy Systems")
+__copyright__ = (
+    "Flensburg University of Applied Sciences, "
+    "Europa-Universität Flensburg, "
+    "Centre for Sustainable Energy Systems"
+)
 __license__ = "GNU Affero General Public License Version 3 (AGPL-3.0)"
 __author__ = "wolf_bunke, maltesc"

+
 def read(fname):
     return open(os.path.join(os.path.dirname(__file__), fname)).read()
-
+
+
+req = []
+
+dev_req = [
+    "pre-commit",
+    "black",
+    "isort",
+    "pyupgrade",
+    "flake8",
+]
+
+doc_req = ["numpydoc", "sphinxcontrib.httpdomain", "sphinx-jsondomain"]
+
+full_req = list(set(dev_req + doc_req))
+
+extras = {
+    "dev": dev_req,
+    "doc": doc_req,
+    "full": full_req,
+}
+
 setup(
-    name='eGo',
-    version='0.3.3',
-    author='wolfbunke, maltesc',
-    author_email='wolf-dieter.bunke@uni-flensburg.de',
-    description=("A cross-grid-level electricity grid and storage "
-                 "optimization tool "),
-    long_description= read('README.rst'),
-    url='https://github.com/openego/eGo',
+    name="eGo",
+    version="0.3.4",
+    author="wolfbunke, maltesc",
+    author_email="wolf-dieter.bunke@uni-flensburg.de",
+    description=("A cross-grid-level electricity grid and storage optimization tool."),
+    long_description=read("README.rst"),
+    url="https://github.com/openego/eGo",
     license="GNU Affero General Public License Version 3 (AGPL-3.0)",
     packages=find_packages(),
-    package_dir={'ego': 'ego'},
+    package_dir={"ego": "ego"},
     include_package_data=True,
-    install_requires=['egoio == 0.4.5',
-                      'ding0 == v0.1.9',
-                      'pycallgraph',
-                      'eDisGo == v0.0.9',
-                      'eTraGo == 0.7.1',
-                      'scikit-learn == 0.19.0',
-                      'pandas ==0.20.3',
-                      'pypsa==0.11.0fork',
-                      'sqlalchemy == 1.2.0',
-                      'geoalchemy2 >= 0.3.0, <=0.4.0',
-                      'tsam==0.9.9',
-                      'geopandas',
-                      'matplotlib == 3.0.0',
-                      'Rtree',
-                      'descartes',
-                      'pyproj',
-                      'plotly==2.2.3',
-                      'shapely',
-                      'multiprocess',
-                      'folium',
-                      'oedialect'
-                      ],
-    dependency_links=[
-        ('git+https://git@github.com/openego/PyPSA.git'
-         '@master#egg=pypsa-0.11.0fork')],
-    extras_require={
-        'doc': [
-            'sphinx >= 1.4',
-            'sphinx_rtd_theme',
-            'sphinxcontrib-httpdomain',
-            'numpydoc == 0.7.0',
-            'aiohttp_jinja2',
-            'sphinx-jsondomain']},
+    install_requires=req,
+    extras_require=extras,
     package_data={
-        'ego': [os.path.join('tools', '*.csv')],
-        'ego': [os.path.join('tools', '*.json')],
-        'ego': [os.path.join('', '*.json')],
-        'ego.data': ['*.csv']
-    }
-    )
-
+        "ego": [os.path.join("tools", "*.json")] + [os.path.join("", "*.json")],
+    },
+)
diff --git a/tests/conftest.py b/tests/conftest.py
new file mode 100644
index 00000000..d5a556f8
--- /dev/null
+++ b/tests/conftest.py
@@ -0,0 +1,31 @@
+import os
+
+import pytest
+
+
+def pytest_configure(config):
+    pytest.etrago_test_network_1_path = os.path.join(
+        os.path.realpath(os.path.dirname(__file__)), "data/etrago_test_network_1"
+    )
+    pytest.interface_results_reference_data_path = os.path.join(
+        os.path.realpath(os.path.dirname(__file__)),
+        "data/interface_results_reference_data",
+    )
+
+    config.addinivalue_line("markers", "slow: mark test as slow to run")
+
+
+def pytest_addoption(parser):
+    parser.addoption(
+        "--runslow", action="store_true", default=False, help="run slow tests"
+    )
+
+
+def pytest_collection_modifyitems(config, items):
+    if config.getoption("--runslow"):
+        # --runslow given in cli: do not skip slow tests
+        return
+    skip_slow = pytest.mark.skip(reason="need --runslow option to run")
+    for item in items:
+        if "slow" in item.keywords:
+            item.add_marker(skip_slow)
diff --git a/tests/data/create_test_grid.ipynb b/tests/data/create_test_grid.ipynb
new file mode 100644
index 00000000..063b8c91
--- /dev/null
+++ b/tests/data/create_test_grid.ipynb
@@ -0,0 +1,2052 @@
+{
+ "cells": [
+  {
+   "cell_type": "code",
+   "execution_count": 1,
+   "id": "28c7d874-4076-4330-84ec-e0ef88d29239",
+   "metadata": {
+    "pycharm": {
+     "name": "#%%\n"
+    }
+   },
+   "outputs": [],
+   "source": [
+    "import pandas as pd\n",
+    "import numpy as np\n",
+    "import os"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "ad5d278e-1d45-4ada-aba6-3040c69e1a13",
+   "metadata": {
+    "pycharm": {
+     "name": "#%% md\n"
+    }
+   },
+   "source": [
+    "# generators"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 2,
+   "id": "1ba5c7cc-ca82-4051-9cd0-62b51254aeb2",
+   "metadata": {
+    "pycharm": {
+     "name": "#%%\n"
+    }
+   },
+   "outputs": [
+    {
+     "data": {
+      "text/html": [
+       "
    \n", + "\n", + "\n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + "
    namebuscarrierp_nomp_nom_opt
    00 biomass0biomass1.01.0
    110 biomass10biomass10.010.0
    20 central_biomass_CHP0central_biomass_CHP1.01.0
    310 central_biomass_CHP10central_biomass_CHP10.010.0
    40 run_of_river0run_of_river1.01.0
    510 run_of_river10run_of_river10.010.0
    60 gas0gas1.01.0
    710 gas10gas10.010.0
    80 other_non_renewable0other_non_renewable1.01.0
    910 other_non_renewable10other_non_renewable10.010.0
    100 reservoir0reservoir1.01.0
    1110 reservoir10reservoir10.010.0
    120 solar_00solar1.01.0
    130 solar_10solar10.010.0
    1410 solar_010solar1.01.0
    1510 solar_110solar10.010.0
    160 solar_rooftop_00solar_rooftop1.01.0
    170 solar_rooftop_10solar_rooftop10.010.0
    1810 solar_rooftop_010solar_rooftop1.01.0
    1910 solar_rooftop_110solar_rooftop10.010.0
    200 wind_onshore_00wind_onshore1.01.0
    210 wind_onshore_10wind_onshore10.010.0
    2210 wind_onshore_010wind_onshore1.01.0
    2310 wind_onshore_110wind_onshore10.010.0
    244 solar_thermal_collector4solar_thermal_collector1.01.0
    2510 solar_thermal_collector10solar_thermal_collector10.010.0
    264 geo_thermal4geo_thermal1.01.0
    2710 geo_thermal10geo_thermal10.010.0
    280 junk0junk100.0100.0
    2910 junk10junk100.0100.0
    \n", + "
    " + ], + "text/plain": [ + " name bus carrier p_nom p_nom_opt\n", + "0 0 biomass 0 biomass 1.0 1.0\n", + "1 10 biomass 10 biomass 10.0 10.0\n", + "2 0 central_biomass_CHP 0 central_biomass_CHP 1.0 1.0\n", + "3 10 central_biomass_CHP 10 central_biomass_CHP 10.0 10.0\n", + "4 0 run_of_river 0 run_of_river 1.0 1.0\n", + "5 10 run_of_river 10 run_of_river 10.0 10.0\n", + "6 0 gas 0 gas 1.0 1.0\n", + "7 10 gas 10 gas 10.0 10.0\n", + "8 0 other_non_renewable 0 other_non_renewable 1.0 1.0\n", + "9 10 other_non_renewable 10 other_non_renewable 10.0 10.0\n", + "10 0 reservoir 0 reservoir 1.0 1.0\n", + "11 10 reservoir 10 reservoir 10.0 10.0\n", + "12 0 solar_0 0 solar 1.0 1.0\n", + "13 0 solar_1 0 solar 10.0 10.0\n", + "14 10 solar_0 10 solar 1.0 1.0\n", + "15 10 solar_1 10 solar 10.0 10.0\n", + "16 0 solar_rooftop_0 0 solar_rooftop 1.0 1.0\n", + "17 0 solar_rooftop_1 0 solar_rooftop 10.0 10.0\n", + "18 10 solar_rooftop_0 10 solar_rooftop 1.0 1.0\n", + "19 10 solar_rooftop_1 10 solar_rooftop 10.0 10.0\n", + "20 0 wind_onshore_0 0 wind_onshore 1.0 1.0\n", + "21 0 wind_onshore_1 0 wind_onshore 10.0 10.0\n", + "22 10 wind_onshore_0 10 wind_onshore 1.0 1.0\n", + "23 10 wind_onshore_1 10 wind_onshore 10.0 10.0\n", + "24 4 solar_thermal_collector 4 solar_thermal_collector 1.0 1.0\n", + "25 10 solar_thermal_collector 10 solar_thermal_collector 10.0 10.0\n", + "26 4 geo_thermal 4 geo_thermal 1.0 1.0\n", + "27 10 geo_thermal 10 geo_thermal 10.0 10.0\n", + "28 0 junk 0 junk 100.0 100.0\n", + "29 10 junk 10 junk 100.0 100.0" + ] + }, + "execution_count": 2, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "generators_df = pd.read_csv(\"etrago_test_network_1/generators.csv\")\n", + "generators_df" + ] + }, + { + "cell_type": "markdown", + "id": "c8a91022-57c5-47f6-98b2-66aa9ad5a333", + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, + "source": [ + "## p" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "id": "1b017098-0d72-470c-ac65-d4847ab43354", + "metadata": { + "pycharm": { + "name": "#%%\n" + } + }, + "outputs": [ + { + "data": { + "text/plain": [ + "array([[0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. ,\n", + " 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. ,\n", + " 0. , 0. , 0. , 0. ],\n", + " [0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5,\n", + " 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5,\n", + " 0.5, 0.5, 0.5, 0.5],\n", + " [1. , 1. , 1. , 1. , 1. , 1. , 1. , 1. , 1. , 1. , 1. , 1. , 1. ,\n", + " 1. , 1. , 1. , 1. , 1. , 1. , 1. , 1. , 1. , 1. , 1. , 1. , 1. ,\n", + " 1. , 1. , 1. , 1. 
]])" + ] + }, + "execution_count": 3, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "base = np.ones((3,generators_df.shape[0]))\n", + "scale = np.array([0.0,0.5,1.0])\n", + "\n", + "for i in range(scale.shape[0]):\n", + " base[i,:] = scale[i] * base[i,:]\n", + " \n", + "base" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "id": "a057d570-bd42-4162-a24f-7b23cf61df85", + "metadata": { + "pycharm": { + "name": "#%%\n" + } + }, + "outputs": [ + { + "data": { + "text/plain": [ + "0 0 biomass\n", + "1 10 biomass\n", + "2 0 central_biomass_CHP\n", + "3 10 central_biomass_CHP\n", + "4 0 run_of_river\n", + "5 10 run_of_river\n", + "6 0 gas\n", + "7 10 gas\n", + "8 0 other_non_renewable\n", + "9 10 other_non_renewable\n", + "10 0 reservoir\n", + "11 10 reservoir\n", + "12 0 solar_0\n", + "13 0 solar_1\n", + "14 10 solar_0\n", + "15 10 solar_1\n", + "16 0 solar_rooftop_0\n", + "17 0 solar_rooftop_1\n", + "18 10 solar_rooftop_0\n", + "19 10 solar_rooftop_1\n", + "20 0 wind_onshore_0\n", + "21 0 wind_onshore_1\n", + "22 10 wind_onshore_0\n", + "23 10 wind_onshore_1\n", + "24 4 solar_thermal_collector\n", + "25 10 solar_thermal_collector\n", + "26 4 geo_thermal\n", + "27 10 geo_thermal\n", + "28 0 junk\n", + "29 10 junk\n", + "Name: name, dtype: object" + ] + }, + "execution_count": 4, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "generators_df[\"name\"]" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "id": "1ea9ed1a-89eb-4ab1-a645-8e8418fd1531", + "metadata": { + "pycharm": { + "name": "#%%\n" + } + }, + "outputs": [ + { + "data": { + "text/html": [ + "
    \n", + "\n", + "\n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + "
    0 biomass10 biomass0 central_biomass_CHP10 central_biomass_CHP0 run_of_river10 run_of_river0 gas10 gas0 other_non_renewable10 other_non_renewable...0 wind_onshore_00 wind_onshore_110 wind_onshore_010 wind_onshore_14 solar_thermal_collector10 solar_thermal_collector4 geo_thermal10 geo_thermal0 junk10 junk
    00.00.00.00.00.00.00.00.00.00.0...0.00.00.00.00.00.00.00.00.00.0
    10.50.50.50.50.50.50.50.50.50.5...0.50.50.50.50.50.50.50.50.50.5
    21.01.01.01.01.01.01.01.01.01.0...1.01.01.01.01.01.01.01.01.01.0
    \n", + "

    3 rows × 30 columns

    \n", + "
    " + ], + "text/plain": [ + " 0 biomass 10 biomass 0 central_biomass_CHP 10 central_biomass_CHP \\\n", + "0 0.0 0.0 0.0 0.0 \n", + "1 0.5 0.5 0.5 0.5 \n", + "2 1.0 1.0 1.0 1.0 \n", + "\n", + " 0 run_of_river 10 run_of_river 0 gas 10 gas 0 other_non_renewable \\\n", + "0 0.0 0.0 0.0 0.0 0.0 \n", + "1 0.5 0.5 0.5 0.5 0.5 \n", + "2 1.0 1.0 1.0 1.0 1.0 \n", + "\n", + " 10 other_non_renewable ... 0 wind_onshore_0 0 wind_onshore_1 \\\n", + "0 0.0 ... 0.0 0.0 \n", + "1 0.5 ... 0.5 0.5 \n", + "2 1.0 ... 1.0 1.0 \n", + "\n", + " 10 wind_onshore_0 10 wind_onshore_1 4 solar_thermal_collector \\\n", + "0 0.0 0.0 0.0 \n", + "1 0.5 0.5 0.5 \n", + "2 1.0 1.0 1.0 \n", + "\n", + " 10 solar_thermal_collector 4 geo_thermal 10 geo_thermal 0 junk 10 junk \n", + "0 0.0 0.0 0.0 0.0 0.0 \n", + "1 0.5 0.5 0.5 0.5 0.5 \n", + "2 1.0 1.0 1.0 1.0 1.0 \n", + "\n", + "[3 rows x 30 columns]" + ] + }, + "execution_count": 5, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "power_df = pd.DataFrame(base, columns=generators_df[\"name\"].to_list())\n", + "power_df" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "id": "296705d3-bf09-47f6-a563-6249618f8c60", + "metadata": { + "pycharm": { + "name": "#%%\n" + } + }, + "outputs": [], + "source": [ + "power_df.to_csv(\"etrago_test_network_1/generators-p.csv\")\n", + "power_df.to_csv(\"etrago_test_network_1/generators-p_min_pu.csv\")" + ] + }, + { + "cell_type": "markdown", + "id": "b0578ab0-4a79-4d70-96c7-49587f1041a1", + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, + "source": [ + "## q" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "id": "a85d39bb-1929-4351-8ee0-a9f94a649f22", + "metadata": { + "pycharm": { + "name": "#%%\n" + } + }, + "outputs": [ + { + "data": { + "text/plain": [ + "array([[ 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. ,\n", + " 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. ,\n", + " 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. ],\n", + " [ 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5,\n", + " 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5,\n", + " 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5],\n", + " [-1. , -1. , -1. , -1. , -1. , -1. , -1. , -1. , -1. , -1. , -1. ,\n", + " -1. , -1. , -1. , -1. , -1. , -1. , -1. , -1. , -1. , -1. , -1. ,\n", + " -1. , -1. , -1. , -1. , -1. , -1. , -1. , -1. 
]])" + ] + }, + "execution_count": 7, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "base = np.ones((3,generators_df.shape[0]))\n", + "scale = np.array([0.0,0.5,-1.0])\n", + "\n", + "for i in range(scale.shape[0]):\n", + " base[i,:] = scale[i] * base[i,:]\n", + " \n", + "base" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "id": "aa4bcd4f-22ce-4733-8898-b159d5f103b9", + "metadata": { + "pycharm": { + "name": "#%%\n" + } + }, + "outputs": [ + { + "data": { + "text/plain": [ + "0 0 biomass\n", + "1 10 biomass\n", + "2 0 central_biomass_CHP\n", + "3 10 central_biomass_CHP\n", + "4 0 run_of_river\n", + "5 10 run_of_river\n", + "6 0 gas\n", + "7 10 gas\n", + "8 0 other_non_renewable\n", + "9 10 other_non_renewable\n", + "10 0 reservoir\n", + "11 10 reservoir\n", + "12 0 solar_0\n", + "13 0 solar_1\n", + "14 10 solar_0\n", + "15 10 solar_1\n", + "16 0 solar_rooftop_0\n", + "17 0 solar_rooftop_1\n", + "18 10 solar_rooftop_0\n", + "19 10 solar_rooftop_1\n", + "20 0 wind_onshore_0\n", + "21 0 wind_onshore_1\n", + "22 10 wind_onshore_0\n", + "23 10 wind_onshore_1\n", + "24 4 solar_thermal_collector\n", + "25 10 solar_thermal_collector\n", + "26 4 geo_thermal\n", + "27 10 geo_thermal\n", + "28 0 junk\n", + "29 10 junk\n", + "Name: name, dtype: object" + ] + }, + "execution_count": 8, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "generators_df[\"name\"]" + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "id": "a0decf15-9db6-4a75-ae65-ae58a2990b49", + "metadata": { + "pycharm": { + "name": "#%%\n" + } + }, + "outputs": [ + { + "data": { + "text/html": [ + "
    \n", + "\n", + "\n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + "
    0 biomass10 biomass0 central_biomass_CHP10 central_biomass_CHP0 run_of_river10 run_of_river0 gas10 gas0 other_non_renewable10 other_non_renewable...0 wind_onshore_00 wind_onshore_110 wind_onshore_010 wind_onshore_14 solar_thermal_collector10 solar_thermal_collector4 geo_thermal10 geo_thermal0 junk10 junk
    00.00.00.00.00.00.00.00.00.00.0...0.00.00.00.00.00.00.00.00.00.0
    10.50.50.50.50.50.50.50.50.50.5...0.50.50.50.50.50.50.50.50.50.5
    2-1.0-1.0-1.0-1.0-1.0-1.0-1.0-1.0-1.0-1.0...-1.0-1.0-1.0-1.0-1.0-1.0-1.0-1.0-1.0-1.0
    \n", + "

    3 rows × 30 columns

    \n", + "
    " + ], + "text/plain": [ + " 0 biomass 10 biomass 0 central_biomass_CHP 10 central_biomass_CHP \\\n", + "0 0.0 0.0 0.0 0.0 \n", + "1 0.5 0.5 0.5 0.5 \n", + "2 -1.0 -1.0 -1.0 -1.0 \n", + "\n", + " 0 run_of_river 10 run_of_river 0 gas 10 gas 0 other_non_renewable \\\n", + "0 0.0 0.0 0.0 0.0 0.0 \n", + "1 0.5 0.5 0.5 0.5 0.5 \n", + "2 -1.0 -1.0 -1.0 -1.0 -1.0 \n", + "\n", + " 10 other_non_renewable ... 0 wind_onshore_0 0 wind_onshore_1 \\\n", + "0 0.0 ... 0.0 0.0 \n", + "1 0.5 ... 0.5 0.5 \n", + "2 -1.0 ... -1.0 -1.0 \n", + "\n", + " 10 wind_onshore_0 10 wind_onshore_1 4 solar_thermal_collector \\\n", + "0 0.0 0.0 0.0 \n", + "1 0.5 0.5 0.5 \n", + "2 -1.0 -1.0 -1.0 \n", + "\n", + " 10 solar_thermal_collector 4 geo_thermal 10 geo_thermal 0 junk 10 junk \n", + "0 0.0 0.0 0.0 0.0 0.0 \n", + "1 0.5 0.5 0.5 0.5 0.5 \n", + "2 -1.0 -1.0 -1.0 -1.0 -1.0 \n", + "\n", + "[3 rows x 30 columns]" + ] + }, + "execution_count": 9, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "power_df = pd.DataFrame(base, columns=generators_df[\"name\"].to_list())\n", + "power_df" + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "id": "62bd950a-8d91-44b2-8e9d-3b2e5f73f0f5", + "metadata": { + "pycharm": { + "name": "#%%\n" + } + }, + "outputs": [], + "source": [ + "power_df.to_csv(\"etrago_test_network_1/generators-q.csv\")" + ] + }, + { + "cell_type": "markdown", + "id": "e8f25f92-7f8b-4063-8f8f-d08790ac046f", + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, + "source": [ + "## p_max_pu" + ] + }, + { + "cell_type": "code", + "execution_count": 11, + "id": "423bbe9c-174c-4612-9247-5a524dd389b5", + "metadata": { + "pycharm": { + "name": "#%%\n" + } + }, + "outputs": [ + { + "data": { + "text/plain": [ + "array([[0.5 , 0.5 , 0.5 , 0.5 , 0.5 , 0.5 , 0.5 , 0.5 , 0.5 , 0.5 , 0.5 ,\n", + " 0.5 , 0.5 , 0.5 , 0.5 , 0.5 , 0.5 , 0.5 , 0.5 , 0.5 , 0.5 , 0.5 ,\n", + " 0.5 , 0.5 , 0.5 , 0.5 , 0.5 , 0.5 , 0.5 , 0.5 ],\n", + " [0.75, 0.75, 0.75, 0.75, 0.75, 0.75, 0.75, 0.75, 0.75, 0.75, 0.75,\n", + " 0.75, 0.75, 0.75, 0.75, 0.75, 0.75, 0.75, 0.75, 0.75, 0.75, 0.75,\n", + " 0.75, 0.75, 0.75, 0.75, 0.75, 0.75, 0.75, 0.75],\n", + " [1. , 1. , 1. , 1. , 1. , 1. , 1. , 1. , 1. , 1. , 1. ,\n", + " 1. , 1. , 1. , 1. , 1. , 1. , 1. , 1. , 1. , 1. , 1. ,\n", + " 1. , 1. , 1. , 1. , 1. , 1. , 1. , 1. ]])" + ] + }, + "execution_count": 11, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "base = np.ones((3,generators_df.shape[0]))\n", + "scale = np.array([0.5,0.75,1.0])\n", + "\n", + "for i in range(scale.shape[0]):\n", + " base[i,:] = scale[i] * base[i,:]\n", + " \n", + "base" + ] + }, + { + "cell_type": "code", + "execution_count": 12, + "id": "9904d1b0-8bb4-4571-920e-a3ffc956d542", + "metadata": { + "pycharm": { + "name": "#%%\n" + } + }, + "outputs": [ + { + "data": { + "text/html": [ + "
    \n", + "\n", + "\n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + "
    0 biomass10 biomass0 central_biomass_CHP10 central_biomass_CHP0 run_of_river10 run_of_river0 gas10 gas0 other_non_renewable10 other_non_renewable...0 wind_onshore_00 wind_onshore_110 wind_onshore_010 wind_onshore_14 solar_thermal_collector10 solar_thermal_collector4 geo_thermal10 geo_thermal0 junk10 junk
    00.500.500.500.500.500.500.500.500.500.50...0.500.500.500.500.500.500.500.500.500.50
    10.750.750.750.750.750.750.750.750.750.75...0.750.750.750.750.750.750.750.750.750.75
    21.001.001.001.001.001.001.001.001.001.00...1.001.001.001.001.001.001.001.001.001.00
    \n", + "

    3 rows × 30 columns

    \n", + "
    " + ], + "text/plain": [ + " 0 biomass 10 biomass 0 central_biomass_CHP 10 central_biomass_CHP \\\n", + "0 0.50 0.50 0.50 0.50 \n", + "1 0.75 0.75 0.75 0.75 \n", + "2 1.00 1.00 1.00 1.00 \n", + "\n", + " 0 run_of_river 10 run_of_river 0 gas 10 gas 0 other_non_renewable \\\n", + "0 0.50 0.50 0.50 0.50 0.50 \n", + "1 0.75 0.75 0.75 0.75 0.75 \n", + "2 1.00 1.00 1.00 1.00 1.00 \n", + "\n", + " 10 other_non_renewable ... 0 wind_onshore_0 0 wind_onshore_1 \\\n", + "0 0.50 ... 0.50 0.50 \n", + "1 0.75 ... 0.75 0.75 \n", + "2 1.00 ... 1.00 1.00 \n", + "\n", + " 10 wind_onshore_0 10 wind_onshore_1 4 solar_thermal_collector \\\n", + "0 0.50 0.50 0.50 \n", + "1 0.75 0.75 0.75 \n", + "2 1.00 1.00 1.00 \n", + "\n", + " 10 solar_thermal_collector 4 geo_thermal 10 geo_thermal 0 junk 10 junk \n", + "0 0.50 0.50 0.50 0.50 0.50 \n", + "1 0.75 0.75 0.75 0.75 0.75 \n", + "2 1.00 1.00 1.00 1.00 1.00 \n", + "\n", + "[3 rows x 30 columns]" + ] + }, + "execution_count": 12, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "power_df = pd.DataFrame(base, columns=generators_df[\"name\"].to_list())\n", + "power_df" + ] + }, + { + "cell_type": "code", + "execution_count": 13, + "id": "62fc415e-4e2a-427e-aa54-449606bca7e3", + "metadata": { + "pycharm": { + "name": "#%%\n" + } + }, + "outputs": [], + "source": [ + "power_df.to_csv(\"etrago_test_network_1/generators-p_max_pu.csv\")" + ] + }, + { + "cell_type": "code", + "execution_count": 14, + "id": "cbe9aa85-74ac-4309-bab4-dab26c656811", + "metadata": { + "pycharm": { + "name": "#%%\n" + } + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "biomass\n", + "central_biomass_CHP\n", + "run_of_river\n", + "gas\n", + "other_non_renewable\n", + "reservoir\n", + "solar\n", + "solar_rooftop\n", + "wind_onshore\n", + "solar_thermal_collector\n", + "geo_thermal\n", + "junk\n" + ] + } + ], + "source": [ + "for carrier in generators_df[\"carrier\"].unique():\n", + " print(carrier)" + ] + }, + { + "cell_type": "markdown", + "id": "0198d501-447f-4c9d-ae61-b8b14f0ca510", + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, + "source": [ + "# links" + ] + }, + { + "cell_type": "code", + "execution_count": 15, + "id": "b35ed6df-2178-46e4-82ae-ba073ae4dbe7", + "metadata": { + "pycharm": { + "name": "#%%\n" + } + }, + "outputs": [ + { + "data": { + "text/html": [ + "
    \n", + "\n", + "\n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + "
    namebus0bus1carrierp_nom_opt
    0010central_gas_CHP1.0
    111010central_gas_CHP10.0
    2203dsm1.0
    331010dsm10.0
    4404central_heat_pump1.0
    551010central_heat_pump10.0
    6604central_resistive_heater1.0
    771010central_resistive_heater10.0
    8845central_heat_store_charger1.0
    991010central_heat_store_charger10.0
    101006rural_heat_pump1.0
    11111010rural_heat_pump10.0
    121267rural_heat_store_charger1.0
    13131010rural_heat_store_charger10.0
    141403BEV charger1.0
    15151010BEV charger10.0
    16161010junk100.0
    \n", + "
    " + ], + "text/plain": [ + " name bus0 bus1 carrier p_nom_opt\n", + "0 0 1 0 central_gas_CHP 1.0\n", + "1 1 10 10 central_gas_CHP 10.0\n", + "2 2 0 3 dsm 1.0\n", + "3 3 10 10 dsm 10.0\n", + "4 4 0 4 central_heat_pump 1.0\n", + "5 5 10 10 central_heat_pump 10.0\n", + "6 6 0 4 central_resistive_heater 1.0\n", + "7 7 10 10 central_resistive_heater 10.0\n", + "8 8 4 5 central_heat_store_charger 1.0\n", + "9 9 10 10 central_heat_store_charger 10.0\n", + "10 10 0 6 rural_heat_pump 1.0\n", + "11 11 10 10 rural_heat_pump 10.0\n", + "12 12 6 7 rural_heat_store_charger 1.0\n", + "13 13 10 10 rural_heat_store_charger 10.0\n", + "14 14 0 3 BEV charger 1.0\n", + "15 15 10 10 BEV charger 10.0\n", + "16 16 10 10 junk 100.0" + ] + }, + "execution_count": 15, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "links_df = pd.read_csv(\"etrago_test_network_1/links.csv\")\n", + "links_df" + ] + }, + { + "cell_type": "code", + "execution_count": 16, + "id": "181d99ef-6b16-44ca-a5c4-4bb18b15e954", + "metadata": { + "pycharm": { + "name": "#%%\n" + } + }, + "outputs": [ + { + "data": { + "text/plain": [ + "array([[0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. ,\n", + " 0. , 0. , 0. , 0. ],\n", + " [0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5,\n", + " 0.5, 0.5, 0.5, 0.5],\n", + " [1. , 1. , 1. , 1. , 1. , 1. , 1. , 1. , 1. , 1. , 1. , 1. , 1. ,\n", + " 1. , 1. , 1. , 1. ]])" + ] + }, + "execution_count": 16, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "base = np.ones((3,links_df.shape[0]))\n", + "scale = np.array([0.0,0.5,1.0])\n", + "\n", + "for i in range(scale.shape[0]):\n", + " base[i,:] = scale[i] * base[i,:]\n", + " \n", + "base" + ] + }, + { + "cell_type": "code", + "execution_count": 17, + "id": "c36b5043-d0f9-4c84-b86e-fb8aaf1eb180", + "metadata": { + "pycharm": { + "name": "#%%\n" + } + }, + "outputs": [ + { + "data": { + "text/html": [ + "
    \n", + "\n", + "\n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + "
    012345678910111213141516
    00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.0
    10.50.50.50.50.50.50.50.50.50.50.50.50.50.50.50.50.5
    21.01.01.01.01.01.01.01.01.01.01.01.01.01.01.01.01.0
    \n", + "
    " + ], + "text/plain": [ + " 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 \\\n", + "0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 \n", + "1 0.5 0.5 0.5 0.5 0.5 0.5 0.5 0.5 0.5 0.5 0.5 0.5 0.5 0.5 0.5 \n", + "2 1.0 1.0 1.0 1.0 1.0 1.0 1.0 1.0 1.0 1.0 1.0 1.0 1.0 1.0 1.0 \n", + "\n", + " 15 16 \n", + "0 0.0 0.0 \n", + "1 0.5 0.5 \n", + "2 1.0 1.0 " + ] + }, + "execution_count": 17, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "power_df = pd.DataFrame(base, columns=links_df[\"name\"].to_list())\n", + "power_df" + ] + }, + { + "cell_type": "code", + "execution_count": 18, + "id": "07268ffa-c649-492b-85df-a5304b2c005b", + "metadata": { + "pycharm": { + "name": "#%%\n" + } + }, + "outputs": [], + "source": [ + "power_df.to_csv(\"etrago_test_network_1/links-p0.csv\")\n", + "power_df.to_csv(\"etrago_test_network_1/links-p1.csv\")" + ] + }, + { + "cell_type": "markdown", + "id": "c85c0234-20ad-4a2b-b104-f889a289cdfb", + "metadata": { + "pycharm": { + "name": "#%% md\n" + }, + "tags": [] + }, + "source": [ + "# storage_units" + ] + }, + { + "cell_type": "code", + "execution_count": 19, + "id": "3f67735e-6081-421e-b9bd-09ca1ecff15c", + "metadata": { + "pycharm": { + "name": "#%%\n" + } + }, + "outputs": [ + { + "data": { + "text/html": [ + "
    \n", + "\n", + "\n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + "
    namebuscarrierp_nom_optp_nom_extendablemax_hours
    00 battery0battery1.0True10.0
    110 battery10battery10.0True10.0
    210 junk10junk10.0True10.0
    \n", + "
    " + ], + "text/plain": [ + " name bus carrier p_nom_opt p_nom_extendable max_hours\n", + "0 0 battery 0 battery 1.0 True 10.0\n", + "1 10 battery 10 battery 10.0 True 10.0\n", + "2 10 junk 10 junk 10.0 True 10.0" + ] + }, + "execution_count": 19, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "storages_df = pd.read_csv(\"etrago_test_network_1/storage_units.csv\")\n", + "storages_df" + ] + }, + { + "cell_type": "code", + "execution_count": 20, + "id": "c181baf9-e79b-44a9-a411-6db3d90bb7e1", + "metadata": { + "pycharm": { + "name": "#%%\n" + } + }, + "outputs": [ + { + "data": { + "text/plain": [ + "array([[0. , 0. , 0. ],\n", + " [0.5, 0.5, 0.5],\n", + " [1. , 1. , 1. ]])" + ] + }, + "execution_count": 20, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "base = np.ones((3,storages_df.shape[0]))\n", + "scale = np.array([0.0,0.5,1.0])\n", + "\n", + "for i in range(scale.shape[0]):\n", + " base[i,:] = scale[i] * base[i,:]\n", + " \n", + "base" + ] + }, + { + "cell_type": "code", + "execution_count": 21, + "id": "6efc3e6a-3cff-432c-bc0a-2e93c59629f6", + "metadata": { + "pycharm": { + "name": "#%%\n" + } + }, + "outputs": [ + { + "data": { + "text/html": [ + "
    \n", + "\n", + "\n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + "
    0 battery10 battery10 junk
    00.00.00.0
    10.50.50.5
    21.01.01.0
    \n", + "
    " + ], + "text/plain": [ + " 0 battery 10 battery 10 junk\n", + "0 0.0 0.0 0.0\n", + "1 0.5 0.5 0.5\n", + "2 1.0 1.0 1.0" + ] + }, + "execution_count": 21, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "power_df = pd.DataFrame(base, columns=storages_df[\"name\"].to_list())\n", + "power_df" + ] + }, + { + "cell_type": "code", + "execution_count": 22, + "id": "e29b6a1c-c986-45a0-92be-527ee3bb0701", + "metadata": { + "pycharm": { + "name": "#%%\n" + } + }, + "outputs": [], + "source": [ + "power_df.to_csv(\"etrago_test_network_1/storage_units-p.csv\")\n", + "power_df.to_csv(\"etrago_test_network_1/storage_units-q.csv\")" + ] + }, + { + "cell_type": "markdown", + "id": "bc1eeab7-a906-4a1f-8629-9b05df81435c", + "metadata": { + "pycharm": { + "name": "#%% md\n" + }, + "tags": [] + }, + "source": [ + "# test" + ] + }, + { + "cell_type": "code", + "execution_count": 23, + "id": "9be7b621-1931-46d6-84f0-6bf777a8d722", + "metadata": { + "pycharm": { + "name": "#%%\n" + } + }, + "outputs": [ + { + "data": { + "text/html": [ + "
    \n", + "\n", + "\n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + "
    0 biomass10 biomass0 central_biomass_CHP10 central_biomass_CHP0 run_of_river10 run_of_river0 gas10 gas0 other_non_renewable10 other_non_renewable...0 wind_onshore_00 wind_onshore_110 wind_onshore_010 wind_onshore_14 solar_thermal_collector10 solar_thermal_collector4 geo_thermal10 geo_thermal0 junk10 junk
    00.00.00.00.00.00.00.00.00.00.0...0.00.00.00.00.00.00.00.00.00.0
    10.50.50.50.50.50.50.50.50.50.5...0.50.50.50.50.50.50.50.50.50.5
    21.01.01.01.01.01.01.01.01.01.0...1.01.01.01.01.01.01.01.01.01.0
    \n", + "

    3 rows × 30 columns

    \n", + "
    " + ], + "text/plain": [ + " 0 biomass 10 biomass 0 central_biomass_CHP 10 central_biomass_CHP \\\n", + "0 0.0 0.0 0.0 0.0 \n", + "1 0.5 0.5 0.5 0.5 \n", + "2 1.0 1.0 1.0 1.0 \n", + "\n", + " 0 run_of_river 10 run_of_river 0 gas 10 gas 0 other_non_renewable \\\n", + "0 0.0 0.0 0.0 0.0 0.0 \n", + "1 0.5 0.5 0.5 0.5 0.5 \n", + "2 1.0 1.0 1.0 1.0 1.0 \n", + "\n", + " 10 other_non_renewable ... 0 wind_onshore_0 0 wind_onshore_1 \\\n", + "0 0.0 ... 0.0 0.0 \n", + "1 0.5 ... 0.5 0.5 \n", + "2 1.0 ... 1.0 1.0 \n", + "\n", + " 10 wind_onshore_0 10 wind_onshore_1 4 solar_thermal_collector \\\n", + "0 0.0 0.0 0.0 \n", + "1 0.5 0.5 0.5 \n", + "2 1.0 1.0 1.0 \n", + "\n", + " 10 solar_thermal_collector 4 geo_thermal 10 geo_thermal 0 junk 10 junk \n", + "0 0.0 0.0 0.0 0.0 0.0 \n", + "1 0.5 0.5 0.5 0.5 0.5 \n", + "2 1.0 1.0 1.0 1.0 1.0 \n", + "\n", + "[3 rows x 30 columns]" + ] + }, + "execution_count": 23, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "test_df = pd.read_csv(\"etrago_test_network_1/generators-p.csv\", index_col=0)\n", + "test_df" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.8.10" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/tests/data/etrago_test_network_1/buses.csv b/tests/data/etrago_test_network_1/buses.csv new file mode 100644 index 00000000..7df91445 --- /dev/null +++ b/tests/data/etrago_test_network_1/buses.csv @@ -0,0 +1,10 @@ +name,carrier +0,AC +1,CH4 +2,dsm +3,Li ion +4,central_heat +5,central_heat_store +6,rural_heat +7,rural_heat_store +10,junk diff --git a/tests/data/etrago_test_network_1/generators-p.csv b/tests/data/etrago_test_network_1/generators-p.csv new file mode 100644 index 00000000..7dd05d57 --- /dev/null +++ b/tests/data/etrago_test_network_1/generators-p.csv @@ -0,0 +1,4 @@ +,0 biomass,10 biomass,0 central_biomass_CHP,10 central_biomass_CHP,0 run_of_river,10 run_of_river,0 gas,10 gas,0 other_non_renewable,10 other_non_renewable,0 reservoir,10 reservoir,0 solar_0,0 solar_1,10 solar_0,10 solar_1,0 solar_rooftop_0,0 solar_rooftop_1,10 solar_rooftop_0,10 solar_rooftop_1,0 wind_onshore_0,0 wind_onshore_1,10 wind_onshore_0,10 wind_onshore_1,4 solar_thermal_collector,10 solar_thermal_collector,4 geo_thermal,10 geo_thermal,0 junk,10 junk +0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0 +1,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5 +2,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0 diff --git a/tests/data/etrago_test_network_1/generators-p_max_pu.csv b/tests/data/etrago_test_network_1/generators-p_max_pu.csv new file mode 100644 index 00000000..b5ffc47e --- /dev/null +++ b/tests/data/etrago_test_network_1/generators-p_max_pu.csv @@ -0,0 +1,4 @@ +,0 biomass,10 biomass,0 central_biomass_CHP,10 central_biomass_CHP,0 run_of_river,10 run_of_river,0 gas,10 gas,0 other_non_renewable,10 other_non_renewable,0 reservoir,10 reservoir,0 solar_0,0 solar_1,10 solar_0,10 solar_1,0 solar_rooftop_0,0 solar_rooftop_1,10 solar_rooftop_0,10 solar_rooftop_1,0 wind_onshore_0,0 
wind_onshore_1,10 wind_onshore_0,10 wind_onshore_1,4 solar_thermal_collector,10 solar_thermal_collector,4 geo_thermal,10 geo_thermal,0 junk,10 junk +0,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5 +1,0.75,0.75,0.75,0.75,0.75,0.75,0.75,0.75,0.75,0.75,0.75,0.75,0.75,0.75,0.75,0.75,0.75,0.75,0.75,0.75,0.75,0.75,0.75,0.75,0.75,0.75,0.75,0.75,0.75,0.75 +2,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0 diff --git a/tests/data/etrago_test_network_1/generators-p_min_pu.csv b/tests/data/etrago_test_network_1/generators-p_min_pu.csv new file mode 100644 index 00000000..7dd05d57 --- /dev/null +++ b/tests/data/etrago_test_network_1/generators-p_min_pu.csv @@ -0,0 +1,4 @@ +,0 biomass,10 biomass,0 central_biomass_CHP,10 central_biomass_CHP,0 run_of_river,10 run_of_river,0 gas,10 gas,0 other_non_renewable,10 other_non_renewable,0 reservoir,10 reservoir,0 solar_0,0 solar_1,10 solar_0,10 solar_1,0 solar_rooftop_0,0 solar_rooftop_1,10 solar_rooftop_0,10 solar_rooftop_1,0 wind_onshore_0,0 wind_onshore_1,10 wind_onshore_0,10 wind_onshore_1,4 solar_thermal_collector,10 solar_thermal_collector,4 geo_thermal,10 geo_thermal,0 junk,10 junk +0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0 +1,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5 +2,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0 diff --git a/tests/data/etrago_test_network_1/generators-q.csv b/tests/data/etrago_test_network_1/generators-q.csv new file mode 100644 index 00000000..223fb84f --- /dev/null +++ b/tests/data/etrago_test_network_1/generators-q.csv @@ -0,0 +1,4 @@ +,0 biomass,10 biomass,0 central_biomass_CHP,10 central_biomass_CHP,0 run_of_river,10 run_of_river,0 gas,10 gas,0 other_non_renewable,10 other_non_renewable,0 reservoir,10 reservoir,0 solar_0,0 solar_1,10 solar_0,10 solar_1,0 solar_rooftop_0,0 solar_rooftop_1,10 solar_rooftop_0,10 solar_rooftop_1,0 wind_onshore_0,0 wind_onshore_1,10 wind_onshore_0,10 wind_onshore_1,4 solar_thermal_collector,10 solar_thermal_collector,4 geo_thermal,10 geo_thermal,0 junk,10 junk +0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0 +1,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5 +2,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0 diff --git a/tests/data/etrago_test_network_1/generators.csv b/tests/data/etrago_test_network_1/generators.csv new file mode 100644 index 00000000..2e943126 --- /dev/null +++ b/tests/data/etrago_test_network_1/generators.csv @@ -0,0 +1,31 @@ +name,bus,carrier,p_nom,p_nom_opt +0 biomass,0,biomass,1.0,1.0 +10 biomass,10,biomass,10.0,10.0 +0 central_biomass_CHP,0,central_biomass_CHP,1.0,1.0 +10 central_biomass_CHP,10,central_biomass_CHP,10.0,10.0 +0 run_of_river,0,run_of_river,1.0,1.0 +10 run_of_river,10,run_of_river,10.0,10.0 +0 gas,0,gas,1.0,1.0 +10 gas,10,gas,10.0,10.0 +0 other_non_renewable,0,other_non_renewable,1.0,1.0 +10 other_non_renewable,10,other_non_renewable,10.0,10.0 +0 reservoir,0,reservoir,1.0,1.0 +10 reservoir,10,reservoir,10.0,10.0 +0 solar_0,0,solar,1.0,1.0 +0 
solar_1,0,solar,10.0,10.0 +10 solar_0,10,solar,1.0,1.0 +10 solar_1,10,solar,10.0,10.0 +0 solar_rooftop_0,0,solar_rooftop,1.0,1.0 +0 solar_rooftop_1,0,solar_rooftop,10.0,10.0 +10 solar_rooftop_0,10,solar_rooftop,1.0,1.0 +10 solar_rooftop_1,10,solar_rooftop,10.0,10.0 +0 wind_onshore_0,0,wind_onshore,1.0,1.0 +0 wind_onshore_1,0,wind_onshore,10.0,10.0 +10 wind_onshore_0,10,wind_onshore,1.0,1.0 +10 wind_onshore_1,10,wind_onshore,10.0,10.0 +4 solar_thermal_collector,4,solar_thermal_collector,1.0,1.0 +10 solar_thermal_collector,10,solar_thermal_collector,10.0,10.0 +4 geo_thermal,4,geo_thermal,1.0,1.0 +10 geo_thermal,10,geo_thermal,10.0,10.0 +0 junk,0,junk,100.0,100.0 +10 junk,10,junk,100.0,100.0 diff --git a/tests/data/etrago_test_network_1/links-p0.csv b/tests/data/etrago_test_network_1/links-p0.csv new file mode 100644 index 00000000..b2de47da --- /dev/null +++ b/tests/data/etrago_test_network_1/links-p0.csv @@ -0,0 +1,4 @@ +,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16 +0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0 +1,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5 +2,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0 diff --git a/tests/data/etrago_test_network_1/links-p1.csv b/tests/data/etrago_test_network_1/links-p1.csv new file mode 100644 index 00000000..b2de47da --- /dev/null +++ b/tests/data/etrago_test_network_1/links-p1.csv @@ -0,0 +1,4 @@ +,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16 +0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0 +1,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5 +2,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0 diff --git a/tests/data/etrago_test_network_1/links.csv b/tests/data/etrago_test_network_1/links.csv new file mode 100644 index 00000000..8e835b47 --- /dev/null +++ b/tests/data/etrago_test_network_1/links.csv @@ -0,0 +1,18 @@ +name,bus0,bus1,carrier,p_nom,efficiency +0,1,0,central_gas_CHP,1.0,1.0 +1,10,10,central_gas_CHP,10.0,1.0 +2,0,3,dsm,1.0,1.0 +3,10,10,dsm,10.0,1.0 +4,0,4,central_heat_pump,1.0,1.0 +5,10,10,central_heat_pump,10.0,1.0 +6,0,4,central_resistive_heater,1.0,1.0 +7,10,10,central_resistive_heater,10.0,1.0 +8,4,5,central_heat_store_charger,1.0,0.84 +9,10,10,central_heat_store_charger,10.0,0.84 +10,0,6,rural_heat_pump,1.0,1.0 +11,10,10,rural_heat_pump,10.0,1.0 +12,6,7,rural_heat_store_charger,1.0,0.8 +13,10,10,rural_heat_store_charger,10.0,0.8 +14,0,3,BEV_charger,1.0,1.0 +15,10,10,BEV_charger,10.0,1.0 +16,10,10,junk,100.0,1.0 diff --git a/tests/data/etrago_test_network_1/snapshots.csv b/tests/data/etrago_test_network_1/snapshots.csv new file mode 100644 index 00000000..583437ce --- /dev/null +++ b/tests/data/etrago_test_network_1/snapshots.csv @@ -0,0 +1,4 @@ +,snapshot +0,2011-01-01 00:00:00 +1,2011-01-01 12:00:00 +2,2011-01-02 00:00:00 diff --git a/tests/data/etrago_test_network_1/storage_units-p.csv b/tests/data/etrago_test_network_1/storage_units-p.csv new file mode 100644 index 00000000..506a427a --- /dev/null +++ b/tests/data/etrago_test_network_1/storage_units-p.csv @@ -0,0 +1,4 @@ +,0 battery,10 battery,10 junk +0,0.0,0.0,0.0 +1,0.5,0.5,0.5 +2,1.0,1.0,1.0 diff --git a/tests/data/etrago_test_network_1/storage_units-q.csv b/tests/data/etrago_test_network_1/storage_units-q.csv new file mode 100644 index 00000000..506a427a --- /dev/null +++ b/tests/data/etrago_test_network_1/storage_units-q.csv @@ -0,0 +1,4 @@ +,0 battery,10 battery,10 junk +0,0.0,0.0,0.0 +1,0.5,0.5,0.5 +2,1.0,1.0,1.0 diff --git 
a/tests/data/etrago_test_network_1/storage_units-state_of_charge.csv b/tests/data/etrago_test_network_1/storage_units-state_of_charge.csv new file mode 100644 index 00000000..506a427a --- /dev/null +++ b/tests/data/etrago_test_network_1/storage_units-state_of_charge.csv @@ -0,0 +1,4 @@ +,0 battery,10 battery,10 junk +0,0.0,0.0,0.0 +1,0.5,0.5,0.5 +2,1.0,1.0,1.0 diff --git a/tests/data/etrago_test_network_1/storage_units.csv b/tests/data/etrago_test_network_1/storage_units.csv new file mode 100644 index 00000000..f0c5c016 --- /dev/null +++ b/tests/data/etrago_test_network_1/storage_units.csv @@ -0,0 +1,4 @@ +name,bus,carrier,p_nom_opt,p_nom_extendable,max_hours +0 battery,0,battery,1.0,True,10.0 +10 battery,10,battery,10.0,True,10.0 +10 junk,10,junk,10.0,True,10.0 diff --git a/tests/data/etrago_test_network_1/stores-e.csv b/tests/data/etrago_test_network_1/stores-e.csv new file mode 100644 index 00000000..2fac2f91 --- /dev/null +++ b/tests/data/etrago_test_network_1/stores-e.csv @@ -0,0 +1,4 @@ +,5 central_heat_store,10 central_heat_store,7 rural_heat_store,10 rural_heat_store +0,0.0,0.0,0.0,0.0 +1,0.5,0.5,0.5,0.5 +2,1.0,1.0,1.0,1.0 diff --git a/tests/data/etrago_test_network_1/stores.csv b/tests/data/etrago_test_network_1/stores.csv new file mode 100644 index 00000000..6511fbff --- /dev/null +++ b/tests/data/etrago_test_network_1/stores.csv @@ -0,0 +1,5 @@ +name,bus,carrier,e_nom_opt +5 central_heat_store,5,central_heat_store,1 +10 central_heat_store,10,central_heat_store,10 +7 rural_heat_store,7,rural_heat_store,1 +10 rural_heat_store,10,rural_heat_store,10 diff --git a/tests/data/interface_results_reference_data/dispatchable_generators_active_power.csv b/tests/data/interface_results_reference_data/dispatchable_generators_active_power.csv new file mode 100644 index 00000000..45530ad1 --- /dev/null +++ b/tests/data/interface_results_reference_data/dispatchable_generators_active_power.csv @@ -0,0 +1,4 @@ +snapshot,biomass,biomass_CHP,run_of_river,gas,other_non_renewable,junk +2011-01-01 00:00:00,0.0,0.0,0.0,0.0,0.0,0.0 +2011-01-01 12:00:00,0.5,0.5,0.5,0.5,0.5,0.005 +2011-01-02 00:00:00,1.0,1.0,1.0,1.0,1.0,0.01 diff --git a/tests/data/interface_results_reference_data/dispatchable_generators_reactive_power.csv b/tests/data/interface_results_reference_data/dispatchable_generators_reactive_power.csv new file mode 100644 index 00000000..fc3eef2c --- /dev/null +++ b/tests/data/interface_results_reference_data/dispatchable_generators_reactive_power.csv @@ -0,0 +1,4 @@ +snapshot,biomass,biomass_CHP,run_of_river,gas,other_non_renewable,junk +2011-01-01 00:00:00,0.0,0.0,0.0,0.0,0.0,0.0 +2011-01-01 12:00:00,0.5,0.5,0.5,0.5,0.5,0.005 +2011-01-02 00:00:00,-1.0,-1.0,-1.0,-1.0,-1.0,-0.01 diff --git a/tests/data/interface_results_reference_data/dsm_active_power.csv b/tests/data/interface_results_reference_data/dsm_active_power.csv new file mode 100644 index 00000000..9a217ea7 --- /dev/null +++ b/tests/data/interface_results_reference_data/dsm_active_power.csv @@ -0,0 +1,4 @@ +snapshot,dsm +2011-01-01 00:00:00,0.0 +2011-01-01 12:00:00,0.5 +2011-01-02 00:00:00,1.0 diff --git a/tests/data/interface_results_reference_data/dsm_reactive_power.csv b/tests/data/interface_results_reference_data/dsm_reactive_power.csv new file mode 100644 index 00000000..f90539eb --- /dev/null +++ b/tests/data/interface_results_reference_data/dsm_reactive_power.csv @@ -0,0 +1,4 @@ +snapshot,dsm +2011-01-01 00:00:00,0.0 +2011-01-01 12:00:00,0.0 +2011-01-02 00:00:00,0.0 diff --git 
a/tests/data/interface_results_reference_data/electromobility_active_power.csv b/tests/data/interface_results_reference_data/electromobility_active_power.csv new file mode 100644 index 00000000..4e95b784 --- /dev/null +++ b/tests/data/interface_results_reference_data/electromobility_active_power.csv @@ -0,0 +1,4 @@ +snapshot,BEV charger +2011-01-01 00:00:00,0.0 +2011-01-01 12:00:00,0.5 +2011-01-02 00:00:00,1.0 diff --git a/tests/data/interface_results_reference_data/electromobility_reactive_power.csv b/tests/data/interface_results_reference_data/electromobility_reactive_power.csv new file mode 100644 index 00000000..cd21048c --- /dev/null +++ b/tests/data/interface_results_reference_data/electromobility_reactive_power.csv @@ -0,0 +1,4 @@ +snapshot,BEV charger +2011-01-01 00:00:00,0.0 +2011-01-01 12:00:00,0.0 +2011-01-02 00:00:00,0.0 diff --git a/tests/data/interface_results_reference_data/feedin_district_heating.csv b/tests/data/interface_results_reference_data/feedin_district_heating.csv new file mode 100644 index 00000000..49cb0fbf --- /dev/null +++ b/tests/data/interface_results_reference_data/feedin_district_heating.csv @@ -0,0 +1,4 @@ +snapshot,4 +2011-01-01 00:00:00,0.0 +2011-01-01 12:00:00,1.0 +2011-01-02 00:00:00,2.0 diff --git a/tests/data/interface_results_reference_data/heat_pump_central_active_power.csv b/tests/data/interface_results_reference_data/heat_pump_central_active_power.csv new file mode 100644 index 00000000..afdd9dd7 --- /dev/null +++ b/tests/data/interface_results_reference_data/heat_pump_central_active_power.csv @@ -0,0 +1,4 @@ +snapshot,central_heat_pump +2011-01-01 00:00:00,0.0 +2011-01-01 12:00:00,1.0 +2011-01-02 00:00:00,2.0 diff --git a/tests/data/interface_results_reference_data/heat_pump_central_reactive_power.csv b/tests/data/interface_results_reference_data/heat_pump_central_reactive_power.csv new file mode 100644 index 00000000..25ef1425 --- /dev/null +++ b/tests/data/interface_results_reference_data/heat_pump_central_reactive_power.csv @@ -0,0 +1,4 @@ +snapshot,central_heat_pump +2011-01-01 00:00:00,0.0 +2011-01-01 12:00:00,0.0 +2011-01-02 00:00:00,0.0 diff --git a/tests/data/interface_results_reference_data/heat_pump_rural_active_power.csv b/tests/data/interface_results_reference_data/heat_pump_rural_active_power.csv new file mode 100644 index 00000000..13e33170 --- /dev/null +++ b/tests/data/interface_results_reference_data/heat_pump_rural_active_power.csv @@ -0,0 +1,4 @@ +snapshot,rural_heat_pump +2011-01-01 00:00:00,0.0 +2011-01-01 12:00:00,0.5 +2011-01-02 00:00:00,1.0 diff --git a/tests/data/interface_results_reference_data/heat_pump_rural_reactive_power.csv b/tests/data/interface_results_reference_data/heat_pump_rural_reactive_power.csv new file mode 100644 index 00000000..b7845c2e --- /dev/null +++ b/tests/data/interface_results_reference_data/heat_pump_rural_reactive_power.csv @@ -0,0 +1,4 @@ +snapshot,rural_heat_pump +2011-01-01 00:00:00,0.0 +2011-01-01 12:00:00,0.0 +2011-01-02 00:00:00,0.0 diff --git a/tests/data/interface_results_reference_data/renewables_curtailment.csv b/tests/data/interface_results_reference_data/renewables_curtailment.csv new file mode 100644 index 00000000..85a8dab8 --- /dev/null +++ b/tests/data/interface_results_reference_data/renewables_curtailment.csv @@ -0,0 +1,4 @@ +snapshot,solar,wind +2011-01-01 00:00:00,11.0,5.5 +2011-01-01 12:00:00,14.5,7.25 +2011-01-02 00:00:00,18.0,9.0 diff --git a/tests/data/interface_results_reference_data/renewables_dispatch_reactive_power.csv 
b/tests/data/interface_results_reference_data/renewables_dispatch_reactive_power.csv new file mode 100644 index 00000000..36db45a2 --- /dev/null +++ b/tests/data/interface_results_reference_data/renewables_dispatch_reactive_power.csv @@ -0,0 +1,4 @@ +snapshot,solar,wind +2011-01-01 00:00:00,0.0,0.0 +2011-01-01 12:00:00,0.09091,0.09091 +2011-01-02 00:00:00,-0.18182,-0.18182 diff --git a/tests/data/interface_results_reference_data/renewables_dispatch_reactive_power_max_cosphi.csv b/tests/data/interface_results_reference_data/renewables_dispatch_reactive_power_max_cosphi.csv new file mode 100644 index 00000000..b004a539 --- /dev/null +++ b/tests/data/interface_results_reference_data/renewables_dispatch_reactive_power_max_cosphi.csv @@ -0,0 +1,4 @@ +snapshot,solar,wind +2011-01-01 00:00:00,0.0,0.0 +2011-01-01 12:00:00,0.04403,0.04403 +2011-01-02 00:00:00,-0.08806,-0.08806 diff --git a/tests/data/interface_results_reference_data/renewables_p_nom.csv b/tests/data/interface_results_reference_data/renewables_p_nom.csv new file mode 100644 index 00000000..242760ec --- /dev/null +++ b/tests/data/interface_results_reference_data/renewables_p_nom.csv @@ -0,0 +1,3 @@ +carrier,p_nom +solar,22.0 +wind,11.0 diff --git a/tests/data/interface_results_reference_data/renewables_potential.csv b/tests/data/interface_results_reference_data/renewables_potential.csv new file mode 100644 index 00000000..1bc313f4 --- /dev/null +++ b/tests/data/interface_results_reference_data/renewables_potential.csv @@ -0,0 +1,4 @@ +snapshot,solar,wind +2011-01-01 00:00:00,0.5,0.5 +2011-01-01 12:00:00,0.75,0.75 +2011-01-02 00:00:00,1.0,1.0 diff --git a/tests/data/interface_results_reference_data/storage_units_active_power.csv b/tests/data/interface_results_reference_data/storage_units_active_power.csv new file mode 100644 index 00000000..afc27bff --- /dev/null +++ b/tests/data/interface_results_reference_data/storage_units_active_power.csv @@ -0,0 +1,4 @@ +snapshot,battery +2011-01-01 00:00:00,0.0 +2011-01-01 12:00:00,0.5 +2011-01-02 00:00:00,1.0 diff --git a/tests/data/interface_results_reference_data/storage_units_reactive_power.csv b/tests/data/interface_results_reference_data/storage_units_reactive_power.csv new file mode 100644 index 00000000..afc27bff --- /dev/null +++ b/tests/data/interface_results_reference_data/storage_units_reactive_power.csv @@ -0,0 +1,4 @@ +snapshot,battery +2011-01-01 00:00:00,0.0 +2011-01-01 12:00:00,0.5 +2011-01-02 00:00:00,1.0 diff --git a/tests/data/interface_results_reference_data/storage_units_soc.csv b/tests/data/interface_results_reference_data/storage_units_soc.csv new file mode 100644 index 00000000..e9f7ec39 --- /dev/null +++ b/tests/data/interface_results_reference_data/storage_units_soc.csv @@ -0,0 +1,4 @@ +snapshot,battery +2011-01-01 00:00:00,0.0 +2011-01-01 12:00:00,0.05 +2011-01-02 00:00:00,0.1 diff --git a/tests/data/interface_results_reference_data/thermal_storage_central_soc.csv b/tests/data/interface_results_reference_data/thermal_storage_central_soc.csv new file mode 100644 index 00000000..2e622412 --- /dev/null +++ b/tests/data/interface_results_reference_data/thermal_storage_central_soc.csv @@ -0,0 +1,4 @@ +snapshot,4 +2011-01-01 00:00:00,0.0 +2011-01-01 12:00:00,0.5 +2011-01-02 00:00:00,1.0 diff --git a/tests/data/interface_results_reference_data/thermal_storage_rural_soc.csv b/tests/data/interface_results_reference_data/thermal_storage_rural_soc.csv new file mode 100644 index 00000000..4827dda7 --- /dev/null +++ 
b/tests/data/interface_results_reference_data/thermal_storage_rural_soc.csv @@ -0,0 +1,4 @@ +snapshot, +2011-01-01 00:00:00,0.0 +2011-01-01 12:00:00,0.5 +2011-01-02 00:00:00,1.0 diff --git a/tests/tools/test_interface.py b/tests/tools/test_interface.py new file mode 100644 index 00000000..29f676cd --- /dev/null +++ b/tests/tools/test_interface.py @@ -0,0 +1,180 @@ +import logging +import os +import random + +import pandas as pd +import pytest + +from pypsa import Network as PyPSANetwork + +from ego.tools.interface import ETraGoMinimalData, get_etrago_results_per_bus + +logger = logging.getLogger(__name__) + +random.seed(42) + + +class TestSpecs: + @classmethod + def setup_class(cls): + cls.etrago_network = PyPSANetwork(pytest.etrago_test_network_1_path) + + def test_class_etrago_minimal_data(self): + etrago_network = ETraGoMinimalData(self.etrago_network) + assert "p_min_pu" not in etrago_network.generators_t + + def test_get_etrago_results_per_bus(self): + + bus_id = 0 + etrago_network = ETraGoMinimalData(self.etrago_network) + pf_post_lopf = True + max_cos_phi_renewable = False + + etrago_results_per_bus = get_etrago_results_per_bus( + bus_id, + etrago_network, + pf_post_lopf, + max_cos_phi_renewable, + ) + + for key, value in etrago_results_per_bus.items(): + logger.info(f"Check Result: {key}") + if key == "timeindex": + assert type(value) is pd.DatetimeIndex + pd.testing.assert_index_equal( + value, + pd.DatetimeIndex( + data=[ + "2011-01-01 00:00:00", + "2011-01-01 12:00:00", + "2011-01-02 00:00:00", + ], + name="snapshot", + ), + ) + elif key == "storage_units_p_nom": + assert value == 1.0 + elif key == "storage_units_max_hours": + assert value == 10.0 + elif key == "thermal_storage_central_capacity": + pd.testing.assert_series_equal( + value, pd.Series(index=["4"], data=[1.0]), check_names=False + ) + elif key == "thermal_storage_rural_capacity": + assert value == 1.0 + elif key == "heat_pump_rural_p_nom": + assert value == 1.0 + elif key == "heat_pump_central_p_nom": + assert value == 2.0 + elif key == "thermal_storage_rural_efficiency": + assert value == 0.8 + elif key == "thermal_storage_central_efficiency": + assert value == 0.84 + else: + path_reference_df = os.path.join( + pytest.interface_results_reference_data_path, f"{key}.csv" + ) + if isinstance(value, pd.DataFrame): + reference_df = pd.read_csv( + path_reference_df, index_col=0, parse_dates=True + ) + pd.testing.assert_frame_equal( + value, reference_df, check_index_type=False, check_names=False + ) + else: + reference_s = pd.read_csv( + path_reference_df, index_col=0, parse_dates=True + ).iloc[:, 0] + pd.testing.assert_series_equal( + value, reference_s, check_index_type=False, check_names=False + ) + + def test_get_etrago_results_per_bus_empty(self): + + bus_id = 11 + etrago_network = ETraGoMinimalData(self.etrago_network) + pf_post_lopf = True + max_cos_phi_renewable = False + + etrago_results_per_bus = get_etrago_results_per_bus( + bus_id, + etrago_network, + pf_post_lopf, + max_cos_phi_renewable, + ) + + float_results = [ + "storage_units_p_nom", + "storage_units_max_hours", + "heat_pump_rural_p_nom", + "heat_pump_central_p_nom", + "thermal_storage_rural_capacity", + "thermal_storage_rural_efficiency", + "thermal_storage_central_efficiency", + ] + series_results = [ + "renewables_p_nom", + "storage_units_active_power", + "storage_units_reactive_power", + "storage_units_soc", + "dsm_active_power", + "heat_pump_rural_active_power", + "heat_pump_rural_reactive_power", + "thermal_storage_rural_soc", + 
"heat_central_active_power", + "heat_central_reactive_power", + "thermal_storage_central_capacity", + "electromobility_active_power", + "electromobility_reactive_power", + ] + dataframes_results = [ + "dispatchable_generators_active_power", + "dispatchable_generators_reactive_power", + "renewables_potential", + "renewables_curtailment", + "renewables_dispatch_reactive_power", + "thermal_storage_central_soc", + "feedin_district_heating", + ] # + + for key, value in etrago_results_per_bus.items(): + if key in float_results: + if value == 0.0: + float_results.remove(key) + elif key in series_results: + if value.empty: + series_results.remove(key) + elif key in dataframes_results: + if len(value.columns) == 0: + dataframes_results.remove(key) + + assert len(float_results) == 0 + + def test_get_etrago_results_per_bus_with_set_max_cosphi(self): + + bus_id = 0 + etrago_network = ETraGoMinimalData(self.etrago_network) + pf_post_lopf = True + max_cos_phi_renewable = 0.9 + + etrago_results_per_bus = get_etrago_results_per_bus( + bus_id, + etrago_network, + pf_post_lopf, + max_cos_phi_renewable, + ) + renewables_dispatch_reactive_power = etrago_results_per_bus[ + "renewables_dispatch_reactive_power" + ] + path_reference_df = os.path.join( + pytest.interface_results_reference_data_path, + "renewables_dispatch_reactive_power_max_cosphi.csv", + ) + reference_df = pd.read_csv(path_reference_df, index_col=0, parse_dates=True) + pd.testing.assert_frame_equal( + renewables_dispatch_reactive_power, + reference_df, + check_index_type=False, + check_names=False, + atol=1e-4, + )