Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Feature/time series load update and sgen update #184

Open
wants to merge 4 commits into
base: main
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from 3 commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
305 changes: 299 additions & 6 deletions src/power_grid_model_io/converters/pandapower_converter.py
Original file line number Diff line number Diff line change
Expand Up @@ -46,12 +46,19 @@ def __init__(self, system_frequency: float = 50.0, trafo_loading: str = "current
self.pgm_input_data: SingleDataset = {}
self.pp_output_data: PandaPowerData = {}
self.pgm_output_data: SingleDataset = {}
self.pp_update_data: PandaPowerData = {}
self.pgm_update_data: SingleDataset = {}
self.pgm_nodes_lookup: pd.DataFrame = pd.DataFrame()
self.idx: Dict[Tuple[str, Optional[str]], pd.Series] = {}
self.idx_lookup: Dict[Tuple[str, Optional[str]], pd.Series] = {}
self.next_idx = 0

def _parse_data(self, data: PandaPowerData, data_type: str, extra_info: Optional[ExtraInfo] = None) -> Dataset:
def _parse_data(
self,
data: PandaPowerData,
data_type: str,
extra_info: Optional[ExtraInfo] = None,
) -> Dataset:
"""
Set up for conversion from PandaPower to power-grid-model

Expand All @@ -65,18 +72,21 @@ def _parse_data(self, data: PandaPowerData, data_type: str, extra_info: Optional
Returns:
Converted power-grid-model data
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

right now, it always returns self.pgm_input_data. I think it should return self.pgm_update_data when the data_type == "update", right?

"""

# Clear pgm data
self.pgm_input_data = {}
self.idx_lookup = {}
self.next_idx = 0

# Set pandas data
self.pp_input_data = data
self.pgm_update_data = {}
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

nitpick but please keep similar fields close together for readability, e.g.:

        self.pgm_input_data = {}
        self.pgm_update_data = {}
        self.idx_lookup = {}
        self.next_idx = 0


# Convert
if data_type == "input":
# Set pandas data
self.pp_input_data = data
self._create_input_data()
elif data_type == "update":
self.pp_update_data = data
self._update_input_data()
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

since this function is called _parse_data, this may not be the right place to actually set the data. could be out of scope though

Alternatively, maybe it's a good moment to split the (minor) chunks of differences between input_data, output_data and update_data into separate classes

else:
raise ValueError(f"Data type: '{data_type}' is not implemented")

Expand Down Expand Up @@ -115,7 +125,7 @@ def _serialize_data(self, data: Dataset, extra_info: Optional[ExtraInfo]) -> Pan
def pgm_output_dtype_checker(check_type: str) -> bool:
    """
    Return True iff every array in self.pgm_output_data has the dtype that
    power-grid-model registers for its component under `check_type`
    (e.g. "sym_output" / "asym_output").
    """
    # NOTE: the dtype lives under the "dtype" key of the component metadata;
    # comparing against the metadata object itself (the pre-fix code) never matches.
    return all(
        (
            comp_array.dtype == power_grid_meta_data[check_type][component]["dtype"]
            for component, comp_array in self.pgm_output_data.items()
        )
    )
Expand Down Expand Up @@ -247,7 +257,7 @@ def _extra_info_to_pgm_input_data(self, extra_info: ExtraInfo): # pylint: disab
nan = np.iinfo(dtype).min
all_other_cols = ["i_n"]
for component, data in self.pgm_output_data.items():
input_cols = power_grid_meta_data["input"][component].dtype.names
input_cols = power_grid_meta_data["input"][component]["dtype"].names
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

was this a bug? or a deprecated feature of structured arrays? the original one feels more correct but i could be wrong

node_cols = [col for col in input_cols if NODE_REF_RE.fullmatch(col)]
other_cols = [col for col in input_cols if col in all_other_cols]
if not node_cols + other_cols:
Expand Down Expand Up @@ -332,6 +342,10 @@ def _create_output_data_3ph(self):
self._pp_asym_gens_output_3ph()
self._pp_asym_loads_output_3ph()

def _update_input_data(self):
    """Fill self.pgm_update_data from the PandaPower time-series (update) tables."""
    # Loads first, then static generators — each converter writes its own component.
    for convert in (self._pp_update_loads, self._pp_update_sgens):
        convert()

def _create_pgm_input_nodes(self):
"""
This function converts a Bus Dataframe of PandaPower to a power-grid-model Node input array.
Expand Down Expand Up @@ -2045,6 +2059,273 @@ def _pp_asym_gens_output_3ph(self):
assert "res_asymmetric_sgen_3ph" not in self.pp_output_data
self.pp_output_data["res_asymmetric_sgen_3ph"] = pp_output_asym_gens_3ph

# pylint: disable-msg=too-many-locals
def _pp_update_loads(self):  # pragma: no cover
    """
    Convert ConstControl time-series on PandaPower loads into a
    power-grid-model "sym_load" update array of shape (time_steps, 3 * n_loads).

    Each PandaPower load was converted into three power-grid-model sym_loads
    (const_power, const_impedance, const_current); a controller's profile value
    is distributed over those three according to the load's
    const_i_percent / const_z_percent attributes.
    """
    controllers = self.pp_update_data["controller"]["object"]

    scaling = self._get_pp_attr("load", "scaling", 1.0)
    all_load_ids = self.pp_update_data["load"].index.values
    # 1e4 == 1e-2 (percent -> fraction) * 1e6 (MW/MVAr -> W/var)
    const_i_multiplier = self._get_pp_attr("load", "const_i_percent", 0) * scaling * 1e4
    const_z_multiplier = self._get_pp_attr("load", "const_z_percent", 0) * scaling * 1e4
    const_p_multiplier = (1e6 - const_i_multiplier - const_z_multiplier) * scaling

    # Collect the controllers that drive loads, plus the (unique) load ids they drive
    load_controller_ids = [cid for cid, ctrl in enumerate(controllers) if ctrl.element == "load"]
    if not load_controller_ids:
        # No time-series defined for loads: nothing to update
        return
    # sorted() gives a deterministic column order; a bare set has no stable order
    pp_load_ids = sorted({controllers[cid].element_index[0] for cid in load_controller_ids})

    # NOTE(review): assumes every ConstControl shares one data source, so the
    # first controller's DataFrame is taken — confirm for multi-source nets.
    data = controllers[load_controller_ids[0]].data_source.df

    time_steps = len(data)  # DataFrame rows are time steps
    profiles = len(pp_load_ids)  # one PandaPower load per profile, three PGM loads each

    pgm_load_profile = initialize_array("update", "sym_load", (time_steps, profiles * 3))
    pgm_load_profile["id"] = self._get_timeseries_load_ids(pp_load_ids)

    for controller_id in load_controller_ids:
        controller = controllers[controller_id]
        pp_idx = np.array(controller.element_index)

        # PGM ids of the three sym_loads this PandaPower load was split into
        load_id_const_power = self._get_pgm_ids("load", pp_idx, name="const_power").iloc[0]
        load_id_const_impedance = self._get_pgm_ids("load", pp_idx, name="const_impedance").iloc[0]
        load_id_const_current = self._get_pgm_ids("load", pp_idx, name="const_current").iloc[0]

        # Position of this load in the per-load multiplier arrays above
        scaling_index = np.where(all_load_ids == controller.element_index[0])[0]

        id_multiplier_pairs = (
            (load_id_const_power, const_p_multiplier),
            (load_id_const_impedance, const_z_multiplier),
            (load_id_const_current, const_i_multiplier),
        )

        # NOTE(review): the profile column is selected by controller position
        # (data.iloc[:, controller_id]) — confirm this matches the data source layout.
        if controller.variable == "p_mw":
            # This controller is responsible for p_mw: set p_specified
            p_mw = data.iloc[:, controller_id].to_numpy()
            for load_id, multiplier in id_multiplier_pairs:
                pgm_load_profile["p_specified"][pgm_load_profile["id"] == load_id] = (
                    p_mw * multiplier[scaling_index]
                )

        if controller.variable == "q_mvar":
            # This controller is responsible for q_mvar: set q_specified
            q_mvar = data.iloc[:, controller_id].to_numpy()
            for load_id, multiplier in id_multiplier_pairs:
                pgm_load_profile["q_specified"][pgm_load_profile["id"] == load_id] = (
                    q_mvar * multiplier[scaling_index]
                )

    self.pgm_update_data["sym_load"] = pgm_load_profile

# pylint: disable-msg=too-many-locals
def _pp_update_sgens(self):  # pragma: no cover
    """
    Convert ConstControl time-series on PandaPower static generators into a
    power-grid-model "sym_gen" update array of shape (time_steps, n_sgens).
    """
    controllers = self.pp_update_data["controller"]["object"]

    scaling = self._get_pp_attr("sgen", "scaling", 1.0)
    all_sgen_ids = self.pp_update_data["sgen"].index.values

    # Collect the controllers that drive sgens, plus the (unique) sgen ids they drive
    sgen_controller_ids = [cid for cid, ctrl in enumerate(controllers) if ctrl.element == "sgen"]
    if not sgen_controller_ids:
        # No time-series defined for sgens: nothing to update
        return
    # sorted() gives a deterministic column order; a bare set has no stable order
    pp_sgen_ids = sorted({controllers[cid].element_index[0] for cid in sgen_controller_ids})

    # NOTE(review): assumes every ConstControl shares one data source, so the
    # first controller's DataFrame is taken — confirm for multi-source nets.
    data = controllers[sgen_controller_ids[0]].data_source.df

    time_steps = len(data)  # DataFrame rows are time steps
    profiles = len(pp_sgen_ids)  # DataFrame columns are sgen profiles

    # BUG FIX: this array was initialized as "sym_load" although it is stored
    # (and consumed) under "sym_gen".
    pgm_symgen_profile = initialize_array("update", "sym_gen", (time_steps, profiles))
    pgm_symgen_profile["id"] = self._get_pgm_ids("sgen", np.array(pp_sgen_ids))

    for controller_id in sgen_controller_ids:
        controller = controllers[controller_id]
        sym_gen_id = self._get_pgm_ids("sgen", np.array(controller.element_index)).iloc[0]

        # Position of this sgen in the scaling array above
        scaling_index = np.where(all_sgen_ids == controller.element_index[0])[0]

        # 1e6 converts MW/MVAr -> W/var; the sgen's own scaling factor also applies.
        # NOTE(review): the profile column is selected by controller position
        # (data.iloc[:, controller_id]) — confirm this matches the data source layout.
        if controller.variable == "p_mw":
            # This controller is responsible for p_mw: set p_specified
            p_mw = data.iloc[:, controller_id].to_numpy()
            pgm_symgen_profile["p_specified"][pgm_symgen_profile["id"] == sym_gen_id] = p_mw * (
                1e6 * scaling[scaling_index]
            )

        if controller.variable == "q_mvar":
            # This controller is responsible for q_mvar: set q_specified
            q_mvar = data.iloc[:, controller_id].to_numpy()
            pgm_symgen_profile["q_specified"][pgm_symgen_profile["id"] == sym_gen_id] = q_mvar * (
                1e6 * scaling[scaling_index]
            )

    self.pgm_update_data["sym_gen"] = pgm_symgen_profile

def _generate_ids(self, pp_table: str, pp_idx: pd.Index, name: Optional[str] = None) -> np.ndarray:
"""
Generate numerical power-grid-model IDs for a PandaPower component
Expand Down Expand Up @@ -2103,6 +2384,18 @@ def _get_pp_ids(self, pp_table: str, pgm_idx: Optional[pd.Series] = None, name:
return self.idx_lookup[key]
return self.idx_lookup[key][pgm_idx]

def _get_timeseries_load_ids(self, pp_load_ids):
    """
    Map PandaPower load ids onto the power-grid-model ids of the three
    sym_loads each load was split into, concatenated in the order
    const_power, const_impedance, const_current.
    """
    pp_idx = np.array(pp_load_ids)
    return pd.concat(
        self._get_pgm_ids("load", pp_idx, name=part)
        for part in ("const_power", "const_impedance", "const_current")
    )

@staticmethod
def _get_tap_size(pp_trafo: pd.DataFrame) -> np.ndarray:
"""
Expand Down
Loading