From 9debc200fe68be7cb9197e9ee04cdd15d1958cbb Mon Sep 17 00:00:00 2001
From: Brian Wylie
Date: Mon, 3 Mar 2025 09:04:36 -0700
Subject: [PATCH] reducing rows from 500 to 100 when sending to endpoint

---
 src/workbench/core/artifacts/endpoint_core.py | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/src/workbench/core/artifacts/endpoint_core.py b/src/workbench/core/artifacts/endpoint_core.py
index ea048a175..4f99f6a94 100644
--- a/src/workbench/core/artifacts/endpoint_core.py
+++ b/src/workbench/core/artifacts/endpoint_core.py
@@ -456,15 +456,15 @@ def _predict(self, eval_df: pd.DataFrame) -> pd.DataFrame:
             deserializer=CSVDeserializer(),
         )
 
-        # Now split up the dataframe into 500 row chunks, send those chunks to our
+        # Now split up the dataframe into 100 row chunks, send those chunks to our
         # endpoint (with error handling) and stitch all the chunks back together
         df_list = []
         total_rows = len(eval_df)
-        for index in range(0, len(eval_df), 500):
-            self.log.info(f"Processing {index}:{min(index+500, total_rows)} out of {total_rows} rows...")
+        for index in range(0, len(eval_df), 100):
+            self.log.info(f"Processing {index}:{min(index+100, total_rows)} out of {total_rows} rows...")
 
             # Compute partial DataFrames, add them to a list, and concatenate at the end
-            partial_df = self._endpoint_error_handling(predictor, eval_df[index : index + 500])
+            partial_df = self._endpoint_error_handling(predictor, eval_df[index : index + 100])
             df_list.append(partial_df)
 
         # Concatenate the dataframes
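
For reviewers, the chunking pattern this patch tunes looks roughly like the
sketch below when pulled out of _predict(). The chunk_predict name and the
predict_fn callable are hypothetical stand-ins for the real method and its
self._endpoint_error_handling(predictor, ...) call; only the fixed-size step
and the concat-at-the-end structure come from the patch itself.

    # Standalone sketch of the chunking pattern in _predict() above.
    # chunk_predict and predict_fn are hypothetical stand-ins; the real
    # code wraps a SageMaker Predictor via self._endpoint_error_handling().
    import pandas as pd

    def chunk_predict(eval_df: pd.DataFrame, predict_fn, chunk_size: int = 100) -> pd.DataFrame:
        """Send eval_df to predict_fn in chunk_size-row pieces and stitch the results."""
        df_list = []
        total_rows = len(eval_df)
        for index in range(0, total_rows, chunk_size):
            # The real code logs this via self.log.info()
            print(f"Processing {index}:{min(index + chunk_size, total_rows)} out of {total_rows} rows...")
            # Positional row slice; each partial result covers one chunk
            df_list.append(predict_fn(eval_df[index : index + chunk_size]))
        # Stitch all the partial DataFrames back together
        return pd.concat(df_list)

    # Usage example with a trivial identity predict_fn:
    if __name__ == "__main__":
        df = pd.DataFrame({"x": range(250)})
        out = chunk_predict(df, lambda chunk: chunk, chunk_size=100)
        assert len(out) == len(df)

Smaller chunks mean more round trips but a smaller payload per request, which
is presumably the motivation for dropping from 500 to 100 rows (SageMaker
real-time endpoints cap each invocation payload, so oversized batches can
fail outright).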