
Commit 9debc20
Reduce chunk size from 500 to 100 rows when sending to the endpoint
brifordwylie committed Mar 3, 2025
Parent: 3a4a65f · Commit: 9debc20
Showing 1 changed file with 4 additions and 4 deletions.
8 changes: 4 additions & 4 deletions src/workbench/core/artifacts/endpoint_core.py
@@ -456,15 +456,15 @@ def _predict(self, eval_df: pd.DataFrame) -> pd.DataFrame:
             deserializer=CSVDeserializer(),
         )
 
-        # Now split up the dataframe into 500 row chunks, send those chunks to our
+        # Now split up the dataframe into 100 row chunks, send those chunks to our
         # endpoint (with error handling) and stitch all the chunks back together
         df_list = []
         total_rows = len(eval_df)
-        for index in range(0, len(eval_df), 500):
-            self.log.info(f"Processing {index}:{min(index+500, total_rows)} out of {total_rows} rows...")
+        for index in range(0, len(eval_df), 100):
+            self.log.info(f"Processing {index}:{min(index+100, total_rows)} out of {total_rows} rows...")
 
             # Compute partial DataFrames, add them to a list, and concatenate at the end
-            partial_df = self._endpoint_error_handling(predictor, eval_df[index : index + 500])
+            partial_df = self._endpoint_error_handling(predictor, eval_df[index : index + 100])
             df_list.append(partial_df)
 
         # Concatenate the dataframes
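
For context, the change tunes a simple chunk-and-concatenate pattern: slice the evaluation DataFrame, predict each slice against the endpoint, and stitch the partial results back together. Below is a minimal, self-contained sketch of that pattern with the chunk size pulled out as a parameter; the predict_fn callable and the predict_in_chunks wrapper are hypothetical stand-ins for the repository's self._endpoint_error_handling(predictor, ...) call, not Workbench's actual API.

import pandas as pd
from typing import Callable

# Hypothetical stand-in for _endpoint_error_handling(predictor, df): any
# callable that maps an input DataFrame chunk to a prediction DataFrame.
PredictFn = Callable[[pd.DataFrame], pd.DataFrame]


def predict_in_chunks(eval_df: pd.DataFrame, predict_fn: PredictFn, chunk_size: int = 100) -> pd.DataFrame:
    """Send eval_df to an endpoint in chunk_size-row slices and stitch the results."""
    df_list = []
    total_rows = len(eval_df)
    for index in range(0, total_rows, chunk_size):
        end = min(index + chunk_size, total_rows)
        print(f"Processing {index}:{end} out of {total_rows} rows...")

        # Each slice is sent independently, so one oversized or failing
        # request doesn't take down the whole batch
        partial_df = predict_fn(eval_df[index : index + chunk_size])
        df_list.append(partial_df)

    # Concatenate the partial DataFrames back into a single result
    return pd.concat(df_list, ignore_index=True)

Smaller chunks mean more round trips but a smaller request payload per call, which is the trade-off this commit shifts toward.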
