Cpu memory accumulation bug #20730
Status: Open. ved1beta wants to merge 8 commits into Lightning-AI:master from ved1beta:cpu_memory_accumulation_bug.
Changes from all commits (8):
- 403b3ae  fix: Add memory leak prevention in prediction loop  (ved1beta)
- ff5f9eb  fix: Add memory leak prevention in prediction loop  (ved1beta)
- 65d38fe  test removed  (ved1beta)
- ce0897c  memory leak test  (ved1beta)
- f24ea83  env var cleanu  (ved1beta)
- f3cf136  [pre-commit.ci] auto fixes from pre-commit.com hooks  (pre-commit-ci[bot])
- 6aa644a  precommit fix  (ved1beta)
- 6d6d04e  Merge branch 'cpu_memory_accumulation_bug' of github.com:ved1beta/pyt…  (ved1beta)
Prediction loop changes:

```diff
@@ -15,11 +15,9 @@
 from collections.abc import Iterator
 from typing import Any, Optional, Union

-import torch
 from lightning_utilities import WarningCache

 import lightning.pytorch as pl
-from lightning.fabric.utilities import move_data_to_device
 from lightning.pytorch.callbacks import BasePredictionWriter
 from lightning.pytorch.loops.fetchers import _DataFetcher, _DataLoaderIterDataFetcher
 from lightning.pytorch.loops.loop import _Loop
@@ -247,32 +245,29 @@ def _predict_step(
         self.batch_progress.increment_started()

-        # configure step_kwargs
-        step_args = (
-            self._build_step_args_from_hook_kwargs(hook_kwargs, "predict_step")
-            if not using_dataloader_iter
-            else (dataloader_iter,)
-        )
-        predictions = call._call_strategy_hook(trainer, "predict_step", *step_args)
-        if predictions is None:
-            self._warning_cache.warn("predict returned None if it was on purpose, ignore this warning...")
+        step_args = self._build_step_args_from_hook_kwargs(hook_kwargs, "predict_step")
+        step_output = call._call_lightning_module_hook(trainer, "predict_step", *step_args)

         self.batch_progress.increment_processed()

         if using_dataloader_iter:
             # update the hook kwargs now that the step method might have consumed the iterator
             batch = data_fetcher._batch
             batch_idx = data_fetcher._batch_idx
             dataloader_idx = data_fetcher._dataloader_idx
             hook_kwargs = self._build_kwargs(batch, batch_idx, dataloader_idx if self.num_dataloaders > 1 else None)

         # track batch indices for prediction writer
         if not using_dataloader_iter and any_on_epoch:
             self.current_batch_indices = self._get_batch_indices(data_fetcher.current_dataloader)

+        # track predictions if needed
+        if self.return_predictions:
+            self._predictions[dataloader_idx].append(step_output)
+        else:
+            # Clear memory if not returning predictions
+            import gc
+
+            gc.collect()

-        call._call_callback_hooks(trainer, "on_predict_batch_end", predictions, *hook_kwargs.values())
-        call._call_lightning_module_hook(trainer, "on_predict_batch_end", predictions, *hook_kwargs.values())
+        call._call_callback_hooks(trainer, "on_predict_batch_end", step_output, *hook_kwargs.values())
+        call._call_lightning_module_hook(trainer, "on_predict_batch_end", step_output, *hook_kwargs.values())

         self.batch_progress.increment_completed()

-        if self._return_predictions or any_on_epoch:
-            self._predictions[dataloader_idx].append(move_data_to_device(predictions, torch.device("cpu")))

     def _build_kwargs(self, batch: Any, batch_idx: int, dataloader_idx: Optional[int]) -> OrderedDict:
         """Assembles the keyword arguments for the ``predict_step``
```

Review comment on the `gc.collect()` line: "do you think it would be a good idea to have an argument … As Adrian said: …"
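Background for the review thread above, which questions the unconditional per-batch `gc.collect()`: CPython frees most tensors by reference counting the moment the last reference drops, so a full cyclic-GC pass on every batch mostly adds overhead unless prediction outputs sit in reference cycles. A minimal sketch of the cheaper pattern (illustrative code, not the Lightning loop itself; `predict_batches` and its signature are hypothetical):

```python
import gc

import torch


def predict_batches(model: torch.nn.Module, batches, return_predictions: bool = False):
    """Hypothetical helper illustrating the memory behavior under discussion."""
    outputs = []
    model.eval()
    for batch in batches:
        with torch.no_grad():
            out = model(batch)
        if return_predictions:
            # keeping a reference is what makes memory grow with dataset size
            outputs.append(out.cpu())
        # otherwise `out` is freed by refcounting at the end of the iteration;
        # no per-batch gc.collect() is needed for that
    if not return_predictions:
        gc.collect()  # a single pass at the end handles any leftover cycles
    return outputs if return_predictions else None
```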
New test file (@@ -0,0 +1,81 @@):

```python
import os

import psutil
import pytest
import torch
from torch.utils.data import DataLoader, Dataset

from lightning.pytorch import Trainer
from lightning.pytorch.demos.boring_classes import BoringModel


class CustomModel(BoringModel):
    def __init__(self):
        super().__init__()
        self.layer = torch.nn.Linear(1000, 2)  # Changed to match LargeDataset dim=1000

    def forward(self, x):
        return self.layer(x)


class LargeDataset(Dataset):
    def __init__(self, size=1000, dim=1000):
        self.data = torch.randn(size, dim)
        self.targets = torch.randint(0, 10, (size,))

    def __len__(self):
        return len(self.data)

    def __iter__(self):
        for i in range(len(self)):
            yield self[i]

    def __getitem__(self, idx):
        # During prediction, return only the input tensor
        if hasattr(self, "prediction_mode") and self.prediction_mode:
            return self.data[idx]
        return self.data[idx], self.targets[idx]

    def set_prediction_mode(self, mode=True):
        self.prediction_mode = mode


def get_memory_usage():
    process = psutil.Process(os.getpid())
    return process.memory_info().rss / 1024 / 1024  # MB


@pytest.mark.parametrize("return_predictions", [True, False])
def test_prediction_memory_leak(tmp_path, return_predictions):
    """Test that memory usage doesn't grow during prediction when return_predictions=False."""
    # Create a model and dataset
    model = CustomModel()
    dataset = LargeDataset()
    dataset.set_prediction_mode(True)  # Set prediction mode
    dataloader = DataLoader(dataset, batch_size=32)

    # Get initial memory usage
    initial_memory = get_memory_usage()

    # Run prediction
    trainer = Trainer(
        default_root_dir=tmp_path,
        accelerator="cpu",
        devices=1,
        max_epochs=1,
    )

    trainer.predict(model, dataloaders=dataloader, return_predictions=return_predictions)

    # Get final memory usage
    final_memory = get_memory_usage()

    # Calculate memory growth
    memory_growth = final_memory - initial_memory

    # When return_predictions=False, memory growth should be minimal
    if not return_predictions:
        assert memory_growth < 100, f"Memory growth {memory_growth}MB is too high when return_predictions=False"
    else:
        # When return_predictions=True, we expect some memory growth due to storing predictions
        assert memory_growth > 0, "Expected memory growth when storing predictions"
```
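One caveat on the test's measurement approach: single before/after `psutil` RSS deltas are noisy, because PyTorch's CPU allocator and the system allocator may cache freed memory, so RSS can stay flat or inflated even after Python objects are released. A sketch of a trend-based check that is less sensitive to a one-off pair of readings (the helper names here are illustrative, not part of the test):

```python
import os

import psutil


def rss_mb() -> float:
    """Resident set size of the current process, in MB."""
    return psutil.Process(os.getpid()).memory_info().rss / 1024 / 1024


def shows_steady_growth(samples: list[float], min_total_mb: float = 50.0) -> bool:
    """Heuristic: a leak shows as sustained growth across samples, not a single jump."""
    if len(samples) < 3:
        return False
    deltas = [b - a for a, b in zip(samples, samples[1:])]
    mostly_increasing = sum(d > 0 for d in deltas) >= 0.8 * len(deltas)
    return mostly_increasing and (samples[-1] - samples[0]) > min_total_mb


# usage: record rss_mb() every N prediction batches (e.g. from a callback)
# and assert not shows_steady_growth(samples) at the end of the run.
```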
Review comment: why are you directly calling the lightning module hook without calling the strategy hook? After a couple of checks and the `precision_plugin` context, it does call the `lightning_module`'s `predict_step`.
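To illustrate the reviewer's point: in Lightning, the strategy's `predict_step` is the layer that enters the precision-plugin context (and handles wrapped or sharded models) before invoking the LightningModule's `predict_step`, so calling the module hook directly skips that setup. A simplified, self-contained sketch of the delegation (not the actual Lightning source; the `Toy*` classes are stand-ins):

```python
from contextlib import contextmanager


class ToyPrecisionPlugin:
    @contextmanager
    def predict_step_context(self):
        # real plugins enter autocast / low-precision contexts here
        yield


class ToyStrategy:
    """Mimics the call path Strategy.predict_step -> LightningModule.predict_step."""

    def __init__(self, lightning_module, precision_plugin):
        self.lightning_module = lightning_module
        self.precision_plugin = precision_plugin

    def predict_step(self, *args, **kwargs):
        # the strategy wraps the module call in the precision context;
        # bypassing the strategy hook means this context is never entered
        with self.precision_plugin.predict_step_context():
            return self.lightning_module.predict_step(*args, **kwargs)
```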