From 1e7dcd2d34d54a067a9126d5c43cc4c6bea7d5b9 Mon Sep 17 00:00:00 2001
From: mgam <312065559@qq.com>
Date: Fri, 7 Nov 2025 23:25:49 +0800
Subject: [PATCH 01/18] Update pre-commit repo revs.
---
.pre-commit-config.yaml | 22 +++++++++++-----------
1 file changed, 11 insertions(+), 11 deletions(-)
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index ba8038d668..95df26fc0c 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -1,24 +1,24 @@
exclude: ^tests/data/
repos:
- repo: https://github.com/pre-commit/pre-commit
- rev: v4.0.0
+ rev: v4.3.0
hooks:
- id: validate_manifest
- repo: https://github.com/PyCQA/flake8
- rev: 7.1.1
+ rev: 7.3.0
hooks:
- id: flake8
- repo: https://github.com/PyCQA/isort
- rev: 5.11.5
+ rev: 7.0.0
hooks:
- id: isort
- - repo: https://github.com/pre-commit/mirrors-yapf
- rev: v0.32.0
+ - repo: https://github.com/google/yapf
+ rev: v0.43.0
hooks:
- id: yapf
additional_dependencies: [toml]
- repo: https://github.com/pre-commit/pre-commit-hooks
- rev: v5.0.0
+ rev: v6.0.0
hooks:
- id: trailing-whitespace
- id: check-yaml
@@ -31,7 +31,7 @@ repos:
- id: mixed-line-ending
args: ["--fix=lf"]
- repo: https://github.com/executablebooks/mdformat
- rev: 0.7.9
+ rev: 1.0.0
hooks:
- id: mdformat
args: ["--number"]
@@ -40,12 +40,12 @@ repos:
- mdformat_frontmatter
- linkify-it-py
- repo: https://github.com/myint/docformatter
- rev: 06907d0
+ rev: v1.7.7
hooks:
- id: docformatter
args: ["--in-place", "--wrap-descriptions", "79"]
- repo: https://github.com/asottile/pyupgrade
- rev: v3.0.0
+ rev: v3.21.0
hooks:
- id: pyupgrade
args: ["--py36-plus"]
@@ -56,7 +56,7 @@ repos:
args: ["mmengine", "tests"]
- id: remove-improper-eol-in-cn-docs
- repo: https://github.com/pre-commit/mirrors-mypy
- rev: v1.2.0
+ rev: v1.18.2
hooks:
- id: mypy
exclude: |-
@@ -67,6 +67,6 @@ repos:
additional_dependencies: ["types-setuptools", "types-requests", "types-PyYAML"]
- repo: https://github.com/astral-sh/uv-pre-commit
# uv version.
- rev: 0.9.5
+ rev: 0.9.7
hooks:
- id: uv-lock
From 8a1da0c07cf07e162956ce0f587362491f362adf Mon Sep 17 00:00:00 2001
From: mgam <312065559@qq.com>
Date: Fri, 7 Nov 2025 23:26:27 +0800
Subject: [PATCH 02/18] Remove `mdformat-openmmlab` and `mdformat_frontmatter`
 as they are no longer maintained and do not support mdformat rev 1.0.0.
---
.pre-commit-config.yaml | 2 --
1 file changed, 2 deletions(-)
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index 95df26fc0c..34c83291a7 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -36,8 +36,6 @@ repos:
- id: mdformat
args: ["--number"]
additional_dependencies:
- - mdformat-openmmlab
- - mdformat_frontmatter
- linkify-it-py
- repo: https://github.com/myint/docformatter
rev: v1.7.7
From 534bf313b651b28ae9cba9c444a04f93fb4df2e4 Mon Sep 17 00:00:00 2001
From: mgam <312065559@qq.com>
Date: Fri, 7 Nov 2025 23:27:54 +0800
Subject: [PATCH 03/18] `fix-encoding-pragma` has been removed -- use
`pyupgrade` from https://github.com/asottile/pyupgrade
---
.pre-commit-config.yaml | 2 --
1 file changed, 2 deletions(-)
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index 34c83291a7..5e420333f5 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -26,8 +26,6 @@ repos:
- id: requirements-txt-fixer
- id: double-quote-string-fixer
- id: check-merge-conflict
- - id: fix-encoding-pragma
- args: ["--remove"]
- id: mixed-line-ending
args: ["--fix=lf"]
- repo: https://github.com/executablebooks/mdformat
From 9288d438dbe502e2210c2a7c65f0426a5ec95045 Mon Sep 17 00:00:00 2001
From: mgam <312065559@qq.com>
Date: Fri, 7 Nov 2025 23:41:11 +0800
Subject: [PATCH 04/18] Fix detected flake8 F824.
---
mmengine/fileio/backends/registry_utils.py | 2 --
mmengine/fileio/io.py | 2 --
2 files changed, 4 deletions(-)
diff --git a/mmengine/fileio/backends/registry_utils.py b/mmengine/fileio/backends/registry_utils.py
index 4578a4ca76..e2f41fb248 100644
--- a/mmengine/fileio/backends/registry_utils.py
+++ b/mmengine/fileio/backends/registry_utils.py
@@ -28,8 +28,6 @@ def _register_backend(name: str,
prefixes (str or list[str] or tuple[str], optional): The prefix
of the registered storage backend. Defaults to None.
"""
- global backends, prefix_to_backends
-
if not isinstance(name, str):
raise TypeError('the backend name should be a string, '
f'but got {type(name)}')
diff --git a/mmengine/fileio/io.py b/mmengine/fileio/io.py
index fdeb4dc6df..d849abf658 100644
--- a/mmengine/fileio/io.py
+++ b/mmengine/fileio/io.py
@@ -128,8 +128,6 @@ def get_file_backend(
>>> # backend name has a higher priority if 'backend' in backend_args
>>> backend = get_file_backend(uri, backend_args={'backend': 'petrel'})
"""
- global backend_instances
-
if backend_args is None:
backend_args = {}
From bc0d1ef5e85270450bdf8a59a768e650dc6c4041 Mon Sep 17 00:00:00 2001
From: mgam <312065559@qq.com>
Date: Fri, 7 Nov 2025 23:43:22 +0800
Subject: [PATCH 05/18] Automatically modified by pre-commit hooks after
 pre-commit autoupdate.
---
CONTRIBUTING.md | 6 ++--
README.md | 4 +--
docs/en/advanced_tutorials/basedataset.md | 4 +--
docs/en/advanced_tutorials/cross_library.md | 2 +-
docs/en/advanced_tutorials/data_transform.md | 28 +++++++++----------
docs/en/advanced_tutorials/initialize.md | 4 +--
docs/en/advanced_tutorials/model_analysis.md | 6 ++--
docs/en/advanced_tutorials/registry.md | 2 +-
docs/en/design/evaluation.md | 2 +-
docs/en/design/infer.md | 2 +-
docs/en/design/logging.md | 2 +-
docs/en/design/visualization.md | 4 +--
docs/en/examples/train_a_gan.md | 2 +-
docs/en/migration/param_scheduler.md | 2 +-
docs/en/notes/changelog.md | 16 +++++------
docs/en/notes/contributing.md | 6 ++--
docs/en/tutorials/hook.md | 24 ++++++++--------
docs/en/tutorials/param_scheduler.md | 2 +-
mmengine/_strategy/deepspeed.py | 8 +++---
mmengine/config/config.py | 21 ++++++++------
mmengine/dataset/utils.py | 3 +-
mmengine/fileio/backends/local_backend.py | 4 +--
mmengine/fileio/file_client.py | 4 +--
mmengine/hooks/checkpoint_hook.py | 8 +++---
mmengine/model/test_time_aug.py | 7 +++--
mmengine/runner/checkpoint.py | 7 +++--
mmengine/utils/dl_utils/torch_ops.py | 6 ++--
mmengine/visualization/visualizer.py | 5 ++--
tests/test_analysis/test_jit_analysis.py | 7 +++--
tests/test_dataset/test_base_dataset.py | 8 +++---
.../test_optimizer/test_optimizer_wrapper.py | 12 ++++----
31 files changed, 113 insertions(+), 105 deletions(-)
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 618f68835f..b8964ec3b1 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -75,7 +75,7 @@ pre-commit run --all-files
If the installation process is interrupted, you can repeatedly run `pre-commit run ... ` to continue the installation.
-If the code does not conform to the code style specification, pre-commit will raise a warning and fixes some of the errors automatically.
+If the code does not conform to the code style specification, pre-commit will raise a warning and fixes some of the errors automatically.
@@ -234,7 +234,7 @@ The config for a pre-commit hook is stored in [.pre-commit-config](./.pre-commit
5. Provide clear and meaningful PR description
- - Task name should be clarified in title. The general format is: \[Prefix\] Short description of the PR (Suffix)
- - Prefix: add new feature \[Feature\], fix bug \[Fix\], related to documents \[Docs\], in developing \[WIP\] (which will not be reviewed temporarily)
+ - Task name should be clarified in title. The general format is: [Prefix] Short description of the PR (Suffix)
+ - Prefix: add new feature [Feature], fix bug [Fix], related to documents [Docs], in developing [WIP] (which will not be reviewed temporarily)
- Introduce main changes, results and influences on other modules in short description
- Associate related issues and pull requests with a milestone
diff --git a/README.md b/README.md
index 4534123986..8a56c7d275 100644
--- a/README.md
+++ b/README.md
@@ -83,9 +83,9 @@ MMEngine is a foundational library for training deep learning models based on Py
Supported PyTorch Versions
-| MMEngine | PyTorch | Python |
+| MMEngine | PyTorch | Python |
| -------- | ------------ | --------------- |
-| main | >=1.6 \<=2.1 | >=3.10, \<=3.11 |
+| main | >=1.6 \<=2.1 | >=3.10, \<=3.11 |
diff --git a/docs/en/design/infer.md b/docs/en/design/infer.md
index fa7582819f..c4ac474aff 100644
--- a/docs/en/design/infer.md
+++ b/docs/en/design/infer.md
@@ -92,7 +92,7 @@ When performing inference, the following steps are typically executed:
3. visualize: Visualization of predicted results.
4. postprocess: Post-processing of predicted results, including result format conversion, exporting predicted results, etc.
-To improve the user experience of the inferencer, we do not want users to have to configure parameters for each step when performing inference. In other words, we hope that users can simply configure parameters for the `__call__` interface without being aware of the above process and complete the inference.
+To improve the user experience of the inferencer, we do not want users to have to configure parameters for each step when performing inference. In other words, we hope that users can simply configure parameters for the `__call__` interface without being aware of the above process and complete the inference.
The `__call__` interface will execute the aforementioned steps in order, but it is not aware of which step the parameters provided by the user should be assigned to. Therefore, when developing a `CustomInferencer`, developers need to define four class attributes: `preprocess_kwargs`, `forward_kwargs`, `visualize_kwargs`, and `postprocess_kwargs`. Each attribute is a set of strings that are used to specify which step the parameters in the `__call__` interface correspond to:
diff --git a/docs/en/design/logging.md b/docs/en/design/logging.md
index 68a976bfc1..9b68eb8ece 100644
--- a/docs/en/design/logging.md
+++ b/docs/en/design/logging.md
@@ -10,7 +10,7 @@

-Each scalar (losses, learning rates, etc.) during training is encapsulated by HistoryBuffer, managed by MessageHub in key-value pairs, formatted by LogProcessor and then exported to various visualization backends by [LoggerHook](mmengine.hooks.LoggerHook). **In most cases, statistical methods of these scalars can be configured through the LogProcessor without understanding the data flow.** Before diving into the design of the logging system, please read through [logging tutorial](../advanced_tutorials/logging.md) first for familiarizing basic use cases.
+Each scalar (losses, learning rates, etc.) during training is encapsulated by HistoryBuffer, managed by MessageHub in key-value pairs, formatted by LogProcessor and then exported to various visualization backends by [LoggerHook](mmengine.hooks.LoggerHook). **In most cases, statistical methods of these scalars can be configured through the LogProcessor without understanding the data flow.** Before diving into the design of the logging system, please read through [logging tutorial](../advanced_tutorials/logging.md) first for familiarizing basic use cases.
## HistoryBuffer
diff --git a/docs/en/design/visualization.md b/docs/en/design/visualization.md
index f9161d1447..f161bed525 100644
--- a/docs/en/design/visualization.md
+++ b/docs/en/design/visualization.md
@@ -11,7 +11,7 @@ Visualization provides an intuitive explanation of the training and testing proc
Based on the above requirements, we proposed the `Visualizer` and various `VisBackend` such as `LocalVisBackend`, `WandbVisBackend`, and `TensorboardVisBackend` in OpenMMLab 2.0. The visualizer could not only visualize the image data, but also things like configurations, scalars, and model structure.
-- For convenience, the APIs provided by the `Visualizer` implement the drawing and storage functions. As an internal property of `Visualizer`, `VisBackend` will be called by `Visualizer` to write data to different backends.
+- For convenience, the APIs provided by the `Visualizer` implement the drawing and storage functions. As an internal property of `Visualizer`, `VisBackend` will be called by `Visualizer` to write data to different backends.
- Considering that you may want to write data to multiple backends after drawing, `Visualizer` can be configured with multiple backends. When the user calls the storage API of the `Visualizer`, it will traverse and call all the specified APIs of `VisBackend` internally.
The UML diagram of the two is as follows.
@@ -46,7 +46,7 @@ The above APIs can be called in a chain except for `draw_featmap` because the im
- [add_scalars](mmengine.visualization.Visualizer.add_scalars) writes multiple scalars to a specific storage backend at once
- [add_datasample](mmengine.visualization.Visualizer.add_datasample) the abstract interface for each repositories to draw data sample
-Interfaces beginning with the `add` prefix represent storage APIs. \[datasample\] (`./data_element.md`)is the unified interface of each downstream repository in the OpenMMLab 2.0, and `add_datasample` can process the data sample directly .
+Interfaces beginning with the `add` prefix represent storage APIs. [datasample] (`./data_element.md`)is the unified interface of each downstream repository in the OpenMMLab 2.0, and `add_datasample` can process the data sample directly .
3. Other APIs
diff --git a/docs/en/examples/train_a_gan.md b/docs/en/examples/train_a_gan.md
index 2617fda767..d84be9d3d5 100644
--- a/docs/en/examples/train_a_gan.md
+++ b/docs/en/examples/train_a_gan.md
@@ -148,7 +148,7 @@ from mmengine.model import ImgDataPreprocessor
data_preprocessor = ImgDataPreprocessor(mean=([127.5]), std=([127.5]))
```
-The following code implements the basic algorithm of GAN. To implement the algorithm using MMEngine, you need to inherit from the [BaseModel](mmengine.model.BaseModel) and implement the training process in the train_step. GAN requires alternating training of the generator and discriminator, which are implemented by train_discriminator and train_generator and implement disc_loss and gen_loss to calculate the discriminator loss function and generator loss function.
+The following code implements the basic algorithm of GAN. To implement the algorithm using MMEngine, you need to inherit from the [BaseModel](mmengine.model.BaseModel) and implement the training process in the train_step. GAN requires alternating training of the generator and discriminator, which are implemented by train_discriminator and train_generator and implement disc_loss and gen_loss to calculate the discriminator loss function and generator loss function.
More details about BaseModel, refer to [Model tutorial](../tutorials/model.md).
```python
diff --git a/docs/en/migration/param_scheduler.md b/docs/en/migration/param_scheduler.md
index 64867252e6..1c77ab8be4 100644
--- a/docs/en/migration/param_scheduler.md
+++ b/docs/en/migration/param_scheduler.md
@@ -435,7 +435,7 @@ param_scheduler = [
-Notice: `by_epoch` defaults to `False` in MMCV. It now defaults to `True` in MMEngine.
+Notice: `by_epoch` defaults to `False` in MMCV. It now defaults to `True` in MMEngine.
### LinearAnnealingLrUpdaterHook migration
diff --git a/docs/en/notes/changelog.md b/docs/en/notes/changelog.md
index 0aa64bfdbc..89c27c0811 100644
--- a/docs/en/notes/changelog.md
+++ b/docs/en/notes/changelog.md
@@ -117,7 +117,7 @@ A total of 3 developers contributed to this release. Thanks [@HIT-cwh](https://g
### Contributors
-A total of 9 developers contributed to this release. Thanks [@POI-WX](https://github.com/POI-WX), [@whlook](https://github.com/whlook), [@jonbakerfish](https://github.com/jonbakerfish), [@LZHgrla](https://github.com/LZHgrla), [@Ben-Louis](https://github.com/Ben-Louis), [@YiyaoYang1](https://github.com/YiyaoYang1), [@fanqiNO1](https://github.com/fanqiNO1), [@HAOCHENYE](https://github.com/HAOCHENYE), [@zhouzaida](https://github.com/zhouzaida)
+A total of 9 developers contributed to this release. Thanks [@POI-WX](https://github.com/POI-WX), [@whlook](https://github.com/whlook), [@jonbakerfish](https://github.com/jonbakerfish), [@LZHgrla](https://github.com/LZHgrla), [@Ben-Louis](https://github.com/Ben-Louis), [@YiyaoYang1](https://github.com/YiyaoYang1), [@fanqiNO1](https://github.com/fanqiNO1), [@HAOCHENYE](https://github.com/HAOCHENYE), [@zhouzaida](https://github.com/zhouzaida)
## v0.9.0 (10/10/2023)
@@ -345,7 +345,7 @@ A total of 9 developers contributed to this release. Thanks [@evdcush](https://g
### Contributors
-A total of 19 developers contributed to this release. Thanks [@Hongru-Xiao](https://github.com/Hongru-Xiao) [@i-aki-y](https://github.com/i-aki-y) [@Bomsw](https://github.com/Bomsw) [@KickCellarDoor](https://github.com/KickCellarDoor) [@zhouzaida](https://github.com/zhouzaida) [@YQisme](https://github.com/YQisme) [@gachiemchiep](https://github.com/gachiemchiep) [@CescMessi](https://github.com/CescMessi) [@W-ZN](https://github.com/W-ZN) [@Ginray](https://github.com/Ginray) [@adrianjoshua-strutt](https://github.com/adrianjoshua-strutt) [@CokeDong](https://github.com/CokeDong) [@xin-li-67](https://github.com/xin-li-67) [@Xiangxu-0103](https://github.com/Xiangxu-0103) [@HAOCHENYE](https://github.com/HAOCHENYE) [@Shiyang980713](https://github.com/Shiyang980713) [@TankNee](https://github.com/TankNee) [@zimonitrome](https://github.com/zimonitrome) [@gy-7](https://github.com/gy-7)
+A total of 19 developers contributed to this release. Thanks [@Hongru-Xiao](https://github.com/Hongru-Xiao) [@i-aki-y](https://github.com/i-aki-y) [@Bomsw](https://github.com/Bomsw) [@KickCellarDoor](https://github.com/KickCellarDoor) [@zhouzaida](https://github.com/zhouzaida) [@YQisme](https://github.com/YQisme) [@gachiemchiep](https://github.com/gachiemchiep) [@CescMessi](https://github.com/CescMessi) [@W-ZN](https://github.com/W-ZN) [@Ginray](https://github.com/Ginray) [@adrianjoshua-strutt](https://github.com/adrianjoshua-strutt) [@CokeDong](https://github.com/CokeDong) [@xin-li-67](https://github.com/xin-li-67) [@Xiangxu-0103](https://github.com/Xiangxu-0103) [@HAOCHENYE](https://github.com/HAOCHENYE) [@Shiyang980713](https://github.com/Shiyang980713) [@TankNee](https://github.com/TankNee) [@zimonitrome](https://github.com/zimonitrome) [@gy-7](https://github.com/gy-7)
## v0.7.3 (04/28/2023)
@@ -369,7 +369,7 @@ A total of 19 developers contributed to this release. Thanks [@Hongru-Xiao](http
- Enhance the support for MLU device by [@josh6688](https://github.com/josh6688) in https://github.com/open-mmlab/mmengine/pull/1075
- Support configuring synchronization directory for BaseMetric by [@HAOCHENYE](https://github.com/HAOCHENYE) in https://github.com/open-mmlab/mmengine/pull/1074
- Support accepting multiple `input_shape` for `get_model_complexity_info` by [@sjiang95](https://github.com/sjiang95) in https://github.com/open-mmlab/mmengine/pull/1065
-- Enhance docstring and error catching in `MessageHub` by [@HAOCHENYE](https://github.com/HAOCHENYE) in https://github.com/open-mmlab/mmengine/pull/1098
+- Enhance docstring and error catching in `MessageHub` by [@HAOCHENYE](https://github.com/HAOCHENYE) in https://github.com/open-mmlab/mmengine/pull/1098
- Enhance the efficiency of Visualizer.show by [@HAOCHENYE](https://github.com/HAOCHENYE) in https://github.com/open-mmlab/mmengine/pull/1015
- Update repo list by [@HAOCHENYE](https://github.com/HAOCHENYE) in https://github.com/open-mmlab/mmengine/pull/1108
- Enhance error message during custom import by [@HAOCHENYE](https://github.com/HAOCHENYE) in https://github.com/open-mmlab/mmengine/pull/1102
@@ -599,7 +599,7 @@ A total of 8 developers contributed to this release. Thanks [@LEFTeyex](https://
- `hub.get_model` fails on some MMCls models by [@C1rN09](https://github.com/C1rN09) in https://github.com/open-mmlab/mmengine/pull/784
- Fix `BaseModel.to` and `BaseDataPreprocessor.to` to make them consistent with `torch.nn.Module` by [@C1rN09](https://github.com/C1rN09) in https://github.com/open-mmlab/mmengine/pull/783
- Fix creating a new logger at PretrainedInit by [@xiexinch](https://github.com/xiexinch) in https://github.com/open-mmlab/mmengine/pull/791
-- Fix `ZeroRedundancyOptimizer` ambiguous error with param groups when PyTorch \< 1.12.0 by [@C1rN09](https://github.com/C1rN09) in https://github.com/open-mmlab/mmengine/pull/818
+- Fix `ZeroRedundancyOptimizer` ambiguous error with param groups when PyTorch < 1.12.0 by [@C1rN09](https://github.com/C1rN09) in https://github.com/open-mmlab/mmengine/pull/818
- Fix MessageHub set resumed key repeatedly by [@HAOCHENYE](https://github.com/HAOCHENYE) in https://github.com/open-mmlab/mmengine/pull/839
- Add `progress` argument to `load_from_http` by [@austinmw](https://github.com/austinmw) in https://github.com/open-mmlab/mmengine/pull/770
- Ensure metrics is not empty when saving best checkpoint by [@zhouzaida](https://github.com/zhouzaida) in https://github.com/open-mmlab/mmengine/pull/849
@@ -613,10 +613,10 @@ A total of 8 developers contributed to this release. Thanks [@LEFTeyex](https://
- Fix typos in EN `contributing.md` by [@RangeKing](https://github.com/RangeKing) in https://github.com/open-mmlab/mmengine/pull/792
- Translate data transform docs. by [@mzr1996](https://github.com/mzr1996) in https://github.com/open-mmlab/mmengine/pull/737
- Replace markdown table with html table by [@HAOCHENYE](https://github.com/HAOCHENYE) in https://github.com/open-mmlab/mmengine/pull/800
-- Fix wrong example in `Visualizer.draw_polygons` by [@lyviva](https://github.com/lyviva) in https://github.com/open-mmlab/mmengine/pull/798
+- Fix wrong example in `Visualizer.draw_polygons` by [@lyviva](https://github.com/lyviva) in https://github.com/open-mmlab/mmengine/pull/798
- Fix docstring format and rescale the images by [@zhouzaida](https://github.com/zhouzaida) in https://github.com/open-mmlab/mmengine/pull/802
- Fix failed link in registry by [@zhouzaida](https://github.com/zhouzaida) in https://github.com/open-mmlab/mmengine/pull/811
-- Fix typos by [@shanmo](https://github.com/shanmo) in https://github.com/open-mmlab/mmengine/pull/814
+- Fix typos by [@shanmo](https://github.com/shanmo) in https://github.com/open-mmlab/mmengine/pull/814
- Fix wrong links and typos in docs by [@shanmo](https://github.com/shanmo) in https://github.com/open-mmlab/mmengine/pull/815
- Translate `save_gpu_memory.md` by [@xin-li-67](https://github.com/xin-li-67) in https://github.com/open-mmlab/mmengine/pull/803
- Translate the documentation of hook design by [@zhouzaida](https://github.com/zhouzaida) in https://github.com/open-mmlab/mmengine/pull/780
@@ -691,7 +691,7 @@ A total of 16 developers contributed to this release. Thanks [@BayMaxBHL](https:
- Add documents for `clip_grad`, and support clip grad by value. by [@HAOCHENYE](https://github.com/HAOCHENYE) in https://github.com/open-mmlab/mmengine/pull/513
- Add ROCm info when collecting env by [@zhouzaida](https://github.com/zhouzaida) in https://github.com/open-mmlab/mmengine/pull/633
- Add a function to mark the deprecated function. by [@HAOCHENYE](https://github.com/HAOCHENYE) in https://github.com/open-mmlab/mmengine/pull/609
-- Call `register_all_modules` in `Registry.get()` by [@HAOCHENYE](https://github.com/HAOCHENYE) in https://github.com/open-mmlab/mmengine/pull/541
+- Call `register_all_modules` in `Registry.get()` by [@HAOCHENYE](https://github.com/HAOCHENYE) in https://github.com/open-mmlab/mmengine/pull/541
- Deprecate `_save_to_state_dict` implemented in mmengine by [@HAOCHENYE](https://github.com/HAOCHENYE) in https://github.com/open-mmlab/mmengine/pull/610
- Add `ignore_keys` in ConcatDataset by [@BIGWangYuDong](https://github.com/BIGWangYuDong) in https://github.com/open-mmlab/mmengine/pull/556
@@ -768,7 +768,7 @@ A total of 16 developers contributed to this release. Thanks [@BayMaxBHL](https:
- Fix uploading image in wandb backend [@okotaku](https://github.com/okotaku) in https://github.com/open-mmlab/mmengine/pull/510
- Fix loading state dictionary in `EMAHook` by [@okotaku](https://github.com/okotaku) in https://github.com/open-mmlab/mmengine/pull/507
- Fix circle import in `EMAHook` by [@HAOCHENYE](https://github.com/HAOCHENYE) in https://github.com/open-mmlab/mmengine/pull/523
-- Fix unit test could fail caused by `MultiProcessTestCase` by [@HAOCHENYE](https://github.com/HAOCHENYE) in https://github.com/open-mmlab/mmengine/pull/535
+- Fix unit test could fail caused by `MultiProcessTestCase` by [@HAOCHENYE](https://github.com/HAOCHENYE) in https://github.com/open-mmlab/mmengine/pull/535
- Remove unnecessary "if statement" in `Registry` by [@MambaWong](https://github.com/MambaWong) in https://github.com/open-mmlab/mmengine/pull/536
- Fix `_save_to_state_dict` by [@HAOCHENYE](https://github.com/HAOCHENYE) in https://github.com/open-mmlab/mmengine/pull/542
- Support comparing NumPy array dataset meta in `Runner.resume` by [@HAOCHENYE](https://github.com/HAOCHENYE) in https://github.com/open-mmlab/mmengine/pull/511
diff --git a/docs/en/notes/contributing.md b/docs/en/notes/contributing.md
index deb398f02f..a79592f9a1 100644
--- a/docs/en/notes/contributing.md
+++ b/docs/en/notes/contributing.md
@@ -77,7 +77,7 @@ pre-commit run --all-files
If the installation process is interrupted, you can repeatedly run `pre-commit run ... ` to continue the installation.
-If the code does not conform to the code style specification, pre-commit will raise a warning and fixes some of the errors automatically.
+If the code does not conform to the code style specification, pre-commit will raise a warning and fixes some of the errors automatically.
@@ -236,7 +236,7 @@ The config for a pre-commit hook is stored in [.pre-commit-config](https://githu
5. Provide clear and meaningful PR description
- - Task name should be clarified in title. The general format is: \[Prefix\] Short description of the PR (Suffix)
- - Prefix: add new feature \[Feature\], fix bug \[Fix\], related to documents \[Docs\], in developing \[WIP\] (which will not be reviewed temporarily)
+ - Task name should be clarified in title. The general format is: [Prefix] Short description of the PR (Suffix)
+ - Prefix: add new feature [Feature], fix bug [Fix], related to documents [Docs], in developing [WIP] (which will not be reviewed temporarily)
- Introduce main changes, results and influences on other modules in short description
- Associate related issues and pull requests with a milestone
diff --git a/docs/en/tutorials/hook.md b/docs/en/tutorials/hook.md
index 34f12d0084..c48a04367e 100644
--- a/docs/en/tutorials/hook.md
+++ b/docs/en/tutorials/hook.md
@@ -20,23 +20,23 @@ Each hook has a corresponding priority. At each mount point, hooks with higher p
**default hooks**
-| Name | Function | Priority |
+| Name | Function | Priority |
| :-----------------------------------------: | :------------------------------------------------------------------------------------------------------------------: | :---------------: |
-| [RuntimeInfoHook](#runtimeinfohook) | update runtime information into message hub | VERY_HIGH (10) |
-| [IterTimerHook](#itertimerhook) | Update the time spent during iteration into message hub | NORMAL (50) |
-| [DistSamplerSeedHook](#distsamplerseedhook) | Ensure distributed Sampler shuffle is active | NORMAL (50) |
-| [LoggerHook](#loggerhook) | Collect logs from different components of `Runner` and write them to terminal, JSON file, tensorboard and wandb .etc | BELOW_NORMAL (60) |
-| [ParamSchedulerHook](#paramschedulerhook) | update some hyper-parameters of optimizer | LOW (70) |
-| [CheckpointHook](#checkpointhook) | Save checkpoints periodically | VERY_LOW (90) |
+| [RuntimeInfoHook](#runtimeinfohook) | update runtime information into message hub | VERY_HIGH (10) |
+| [IterTimerHook](#itertimerhook) | Update the time spent during iteration into message hub | NORMAL (50) |
+| [DistSamplerSeedHook](#distsamplerseedhook) | Ensure distributed Sampler shuffle is active | NORMAL (50) |
+| [LoggerHook](#loggerhook) | Collect logs from different components of `Runner` and write them to terminal, JSON file, tensorboard and wandb .etc | BELOW_NORMAL (60) |
+| [ParamSchedulerHook](#paramschedulerhook) | update some hyper-parameters of optimizer | LOW (70) |
+| [CheckpointHook](#checkpointhook) | Save checkpoints periodically | VERY_LOW (90) |
**custom hooks**
-| Name | Function | Priority |
+| Name | Function | Priority |
| :---------------------------------: | :----------------------------------------------------------------------: | :-----------: |
-| [EMAHook](#emahook) | Apply Exponential Moving Average (EMA) on the model during training | NORMAL (50) |
-| [EmptyCacheHook](#emptycachehook) | Releases all unoccupied cached GPU memory during the process of training | NORMAL (50) |
-| [SyncBuffersHook](#syncbuffershook) | Synchronize model buffers at the end of each epoch | NORMAL (50) |
-| [ProfilerHook](#profilerhook) | Analyze the execution time and GPU memory usage of model operators | VERY_LOW (90) |
+| [EMAHook](#emahook) | Apply Exponential Moving Average (EMA) on the model during training | NORMAL (50) |
+| [EmptyCacheHook](#emptycachehook) | Releases all unoccupied cached GPU memory during the process of training | NORMAL (50) |
+| [SyncBuffersHook](#syncbuffershook) | Synchronize model buffers at the end of each epoch | NORMAL (50) |
+| [ProfilerHook](#profilerhook) | Analyze the execution time and GPU memory usage of model operators | VERY_LOW (90) |
```{note}
It is not recommended to modify the priority of the default hooks, as hooks with lower priority may depend on hooks with higher priority. For example, `CheckpointHook` needs to have a lower priority than ParamSchedulerHook so that the saved optimizer state is correct. Also, the priority of custom hooks defaults to `NORMAL (50)`.
diff --git a/docs/en/tutorials/param_scheduler.md b/docs/en/tutorials/param_scheduler.md
index f8be7057e4..2194e5815f 100644
--- a/docs/en/tutorials/param_scheduler.md
+++ b/docs/en/tutorials/param_scheduler.md
@@ -77,7 +77,7 @@ param_scheduler = dict(type='MultiStepLR', by_epoch=True, milestones=[8, 11], ga
Note that the parameter `by_epoch` is added here, which controls the frequency of learning rate adjustment. When set to True, it means adjusting by epoch. When set to False, it means adjusting by iteration. The default value is True.
-In the above example, it means to adjust according to epochs. At this time, the unit of the parameters is epoch. For example, \[8, 11\] in `milestones` means that the learning rate will be multiplied by 0.1 at the end of the 8 and 11 epoch.
+In the above example, it means to adjust according to epochs. At this time, the unit of the parameters is epoch. For example, [8, 11] in `milestones` means that the learning rate will be multiplied by 0.1 at the end of the 8 and 11 epoch.
When the frequency is modified, the meaning of the count-related settings of the scheduler will be changed accordingly. When `by_epoch=True`, the numbers in milestones indicate at which epoch the learning rate decay is performed, and when `by_epoch=False` it indicates at which iteration the learning rate decay is performed.
diff --git a/mmengine/_strategy/deepspeed.py b/mmengine/_strategy/deepspeed.py
index 3f89ff760d..3d945a6a54 100644
--- a/mmengine/_strategy/deepspeed.py
+++ b/mmengine/_strategy/deepspeed.py
@@ -310,10 +310,10 @@ def __init__(
self.config.setdefault('gradient_accumulation_steps', 1)
self.config['steps_per_print'] = steps_per_print
self._inputs_to_half = inputs_to_half
- assert (exclude_frozen_parameters is None or
- digit_version(deepspeed.__version__) >= digit_version('0.13.2')
- ), ('DeepSpeed >= 0.13.2 is required to enable '
- 'exclude_frozen_parameters')
+ assert (exclude_frozen_parameters is None or digit_version(
+ deepspeed.__version__) >= digit_version('0.13.2')), (
+ 'DeepSpeed >= 0.13.2 is required to enable '
+ 'exclude_frozen_parameters')
self.exclude_frozen_parameters = exclude_frozen_parameters
register_deepspeed_optimizers()
diff --git a/mmengine/config/config.py b/mmengine/config/config.py
index 7df8dcc52c..183138eea9 100644
--- a/mmengine/config/config.py
+++ b/mmengine/config/config.py
@@ -46,9 +46,10 @@
def _lazy2string(cfg_dict, dict_type=None):
if isinstance(cfg_dict, dict):
dict_type = dict_type or type(cfg_dict)
- return dict_type(
- {k: _lazy2string(v, dict_type)
- for k, v in dict.items(cfg_dict)})
+ return dict_type({
+ k: _lazy2string(v, dict_type)
+ for k, v in dict.items(cfg_dict)
+ })
elif isinstance(cfg_dict, (tuple, list)):
return type(cfg_dict)(_lazy2string(v, dict_type) for v in cfg_dict)
elif isinstance(cfg_dict, (LazyAttr, LazyObject)):
@@ -271,13 +272,15 @@ def __reduce_ex__(self, proto):
# called by CPython interpreter during pickling. See more details in
# https://github.com/python/cpython/blob/8d61a71f9c81619e34d4a30b625922ebc83c561b/Objects/typeobject.c#L6196 # noqa: E501
if digit_version(platform.python_version()) < digit_version('3.8'):
- return (self.__class__, ({k: v
- for k, v in super().items()}, ), None,
- None, None)
+ return (self.__class__, ({
+ k: v
+ for k, v in super().items()
+ }, ), None, None, None)
else:
- return (self.__class__, ({k: v
- for k, v in super().items()}, ), None,
- None, None, None)
+ return (self.__class__, ({
+ k: v
+ for k, v in super().items()
+ }, ), None, None, None, None)
def __eq__(self, other):
if isinstance(other, ConfigDict):
diff --git a/mmengine/dataset/utils.py b/mmengine/dataset/utils.py
index 2c9cf96497..d140cc8dc4 100644
--- a/mmengine/dataset/utils.py
+++ b/mmengine/dataset/utils.py
@@ -158,7 +158,8 @@ def default_collate(data_batch: Sequence) -> Any:
return [default_collate(samples) for samples in transposed]
elif isinstance(data_item, Mapping):
return data_item_type({
- key: default_collate([d[key] for d in data_batch])
+ key:
+ default_collate([d[key] for d in data_batch])
for key in data_item
})
else:
diff --git a/mmengine/fileio/backends/local_backend.py b/mmengine/fileio/backends/local_backend.py
index c7d5f04621..84ebe95514 100644
--- a/mmengine/fileio/backends/local_backend.py
+++ b/mmengine/fileio/backends/local_backend.py
@@ -156,8 +156,8 @@ def isfile(self, filepath: Union[str, Path]) -> bool:
"""
return osp.isfile(filepath)
- def join_path(self, filepath: Union[str, Path],
- *filepaths: Union[str, Path]) -> str:
+ def join_path(self, filepath: Union[str, Path], *filepaths:
+ Union[str, Path]) -> str:
r"""Concatenate all file paths.
Join one or more filepath components intelligently. The return value
diff --git a/mmengine/fileio/file_client.py b/mmengine/fileio/file_client.py
index bbb81b3dfc..0939f93c00 100644
--- a/mmengine/fileio/file_client.py
+++ b/mmengine/fileio/file_client.py
@@ -385,8 +385,8 @@ def isfile(self, filepath: Union[str, Path]) -> bool:
"""
return self.client.isfile(filepath)
- def join_path(self, filepath: Union[str, Path],
- *filepaths: Union[str, Path]) -> str:
+ def join_path(self, filepath: Union[str, Path], *filepaths:
+ Union[str, Path]) -> str:
r"""Concatenate all file paths.
Join one or more filepath components intelligently. The return value
diff --git a/mmengine/hooks/checkpoint_hook.py b/mmengine/hooks/checkpoint_hook.py
index 92a4867bb9..3adb78c7dc 100644
--- a/mmengine/hooks/checkpoint_hook.py
+++ b/mmengine/hooks/checkpoint_hook.py
@@ -196,10 +196,10 @@ def __init__(self,
self.save_best = save_best
# rule logic
- assert (isinstance(rule, str) or is_list_of(rule, str)
- or (rule is None)), (
- '"rule" should be a str or list of str or None, '
- f'but got {type(rule)}')
+ assert (isinstance(rule, str) or is_list_of(rule, str) or
+ (rule
+ is None)), ('"rule" should be a str or list of str or None, '
+ f'but got {type(rule)}')
if isinstance(rule, list):
# check the length of rule list
assert len(rule) in [
diff --git a/mmengine/model/test_time_aug.py b/mmengine/model/test_time_aug.py
index c623eec8bc..2f19248c2c 100644
--- a/mmengine/model/test_time_aug.py
+++ b/mmengine/model/test_time_aug.py
@@ -124,9 +124,10 @@ def test_step(self, data):
data_list: Union[List[dict], List[list]]
if isinstance(data, dict):
num_augs = len(data[next(iter(data))])
- data_list = [{key: value[idx]
- for key, value in data.items()}
- for idx in range(num_augs)]
+ data_list = [{
+ key: value[idx]
+ for key, value in data.items()
+ } for idx in range(num_augs)]
elif isinstance(data, (tuple, list)):
num_augs = len(data[0])
data_list = [[_data[idx] for _data in data]
diff --git a/mmengine/runner/checkpoint.py b/mmengine/runner/checkpoint.py
index b9c62a8f70..a5809013a1 100644
--- a/mmengine/runner/checkpoint.py
+++ b/mmengine/runner/checkpoint.py
@@ -661,9 +661,10 @@ def _load_checkpoint_to_model(model,
# strip prefix of state_dict
metadata = getattr(state_dict, '_metadata', OrderedDict())
for p, r in revise_keys:
- state_dict = OrderedDict(
- {re.sub(p, r, k): v
- for k, v in state_dict.items()})
+ state_dict = OrderedDict({
+ re.sub(p, r, k): v
+ for k, v in state_dict.items()
+ })
# Keep metadata in state_dict
state_dict._metadata = metadata
diff --git a/mmengine/utils/dl_utils/torch_ops.py b/mmengine/utils/dl_utils/torch_ops.py
index 2550ae6986..85dc3100d2 100644
--- a/mmengine/utils/dl_utils/torch_ops.py
+++ b/mmengine/utils/dl_utils/torch_ops.py
@@ -4,9 +4,9 @@
from ..version_utils import digit_version
from .parrots_wrapper import TORCH_VERSION
-_torch_version_meshgrid_indexing = (
- 'parrots' not in TORCH_VERSION
- and digit_version(TORCH_VERSION) >= digit_version('1.10.0a0'))
+_torch_version_meshgrid_indexing = ('parrots' not in TORCH_VERSION
+ and digit_version(TORCH_VERSION)
+ >= digit_version('1.10.0a0'))
def torch_meshgrid(*tensors):
diff --git a/mmengine/visualization/visualizer.py b/mmengine/visualization/visualizer.py
index 6979395aca..6653497d6e 100644
--- a/mmengine/visualization/visualizer.py
+++ b/mmengine/visualization/visualizer.py
@@ -754,8 +754,9 @@ def draw_bboxes(
assert bboxes.shape[-1] == 4, (
f'The shape of `bboxes` should be (N, 4), but got {bboxes.shape}')
- assert (bboxes[:, 0] <= bboxes[:, 2]).all() and (bboxes[:, 1] <=
- bboxes[:, 3]).all()
+ assert (bboxes[:, 0] <= bboxes[:, 2]).all() and (bboxes[:, 1]
+ <= bboxes[:,
+ 3]).all()
if not self._is_posion_valid(bboxes.reshape((-1, 2, 2))):
warnings.warn(
'Warning: The bbox is out of bounds,'
diff --git a/tests/test_analysis/test_jit_analysis.py b/tests/test_analysis/test_jit_analysis.py
index be10309d0f..4b1dfaf595 100644
--- a/tests/test_analysis/test_jit_analysis.py
+++ b/tests/test_analysis/test_jit_analysis.py
@@ -634,9 +634,10 @@ def dummy_ops_handle(inputs: List[Any],
dummy_flops = {}
for name, counts in model.flops.items():
- dummy_flops[name] = Counter(
- {op: flop
- for op, flop in counts.items() if op != self.lin_op})
+ dummy_flops[name] = Counter({
+ op: flop
+ for op, flop in counts.items() if op != self.lin_op
+ })
dummy_flops[''][dummy_name] = 2 * dummy_out
dummy_flops['fc'][dummy_name] = dummy_out
dummy_flops['submod'][dummy_name] = dummy_out
diff --git a/tests/test_dataset/test_base_dataset.py b/tests/test_dataset/test_base_dataset.py
index f4ec815ec2..48bba665fe 100644
--- a/tests/test_dataset/test_base_dataset.py
+++ b/tests/test_dataset/test_base_dataset.py
@@ -733,13 +733,13 @@ def test_length(self):
def test_getitem(self):
assert (
self.cat_datasets[0]['imgs'] == self.dataset_a[0]['imgs']).all()
- assert (self.cat_datasets[0]['imgs'] !=
- self.dataset_b[0]['imgs']).all()
+ assert (self.cat_datasets[0]['imgs']
+ != self.dataset_b[0]['imgs']).all()
assert (
self.cat_datasets[-1]['imgs'] == self.dataset_b[-1]['imgs']).all()
- assert (self.cat_datasets[-1]['imgs'] !=
- self.dataset_a[-1]['imgs']).all()
+ assert (self.cat_datasets[-1]['imgs']
+ != self.dataset_a[-1]['imgs']).all()
def test_get_data_info(self):
assert self.cat_datasets.get_data_info(
diff --git a/tests/test_optim/test_optimizer/test_optimizer_wrapper.py b/tests/test_optim/test_optimizer/test_optimizer_wrapper.py
index ba4ca77d11..763c2d054c 100644
--- a/tests/test_optim/test_optimizer/test_optimizer_wrapper.py
+++ b/tests/test_optim/test_optimizer/test_optimizer_wrapper.py
@@ -455,8 +455,8 @@ def test_init(self):
not torch.cuda.is_available(),
reason='`torch.cuda.amp` is only available when pytorch-gpu installed')
def test_step(self, dtype):
- if dtype is not None and (digit_version(TORCH_VERSION) <
- digit_version('1.10.0')):
+ if dtype is not None and (digit_version(TORCH_VERSION)
+ < digit_version('1.10.0')):
raise unittest.SkipTest('Require PyTorch version >= 1.10.0 to '
'support `dtype` argument in autocast')
if dtype == 'bfloat16' and not bf16_supported():
@@ -478,8 +478,8 @@ def test_step(self, dtype):
not torch.cuda.is_available(),
reason='`torch.cuda.amp` is only available when pytorch-gpu installed')
def test_backward(self, dtype):
- if dtype is not None and (digit_version(TORCH_VERSION) <
- digit_version('1.10.0')):
+ if dtype is not None and (digit_version(TORCH_VERSION)
+ < digit_version('1.10.0')):
raise unittest.SkipTest('Require PyTorch version >= 1.10.0 to '
'support `dtype` argument in autocast')
if dtype == 'bfloat16' and not bf16_supported():
@@ -539,8 +539,8 @@ def test_load_state_dict(self):
not torch.cuda.is_available(),
reason='`torch.cuda.amp` is only available when pytorch-gpu installed')
def test_optim_context(self, dtype, target_dtype):
- if dtype is not None and (digit_version(TORCH_VERSION) <
- digit_version('1.10.0')):
+ if dtype is not None and (digit_version(TORCH_VERSION)
+ < digit_version('1.10.0')):
raise unittest.SkipTest('Require PyTorch version >= 1.10.0 to '
'support `dtype` argument in autocast')
if dtype == 'bfloat16' and not bf16_supported():
From 5f34de33376d28f0b09324f5ff268437039c8fad Mon Sep 17 00:00:00 2001
From: mgam <312065559@qq.com>
Date: Fri, 7 Nov 2025 23:45:14 +0800
Subject: [PATCH 06/18] Fix mypy error: No overload variant of "join" matches
argument types "SimplePath", "str" [call-overload]
---
mmengine/utils/package_utils.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/mmengine/utils/package_utils.py b/mmengine/utils/package_utils.py
index 79188247df..41bc86fdf1 100644
--- a/mmengine/utils/package_utils.py
+++ b/mmengine/utils/package_utils.py
@@ -68,7 +68,7 @@ def get_installed_path(package: str) -> str:
top_level = dist.read_text('top_level.txt')
if top_level:
module_name = top_level.split('\n')[0].strip()
- possible_path = osp.join(dist.locate_file(''), module_name)
+ possible_path = osp.join(str(dist.locate_file('')), module_name)
if osp.exists(possible_path):
return possible_path
From 33b4c026d9f6455aa3d44d709363444d31cfaa4c Mon Sep 17 00:00:00 2001
From: mgam <312065559@qq.com>
Date: Fri, 7 Nov 2025 23:56:12 +0800
Subject: [PATCH 07/18] Fix mypy error: No overload variant of "__add__" of
"tuple" matches argument type "InstanceData" [operator]
---
mmengine/structures/instance_data.py | 11 +++++------
1 file changed, 5 insertions(+), 6 deletions(-)
diff --git a/mmengine/structures/instance_data.py b/mmengine/structures/instance_data.py
index a083b5b505..595d818d06 100644
--- a/mmengine/structures/instance_data.py
+++ b/mmengine/structures/instance_data.py
@@ -1,7 +1,7 @@
# Copyright (c) OpenMMLab. All rights reserved.
import itertools
from collections.abc import Sized
-from typing import Any, List, Union
+from typing import Any, List, Union, cast
import numpy as np
import torch
@@ -9,8 +9,8 @@
from mmengine.device import get_device
from .base_data_element import BaseDataElement
-BoolTypeTensor: Union[Any]
-LongTypeTensor: Union[Any]
+BoolTypeTensor: Any
+LongTypeTensor: Any
if get_device() == 'npu':
BoolTypeTensor = Union[torch.BoolTensor, torch.npu.BoolTensor]
@@ -25,8 +25,7 @@
BoolTypeTensor = Union[torch.BoolTensor, torch.cuda.BoolTensor]
LongTypeTensor = Union[torch.LongTensor, torch.cuda.LongTensor]
-IndexType: Union[Any] = Union[str, slice, int, list, LongTypeTensor,
- BoolTypeTensor, np.ndarray]
+IndexType = Union[str, slice, int, list, LongTypeTensor, BoolTypeTensor, np.ndarray]
# Modified from
@@ -284,7 +283,7 @@ def cat(instances_list: List['InstanceData']) -> 'InstanceData':
new_data = instances_list[0].__class__(
metainfo=instances_list[0].metainfo)
for k in instances_list[0].keys():
- values = [results[k] for results in instances_list]
+ values = [cast(Any, results[k]) for results in instances_list]
v0 = values[0]
if isinstance(v0, torch.Tensor):
new_values = torch.cat(values, dim=0)
From e019d3730408f0b61f09c8cdf0e39b0535831e33 Mon Sep 17 00:00:00 2001
From: mgam <312065559@qq.com>
Date: Fri, 7 Nov 2025 23:59:53 +0800
Subject: [PATCH 08/18] Fix mypy error: "