Commit 15a38f1

Merge pull request #28 from IBM/fix-pep-508
add deps pyproject
2 parents 20803d2 + 87bf596 commit 15a38f1

4 files changed, 42 additions and 16 deletions

.github/workflows/commit_check.yml

Lines changed: 6 additions & 8 deletions
@@ -15,21 +15,19 @@ jobs:
       - uses: actions/checkout@v4
         with:
           fetch-depth: 0 # Get full git history
-      - name: Install package
-        run: |
-          pip install --upgrade pip
-          pip install -e .
       - name: Set up Python
         uses: actions/setup-python@v5
         with:
           python-version: '3.10.17'
-      - name: Install dependencies
+      - name: Install package
         run: |
-          python -m pip install --upgrade pip
-          pip install -r requirements_dev.txt
-          pip install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cpu
+          pip install --upgrade pip
+          pip install uv
+          uv venv --python 3.10.17
+          uv pip install -e ".[dev]" --torch-backend=auto
       - name: Pytest
         run: |
+          source .venv/bin/activate
           make pylint
           make pycodestyle
           make mypy
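
The rewritten install step creates a uv-managed virtual environment and installs the package with its "[dev]" extra, so the pinned lint and type-check tools now come from pyproject.toml rather than a separate requirements_dev.txt. A minimal local sanity check (illustrative only, not part of the workflow) that the pinned tools resolved as expected after running uv pip install -e ".[dev]":

```python
# Illustrative helper, not part of the repository: confirm the dev tools pinned
# in pyproject.toml are installed at the expected versions.
from importlib.metadata import PackageNotFoundError, version

EXPECTED = {"mypy": "1.8.0", "pylint": "2.15.7", "pycodestyle": "2.11.1", "black": "24.3.0"}

for name, want in EXPECTED.items():
    try:
        got = version(name)
    except PackageNotFoundError:
        print(f"{name}: not installed")
        continue
    print(f"{name} {got}: {'ok' if got == want else 'expected ' + want}")
```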

examples/deepspeed_and_huggingface/utils.py

Lines changed: 2 additions & 2 deletions
@@ -23,7 +23,7 @@
 from aihwkit_lightning.exceptions import ArgumentError
 
 
-class CustomTrainer(Trainer):
+class CustomTrainer(Trainer):  # pylint: disable=too-few-public-methods
     """Custom trainer handling weight clipping."""
 
     # overwriting for clipping the weights
@@ -117,7 +117,7 @@ def create_rpu_config(args):
     return rpu_config
 
 
-class PrettySafeLoader(yaml.SafeLoader):
+class PrettySafeLoader(yaml.SafeLoader):  # pylint: disable=too-few-public-methods
     """Allows specifying tuples in yaml config."""
 
     def construct_python_tuple(self, node):
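
For context, pylint's too-few-public-methods check (R0903) flags classes that expose fewer than two public methods, which is exactly the shape of the two thin subclasses above; the targeted inline disables silence it without relaxing the project-wide configuration. A small self-contained sketch of the same pattern (the class name is illustrative, not taken from the repository):

```python
# Illustrative sketch: a one-method SafeLoader subclass, the pattern that
# triggers pylint R0903 and motivates the inline disable above.
import yaml


class TupleSafeLoader(yaml.SafeLoader):  # pylint: disable=too-few-public-methods
    """Toy loader that understands YAML tuples, mirroring PrettySafeLoader."""

    def construct_python_tuple(self, node):
        """Build a Python tuple from a YAML sequence node."""
        return tuple(self.construct_sequence(node))


# Register the constructor for the standard python/tuple tag.
TupleSafeLoader.add_constructor(
    "tag:yaml.org,2002:python/tuple", TupleSafeLoader.construct_python_tuple
)

print(yaml.load("!!python/tuple [1, 2, 3]", Loader=TupleSafeLoader))  # (1, 2, 3)
```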

pyproject.toml

Lines changed: 33 additions & 3 deletions
@@ -1,5 +1,6 @@
 [build-system]
-requires = ["setuptools"]
+requires = ["setuptools>=61.0", "wheel"]
+build-backend = "setuptools.build_meta"
 
 [project]
 requires-python = ">= 3.8"
@@ -16,7 +17,12 @@ maintainers = [
 description = "IBM Analog Hardware Acceleration Kit - Lightning"
 readme = "README.md"
 license = "MIT"
-license-files = ["LICENCSE.TXT"]
+license-files = ["LICENSE.txt"]
+dependencies = [
+    "torch>=1.10.0",
+    "typing_extensions>=4.0.0",
+    "tqdm",
+]
 keywords = [
     "ai",
     "analog",
@@ -32,9 +38,33 @@ keywords = [
     "non-volatile memory",
     "phase-change material"
 ]
+[project.optional-dependencies]
+dev = [
+    "mypy==1.8.0",
+    "pylint==2.15.7",
+    "pycodestyle==2.11.1",
+    "black==24.3.0",
+    "pytest",
+]
+huggingface = [
+    "transformers>=4.20.0",
+    "datasets",
+]
+triton = [
+    "triton>=2.0.0",
+]
+
 [project.urls]
 Paper = "https://openreview.net/forum?id=QNdxOgGmhR"
 Documentation = "https://aihwkit-lightning.readthedocs.io/en/latest/"
 Repository = "https://github.com/IBM/aihwkit-lightning"
 Issues = "https://github.com/IBM/aihwkit-lightning/issues"
-Changelog = "https://github.com/IBM/aihwkit-lightning/blob/main/CHANGELOG.md"
+Changelog = "https://github.com/IBM/aihwkit-lightning/blob/main/CHANGELOG.md"
+
+[tool.setuptools.packages.find]
+where = ["src"]
+
+[tool.mypy]
+ignore_missing_imports = true
+disable_error_code = ["operator", "index", "union-attr"]
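
With this change the runtime requirements and the optional "dev", "huggingface", and "triton" groups are declared directly in pyproject.toml as PEP 508 requirement strings, which is what the fix-pep-508 branch name refers to. A short sketch (assuming the packaging library, which ships alongside modern pip and setuptools) of how such strings are parsed:

```python
# Illustrative only: parse a few of the requirement strings declared above and
# report the distribution name and version specifier each one carries.
from packaging.requirements import InvalidRequirement, Requirement

candidates = ["torch>=1.10.0", "typing_extensions>=4.0.0", "tqdm", "transformers>=4.20.0"]

for text in candidates:
    try:
        req = Requirement(text)
    except InvalidRequirement as exc:
        print(f"{text!r}: invalid PEP 508 requirement ({exc})")
    else:
        spec = str(req.specifier) or "<any version>"
        print(f"{text!r} -> name={req.name}, specifier={spec}")
```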

tests/test_post_training_weight_quantization.py

Lines changed: 1 addition & 3 deletions
@@ -106,9 +106,7 @@ def test_post_training_weight_quantization(
     analog_model = convert_to_analog(model, rpu_config=rpu_config)
 
     # create an analog optimizer
-    optim = AnalogOptimizer(
-        AdamW, analog_model.analog_layers, analog_model.parameters(), lr=0.001
-    )
+    optim = AnalogOptimizer(AdamW, analog_model.analog_layers, analog_model.parameters(), lr=0.001)
     for _ in range(1):
         optim.zero_grad()
         out = analog_model(inp)
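
The multi-line constructor call is collapsed onto a single line that stays within the project's line-length limit; behaviour is unchanged. A rough usage sketch of the pattern the test exercises (the import paths and the RPU config class below are assumptions about the package layout, not verbatim from the repository):

```python
# Rough sketch only: module paths are assumed and may differ from the package.
import torch
from torch.optim import AdamW

from aihwkit_lightning.nn.conversion import convert_to_analog  # assumed path
from aihwkit_lightning.optim import AnalogOptimizer  # assumed path
from aihwkit_lightning.simulator.configs import TorchInferenceRPUConfig  # assumed path

model = torch.nn.Linear(16, 4)
rpu_config = TorchInferenceRPUConfig()
analog_model = convert_to_analog(model, rpu_config=rpu_config)

# The optimizer wraps a standard torch optimizer class and also receives the
# analog layers so analog-specific post-update handling can run.
optim = AnalogOptimizer(AdamW, analog_model.analog_layers, analog_model.parameters(), lr=0.001)

inp = torch.randn(8, 16)
optim.zero_grad()
analog_model(inp).sum().backward()
optim.step()
```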
