Skip to content

Commit 745c7c7

Browse files
authored
Adjust ruff formatter configs (#571)
## Problem

Updating our version of ruff uncovered some errors related to the `target-version` config in `pyproject.toml`.

## Solution

Update the ruff configuration in `pyproject.toml` and `.pre-commit-config.yaml`. Since ruff is a formatting tool, there should not be any functional change.

## Type of Change

- [x] Infrastructure change (CI configs, etc.)
1 parent a792173 commit 745c7c7

8 files changed

Lines changed: 46 additions & 44 deletions

File tree

.pre-commit-config.yaml

Lines changed: 2 additions & 6 deletions
Original file line number · Diff line number · Diff line change
@@ -7,11 +7,7 @@ repos:
77
- id: check-yaml
88
- id: check-added-large-files
99
- repo: https://github.com/astral-sh/ruff-pre-commit
10-
# Ruff version.
11-
rev: v0.6.7
10+
rev: v0.14.10
1211
hooks:
13-
# Run the linter.
14-
- id: ruff
15-
args: [ --fix ]
16-
# Run the formatter.
12+
- id: ruff-check
1713
- id: ruff-format

pinecone/db_data/index.py

Lines changed: 6 additions & 6 deletions
Original file line number · Diff line number · Diff line change
@@ -464,9 +464,9 @@ def upsert(
464464
)
465465
# When batch_size is provided, async_req cannot be True (checked above),
466466
# so batch_result is always UpsertResponse, not ApplyResult
467-
assert isinstance(
468-
batch_result, UpsertResponse
469-
), "batch_result must be UpsertResponse when batch_size is provided"
467+
assert isinstance(batch_result, UpsertResponse), (
468+
"batch_result must be UpsertResponse when batch_size is provided"
469+
)
470470
pbar.update(batch_result.upserted_count)
471471
# we can't use here pbar.n for the case show_progress=False
472472
total_upserted += batch_result.upserted_count
@@ -591,9 +591,9 @@ def upsert_from_dataframe(
591591
last_result = None
592592
for res in results:
593593
# upsert_from_dataframe doesn't use async_req, so res is always UpsertResponse
594-
assert isinstance(
595-
res, UpsertResponse
596-
), "Expected UpsertResponse when not using async_req"
594+
assert isinstance(res, UpsertResponse), (
595+
"Expected UpsertResponse when not using async_req"
596+
)
597597
upserted_count += res.upserted_count
598598
last_result = res
599599

pinecone/db_data/resources/sync/vector.py

Lines changed: 6 additions & 6 deletions
Original file line number · Diff line number · Diff line change
@@ -188,9 +188,9 @@ def upsert(
188188
)
189189
# When batch_size is provided, async_req cannot be True (checked above),
190190
# so batch_result is always UpsertResponse, not ApplyResult
191-
assert isinstance(
192-
batch_result, UpsertResponse
193-
), "batch_result must be UpsertResponse when batch_size is provided"
191+
assert isinstance(batch_result, UpsertResponse), (
192+
"batch_result must be UpsertResponse when batch_size is provided"
193+
)
194194
pbar.update(batch_result.upserted_count)
195195
# we can't use here pbar.n for the case show_progress=False
196196
total_upserted += batch_result.upserted_count
@@ -289,9 +289,9 @@ def upsert_from_dataframe(
289289
for res in results:
290290
# res is always UpsertResponse when not using async_req
291291
# upsert() doesn't use async_req, so res is always UpsertResponse
292-
assert isinstance(
293-
res, UpsertResponse
294-
), "Expected UpsertResponse when not using async_req"
292+
assert isinstance(res, UpsertResponse), (
293+
"Expected UpsertResponse when not using async_req"
294+
)
295295
upserted_count += res.upserted_count
296296
last_result = res
297297

pyproject.toml

Lines changed: 1 addition & 5 deletions
Original file line number · Diff line number · Diff line change
@@ -122,7 +122,7 @@ exclude = [
122122

123123
line-length = 100
124124
indent-width = 4
125-
target-version = "8.0.0"
125+
target-version = "py310"
126126

127127
[tool.ruff.lint]
128128
# Enable Pyflakes (`F`) and a subset of the pycodestyle (`E`) codes by default.
@@ -156,7 +156,3 @@ docstring-code-line-length = "dynamic"
156156

157157
# E712 Allow == comparison to True/False
158158
"tests/**" = ["E712"]
159-
160-
[tool.black]
161-
line-length = 100
162-
target-version = ["py310"]

tests/integration/rest_asyncio/db/data/test_upsert_sparse.py

Lines changed: 3 additions & 3 deletions
Original file line number · Diff line number · Diff line change
@@ -74,8 +74,8 @@ async def test_upsert_with_batch_size_sparse(sparse_index_host, target_namespace
7474
assert "2" in fetched_vec.vectors
7575
assert "3" in fetched_vec.vectors
7676

77-
assert (
78-
fetched_vec._response_info is not None
79-
), "Expected _response_info to be present on fetch response"
77+
assert fetched_vec._response_info is not None, (
78+
"Expected _response_info to be present on fetch response"
79+
)
8080
logger.info(f"Fetch response info: {fetched_vec._response_info}")
8181
await asyncio_sparse_idx.close()

tests/unit/test_pytest_shard.py

Lines changed: 18 additions & 8 deletions
Original file line number · Diff line number · Diff line change
@@ -114,9 +114,9 @@ def test_e():
114114
# If plugin didn't load (testdir limitation), skip this assertion
115115
if total_test_count > 0:
116116
# Plugin worked - verify sharding
117-
assert (
118-
shard1_count < total_test_count or shard1_count == 0
119-
), "Plugin should filter tests"
117+
assert shard1_count < total_test_count or shard1_count == 0, (
118+
"Plugin should filter tests"
119+
)
120120
# If we got 0 tests, the plugin might have filtered them all out (unlikely but possible)
121121
# Or the plugin didn't load - either way, the test logic is sound
122122

@@ -208,7 +208,9 @@ def test_example():
208208
"--splits must be a positive integer" in stderr_text
209209
or "unrecognized arguments" in stderr_text
210210
or "INTERNALERROR" in stderr_text
211-
), f"Expected validation error, unrecognized args, or internal error, got: {stderr_text[:200]}"
211+
), (
212+
f"Expected validation error, unrecognized args, or internal error, got: {stderr_text[:200]}"
213+
)
212214

213215
result = testdir.runpytest("--splits=-1", "--group=1")
214216
if result.ret == 3: # INTERNAL_ERROR
@@ -218,7 +220,9 @@ def test_example():
218220
"--splits must be a positive integer" in stderr_text
219221
or "unrecognized arguments" in stderr_text
220222
or "INTERNALERROR" in stderr_text
221-
), f"Expected validation error, unrecognized args, or internal error, got: {stderr_text[:200]}"
223+
), (
224+
f"Expected validation error, unrecognized args, or internal error, got: {stderr_text[:200]}"
225+
)
222226

223227
def test_validation_group_must_be_positive(self, testdir):
224228
"""Test that --group must be a positive integer."""
@@ -237,7 +241,9 @@ def test_example():
237241
"--group must be a positive integer" in stderr_text
238242
or "unrecognized arguments" in stderr_text
239243
or "INTERNALERROR" in stderr_text
240-
), f"Expected validation error, unrecognized args, or internal error, got: {stderr_text[:200]}"
244+
), (
245+
f"Expected validation error, unrecognized args, or internal error, got: {stderr_text[:200]}"
246+
)
241247

242248
result = testdir.runpytest("--splits=3", "--group=-1")
243249
if result.ret == 3: # INTERNAL_ERROR
@@ -247,7 +253,9 @@ def test_example():
247253
"--group must be a positive integer" in stderr_text
248254
or "unrecognized arguments" in stderr_text
249255
or "INTERNALERROR" in stderr_text
250-
), f"Expected validation error, unrecognized args, or internal error, got: {stderr_text[:200]}"
256+
), (
257+
f"Expected validation error, unrecognized args, or internal error, got: {stderr_text[:200]}"
258+
)
251259

252260
def test_validation_group_cannot_exceed_splits(self, testdir):
253261
"""Test that --group cannot exceed --splits."""
@@ -266,7 +274,9 @@ def test_example():
266274
"--group (4) must be between 1 and --splits (3)" in stderr_text
267275
or "unrecognized arguments" in stderr_text
268276
or "INTERNALERROR" in stderr_text
269-
), f"Expected validation error, unrecognized args, or internal error, got: {stderr_text[:200]}"
277+
), (
278+
f"Expected validation error, unrecognized args, or internal error, got: {stderr_text[:200]}"
279+
)
270280

271281
def test_plugin_inactive_without_splits(self, testdir):
272282
"""Test that plugin doesn't filter tests when --splits is not provided."""

tests/unit_grpc/test_runner.py

Lines changed: 9 additions & 9 deletions
Original file line number · Diff line number · Diff line change
@@ -23,9 +23,9 @@ def test_run_with_default_metadata(self, mocker):
2323
assert ("client-version", CLIENT_VERSION) in passed_metadata
2424

2525
# Request id assigned for each request
26-
assert any(
27-
item[0] == "request_id" for item in passed_metadata
28-
), "request_id not found in metadata"
26+
assert any(item[0] == "request_id" for item in passed_metadata), (
27+
"request_id not found in metadata"
28+
)
2929
for items in passed_metadata:
3030
if items[0] == "request_id":
3131
assert isinstance(items[1], str)
@@ -47,9 +47,9 @@ def test_each_run_gets_unique_request_id(self, mocker):
4747
for items in mock_func.call_args.kwargs["metadata"]:
4848
if items[0] == "request_id":
4949
second_request_id = items[1]
50-
assert (
51-
second_request_id != first_request_id
52-
), "request_id is not unique for each request"
50+
assert second_request_id != first_request_id, (
51+
"request_id is not unique for each request"
52+
)
5353

5454
def test_run_with_additional_metadata_from_grpc_config(self, mocker):
5555
config = Config(api_key="YOUR_API_KEY")
@@ -89,9 +89,9 @@ def test_with_additional_metadata_from_run(self, mocker):
8989
assert ("service-name", "my-index") in passed_metadata
9090
assert ("client-version", CLIENT_VERSION) in passed_metadata
9191
# Request id
92-
assert any(
93-
item[0] == "request_id" for item in passed_metadata
94-
), "request_id not found in metadata"
92+
assert any(item[0] == "request_id" for item in passed_metadata), (
93+
"request_id not found in metadata"
94+
)
9595
# Extras from configuration
9696
assert ("debug-header", "value123") in passed_metadata
9797
assert ("debug-header2", "value456") in passed_metadata

uv.lock

Lines changed: 1 addition & 1 deletion
Some generated files are not rendered by default. Learn more about customizing how changed files appear on GitHub.

0 commit comments

Comments (0)