Skip to content

Commit 8f7200e

Browse files
committed
fix test
1 parent e65867b commit 8f7200e

File tree

4 files changed

+65
-40
lines changed

4 files changed

+65
-40
lines changed

src/kaggle/api/kaggle_api.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -338,7 +338,7 @@ def kernel_push(self, kernel_push_request): # noqa: E501
338338
"""
339339
with tempfile.TemporaryDirectory() as tmpdir:
340340
meta_file = os.path.join(tmpdir, "kernel-metadata.json")
341-
fd, code_file = tempfile.mkstemp("code", "py", tmpdir, text=True)
341+
(fd, code_file) = tempfile.mkstemp("code", "py", tmpdir, text=True)
342342
fd.write(json.dumps(kernel_push_request.code))
343343
os.close(fd)
344344
with open(meta_file, "w") as f:

src/kaggle/api/kaggle_api_extended.py

Lines changed: 19 additions & 25 deletions
Original file line numberDiff line numberDiff line change
@@ -721,7 +721,7 @@ def _authenticate_with_legacy_apikey(self) -> bool:
721721
return True
722722

723723
def _authenticate_with_access_token(self):
724-
access_token, source = get_access_token_from_env()
724+
(access_token, source) = get_access_token_from_env()
725725
if not access_token:
726726
return False
727727

@@ -1901,7 +1901,7 @@ def dataset_metadata_update(self, dataset, path):
19011901
dataset: The dataset to update.
19021902
path: The path to the metadata file.
19031903
"""
1904-
owner_slug, dataset_slug, effective_path = self.dataset_metadata_prep(dataset, path)
1904+
(owner_slug, dataset_slug, effective_path) = self.dataset_metadata_prep(dataset, path)
19051905
meta_file = self.get_dataset_metadata_file(effective_path)
19061906
with open(meta_file, "r") as f:
19071907
metadata = json.load(f)
@@ -1954,7 +1954,7 @@ def dataset_metadata(self, dataset, path):
19541954
Returns:
19551955
The path to the downloaded metadata file.
19561956
"""
1957-
owner_slug, dataset_slug, effective_path = self.dataset_metadata_prep(dataset, path)
1957+
(owner_slug, dataset_slug, effective_path) = self.dataset_metadata_prep(dataset, path)
19581958

19591959
if not os.path.exists(effective_path):
19601960
os.makedirs(effective_path)
@@ -3462,7 +3462,7 @@ def kernels_output_cli(self, kernel, kernel_opt=None, path=None, force=False, qu
34623462
file_pattern: Regex pattern to match against filenames. Only files matching the pattern will be downloaded.
34633463
"""
34643464
kernel = kernel or kernel_opt
3465-
_, token = self.kernels_output(kernel, path, file_pattern, force, quiet)
3465+
(_, token) = self.kernels_output(kernel, path, file_pattern, force, quiet)
34663466
if token:
34673467
print(f"Next page token: {token}")
34683468

@@ -3508,9 +3508,18 @@ def kernels_status_cli(self, kernel, kernel_opt=None):
35083508
else:
35093509
print('%s has status "%s"' % (kernel, status))
35103510

3511+
def _resolve_kernel_slug(self, kernel):
3512+
"""Parses a kernel string into (owner_slug, kernel_slug)."""
3513+
if "/" in kernel:
3514+
self.validate_kernel_string(kernel)
3515+
owner_slug, kernel_slug = kernel.split("/")
3516+
else:
3517+
owner_slug = self.get_config_value(self.CONFIG_NAME_USER)
3518+
kernel_slug = kernel
3519+
return owner_slug, kernel_slug
3520+
35113521
def benchmarks_pull(self, kernel: str, path: str = None, quiet: bool = False):
35123522
"""Pulls a benchmark notebook and converts it to a .py script."""
3513-
import os
35143523

35153524
try:
35163525
import jupytext
@@ -3557,8 +3566,6 @@ def benchmarks_publish_and_run(
35573566
self, kernel: str = None, path: str = None, file_name: str = None, quiet: bool = False
35583567
):
35593568
"""Converts a local .py benchmark to .ipynb and pushes it to Kaggle."""
3560-
import os
3561-
import json
35623569

35633570
try:
35643571
import jupytext
@@ -3594,12 +3601,7 @@ def benchmarks_publish_and_run(
35943601
if not kernel:
35953602
raise ValueError("A kernel slug must be specified to create a new metadata file.")
35963603

3597-
if "/" in kernel:
3598-
self.validate_kernel_string(kernel)
3599-
owner_slug, kernel_slug = kernel.split("/")
3600-
else:
3601-
owner_slug = self.get_config_value(self.CONFIG_NAME_USER)
3602-
kernel_slug = kernel
3604+
owner_slug, kernel_slug = self._resolve_kernel_slug(kernel)
36033605

36043606
title = kernel_slug.replace("-", " ").title()
36053607
metadata = {
@@ -3627,12 +3629,7 @@ def benchmarks_publish_and_run(
36273629
metadata = json.load(f)
36283630

36293631
if kernel:
3630-
if "/" in kernel:
3631-
self.validate_kernel_string(kernel)
3632-
owner_slug, kernel_slug = kernel.split("/")
3633-
else:
3634-
owner_slug = self.get_config_value(self.CONFIG_NAME_USER)
3635-
kernel_slug = kernel
3632+
owner_slug, kernel_slug = self._resolve_kernel_slug(kernel)
36363633

36373634
new_id = f"{owner_slug}/{kernel_slug}"
36383635
if metadata.get("id") != new_id:
@@ -3666,9 +3663,6 @@ def benchmarks_publish_and_run_cli(self, kernel=None, kernel_opt=None, path=None
36663663

36673664
def benchmarks_get_results(self, kernel: str = None, path: str = None, poll_interval: int = 60, timeout: int = None):
36683665
"""Polls the status of a benchmark until complete, then downloads the output."""
3669-
import os
3670-
import time
3671-
import json
36723666

36733667
if kernel is None:
36743668
check_path = path or os.getcwd()
@@ -3699,7 +3693,7 @@ def benchmarks_get_results(self, kernel: str = None, path: str = None, poll_inte
36993693
print(f"Benchmark {kernel} failed!{error_txt}")
37003694
print(f"Attempting to download partial logs for debugging...")
37013695
try:
3702-
self.kernels_output(kernel, path=path, force=True, quiet=False)
3696+
self.kernels_output(kernel, path=path, file_pattern=None, force=True, quiet=False)
37033697
except Exception as log_err:
37043698
print(f"Could not retrieve backend logs: {log_err}")
37053699
raise ValueError(f"Benchmark execution terminated with an error state.")
@@ -3712,7 +3706,7 @@ def benchmarks_get_results(self, kernel: str = None, path: str = None, poll_inte
37123706

37133707
# Now download output
37143708
print(f"Downloading results for {kernel}...")
3715-
return self.kernels_output(kernel=kernel, path=path, force=True, quiet=False)
3709+
return self.kernels_output(kernel=kernel, path=path, file_pattern=None, force=True, quiet=False)
37163710

37173711
def benchmarks_get_results_cli(self, kernel, kernel_opt=None, path=None, poll_interval=60, timeout=None):
37183712
kernel = kernel or kernel_opt
@@ -4798,7 +4792,7 @@ def files_upload_cli(self, local_paths, inbox_path, no_resume, no_compress):
47984792
files_to_create = []
47994793
with ResumableUploadContext(no_resume) as upload_context:
48004794
for local_path in local_paths:
4801-
upload_file, file_name = self.file_upload_cli(local_path, inbox_path, no_compress, upload_context)
4795+
(upload_file, file_name) = self.file_upload_cli(local_path, inbox_path, no_compress, upload_context)
48024796
if upload_file is None:
48034797
continue
48044798

src/kaggle/cli.py

Lines changed: 3 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1446,7 +1446,9 @@ class Help(object):
14461446
command_model_instances_update = "Update a model variation"
14471447

14481448
# Model Instance Versions params
1449-
param_model_instance_version = "Model variation version URL suffix in format <owner>/<model-name>/<framework>/<variation-slug>/<version-number>"
1449+
param_model_instance_version = (
1450+
"Model variation version URL suffix in format <owner>/<model-name>/<framework>/<variation-slug>/<version-number>"
1451+
)
14501452

14511453
# Model Instance Versions params
14521454
command_model_instance_versions_new = "Create a new model variation version"

tests/test_benchmarks.py

Lines changed: 42 additions & 13 deletions
Original file line numberDiff line numberDiff line change
@@ -22,7 +22,9 @@ def test_benchmarks_pull(self, mock_write, mock_read):
2222
with open(dummy_ipynb, "w") as f:
2323
f.write("{}")
2424

25-
result_dir = self.api.benchmarks_pull("testuser/my-benchmark", path=tmpdir, quiet=True)
25+
result_dir = self.api.benchmarks_pull(
26+
"testuser/my-benchmark", path=tmpdir, quiet=True
27+
)
2628
self.assertEqual(result_dir, tmpdir)
2729

2830
# 1. notebooks should be renamed to benchmark.ipynb
@@ -33,7 +35,9 @@ def test_benchmarks_pull(self, mock_write, mock_read):
3335
# 2. jupytext read and write should be called
3436
mock_read.assert_called_once_with(benchmark_ipynb)
3537
mock_write.assert_called_once()
36-
self.assertEqual(mock_write.call_args[0][1], os.path.join(tmpdir, "benchmark.py"))
38+
self.assertEqual(
39+
mock_write.call_args[0][1], os.path.join(tmpdir, "benchmark.py")
40+
)
3741
self.assertEqual(mock_write.call_args[1]["fmt"], "py:percent")
3842
self.api.kernels_pull.assert_called_once_with(
3943
"testuser/my-benchmark", path=tmpdir, metadata=True, quiet=True
@@ -51,15 +55,20 @@ def test_benchmarks_publish_and_run(self, mock_write, mock_read):
5155
self.api.kernels_push = MagicMock(return_value="push_success")
5256

5357
result = self.api.benchmarks_publish_and_run(
54-
kernel="testuser/new-benchmark", path=tmpdir, file_name="benchmark.py", quiet=True
58+
kernel="testuser/new-benchmark",
59+
path=tmpdir,
60+
file_name="benchmark.py",
61+
quiet=True,
5562
)
5663

5764
self.assertEqual(result, "push_success")
5865

5966
# verify jupytext was used to read .py and write .ipynb
6067
mock_read.assert_called_once_with(py_file, fmt="py:percent")
6168
mock_write.assert_called_once()
62-
self.assertEqual(mock_write.call_args[0][1], os.path.join(tmpdir, "benchmark.ipynb"))
69+
self.assertEqual(
70+
mock_write.call_args[0][1], os.path.join(tmpdir, "benchmark.ipynb")
71+
)
6372

6473
# verify metadata was created correctly
6574
metadata_file = os.path.join(tmpdir, "kernel-metadata.json")
@@ -85,7 +94,14 @@ def test_benchmarks_publish_and_run_existing_metadata(self, mock_write, mock_rea
8594
# Create existing metadata
8695
metadata_file = os.path.join(tmpdir, "kernel-metadata.json")
8796
with open(metadata_file, "w") as f:
88-
json.dump({"id": "otheruser/existing-benchmark", "code_file": "old.ipynb", "keywords": ["tag1"]}, f)
97+
json.dump(
98+
{
99+
"id": "otheruser/existing-benchmark",
100+
"code_file": "old.ipynb",
101+
"keywords": ["tag1"],
102+
},
103+
f,
104+
)
89105

90106
self.api.kernels_push = MagicMock(return_value="push_success")
91107

@@ -118,7 +134,9 @@ def test_benchmarks_publish_and_run_explicit_kernel(self, mock_write, mock_read)
118134
self.api.kernels_push = MagicMock(return_value="push_success")
119135

120136
# Act with explicit kernel override
121-
self.api.benchmarks_publish_and_run(kernel="newuser/new-benchmark", path=tmpdir, quiet=True)
137+
self.api.benchmarks_publish_and_run(
138+
kernel="newuser/new-benchmark", path=tmpdir, quiet=True
139+
)
122140

123141
# Assert
124142
self.api.kernels_push.assert_called_once_with(tmpdir)
@@ -130,7 +148,6 @@ def test_benchmarks_publish_and_run_explicit_kernel(self, mock_write, mock_read)
130148
self.assertNotIn("id_no", metadata)
131149
self.assertIn("personal-benchmark", metadata["keywords"])
132150

133-
134151
@patch("time.sleep")
135152
def test_benchmarks_get_results(self, mock_sleep):
136153
# mock status to return 'running' once, then 'complete'
@@ -139,16 +156,24 @@ def __init__(self, status):
139156
self.status = status
140157
self.failure_message = ""
141158

142-
self.api.kernels_status = MagicMock(side_effect=[MockStatus("running"), MockStatus("complete")])
159+
self.api.kernels_status = MagicMock(
160+
side_effect=[MockStatus("running"), MockStatus("complete")]
161+
)
143162
self.api.kernels_output = MagicMock(return_value="output_data")
144163

145-
result = self.api.benchmarks_get_results("testuser/my-bench", path="some_path", poll_interval=10)
164+
result = self.api.benchmarks_get_results(
165+
"testuser/my-bench", path="some_path", poll_interval=10
166+
)
146167

147168
self.assertEqual(result, "output_data")
148169
self.assertEqual(self.api.kernels_status.call_count, 2)
149170
mock_sleep.assert_called_once_with(10)
150171
self.api.kernels_output.assert_called_once_with(
151-
kernel="testuser/my-bench", path="some_path", force=True, quiet=False
172+
kernel="testuser/my-bench",
173+
path="some_path",
174+
file_pattern=None,
175+
force=True,
176+
quiet=False,
152177
)
153178

154179
def test_benchmarks_get_results_error(self):
@@ -158,6 +183,7 @@ def __init__(self):
158183
self.failure_message = "syntax error"
159184

160185
self.api.kernels_status = MagicMock(return_value=MockStatusError())
186+
self.api.kernels_output = MagicMock()
161187

162188
with self.assertRaisesRegex(ValueError, "error state"):
163189
self.api.benchmarks_get_results("testuser/my-bench")
@@ -168,22 +194,25 @@ class MockStatus:
168194
def __init__(self, status):
169195
self.status = status
170196
self.failure_message = ""
171-
197+
172198
self.api.kernels_status = MagicMock(return_value=MockStatus("complete"))
173199
self.api.kernels_output = MagicMock(return_value="output_data")
174200

175201
with tempfile.TemporaryDirectory() as tmpdir:
176202
# Create existing metadata
177203
metadata_file = os.path.join(tmpdir, "kernel-metadata.json")
178204
with open(metadata_file, "w") as f:
179-
import json
180205
json.dump({"id": "implicit/my-bench"}, f)
181206

182207
result = self.api.benchmarks_get_results(kernel=None, path=tmpdir)
183208

184209
self.assertEqual(result, "output_data")
185210
self.api.kernels_output.assert_called_once_with(
186-
kernel="implicit/my-bench", path=tmpdir, force=True, quiet=False
211+
kernel="implicit/my-bench",
212+
path=tmpdir,
213+
file_pattern=None,
214+
force=True,
215+
quiet=False,
187216
)
188217

189218

0 commit comments

Comments (0)