Test Runner: Make it easier to tune/bump timeouts as needed
davidfstr committed Jan 8, 2024
2 parents 730224a + 2259688 · commit dcd7071
Showing 6 changed files with 216 additions and 102 deletions.
6 changes: 3 additions & 3 deletions .github/workflows/push-github-action.yml
@@ -27,7 +27,7 @@ jobs:
- macos-latest
python-version: ["3.8", "3.9"]
runs-on: ${{ matrix.os }}
- timeout-minutes: 12  # normally takes 9 min 50 secs, as of 2023-12-17
+ timeout-minutes: 15  # 150% of normal time: 9 min 50 secs, as of 2023-12-17
steps:
- name: Checkout code
uses: actions/checkout@v3
@@ -98,7 +98,7 @@ jobs:
- ubuntu-22.04
python-version: ["3.8", "3.9"]
runs-on: ${{ matrix.os }}
- timeout-minutes: 5  # normally takes 3 min 40 sec, as of 2023-12-17
+ timeout-minutes: 6  # 150% of normal time: 3 min 40 sec, as of 2023-12-17
steps:
- name: Checkout code
uses: actions/checkout@v3
@@ -183,7 +183,7 @@ jobs:
- windows-latest
python-version: ["3.8", "3.9"]
runs-on: ${{ matrix.os }}
- timeout-minutes: 18  # normally takes 14 min 40 secs, as of 2023-12-17
+ timeout-minutes: 23  # 150% of normal time: 14 min 40 secs, as of 2023-12-17
steps:
- name: Checkout code
uses: actions/checkout@v3
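The updated values encode a simple sizing rule: each job's `timeout-minutes` becomes roughly 150% of the last measured normal duration, rounded up to a whole minute. A minimal sketch of that arithmetic (the durations come from the comments above; the exact rounding is an assumption, and the Windows job evidently keeps one extra minute of margin):

```python
import math

def hard_timeout_minutes(normal_secs: float, multiplier: float = 1.5) -> int:
    # Pad the measured duration by the multiplier, then round up
    # to a whole minute for GitHub Actions' `timeout-minutes` field
    return math.ceil(normal_secs * multiplier / 60)

assert hard_timeout_minutes(9 * 60 + 50) == 15   # macOS job
assert hard_timeout_minutes(3 * 60 + 40) == 6    # Ubuntu job
assert hard_timeout_minutes(14 * 60 + 40) == 22  # Windows job uses 23: an extra minute of margin
```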
9 changes: 9 additions & 0 deletions RELEASE_NOTES.md
@@ -33,6 +33,15 @@ Release Notes
* XML files like Atom feeds and RSS feeds are now served correctly,
without introducing an invalid `<script>` tag.

+ * Testing improvements
+   * Waits now use a soft timeout in addition to a hard timeout,
+     which makes it easier to tune/bump timeout durations as needed.
+   * Triggering a soft timeout causes a warning to be logged.
+   * Warnings logged during a test run are collected and reported
+     at the end of the test run.
+   * Warnings logged during a test run are reported to GitHub Actions
+     as warning annotations.

* Fixes
* Fix disappearance of error nodes when new root URL or group is added.

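Concretely, a run that only exceeds a soft timeout still passes, but ends with a report along these lines (the format follows `warnings.formatwarning` as used in `src/crystal/tests/index.py` below; the file, line, and durations shown here are illustrative):

```
Warnings:
- src/crystal/tests/test_shell.py:805: UserWarning: Soft timeout exceeded (4.2s > 4.0s) while reading until '>>> '.
```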
140 changes: 102 additions & 38 deletions src/crystal/tests/index.py
@@ -1,3 +1,4 @@
+ from contextlib import contextmanager
from crystal.tests import (
test_addgroup,
test_addrooturl,
@@ -34,8 +35,13 @@
import sys
import time
import traceback
- from typing import Any, Callable, Coroutine, Dict, List, Optional, Tuple
+ from typing import Any, Callable, Coroutine, Dict, List, Iterator, Optional, Tuple
from unittest import SkipTest
+ import warnings


+ # Path to parent directory of the "crystal" package
+ _SOURCE_DIRPATH = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))


def _test_functions_in_module(mod) -> List[Callable]:
@@ -104,49 +110,53 @@ def _run_tests(test_names: List[str]) -> bool:
result_for_test_func_id = {} # type: Dict[_TestFuncId, Optional[Exception]]
start_time = time.time() # capture
run_count = 0
for test_func in _TEST_FUNCS:
if not callable(test_func):
raise ValueError(f'Test function is not callable: {test_func}')
test_func_id = (test_func.__module__, test_func.__name__) # type: _TestFuncId
test_name = f'{test_func_id[0]}.{test_func_id[1]}'

# Only run test if it was requested (or if all tests are to be run)
if len(test_names) > 0:
if test_name not in test_names and test_func.__module__ not in test_names:
continue
run_count += 1
with warnings.catch_warnings(record=True) as warning_list, _warnings_sent_to_ci():
assert warning_list is not None

os.environ['CRYSTAL_SCREENSHOT_ID'] = test_name

print('=' * 70)
print(f'RUNNING: {test_func_id[1]} ({test_func_id[0]})')
print('-' * 70)
try:
run_test(test_func)
except AssertionError as e:
result_for_test_func_id[test_func_id] = e
for test_func in _TEST_FUNCS:
if not callable(test_func):
raise ValueError(f'Test function is not callable: {test_func}')
test_func_id = (test_func.__module__, test_func.__name__) # type: _TestFuncId
test_name = f'{test_func_id[0]}.{test_func_id[1]}'

traceback.print_exc(file=sys.stdout)
print('FAILURE')
except SkipTest as e:
result_for_test_func_id[test_func_id] = e
# Only run test if it was requested (or if all tests are to be run)
if len(test_names) > 0:
if test_name not in test_names and test_func.__module__ not in test_names:
continue
run_count += 1

print(f'SKIP ({str(e)})')
except Exception as e:
result_for_test_func_id[test_func_id] = e
os.environ['CRYSTAL_SCREENSHOT_ID'] = test_name

if not isinstance(e, SubtestFailed):
print('=' * 70)
print(f'RUNNING: {test_func_id[1]} ({test_func_id[0]})')
print('-' * 70)
try:
run_test(test_func)
except AssertionError as e:
result_for_test_func_id[test_func_id] = e

traceback.print_exc(file=sys.stdout)
print(f'ERROR ({e.__class__.__name__})')
else:
result_for_test_func_id[test_func_id] = None
print('FAILURE')
except SkipTest as e:
result_for_test_func_id[test_func_id] = e

print(f'SKIP ({str(e)})')
except Exception as e:
result_for_test_func_id[test_func_id] = e

if not isinstance(e, SubtestFailed):
traceback.print_exc(file=sys.stdout)
print(f'ERROR ({e.__class__.__name__})')
else:
result_for_test_func_id[test_func_id] = None

print('OK')
print()

print('OK')
print()

# Garbage collect, running any finalizers in __del__() early,
# such as the warnings printed by ListenableMixin
gc.collect()
# Garbage collect, running any finalizers in __del__() early,
# such as the warnings printed by ListenableMixin
gc.collect()

end_time = time.time() # capture
delta_time = end_time - start_time

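The key structural change in the hunk above is that the whole test loop now runs inside `warnings.catch_warnings(record=True)`, so warnings raised by any test are captured in `warning_list` and survive until the end-of-run summary. A standalone sketch of that pattern (`run_all` and `tests` are illustrative names, not Crystal's API, and the filter configuration is an assumption):

```python
import warnings

def run_all(tests) -> None:
    # Record (rather than print) every warning raised while the tests run ...
    with warnings.catch_warnings(record=True) as warning_list:
        warnings.simplefilter('always')  # record duplicates too
        for test in tests:
            test()
        # ... then report them together at the end of the run
        for w in warning_list:
            print(f'- {w.filename}:{w.lineno}: {w.category.__name__}: {w.message}')
```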
@@ -202,10 +212,64 @@ def _run_tests(test_names: List[str]) -> bool:
print()
print(f'{"OK" if is_ok else "FAILURE"}{suffix}')

# Print warnings, if any
if len(warning_list) >= 1:
print()
print('Warnings:')
for w in warning_list:
if w.filename.startswith(_SOURCE_DIRPATH):
short_filepath = os.path.relpath(w.filename, start=_SOURCE_DIRPATH)
else:
short_filepath = w.filename
w_str = warnings.formatwarning(w.message, w.category, short_filepath, w.lineno, w.line)
print('- ' + w_str, end='')

# Print command to rerun failed tests
if len(failed_test_names) != 0:
print()
print('Rerun failed tests with:')
print(f'$ crystal --test {" ".join(failed_test_names)}')
print()

return is_ok


@contextmanager
def _warnings_sent_to_ci() -> Iterator[None]:
if not _running_in_ci():
yield
return

super_showwarning = warnings.showwarning # capture

def showwarning(message, category, filename, lineno, file=None, line=None):
# Try to reformat `filename` to use the Linux format so that warning
# annotations are associated with the correct file
#
# Depending on OS, `filename` initially looks like:
# - macOS: setup/dist/Crystal Web Archiver.app/Contents/Resources/lib/python38.zip/crystal/tests/index.py
# - Linux: src/crystal/tests/index.py
# - Windows: crystal\tests\index.pyc
filename_parts = filename.split(os.path.sep)
if 'crystal' in filename_parts:
filename_parts = ['src'] + filename_parts[filename_parts.index('crystal'):]
if filename_parts[-1].endswith('.pyc'):
filename_parts[-1] = filename_parts[-1][:-1] # convert .pyc ending to .py
filename = '/'.join(filename_parts) # reinterpret

# Create warning annotation in GitHub Action's [Summary > Annotations] section
#
# Syntax: https://docs.github.com/en/actions/using-workflows/workflow-commands-for-github-actions#example-setting-a-warning-message
print(f'::warning file={filename},line={lineno}::{message}')

return super_showwarning(message, category, filename, lineno, file, line)

warnings.showwarning = showwarning
try:
yield
finally:
warnings.showwarning = super_showwarning


def _running_in_ci() -> bool:
return os.environ.get('GITHUB_ACTIONS') == 'true'
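Two details of `_warnings_sent_to_ci` are worth pulling out. First, the `::warning file=...,line=...::...` line is GitHub Actions' workflow-command syntax: any line printed to stdout in that shape becomes an annotation in the run's Summary page. Second, annotations only attach to the right file when the path is repo-relative in Linux form, hence the normalization step. A standalone restatement of that step, exercised with the example paths from the comment above:

```python
import os

def normalize_for_annotation(filename: str) -> str:
    # Rewrite a platform-specific path to the repo-relative Linux form
    parts = filename.split(os.path.sep)
    if 'crystal' in parts:
        parts = ['src'] + parts[parts.index('crystal'):]
    if parts[-1].endswith('.pyc'):
        parts[-1] = parts[-1][:-1]  # convert .pyc ending to .py
    return '/'.join(parts)

# On Windows: 'crystal\\tests\\index.pyc'  -> 'src/crystal/tests/index.py'
# On Linux:   'src/crystal/tests/index.py' -> 'src/crystal/tests/index.py' (unchanged)
```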
66 changes: 46 additions & 20 deletions src/crystal/tests/test_shell.py
@@ -2,7 +2,10 @@
from contextlib import contextmanager
from crystal import __version__ as crystal_version
from crystal.tests.util.asserts import *
- from crystal.tests.util.wait import DEFAULT_WAIT_PERIOD, DEFAULT_WAIT_TIMEOUT, WaitTimedOut
+ from crystal.tests.util.wait import (
+     DEFAULT_WAIT_PERIOD, DEFAULT_WAIT_TIMEOUT, HARD_TIMEOUT_MULTIPLIER,
+     WaitTimedOut,
+ )
from crystal.tests.util.windows import MainWindow
from crystal.tests.util.screenshots import screenshot_if_raises
from crystal.tests.util.server import served_project
@@ -25,6 +28,7 @@
from unittest import skip, SkipTest, TestCase
from unittest.mock import ANY
import urllib
+ import warnings


_EXPECTED_PROXY_PUBLIC_MEMBERS = [] # type: List[str]
@@ -401,7 +405,9 @@ def test_can_write_project_with_shell(subtests: SubtestsContext) -> None:
# Test can import Project
assertEqual('', _py_eval(crystal, 'from crystal.model import Project'))
# Test can create project
- assertEqual('', _py_eval(crystal, f'p = Project({project_dirpath!r})'))
+ assertEqual('', _py_eval(crystal, f'p = Project({project_dirpath!r})',
+     # 2.0s isn't long enough for macOS test runners on GitHub Actions
+     timeout=4.0))

with subtests.test(case='test can create project entities', return_if_failure=True):
# Test can import Resource
@@ -765,28 +771,48 @@ def _read_until(

stop_suffix_bytes_choices = [s.encode(stream.encoding) for s in stop_suffix]

soft_timeout = timeout
hard_timeout = timeout * HARD_TIMEOUT_MULTIPLIER

read_buffer = b''
found_stop_suffix = None # type: Optional[str]
start_time = time.time()
while True:
last_read_bytes = stream.buffer.read() # type: ignore[attr-defined]
if last_read_bytes is not None:
# NOTE: Quadratic performance.
# Not using for large amounts of text so I don't care.
read_buffer += last_read_bytes
for (i, s_bytes) in enumerate(stop_suffix_bytes_choices):
if read_buffer.endswith(s_bytes):
found_stop_suffix = stop_suffix[i]
hard_timeout_exceeded = False
try:
while True:
last_read_bytes = stream.buffer.read() # type: ignore[attr-defined]
if last_read_bytes is not None:
# NOTE: Quadratic performance.
# Not using for large amounts of text so I don't care.
read_buffer += last_read_bytes
for (i, s_bytes) in enumerate(stop_suffix_bytes_choices):
if read_buffer.endswith(s_bytes):
found_stop_suffix = stop_suffix[i]
break
if found_stop_suffix is not None:
break
if found_stop_suffix is not None:
break
delta_time = time.time() - start_time
if delta_time > timeout:
raise WaitTimedOut(
f'Timed out after {timeout:.1f}s while '
f'reading until {stop_suffix!r}. '
f'Read so far: {read_buffer.decode(stream.encoding)!r}')
time.sleep(period)
delta_time = time.time() - start_time
if delta_time > hard_timeout:
hard_timeout_exceeded = True
raise WaitTimedOut(
f'Timed out after {timeout:.1f}s while '
f'reading until {stop_suffix!r}. '
f'Read so far: {read_buffer.decode(stream.encoding)!r}')
time.sleep(period)
finally:
# Warn if soft timeout exceeded
if not hard_timeout_exceeded:
delta_time = time.time() - start_time
if delta_time > soft_timeout:
warnings.warn(
('Soft timeout exceeded (%.1fs > %.1fs) while '
'reading until %r.') % (
delta_time,
soft_timeout,
stop_suffix
),
stacklevel=2)

return (read_buffer.decode(stream.encoding), found_stop_suffix)


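The `HARD_TIMEOUT_MULTIPLIER` constant is imported from `crystal.tests.util.wait`, which is not part of this diff, so its value is not shown here. The general shape of the soft/hard split, as a hedged sketch of a reusable helper (the multiplier value and the use of `TimeoutError` are assumptions; the real code raises `WaitTimedOut`):

```python
import time
import warnings

HARD_TIMEOUT_MULTIPLIER = 2.0  # assumed; the real constant lives in crystal.tests.util.wait

def wait_for(condition, soft_timeout: float, period: float = 0.1) -> None:
    # Fail only once the hard timeout is exceeded; merely warn when the
    # soft timeout is exceeded, so timeouts can be tuned before they break CI
    hard_timeout = soft_timeout * HARD_TIMEOUT_MULTIPLIER
    start_time = time.time()
    while not condition():
        if time.time() - start_time > hard_timeout:
            raise TimeoutError(f'Timed out after {hard_timeout:.1f}s')
        time.sleep(period)
    delta_time = time.time() - start_time
    if delta_time > soft_timeout:
        warnings.warn(
            f'Soft timeout exceeded ({delta_time:.1f}s > {soft_timeout:.1f}s)',
            stacklevel=2)
```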
1 change: 0 additions & 1 deletion src/crystal/tests/util/tasks.py
@@ -95,7 +95,6 @@ async def wait_for_download_to_start_and_finish(
# Wait while downloading
await wait_while(
first_task_title_func,
- total_timeout=math.inf,  # progress timeout is sufficient
progress_timeout=max_download_duration_per_large_item,
progress_timeout_message=lambda: (
f'Subresource download timed out after {max_download_duration_per_large_item:.1f}s: '