diff --git a/.github/workflows/ci-package.yml b/.github/workflows/ci-package.yml
index 211a28e..165be80 100644
--- a/.github/workflows/ci-package.yml
+++ b/.github/workflows/ci-package.yml
@@ -28,10 +28,13 @@ defaults:
 env:
   PACK_DIR: /root/.pack
   GENERATION_DIR: 'generated-modules'
+  GEN_TESTS_NUMBER: 256
   BUILD_DIR: '.build'
   PACKAGE_NAME: verilog-model
   ERROR_DISTANCES_OUT: error_distances.html
   GH_PAGES_PATH: verilog-gh-pages
+  RUN_STATS_PATTERN: 'run-stats.json'
+  COMBINED_RUN_STATS_FILE: 'combined-run-stats.json'

 jobs:

@@ -70,10 +73,20 @@ jobs:
     steps:
       - name: Checkout
        uses: actions/checkout@v4
+     - name: Update `pack-db`
+       run: pack update-db
+     - name: Switch to the latest compiler, if needed
+       if: matrix.upstream-mode == 'bleeding-edge-compiler'
+       run: |
+         { echo; echo "[idris2]"; echo 'commit = "latest:main"'; } >> pack.toml
+         pack fetch
+     - name: Switch to the latest collection
+       run: pack switch latest
      - name: Calculate cache key
        id: cache-key
        run: |
-         echo "key=build-${{ hashFiles('src/**', 'verilog-model.ipkg', 'pack.toml') }}-${{ matrix.upstream-mode }}" >> $GITHUB_OUTPUT
+         ttc_version=$(idris2 --ttc-version)
+         echo "key=build-${{ hashFiles('src/**', 'verilog-model.ipkg', 'pack.toml') }}-${{ matrix.upstream-mode }}-$ttc_version" >> $GITHUB_OUTPUT
      - name: Restore from cache
        id: cache-build
        uses: actions/cache/restore@v4
@@ -85,15 +98,6 @@ jobs:
        run: |
          find "${{ env.BUILD_DIR }}" -type f -exec touch {} +
          sync
-     - name: Update `pack-db`
-       run: pack update-db
-     - name: Switch to the latest compiler, if needed
-       if: matrix.upstream-mode == 'bleeding-edge-compiler'
-       run: |
-         { echo; echo "[idris2]"; echo 'commit = "latest:main"'; } >> pack.toml
-         pack fetch
-     - name: Switch to the latest collection
-       run: pack switch latest
      - name: Build `${{ env.PACKAGE_NAME }}`
        run: pack build ${{ env.PACKAGE_NAME }}
      - name: Save build
@@ -111,7 +115,7 @@ jobs:
        run: pack test ${{ env.PACKAGE_NAME }}
      - name: Gen SystemVerilog modules
        if: matrix.upstream-mode == 'latest-pack-collection'
-       run : pack run ${{ env.PACKAGE_NAME }} --to "${{ env.GENERATION_DIR }}" -n 128 --seed-name --seed-content --coverage mcov # Generate modules
+       run : pack run ${{ env.PACKAGE_NAME }} --to "${{ env.GENERATION_DIR }}" -n ${{ env.GEN_TESTS_NUMBER }} --seed-name --seed-content --coverage mcov # Generate modules
      - name: Show mcov
        if: matrix.upstream-mode == 'latest-pack-collection'
        run : cat mcov
@@ -166,10 +170,10 @@ jobs:
          version: |
            iverilog -V
          run: iverilog -g2012 -o a.out {file}
-         error_regex: |
+         error_regex: >
            (syntax error\W[A-z-\/0-9,.:]+ .*$|(error|sorry|assert|vvp): [\S ]+$)
          sim_cmd: vvp a.out
-         sim_error_regex: |
+         sim_error_regex: >
            (syntax error\W[A-z-\/0-9,.:]+ .*$|(error|sorry|assert|vvp): [\S ]+$)
          extra_ignored_regexes: >
            "Unable to elaborate r-value: .*"
@@ -187,7 +191,7 @@ jobs:
          version: |
            slang --version
          run: slang -Weverything {file}
-         error_regex: |
+         error_regex: >
            error: [\S ]+$

        - name: verilator
@@ -203,10 +207,10 @@ jobs:
          version: |
            verilator --version
          run: rm -rf obj_dir && rm -f top.sv && cp {file} top.sv && verilator --cc --exe .github/workflows/conf/verilator/testbench.cpp --timing -Wno-fatal --no-std +1800-2023ext+sv top.sv
-         error_regex: |
+         error_regex: >
            %Error(|\-[A-Z]+):( | Internal Error: )[A-z0-9-_\/]+.sv:\d+:\d+:[\S ]*$
          sim_cmd: make -C obj_dir -f Vtop.mk Vtop && ./obj_dir/Vtop
-         sim_error_regex: |
+         sim_error_regex: >
            %Error:[\S ]+$

        - name: surelog
@@ -236,8 +240,10 @@ jobs:
          version: |
            sv2v --version
          run: sv2v -v {file}
-         error_regex: |
+         error_regex: >
            sv2v:(?!CallStack$).+
+         extra_ignored_regexes: >
+           "sv2v:Language\.SystemVerilog\.AST\.Type"

        - name: tree-sitter-systemverilog
          repo: https://github.com/gmlarumbe/tree-sitter-systemverilog.git
@@ -279,7 +285,7 @@ jobs:
            synlig --version
            synlig -h
          run: synlig -f systemverilog -o output.blif -S {file}
-         error_regex: |
+         error_regex: >
            ERROR: .*$

        - name: sv_parser
@@ -305,7 +311,7 @@ jobs:
        #   install: |
        #     sudo make install
        #     cd ..
-       #     bash .github/workflows/runner/rename_tests.sh
+       #     bash .github/workflows/runner/tools-run/rename_tests.sh
        #   version: yosys --version
        #   run: yosys -p 'read -sv2012 {file}; hierarchy -check -top {top_module}; proc; opt; fsm; memory; sim -assert; clean'
        #   error_regex: >
@@ -330,13 +336,14 @@ jobs:
            cd ..
            sudo make install
            cd ..
-           bash .github/workflows/runner/rename_tests.sh
+           bash .github/workflows/runner/tools-run/rename_tests.sh
          version: yosys --version
          run: yosys -m slang -p 'read_slang {file}; hierarchy -check -top {top_module}; proc; opt; fsm; memory; sim -assert; clean'
          error_regex: >
            (ERROR|error): .*
          extra_ignored_regexes: >
            "Compilation failed"
+           "too many errors emitted, stopping now"

    steps:
      - name: Checkout
        uses: actions/checkout@v4
@@ -352,7 +359,7 @@ jobs:
        run: |
          git clone ${{ matrix.tool.repo }} ${{ matrix.tool.path }}
          cd ${{ matrix.tool.path }}
-         echo "commit_hash=$(git rev-parse HEAD)" >> $GITHUB_ENV
+         echo "COMMIT_HASH=$(git rev-parse HEAD)" >> $GITHUB_ENV
      - name: Restore dependency
        if: ${{ matrix.tool.dep_path }}
        uses: actions/cache/restore@v4
        with:
          path: ${{ matrix.tool.path }}
@@ -364,7 +371,7 @@ jobs:
        uses: actions/cache/restore@v4
        with:
          path: ${{ matrix.tool.path }}
-         key: ${{ matrix.tool.name }}-cache-${{ env.commit_hash }}
+         key: ${{ matrix.tool.name }}-cache-${{ env.COMMIT_HASH }}
      - name: Add bazel repo (if needed)
        if: ${{ contains(matrix.tool.deps, 'bazel') }}
        run: |
@@ -418,10 +425,6 @@ jobs:
          ${{ matrix.tool.install }}
      - name: Print version
        run: ${{ matrix.tool.version }}
-     - name: Print git commit hash
-       run: |
-         cd ${{ matrix.tool.path }}
-         git rev-parse HEAD
      - name: Install runner deps
        run: sudo apt install -y bc python3 python3-pip python3-venv
      - name: Setup python venv
        run: |
          python3 -m venv .venv
          source .venv/bin/activate
          python3 -m pip install --upgrade pip
-         pip install plotly pandas scikit-learn numpy textdistance pyyaml ${{ matrix.tool.python_runner_deps }}
+         pip install -r .github/workflows/runner/tools-run/requirements.txt
+         if [ -n "${{ matrix.tool.python_runner_deps }}" ]; then
+           pip install ${{ matrix.tool.python_runner_deps }}
+         fi
+     - name: Clone gh-pages
+       run: git clone https://github.com/DepTyCheck/verilog-model.git --branch gh-pages --single-branch ${{ env.GH_PAGES_PATH }}
      - name: Run
        run : |
          set +e
          source .venv/bin/activate
-         git clone https://github.com/DepTyCheck/verilog-model.git --branch gh-pages --single-branch ${{ env.GH_PAGES_PATH }}
          cd "${{ env.GH_PAGES_PATH }}"
          echo "Latest commit hash of ${{ env.GH_PAGES_PATH }}:"
          git rev-parse HEAD
          cd -
-         python3 .github/workflows/runner/main.py \
+         python3 .github/workflows/runner/tools-run/main.py \
            --gen-path "${{ env.GENERATION_DIR }}" \
            --job-link "https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }}/job/${{ github.job }}" \
            --tool-name "${{ matrix.tool.name }}" \
@@ -451,6 +458,8 @@ jobs:
            --sim-error-regex "${{ matrix.tool.sim_error_regex }}" \
            --ignored-errors-dir "${{ env.GH_PAGES_PATH }}/found_errors/${{ matrix.tool.path }}" \
            --error-distances-output "$ERROR_DISTANCES_OUT" \
+           --run-statistics-output "${{ matrix.tool.name }}-${{ env.RUN_STATS_PATTERN }}" \
+           --commit "${{ env.COMMIT_HASH }}" \
            --extra-ignored-regexes ${{ join(matrix.tool.extra_ignored_regexes, ' ') }}

          EXIT_CODE=$?
@@ -463,6 +472,62 @@ jobs:
        with:
          name: error-distances-${{ matrix.tool.name }}
          path: ${{ env.ERROR_DISTANCES_OUT }}
+     - name: Upload run statistics
+       uses: actions/upload-artifact@v4
+       with:
+         name: "${{ matrix.tool.name }}-${{ env.RUN_STATS_PATTERN }}"
+         path: "${{ matrix.tool.name }}-${{ env.RUN_STATS_PATTERN }}"
+
+ test-combined-report:
+   name: Test combined report
+   runs-on: ubuntu-latest
+   steps:
+     - uses: actions/checkout@v4
+     - uses: actions/setup-python@v6
+       with:
+         python-version: "3.13"
+     - run: |
+         cd .github/workflows/runner/combined-report
+         python -m tests.test
+
+ push-run-stats:
+   name: Push run stats
+   runs-on: ubuntu-latest
+   needs: [test-combined-report, run-tools]
+   if: ${{ github.ref == 'refs/heads/master' }}
+   permissions:
+     contents: write
+   steps:
+     - name: Checkout
+       uses: actions/checkout@v4
+     - name: Download run statistics
+       uses: actions/download-artifact@v4
+       with:
+         pattern: "*-${{ env.RUN_STATS_PATTERN }}"
+         path: run-stats
+         merge-multiple: true
+
+     - name: Clone gh-pages
+       run: git clone https://github.com/DepTyCheck/verilog-model.git --branch gh-pages --single-branch ${{ env.GH_PAGES_PATH }}
+
+     - name: Combine statistics
+       run: |
+         # NOTE: assumes the previous combined report lives at the root of the gh-pages checkout
+         python3 .github/workflows/runner/combined-report/main.py \
+           --previous-report "${{ env.GH_PAGES_PATH }}/${{ env.COMBINED_RUN_STATS_FILE }}" \
+           --current-tools-reports-dir run-stats \
+           --tools-reports-pattern "*-${{ env.RUN_STATS_PATTERN }}" \
+           --tests-number ${{ env.GEN_TESTS_NUMBER }} \
+           --result-report "${{ env.GH_PAGES_PATH }}/${{ env.COMBINED_RUN_STATS_FILE }}"
+
+     - name: Bot Details
+       id: bot-details
+       uses: raven-actions/bot-details@v1
+
+     - name: Commit & push
+       run: |
+         cd ${{ env.GH_PAGES_PATH }}
+         git config --global user.name "${{ steps.bot-details.outputs.name }}"
+         git config --global user.email "${{ steps.bot-details.outputs.email }}"
+         git add ${{ env.COMBINED_RUN_STATS_FILE }}
+         git commit -m "[ bot ] Update runs statistics"
+         git remote set-url origin "https://x-access-token:${{ secrets.PAT_PUSH }}@github.com/${{ github.repository }}"
+         git push origin gh-pages

 publish-container:
   name: Build and publish Docker image

      - name: Checkout repository
        uses: actions/checkout@v4

-     - name: Restore build cache
+     - name: Calculate cache key
+       id: cache-key
+       run: |
+         # this job has no matrix, so pin the same mode the build job used
+         ttc_version=$(idris2 --ttc-version)
+         echo "key=build-${{ hashFiles('src/**', 'verilog-model.ipkg', 'pack.toml') }}-latest-pack-collection-$ttc_version" >> $GITHUB_OUTPUT
+
+     - name: Restore from cache
        id: cache-build
        uses: actions/cache/restore@v4
        with:
          path: ${{ env.BUILD_DIR }}
-         key: build-${{ hashFiles('src/**', 'verilog-model.ipkg', 'pack.toml') }}-latest-pack-collection
+         key: ${{ steps.cache-key.outputs.key }}

      - name: Touch ${{ env.BUILD_DIR }} dir (-_-)
        run: |
diff --git a/.github/workflows/runner/combined-report/main.py b/.github/workflows/runner/combined-report/main.py
new file mode 100644
index 0000000..f023578
--- /dev/null
+++ b/.github/workflows/runner/combined-report/main.py
@@ -0,0 +1,61 @@
+import argparse
+
+from src.previous_report import PreviousReport
+from src.tools_report_list import ToolsReportsList
+from src.combined_report import CombinedReport
+
+
+def parse_args():
+    parser = argparse.ArgumentParser()
+
+    parser.add_argument(
+        "--previous-report",
+        type=str,
+        help="Path to previous report",
+        required=True,
+    )
+    parser.add_argument(
+        "--current-tools-reports-dir",
+        type=str,
+        help="Path to tools reports directory",
+        required=True,
+    )
+    parser.add_argument(
+        "--tools-reports-pattern",
+        type=str,
+        help="Tools reports pattern",
+        required=True,
+    )
+    parser.add_argument(
"--tests-number", + type=int, + help="Number of tests", + required=True, + ) + parser.add_argument( + "--result-report", + type=str, + help="Path to result report", + required=True, + ) + + return parser.parse_args() + + +def main() -> None: + args = parse_args() + + CombinedReport( + previous_report=PreviousReport( + file_path=args.previous_report, + ), + tools_reports_list=ToolsReportsList( + dir_path=args.current_tools_reports_dir, + pattern=args.tools_reports_pattern, + ), + tests_number=args.tests_number, + ).combine().save(args.result_report) + + +if __name__ == "__main__": + main() diff --git a/.github/workflows/runner/combined-report/requirements.txt b/.github/workflows/runner/combined-report/requirements.txt new file mode 100644 index 0000000..e69de29 diff --git a/.github/workflows/runner/combined-report/src/combined_report.py b/.github/workflows/runner/combined-report/src/combined_report.py new file mode 100644 index 0000000..c08e08d --- /dev/null +++ b/.github/workflows/runner/combined-report/src/combined_report.py @@ -0,0 +1,55 @@ +from datetime import datetime + +from src.tools_report_list import ToolsReportsList +from src.previous_report import PreviousReport +from src.result_report import ResultReport +from src.report_structure import LastOccurrence, ErrorInfo, RunInfo + + +class CombinedReport: + def __init__( + self, + previous_report: PreviousReport, + tools_reports_list: ToolsReportsList, + tests_number: int, + ): + self.previous_report = previous_report + self.tools_reports_list = tools_reports_list + self.tests_number = tests_number + + def combined_runs(self) -> list[RunInfo]: + date = ( + self.tools_reports_list.reports[0].date + if len(self.tools_reports_list.reports) > 0 + else datetime.now().isoformat() + ) + return self.previous_report.runs + [ + RunInfo(date=date, amount=self.tests_number) + ] + + def combined_errors(self) -> dict[str, ErrorInfo]: + data: dict[str, ErrorInfo] = self.previous_report.errors.copy() + + for tool_report in self.tools_reports_list.reports: + for error_report in tool_report.errors: + err_id = error_report.error_id + last = LastOccurrence(commit=tool_report.commit, date=tool_report.date) + + if err_id in list(data.keys()): + data[err_id].overall += error_report.overall + data[err_id].test_paths_count += error_report.test_paths_count + data[err_id].last = last + else: + data[err_id] = ErrorInfo( + overall=error_report.overall, + test_paths_count=error_report.test_paths_count, + last=last, + ) + + return data + + def combine(self) -> ResultReport: + return ResultReport( + errors=self.combined_errors(), + runs=self.combined_runs(), + ) diff --git a/.github/workflows/runner/combined-report/src/previous_report.py b/.github/workflows/runner/combined-report/src/previous_report.py new file mode 100644 index 0000000..6748e15 --- /dev/null +++ b/.github/workflows/runner/combined-report/src/previous_report.py @@ -0,0 +1,35 @@ +from typing import Dict, List +import json + +from src.report_structure import ErrorInfo, RunInfo, LastOccurrence + + +class PreviousReport: + def __init__(self, file_path: str): + self.errors: Dict[str, ErrorInfo] = {} + self.runs: List[RunInfo] = [] + + self.errors, self.runs = self.parse(file_path) + + def parse(self, file_path: str): + with open(file_path, "r") as file: + raw_json = file.read() + + data = json.loads(raw_json) + + errors: Dict[str, ErrorInfo] = {} + runs: List[RunInfo] = [] + + errors_data = data.get("errors", {}) + for error_id, error_info in errors_data.items(): + last_occurrence = 
+                commit=error_info["last"]["commit"],
+                date=error_info["last"]["date"],
+            )
+            errors[error_id] = ErrorInfo(
+                overall=error_info["overall"],
+                test_paths_count=error_info["test_paths_count"],
+                last=last_occurrence,
+            )
+
+        runs_data = data.get("runs", [])
+        runs = [RunInfo(date=run["date"], amount=run["amount"]) for run in runs_data]
+
+        return errors, runs
diff --git a/.github/workflows/runner/combined-report/src/report_structure.py b/.github/workflows/runner/combined-report/src/report_structure.py
new file mode 100644
index 0000000..fbd3ca8
--- /dev/null
+++ b/.github/workflows/runner/combined-report/src/report_structure.py
@@ -0,0 +1,39 @@
+from dataclasses import dataclass
+
+
+@dataclass
+class LastOccurrence:
+    commit: str
+    date: str
+
+    def to_dict(self):
+        return {
+            "commit": self.commit,
+            "date": self.date,
+        }
+
+
+@dataclass
+class ErrorInfo:
+    overall: int
+    test_paths_count: int
+    last: LastOccurrence
+
+    def to_dict(self):
+        return {
+            "overall": self.overall,
+            "test_paths_count": self.test_paths_count,
+            "last": self.last.to_dict(),
+        }
+
+
+@dataclass
+class RunInfo:
+    date: str
+    amount: int
+
+    def to_dict(self):
+        return {
+            "date": self.date,
+            "amount": self.amount,
+        }
diff --git a/.github/workflows/runner/combined-report/src/result_report.py b/.github/workflows/runner/combined-report/src/result_report.py
new file mode 100644
index 0000000..48fcdf5
--- /dev/null
+++ b/.github/workflows/runner/combined-report/src/result_report.py
@@ -0,0 +1,20 @@
+import json
+
+from typing import List
+from src.report_structure import ErrorInfo, RunInfo
+
+
+class ResultReport:
+    def __init__(self, errors: dict[str, ErrorInfo], runs: List[RunInfo]):
+        self.errors: dict[str, ErrorInfo] = errors
+        self.runs: List[RunInfo] = runs
+
+    def save(self, file_path: str):
+        data = {
+            "errors": {
+                error_id: error.to_dict() for error_id, error in self.errors.items()
+            },
+            "runs": [run.to_dict() for run in self.runs],
+        }
+        with open(file_path, "w") as file:
+            json.dump(data, file, indent=2)
diff --git a/.github/workflows/runner/combined-report/src/tools_report.py b/.github/workflows/runner/combined-report/src/tools_report.py
new file mode 100644
index 0000000..f59e02d
--- /dev/null
+++ b/.github/workflows/runner/combined-report/src/tools_report.py
@@ -0,0 +1,19 @@
+from dataclasses import dataclass
+
+
+@dataclass
+class ErrorReport:
+    error_id: str
+    overall: int
+    test_paths_count: int
+    tests_paths: list[str]
+
+
+class ToolsReport:
+    def __init__(self, data: dict):
+        self.errors: list[ErrorReport] = self.parse_errors(data)
+        self.commit = data["commit"]
+        self.date = data["date"]
+
+    def parse_errors(self, data: dict) -> list[ErrorReport]:
+        return [ErrorReport(**error) for error in data.get("errors", [])]
diff --git a/.github/workflows/runner/combined-report/src/tools_report_list.py b/.github/workflows/runner/combined-report/src/tools_report_list.py
new file mode 100644
index 0000000..00590aa
--- /dev/null
+++ b/.github/workflows/runner/combined-report/src/tools_report_list.py
@@ -0,0 +1,15 @@
+from typing import List
+from pathlib import Path
+import json
+
+from src.tools_report import ToolsReport
+
+
+class ToolsReportsList:
+    def __init__(self, dir_path: str, pattern: str):
+        self.reports: List[ToolsReport] = []
+
+        for report_file in Path(dir_path).glob(pattern):
+            with open(report_file, "r") as f:
+                data_dict = json.load(f)
+                self.reports.append(ToolsReport(data_dict))
diff --git a/.github/workflows/runner/combined-report/tests/data/other-tool-run-stats.json b/.github/workflows/runner/combined-report/tests/data/other-tool-run-stats.json
new file mode 100644
index 0000000..8bdda72
--- /dev/null
+++ b/.github/workflows/runner/combined-report/tests/data/other-tool-run-stats.json
@@ -0,0 +1,24 @@
+{
+  "errors": [
+    {
+      "error_id": "cannot_be_driven_with_non_default_strength",
+      "overall": 12,
+      "test_paths_count": 3,
+      "tests_paths": [
+        "generated/10-seed_2428324134062495893,3128299129089410139.sv",
+        "generated/65-seed_9931058363952773939,3128299129089410139.sv",
+        "generated/29-seed_549568144774216018,3128299129089410139.sv"
+      ]
+    },
+    {
+      "error_id": "new_error_id",
+      "overall": 1,
+      "test_paths_count": 1,
+      "tests_paths": [
+        "generated/29-seed_549568144774216018,3128299129089410139.sv"
+      ]
+    }
+  ],
+  "commit": "abc12345",
+  "date": "2025-11-04T12:38:39.969316"
+}
\ No newline at end of file
diff --git a/.github/workflows/runner/combined-report/tests/data/previous_report.json b/.github/workflows/runner/combined-report/tests/data/previous_report.json
new file mode 100644
index 0000000..0abe0a5
--- /dev/null
+++ b/.github/workflows/runner/combined-report/tests/data/previous_report.json
@@ -0,0 +1,30 @@
+{
+  "errors": {
+    "t_dll_api_cc_ivl_nexus_s": {
+      "overall": 10,
+      "test_paths_count": 10,
+      "last": {
+        "commit": "12345abc",
+        "date": "2025-10-19T15:10:48+00:00"
+      }
+    },
+    "cannot_be_driven_with_non_default_strength": {
+      "overall": 12,
+      "test_paths_count": 417,
+      "last": {
+        "commit": "12345abc31231",
+        "date": "2025-10-18T15:10:48+00:00"
+      }
+    }
+  },
+  "runs": [
+    {
+      "date": "2025-10-19T15:10:48+00:00",
+      "amount": 256
+    },
+    {
+      "date": "2025-10-18T15:10:48+00:00",
+      "amount": 10
+    }
+  ]
+}
diff --git a/.github/workflows/runner/combined-report/tests/data/some-tool-run-stats.json b/.github/workflows/runner/combined-report/tests/data/some-tool-run-stats.json
new file mode 100644
index 0000000..db62f4a
--- /dev/null
+++ b/.github/workflows/runner/combined-report/tests/data/some-tool-run-stats.json
@@ -0,0 +1,39 @@
+{
+  "errors": [
+    {
+      "error_id": "t_dll_api_cc_ivl_nexus_s",
+      "overall": 32,
+      "test_paths_count": 26,
+      "tests_paths": [
+        "generated/10-seed_2428324134062495893,3128299129089410139.sv",
+        "generated/65-seed_9931058363952773939,3128299129089410139.sv",
+        "generated/29-seed_549568144774216018,3128299129089410139.sv",
+        "generated/83-seed_5099076482301640435,3128299129089410139.sv",
+        "generated/84-seed_13647994075863507267,3128299129089410139.sv",
+        "generated/58-seed_12822400028629458463,3128299129089410139.sv",
+        "generated/71-seed_8441463012171610221,3128299129089410139.sv",
+        "generated/36-seed_8355443864916960300,3128299129089410139.sv",
+        "generated/37-seed_1536635323995096370,3128299129089410139.sv",
+        "generated/59-seed_16536538239648392389,3128299129089410139.sv",
+        "generated/69-seed_17955976471768091138,3128299129089410139.sv",
+        "generated/80-seed_3117070915413338426,3128299129089410139.sv",
+        "generated/46-seed_4730330285713594232,3128299129089410139.sv",
+        "generated/22-seed_9741254061059762472,3128299129089410139.sv",
+        "generated/1-seed_11691253499933042360,3128299129089410139.sv",
+        "generated/57-seed_2189224357496412630,3128299129089410139.sv",
+        "generated/76-seed_2775971243336691657,3128299129089410139.sv",
+        "generated/53-seed_15923648016595859751,3128299129089410139.sv",
+        "generated/44-seed_16610296227259165878,3128299129089410139.sv",
+        "generated/93-seed_3118380402852848225,3128299129089410139.sv",
+        "generated/35-seed_10476335463025953815,3128299129089410139.sv",
"generated/70-seed_11334337321803779299,3128299129089410139.sv", + "generated/8-seed_2461222892141417034,3128299129089410139.sv", + "generated/97-seed_7659139185679314828,3128299129089410139.sv", + "generated/2-seed_13510878130923757581,3128299129089410139.sv", + "generated/28-seed_1148233422042294380,3128299129089410139.sv" + ] + } + ], + "commit": "54c4f9f4", + "date": "2025-11-04T12:38:39.969316" +} \ No newline at end of file diff --git a/.github/workflows/runner/combined-report/tests/requirements.txt b/.github/workflows/runner/combined-report/tests/requirements.txt new file mode 100644 index 0000000..e69de29 diff --git a/.github/workflows/runner/combined-report/tests/test.py b/.github/workflows/runner/combined-report/tests/test.py new file mode 100644 index 0000000..5d9ac4e --- /dev/null +++ b/.github/workflows/runner/combined-report/tests/test.py @@ -0,0 +1,22 @@ +import unittest + +from .test_previous import TestPreviousReport +from .test_tools_report import TestToolsReport +from .test_tools_report_list import TestToolsReportList +from .test_combined_report import TestCombinedReport + +if __name__ == "__main__": + suite = unittest.TestSuite() + + loader = unittest.TestLoader() + test_cases = [ + TestPreviousReport, + TestToolsReport, + TestToolsReportList, + TestCombinedReport, + ] + for test_case in test_cases: + suite.addTests(loader.loadTestsFromTestCase(test_case)) + + runner = unittest.TextTestRunner(verbosity=2) + runner.run(suite) diff --git a/.github/workflows/runner/combined-report/tests/test_combined_report.py b/.github/workflows/runner/combined-report/tests/test_combined_report.py new file mode 100644 index 0000000..7236ba5 --- /dev/null +++ b/.github/workflows/runner/combined-report/tests/test_combined_report.py @@ -0,0 +1,45 @@ +import unittest + +from src.previous_report import PreviousReport +from src.tools_report_list import ToolsReportsList +from src.combined_report import CombinedReport + + +class TestCombinedReport(unittest.TestCase): + def setUp(self): + p = PreviousReport("./tests/data/previous_report.json") + trl = ToolsReportsList(dir_path="./tests/data", pattern=r"*-run-stats.json") + self.tests_number = 1337 + + self.combined_report = CombinedReport( + previous_report=p, tools_reports_list=trl, tests_number=self.tests_number + ) + + def test_runs(self): + runs = self.combined_report.combined_runs() + last_run = runs[-1] + + self.assertEqual(len(runs), 3) + self.assertEqual(last_run.date, "2025-11-04T12:38:39.969316") + self.assertEqual(last_run.amount, self.tests_number) + + def test_errors(self): + data = self.combined_report.combined_errors() + + self.assertIn("t_dll_api_cc_ivl_nexus_s", list(data.keys())) + self.assertIn("cannot_be_driven_with_non_default_strength", list(data.keys())) + self.assertIn("new_error_id", list(data.keys())) + + self.assertEqual(data["t_dll_api_cc_ivl_nexus_s"].overall, 42) + self.assertEqual( + data["cannot_be_driven_with_non_default_strength"].test_paths_count, + 420, + ) + self.assertEqual( + data["cannot_be_driven_with_non_default_strength"].last.commit, + "abc12345", + ) + self.assertEqual( + data["cannot_be_driven_with_non_default_strength"].last.date, + "2025-11-04T12:38:39.969316", + ) diff --git a/.github/workflows/runner/combined-report/tests/test_previous.py b/.github/workflows/runner/combined-report/tests/test_previous.py new file mode 100644 index 0000000..b46fb8e --- /dev/null +++ b/.github/workflows/runner/combined-report/tests/test_previous.py @@ -0,0 +1,12 @@ +import unittest + +from src.previous_report import 
+
+
+class TestPreviousReport(unittest.TestCase):
+
+    def test_parsing(self):
+        p = PreviousReport("./tests/data/previous_report.json")
+
+        self.assertEqual(p.errors["t_dll_api_cc_ivl_nexus_s"].overall, 10)
+        self.assertEqual(p.runs[0].amount, 256)
diff --git a/.github/workflows/runner/combined-report/tests/test_tools_report.py b/.github/workflows/runner/combined-report/tests/test_tools_report.py
new file mode 100644
index 0000000..2fcb478
--- /dev/null
+++ b/.github/workflows/runner/combined-report/tests/test_tools_report.py
@@ -0,0 +1,18 @@
+import unittest
+import json
+
+from src.tools_report import ToolsReport
+
+
+class TestToolsReport(unittest.TestCase):
+
+    def test_parsing(self):
+        with open("./tests/data/some-tool-run-stats.json", "r") as f:
+            data_dict = json.load(f)
+
+        p = ToolsReport(data_dict)
+
+        self.assertEqual(p.errors[0].error_id, "t_dll_api_cc_ivl_nexus_s")
+        self.assertEqual(p.errors[0].overall, 32)
+        self.assertEqual(p.commit, "54c4f9f4")
+        self.assertEqual(p.date, "2025-11-04T12:38:39.969316")
diff --git a/.github/workflows/runner/combined-report/tests/test_tools_report_list.py b/.github/workflows/runner/combined-report/tests/test_tools_report_list.py
new file mode 100644
index 0000000..da9dde2
--- /dev/null
+++ b/.github/workflows/runner/combined-report/tests/test_tools_report_list.py
@@ -0,0 +1,11 @@
+import unittest
+
+from src.tools_report_list import ToolsReportsList
+
+
+class TestToolsReportList(unittest.TestCase):
+
+    def test_parsing(self):
+        trl = ToolsReportsList(dir_path="./tests/data", pattern=r"*-run-stats.json")
+
+        self.assertEqual(len(trl.reports), 2)
diff --git a/.github/workflows/runner/main.py b/.github/workflows/runner/tools-run/main.py
similarity index 85%
rename from .github/workflows/runner/main.py
rename to .github/workflows/runner/tools-run/main.py
index 4a83019..f477d33 100644
--- a/.github/workflows/runner/main.py
+++ b/.github/workflows/runner/tools-run/main.py
@@ -3,15 +3,17 @@ import sys
 import argparse
 from pathlib import Path

-from ignored_errors_list import IgnoredErrorsList, UnexpectedErrorText, FoundMatch, KnownError
+from src.ignored_errors_list import IgnoredErrorsList, UnexpectedErrorText, FoundMatch, KnownError
 from collections import Counter
-from found_error import (
+from src.found_error import (
     FoundError,
     compute_ncd_for_errors,
     plot_error_distances_mds,
 )
-from run_test import make_command, run_test
-from utils import print_pretty
+from src.error_match_in_test import ErrorMatchInTest
+from src.known_errors_report import KnownErrorsReport
+from src.run_test import make_command, run_test
+from src.utils import print_pretty


 def parse_args():
@@ -78,6 +80,18 @@ def parse_args():
         default=[],
         help="Additional regexes to ignore (can be specified multiple times)",
     )
+    parser.add_argument(
+        "--run-statistics-output",
+        type=str,
+        help="Path to save run statistics",
+        required=True,
+    )
+    parser.add_argument(
+        "--commit",
+        type=str,
+        help="Commit hash of the tool under test, recorded in the run statistics",
+        required=True,
+    )

     return parser.parse_args()

@@ -89,7 +103,7 @@ def run(
     file_content: str,
     ignored_errors: IgnoredErrorsList,
     all_found_errors: list[FoundError],
-) -> tuple[bool, list[UnexpectedErrorText], list[FoundMatch]]:
+) -> tuple[bool, list[UnexpectedErrorText], list[ErrorMatchInTest]]:
     real_cmd = make_command(
         cmd=raw_cmd,
         file_path=file_path_str,
@@ -124,7 +138,11 @@ def main() -> None:
     stats = Counter()
     all_found_errors: list[FoundError] = []

-    for file_path in Path(gen_path).glob("*.sv"):
+    files = Path(gen_path).glob("*.sv")
+
+    report = KnownErrorsReport(commit=args.commit)
+
+    for file_path in files:
         file_path_str = str(file_path)
         with open(file_path, "r", encoding="utf-8") as file:
             file_content = file.read()
@@ -139,6 +157,8 @@ def main() -> None:
             all_found_errors=all_found_errors,
         )

+        report.add_errors(cmd_found_matches)
+
         sim_res = True
         sim_unexpected_errors: list[UnexpectedErrorText] = []

@@ -153,6 +173,8 @@ def main() -> None:
             all_found_errors=all_found_errors,
         )

+        report.add_errors(sim_found_matches)
+
         if cmd_res and sim_res:
             stats["clean"] += 1
         elif len(cmd_unexpected_errors + sim_unexpected_errors) == 0:
@@ -168,7 +190,7 @@ def main() -> None:
     nodes_text = [err.text for err in all_found_errors] + [ke.pattern for ke in ignored_errors.errors()]
     ncd_results = compute_ncd_for_errors(
         nodes_text,
-        ".github/workflows/runner/ncd-xz.sh",
+        ".github/workflows/runner/tools-run/ncd-xz.sh",
     )
     plot_error_distances_mds(
         all_found_errors,
@@ -179,6 +201,8 @@ def main() -> None:
         output_path=args.error_distances_output,
     )

+    report.save(args.run_statistics_output)
+
     print_pretty(
         [
             "Test Statistics:",
diff --git a/.github/workflows/runner/ncd-xz.sh b/.github/workflows/runner/tools-run/ncd-xz.sh
similarity index 100%
rename from .github/workflows/runner/ncd-xz.sh
rename to .github/workflows/runner/tools-run/ncd-xz.sh
diff --git a/.github/workflows/runner/rename_tests.sh b/.github/workflows/runner/tools-run/rename_tests.sh
similarity index 100%
rename from .github/workflows/runner/rename_tests.sh
rename to .github/workflows/runner/tools-run/rename_tests.sh
diff --git a/.github/workflows/runner/tools-run/requirements.txt b/.github/workflows/runner/tools-run/requirements.txt
new file mode 100644
index 0000000..f35a6d2
--- /dev/null
+++ b/.github/workflows/runner/tools-run/requirements.txt
@@ -0,0 +1,6 @@
+plotly
+pandas
+scikit-learn
+numpy
+textdistance
+pyyaml
diff --git a/.github/workflows/runner/tools-run/src/error_match_in_test.py b/.github/workflows/runner/tools-run/src/error_match_in_test.py
new file mode 100644
index 0000000..1739c35
--- /dev/null
+++ b/.github/workflows/runner/tools-run/src/error_match_in_test.py
@@ -0,0 +1,7 @@
+from src.ignored_errors_list import FoundMatch
+
+
+class ErrorMatchInTest:
+    def __init__(self, match: FoundMatch, test_path: str) -> None:
+        self.test_path = test_path
+        self.match = match
diff --git a/.github/workflows/runner/find_top.py b/.github/workflows/runner/tools-run/src/find_top.py
similarity index 76%
rename from .github/workflows/runner/find_top.py
rename to .github/workflows/runner/tools-run/src/find_top.py
index e927ae5..2e850e1 100644
--- a/.github/workflows/runner/find_top.py
+++ b/.github/workflows/runner/tools-run/src/find_top.py
@@ -1,5 +1,6 @@
 import re

+
 def find_top(file_content: str) -> str:
     """
     Find the top module name in the given file content.
@@ -10,8 +11,8 @@ def find_top(file_content: str) -> str:
     Returns:
         str: The name of the top module, or None if no top module is found
     """
-    matches = re.findall(r'(?<=module )[A-z]+', file_content, re.MULTILINE)
+    matches = re.findall(r"(?<=module )[A-z]+", file_content, re.MULTILINE)

     if matches:
         return matches[-1]
-    raise Exception('No top module found')
+    raise Exception("No top module found")
diff --git a/.github/workflows/runner/found_error.py b/.github/workflows/runner/tools-run/src/found_error.py
similarity index 96%
rename from .github/workflows/runner/found_error.py
rename to .github/workflows/runner/tools-run/src/found_error.py
index d701c09..6a5f6b3 100644
--- a/.github/workflows/runner/found_error.py
+++ b/.github/workflows/runner/tools-run/src/found_error.py
@@ -1,15 +1,17 @@
 import subprocess
 import os
+import re
+import datetime
+
+from textdistance import LZMANCD
 from typing import List, Tuple, Dict
 import numpy as np
 import plotly.graph_objects as go
 import plotly.express as px
-from ignored_errors_list import KnownError
 from sklearn.manifold import MDS
-import re
-from utils import print_pretty
-from textdistance import ZLIBNCD
-import datetime
+
+from src.ignored_errors_list import KnownError
+from src.utils import print_pretty


 class FoundError:
@@ -50,7 +52,7 @@ def compute_ncd_for_errors(nodes_text: List[str], ncd_script_path: str) -> Dict[
     #     if os.path.exists(file2):
     #         os.remove(file2)
             try:
-                ncd_value = ZLIBNCD().distance(nodes_text[i], nodes_text[j])
+                ncd_value = LZMANCD().distance(nodes_text[i], nodes_text[j])
                 results[(i, j)] = ncd_value
             except Exception as e:
                 print(e)
diff --git a/.github/workflows/runner/handle_errors.py b/.github/workflows/runner/tools-run/src/handle_errors.py
similarity index 79%
rename from .github/workflows/runner/handle_errors.py
rename to .github/workflows/runner/tools-run/src/handle_errors.py
index 802747c..6968a70 100644
--- a/.github/workflows/runner/handle_errors.py
+++ b/.github/workflows/runner/tools-run/src/handle_errors.py
@@ -1,12 +1,14 @@
 import re

-from ignored_errors_list import FoundMatch, IgnoredErrorsList, MatchingMode, UnexpectedErrorText
+from src.ignored_errors_list import FoundMatch, IgnoredErrorsList, MatchingMode, UnexpectedErrorText
+from src.error_match_in_test import ErrorMatchInTest


 def extract_and_classify_errors(
     output: str,
     error_regex: str,
     ignored_errors: IgnoredErrorsList,
-) -> tuple[list[UnexpectedErrorText], list[FoundMatch]]:
+    test_path: str,
+) -> tuple[list[UnexpectedErrorText], list[ErrorMatchInTest]]:
     """
     Extract errors from the output and classify them as ignored or unexpected.
     Args:
@@ -23,7 +25,7 @@ def extract_and_classify_errors(
         return [], []

     found_errors: list[UnexpectedErrorText] = []
-    found_matches: list[FoundMatch] = []
+    found_matches: list[ErrorMatchInTest] = []

     for match in matches:
         error_text = match.group(0)
@@ -33,7 +35,12 @@ def extract_and_classify_errors(
             print(f"\033[91mFound unexpected error: {error_text}\033[0m\n")
             found_errors.append(error_text)
         else:
-            found_matches.append(found_match)
+            found_matches.append(
+                ErrorMatchInTest(
+                    match=found_match,
+                    test_path=test_path,
+                )
+            )

     return found_errors, found_matches
diff --git a/.github/workflows/runner/ignored_errors_list.py b/.github/workflows/runner/tools-run/src/ignored_errors_list.py
similarity index 92%
rename from .github/workflows/runner/ignored_errors_list.py
rename to .github/workflows/runner/tools-run/src/ignored_errors_list.py
index 27bceae..d0bd133 100644
--- a/.github/workflows/runner/ignored_errors_list.py
+++ b/.github/workflows/runner/tools-run/src/ignored_errors_list.py
@@ -1,9 +1,10 @@
 from pathlib import Path
 from typing import List
 import re
-import yaml
 from enum import Enum

+import yaml
+
 UnexpectedErrorText = str


@@ -80,8 +81,8 @@ def _load_errors(self, dir_path: str) -> None:
         dir_path_obj = Path(dir_path)
         if not dir_path_obj.exists():
             print(
-                f"Warning: Directory '{
-                    dir_path}' does not exist. No ignored errors loaded."
+                f"""Warning: Directory '{
+                    dir_path_obj.absolute()}' does not exist. No ignored errors loaded."""
             )
             self._errors = []
             return
@@ -89,8 +90,8 @@ def _load_errors(self, dir_path: str) -> None:
         yaml_files = list(dir_path_obj.glob("*.yaml"))
         if not yaml_files:
             print(
-                f"Warning: No YAML files found in '{
-                    dir_path}'. No ignored errors loaded."
+                f"""Warning: No YAML files found in '{
+                    dir_path_obj.absolute()}'. No ignored errors loaded."""
             )

         errors: List[KnownError] = []
@@ -110,8 +111,8 @@ def _load_errors(self, dir_path: str) -> None:
                     errors.append(KnownError(id=error_id, pattern=pattern, mode=mode))
                 else:
                     print(
-                        f"Warning: {
-                            yaml_file} missing 'id' or 'regex', skipping."
+ f"""Warning: { + yaml_file} missing 'id' or 'regex', skipping.""" ) except Exception as e: print(f"Warning: Failed to parse {yaml_file}: {e}") @@ -138,8 +139,8 @@ def match(self, input_text: str, mode: MatchingMode) -> FoundMatch | None: match = re.search(ignored_error.pattern, input_text, re.MULTILINE) if match: print( - f"Found ignored error (ignored error): Pattern: { - ignored_error.pattern}\n" + f"""Found ignored error (ignored error): Pattern: { + ignored_error.pattern}\n""" ) return FoundMatch(error=ignored_error, matched_text=match.group(0)) return None diff --git a/.github/workflows/runner/tools-run/src/known_errors_report.py b/.github/workflows/runner/tools-run/src/known_errors_report.py new file mode 100644 index 0000000..c981397 --- /dev/null +++ b/.github/workflows/runner/tools-run/src/known_errors_report.py @@ -0,0 +1,59 @@ +from typing import List, Set +import json +from datetime import datetime + +from src.error_match_in_test import ErrorMatchInTest +from src.ignored_errors_list import KnownError + + +class ErrorsCount: + def __init__(self, error_id: str): + self.overall = 0 # total number of times this error was found + self.tests_paths: Set[str] = set() # paths of tests where this error was found + self.error_id = error_id + + def to_dict(self): + test_paths = list(self.tests_paths) + return { + "error_id": self.error_id, + "overall": self.overall, + "test_paths_count": len(test_paths), + "tests_paths": test_paths, + } + + +class KnownErrorsReport: + def __init__(self, commit: str): + self.found_errors: List[ErrorsCount] = [] + self.commit = commit + self.date = datetime.now().isoformat() + + def add_error(self, error: ErrorMatchInTest): + if not isinstance(error.match.error, KnownError): + return + error_id = error.match.error.id + + existing_error = None + for errors_count in self.found_errors: + if errors_count.error_id == error_id: + existing_error = errors_count + break + + target_error = existing_error if existing_error else ErrorsCount(error_id) + target_error.tests_paths.add(error.test_path) + target_error.overall += 1 + if not existing_error: + self.found_errors.append(target_error) + + def add_errors(self, errors: List[ErrorMatchInTest]): + for error in errors: + self.add_error(error) + + def save(self, file_path: str): + data = { + "errors": [error.to_dict() for error in self.found_errors], + "commit": self.commit, + "date": self.date, + } + with open(file_path, "w") as file: + json.dump(data, file, indent=2) diff --git a/.github/workflows/runner/run_test.py b/.github/workflows/runner/tools-run/src/run_test.py similarity index 81% rename from .github/workflows/runner/run_test.py rename to .github/workflows/runner/tools-run/src/run_test.py index a2dfc2b..7c9b88b 100644 --- a/.github/workflows/runner/run_test.py +++ b/.github/workflows/runner/tools-run/src/run_test.py @@ -1,7 +1,8 @@ import subprocess -from find_top import find_top -from handle_errors import FoundMatch, UnexpectedErrorText, extract_and_classify_errors, match_whole_output -from ignored_errors_list import IgnoredErrorsList +from src.find_top import find_top +from src.handle_errors import FoundMatch, UnexpectedErrorText, extract_and_classify_errors, match_whole_output +from src.ignored_errors_list import IgnoredErrorsList +from src.error_match_in_test import ErrorMatchInTest COMMAND_TIMEOUT_MINUTES = 7 COMMAND_TIMEOUT_SECONDS = COMMAND_TIMEOUT_MINUTES * 60 @@ -26,14 +27,21 @@ def execute_command(cmd: str) -> tuple[str, int]: print(f"Execute: {cmd}") try: - result = subprocess.run(cmd, shell=True, 
+        result = subprocess.run(
+            cmd,
+            shell=True,
+            stdout=subprocess.PIPE,
+            stderr=subprocess.STDOUT,
+            text=True,
+            timeout=COMMAND_TIMEOUT_SECONDS,
+        )
         output = result.stdout
         print(f"Exit code: {result.returncode}. Output:\n{output}")
         return output, result.returncode
     except subprocess.TimeoutExpired as timeout_error:
         print(
-            f"Command timed out after {
-                COMMAND_TIMEOUT_MINUTES} minutes: {timeout_error}"
+            f"""Command timed out after {
+                COMMAND_TIMEOUT_MINUTES} minutes: {timeout_error}"""
         )
         return f"Command timed out after {COMMAND_TIMEOUT_MINUTES} minutes: {timeout_error}", 1
     except Exception as error:
@@ -69,7 +77,7 @@ def print_file(file_content: str, file_path: str) -> None:

 def run_test(
     cmd: str, file_content: str, file_path: str, error_regex: str, ignored_errors: IgnoredErrorsList
-) -> tuple[bool, list[str], list[FoundMatch]]:
+) -> tuple[bool, list[str], list[ErrorMatchInTest]]:
     """
     Run a single test (analysis or simulation) and handle its errors.
     Returns:
@@ -83,6 +91,7 @@ def run_test(
         output,
         error_regex,
         ignored_errors,
+        test_path=file_path,
     )

     # Match whole output
@@ -92,7 +101,7 @@ def run_test(
     if unexpected_error_whole == None:
         unexpected_errors.append("\n".join(output.splitlines()[:3]))
     elif isinstance(unexpected_error_whole, FoundMatch):
-        found_matches.append(unexpected_error_whole)
+        found_matches.append(ErrorMatchInTest(match=unexpected_error_whole, test_path=file_path))

     if len(unexpected_errors) > 0:
         print_file(file_content, file_path)
diff --git a/.github/workflows/runner/utils.py b/.github/workflows/runner/tools-run/src/utils.py
similarity index 100%
rename from .github/workflows/runner/utils.py
rename to .github/workflows/runner/tools-run/src/utils.py
diff --git a/tests/printer/create-dir/run b/tests/printer/create-dir/run
index df69146..44d6240 100644
--- a/tests/printer/create-dir/run
+++ b/tests/printer/create-dir/run
@@ -1,5 +1,8 @@
 rm -rf newdir

-pack run verilog-model -o newdir/newdir -n 1 --seed 0,1 --seed-name --seed-content && cat newdir/newdir/*
+pack run verilog-model -o newdir/newdir -n 1 --seed 0,1 --seed-name --seed-content
+sleep 1
+sync
+cat newdir/newdir/*

 rm -rf newdir
diff --git a/tests/printer/model-coverage/run b/tests/printer/model-coverage/run
index 5ae4e0e..e1a646f 100644
--- a/tests/printer/model-coverage/run
+++ b/tests/printer/model-coverage/run
@@ -1,5 +1,8 @@
 rm -f mcov

-pack run verilog-model --coverage mcov -n 1 --seed 0,1 && cat mcov
+pack run verilog-model --coverage mcov -n 1 --seed 0,1
+sleep 1
+sync
+cat mcov

 rm -f mcov
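The aggregation step introduced in `.github/workflows/runner/combined-report` can be dry-run locally against the bundled fixtures. Below is a minimal sketch, assuming the working directory is `.github/workflows/runner/combined-report` (so the `src` imports resolve the same way as for `main.py`); the expected numbers are exactly those asserted in `tests/test_combined_report.py`.

```python
# Local dry-run of the combined-report aggregation, using the test fixtures.
from src.previous_report import PreviousReport
from src.tools_report_list import ToolsReportsList
from src.combined_report import CombinedReport

combined = CombinedReport(
    previous_report=PreviousReport(file_path="tests/data/previous_report.json"),
    tools_reports_list=ToolsReportsList(dir_path="tests/data", pattern="*-run-stats.json"),
    tests_number=256,
).combine()

# Merge rule: counters from each tool report are added onto the history,
# and `last` is overwritten by the newest occurrence (10 previous + 32 new = 42).
assert combined.errors["t_dll_api_cc_ivl_nexus_s"].overall == 42
assert combined.errors["cannot_be_driven_with_non_default_strength"].last.commit == "abc12345"

combined.save("combined-run-stats.json")  # same JSON shape as tests/data/previous_report.json
```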
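One subtlety of the per-tool `*-run-stats.json` format is the split between `overall` and `test_paths_count`: the same known error can fire several times inside one generated module, so `overall` counts every match while the test paths are deduplicated through a set. A small sketch against `tools-run/src/known_errors_report.py`, run from `.github/workflows/runner/tools-run` and using a made-up test path:

```python
from src.known_errors_report import ErrorsCount

counter = ErrorsCount(error_id="some_known_error")
counter.tests_paths.add("generated/1-seed_0,1.sv")
counter.tests_paths.add("generated/1-seed_0,1.sv")  # same test hit twice: the set dedups
counter.overall += 2

as_dict = counter.to_dict()
assert as_dict["overall"] == 2           # every hit is counted
assert as_dict["test_paths_count"] == 1  # but only one distinct test path
```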
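On the `ZLIBNCD` to `LZMANCD` swap in `found_error.py`: both classes come from `textdistance` and expose the same `distance` call, so the change is drop-in; only the compressor behind the normalized compression distance changes (LZMA instead of zlib). Call shape for reference:

```python
from textdistance import LZMANCD

# Normalized compression distance; values near 0 mean the two error texts are similar.
print(LZMANCD().distance("error: unexpected token", "error: unexpected EOF"))
```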