diff --git a/.github/workflows/sycl-linux-precommit.yml b/.github/workflows/sycl-linux-precommit.yml index f9ed0e8734f05..2a6c481b0b2cd 100644 --- a/.github/workflows/sycl-linux-precommit.yml +++ b/.github/workflows/sycl-linux-precommit.yml @@ -47,6 +47,7 @@ permissions: read-all jobs: detect_changes: + if: false # XXX: temporarily disabled for CI debugging — do not merge uses: ./.github/workflows/sycl-detect-changes.yml build: diff --git a/.github/workflows/sycl-linux-run-tests.yml b/.github/workflows/sycl-linux-run-tests.yml index 90b8cc6677545..5be5134937334 100644 --- a/.github/workflows/sycl-linux-run-tests.yml +++ b/.github/workflows/sycl-linux-run-tests.yml @@ -131,9 +131,9 @@ on: type: string default: 'false' required: False - benchmark_gdb_mode: + benchmark_debug_mode: description: | - Whether or not to run benchmarks under gdb. + Whether or not to run benchmarks in debug mode. type: string required: False default: '0' @@ -373,7 +373,7 @@ jobs: exit_on_failure: ${{ inputs.benchmark_exit_on_failure }} build_ref: ${{ inputs.repo_ref }} runner: ${{ inputs.runner }} - gdb_mode: ${{ inputs.benchmark_gdb_mode }} + debug_mode: ${{ inputs.benchmark_debug_mode }} custom_cr: ${{ inputs.benchmark_custom_cr }} - name: Debug CI platform information diff --git a/.github/workflows/sycl-post-commit.yml b/.github/workflows/sycl-post-commit.yml index bd4dea471c8a9..b4e457d83cd7e 100644 --- a/.github/workflows/sycl-post-commit.yml +++ b/.github/workflows/sycl-post-commit.yml @@ -29,7 +29,8 @@ permissions: read-all jobs: detect_changes: - if: ${{ github.event_name == 'pull_request' }} + # if: ${{ github.event_name == 'pull_request' }} + if: false # XXX: temporarily disabled for CI debugging — do not merge uses: ./.github/workflows/sycl-detect-changes.yml build-lin: diff --git a/.github/workflows/sycl-ur-perf-benchmarking.yml b/.github/workflows/sycl-ur-perf-benchmarking.yml index a1e5f44cc02a2..71141f2a5f9a2 100644 --- a/.github/workflows/sycl-ur-perf-benchmarking.yml +++ b/.github/workflows/sycl-ur-perf-benchmarking.yml @@ -90,12 +90,13 @@ on: - 'false' - 'true' default: 'false' - 
gdb_mode: - description: Whether or not to run benchmarks under gdb. + debug_mode: + description: Whether or not to run benchmarks in debug mode. 0 - run normally, 1 - run with gdb, 2 - run with unitrace type: choice options: - '0' - '1' + - '2' default: '0' custom_cr: description: Custom Compute Runtime to use in benchmarks. @@ -205,7 +206,7 @@ jobs: uses: ./.github/workflows/sycl-linux-run-tests.yml secrets: inherit with: - name: "Benchmarks (${{ matrix.runner }}, ${{ matrix.backend }}, preset: ${{ inputs.preset }}, gdb_mode: ${{ inputs.gdb_mode }})" + name: "Benchmarks (${{ matrix.runner }}, ${{ matrix.backend }}, preset: ${{ inputs.preset }}, debug: ${{ inputs.debug_mode }})" runner: ${{ matrix.runner }} image: ghcr.io/intel/llvm/sycl_ubuntu2404_nightly:latest image_options: -u 1001 --device=/dev/dri -v /dev/dri/by-path:/dev/dri/by-path --privileged --cap-add SYS_ADMIN @@ -215,7 +216,7 @@ jobs: benchmark_save_name: ${{ needs.sanitize_inputs_dispatch.outputs.benchmark_save_name }} benchmark_preset: ${{ inputs.preset }} benchmark_exit_on_failure: ${{ inputs.exit_on_failure }} - benchmark_gdb_mode: ${{ inputs.gdb_mode }} + benchmark_debug_mode: ${{ inputs.debug_mode }} benchmark_custom_cr: ${{ inputs.custom_cr }} repo_ref: ${{ needs.sanitize_inputs_dispatch.outputs.build_ref }} toolchain_artifact: ${{ needs.build_sycl_dispatch.outputs.toolchain_artifact }} @@ -276,7 +277,8 @@ jobs: permissions: contents: write packages: read - if: github.event_name == 'pull_request' + # if: github.event_name == 'pull_request' + if: false # XXX: temporarily disabled for CI debugging — do not merge uses: ./.github/workflows/sycl-linux-run-tests.yml with: name: 'Framework test only: L0, Minimal preset, dry-run' diff --git a/devops/actions/run-tests/benchmark/action.yml b/devops/actions/run-tests/benchmark/action.yml index f5188f70dd125..e422d38514df4 100644 --- a/devops/actions/run-tests/benchmark/action.yml +++ b/devops/actions/run-tests/benchmark/action.yml @@ -23,7 +23,8 @@ inputs: type: string required: False default: "" - gdb_mode: 
+ # 0 - run normally, 1 - run with gdb, 2 - run with unitrace + debug_mode: type: string required: False default: "0" @@ -280,7 +281,7 @@ runs: env: BENCH_WORKDIR: ${{ steps.establish_outputs.outputs.BENCH_WORKDIR }} BENCHMARK_RESULTS_REPO_PATH: ${{ steps.establish_outputs.outputs.BENCHMARK_RESULTS_REPO_PATH }} - LLVM_BENCHMARKS_USE_GDB: ${{ inputs.gdb_mode }} + LLVM_BENCHMARKS_DEBUG_MODE: ${{ inputs.debug_mode }} CR_BUILD_REF: ${{ steps.establish_outputs.outputs.CR_BUILD_REF }} run: | # Build and run benchmarks @@ -308,14 +309,17 @@ runs: --timestamp-override "$SAVE_TIMESTAMP" \ --detect-version sycl,compute_runtime \ --produce-github-summary \ + --verbose \ $([[ -n "${CR_BUILD_REF}" ]] && echo "--compute-runtime ${CR_BUILD_REF}" || echo '') \ - ${{ inputs.exit_on_failure == 'true' && '--exit-on-failure --iterations 1' || '' }} + ${{ inputs.exit_on_failure == 'true' && '--exit-on-failure --iterations 1' || '' }} \ + ${{ inputs.debug_mode == '2' && '--unitrace inclusive' || '' }} # TODO: add back: "--flamegraph inclusive" once works properly echo "::endgroup::" echo "::group::compare_results" - if [ "$LLVM_BENCHMARKS_USE_GDB" == "0" ]; then + # Skip comparison if in debug mode + if [ "$LLVM_BENCHMARKS_DEBUG_MODE" == "0" ]; then python3 ./devops/scripts/benchmarks/compare.py to_hist \ --avg-type EWMA \ --cutoff "$(date -u -d '7 days ago' +'%Y%m%d_%H%M%S')" \ @@ -328,7 +332,7 @@ runs: --produce-github-summary \ ${{ inputs.dry_run == 'true' && '--dry-run' || '' }} else - echo "Skipping regression comparison due to GDB mode enabled." + echo "Skipping regression comparison due to debug mode enabled." 
fi echo "::endgroup::" @@ -361,7 +365,7 @@ runs: cp "$diff" "../cached_changes/$diff" done - name: Push benchmarks results - if: always() && inputs.upload_results == 'true' && inputs.gdb_mode == '0' + if: always() && inputs.upload_results == 'true' && inputs.debug_mode == '0' shell: bash env: BENCH_WORKDIR: ${{ steps.establish_outputs.outputs.BENCH_WORKDIR }} @@ -412,9 +416,16 @@ runs: --dry-run cd - done + - name: Archive unitrace results + if: inputs.debug_mode == '2' && always() + uses: actions/upload-artifact@bbbca2ddaa5d8feaa63e36b76fdaad77386f024f # v7.0.0 + with: + name: "Unitrace ${{ env.SAVE_NAME }} (run id ${{ github.run_id }})" + path: ./llvm-ci-perf-results/results/traces + retention-days: 30 - name: Archive benchmark results if: always() - uses: actions/upload-artifact@v4 + uses: actions/upload-artifact@bbbca2ddaa5d8feaa63e36b76fdaad77386f024f # v7.0.0 with: - name: Benchmark run ${{ github.run_id }} (${{ env.SAVE_NAME }}) + name: "Benchmark ${{ env.SAVE_NAME }} (run id ${{ github.run_id }})" path: ./cached_changes diff --git a/devops/scripts/benchmarks/README.md b/devops/scripts/benchmarks/README.md index 532cb78704325..59c60d8e4cfe9 100644 --- a/devops/scripts/benchmarks/README.md +++ b/devops/scripts/benchmarks/README.md @@ -118,7 +118,7 @@ The benchmarks scripts are used in a GitHub Actions workflow, and can be automat ![compute benchmarks](workflow.png "Compute Benchmarks CI job") -To execute the benchmarks in CI, navigate to the `Actions` tab and then go to the `SYCL Run Benchmarks` workflow. Here, you will find a list of previous runs and a "Run workflow" button. Upon clicking the button, you will be prompted to fill in a form to customize your benchmark run. Important field is the `PR number`, which is the identifier for the Pull Request against which you want the benchmarks to run. 
Instead, you can specify `Commit hash` from within intel/llvm repository, or leave both empty to run benchmarks against the branch/tag the workflow started from (the value from dropdown list at the top). You can run benchmarks in debug mode by enabling `gdb_mode`. +To execute the benchmarks in CI, navigate to the `Actions` tab and then go to the `SYCL Run Benchmarks` workflow. Here, you will find a list of previous runs and a "Run workflow" button. Upon clicking the button, you will be prompted to fill in a form to customize your benchmark run. An important field is the `PR number`, which is the identifier for the Pull Request against which you want the benchmarks to run. Alternatively, you can specify a `Commit hash` from within the intel/llvm repository, or leave both empty to run benchmarks against the branch/tag the workflow started from (the value from the dropdown list at the top). You can run benchmarks in debug mode by setting `debug_mode` to '1' (gdb enabled) or '2' (unitrace enabled). Once all the information is entered, click the "Run workflow" button to initiate a new workflow run. This will execute the benchmarks and then post the results as a comment on the specified Pull Request. 
diff --git a/devops/scripts/benchmarks/benches/base.py b/devops/scripts/benchmarks/benches/base.py index fdc0f2a4fb1db..5a5059cbf8e8b 100644 --- a/devops/scripts/benchmarks/benches/base.py +++ b/devops/scripts/benchmarks/benches/base.py @@ -140,8 +140,9 @@ def run_bench( if extra_trace_opt is None: extra_trace_opt = [] unitrace_output, command = get_unitrace().setup( - self.name(), command, extra_trace_opt + self.name().replace(":", "="), command, extra_trace_opt ) + # ':' is a problematic character for Github actions/artifacts log.debug(f"Unitrace output: {unitrace_output}") log.debug(f"Unitrace command: {' '.join(command)}") diff --git a/devops/scripts/benchmarks/benches/compute/compute.py b/devops/scripts/benchmarks/benches/compute/compute.py index 1923d56e6417d..60d826f04947b 100644 --- a/devops/scripts/benchmarks/benches/compute/compute.py +++ b/devops/scripts/benchmarks/benches/compute/compute.py @@ -71,7 +71,7 @@ def setup(self) -> None: f"-DCMAKE_PREFIX_PATH={options.sycl}", ] - is_gdb_mode = os.environ.get("LLVM_BENCHMARKS_USE_GDB", "") == "1" + is_gdb_mode = os.environ.get("LLVM_BENCHMARKS_DEBUG_MODE", "") == "1" if is_gdb_mode: extra_args += [ f"-DCMAKE_CXX_FLAGS_RELWITHDEBINFO:STRING=-O2 -g -DNDEBUG -fdebug-info-for-profiling", diff --git a/devops/scripts/benchmarks/benches/compute/compute_benchmark.py b/devops/scripts/benchmarks/benches/compute/compute_benchmark.py index a71429235a502..d26a42c5df31d 100644 --- a/devops/scripts/benchmarks/benches/compute/compute_benchmark.py +++ b/devops/scripts/benchmarks/benches/compute/compute_benchmark.py @@ -151,7 +151,7 @@ def __enabled_runtimes(self) -> list[RUNTIMES]: return runtimes def __parse_output(self, output: str) -> list[tuple[float, float]]: - is_gdb_mode = os.environ.get("LLVM_BENCHMARKS_USE_GDB", "") == "1" + is_gdb_mode = os.environ.get("LLVM_BENCHMARKS_DEBUG_MODE", "") == "1" if is_gdb_mode: log.info(output) diff --git a/devops/scripts/benchmarks/git_project.py 
b/devops/scripts/benchmarks/git_project.py index 33b4bee638f64..ceb6d25b3754d 100644 --- a/devops/scripts/benchmarks/git_project.py +++ b/devops/scripts/benchmarks/git_project.py @@ -79,7 +79,7 @@ def configure( ) -> None: """Configures the project.""" - is_gdb_mode = os.environ.get("LLVM_BENCHMARKS_USE_GDB", "") == "1" + is_gdb_mode = os.environ.get("LLVM_BENCHMARKS_DEBUG_MODE", "") == "1" build_type = "RelWithDebInfo" if is_gdb_mode else "Release" cmd = [ diff --git a/devops/scripts/benchmarks/main.py b/devops/scripts/benchmarks/main.py index 913a7b3aa9d8e..bcf22d88762ca 100755 --- a/devops/scripts/benchmarks/main.py +++ b/devops/scripts/benchmarks/main.py @@ -389,6 +389,7 @@ def main(directory, additional_env_vars, compare_names, filter, execution_stats) if options.unitrace and ( benchmark.traceable(TracingType.UNITRACE) or args.unitrace == "force" ): + log.debug(f"Running unitrace for {benchmark.name()}...") iterations_rc = run_iterations( benchmark, merged_env_vars, diff --git a/devops/scripts/benchmarks/utils/unitrace.py b/devops/scripts/benchmarks/utils/unitrace.py index f788b11c69be3..9f335bd615c59 100644 --- a/devops/scripts/benchmarks/utils/unitrace.py +++ b/devops/scripts/benchmarks/utils/unitrace.py @@ -23,6 +23,10 @@ class Unitrace: """Unitrace wrapper for managing Unitrace tool execution and results.""" + def git_hash(self) -> str: + # NOTE(review): returns the pinned pti-gpu release *tag* (not a commit hash, despite the name); pinned as of Mar 16, 2026 + return "pti-0.12.4" + def __init__(self): self.timestamp = ( datetime.now(tz=timezone.utc).strftime(options.TIMESTAMP_FORMAT) @@ -30,10 +34,11 @@ def __init__(self): else options.timestamp_override ) + log.info("Downloading and building Unitrace...") self.project = GitProject( "https://github.com/intel/pti-gpu.git", - "pti-0.12.4", + self.git_hash(), Path(options.workdir), "pti-gpu", ) @@ -56,6 +61,7 @@ def __init__(self): "-DBUILD_WITH_ITT=1", "-DBUILD_WITH_XPTI=1", "-DBUILD_WITH_MPI=0", + "-DCMAKE_POLICY_VERSION_MINIMUM=3.5", ], add_sycl=True, ) @@ -122,7 +128,6 @@ def setup( + extra_unitrace_opt + 
command ) - log.debug(f"Unitrace cmd: {' '.join(unitrace_command)}") return unitrace_output, unitrace_command diff --git a/devops/scripts/benchmarks/utils/utils.py b/devops/scripts/benchmarks/utils/utils.py index 9b0a149042e4c..5dffe838365e2 100644 --- a/devops/scripts/benchmarks/utils/utils.py +++ b/devops/scripts/benchmarks/utils/utils.py @@ -50,7 +50,7 @@ def run( if isinstance(command, str): command = command.split() - is_gdb_mode = os.environ.get("LLVM_BENCHMARKS_USE_GDB", "") == "1" + is_gdb_mode = os.environ.get("LLVM_BENCHMARKS_DEBUG_MODE", "") == "1" if any("/compute-benchmarks-build/bin/" in x for x in command) and is_gdb_mode: command = [ "gdb",