Skip to content
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions .github/workflows/sycl-linux-precommit.yml
Original file line number Diff line number Diff line change
Expand Up @@ -47,6 +47,7 @@ permissions: read-all

jobs:
detect_changes:
if: false # XXX
uses: ./.github/workflows/sycl-detect-changes.yml

build:
Expand Down
6 changes: 3 additions & 3 deletions .github/workflows/sycl-linux-run-tests.yml
Original file line number Diff line number Diff line change
Expand Up @@ -131,9 +131,9 @@ on:
type: string
default: 'false'
required: False
benchmark_gdb_mode:
benchmark_debug_mode:
description: |
Whether or not to run benchmarks under gdb.
Whether or not to run benchmarks in debug mode.
type: string
required: False
default: '0'
Expand Down Expand Up @@ -373,7 +373,7 @@ jobs:
exit_on_failure: ${{ inputs.benchmark_exit_on_failure }}
build_ref: ${{ inputs.repo_ref }}
runner: ${{ inputs.runner }}
gdb_mode: ${{ inputs.benchmark_gdb_mode }}
debug_mode: ${{ inputs.benchmark_debug_mode }}
custom_cr: ${{ inputs.benchmark_custom_cr }}

- name: Debug CI platform information
Expand Down
3 changes: 2 additions & 1 deletion .github/workflows/sycl-post-commit.yml
Original file line number Diff line number Diff line change
Expand Up @@ -29,7 +29,8 @@ permissions: read-all

jobs:
detect_changes:
if: ${{ github.event_name == 'pull_request' }}
# if: ${{ github.event_name == 'pull_request' }}
if: false # XXX
uses: ./.github/workflows/sycl-detect-changes.yml

build-lin:
Expand Down
12 changes: 7 additions & 5 deletions .github/workflows/sycl-ur-perf-benchmarking.yml
Original file line number Diff line number Diff line change
Expand Up @@ -90,12 +90,13 @@ on:
- 'false'
- 'true'
default: 'false'
gdb_mode:
description: Whether or not to run benchmarks under gdb.
debug_mode:
        description: Whether or not to run benchmarks in debug mode. 0 - run normally, 1 - run with gdb, 2 - run with unitrace.
type: choice
options:
- '0'
- '1'
- '2'
default: '0'
custom_cr:
description: Custom Compute Runtime to use in benchmarks.
Expand Down Expand Up @@ -205,7 +206,7 @@ jobs:
uses: ./.github/workflows/sycl-linux-run-tests.yml
secrets: inherit
with:
name: "Benchmarks (${{ matrix.runner }}, ${{ matrix.backend }}, preset: ${{ inputs.preset }}, gdb_mode: ${{ inputs.gdb_mode }})"
name: "Benchmarks (${{ matrix.runner }}, ${{ matrix.backend }}, preset: ${{ inputs.preset }}, debug: ${{ inputs.debug_mode }})"
runner: ${{ matrix.runner }}
image: ghcr.io/intel/llvm/sycl_ubuntu2404_nightly:latest
image_options: -u 1001 --device=/dev/dri -v /dev/dri/by-path:/dev/dri/by-path --privileged --cap-add SYS_ADMIN
Expand All @@ -215,7 +216,7 @@ jobs:
benchmark_save_name: ${{ needs.sanitize_inputs_dispatch.outputs.benchmark_save_name }}
benchmark_preset: ${{ inputs.preset }}
benchmark_exit_on_failure: ${{ inputs.exit_on_failure }}
benchmark_gdb_mode: ${{ inputs.gdb_mode }}
benchmark_debug_mode: ${{ inputs.debug_mode }}
benchmark_custom_cr: ${{ inputs.custom_cr }}
repo_ref: ${{ needs.sanitize_inputs_dispatch.outputs.build_ref }}
toolchain_artifact: ${{ needs.build_sycl_dispatch.outputs.toolchain_artifact }}
Expand Down Expand Up @@ -276,7 +277,8 @@ jobs:
permissions:
contents: write
packages: read
if: github.event_name == 'pull_request'
# if: github.event_name == 'pull_request'
if: false # XXX
uses: ./.github/workflows/sycl-linux-run-tests.yml
with:
name: 'Framework test only: L0, Minimal preset, dry-run'
Expand Down
27 changes: 19 additions & 8 deletions devops/actions/run-tests/benchmark/action.yml
Original file line number Diff line number Diff line change
Expand Up @@ -23,7 +23,8 @@ inputs:
type: string
required: False
default: ""
gdb_mode:
# 0 - run normally, 1 - run with gdb, 2 - run with unitrace
debug_mode:
type: string
required: False
default: "0"
Expand Down Expand Up @@ -280,7 +281,7 @@ runs:
env:
BENCH_WORKDIR: ${{ steps.establish_outputs.outputs.BENCH_WORKDIR }}
BENCHMARK_RESULTS_REPO_PATH: ${{ steps.establish_outputs.outputs.BENCHMARK_RESULTS_REPO_PATH }}
LLVM_BENCHMARKS_USE_GDB: ${{ inputs.gdb_mode }}
LLVM_BENCHMARKS_DEBUG_MODE: ${{ inputs.debug_mode }}
CR_BUILD_REF: ${{ steps.establish_outputs.outputs.CR_BUILD_REF }}
run: |
# Build and run benchmarks
Expand Down Expand Up @@ -308,14 +309,17 @@ runs:
--timestamp-override "$SAVE_TIMESTAMP" \
--detect-version sycl,compute_runtime \
--produce-github-summary \
--verbose \
$([[ -n "${CR_BUILD_REF}" ]] && echo "--compute-runtime ${CR_BUILD_REF}" || echo '') \
${{ inputs.exit_on_failure == 'true' && '--exit-on-failure --iterations 1' || '' }}
${{ inputs.exit_on_failure == 'true' && '--exit-on-failure --iterations 1' || '' }} \
${{ inputs.debug_mode == '2' && '--unitrace inclusive' || '' }}
# TODO: add back: "--flamegraph inclusive" once works properly

echo "::endgroup::"
echo "::group::compare_results"

if [ "$LLVM_BENCHMARKS_USE_GDB" == "0" ]; then
# Skip comparison if in debug mode
if [ "$LLVM_BENCHMARKS_DEBUG_MODE" == "0" ]; then
python3 ./devops/scripts/benchmarks/compare.py to_hist \
--avg-type EWMA \
--cutoff "$(date -u -d '7 days ago' +'%Y%m%d_%H%M%S')" \
Expand All @@ -328,7 +332,7 @@ runs:
--produce-github-summary \
${{ inputs.dry_run == 'true' && '--dry-run' || '' }}
else
echo "Skipping regression comparison due to GDB mode enabled."
echo "Skipping regression comparison due to debug mode enabled."
fi

echo "::endgroup::"
Expand Down Expand Up @@ -361,7 +365,7 @@ runs:
cp "$diff" "../cached_changes/$diff"
done
- name: Push benchmarks results
if: always() && inputs.upload_results == 'true' && inputs.gdb_mode == '0'
if: always() && inputs.upload_results == 'true' && inputs.debug_mode == '0'
shell: bash
env:
BENCH_WORKDIR: ${{ steps.establish_outputs.outputs.BENCH_WORKDIR }}
Expand Down Expand Up @@ -412,9 +416,16 @@ runs:
--dry-run
cd -
done
- name: Archive unitrace results
if: inputs.debug_mode == '2' && always()
uses: actions/upload-artifact@bbbca2ddaa5d8feaa63e36b76fdaad77386f024f # v7.0.0
with:
name: "Unitrace ${{ env.SAVE_NAME }} (run id ${{ github.run_id }})"
path: ./llvm-ci-perf-results/results/traces
retention-days: 30
- name: Archive benchmark results
if: always()
uses: actions/upload-artifact@v4
uses: actions/upload-artifact@bbbca2ddaa5d8feaa63e36b76fdaad77386f024f # v7.0.0
with:
name: Benchmark run ${{ github.run_id }} (${{ env.SAVE_NAME }})
name: "Benchmark ${{ env.SAVE_NAME }} (run id ${{ github.run_id }})"
path: ./cached_changes
2 changes: 1 addition & 1 deletion devops/scripts/benchmarks/README.md
Original file line number Diff line number Diff line change
Expand Up @@ -118,7 +118,7 @@ The benchmarks scripts are used in a GitHub Actions workflow, and can be automat

![compute benchmarks](workflow.png "Compute Benchmarks CI job")

To execute the benchmarks in CI, navigate to the `Actions` tab and then go to the `SYCL Run Benchmarks` workflow. Here, you will find a list of previous runs and a "Run workflow" button. Upon clicking the button, you will be prompted to fill in a form to customize your benchmark run. Important field is the `PR number`, which is the identifier for the Pull Request against which you want the benchmarks to run. Instead, you can specify `Commit hash` from within intel/llvm repository, or leave both empty to run benchmarks against the branch/tag the workflow started from (the value from dropdown list at the top). You can run benchmarks in debug mode by enabling `gdb_mode`.
To execute the benchmarks in CI, navigate to the `Actions` tab and then go to the `SYCL Run Benchmarks` workflow. Here, you will find a list of previous runs and a "Run workflow" button. Upon clicking the button, you will be prompted to fill in a form to customize your benchmark run. An important field is the `PR number`, which identifies the Pull Request against which you want the benchmarks to run. Alternatively, you can specify a `Commit hash` from within the intel/llvm repository, or leave both empty to run benchmarks against the branch/tag the workflow started from (the value from the dropdown list at the top). You can run benchmarks in debug mode by setting `debug_mode` to '1' (gdb enabled) or '2' (unitrace enabled).

Once all the information is entered, click the "Run workflow" button to initiate a new workflow run. This will execute the benchmarks and then post the results as a comment on the specified Pull Request.

Expand Down
3 changes: 2 additions & 1 deletion devops/scripts/benchmarks/benches/base.py
Original file line number Diff line number Diff line change
Expand Up @@ -140,8 +140,9 @@ def run_bench(
if extra_trace_opt is None:
extra_trace_opt = []
unitrace_output, command = get_unitrace().setup(
self.name(), command, extra_trace_opt
self.name().replace(":", "="), command, extra_trace_opt
)
# ':' is a problematic character for Github actions/artifacts
log.debug(f"Unitrace output: {unitrace_output}")
log.debug(f"Unitrace command: {' '.join(command)}")

Expand Down
2 changes: 1 addition & 1 deletion devops/scripts/benchmarks/benches/compute/compute.py
Original file line number Diff line number Diff line change
Expand Up @@ -71,7 +71,7 @@ def setup(self) -> None:
f"-DCMAKE_PREFIX_PATH={options.sycl}",
]

is_gdb_mode = os.environ.get("LLVM_BENCHMARKS_USE_GDB", "") == "1"
is_gdb_mode = os.environ.get("LLVM_BENCHMARKS_DEBUG_MODE", "") == "1"
if is_gdb_mode:
extra_args += [
f"-DCMAKE_CXX_FLAGS_RELWITHDEBINFO:STRING=-O2 -g -DNDEBUG -fdebug-info-for-profiling",
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -151,7 +151,7 @@ def __enabled_runtimes(self) -> list[RUNTIMES]:
return runtimes

def __parse_output(self, output: str) -> list[tuple[float, float]]:
is_gdb_mode = os.environ.get("LLVM_BENCHMARKS_USE_GDB", "") == "1"
is_gdb_mode = os.environ.get("LLVM_BENCHMARKS_DEBUG_MODE", "") == "1"

if is_gdb_mode:
log.info(output)
Expand Down
2 changes: 1 addition & 1 deletion devops/scripts/benchmarks/git_project.py
Original file line number Diff line number Diff line change
Expand Up @@ -79,7 +79,7 @@ def configure(
) -> None:
"""Configures the project."""

is_gdb_mode = os.environ.get("LLVM_BENCHMARKS_USE_GDB", "") == "1"
is_gdb_mode = os.environ.get("LLVM_BENCHMARKS_DEBUG_MODE", "") == "1"
build_type = "RelWithDebInfo" if is_gdb_mode else "Release"

cmd = [
Expand Down
1 change: 1 addition & 0 deletions devops/scripts/benchmarks/main.py
Original file line number Diff line number Diff line change
Expand Up @@ -389,6 +389,7 @@ def main(directory, additional_env_vars, compare_names, filter, execution_stats)
if options.unitrace and (
benchmark.traceable(TracingType.UNITRACE) or args.unitrace == "force"
):
log.debug(f"Running unitrace for {benchmark.name()}...")
iterations_rc = run_iterations(
benchmark,
merged_env_vars,
Expand Down
9 changes: 7 additions & 2 deletions devops/scripts/benchmarks/utils/unitrace.py
Original file line number Diff line number Diff line change
Expand Up @@ -23,17 +23,22 @@
class Unitrace:
"""Unitrace wrapper for managing Unitrace tool execution and results."""

def git_hash(self) -> str:
# Mar 16, 2026
return "pti-0.12.4"

def __init__(self):
self.timestamp = (
datetime.now(tz=timezone.utc).strftime(options.TIMESTAMP_FORMAT)
if options.timestamp_override is None
else options.timestamp_override
)


log.info("Downloading and building Unitrace...")
self.project = GitProject(
"https://github.com/intel/pti-gpu.git",
"pti-0.12.4",
self.git_hash(),
Path(options.workdir),
"pti-gpu",
)
Expand All @@ -56,6 +61,7 @@ def __init__(self):
"-DBUILD_WITH_ITT=1",
"-DBUILD_WITH_XPTI=1",
"-DBUILD_WITH_MPI=0",
"-DCMAKE_POLICY_VERSION_MINIMUM=3.5",
],
add_sycl=True,
)
Expand Down Expand Up @@ -122,7 +128,6 @@ def setup(
+ extra_unitrace_opt
+ command
)
log.debug(f"Unitrace cmd: {' '.join(unitrace_command)}")

return unitrace_output, unitrace_command

Expand Down
2 changes: 1 addition & 1 deletion devops/scripts/benchmarks/utils/utils.py
Original file line number Diff line number Diff line change
Expand Up @@ -50,7 +50,7 @@ def run(
if isinstance(command, str):
command = command.split()

is_gdb_mode = os.environ.get("LLVM_BENCHMARKS_USE_GDB", "") == "1"
is_gdb_mode = os.environ.get("LLVM_BENCHMARKS_DEBUG_MODE", "") == "1"
if any("/compute-benchmarks-build/bin/" in x for x in command) and is_gdb_mode:
command = [
"gdb",
Expand Down
Loading