Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
4 changes: 2 additions & 2 deletions .github/configs/feature.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -9,11 +9,11 @@ develop:

benchmark:
evm-type: benchmark
fill-params: --fork=Osaka --gas-benchmark-values 1,5,10,30,60,100,150 ./tests/benchmark --maxprocesses=30 --dist=worksteal
fill-params: --fork=Osaka --gas-benchmark-values 1,5,10,30,60,100,150 ./tests/benchmark/compute --maxprocesses=30 --dist=worksteal

benchmark_fast:
evm-type: benchmark
fill-params: --fork=Osaka --gas-benchmark-values 100 ./tests/benchmark
fill-params: --fork=Osaka --gas-benchmark-values 100 ./tests/benchmark/compute
feature_only: true

bal:
Expand Down
2 changes: 2 additions & 0 deletions packages/testing/src/execution_testing/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -22,6 +22,7 @@
BenchmarkCodeGenerator,
ExtCallGenerator,
JumpLoopGenerator,
StubConfig,
)
from .checklists import EIPChecklist
from .exceptions import (
Expand Down Expand Up @@ -186,6 +187,7 @@
"StateTest",
"StateTestFiller",
"Storage",
"StubConfig",
"Switch",
"TestAddress",
"TestAddress2",
Expand Down
2 changes: 2 additions & 0 deletions packages/testing/src/execution_testing/benchmark/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -8,9 +8,11 @@
ExtCallGenerator,
JumpLoopGenerator,
)
from .stub_config import StubConfig

__all__ = (
"BenchmarkCodeGenerator",
"ExtCallGenerator",
"JumpLoopGenerator",
"StubConfig",
)
52 changes: 52 additions & 0 deletions packages/testing/src/execution_testing/benchmark/stub_config.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,52 @@
"""Benchmark stub configuration model."""

import json
import warnings
from pathlib import Path

from execution_testing.base_types import (
Address,
EthereumTestBaseModel,
)


class StubConfig(EthereumTestBaseModel):
    """
    Benchmark stub configuration with prefix-based token extraction.

    Build from an ``AddressStubs`` mapping (via ``--address-stubs``)
    or from a JSON file. Use ``extract_tokens`` to derive parameter
    lists for any prefix — no hardcoded categories required.
    """

    # Mapping of stub key (e.g. "test_sload_..._XEN") to its address.
    stubs: dict[str, Address]

    def extract_tokens(self, prefix: str) -> list[str]:
        """Return stub keys matching *prefix*, in insertion order."""
        return [k for k in self.stubs if k.startswith(prefix)]

    def parametrize_args(
        self, prefix: str, *, caller: str = ""
    ) -> tuple[list[str], list[str]]:
        """
        Return ``(values, ids)`` for ``metafunc.parametrize``.

        *values* are full stub keys matching *prefix*.
        *ids* are the keys with the prefix stripped for clean test output.
        Emits a warning when no stubs match.
        """
        values = self.extract_tokens(prefix)
        ids = [v.removeprefix(prefix) for v in values]
        if not values:
            label = f" for {caller}" if caller else ""
            warnings.warn(
                f"stub_parametrize: no stubs matched prefix "
                f"'{prefix}'{label}; test will be skipped",
                stacklevel=2,
            )
        return values, ids

    @classmethod
    def from_file(cls, path: Path) -> "StubConfig":
        """
        Load stubs from a JSON file.

        JSON is UTF-8 per RFC 8259; read with an explicit encoding so
        the platform locale default cannot corrupt the parse.
        """
        return cls(stubs=json.loads(path.read_text(encoding="utf-8")))
Original file line number Diff line number Diff line change
@@ -0,0 +1 @@
"""Tests for the benchmark module."""
Original file line number Diff line number Diff line change
@@ -0,0 +1,118 @@
"""Tests for the StubConfig model."""

import json
from pathlib import Path

import pytest

from execution_testing.benchmark.stub_config import StubConfig

ADDR = "0x398324972FcE0e89E048c2104f1298031d1931fc"


def test_extract_tokens_returns_full_keys() -> None:
    """Return full keys matching the prefix."""
    prefix = "test_sload_empty_erc20_balanceof_"
    matching = [f"{prefix}XEN", f"{prefix}USDC"]
    config = StubConfig(
        stubs={key: ADDR for key in [*matching, "unrelated_key"]}
    )
    # Insertion order of the stubs mapping is preserved in the result.
    assert config.extract_tokens(prefix) == matching


def test_extract_tokens_no_match() -> None:
    """Return empty list when no keys match the prefix."""
    config = StubConfig(stubs={"test_sstore_erc20_approve_XEN": ADDR})
    result = config.extract_tokens("test_sload_empty_erc20_balanceof_")
    assert result == []


def test_extract_tokens_empty_stubs() -> None:
    """Return empty list for empty stubs."""
    empty_config = StubConfig(stubs={})
    assert empty_config.extract_tokens("any_prefix_") == []


@pytest.mark.parametrize(
    "prefix",
    [
        "test_sload_empty_erc20_balanceof_",
        "test_sstore_erc20_approve_",
        "test_sstore_erc20_mint_",
        "test_mixed_sload_sstore_",
        "bloatnet_factory_",
    ],
)
def test_extract_tokens_various_prefixes(prefix: str) -> None:
    """Extract matching keys for each prefix."""
    expected = [f"{prefix}A", f"{prefix}B"]
    stubs = {key: ADDR for key in expected}
    stubs["unrelated_key"] = ADDR
    config = StubConfig(stubs=stubs)
    assert config.extract_tokens(prefix) == expected


def test_parametrize_args_values_and_ids() -> None:
    """Return full keys as values and stripped names as ids."""
    prefix = "test_sload_empty_erc20_balanceof_"
    tokens = ["XEN", "USDC"]
    config = StubConfig(stubs={f"{prefix}{t}": ADDR for t in tokens})
    values, ids = config.parametrize_args(prefix)
    # Values keep the full key; ids are the prefix-stripped suffixes.
    assert values == [f"{prefix}{t}" for t in tokens]
    assert ids == tokens


def test_parametrize_args_empty_warns() -> None:
    """Emit a warning when no stubs match the prefix."""
    empty_config = StubConfig(stubs={})
    with pytest.warns(UserWarning, match="no stubs matched prefix"):
        values, ids = empty_config.parametrize_args(
            "missing_prefix_", caller="test_foo"
        )
    assert (values, ids) == ([], [])


def test_from_file(tmp_path: Path) -> None:
    """Load stubs from a JSON file."""
    erc20_key = "test_sload_empty_erc20_balanceof_XEN"
    factory_key = "bloatnet_factory_1kb"
    stub_file = tmp_path / "stubs.json"
    stub_file.write_text(json.dumps({erc20_key: ADDR, factory_key: ADDR}))

    loaded = StubConfig.from_file(stub_file)
    assert loaded.extract_tokens("test_sload_empty_erc20_balanceof_") == [
        erc20_key
    ]
    assert loaded.extract_tokens("bloatnet_factory_") == [factory_key]


def test_from_file_not_found(tmp_path: Path) -> None:
    """Raise FileNotFoundError for missing files."""
    missing = tmp_path / "nonexistent.json"
    with pytest.raises(FileNotFoundError):
        StubConfig.from_file(missing)
Original file line number Diff line number Diff line change
Expand Up @@ -7,12 +7,19 @@
)
from execution_testing.fixtures import LabeledFixtureFormat
from execution_testing.forks import (
BPO1,
BPO2,
Amsterdam,
ArrowGlacier,
Fork,
forks_from_until,
get_deployed_forks,
get_forks,
)
from execution_testing.forks.forks.transition import (
BPO2ToAmsterdamAtTime15k,
OsakaToBPO1AtTime15k,
)
from execution_testing.specs import StateTest


Expand Down Expand Up @@ -260,3 +267,53 @@ def test_all_forks({StateTest.pytest_parameter_name()}):
skipped=0,
errors=0,
)


def test_transition_fork_until_excludes_target(
    pytester: pytest.Pytester,
) -> None:
    """
    Test that `--until` with a transition fork excludes the
    transition's target fork from the selected fork set.

    The "Generating fixtures for:" header printed by
    `pytest_report_header` reflects `config.selected_fork_set`.
    """
    pytester.makepyfile(
        f"""
        def test_fork_range({StateTest.pytest_parameter_name()}):
            pass
        """
    )
    pytester.copy_example(
        name="src/execution_testing/cli/pytest_commands/"
        "pytest_ini_files/pytest-fill.ini"
    )
    result = pytester.runpytest(
        "-c",
        "pytest-fill.ini",
        "-v",
        "--from",
        "OsakaToBPO1AtTime15k",
        "--until",
        "BPO2ToAmsterdamAtTime15k",
    )
    # The header line lists the selected fork set; locate and parse it.
    marker = "Generating fixtures for:"
    header_line = next(
        (line for line in result.stdout.lines if marker in line), None
    )
    assert header_line is not None
    fork_names = [
        name.strip() for name in header_line.split(marker)[1].split(",")
    ]
    # Strip ANSI codes from the last element.
    fork_names[-1] = fork_names[-1].split("\x1b")[0]
    assert Amsterdam.name() not in fork_names
    for included_fork in (
        BPO1,
        BPO2,
        BPO2ToAmsterdamAtTime15k,
        OsakaToBPO1AtTime15k,
    ):
        assert included_fork.name() in fork_names
Original file line number Diff line number Diff line change
Expand Up @@ -80,6 +80,10 @@ def pytest_configure(config: pytest.Config) -> None:
"markers",
"repricing: Mark test as reference test for gas repricing analysis",
)
config.addinivalue_line(
"markers",
"stub_parametrize(param, prefix): parametrize with matching stubs",
)

# Ensure mutual exclusivity
gas_benchmark_values = GasBenchmarkValues.from_config(config)
Expand Down
12 changes: 12 additions & 0 deletions packages/testing/src/execution_testing/forks/helpers.py
Original file line number Diff line number Diff line change
Expand Up @@ -221,13 +221,25 @@ def get_selected_fork_set(
selected_fork_set |= get_from_until_fork_set(
ALL_FORKS, forks_from, forks_until
)
# Transition fork comparison operators resolve to
# transitions_to(), so an --until transition fork boundary
# incorrectly includes its target in the normal fork set.
for fork_until in forks_until:
if issubclass(fork_until, TransitionBaseClass):
selected_fork_set.discard(fork_until.transitions_to())
selected_fork_set_with_transitions: Set[
Type[BaseFork | TransitionBaseClass]
] = set() | selected_fork_set
if transition_forks:
for normal_fork in list(selected_fork_set):
transition_fork_set = transition_fork_to(normal_fork)
selected_fork_set_with_transitions |= transition_fork_set
# Explicitly add transition fork boundaries whose target fork
# was removed above (transition_fork_to won't find them).
if not single_fork:
for fork in forks_from | forks_until:
if issubclass(fork, TransitionBaseClass):
selected_fork_set_with_transitions.add(fork)
return selected_fork_set_with_transitions


Expand Down
Loading
Loading