diff --git a/Cargo.lock b/Cargo.lock index a9680fa..e7f4772 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1565,14 +1565,26 @@ dependencies = [ name = "itest" version = "0.1.0" dependencies = [ + "itest-macros", "libtest-mimic", "linkme", "paste", "quick-junit", "rustix", + "tempfile", + "tokio", "xshell", ] +[[package]] +name = "itest-macros" +version = "0.1.0" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + [[package]] name = "itest-selftest" version = "0.1.0" diff --git a/Justfile b/Justfile index 284e592..f2c81eb 100644 --- a/Justfile +++ b/Justfile @@ -24,7 +24,8 @@ unit *ARGS: pull-test-images: podman pull -q {{ALL_BASE_IMAGES}} >/dev/null -# Run integration tests (auto-detects nextest, with cleanup) +# Run integration tests (prefers cargo-nextest, falls back to cargo test with +# built-in fork-exec output capture) test-integration *ARGS: build pull-test-images #!/usr/bin/env bash set -euo pipefail @@ -36,16 +37,15 @@ test-integration *ARGS: build pull-test-images # Clean up any leftover containers before starting cargo run --release --bin test-cleanup -p integration-tests 2>/dev/null || true - # Run the tests + # Prefer nextest for better UX (retries, timing, etc.), but the harness + # captures output itself via fork-exec so cargo test works too. if command -v cargo-nextest &> /dev/null; then - cargo nextest run --release -P integration -p integration-tests {{ ARGS }} - TEST_EXIT_CODE=$? + cargo nextest run --release -P integration -p integration-tests {{ ARGS }} && TEST_EXIT_CODE=0 || TEST_EXIT_CODE=$? else - cargo test --release -p integration-tests -- {{ ARGS }} - TEST_EXIT_CODE=$? + cargo test --release -p integration-tests -- {{ ARGS }} && TEST_EXIT_CODE=0 || TEST_EXIT_CODE=$? 
fi - # Clean up containers after tests complete + # Clean up containers after tests complete (must run even on failure) cargo run --release --bin test-cleanup -p integration-tests 2>/dev/null || true exit $TEST_EXIT_CODE diff --git a/crates/itest-macros/Cargo.toml b/crates/itest-macros/Cargo.toml new file mode 100644 index 0000000..92ca7e5 --- /dev/null +++ b/crates/itest-macros/Cargo.toml @@ -0,0 +1,17 @@ +[package] +name = "itest-macros" +version = "0.1.0" +edition = "2021" +publish = false +description = "Proc-macro companion crate for itest" + +[lib] +proc-macro = true + +[dependencies] +proc-macro2 = "1" +quote = "1" +syn = { version = "2", features = ["full", "extra-traits"] } + +[lints] +workspace = true diff --git a/crates/itest-macros/src/lib.rs b/crates/itest-macros/src/lib.rs new file mode 100644 index 0000000..8bf5133 --- /dev/null +++ b/crates/itest-macros/src/lib.rs @@ -0,0 +1,329 @@ +//! Proc-macro companion for the `itest` integration test framework. +//! +//! Provides the `#[itest::test]` attribute macro for registering +//! integration tests with less boilerplate than the declarative macros. + +use proc_macro::TokenStream; +use quote::quote; +use syn::parse::{Parse, ParseStream}; +use syn::punctuated::Punctuated; +use syn::{Expr, Ident, ItemFn, Token}; + +/// Attribute arguments for `#[itest::test(...)]`. +struct TestArgs { + privileged: bool, + booted: bool, + binary: Option, + itype: Option, + timeout: Option, + tags: Vec, + summary: Option, + needs_root: bool, + needs_internet: bool, + flaky: bool, +} + +impl Default for TestArgs { + fn default() -> Self { + Self { + privileged: false, + booted: false, + binary: None, + itype: None, + timeout: None, + tags: Vec::new(), + summary: None, + needs_root: false, + needs_internet: false, + flaky: false, + } + } +} + +/// A single key or key=value argument. 
+enum Arg { + Flag(Ident), + KeyValue(Ident, Expr), +} + +impl Parse for Arg { + fn parse(input: ParseStream) -> syn::Result { + let key: Ident = input.parse()?; + if input.peek(Token![=]) { + let _: Token![=] = input.parse()?; + let value: Expr = input.parse()?; + Ok(Arg::KeyValue(key, value)) + } else { + Ok(Arg::Flag(key)) + } + } +} + +struct TestAttrArgs { + args: Punctuated, +} + +impl Parse for TestAttrArgs { + fn parse(input: ParseStream) -> syn::Result { + Ok(TestAttrArgs { + args: Punctuated::parse_terminated(input)?, + }) + } +} + +fn parse_args(attr: TokenStream) -> syn::Result { + if attr.is_empty() { + return Ok(TestArgs::default()); + } + + let parsed: TestAttrArgs = syn::parse(attr)?; + let mut result = TestArgs::default(); + + for arg in parsed.args { + match arg { + Arg::Flag(ident) => { + let name = ident.to_string(); + match name.as_str() { + "privileged" => result.privileged = true, + "booted" => result.booted = true, + "needs_root" => result.needs_root = true, + "needs_internet" => result.needs_internet = true, + "flaky" => result.flaky = true, + _ => { + return Err(syn::Error::new_spanned( + ident, + format!("unknown flag: {name}"), + )) + } + } + } + Arg::KeyValue(ident, value) => { + let name = ident.to_string(); + match name.as_str() { + "binary" => { + result.binary = Some(expr_to_string(&value)?); + } + "itype" => { + result.itype = Some(expr_to_string(&value)?); + } + "timeout" => { + result.timeout = Some(expr_to_string(&value)?); + } + "summary" => { + result.summary = Some(expr_to_string(&value)?); + } + "tags" => { + result.tags = expr_to_string_list(&value)?; + } + _ => { + return Err(syn::Error::new_spanned( + ident, + format!("unknown attribute: {name}"), + )) + } + } + } + } + } + + // Validate: privileged/booted require binary + if (result.privileged || result.booted) && result.binary.is_none() { + return Err(syn::Error::new( + proc_macro2::Span::call_site(), + "privileged/booted tests require `binary = \"...\"`", + )); + } + + 
Ok(result) +} + +/// Extract a string literal from an expression. +fn expr_to_string(expr: &Expr) -> syn::Result { + match expr { + Expr::Lit(lit) => { + if let syn::Lit::Str(s) = &lit.lit { + Ok(s.value()) + } else { + Err(syn::Error::new_spanned(expr, "expected string literal")) + } + } + _ => Err(syn::Error::new_spanned(expr, "expected string literal")), + } +} + +/// Extract a list of string literals from `["a", "b"]`. +fn expr_to_string_list(expr: &Expr) -> syn::Result> { + match expr { + Expr::Array(arr) => arr.elems.iter().map(|e| expr_to_string(e)).collect(), + _ => Err(syn::Error::new_spanned( + expr, + "expected array of string literals", + )), + } +} + +/// Register a function as an itest integration test. +/// +/// # Plain test +/// +/// ```ignore +/// /// My test does something. +/// #[itest::test] +/// fn test_something() -> anyhow::Result<()> { +/// Ok(()) +/// } +/// ``` +/// +/// # Privileged test (auto-dispatches to VM when not root) +/// +/// ```ignore +/// #[itest::test(privileged, binary = "my-binary")] +/// fn privileged_check_root() -> anyhow::Result<()> { +/// Ok(()) +/// } +/// ``` +/// +/// # Booted test (full bootc install-to-disk) +/// +/// ```ignore +/// #[itest::test(booted, binary = "my-binary", itype = "u1.large")] +/// fn test_ostree() -> anyhow::Result<()> { +/// Ok(()) +/// } +/// ``` +/// +/// # Metadata +/// +/// ```ignore +/// #[itest::test( +/// timeout = "1h", +/// tags = ["slow", "network"], +/// needs_internet, +/// flaky, +/// summary = "A slow network test", +/// )] +/// fn slow_test() -> anyhow::Result<()> { +/// Ok(()) +/// } +/// ``` +#[proc_macro_attribute] +pub fn integration_test(attr: TokenStream, item: TokenStream) -> TokenStream { + match test_impl(attr, item) { + Ok(ts) => ts, + Err(e) => e.to_compile_error().into(), + } +} + +fn test_impl(attr: TokenStream, item: TokenStream) -> syn::Result { + let args = parse_args(attr)?; + let func: ItemFn = syn::parse(item)?; + + let is_async = func.sig.asyncness.is_some(); 
+ + let fn_name = &func.sig.ident; + let fn_name_str = fn_name.to_string(); + + // Generate the wrapper function name + let wrapper_name = syn::Ident::new(&format!("__itest_wrap_{fn_name_str}"), fn_name.span()); + let slice_name = syn::Ident::new( + &format!("__ITEST_{}", fn_name_str.to_uppercase()), + fn_name.span(), + ); + + // Build TestMeta + let timeout_expr = match &args.timeout { + Some(t) => quote! { Some(#t) }, + None => quote! { None }, + }; + let summary_expr = match &args.summary { + Some(s) => quote! { Some(#s) }, + None => quote! { None }, + }; + let tags_expr = if args.tags.is_empty() { + quote! { &[] } + } else { + let tags = &args.tags; + quote! { &[#(#tags),*] } + }; + let needs_root = args.needs_root || args.privileged || args.booted; + let needs_internet = args.needs_internet; + let flaky = args.flaky; + let isolation_expr = if args.booted { + quote! { ::itest::Isolation::Machine } + } else { + quote! { ::itest::Isolation::None } + }; + + // How to call the test function — async fns need a tokio runtime. + let call_expr = if is_async { + quote! { + ::itest::tokio::runtime::Builder::new_current_thread() + .enable_all() + .build() + .expect("failed to build tokio runtime") + .block_on(#fn_name()) + } + } else { + quote! { #fn_name() } + }; + + // Build the wrapper function body for privileged/booted tests + let body = if args.privileged || args.booted { + let binary = args.binary.as_ref().unwrap(); // validated above + let dispatch_mode = if args.booted { + quote! { ::itest::DispatchMode::Booted } + } else { + quote! { ::itest::DispatchMode::Privileged } + }; + let itype_expr = match &args.itype { + Some(t) => quote! { Some(#t) }, + None => quote! { None }, + }; + + quote! { + fn #wrapper_name() -> ::itest::TestResult { + let vm_opts = ::itest::VmOptions { itype: #itype_expr, ..Default::default() }; + if ::itest::require_root( + #fn_name_str, + #binary, + #dispatch_mode, + &vm_opts, + )? 
+ .is_some() + { + return Ok(()); + } + #call_expr.map_err(::std::convert::Into::into) + } + } + } else { + quote! { + fn #wrapper_name() -> ::itest::TestResult { + #call_expr.map_err(::std::convert::Into::into) + } + } + }; + + let output = quote! { + #func + + #body + + #[::itest::linkme::distributed_slice(::itest::INTEGRATION_TESTS)] + static #slice_name: ::itest::IntegrationTest = ::itest::IntegrationTest::with_meta( + #fn_name_str, + #wrapper_name, + ::itest::TestMeta { + timeout: #timeout_expr, + needs_root: #needs_root, + isolation: #isolation_expr, + tags: #tags_expr, + summary: #summary_expr, + needs_internet: #needs_internet, + flaky: #flaky, + }, + ); + }; + + Ok(output.into()) +} diff --git a/crates/itest/Cargo.toml b/crates/itest/Cargo.toml index 4246afe..e8f9bff 100644 --- a/crates/itest/Cargo.toml +++ b/crates/itest/Cargo.toml @@ -5,13 +5,30 @@ edition = "2021" publish = false description = "Reusable integration test infrastructure for bootc-dev projects" +# Disable auto-discovery so the fixture binary doesn't conflict +autobins = false +autotests = false + +[[bin]] +name = "itest-fixture" +path = "tests/fixture.rs" + +[[test]] +name = "harness_integration" +path = "tests/harness_integration.rs" + [dependencies] +itest-macros = { path = "../itest-macros" } libtest-mimic = "0.8" linkme = "0.3" paste = "1" quick-junit = "0.5" -rustix = { version = "1", default-features = false, features = ["process"] } +rustix = { version = "1", default-features = false, features = ["fs", "pipe", "process"] } +tokio = { version = "1", features = ["rt"] } xshell = { workspace = true } +[dev-dependencies] +tempfile = "3" + [lints] workspace = true diff --git a/crates/itest/README.md b/crates/itest/README.md new file mode 100644 index 0000000..8be8c2f --- /dev/null +++ b/crates/itest/README.md @@ -0,0 +1,264 @@ +# itest — integration test framework + +Reusable integration test infrastructure for bootc-dev projects. 
+Built on [libtest-mimic] with automatic test registration via +[linkme] distributed slices. + +## Quick start + +Create a binary crate with `harness = false` and register tests +with macros: + +```rust +fn my_test() -> itest::TestResult { + // your test logic + Ok(()) +} +itest::integration_test!(my_test); + +fn main() { + itest::run_tests(); +} +``` + +## Test types + +### Plain tests + +```rust +fn test_something() -> itest::TestResult { + assert_eq!(2 + 2, 4); + Ok(()) +} +itest::integration_test!(test_something); +``` + +### Parameterized tests + +Expanded once per parameter value configured in `TestConfig::parameters`: + +```rust +fn test_with_image(image: &str) -> itest::TestResult { + println!("testing with {image}"); + Ok(()) +} +itest::parameterized_integration_test!(test_with_image); + +fn main() { + let config = itest::TestConfig { + parameters: vec![ + "quay.io/fedora/fedora-bootc:42".into(), + "quay.io/centos-bootc/centos-bootc:stream10".into(), + ], + ..Default::default() + }; + itest::run_tests_with_config(config); +} +``` + +### Privileged tests + +Tests that need root. When run unprivileged, the harness +automatically dispatches them inside a bcvk ephemeral VM: + +```rust +itest::privileged_test!("my-binary", test_needs_root, { + assert!(rustix::process::getuid().is_root()); + Ok(()) +}); +``` + +An optional `itype` parameter specifies the VM instance type +(note the trailing comma): + +```rust +itest::privileged_test!("my-binary", big_test, itype = "u1.large", { + // runs in a VM with 2 vCPU / 8 GiB + Ok(()) +}); +``` + +### Test metadata + +Any test can carry metadata that flows into tmt and autopkgtest +output. 
Metadata is declared inline via a `const` block: + +```rust +fn slow_network_test() -> itest::TestResult { + Ok(()) +} +itest::integration_test!(slow_network_test, meta = const { + itest::TestMeta { + timeout: Some("1h"), + needs_root: true, + isolation: itest::Isolation::Machine, + tags: &["slow", "network"], + summary: Some("A test that needs internet and a full VM"), + needs_internet: true, + flaky: true, + ..itest::TestMeta::EMPTY + } +}); +``` + +Fields and how they map to each format: + +| Field | tmt (FMF) | autopkgtest (DEP-8) | +|---|---|---| +| `timeout` | `duration:` | *(global only)* | +| `needs_root` | *(plan-level)* | `Restrictions: needs-root` | +| `isolation` | *(plan-level)* | `Restrictions: isolation-{container,machine}` | +| `tags` | `tag:` | `Classes:` | +| `summary` | `summary:` | *(none)* | +| `needs_internet` | *(plan-level)* | `Restrictions: needs-internet` | +| `flaky` | `result: xfail` | `Restrictions: flaky` | + +All fields are optional. Tests without `meta = const { ... }` get +sensible defaults (20m timeout, no restrictions). + +### Booted tests + +Like privileged tests, but dispatched via `bcvk libvirt run` which +does a full `bootc install to-disk`: + +```rust +itest::booted_test!("my-binary", test_ostree, { + // runs inside a real booted ostree deployment + Ok(()) +}); +``` + +## Running tests + +The harness supports multiple test runners. It auto-detects which +runner is active and adapts its behavior. + +### cargo test (built-in capture) + +```bash +cargo test -p my-tests +``` + +When no external runner is detected, itest automatically captures +output by re-executing itself per test (fork-exec). Passing test +output is suppressed; failing test output is shown — matching the +default `cargo test` behavior. 
+ +Set `ITEST_NOCAPTURE=1` to disable capture for debugging: + +```bash +ITEST_NOCAPTURE=1 cargo test -p my-tests +``` + +### cargo-nextest + +```bash +cargo nextest run -P integration -p my-tests +``` + +[nextest] runs each test as a separate process natively, so the +harness detects this (via the `NEXTEST` env var) and skips its own +fork-exec layer. nextest provides additional features like retries, +timing reports, and better parallelism control. + +### tmt + +[tmt] discovers tests from FMF metadata files. Generate them from +the test binary: + +```bash +my-tests --emit-tmt tmt/tests/ +``` + +This creates a `tests.fmf` file where each registered test becomes +an entry like: + +```yaml +/test_something: + summary: test_something + test: my-tests --exact test_something + duration: 20m +``` + +Then run with tmt: + +```bash +tmt run --all provision --how local --feeling-safe +``` + +The harness detects tmt (via `TMT_TEST_DATA`) and runs tests +directly without the fork-exec capture layer — tmt handles +per-test output isolation itself. + +### autopkgtest (DEP-8) + +[autopkgtest] discovers tests from a `debian/tests/control` file. +Generate it: + +```bash +my-tests --emit-autopkgtest debian/tests/ +``` + +This creates a `control` file with one stanza per test: + +``` +Test-Command: my-tests --exact test_something +Features: test-name=test_something +Restrictions: needs-root +``` + +Then run with autopkgtest: + +```bash +autopkgtest -- null # run on localhost +autopkgtest -- qemu ... 
# run in a QEMU VM +``` + +## Environment variables + +| Variable | Effect | +|---|---| +| `ITEST_NOCAPTURE=1` | Disable fork-exec output capture | +| `ITEST_SUBPROCESS=1` | Set internally; marks a fork-exec child | +| `ITEST_IN_VM=1` | Set internally; recursion guard for VM dispatch | +| `ITEST_IMAGE` | Container image for VM dispatch (required when not root) | +| `BCVK_PATH` | Path to the `bcvk` binary (default: `bcvk`) | +| `JUNIT_OUTPUT` | Path to write JUnit XML results | + +## Architecture + +``` +┌────────────────────────────────────────────────────┐ +│ test binary │ +│ ┌──────────────┐ ┌──────────────────────────┐ │ +│ │ integration_ │ │ parameterized_ │ │ +│ │ test! macros │ │ integration_test! macros │ │ +│ └──────┬───────┘ └────────────┬─────────────┘ │ +│ │ linkme distributed │ │ +│ │ slices │ │ +│ ┌──────▼───────────────────────▼─────────────┐ │ +│ │ run_tests_with_config() │ │ +│ │ │ │ +│ │ ┌─────────┐ ┌───────────┐ ┌──────────┐ │ │ +│ │ │--emit- │ │--emit- │ │ fork-exec│ │ │ +│ │ │tmt │ │autopkgtest│ │ capture │ │ │ +│ │ └─────────┘ └───────────┘ └──────────┘ │ │ +│ │ │ │ +│ │ ┌─────────────────────────────────────┐ │ │ +│ │ │ libtest-mimic │ │ │ +│ │ │ (filtering, --list, --exact, etc.) 
│ │ │ +│ │ └─────────────────────────────────────┘ │ │ +│ └─────────────────────────────────────────────┘ │ +└────────────────────────────────────────────────────┘ + │ │ + ┌────▼─────┐ ┌─────▼──────┐ + │ nextest │ │ tmt / │ + │ (native) │ │ autopkgtest │ + └──────────┘ └────────────┘ +``` + +[libtest-mimic]: https://crates.io/crates/libtest-mimic +[linkme]: https://crates.io/crates/linkme +[nextest]: https://nexte.st +[tmt]: https://tmt.readthedocs.io +[autopkgtest]: https://wiki.debian.org/ContinuousIntegration/autopkgtest diff --git a/crates/itest/src/harness.rs b/crates/itest/src/harness.rs index 277db2b..6cc3d60 100644 --- a/crates/itest/src/harness.rs +++ b/crates/itest/src/harness.rs @@ -9,6 +9,13 @@ use libtest_mimic::{Arguments, Trial}; use crate::junit::{write_junit, TestOutcome}; use crate::{image_to_test_suffix, INTEGRATION_TESTS, PARAMETERIZED_INTEGRATION_TESTS}; +/// Default tmt test timeout, matching the nextest integration profile. +const TMT_DEFAULT_DURATION: &str = "20m"; + +/// Environment variable set in the child process during fork-exec +/// capture to prevent infinite recursion. +const SUBPROCESS_ENV: &str = "ITEST_SUBPROCESS"; + /// Per-project configuration for the test harness. #[derive(Debug, Clone)] pub struct TestConfig { @@ -45,37 +52,123 @@ pub fn run_tests() -> ! { run_tests_with_config(TestConfig::default()) } +/// Determine whether we should capture output via fork-exec-self. +/// +/// Returns `true` when: +/// - Not already inside a subprocess (`ITEST_SUBPROCESS` not set) +/// - Not running under a per-test runner (nextest, tmt) +/// - Not explicitly suppressed (`ITEST_NOCAPTURE=1`) +fn should_capture() -> bool { + std::env::var_os(SUBPROCESS_ENV).is_none() + && std::env::var_os("NEXTEST").is_none() + && std::env::var_os("TMT_TEST_DATA").is_none() + && std::env::var_os("ITEST_NOCAPTURE").is_none() +} + /// Run all registered tests with the given configuration. 
/// /// This function collects tests from the global distributed slices, /// expands parameterised variants, runs them through libtest-mimic, /// optionally writes JUnit XML, and exits the process. +/// +/// # Output capture +/// +/// When not running under an external test runner (nextest, tmt), +/// the harness automatically captures output by re-executing itself +/// as a subprocess for each test. This prevents interleaved output +/// from parallel tests. Captured output is only shown for failing +/// tests, matching `cargo test` default behaviour. +/// +/// To disable capture (e.g. for debugging), set `ITEST_NOCAPTURE=1`. +/// +/// # Test metadata generation +/// +/// Instead of running tests, the binary can emit metadata for +/// external test runners: +/// +/// - `--emit-tmt ` — FMF metadata for [tmt](https://tmt.readthedocs.io) +/// - `--emit-autopkgtest ` — DEP-8 `control` file for +/// [autopkgtest](https://wiki.debian.org/ContinuousIntegration/autopkgtest) +/// +/// Each registered test becomes an entry that invokes +/// ` --exact `. pub fn run_tests_with_config(config: TestConfig) -> ! { + let raw_args: Vec = std::env::args().collect(); + + // --vm-jobserver -- + // Create a VM memory jobserver, then exec the given command + // with the pipe fds inherited. Used to wrap nextest: + // my-tests --vm-jobserver -- cargo nextest run ... + if let Some(pos) = raw_args.iter().position(|a| a == "--vm-jobserver") { + let rest: Vec<&str> = raw_args[pos + 1..] + .iter() + .skip_while(|a| a.as_str() == "--") + .map(|s| s.as_str()) + .collect(); + if rest.is_empty() { + eprintln!("error: --vm-jobserver requires a command after --"); + std::process::exit(1); + } + exec_with_jobserver(&rest); + } + + // Check for metadata emission flags before libtest-mimic parses args. 
+ for (flag, emitter) in [ + ("--emit-tmt", emit_tmt as fn(&TestConfig, &str, &str) -> _), + ("--emit-autopkgtest", emit_autopkgtest as _), + ] { + if let Some(pos) = raw_args.iter().position(|a| a == flag) { + let dir = raw_args.get(pos + 1).cloned().unwrap_or_else(|| { + eprintln!("error: {flag} requires a directory argument"); + std::process::exit(1); + }); + let binary = &raw_args[0]; + if let Err(e) = emitter(&config, binary, &dir) { + eprintln!("error: {flag} failed: {e}"); + std::process::exit(1); + } + std::process::exit(0); + } + } + + let capture = should_capture(); + let args = Arguments::from_args(); let outcomes: Arc>> = Arc::new(Mutex::new(Vec::new())); let mut tests: Vec = Vec::new(); + // The binary path for fork-exec; only resolved when capturing. + let self_exe: Option> = if capture { + match std::env::current_exe() { + Ok(p) => Some(Arc::new(p)), + Err(e) => { + eprintln!("warning: cannot resolve current_exe, disabling output capture: {e}"); + None + } + } + } else { + None + }; + // Collect plain tests for t in INTEGRATION_TESTS.iter() { - let f = t.f; let name = t.name.to_owned(); let outcomes = Arc::clone(&outcomes); - tests.push(Trial::test(t.name, move || { - let start = Instant::now(); - let result = f(); - let duration = start.elapsed(); - let outcome = TestOutcome { - name, - duration, - result: result.as_ref().map(|_| ()).map_err(|e| format!("{e:?}")), - }; - outcomes - .lock() - .unwrap_or_else(|e| e.into_inner()) - .push(outcome); - result.map_err(|e| format!("{e:?}").into()) - })); + + let trial = if let Some(ref exe) = self_exe { + // Capture mode: fork-exec self with --exact + let exe = Arc::clone(exe); + let test_name = t.name.to_owned(); + Trial::test(t.name, move || { + run_captured(&exe, &test_name, &name, &outcomes) + }) + } else { + // Direct mode: run in-process + let f = t.f; + Trial::test(t.name, move || run_direct(f, &name, &outcomes)) + }; + tests.push(trial); } // Expand parameterised tests @@ -85,23 +178,21 @@ pub fn 
run_tests_with_config(config: TestConfig) -> ! { let suffix = image_to_test_suffix(¶m); let test_name = format!("{}_{}", pt.name, suffix); let display_name = test_name.clone(); - let f = pt.f; let outcomes = Arc::clone(&outcomes); - tests.push(Trial::test(test_name, move || { - let start = Instant::now(); - let result = f(¶m); - let duration = start.elapsed(); - let outcome = TestOutcome { - name: display_name, - duration, - result: result.as_ref().map(|_| ()).map_err(|e| format!("{e:?}")), - }; - outcomes - .lock() - .unwrap_or_else(|e| e.into_inner()) - .push(outcome); - result.map_err(|e| format!("{e:?}").into()) - })); + + let trial = if let Some(ref exe) = self_exe { + let exe = Arc::clone(exe); + let tn = test_name.clone(); + Trial::test(test_name, move || { + run_captured(&exe, &tn, &display_name, &outcomes) + }) + } else { + let f = pt.f; + Trial::test(test_name, move || { + run_direct_param(f, ¶m, &display_name, &outcomes) + }) + }; + tests.push(trial); } } @@ -121,3 +212,541 @@ pub fn run_tests_with_config(config: TestConfig) -> ! { std::process::exit(if conclusion.has_failed() { 101 } else { 0 }); } + +/// Run a test function directly in-process (used when capture is not needed). +fn run_direct( + f: crate::TestFn, + name: &str, + outcomes: &Mutex>, +) -> Result<(), libtest_mimic::Failed> { + let start = Instant::now(); + let result = f(); + let duration = start.elapsed(); + let outcome = TestOutcome { + name: name.to_owned(), + duration, + result: result.as_ref().map(|_| ()).map_err(|e| format!("{e:?}")), + }; + outcomes + .lock() + .unwrap_or_else(|e| e.into_inner()) + .push(outcome); + result.map_err(|e| format!("{e:?}").into()) +} + +/// Run a parameterised test function directly in-process. 
+fn run_direct_param( + f: crate::ParameterizedTestFn, + param: &str, + name: &str, + outcomes: &Mutex>, +) -> Result<(), libtest_mimic::Failed> { + let start = Instant::now(); + let result = f(param); + let duration = start.elapsed(); + let outcome = TestOutcome { + name: name.to_owned(), + duration, + result: result.as_ref().map(|_| ()).map_err(|e| format!("{e:?}")), + }; + outcomes + .lock() + .unwrap_or_else(|e| e.into_inner()) + .push(outcome); + result.map_err(|e| format!("{e:?}").into()) +} + +/// Run a test by re-executing self as a subprocess, capturing output. +/// +/// The child is invoked with `ITEST_SUBPROCESS=1` to prevent recursion, +/// plus `--exact --nocapture` so libtest-mimic runs just +/// the one test and doesn't try to capture (the parent is doing it). +/// +/// On success, captured output is discarded. On failure, it is +/// included in the error message so libtest-mimic displays it. +fn run_captured( + exe: &std::path::Path, + test_name: &str, + display_name: &str, + outcomes: &Mutex>, +) -> Result<(), libtest_mimic::Failed> { + let start = Instant::now(); + + let output = std::process::Command::new(exe) + .arg("--exact") + .arg(test_name) + .arg("--nocapture") + .env(SUBPROCESS_ENV, "1") + .env_remove("JUNIT_OUTPUT") // parent handles JUnit + .output(); + + let duration = start.elapsed(); + + match output { + Ok(output) => { + let success = output.status.success(); + let outcome = TestOutcome { + name: display_name.to_owned(), + duration, + result: if success { + Ok(()) + } else { + Err(format!("exit status: {}", output.status)) + }, + }; + outcomes + .lock() + .unwrap_or_else(|e| e.into_inner()) + .push(outcome); + + if success { + Ok(()) + } else { + // Include captured output in the failure message + let stdout = String::from_utf8_lossy(&output.stdout); + let stderr = String::from_utf8_lossy(&output.stderr); + let mut msg = format!("test failed ({})\n", output.status); + if !stdout.is_empty() { + msg.push_str("--- stdout ---\n"); + 
msg.push_str(&stdout); + } + if !stderr.is_empty() { + msg.push_str("--- stderr ---\n"); + msg.push_str(&stderr); + } + Err(msg.into()) + } + } + Err(e) => { + let outcome = TestOutcome { + name: display_name.to_owned(), + duration, + result: Err(format!("failed to spawn subprocess: {e}")), + }; + outcomes + .lock() + .unwrap_or_else(|e| e.into_inner()) + .push(outcome); + Err(format!("failed to spawn subprocess: {e}").into()) + } + } +} + +/// Generate tmt FMF test metadata from registered tests. +/// +/// Creates a `tests.fmf` file in `dir` with one entry per test. +/// Each test's `test:` field invokes the binary with `--exact`. +fn emit_tmt( + config: &TestConfig, + binary: &str, + dir: &str, +) -> Result<(), Box> { + let dir = std::path::Path::new(dir); + std::fs::create_dir_all(dir)?; + + let tests: Vec<_> = INTEGRATION_TESTS + .iter() + .map(|t| (t.name, &t.meta)) + .collect(); + let param_tests: Vec<_> = PARAMETERIZED_INTEGRATION_TESTS + .iter() + .map(|t| (t.name, &t.meta)) + .collect(); + + let content = format_tmt_fmf(binary, &tests, ¶m_tests, &config.parameters); + + let output_path = dir.join("tests.fmf"); + std::fs::write(&output_path, &content)?; + eprintln!("tmt metadata written to {}", output_path.display()); + + Ok(()) +} + +/// Format tmt FMF content from test names and metadata. +/// +/// Separated from [`emit_tmt`] for testability — no I/O or global state. 
+fn format_tmt_fmf( + binary: &str, + tests: &[(&str, &crate::TestMeta)], + parameterized_tests: &[(&str, &crate::TestMeta)], + parameters: &[String], +) -> String { + use std::fmt::Write; + + let binary_name = binary_basename(binary); + + let mut content = String::new(); + let _ = writeln!(content, "# THIS IS GENERATED CODE - DO NOT EDIT"); + let _ = writeln!(content, "# Generated by: {binary_name} --emit-tmt"); + let _ = writeln!(content); + + for &(name, meta) in tests { + write_tmt_entry(&mut content, binary_name, name, name, meta); + } + + for &(name, meta) in parameterized_tests { + for param in parameters { + let suffix = image_to_test_suffix(param); + let test_name = format!("{name}_{suffix}"); + let summary = format!("{name} [{param}]"); + write_tmt_entry(&mut content, binary_name, &test_name, &summary, meta); + } + } + + content +} + +/// Write a single tmt FMF test entry, including metadata fields. +fn write_tmt_entry( + w: &mut String, + binary_name: &str, + test_name: &str, + summary: &str, + meta: &crate::TestMeta, +) { + use std::fmt::Write; + + let _ = writeln!(w, "/{test_name}:"); + let _ = writeln!(w, " summary: {}", meta.summary.unwrap_or(summary)); + let _ = writeln!(w, " test: {binary_name} --exact {test_name}"); + let duration = meta.timeout.unwrap_or(TMT_DEFAULT_DURATION); + let _ = writeln!(w, " duration: {duration}"); + if !meta.tags.is_empty() { + let _ = writeln!(w, " tag: [{}]", meta.tags.join(", ")); + } + if meta.flaky { + let _ = writeln!(w, " result: xfail"); + } + let _ = writeln!(w); +} + +/// Generate autopkgtest (DEP-8) control file from registered tests. +/// +/// Creates a `control` file in `dir` with one stanza per test. +/// Each stanza uses `Test-Command:` to invoke the binary with `--exact`. 
+fn emit_autopkgtest( + config: &TestConfig, + binary: &str, + dir: &str, +) -> Result<(), Box> { + let dir = std::path::Path::new(dir); + std::fs::create_dir_all(dir)?; + + let tests: Vec<_> = INTEGRATION_TESTS + .iter() + .map(|t| (t.name, &t.meta)) + .collect(); + let param_tests: Vec<_> = PARAMETERIZED_INTEGRATION_TESTS + .iter() + .map(|t| (t.name, &t.meta)) + .collect(); + + let content = format_autopkgtest_control(binary, &tests, ¶m_tests, &config.parameters); + + let output_path = dir.join("control"); + std::fs::write(&output_path, &content)?; + eprintln!("autopkgtest control written to {}", output_path.display()); + + Ok(()) +} + +/// Format autopkgtest (DEP-8) control file content. +/// +/// Each test becomes a stanza with `Test-Command:` and `Features: test-name`. +/// Stanzas are separated by blank lines per the DEP-8 spec. +/// +/// Separated from [`emit_autopkgtest`] for testability. +fn format_autopkgtest_control( + binary: &str, + tests: &[(&str, &crate::TestMeta)], + parameterized_tests: &[(&str, &crate::TestMeta)], + parameters: &[String], +) -> String { + use std::fmt::Write; + + let binary_name = binary_basename(binary); + + let mut content = String::new(); + let _ = writeln!(content, "# THIS IS GENERATED CODE - DO NOT EDIT"); + let _ = writeln!(content, "# Generated by: {binary_name} --emit-autopkgtest"); + + for &(name, meta) in tests { + write_autopkgtest_stanza(&mut content, binary_name, name, meta); + } + + for &(name, meta) in parameterized_tests { + for param in parameters { + let suffix = image_to_test_suffix(param); + let test_name = format!("{name}_{suffix}"); + write_autopkgtest_stanza(&mut content, binary_name, &test_name, meta); + } + } + + content +} + +/// Write a single autopkgtest (DEP-8) stanza, including metadata fields. 
+fn write_autopkgtest_stanza( + w: &mut String, + binary_name: &str, + test_name: &str, + meta: &crate::TestMeta, +) { + use std::fmt::Write; + + let _ = writeln!(w); + let _ = writeln!(w, "Test-Command: {binary_name} --exact {test_name}"); + let _ = writeln!(w, "Features: test-name={test_name}"); + + // Build restrictions list from metadata + let mut restrictions = Vec::new(); + if meta.needs_root { + restrictions.push("needs-root"); + } + match meta.isolation { + crate::Isolation::None => {} + crate::Isolation::Container => restrictions.push("isolation-container"), + crate::Isolation::Machine => restrictions.push("isolation-machine"), + } + if meta.needs_internet { + restrictions.push("needs-internet"); + } + if meta.flaky { + restrictions.push("flaky"); + } + if !restrictions.is_empty() { + let _ = writeln!(w, "Restrictions: {}", restrictions.join(", ")); + } + if !meta.tags.is_empty() { + let _ = writeln!(w, "Classes: {}", meta.tags.join(", ")); + } +} + +/// Extract just the filename from a binary path. +fn binary_basename(binary: &str) -> &str { + std::path::Path::new(binary) + .file_name() + .and_then(|n| n.to_str()) + .unwrap_or(binary) +} + +/// Create a VM jobserver and exec the given command with the pipe +/// fds inherited. Does not return on success. +fn exec_with_jobserver(cmd: &[&str]) -> ! { + use crate::resources::{compute_token_count, VmJobserver}; + + let tokens = compute_token_count(); + let js = VmJobserver::create(tokens).unwrap_or_else(|e| { + eprintln!("error: failed to create VM jobserver: {e}"); + std::process::exit(1); + }); + let (r, w) = js.fds(); + + let budget_mib = tokens * crate::resources::TOKEN_MIB; + eprintln!( + "itest: VM jobserver: {tokens} token(s) ({budget_mib} MiB budget), \ + fds ({r},{w})" + ); + + // Keep the pipe fds open for the child process we're about to exec into. + // VmJobserver stores raw fds — no Drop impl — but ManuallyDrop makes + // the intent explicit and avoids clippy::forget_non_drop. 
+ let _js = std::mem::ManuallyDrop::new(js); + + // exec the command with ITEST_VM_FDS set + use std::os::unix::process::CommandExt; + let err = std::process::Command::new(cmd[0]) + .args(&cmd[1..]) + .env("ITEST_VM_FDS", format!("{r},{w}")) + .exec(); + + eprintln!("error: exec {:?} failed: {err}", cmd[0]); + std::process::exit(1); +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::{Isolation, TestMeta}; + + const DEFAULT: TestMeta = TestMeta::EMPTY; + + const ROOT_META: TestMeta = TestMeta { + needs_root: true, + ..TestMeta::EMPTY + }; + + const RICH_META: TestMeta = TestMeta { + timeout: Some("1h"), + needs_root: true, + isolation: Isolation::Machine, + tags: &["slow", "network"], + summary: Some("A slow network test"), + needs_internet: true, + flaky: true, + }; + + // ── tmt FMF ───────────────────────────────────────────────────── + + #[test] + fn tmt_fmf_plain_tests() { + let content = format_tmt_fmf( + "/path/to/my-binary", + &[("test_foo", &DEFAULT), ("test_bar", &DEFAULT)], + &[], + &[], + ); + + assert!(content.starts_with("# THIS IS GENERATED CODE")); + assert!(content.contains("# Generated by: my-binary --emit-tmt")); + assert!(content.contains("/test_foo:")); + assert!(content.contains(" summary: test_foo")); + assert!(content.contains(" test: my-binary --exact test_foo")); + assert!(content.contains(" duration: 20m")); + assert!(content.contains("/test_bar:")); + } + + #[test] + fn tmt_fmf_parameterized_tests() { + let content = format_tmt_fmf( + "my-binary", + &[], + &[("test_multi", &DEFAULT)], + &["quay.io/img:v1".into(), "localhost/other".into()], + ); + + assert!(content.contains("/test_multi_quay_io_img_v1:")); + assert!(content.contains(" summary: test_multi [quay.io/img:v1]")); + assert!(content.contains(" test: my-binary --exact test_multi_quay_io_img_v1")); + assert!(content.contains("/test_multi_localhost_other:")); + } + + #[test] + fn tmt_fmf_with_metadata() { + let content = format_tmt_fmf("bin", &[("test_rich", &RICH_META)], 
&[], &[]); + + assert!(content.contains(" duration: 1h")); + assert!(content.contains(" summary: A slow network test")); + assert!(content.contains(" tag: [slow, network]")); + assert!(content.contains(" result: xfail")); + } + + #[test] + fn tmt_fmf_no_extra_fields_with_defaults() { + let content = format_tmt_fmf("bin", &[("test_plain", &DEFAULT)], &[], &[]); + + // Default metadata should not emit tag or result + assert!(!content.contains(" tag:")); + assert!(!content.contains(" result:")); + } + + #[test] + fn tmt_fmf_empty() { + let content = format_tmt_fmf("bin", &[], &[], &[]); + assert!(content.contains("# THIS IS GENERATED CODE")); + assert!(!content.contains(" test:")); + } + + #[test] + fn tmt_fmf_strips_path() { + let content = format_tmt_fmf("/usr/local/bin/my-tests", &[("a_test", &DEFAULT)], &[], &[]); + assert!(content.contains(" test: my-tests --exact a_test")); + assert!(!content.contains("/usr/local/bin")); + } + + // ── autopkgtest ───────────────────────────────────────────────── + + #[test] + fn autopkgtest_plain_tests() { + let content = format_autopkgtest_control( + "/usr/bin/my-binary", + &[("test_foo", &ROOT_META), ("test_bar", &ROOT_META)], + &[], + &[], + ); + + assert!(content.contains("# THIS IS GENERATED CODE")); + assert!(content.contains("Test-Command: my-binary --exact test_foo")); + assert!(content.contains("Features: test-name=test_foo")); + assert!(content.contains("Restrictions: needs-root")); + assert!(content.contains("Test-Command: my-binary --exact test_bar")); + } + + #[test] + fn autopkgtest_with_metadata() { + let content = format_autopkgtest_control("bin", &[("test_rich", &RICH_META)], &[], &[]); + + assert!( + content.contains("Restrictions: needs-root, isolation-machine, needs-internet, flaky") + ); + assert!(content.contains("Classes: slow, network")); + } + + #[test] + fn autopkgtest_no_restrictions_when_empty() { + let content = format_autopkgtest_control("bin", &[("test_plain", &DEFAULT)], &[], &[]); + + // With 
default meta (no needs_root, no isolation, etc.), + // there should be no Restrictions line + assert!(!content.contains("Restrictions:")); + assert!(!content.contains("Classes:")); + } + + #[test] + fn autopkgtest_parameterized() { + let content = format_autopkgtest_control( + "my-binary", + &[], + &[("test_multi", &ROOT_META)], + &["img:v1".into()], + ); + + assert!(content.contains("Test-Command: my-binary --exact test_multi_img_v1")); + assert!(content.contains("Features: test-name=test_multi_img_v1")); + } + + #[test] + fn autopkgtest_stanzas_separated_by_blank_lines() { + let content = + format_autopkgtest_control("bin", &[("a", &ROOT_META), ("b", &ROOT_META)], &[], &[]); + + assert!( + content.contains("Restrictions: needs-root\n\nTest-Command:"), + "stanzas must be separated by blank lines, got:\n{content}" + ); + } + + #[test] + fn autopkgtest_empty() { + let content = format_autopkgtest_control("bin", &[], &[], &[]); + assert!(content.contains("# THIS IS GENERATED CODE")); + assert!(!content.contains("Test-Command:")); + } + + #[test] + fn autopkgtest_isolation_container() { + let meta = TestMeta { + isolation: Isolation::Container, + ..TestMeta::EMPTY + }; + let content = format_autopkgtest_control("bin", &[("test_c", &meta)], &[], &[]); + assert!(content.contains("Restrictions: isolation-container")); + } + + // ── misc ──────────────────────────────────────────────────────── + + #[test] + fn capture_disabled_under_nextest() { + assert_eq!(SUBPROCESS_ENV, "ITEST_SUBPROCESS"); + } + + #[test] + fn tmt_default_duration_is_valid() { + assert!(TMT_DEFAULT_DURATION.ends_with('m') || TMT_DEFAULT_DURATION.ends_with('h')); + let numeric: String = TMT_DEFAULT_DURATION + .chars() + .take_while(|c| c.is_ascii_digit()) + .collect(); + assert!(!numeric.is_empty(), "duration should start with digits"); + } +} diff --git a/crates/itest/src/lib.rs b/crates/itest/src/lib.rs index f0888d9..c9eb73a 100644 --- a/crates/itest/src/lib.rs +++ b/crates/itest/src/lib.rs @@ -38,6 
+38,7 @@ mod harness; mod junit; mod privilege; +mod resources; pub use harness::{run_tests, run_tests_with_config, TestConfig}; pub use privilege::{require_root, DispatchMode}; @@ -48,6 +49,13 @@ pub use privilege::{require_root, DispatchMode}; pub use linkme; #[doc(hidden)] pub use paste; +#[doc(hidden)] +pub use tokio; + +// Re-export the proc-macro attribute. Named `test_attr` to avoid +// conflicts with both the `#[test]` prelude attribute and the +// `integration_test!` declarative macro. +pub use itest_macros::integration_test as test_attr; /// Error type for integration tests. /// @@ -66,6 +74,112 @@ pub type TestFn = fn() -> TestResult; /// Signature for a parameterised test (receives one string parameter). pub type ParameterizedTestFn = fn(&str) -> TestResult; +/// Options that control how a test is dispatched to a VM. +#[derive(Debug, Clone, Default)] +pub struct VmOptions { + /// Instance type (e.g. `"u1.large"`) passed to `bcvk --itype`. + /// + /// When set, takes precedence over `memory_mib` and `vcpus`. + pub itype: Option<&'static str>, + + /// Explicit VM memory in MiB. + /// + /// When `None` and `itype` is also `None`, auto-detected from host + /// resources (capped at 70% of available memory). + pub memory_mib: Option, + + /// Explicit VM vCPU count. + /// + /// When `None` and `itype` is also `None`, auto-detected from host + /// resources. + pub vcpus: Option, +} + +/// Per-test metadata that maps to external test runner formats. +/// +/// This struct captures test properties that are meaningful across +/// runners (tmt, autopkgtest, nextest). All fields are optional; +/// the harness fills in sensible defaults when emitting metadata. +/// +/// Fields are `const`-constructible so they can live in distributed +/// slices. 
+/// +/// # Format mapping +/// +/// | Field | tmt (FMF) | autopkgtest (DEP-8) | +/// |---|---|---| +/// | `timeout` | `duration:` | *(global only)* | +/// | `needs_root` | *(plan-level)* | `Restrictions: needs-root` | +/// | `isolation` | *(plan-level)* | `Restrictions: isolation-{container,machine}` | +/// | `tags` | `tag:` | `Classes:` | +/// | `summary` | `summary:` | *(none)* | +/// | `needs_internet` | *(none)* | `Restrictions: needs-internet` | +/// | `flaky` | `result: xfail` | `Restrictions: flaky` | +#[derive(Debug, Clone)] +pub struct TestMeta { + /// Maximum test duration (e.g. `"5m"`, `"1h"`). + /// + /// Defaults to the harness-wide default when `None`. + pub timeout: Option<&'static str>, + + /// Whether the test requires root privileges. + /// + /// Set automatically by [`privileged_test!`] and [`booted_test!`]. + pub needs_root: bool, + + /// Minimum isolation level required. + /// + /// Maps to autopkgtest `isolation-container` / `isolation-machine`. + pub isolation: Isolation, + + /// Free-form tags for filtering and categorization. + /// + /// Maps to tmt `tag:` and autopkgtest `Classes:`. + pub tags: &'static [&'static str], + + /// One-line summary. Falls back to the test name when `None`. + pub summary: Option<&'static str>, + + /// Whether the test requires unrestricted internet access. + pub needs_internet: bool, + + /// Whether the test is known to be flaky. + /// + /// Maps to autopkgtest `Restrictions: flaky` and tmt `result: xfail`. + pub flaky: bool, +} + +impl TestMeta { + /// An empty metadata set — all defaults. + pub const EMPTY: Self = Self { + timeout: None, + needs_root: false, + isolation: Isolation::None, + tags: &[], + summary: None, + needs_internet: false, + flaky: false, + }; +} + +impl Default for TestMeta { + fn default() -> Self { + Self::EMPTY + } +} + +/// Minimum isolation level a test requires. +#[derive(Debug, Clone, Copy, PartialEq, Eq, Default)] +pub enum Isolation { + /// No special isolation (default). 
+ #[default] + None, + /// Needs its own container (can start services, open ports). + Container, + /// Needs its own machine (can interact with kernel, reboot). + Machine, +} + /// Metadata for a registered integration test. #[derive(Debug)] pub struct IntegrationTest { @@ -73,12 +187,23 @@ pub struct IntegrationTest { pub name: &'static str, /// Test function. pub f: TestFn, + /// Per-test metadata for external runners. + pub meta: TestMeta, } impl IntegrationTest { - /// Create a new integration test. + /// Create a new integration test with default metadata. pub const fn new(name: &'static str, f: TestFn) -> Self { - Self { name, f } + Self { + name, + f, + meta: TestMeta::EMPTY, + } + } + + /// Create a new integration test with explicit metadata. + pub const fn with_meta(name: &'static str, f: TestFn, meta: TestMeta) -> Self { + Self { name, f, meta } } } @@ -89,12 +214,23 @@ pub struct ParameterizedIntegrationTest { pub name: &'static str, /// Test function receiving one string parameter. pub f: ParameterizedTestFn, + /// Per-test metadata for external runners. + pub meta: TestMeta, } impl ParameterizedIntegrationTest { - /// Create a new parameterised integration test. + /// Create a new parameterised integration test with default metadata. pub const fn new(name: &'static str, f: ParameterizedTestFn) -> Self { - Self { name, f } + Self { + name, + f, + meta: TestMeta::EMPTY, + } + } + + /// Create a new parameterised integration test with explicit metadata. + pub const fn with_meta(name: &'static str, f: ParameterizedTestFn, meta: TestMeta) -> Self { + Self { name, f, meta } } } @@ -115,17 +251,54 @@ pub static PARAMETERIZED_INTEGRATION_TESTS: [ParameterizedIntegrationTest]; /// Register a test function. /// +/// The function may return any `Result<(), E>` where +/// `E: Into>` — this includes +/// `anyhow::Result`, `eyre::Result`, and plain `std::io::Result`. 
+/// /// ```ignore -/// fn my_test() -> itest::TestResult { Ok(()) } +/// fn my_test() -> anyhow::Result<()> { Ok(()) } /// itest::integration_test!(my_test); /// ``` +/// +/// With metadata: +/// +/// ```ignore +/// fn slow_test() -> itest::TestResult { Ok(()) } +/// itest::integration_test!(slow_test, meta = const { itest::TestMeta { +/// timeout: Some("1h"), +/// tags: &["slow", "network"], +/// needs_internet: true, +/// ..itest::TestMeta::EMPTY +/// }}); +/// ``` #[macro_export] macro_rules! integration_test { + ($fn_name:ident, meta = const $meta:block) => { + $crate::paste::paste! { + // Wrapper converts any compatible error type to TestError. + fn [<__itest_wrap_ $fn_name>]() -> $crate::TestResult { + $fn_name().map_err(::std::convert::Into::into) + } + #[$crate::linkme::distributed_slice($crate::INTEGRATION_TESTS)] + static [<__ITEST_ $fn_name:upper>]: $crate::IntegrationTest = + $crate::IntegrationTest::with_meta( + stringify!($fn_name), + [<__itest_wrap_ $fn_name>], + $meta, + ); + } + }; ($fn_name:ident) => { $crate::paste::paste! { + fn [<__itest_wrap_ $fn_name>]() -> $crate::TestResult { + $fn_name().map_err(::std::convert::Into::into) + } #[$crate::linkme::distributed_slice($crate::INTEGRATION_TESTS)] - static [<$fn_name:upper>]: $crate::IntegrationTest = - $crate::IntegrationTest::new(stringify!($fn_name), $fn_name); + static [<__ITEST_ $fn_name:upper>]: $crate::IntegrationTest = + $crate::IntegrationTest::new( + stringify!($fn_name), + [<__itest_wrap_ $fn_name>], + ); } }; } @@ -139,13 +312,43 @@ macro_rules! integration_test { /// fn my_test(image: &str) -> itest::TestResult { Ok(()) } /// itest::parameterized_integration_test!(my_test); /// ``` +/// +/// With metadata: +/// +/// ```ignore +/// fn slow_test(image: &str) -> itest::TestResult { Ok(()) } +/// itest::parameterized_integration_test!(slow_test, meta = const { itest::TestMeta { +/// timeout: Some("30m"), +/// ..itest::TestMeta::EMPTY +/// }}); +/// ``` #[macro_export] macro_rules! 
parameterized_integration_test { + ($fn_name:ident, meta = const $meta:block) => { + $crate::paste::paste! { + fn [<__itest_wrap_ $fn_name>](p: &str) -> $crate::TestResult { + $fn_name(p).map_err(::std::convert::Into::into) + } + #[$crate::linkme::distributed_slice($crate::PARAMETERIZED_INTEGRATION_TESTS)] + static [<__ITEST_ $fn_name:upper>]: $crate::ParameterizedIntegrationTest = + $crate::ParameterizedIntegrationTest::with_meta( + stringify!($fn_name), + [<__itest_wrap_ $fn_name>], + $meta, + ); + } + }; ($fn_name:ident) => { $crate::paste::paste! { + fn [<__itest_wrap_ $fn_name>](p: &str) -> $crate::TestResult { + $fn_name(p).map_err(::std::convert::Into::into) + } #[$crate::linkme::distributed_slice($crate::PARAMETERIZED_INTEGRATION_TESTS)] - static [<$fn_name:upper>]: $crate::ParameterizedIntegrationTest = - $crate::ParameterizedIntegrationTest::new(stringify!($fn_name), $fn_name); + static [<__ITEST_ $fn_name:upper>]: $crate::ParameterizedIntegrationTest = + $crate::ParameterizedIntegrationTest::new( + stringify!($fn_name), + [<__itest_wrap_ $fn_name>], + ); } }; } @@ -158,31 +361,49 @@ macro_rules! parameterized_integration_test { /// The test binary name is taken from the first argument; it must match the /// installed binary name so that `bcvk ephemeral run-ssh` can invoke it. /// +/// An optional `itype = "..."` argument specifies the VM instance type +/// (e.g. `"u1.large"` for 2 vCPU / 8 GiB). When omitted the default +/// instance type is used. +/// /// ```ignore /// itest::privileged_test!("my-binary", my_test, { -/// // runs as root +/// // runs as root with default VM size +/// Ok(()) +/// }); +/// +/// itest::privileged_test!("my-binary", big_test, itype = "u1.large", { +/// // runs as root in a larger VM — note trailing comma after itype /// Ok(()) /// }); /// ``` #[macro_export] macro_rules! privileged_test { - ($binary:expr, $fn_name:ident, $body:expr) => { + ($binary:expr, $fn_name:ident, $(itype = $itype:expr,)? 
$body:expr) => { fn $fn_name() -> $crate::TestResult { + #[allow(unused_mut)] + let mut vm_opts = $crate::VmOptions::default(); + $( vm_opts.itype = Some($itype); )? if $crate::require_root( stringify!($fn_name), $binary, $crate::DispatchMode::Privileged, + &vm_opts, )? .is_some() { return Ok(()); } - // Inner closure: its return type is inferred from $body, + // Inner closure: return type is inferred from $body, // allowing any Result<(), E> where E: Into. let inner = || $body; inner().map_err(::std::convert::Into::into) } - $crate::integration_test!($fn_name); + $crate::integration_test!($fn_name, meta = const { + $crate::TestMeta { + needs_root: true, + ..$crate::TestMeta::EMPTY + } + }); }; } @@ -191,25 +412,46 @@ macro_rules! privileged_test { /// When not running as root the test is dispatched via `bcvk libvirt run` /// which does a full `bootc install to-disk`. /// +/// An optional `itype = "..."` argument specifies the VM instance type. +/// /// ```ignore /// itest::booted_test!("my-binary", my_test, { /// // runs inside a booted ostree deployment /// Ok(()) /// }); +/// +/// itest::booted_test!("my-binary", big_test, itype = "u1.large", { +/// // runs in a larger VM — note trailing comma after itype +/// Ok(()) +/// }); /// ``` #[macro_export] macro_rules! booted_test { - ($binary:expr, $fn_name:ident, $body:expr) => { + ($binary:expr, $fn_name:ident, $(itype = $itype:expr,)? $body:expr) => { fn $fn_name() -> $crate::TestResult { - if $crate::require_root(stringify!($fn_name), $binary, $crate::DispatchMode::Booted)? - .is_some() + #[allow(unused_mut)] + let mut vm_opts = $crate::VmOptions::default(); + $( vm_opts.itype = Some($itype); )? + if $crate::require_root( + stringify!($fn_name), + $binary, + $crate::DispatchMode::Booted, + &vm_opts, + )? 
+ .is_some() { return Ok(()); } let inner = || $body; inner().map_err(::std::convert::Into::into) } - $crate::integration_test!($fn_name); + $crate::integration_test!($fn_name, meta = const { + $crate::TestMeta { + needs_root: true, + isolation: $crate::Isolation::Machine, + ..$crate::TestMeta::EMPTY + } + }); }; } diff --git a/crates/itest/src/privilege.rs b/crates/itest/src/privilege.rs index 7bda68c..540e184 100644 --- a/crates/itest/src/privilege.rs +++ b/crates/itest/src/privilege.rs @@ -8,8 +8,15 @@ //! install). //! * **Booted** — `bcvk libvirt run` + SSH (full disk install via //! `bootc install to-disk`). +//! +//! Before launching a VM, `require_root` acquires tokens from the +//! global VM jobserver (one token per 128 MiB of VM memory). This +//! limits total concurrent VM memory to what the host can sustain. -use crate::TestError; +use crate::resources::{ + itype_memory_mib, vm_jobserver, DEFAULT_VM_MEMORY_MIB, DEFAULT_VM_VCPUS, TOKEN_MIB, +}; +use crate::{TestError, VmOptions}; use xshell::{cmd, Shell}; /// How a test should be dispatched when not running as root. @@ -29,11 +36,17 @@ pub enum DispatchMode { /// * Returns `Ok(Some(()))` after successfully dispatching — the /// caller should return early. /// +/// Before launching a VM, acquires tokens from the global VM +/// jobserver — one token per 128 MiB of VM memory. This ensures +/// concurrent VMs don't exceed the host's memory budget, regardless +/// of the test runner being used. +/// /// # Arguments /// /// * `test_name` — the name passed to `--exact` when re-invoking. /// * `test_binary` — binary name or path invoked inside the VM. /// * `mode` — [`DispatchMode::Privileged`] or [`DispatchMode::Booted`]. +/// * `vm_options` — VM sizing options (instance type, etc.). /// /// # Environment variables /// @@ -42,14 +55,11 @@ pub enum DispatchMode { /// when not root). /// * `ITEST_IN_VM` — recursion guard: if set we expect to already be /// root; if not, something is broken. 
-/// -/// Projects that need different env var names should set `ITEST_IMAGE` -/// from their own project-specific variable in `main()`, or define -/// thin wrapper functions. pub fn require_root( test_name: &str, test_binary: &str, mode: DispatchMode, + vm_options: &VmOptions, ) -> Result, TestError> { if rustix::process::getuid().is_root() { return Ok(None); @@ -66,18 +76,45 @@ pub fn require_root( .into() })?; + // Determine VM memory for jobserver token count. + // Priority: itype → look up its memory; explicit memory_mib; default. + let memory_mib = match vm_options.itype { + Some(it) => itype_memory_mib(it).unwrap_or(DEFAULT_VM_MEMORY_MIB), + None => vm_options.memory_mib.unwrap_or(DEFAULT_VM_MEMORY_MIB), + }; + + // Acquire jobserver tokens (1 token = 128 MiB, rounded up) + let tokens = (memory_mib + TOKEN_MIB - 1) / TOKEN_MIB; + let _permit = vm_jobserver().acquire(tokens).map_err(|e| -> TestError { + format!("failed to acquire {tokens} VM token(s): {e}").into() + })?; + let sh = Shell::new()?; let bcvk = std::env::var("BCVK_PATH").unwrap_or_else(|_| "bcvk".into()); - - // Pass the recursion guard so the binary knows it's inside a VM let in_vm_env = "ITEST_IN_VM=1"; + // Build VM sizing arguments. 
+ let mut vm_args: Vec = Vec::new(); + + if let Some(itype) = vm_options.itype { + vm_args.push("--itype".into()); + vm_args.push(itype.into()); + } else { + let mem = vm_options.memory_mib.unwrap_or(DEFAULT_VM_MEMORY_MIB); + let cpus = vm_options.vcpus.unwrap_or(DEFAULT_VM_VCPUS); + + vm_args.push("--memory".into()); + vm_args.push(format!("{mem}M")); + vm_args.push("--vcpus".into()); + vm_args.push(cpus.to_string()); + } + match mode { DispatchMode::Booted => { let vm_name = format!("itest-{}", test_name.replace('_', "-")); cmd!( sh, - "{bcvk} libvirt run --name {vm_name} --replace --detach --ssh-wait {image}" + "{bcvk} libvirt run --name {vm_name} --replace --detach --ssh-wait {vm_args...} {image}" ) .run()?; @@ -96,11 +133,12 @@ pub fn require_root( DispatchMode::Privileged => { cmd!( sh, - "{bcvk} ephemeral run-ssh {image} -- env {in_vm_env} {test_binary} --exact {test_name}" + "{bcvk} ephemeral run-ssh {vm_args...} {image} -- env {in_vm_env} {test_binary} --exact {test_name}" ) .run()?; } } + // _permit dropped here → tokens returned to pipe Ok(Some(())) } diff --git a/crates/itest/src/resources.rs b/crates/itest/src/resources.rs new file mode 100644 index 0000000..395a996 --- /dev/null +++ b/crates/itest/src/resources.rs @@ -0,0 +1,509 @@ +//! Host resource detection and VM concurrency control. +//! +//! Integration tests that dispatch to bcvk VMs must avoid +//! overcommitting host memory. This module provides: +//! +//! 1. Memory detection (physical + cgroup v2/v1 limits) +//! 2. A pipe-based jobserver (like `make -j`) that limits total +//! VM memory across all concurrent test processes +//! +//! The jobserver uses a Unix pipe filled with tokens, where each +//! token represents 128 MiB of VM memory budget. A test that needs +//! a 512 MiB VM (u1.nano) reads 4 tokens; a 2 GiB VM reads 16. +//! Tokens are returned when the VM exits. +//! +//! ## Setup +//! +//! The Justfile (or CI script) creates the jobserver before running +//! tests. 
The recommended pattern: +//! +//! ```bash +//! eval "$(my-test-binary --vm-jobserver)" +//! cargo nextest run ... +//! ``` +//! +//! The `--vm-jobserver` flag creates a pipe, fills it with tokens +//! based on detected host memory, and prints shell commands to +//! export `ITEST_VM_FDS=,`. All child +//! processes inherit these fds. +//! +//! If `ITEST_VM_FDS` is not set when `require_root()` runs, the +//! harness creates a process-local jobserver as fallback. This +//! works for `cargo test` (single process) but not for nextest +//! (separate processes per test). + +use std::cmp::min; +use std::fs; +use std::io::{self, Read, Write}; +use std::os::unix::io::{FromRawFd, RawFd}; +use std::path::Path; +use std::sync::OnceLock; + +/// Default VM memory in MiB (matches u1.nano). +pub(crate) const DEFAULT_VM_MEMORY_MIB: u32 = 512; + +/// Default VM vCPU count. +pub(crate) const DEFAULT_VM_VCPUS: u32 = 1; + +/// Fraction of host memory available for VMs (default 70%). +const DEFAULT_MEMORY_FRACTION: f64 = 0.70; + +/// Each jobserver token represents this many MiB of VM memory. +pub(crate) const TOKEN_MIB: u32 = 128; + +/// Resolve an instance type string (e.g. `"u1.nano"`) to its memory in MiB. +/// +/// Returns `None` for unrecognised types — callers should fall back to +/// [`DEFAULT_VM_MEMORY_MIB`]. +pub(crate) fn itype_memory_mib(itype: &str) -> Option { + // Keep in sync with crates/kit/src/instancetypes.rs. + // We duplicate a small table here so that itest doesn't depend on kit. + match itype { + "u1.nano" => Some(512), + "u1.micro" => Some(1024), + "u1.small" => Some(2048), + "u1.medium" => Some(4096), + "u1.2xmedium" => Some(4096), + "u1.large" => Some(8192), + "u1.xlarge" => Some(16384), + "u1.2xlarge" => Some(32768), + "u1.4xlarge" => Some(65536), + "u1.8xlarge" => Some(131072), + _ => None, + } +} + +// ── Pipe-based jobserver ──────────────────────────────────────────── + +/// A make-style pipe jobserver for VM memory budgeting. 
+/// +/// The pipe contains N byte-tokens, where N = `available_memory / TOKEN_MIB`. +/// Acquiring K tokens blocks until K bytes can be read from the pipe. +/// Releasing writes them back. +pub(crate) struct VmJobserver { + read_fd: RawFd, + write_fd: RawFd, +} + +/// Held jobserver tokens. Dropping returns them to the pipe. +pub(crate) struct VmPermit { + write_fd: RawFd, + count: u32, +} + +impl Drop for VmPermit { + fn drop(&mut self) { + let buf = vec![b'+'; self.count as usize]; + let mut f = unsafe { fs::File::from_raw_fd(self.write_fd) }; + let _ = f.write_all(&buf); + // Don't close — fd is shared + std::mem::forget(f); + } +} + +impl VmJobserver { + /// Create a new jobserver pipe and fill it with `tokens` tokens. + /// + /// The pipe fds have `CLOEXEC` cleared so they are inherited by + /// child processes (including nextest-launched test binaries). + pub(crate) fn create(tokens: u32) -> io::Result { + let (read_fd, write_fd) = pipe_fds()?; + + // Fill the pipe with tokens + let buf = vec![b'+'; tokens as usize]; + let mut f = unsafe { fs::File::from_raw_fd(write_fd) }; + f.write_all(&buf)?; + std::mem::forget(f); + + Ok(Self { read_fd, write_fd }) + } + + /// Adopt an existing jobserver from inherited file descriptors. + fn from_fds(read_fd: RawFd, write_fd: RawFd) -> Self { + Self { read_fd, write_fd } + } + + /// The read and write file descriptors. + pub(crate) fn fds(&self) -> (RawFd, RawFd) { + (self.read_fd, self.write_fd) + } + + /// Acquire `count` tokens (each = 128 MiB of VM memory). + /// + /// Blocks until enough tokens are available. + pub fn acquire(&self, count: u32) -> io::Result { + let mut buf = vec![0u8; count as usize]; + let mut f = unsafe { fs::File::from_raw_fd(self.read_fd) }; + f.read_exact(&mut buf)?; + std::mem::forget(f); + + Ok(VmPermit { + write_fd: self.write_fd, + count, + }) + } +} + +/// Create a pipe and return (read_fd, write_fd) with CLOEXEC cleared. 
+fn pipe_fds() -> io::Result<(RawFd, RawFd)> { + use rustix::fd::IntoRawFd; + use rustix::io::{fcntl_setfd, FdFlags}; + use rustix::pipe::pipe; + + let (reader, writer) = pipe().map_err(|e| io::Error::from_raw_os_error(e.raw_os_error()))?; + + // Clear CLOEXEC so children inherit the fds + fcntl_setfd(&reader, FdFlags::empty()) + .map_err(|e| io::Error::from_raw_os_error(e.raw_os_error()))?; + fcntl_setfd(&writer, FdFlags::empty()) + .map_err(|e| io::Error::from_raw_os_error(e.raw_os_error()))?; + + Ok((reader.into_raw_fd(), writer.into_raw_fd())) +} + +/// Get the global VM jobserver. +/// +/// Checks `ITEST_VM_FDS` for inherited fds first (set by the Justfile +/// via `--vm-jobserver`). Falls back to creating a process-local +/// jobserver — this works for `cargo test` (fork-exec capture in a +/// single process tree) but not for nextest (separate processes). +pub(crate) fn vm_jobserver() -> &'static VmJobserver { + static JS: OnceLock = OnceLock::new(); + JS.get_or_init(|| { + if let Some(js) = inherit_jobserver() { + eprintln!("itest: inherited VM jobserver from ITEST_VM_FDS"); + return js; + } + + let tokens = compute_token_count(); + let budget_mib = tokens * TOKEN_MIB; + eprintln!( + "itest: created VM jobserver: {tokens} token(s) \ + ({budget_mib} MiB budget, override with ITEST_VM_SLOTS)" + ); + let js = VmJobserver::create(tokens).expect("failed to create VM jobserver pipe"); + + // Export for fork-exec children (itest's own capture mode) + let (r, w) = js.fds(); + // SAFETY: called from OnceLock init, effectively single-threaded + unsafe { + std::env::set_var("ITEST_VM_FDS", format!("{r},{w}")); + } + + js + }) +} + +/// Try to inherit a jobserver from `ITEST_VM_FDS=,`. 
+fn inherit_jobserver() -> Option { + let val = std::env::var("ITEST_VM_FDS").ok()?; + let (r, w) = val.split_once(',')?; + let read_fd: RawFd = r.trim().parse().ok()?; + let write_fd: RawFd = w.trim().parse().ok()?; + + // Verify the fds are valid + if rustix::fs::fstat(unsafe { rustix::fd::BorrowedFd::borrow_raw(read_fd) }).is_err() { + return None; + } + + Some(VmJobserver::from_fds(read_fd, write_fd)) +} + +/// Compute how many tokens (128 MiB each) fit in the VM budget. +pub(crate) fn compute_token_count() -> u32 { + if let Some(slots) = env_u32("ITEST_VM_SLOTS") { + return slots.max(1); + } + + let host_mem_mib = detect_memory_mib(); + let fraction = env_f64("ITEST_VM_MEMORY_FRACTION").unwrap_or(DEFAULT_MEMORY_FRACTION); + let available_mib = (host_mem_mib as f64 * fraction) as u32; + (available_mib / TOKEN_MIB).max(1) +} + +// ── Memory detection ──────────────────────────────────────────────── + +/// Detect available memory in MiB, respecting cgroup limits. +fn detect_memory_mib() -> u32 { + detect_memory_mib_from( + Path::new("/proc/meminfo"), + Path::new("/proc/self/cgroup"), + Path::new("/sys/fs/cgroup"), + ) +} + +fn detect_memory_mib_from(meminfo: &Path, self_cgroup: &Path, cgroup_root: &Path) -> u32 { + let phys_kib = parse_meminfo_total(meminfo).unwrap_or(4 * 1024 * 1024); + let phys_bytes = phys_kib * 1024; + + let cgroup_bytes = detect_cgroup_memory_limit(self_cgroup, cgroup_root); + + let effective = match cgroup_bytes { + Some(limit) if limit < phys_bytes => limit, + _ => phys_bytes, + }; + + (effective / (1024 * 1024)) as u32 +} + +fn parse_meminfo_total(path: &Path) -> Option { + let content = fs::read_to_string(path).ok()?; + for line in content.lines() { + if let Some(rest) = line.strip_prefix("MemTotal:") { + return rest.trim().split_whitespace().next()?.parse().ok(); + } + } + None +} + +fn detect_cgroup_memory_limit(self_cgroup: &Path, cgroup_root: &Path) -> Option { + detect_cgroupv2_memory(self_cgroup, cgroup_root).or_else(|| 
detect_cgroupv1_memory(cgroup_root)) +} + +fn detect_cgroupv2_memory(self_cgroup: &Path, cgroup_root: &Path) -> Option { + let content = fs::read_to_string(self_cgroup).ok()?; + let cgroup_path = content.lines().find_map(|line| { + let line = line.trim(); + line.starts_with("0::").then(|| line[3..].to_string()) + })?; + + let mut min_limit: Option = None; + let mut path = cgroup_root.join(cgroup_path.trim_start_matches('/')); + + loop { + if let Ok(content) = fs::read_to_string(path.join("memory.max")) { + let content = content.trim(); + if content != "max" { + if let Ok(limit) = content.parse::() { + min_limit = Some(min_limit.map_or(limit, |cur| min(cur, limit))); + } + } + } + + if path == cgroup_root { + break; + } + match path.parent() { + Some(parent) if parent >= cgroup_root => path = parent.to_path_buf(), + _ => break, + } + } + + min_limit +} + +fn detect_cgroupv1_memory(cgroup_root: &Path) -> Option { + let limit: u64 = fs::read_to_string(cgroup_root.join("memory/memory.limit_in_bytes")) + .ok()? 
+ .trim() + .parse() + .ok()?; + (limit <= (1u64 << 62)).then_some(limit) +} + +fn env_u32(name: &str) -> Option { + std::env::var(name).ok()?.parse().ok() +} + +fn env_f64(name: &str) -> Option { + std::env::var(name).ok()?.parse().ok() +} + +#[cfg(test)] +mod tests { + use super::*; + + fn write_meminfo(dir: &Path, total_kib: u64) { + fs::write( + dir.join("meminfo"), + format!( + "MemTotal: {total_kib} kB\n\ + MemFree: 1000000 kB\n\ + MemAvailable: 2000000 kB\n" + ), + ) + .unwrap(); + } + + fn write_cgroupv2(dir: &Path, cgroup_path: &str, memory_max: &str) { + fs::write(dir.join("self_cgroup"), format!("0::{cgroup_path}\n")).unwrap(); + let cg_dir = dir + .join("cgroup_root") + .join(cgroup_path.trim_start_matches('/')); + fs::create_dir_all(&cg_dir).unwrap(); + fs::write(cg_dir.join("memory.max"), format!("{memory_max}\n")).unwrap(); + } + + // ── memory detection ──────────────────────────────────────────── + + #[test] + fn meminfo_parsing() { + let dir = tempfile::tempdir().unwrap(); + write_meminfo(dir.path(), 8_000_000); + assert_eq!( + parse_meminfo_total(&dir.path().join("meminfo")).unwrap(), + 8_000_000 + ); + } + + #[test] + fn meminfo_missing() { + assert!(parse_meminfo_total(Path::new("/nonexistent")).is_none()); + } + + #[test] + fn cgroupv2_limit() { + let dir = tempfile::tempdir().unwrap(); + write_cgroupv2(dir.path(), "/user.slice/test", "7516192768"); + assert_eq!( + detect_cgroupv2_memory( + &dir.path().join("self_cgroup"), + &dir.path().join("cgroup_root") + ), + Some(7_516_192_768) + ); + } + + #[test] + fn cgroupv2_unlimited() { + let dir = tempfile::tempdir().unwrap(); + write_cgroupv2(dir.path(), "/test", "max"); + assert!(detect_cgroupv2_memory( + &dir.path().join("self_cgroup"), + &dir.path().join("cgroup_root") + ) + .is_none()); + } + + #[test] + fn cgroupv2_hierarchy_minimum() { + let dir = tempfile::tempdir().unwrap(); + fs::write(dir.path().join("self_cgroup"), "0::/a/b/c\n").unwrap(); + let root = dir.path().join("cgroup_root"); + 
fs::create_dir_all(root.join("a/b/c")).unwrap(); + fs::write(root.join("a/memory.max"), "8589934592\n").unwrap(); + fs::write(root.join("a/b/c/memory.max"), "4294967296\n").unwrap(); + assert_eq!( + detect_cgroupv2_memory(&dir.path().join("self_cgroup"), &root), + Some(4_294_967_296) + ); + } + + #[test] + fn memory_cgroup_cap() { + let dir = tempfile::tempdir().unwrap(); + write_meminfo(dir.path(), 16_777_216); + write_cgroupv2(dir.path(), "/test", "7516192768"); + assert_eq!( + detect_memory_mib_from( + &dir.path().join("meminfo"), + &dir.path().join("self_cgroup"), + &dir.path().join("cgroup_root") + ), + 7168 + ); + } + + #[test] + fn memory_physical_smaller() { + let dir = tempfile::tempdir().unwrap(); + write_meminfo(dir.path(), 4_194_304); + write_cgroupv2(dir.path(), "/test", "17179869184"); + assert_eq!( + detect_memory_mib_from( + &dir.path().join("meminfo"), + &dir.path().join("self_cgroup"), + &dir.path().join("cgroup_root") + ), + 4096 + ); + } + + #[test] + fn memory_no_cgroup() { + let dir = tempfile::tempdir().unwrap(); + write_meminfo(dir.path(), 8_388_608); + assert_eq!( + detect_memory_mib_from( + &dir.path().join("meminfo"), + &dir.path().join("x"), + &dir.path().join("y") + ), + 8192 + ); + } + + // ── token budget ──────────────────────────────────────────────── + + #[test] + fn tokens_gha_runner() { + // 7 GiB host → 70% = 5017 MiB → 5017/128 = 39 tokens + let dir = tempfile::tempdir().unwrap(); + write_meminfo(dir.path(), 7_340_032); + let mib = detect_memory_mib_from( + &dir.path().join("meminfo"), + &dir.path().join("x"), + &dir.path().join("y"), + ); + let available = (mib as f64 * DEFAULT_MEMORY_FRACTION) as u32; + assert_eq!(available / TOKEN_MIB, 39); + // A 512 MiB VM (u1.nano) needs 512/128 = 4 tokens → 39/4 = 9 concurrent VMs + assert_eq!(available / DEFAULT_VM_MEMORY_MIB, 9); + } + + #[test] + fn tokens_tiny_host() { + // 1.5 GiB → 70% = 1075 MiB → 1075/128 = 8 tokens (two 512 MiB VMs) + let dir = tempfile::tempdir().unwrap(); + 
write_meminfo(dir.path(), 1_572_864); + let mib = detect_memory_mib_from( + &dir.path().join("meminfo"), + &dir.path().join("x"), + &dir.path().join("y"), + ); + let available = (mib as f64 * DEFAULT_MEMORY_FRACTION) as u32; + assert_eq!((available / TOKEN_MIB).max(1), 8); + } + + // ── itype lookup ────────────────────────────────────────────── + + #[test] + fn itype_known() { + assert_eq!(itype_memory_mib("u1.nano"), Some(512)); + assert_eq!(itype_memory_mib("u1.micro"), Some(1024)); + assert_eq!(itype_memory_mib("u1.large"), Some(8192)); + } + + #[test] + fn itype_unknown() { + assert_eq!(itype_memory_mib("custom.big"), None); + } + + // ── jobserver ─────────────────────────────────────────────────── + + #[test] + fn jobserver_create_and_acquire() { + let js = VmJobserver::create(3).unwrap(); + let p1 = js.acquire(1).unwrap(); + let p2 = js.acquire(2).unwrap(); + drop(p1); + let _p3 = js.acquire(1).unwrap(); + drop(p2); + drop(_p3); + } + + #[test] + fn jobserver_weighted() { + let js = VmJobserver::create(4).unwrap(); + + // 4 GiB VM takes all tokens + let p1 = js.acquire(4).unwrap(); + drop(p1); + + // 2 × 2 GiB VMs + let p2 = js.acquire(2).unwrap(); + let p3 = js.acquire(2).unwrap(); + drop(p2); + drop(p3); + } +} diff --git a/crates/itest/tests/fixture.rs b/crates/itest/tests/fixture.rs new file mode 100644 index 0000000..0e1b393 --- /dev/null +++ b/crates/itest/tests/fixture.rs @@ -0,0 +1,57 @@ +//! Tiny test fixture binary for testing itest's harness. +//! +//! Registers a few tests with known output patterns so the integration +//! tests can verify capture, --emit-tmt, --list, etc. 
+
+#![allow(unsafe_code)]
+
+#[itest::test_attr]
+fn passing_test() -> itest::TestResult {
+    println!("FIXTURE_STDOUT_PASS");
+    eprintln!("FIXTURE_STDERR_PASS");
+    Ok(())
+}
+
+#[itest::test_attr]
+fn failing_test() -> itest::TestResult {
+    println!("FIXTURE_STDOUT_FAIL");
+    eprintln!("FIXTURE_STDERR_FAIL");
+    Err("deliberate failure".into())
+}
+
+/// A test with rich metadata to verify it flows through to emitted formats.
+#[itest::test_attr(
+    timeout = "1h",
+    needs_root,
+    tags = ["slow", "network"],
+    summary = "A test with rich metadata",
+    needs_internet,
+    flaky,
+)]
+fn meta_test() -> itest::TestResult {
+    Ok(())
+}
+
+#[itest::test_attr]
+async fn async_test() -> itest::TestResult {
+    println!("FIXTURE_ASYNC");
+    // Prove we're actually in a tokio runtime
+    tokio::task::yield_now().await;
+    Ok(())
+}
+
+fn parameterized_test(param: &str) -> itest::TestResult {
+    println!("FIXTURE_PARAM={param}");
+    Ok(())
+}
+itest::parameterized_integration_test!(parameterized_test);
+
+fn main() {
+    let config = itest::TestConfig {
+        report_name: "itest-fixture".into(),
+        suite_name: "fixture".into(),
+        parameters: vec!["alpha".into(), "beta".into()],
+    };
+
+    itest::run_tests_with_config(config);
+}
diff --git a/crates/itest/tests/harness_integration.rs b/crates/itest/tests/harness_integration.rs
new file mode 100644
index 0000000..3ad884e
--- /dev/null
+++ b/crates/itest/tests/harness_integration.rs
@@ -0,0 +1,252 @@
+//! Integration tests for itest's harness features.
+//!
+//! These tests run the `itest-fixture` binary (a tiny test harness
+//! with known tests) and verify capture, --emit-tmt, and --list
+//! behaviour.
+
+use std::process::Command;
+
+/// Path to the fixture binary, set by cargo.
+fn fixture_bin() -> String {
+    // cargo sets CARGO_BIN_EXE_<name> for [[bin]] targets in the same package
+    env!("CARGO_BIN_EXE_itest-fixture").to_string()
+}
+
+/// Run the fixture with the given args and env.
+fn run_fixture(args: &[&str], env: &[(&str, &str)]) -> std::process::Output {
+    let mut cmd = Command::new(fixture_bin());
+    cmd.args(args);
+    for (k, v) in env {
+        cmd.env(k, v);
+    }
+    // Ensure we don't inherit these from the outer test runner
+    cmd.env_remove("NEXTEST"); // NOTE(review): presumably switches the harness into nextest mode — confirm
+    cmd.env_remove("TMT_TEST_DATA"); // NOTE(review): presumably enables tmt output handling — confirm
+    cmd.output()
+        .unwrap_or_else(|e| panic!("failed to run fixture: {e}"))
+}
+
+// ── --list ────────────────────────────────────────────────────────
+
+#[test]
+fn list_shows_all_tests() {
+    let out = run_fixture(&["--list"], &[("ITEST_SUBPROCESS", "1")]);
+    let stdout = String::from_utf8_lossy(&out.stdout);
+
+    assert!(out.status.success(), "fixture --list failed: {stdout}");
+    assert!(stdout.contains("passing_test: test"));
+    assert!(stdout.contains("failing_test: test"));
+    assert!(stdout.contains("async_test: test"));
+    assert!(stdout.contains("parameterized_test_alpha: test"));
+    assert!(stdout.contains("parameterized_test_beta: test"));
+}
+
+// ── async tests ───────────────────────────────────────────────────
+
+#[test]
+fn async_test_runs_with_tokio_runtime() {
+    // The async_test fixture uses tokio::task::yield_now().await,
+    // proving it has a real tokio runtime.
+    let out = run_fixture(&["--exact", "async_test"], &[("ITEST_SUBPROCESS", "1")]);
+    let stdout = String::from_utf8_lossy(&out.stdout);
+
+    assert!(out.status.success(), "async test should pass");
+    assert!(
+        stdout.contains("FIXTURE_ASYNC"),
+        "async test should produce output, got:\n{stdout}"
+    );
+}
+
+// ── fork-exec capture ─────────────────────────────────────────────
+
+#[test]
+fn capture_hides_passing_test_output() {
+    // Run only the passing test, with capture active (no NEXTEST, no
+    // ITEST_SUBPROCESS).
+    let out = run_fixture(&["--exact", "passing_test"], &[]);
+    let stdout = String::from_utf8_lossy(&out.stdout);
+    let stderr = String::from_utf8_lossy(&out.stderr);
+
+    assert!(out.status.success(), "passing test should succeed");
+    // The fixture prints FIXTURE_STDOUT_PASS, but capture should hide it
+    assert!(
+        !stdout.contains("FIXTURE_STDOUT_PASS"),
+        "captured output should not appear in stdout for passing test, got:\n{stdout}"
+    );
+    assert!(
+        !stderr.contains("FIXTURE_STDERR_PASS"),
+        "captured output should not appear in stderr for passing test, got:\n{stderr}"
+    );
+}
+
+#[test]
+fn capture_shows_failing_test_output() {
+    // Run only the failing test with capture active
+    let out = run_fixture(&["--exact", "failing_test"], &[]);
+
+    assert!(!out.status.success(), "failing test should fail");
+
+    // The failure output should include the captured stdout/stderr
+    let combined = format!(
+        "{}\n{}",
+        String::from_utf8_lossy(&out.stdout),
+        String::from_utf8_lossy(&out.stderr)
+    );
+    assert!(
+        combined.contains("FIXTURE_STDOUT_FAIL"),
+        "failure output should contain captured stdout, got:\n{combined}"
+    );
+    assert!(
+        combined.contains("FIXTURE_STDERR_FAIL"),
+        "failure output should contain captured stderr, got:\n{combined}"
+    );
+}
+
+#[test]
+fn nocapture_passes_output_through() {
+    // With ITEST_NOCAPTURE=1, output should pass through directly
+    let out = run_fixture(&["--exact", "passing_test"], &[("ITEST_NOCAPTURE", "1")]);
+    let stdout = String::from_utf8_lossy(&out.stdout);
+
+    assert!(out.status.success());
+    assert!(
+        stdout.contains("FIXTURE_STDOUT_PASS"),
+        "with ITEST_NOCAPTURE, output should pass through, got:\n{stdout}"
+    );
+}
+
+#[test]
+fn subprocess_env_runs_directly() {
+    // With ITEST_SUBPROCESS=1, the test runs directly (no fork-exec).
+    // Output should pass through since there's no capture layer.
+    let out = run_fixture(
+        &["--exact", "passing_test", "--nocapture"],
+        &[("ITEST_SUBPROCESS", "1")],
+    );
+    let stdout = String::from_utf8_lossy(&out.stdout);
+
+    assert!(out.status.success());
+    assert!(
+        stdout.contains("FIXTURE_STDOUT_PASS"),
+        "in subprocess mode, output should pass through, got:\n{stdout}"
+    );
+}
+
+// ── --emit-tmt ────────────────────────────────────────────────────
+
+#[test]
+fn emit_tmt_generates_valid_fmf() {
+    let dir = tempfile::tempdir().unwrap();
+    let dir_path = dir.path().to_str().unwrap();
+
+    let out = run_fixture(&["--emit-tmt", dir_path], &[("ITEST_SUBPROCESS", "1")]);
+    assert!(
+        out.status.success(),
+        "--emit-tmt failed: {}",
+        String::from_utf8_lossy(&out.stderr)
+    );
+
+    let fmf = std::fs::read_to_string(dir.path().join("tests.fmf")).unwrap();
+
+    // Header
+    assert!(fmf.contains("# THIS IS GENERATED CODE"));
+
+    // Plain tests
+    assert!(fmf.contains("/passing_test:"));
+    assert!(fmf.contains(" test: itest-fixture --exact passing_test"));
+    assert!(fmf.contains("/failing_test:"));
+    assert!(fmf.contains(" test: itest-fixture --exact failing_test"));
+
+    // Parameterised tests
+    assert!(fmf.contains("/parameterized_test_alpha:"));
+    assert!(fmf.contains(" summary: parameterized_test [alpha]"));
+    assert!(fmf.contains("/parameterized_test_beta:"));
+
+    // All entries have duration
+    assert!(fmf.contains(" duration: 20m"));
+}
+
+#[test]
+fn emit_tmt_creates_directory() {
+    let parent = tempfile::tempdir().unwrap();
+    let nested = parent.path().join("deep").join("nested");
+    let dir_path = nested.to_str().unwrap();
+
+    let out = run_fixture(&["--emit-tmt", dir_path], &[("ITEST_SUBPROCESS", "1")]);
+    assert!(out.status.success());
+    assert!(nested.join("tests.fmf").exists());
+}
+
+// ── --emit-autopkgtest ────────────────────────────────────────────
+
+#[test]
+fn emit_autopkgtest_generates_control() {
+    let dir = tempfile::tempdir().unwrap();
+    let dir_path = dir.path().to_str().unwrap();
+
+    let out = run_fixture(
+        &["--emit-autopkgtest", dir_path],
+        &[("ITEST_SUBPROCESS", "1")],
+    );
+    assert!(
+        out.status.success(),
+        "--emit-autopkgtest failed: {}",
+        String::from_utf8_lossy(&out.stderr)
+    );
+
+    let control = std::fs::read_to_string(dir.path().join("control")).unwrap();
+
+    // Header
+    assert!(control.contains("# THIS IS GENERATED CODE"));
+
+    // Plain tests — each has a Test-Command stanza
+    assert!(control.contains("Test-Command: itest-fixture --exact passing_test"));
+    assert!(control.contains("Test-Command: itest-fixture --exact failing_test"));
+
+    // Parameterised tests
+    assert!(control.contains("Test-Command: itest-fixture --exact parameterized_test_alpha"));
+    assert!(control.contains("Test-Command: itest-fixture --exact parameterized_test_beta"));
+
+    // Features field present
+    assert!(control.contains("Features: test-name=passing_test"));
+
+    // meta_test has rich metadata that should map to DEP-8 fields
+    assert!(control.contains("Test-Command: itest-fixture --exact meta_test"));
+    assert!(
+        control.contains("Restrictions: needs-root, needs-internet, flaky"),
+        "meta_test should have rich restrictions, got:\n{control}"
+    );
+    assert!(
+        control.contains("Classes: slow, network"),
+        "meta_test should have Classes from tags, got:\n{control}"
+    );
+}
+
+#[test]
+fn emit_tmt_includes_metadata() {
+    let dir = tempfile::tempdir().unwrap();
+    let dir_path = dir.path().to_str().unwrap();
+
+    let out = run_fixture(&["--emit-tmt", dir_path], &[("ITEST_SUBPROCESS", "1")]);
+    assert!(out.status.success());
+
+    let fmf = std::fs::read_to_string(dir.path().join("tests.fmf")).unwrap();
+
+    // meta_test should have custom duration, tags, summary, and result
+    assert!(
+        fmf.contains(" duration: 1h"),
+        "meta_test should have 1h duration, got:\n{fmf}"
+    );
+    assert!(
+        fmf.contains(" summary: A test with rich metadata"),
+        "meta_test should have custom summary, got:\n{fmf}"
+    );
+    assert!(
+        fmf.contains(" tag: [slow, network]"),
+        "meta_test should have tags, got:\n{fmf}"
+    );
+    assert!(
+        fmf.contains(" result: xfail"),
+        "meta_test should have xfail result, got:\n{fmf}"
+    );
+}