diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml
index 76e6a75dc..9cd42fc1c 100644
--- a/.github/workflows/test.yml
+++ b/.github/workflows/test.yml
@@ -47,12 +47,37 @@ jobs:
with:
version: ${{ steps.uv_release.outputs.uv_version }}
- name: 'Run unit tests'
- run: make test-unit
+ run: make test-unit PARALLEL=12
integration-tests:
needs: code-quality-checks
- name: 'Integration Tests'
+ name: 'Integration Tests ${{ matrix.name }}'
runs-on: [self-hosted, linux, normal]
+ timeout-minutes: ${{ matrix.timeout }}
+ strategy:
+ fail-fast: true
+ matrix:
+ include:
+ - name: 'LLVM Concrete Tests'
+ test-args: '-k "llvm or test_run_smir_random"'
+ parallel: 12
+ timeout: 30
+ - name: 'Haskell Exec SMIR'
+ test-args: '-k "test_exec_smir and haskell"'
+ parallel: 6
+ timeout: 20
+ - name: 'Haskell Termination'
+ test-args: '-k test_prove_termination'
+ parallel: 6
+ timeout: 20
+ - name: 'Haskell Proofs'
+ test-args: '-k "test_prove and not test_prove_termination"'
+ parallel: 6
+ timeout: 120
+ - name: 'Remainder'
+ test-args: '-k "not llvm and not test_run_smir_random and not test_exec_smir and not test_prove_termination and not test_prove"'
+ parallel: 6
+ timeout: 20
steps:
- name: 'Check out code'
uses: actions/checkout@v4
@@ -66,7 +91,9 @@ jobs:
- name: 'Build stable-mir-json and kmir'
run: docker exec --user github-user mir-semantics-ci-${GITHUB_SHA} make build
- name: 'Run integration tests'
- run: docker exec --user github-user mir-semantics-ci-${GITHUB_SHA} make test-integration
+ run: |
+ docker exec --user github-user mir-semantics-ci-${GITHUB_SHA} make test-integration \
+ TEST_ARGS='${{ matrix.test-args }}' PARALLEL=${{ matrix.parallel }}
- name: 'Tear down Docker'
if: always()
run: docker stop --time 0 mir-semantics-ci-${GITHUB_SHA}
diff --git a/kmir/src/kmir/__main__.py b/kmir/src/kmir/__main__.py
index b2ace6887..2ada43ffc 100644
--- a/kmir/src/kmir/__main__.py
+++ b/kmir/src/kmir/__main__.py
@@ -23,6 +23,7 @@
LinkOpts,
ProveOpts,
PruneOpts,
+ ReduceOpts,
RunOpts,
SectionEdgeOpts,
ShowOpts,
@@ -251,6 +252,17 @@ def _kmir_link(opts: LinkOpts) -> None:
result.dump(opts.output_file)
+def _kmir_reduce(opts: ReduceOpts) -> None:
+ smir_info = SMIRInfo.from_file(opts.smir_file)
+ original = len(smir_info.items)
+ reduced = smir_info.reduce_to(opts.roots)
+ reduced.dump(opts.output_file)
+ _LOGGER.info(
+ f'Reduced {original} -> {len(reduced.items)} items'
+ f' ({original - len(reduced.items)} pruned), written to {opts.output_file}'
+ )
+
+
def kmir(args: Sequence[str]) -> None:
ns = _arg_parser().parse_args(args)
opts = _parse_args(ns)
@@ -272,6 +284,8 @@ def kmir(args: Sequence[str]) -> None:
_kmir_prove(opts)
case LinkOpts():
_kmir_link(opts)
+ case ReduceOpts():
+ _kmir_reduce(opts)
case _:
raise AssertionError()
@@ -575,6 +589,27 @@ def _arg_parser() -> ArgumentParser:
default='linker_output.smir.json',
)
+ reduce_parser = command_parser.add_parser(
+ 'reduce',
+ help='Reduce SMIR to functions reachable from given roots',
+ parents=[kcli_args.logging_args],
+ )
+ reduce_parser.add_argument('smir_file', metavar='SMIR_JSON', help='SMIR JSON file to reduce')
+ reduce_parser.add_argument(
+ '--roots',
+ '-r',
+ required=True,
+ metavar='ROOTS',
+ help='Comma-separated root function names, or @file for newline-separated file',
+ )
+ reduce_parser.add_argument(
+ '--output-file',
+ '-o',
+ metavar='FILE',
+ help='Output file (default: reduced.smir.json)',
+ default='reduced.smir.json',
+ )
+
return parser
@@ -677,6 +712,12 @@ def _parse_args(ns: Namespace) -> KMirOpts:
smir_files=ns.smir_files,
output_file=ns.output_file,
)
+ case 'reduce':
+ return ReduceOpts(
+ smir_file=ns.smir_file,
+ roots=ns.roots,
+ output_file=ns.output_file,
+ )
case _:
raise AssertionError()
diff --git a/kmir/src/kmir/_prove.py b/kmir/src/kmir/_prove.py
index de729adf8..61d2a352d 100644
--- a/kmir/src/kmir/_prove.py
+++ b/kmir/src/kmir/_prove.py
@@ -82,7 +82,6 @@ def _prove(opts: ProveOpts, target_path: Path, label: str) -> APRProof:
if 'MonoItemFn' in item['mono_item_kind'] and item['mono_item_kind']['MonoItemFn'].get('body') is None
]
has_missing = len(missing_body_syms) > 0
- _LOGGER.info(f'Reduced items table size {len(smir_info.items)}')
if has_missing:
_LOGGER.info(f'missing-bodies-present={has_missing} count={len(missing_body_syms)}')
_LOGGER.debug(f'Missing-body function symbols (first 5): {missing_body_syms[:5]}')
diff --git a/kmir/src/kmir/kdist/mir-semantics/rt/data.md b/kmir/src/kmir/kdist/mir-semantics/rt/data.md
index f76620160..5839521b4 100644
--- a/kmir/src/kmir/kdist/mir-semantics/rt/data.md
+++ b/kmir/src/kmir/kdist/mir-semantics/rt/data.md
@@ -178,15 +178,11 @@ In contrast to regular write operations, the value does not have to be _mutable_
The `#setLocalValue` operation writes a `Value` value to a given `Place` within the `List` of local variables currently on top of the stack.
If we are setting a value at a `Place` which has `Projection`s in it, then we must first traverse the projections before setting the value.
-A variant `#forceSetLocal` is provided for setting the local value without checking the mutability of the location.
**Note on mutability:** The Rust compiler validates assignment legality and may reuse immutable locals in MIR (e.g., loop variables), so `#setLocalValue` does not guard on mutability.
-TODO: `#forceSetLocal` is now functionally identical to `#setLocalValue` and may be removed.
-
```k
syntax KItem ::= #setLocalValue( Place, Evaluation ) [strict(2)]
- | #forceSetLocal ( Local , Evaluation ) [strict(2)]
rule #setLocalValue(place(local(I), .ProjectionElems), VAL) => .K ...
@@ -216,11 +212,6 @@ TODO: `#forceSetLocal` is now functionally identical to `#setLocalValue` and may
andBool isTypedValue(LOCALS[I])
[preserves-definedness] // valid list indexing and sort checked
- rule #forceSetLocal(local(I), MBVAL:Value) => .K ...
- LOCALS => LOCALS[I <- typedValue(MBVAL, tyOfLocal(getLocal(LOCALS, I)), mutabilityOf(getLocal(LOCALS, I)))]
- requires 0 <=Int I andBool I #traverseProjection(toLocal(I), _ORIGINAL, .ProjectionElems, CONTEXTS)
~> #writeMoved
- => #forceSetLocal(local(I), #buildUpdate(Moved, CONTEXTS)) // TODO retain Ty and Mutability from _ORIGINAL
+ => #setLocalValue(place(local(I), .ProjectionElems), #buildUpdate(Moved, CONTEXTS)) // TODO retain Ty and Mutability from _ORIGINAL
...
[preserves-definedness] // valid context ensured upon context construction
@@ -1238,8 +1229,8 @@ This eliminates any `Deref` projections from the place, and also resolves `Index
// Borrowing a zero-sized local that is still `NewLocal`: initialise it, then reuse the regular rule.
rule rvalueRef(REGION, KIND, place(local(I), PROJS))
- => #forceSetLocal(
- local(I),
+ => #setLocalValue(
+ place(local(I), .ProjectionElems),
#decodeConstant(
constantKindZeroSized,
tyOfLocal(getLocal(LOCALS, I)),
@@ -1421,7 +1412,41 @@ Boolean values can also be cast to Integers (encoding `true` as `1`).
[preserves-definedness] // ensures #numTypeOf is defined
```
-Casts involving `Float` values are currently not implemented.
+Casts involving `Float` values: `IntToFloat`, `FloatToInt`, and `FloatToFloat`.
+
+```k
+ // IntToFloat: convert integer to float with the target float type's precision
+ rule #cast(Integer(VAL, _WIDTH, _SIGNEDNESS), castKindIntToFloat, _, TY)
+ => Float(
+ Int2Float(VAL,
+ #significandBits(#floatTypeOf(lookupTy(TY))),
+ #exponentBits(#floatTypeOf(lookupTy(TY)))),
+ #bitWidth(#floatTypeOf(lookupTy(TY)))
+ )
+ ...
+
+ [preserves-definedness]
+
+ // FloatToInt: truncate float towards zero and convert to integer
+ rule #cast(Float(VAL, _WIDTH), castKindFloatToInt, _, TY)
+ => #intAsType(Float2Int(VAL), 128, #intTypeOf(lookupTy(TY)))
+ ...
+
+ requires #isIntType(lookupTy(TY))
+ [preserves-definedness]
+
+ // FloatToFloat: round float to the target float type's precision
+ rule #cast(Float(VAL, _WIDTH), castKindFloatToFloat, _, TY)
+ => Float(
+ roundFloat(VAL,
+ #significandBits(#floatTypeOf(lookupTy(TY))),
+ #exponentBits(#floatTypeOf(lookupTy(TY)))),
+ #bitWidth(#floatTypeOf(lookupTy(TY)))
+ )
+ ...
+
+ [preserves-definedness]
+```
### Casts between pointer types
@@ -1606,6 +1631,30 @@ The first cast is reified as a `thunk`, the second one resolves it and eliminate
andBool lookupTy(TY_DEST_INNER) ==K lookupTy(TY_SRC_OUTER) // and is well-formed (invariant)
```
+Transmuting a value `T` into a single-field wrapper struct `G` (or vice versa) is sound when the struct
+has its field at zero offset and the `transmute` call compiled (the compiler guarantees equal sizes).
+These are essentially `#[repr(transparent)]` but are `#[repr(rust)]` by default without the annotation and
+thus there are no compiler optimisations to remove the transmute (there would be otherwise for downcast).
+The layout is the same for the wrapped type and so the cast in either direction is sound.
+
+```k
+ // Up: T -> Wrapper(T)
+ rule #cast(VAL:Value, castKindTransmute, TY_SOURCE, TY_TARGET)
+ =>
+ Aggregate(variantIdx(0), ListItem(VAL))
+ ...
+
+ requires #transparentFieldTy(lookupTy(TY_TARGET)) ==K TY_SOURCE
+
+ // Down: Wrapper(T) -> T
+ rule #cast(Aggregate(variantIdx(0), ListItem(VAL)), castKindTransmute, TY_SOURCE, TY_TARGET)
+ =>
+ VAL
+ ...
+
+ requires {#transparentFieldTy(lookupTy(TY_SOURCE))}:>Ty ==K TY_TARGET
+```
+
Casting a byte array/slice to an integer reinterprets the bytes in little-endian order.
```k
@@ -1872,6 +1921,9 @@ Zero-sized types can be decoded trivially into their respective representation.
// zero-sized array
rule #decodeConstant(constantKindZeroSized, _TY, typeInfoArrayType(_, _))
=> Range(.List) ...
+ // zero-sized function item (e.g., closures without captures)
+ rule #decodeConstant(constantKindZeroSized, _TY, typeInfoFunType(_))
+ => Aggregate(variantIdx(0), .List) ...
```
Allocated constants of reference type with a single provenance map entry are decoded as references
@@ -1991,6 +2043,20 @@ are correct.
rule onInt(binOpRem, X, Y) => X %Int Y requires Y =/=Int 0 [preserves-definedness]
// operation undefined otherwise
+ // performs the given operation on IEEE 754 floats
+ // Note: Rust's float % is truncating remainder: x - trunc(x/y) * y
+ // This differs from K's %Float which is IEEE 754 remainder (round to nearest).
+ syntax Float ::= onFloat( BinOp, Float, Float ) [function]
+ // -------------------------------------------------------
+ rule onFloat(binOpAdd, X, Y) => X +Float Y [preserves-definedness]
+ rule onFloat(binOpAddUnchecked, X, Y) => X +Float Y [preserves-definedness]
+ rule onFloat(binOpSub, X, Y) => X -Float Y [preserves-definedness]
+ rule onFloat(binOpSubUnchecked, X, Y) => X -Float Y [preserves-definedness]
+ rule onFloat(binOpMul, X, Y) => X *Float Y [preserves-definedness]
+ rule onFloat(binOpMulUnchecked, X, Y) => X *Float Y [preserves-definedness]
+ rule onFloat(binOpDiv, X, Y) => X /Float Y [preserves-definedness]
+ rule onFloat(binOpRem, X, Y) => X -Float (Y *Float truncFloat(X /Float Y)) [preserves-definedness]
+
// error cases for isArithmetic(BOP):
// * arguments must be Numbers
@@ -2059,6 +2125,18 @@ are correct.
// infinite precision result must equal truncated result
andBool truncate(onInt(BOP, ARG1, ARG2), WIDTH, Unsigned) ==Int onInt(BOP, ARG1, ARG2)
[preserves-definedness]
+
+ // Float arithmetic: Rust never emits CheckedBinaryOp for floats (only BinaryOp),
+ // so the checked flag is always false here. See rustc_const_eval/src/interpret/operator.rs:
+ // binary_float_op returns a plain value, not a (value, overflow) pair.
+ rule #applyBinOp(
+ BOP,
+ Float(ARG1, WIDTH),
+ Float(ARG2, WIDTH),
+ false)
+ => Float(onFloat(BOP, ARG1, ARG2), WIDTH)
+ requires isArithmetic(BOP)
+ [preserves-definedness]
```
#### Comparison operations
@@ -2095,6 +2173,14 @@ The argument types must be the same for all comparison operations, however this
rule cmpOpBool(binOpGe, X, Y) => cmpOpBool(binOpLe, Y, X)
rule cmpOpBool(binOpGt, X, Y) => cmpOpBool(binOpLt, Y, X)
+ syntax Bool ::= cmpOpFloat ( BinOp, Float, Float ) [function]
+ rule cmpOpFloat(binOpEq, X, Y) => X ==Float Y
+ rule cmpOpFloat(binOpLt, X, Y) => X <Float Y
+ rule cmpOpFloat(binOpLe, X, Y) => X <=Float Y
+ rule cmpOpFloat(binOpNe, X, Y) => X =/=Float Y
+ rule cmpOpFloat(binOpGe, X, Y) => X >=Float Y
+ rule cmpOpFloat(binOpGt, X, Y) => X >Float Y
+
// error cases for isComparison and binOpCmp:
// * arguments must be numbers or Bool
@@ -2122,9 +2208,19 @@ The argument types must be the same for all comparison operations, however this
BoolVal(cmpOpBool(OP, VAL1, VAL2))
requires isComparison(OP)
[priority(60), preserves-definedness] // OP known to be a comparison
+
+ rule #applyBinOp(OP, Float(VAL1, WIDTH), Float(VAL2, WIDTH), _)
+ =>
+ BoolVal(cmpOpFloat(OP, VAL1, VAL2))
+ requires isComparison(OP)
+ [preserves-definedness] // OP known to be a comparison
```
-The `binOpCmp` operation returns `-1`, `0`, or `+1` (the behaviour of Rust's `std::cmp::Ordering as i8`), indicating `LE`, `EQ`, or `GT`.
+Types whose equality is an equivalence relation can implement [Eq](https://doc.rust-lang.org/std/cmp/trait.Eq.html),
+and then they may implement [Ord](https://doc.rust-lang.org/std/cmp/trait.Ord.html) for a total ordering.
+For types that implement `Ord` the `cmp` method must be implemented which can compare any two elements respective to their total ordering.
+Here we provide the `binOpCmp` for `Bool` and `Int` operation which returns `-1`, `0`, or `+1` (the behaviour of Rust's `std::cmp::Ordering as i8`),
+indicating `LE`, `EQ`, or `GT`.
```k
syntax Int ::= cmpInt ( Int , Int ) [function , total]
@@ -2159,7 +2255,11 @@ The semantics of the operation in this case is to wrap around (with the given bi
...
- // TODO add rule for Floats once they are supported.
+ rule #applyUnOp(unOpNeg, Float(VAL, WIDTH))
+ =>
+ Float(--Float VAL, WIDTH)
+ ...
+
```
The `unOpNot` operation works on boolean and integral values, with the usual semantics for booleans and a bitwise semantics for integral values (overflows cannot occur).
diff --git a/kmir/src/kmir/kdist/mir-semantics/rt/decoding.md b/kmir/src/kmir/kdist/mir-semantics/rt/decoding.md
index 98b522012..f7d87552d 100644
--- a/kmir/src/kmir/kdist/mir-semantics/rt/decoding.md
+++ b/kmir/src/kmir/kdist/mir-semantics/rt/decoding.md
@@ -52,10 +52,13 @@ and arrays (where layout is trivial).
requires #isIntType(TYPEINFO) andBool lengthBytes(BYTES) ==Int #elemSize(TYPEINFO)
[preserves-definedness]
+ // Float: handled in separate module for numeric operations
+ rule #decodeValue(BYTES, TYPEINFO) => #decodeFloat(BYTES, #floatTypeOf(TYPEINFO))
+ requires #isFloatType(TYPEINFO) andBool lengthBytes(BYTES) ==Int #elemSize(TYPEINFO)
+ [preserves-definedness]
+
// TODO Char type
// rule #decodeConstant(constantKindAllocated(allocation(BYTES, _, _, _)), typeInfoPrimitiveType(primTypeChar)) => typedValue(Str(...), TY, mutabilityNot)
-
- // TODO Float decoding: not supported natively in K
```
diff --git a/kmir/src/kmir/kdist/mir-semantics/rt/numbers.md b/kmir/src/kmir/kdist/mir-semantics/rt/numbers.md
index d34c62b95..1f3f2688d 100644
--- a/kmir/src/kmir/kdist/mir-semantics/rt/numbers.md
+++ b/kmir/src/kmir/kdist/mir-semantics/rt/numbers.md
@@ -5,6 +5,7 @@ The code in this file implements functionality for `Integer` and `Float` values
```k
requires "./value.md"
requires "../ty.md"
+requires "rat.md"
module RT-NUMBERS
imports TYPES
@@ -13,6 +14,8 @@ module RT-NUMBERS
imports BOOL
imports BYTES
imports INT
+ imports FLOAT
+ imports RAT
```
## Helpers and Constants for Integer Operations
@@ -38,6 +41,15 @@ module RT-NUMBERS
rule #isIntType(typeInfoPrimitiveType(primTypeInt(_))) => true
rule #isIntType(typeInfoPrimitiveType(primTypeUint(_))) => true
rule #isIntType(_) => false [owise]
+
+ syntax Bool ::= #isFloatType ( TypeInfo ) [function, total]
+ // --------------------------------------------------------
+ rule #isFloatType(typeInfoPrimitiveType(primTypeFloat(_))) => true
+ rule #isFloatType(_) => false [owise]
+
+ syntax FloatTy ::= #floatTypeOf ( TypeInfo ) [function]
+ // ----------------------------------------------------
+ rule #floatTypeOf(typeInfoPrimitiveType(primTypeFloat(FLOATTY))) => FLOATTY
```
Constants used for overflow-checking and truncation are defined here as macros.
@@ -110,6 +122,197 @@ This truncation function is instrumental in the implementation of Integer arithm
[preserves-definedness]
```
+## Helpers and Constants for Float Operations
+
+Rust supports four fixed-width IEEE 754 float types: `f16`, `f32`, `f64`, and `f128`.
+The helpers below extract format parameters for each type. First, an overview of the format.
+
+### IEEE 754 Binary Format
+
+An IEEE 754 binary floating-point word has three fields stored left-to-right:
+
+```
+ MSB LSB
+ +---------+----------------+----------------------+
+ | sign | exponent | fraction |
+ | (1 bit) | (EB bits) | (SB - 1 bits) |
+ +---------+----------------+----------------------+
+ total bits = 1 + EB + (SB - 1)
+```
+
+The **significand** (also called **precision**) is the total number of significant bits
+in the represented value, including an implicit leading 1 that is not stored in the
+fraction field. For a normal number, the mathematical value is:
+
+ value = (-1)^sign * 2^(exponent - bias) * 1.fraction
+
+The "1." prefix is the implicit bit, so the significand has `SB` bits of precision
+even though only `SB - 1` fraction bits are stored. For example, f64 stores 52 fraction
+bits but has 53 bits of significand precision.
+
+K's built-in `FLOAT` module uses this convention: `Int2Float(x, precision, exponentBits)`
+takes `precision = SB` (total significand bits including the implicit 1) and `exponentBits = EB`.
+See [IEEE 754 on Wikipedia](https://en.wikipedia.org/wiki/IEEE_754) for full details.
+
+The exponent is stored as an unsigned integer in
+[excess-M encoding](https://en.wikipedia.org/wiki/Offset_binary) with `bias = 2^(EB-1) - 1`,
+so that the actual exponent is `stored - bias`. For f64, bias = 1023: a stored value of 1023
+means exponent 0, 1024 means +1, and 1 means -1022. Stored values 0 and `2^EB - 1` are
+reserved for zero/subnormals and infinity/NaN respectively.
+
+| Type | Total bits | Sign | Exponent (EB) | Fraction (SB-1) | Significand (SB) | Bias |
+|------|------------|------|---------------|-----------------|------------------|------------|
+| f16 | 16 | 1 | 5 | 10 | 11 | 15 |
+| f32 | 32 | 1 | 8 | 23 | 24 | 127 |
+| f64 | 64 | 1 | 11 | 52 | 53 | 1023 |
+| f128 | 128 | 1 | 15 | 112 | 113 | 16383 |
+
+```k
+ syntax Int ::= #significandBits ( FloatTy ) [function, total]
+ // ----------------------------------------------------------
+ rule #significandBits(floatTyF16) => 11
+ rule #significandBits(floatTyF32) => 24
+ rule #significandBits(floatTyF64) => 53
+ rule #significandBits(floatTyF128) => 113
+
+ syntax Int ::= #exponentBits ( FloatTy ) [function, total]
+ // -------------------------------------------------------
+ rule #exponentBits(floatTyF16) => 5
+ rule #exponentBits(floatTyF32) => 8
+ rule #exponentBits(floatTyF64) => 11
+ rule #exponentBits(floatTyF128) => 15
+
+ syntax Int ::= #bias ( FloatTy ) [function, total]
+ // -----------------------------------------------
+ rule #bias(FLOATTY) => (1 <<Int (#exponentBits(FLOATTY) -Int 1)) -Int 1
+```
+
+### Special Float Values
+
+K's `FLOAT` module encodes special float values with a `p<SB>x<EB>` suffix to specify precision and exponent
+bits. See IEEE 754 Binary Format above for values of SB and EB.
+
+For example, `Infinityp53x11` is f64 positive infinity, `NaNp24x8` is f32 NaN.
+
+```k
+ syntax Float ::= #posInfFloat ( FloatTy ) [function, total]
+ // --------------------------------------------------------
+ rule #posInfFloat(floatTyF16) => Infinityp11x5
+ rule #posInfFloat(floatTyF32) => Infinityp24x8
+ rule #posInfFloat(floatTyF64) => Infinityp53x11
+ rule #posInfFloat(floatTyF128) => Infinityp113x15
+
+ syntax Float ::= #nanFloat ( FloatTy ) [function, total]
+ // -----------------------------------------------------
+ rule #nanFloat(floatTyF16) => NaNp11x5
+ rule #nanFloat(floatTyF32) => NaNp24x8
+ rule #nanFloat(floatTyF64) => NaNp53x11
+ rule #nanFloat(floatTyF128) => NaNp113x15
+```
+
+## Decoding Float values from `Bytes` for `OperandConstant`
+
+The `#decodeFloat` function reconstructs a `Float` value from its IEEE 754 byte representation.
+The bytes are first converted to a raw integer, then the sign, biased exponent, and stored significand
+are extracted. The value is reconstructed using K's `Int2Float` and float arithmetic, with a
+high-precision intermediate to avoid overflow when reconstructing subnormals and small normal values.
+
+```k
+ syntax Value ::= #decodeFloat ( Bytes, FloatTy ) [function]
+ // --------------------------------------------------------
+ rule #decodeFloat(BYTES, FLOATTY) => #decodeFloatRaw(Bytes2Int(BYTES, LE, Unsigned), FLOATTY)
+ requires lengthBytes(BYTES) ==Int #bitWidth(FLOATTY) /Int 8
+ [preserves-definedness]
+
+ syntax Value ::= #decodeFloatRaw ( Int, FloatTy ) [function, total]
+ // ----------------------------------------------------------------
+ rule #decodeFloatRaw(RAW, FLOATTY)
+ => #decodeFloatParts(
+ RAW >>Int (#significandBits(FLOATTY) +Int #exponentBits(FLOATTY) -Int 1),
+        (RAW >>Int (#significandBits(FLOATTY) -Int 1)) &Int ((1 <<Int #exponentBits(FLOATTY)) -Int 1),
+        RAW &Int ((1 <<Int (#significandBits(FLOATTY) -Int 1)) -Int 1),
+        FLOATTY
+      )
+
+  syntax Value ::= #decodeFloatParts ( sign: Int, exp: Int, sig: Int, FloatTy ) [function, total]
+  // ---------------------------------------------------------------------------------------------
+  // Zero: exponent and significand bits all zero
+  rule #decodeFloatParts(SIGN, 0, 0, FLOATTY)
+    => Float(#applyFloatSign(Int2Float(0, #significandBits(FLOATTY), #exponentBits(FLOATTY)), SIGN), #bitWidth(FLOATTY))
+
+ // Subnormal: no implicit leading 1, exponent is 1 - bias
+ rule #decodeFloatParts(SIGN, 0, SIG, FLOATTY)
+ => Float(
+ #applyFloatSign(
+ #reconstructFloat(SIG, 2 -Int #bias(FLOATTY) -Int #significandBits(FLOATTY), FLOATTY),
+ SIGN
+ ),
+ #bitWidth(FLOATTY)
+ )
+ requires SIG =/=Int 0
+
+ // Normal: implicit leading 1 in significand
+ rule #decodeFloatParts(SIGN, EXP, SIG, FLOATTY)
+ => Float(
+ #applyFloatSign(
+ #reconstructFloat(
+ SIG |Int (1 <Int 0 andBool EXP Float(#applyFloatSign(#posInfFloat(FLOATTY), SIGN), #bitWidth(FLOATTY))
+    requires EXP ==Int ((1 <<Int #exponentBits(FLOATTY)) -Int 1)
+
+  // NaN: maximal exponent, nonzero significand
+  rule #decodeFloatParts(_SIGN, EXP, SIG, FLOATTY)
+    => Float(#nanFloat(FLOATTY), #bitWidth(FLOATTY))
+    requires EXP ==Int ((1 <<Int #exponentBits(FLOATTY)) -Int 1) andBool SIG =/=Int 0
+
+  rule #decodeFloatParts(_, _, _, _) => StringVal( "ERRORFailedToDecodeFloat" ) [owise]
+
+```
+
+Reconstruct a float from its integer significand and adjusted exponent.
+For positive exponents, shift the significand left and convert directly.
+For negative exponents, use `Rat2Float` to convert the exact rational
+`SIG / 2^|AEXP|` to the target float precision.
+
+```k
+ syntax Float ::= #reconstructFloat ( sig: Int, adjExp: Int, FloatTy ) [function]
+ // -------------------------------------------------------------------------------
+ rule #reconstructFloat(SIG, AEXP, FLOATTY)
+    => Int2Float(SIG <<Int AEXP, #significandBits(FLOATTY), #exponentBits(FLOATTY))
+    requires AEXP >=Int 0
+ [preserves-definedness]
+
+ rule #reconstructFloat(SIG, AEXP, FLOATTY)
+    => Rat2Float(SIG /Rat (1 <<Int (0 -Int AEXP)), #significandBits(FLOATTY), #exponentBits(FLOATTY))
+    requires AEXP <Int 0
+    [preserves-definedness]
+
+  // Apply the IEEE 754 sign bit: negate the magnitude when the sign bit is set.
+  syntax Float ::= #applyFloatSign ( Float, Int ) [function, total]
+  // --------------------------------------------------------------
+  rule #applyFloatSign(F, 0) => F
+ rule #applyFloatSign(F, 1) => --Float F
+ rule #applyFloatSign(F, _) => F [owise]
+```
+
## Type Casts Between Different Numeric Types
diff --git a/kmir/src/kmir/kdist/mir-semantics/rt/types.md b/kmir/src/kmir/kdist/mir-semantics/rt/types.md
index 3998cb3eb..aaf854c13 100644
--- a/kmir/src/kmir/kdist/mir-semantics/rt/types.md
+++ b/kmir/src/kmir/kdist/mir-semantics/rt/types.md
@@ -76,29 +76,18 @@ It also implements cancellation of inverse projections (such as casting from one
The `#pointeeProjection` function computes, for compatible pointee types, how to project from one pointee to the other.
+It uses a **source-first strategy**: always unwrap the source type (struct wrapper or array) before
+attempting to unwrap the target type. This eliminates non-deterministic overlap between source-side
+and target-side rules, because a type cannot be both a struct and an array simultaneously.
+When the source cannot be unwrapped further, target-side unwrapping is handled by `#pointeeProjectionTarget`.
+
```k
syntax MaybeProjectionElems ::= #pointeeProjection ( TypeInfo , TypeInfo ) [function, total]
```
A short-cut rule for identical types takes preference.
-As a default, no projection elements are returned for incompatible types.
```k
rule #pointeeProjection(T , T) => .ProjectionElems [priority(40)]
- rule #pointeeProjection(_ , _) => NoProjectionElems [owise]
-```
-
-Pointers to arrays/slices are compatible with pointers to the element type
-```k
- rule #pointeeProjection(typeInfoArrayType(TY1, _), TY2)
- => maybeConcatProj(
- projectionElemConstantIndex(0, 0, false),
- #pointeeProjection(lookupTy(TY1), TY2)
- )
- rule #pointeeProjection(TY1, typeInfoArrayType(TY2, _))
- => maybeConcatProj(
- projectionElemSingletonArray,
- #pointeeProjection(TY1, lookupTy(TY2))
- )
```
Pointers to zero-sized types can be converted from and to. No recursion beyond the ZST.
@@ -112,9 +101,11 @@ Pointers to zero-sized types can be converted from and to. No recursion beyond t
[priority(45)]
```
-Pointers to structs with a single zero-offset field are compatible with pointers to that field's type
-```k
+Source-side: unwrap structs and arrays from the source type first.
+When source is an array and target is a transparent wrapper whose inner type equals the source,
+the source should be wrapped rather than unwrapped (e.g., `*const [u8;2] → *const Wrapper([u8;2])`).
+```k
rule #pointeeProjection(typeInfoStructType(_, _, FIELD .Tys, LAYOUT), OTHER)
=> maybeConcatProj(
projectionElemField(fieldIdx(0), FIELD),
@@ -122,12 +113,21 @@ Pointers to structs with a single zero-offset field are compatible with pointers
)
requires #zeroFieldOffset(LAYOUT)
- rule #pointeeProjection(OTHER, typeInfoStructType(_, _, FIELD .Tys, LAYOUT))
+ rule #pointeeProjection(SRC:TypeInfo, typeInfoStructType(_NAME, _ADTDEF, FIELD .Tys, LAYOUT))
=> maybeConcatProj(
projectionElemWrapStruct,
- #pointeeProjection(OTHER, lookupTy(FIELD))
+ #pointeeProjection(SRC, lookupTy(FIELD))
+ )
+ requires #isArrayType(SRC)
+ andBool #zeroFieldOffset(LAYOUT)
+ andBool lookupTy(FIELD) ==K SRC
+ [priority(42)]
+
+ rule #pointeeProjection(typeInfoArrayType(TY1, _), TY2)
+ => maybeConcatProj(
+ projectionElemConstantIndex(0, 0, false),
+ #pointeeProjection(lookupTy(TY1), TY2)
)
- requires #zeroFieldOffset(LAYOUT)
```
Pointers to `MaybeUninit` can be cast to pointers to `X`.
@@ -148,6 +148,34 @@ which is a singleton struct (see above).
andBool #lookupMaybeTy(getFieldTy(#lookupMaybeTy(getFieldTy(MAYBEUNINIT_TYINFO, 1)), 0)) ==K ELEM_TYINFO
```
+Fallback: source is not unwrappable, delegate to target-side.
+```k
+ rule #pointeeProjection(SRC, TGT) => #pointeeProjectionTarget(SRC, TGT) [owise]
+```
+
+Target-side fallback: only reached when source cannot be unwrapped further.
+After one step of target unwrapping, recurse back to `#pointeeProjection` to maintain
+the source-first strategy.
+
+```k
+ syntax MaybeProjectionElems ::= #pointeeProjectionTarget ( TypeInfo , TypeInfo ) [function, total]
+
+ rule #pointeeProjectionTarget(TY1, typeInfoArrayType(TY2, _))
+ => maybeConcatProj(
+ projectionElemSingletonArray,
+ #pointeeProjection(TY1, lookupTy(TY2))
+ )
+
+ rule #pointeeProjectionTarget(OTHER, typeInfoStructType(_, _, FIELD .Tys, LAYOUT))
+ => maybeConcatProj(
+ projectionElemWrapStruct,
+ #pointeeProjection(OTHER, lookupTy(FIELD))
+ )
+ requires #zeroFieldOffset(LAYOUT)
+
+ rule #pointeeProjectionTarget(_, _) => NoProjectionElems [owise]
+```
+
```k
syntax Bool ::= #zeroFieldOffset ( MaybeLayoutShape ) [function, total]
// --------------------------------------------------------------------
@@ -320,8 +348,9 @@ Slices, `str`s and dynamic types require it, and any `Ty` that `is_sized` does
rule #zeroSizedType(typeInfoTupleType(.Tys, _)) => true
rule #zeroSizedType(typeInfoStructType(_, _, .Tys, _)) => true
rule #zeroSizedType(typeInfoVoidType) => true
- // FIXME: Only unit tuples, empty structs, and void are recognized here; other
- // zero-sized types (e.g. single-variant enums, function or closure items,
+ rule #zeroSizedType(typeInfoFunType(_)) => true
+ // FIXME: Only unit tuples, empty structs, void, and function items are
+ // recognized here; other zero-sized types (e.g. single-variant enums,
// newtype wrappers around ZSTs) still fall through because we do not consult
// the layout metadata yet. Update once we rely on machineSize(0).
rule #zeroSizedType(_) => false [owise]
diff --git a/kmir/src/kmir/kdist/mir-semantics/symbolic/spl-token.md b/kmir/src/kmir/kdist/mir-semantics/symbolic/spl-token.md
index 3d61da075..7432a9dfe 100644
--- a/kmir/src/kmir/kdist/mir-semantics/symbolic/spl-token.md
+++ b/kmir/src/kmir/kdist/mir-semantics/symbolic/spl-token.md
@@ -63,7 +63,7 @@ module KMIR-SPL-TOKEN
rule #traverseProjection(toLocal(I), _ORIGINAL, .ProjectionElems, CONTEXTS)
~> #writeProjectionForce(NEW)
- => #forceSetLocal(local(I), #buildUpdate(NEW, CONTEXTS))
+ => #setLocalValue(place(local(I), .ProjectionElems), #buildUpdate(NEW, CONTEXTS))
...
LOCALS
diff --git a/kmir/src/kmir/options.py b/kmir/src/kmir/options.py
index 2a65d20dc..fde30a6c1 100644
--- a/kmir/src/kmir/options.py
+++ b/kmir/src/kmir/options.py
@@ -323,3 +323,20 @@ class LinkOpts(KMirOpts):
def __init__(self, smir_files: list[str], output_file: str | None = None) -> None:
self.smir_files = [Path(f) for f in smir_files]
self.output_file = Path(output_file) if output_file is not None else Path('linker_output.smir.json')
+
+
+@dataclass
+class ReduceOpts(KMirOpts):
+ smir_file: Path
+ output_file: Path
+ roots: list[str]
+
+ def __init__(self, smir_file: str, roots: str, output_file: str | None = None) -> None:
+ self.smir_file = Path(smir_file)
+ self.output_file = Path(output_file) if output_file is not None else Path('reduced.smir.json')
+ # Support @file syntax for reading roots from a file
+ if roots.startswith('@'):
+ roots_file = Path(roots[1:])
+ self.roots = list(filter(None, [r.strip() for r in roots_file.read_text().splitlines()]))
+ else:
+ self.roots = [r.strip() for r in roots.split(',') if r.strip()]
diff --git a/kmir/src/kmir/smir.py b/kmir/src/kmir/smir.py
index f609a9fe8..1ce7b897b 100644
--- a/kmir/src/kmir/smir.py
+++ b/kmir/src/kmir/smir.py
@@ -10,6 +10,7 @@
from .ty import EnumT, RefT, StructT, Ty, TypeMetadata, UnionT
if TYPE_CHECKING:
+ from collections.abc import Sequence
from pathlib import Path
from typing import Final
@@ -180,13 +181,19 @@ def spans(self) -> dict[int, tuple[Path, int, int, int, int]]:
def _is_func(item: dict[str, dict]) -> bool:
return 'MonoItemFn' in item['mono_item_kind']
- def reduce_to(self, start_name: str) -> SMIRInfo:
- # returns a new SMIRInfo with all _items_ removed that are not reachable from the named function
- start_ty = self.function_tys[start_name]
+ def reduce_to(self, start_symbols: str | Sequence[str]) -> SMIRInfo:
+ # returns a new SMIRInfo with all _items_ removed that are not reachable from the named function(s)
+ match start_symbols:
+ case str(symbol):
+ start_tys = [Ty(self.function_tys[symbol])]
+ case [*symbols] if symbols and all(isinstance(sym, str) for sym in symbols):
+ start_tys = [Ty(self.function_tys[sym]) for sym in symbols]
+ case _:
+ raise ValueError('SMIRInfo.reduce_to() received an invalid start_symbol')
- _LOGGER.debug(f'Reducing items, starting at {start_ty}. Call Edges {self.call_edges}')
+ _LOGGER.debug(f'Reducing items, starting at {start_tys}. Call Edges {self.call_edges}')
- reachable = compute_closure(Ty(start_ty), self.call_edges)
+ reachable = compute_closure(start_tys, self.call_edges)
_LOGGER.debug(f'Reducing to reachable Tys {reachable}')
@@ -249,8 +256,8 @@ def call_edges(self) -> dict[Ty, set[Ty]]:
return result
-def compute_closure(start: Ty, edges: dict[Ty, set[Ty]]) -> set[Ty]:
- work = deque([start])
+def compute_closure(start_nodes: Sequence[Ty], edges: dict[Ty, set[Ty]]) -> set[Ty]:
+ work = deque(start_nodes)
reached = set()
finished = False
while not finished:
diff --git a/kmir/src/tests/integration/data/exec-smir/structs-tuples/struct_field_update.state b/kmir/src/tests/integration/data/exec-smir/structs-tuples/struct_field_update.state
index 57665d4db..bed580f75 100644
--- a/kmir/src/tests/integration/data/exec-smir/structs-tuples/struct_field_update.state
+++ b/kmir/src/tests/integration/data/exec-smir/structs-tuples/struct_field_update.state
@@ -28,7 +28,7 @@
ListItem ( newLocal ( ty ( 1 ) , mutabilityMut ) )
ListItem ( typedValue ( Aggregate ( variantIdx ( 0 ) , ListItem ( Integer ( 1 , 32 , true ) )
ListItem ( BoolVal ( true ) )
- ListItem ( thunk ( UnableToDecode ( b"33333sE@" , typeInfoPrimitiveType ( primTypeFloat ( floatTyF64 ) ) ) ) )
+ ListItem ( Float ( 0.42899999999999999e2 , 64 ) )
ListItem ( Aggregate ( variantIdx ( 0 ) , ListItem ( Integer ( 1 , 32 , true ) )
ListItem ( Integer ( 10 , 32 , true ) ) ) ) ) , ty ( 28 ) , mutabilityMut ) )
ListItem ( typedValue ( Moved , ty ( 27 ) , mutabilityMut ) )
diff --git a/kmir/src/tests/integration/data/exec-smir/structs-tuples/structs-tuples.state b/kmir/src/tests/integration/data/exec-smir/structs-tuples/structs-tuples.state
index 47b68a635..e6dc9aec9 100644
--- a/kmir/src/tests/integration/data/exec-smir/structs-tuples/structs-tuples.state
+++ b/kmir/src/tests/integration/data/exec-smir/structs-tuples/structs-tuples.state
@@ -1,6 +1,6 @@
- #execStmts ( .Statements ) ~> #execTerminator ( terminator (... kind: terminatorKindReturn , span: span ( 73 ) ) ) ~> .K
+ #execTerminator ( terminator (... kind: terminatorKindReturn , span: span ( 73 ) ) ) ~> .K
noReturn
@@ -28,17 +28,17 @@
ListItem ( newLocal ( ty ( 1 ) , mutabilityMut ) )
ListItem ( typedValue ( Integer ( 10 , 32 , true ) , ty ( 16 ) , mutabilityNot ) )
ListItem ( typedValue ( BoolVal ( false ) , ty ( 26 ) , mutabilityNot ) )
- ListItem ( typedValue ( thunk ( UnableToDecode ( b"\x00\x00\x00\x00\x00\x00$@" , typeInfoPrimitiveType ( primTypeFloat ( floatTyF64 ) ) ) ) , ty ( 27 ) , mutabilityNot ) )
+ ListItem ( typedValue ( Float ( 0.10000000000000000e2 , 64 ) , ty ( 27 ) , mutabilityNot ) )
ListItem ( StackFrame ( ty ( -1 ) , place (... local: local ( 0 ) , projection: .ProjectionElems ) , noBasicBlockIdx , unwindActionContinue , ListItem ( newLocal ( ty ( 1 ) , mutabilityMut ) )
ListItem ( typedValue ( Aggregate ( variantIdx ( 0 ) , ListItem ( Integer ( 10 , 32 , true ) )
ListItem ( BoolVal ( false ) )
- ListItem ( thunk ( UnableToDecode ( b"\x00\x00\x00\x00\x00\x00$@" , typeInfoPrimitiveType ( primTypeFloat ( floatTyF64 ) ) ) ) ) ) , ty ( 28 ) , mutabilityNot ) )
+ ListItem ( Float ( 0.10000000000000000e2 , 64 ) ) ) , ty ( 28 ) , mutabilityNot ) )
ListItem ( typedValue ( Aggregate ( variantIdx ( 0 ) , ListItem ( Integer ( 11 , 32 , true ) )
ListItem ( BoolVal ( true ) )
- ListItem ( thunk ( UnableToDecode ( b"\x00\x00\x00\x00\x00\x00$@" , typeInfoPrimitiveType ( primTypeFloat ( floatTyF64 ) ) ) ) ) ) , ty ( 29 ) , mutabilityNot ) )
+ ListItem ( Float ( 0.10000000000000000e2 , 64 ) ) ) , ty ( 29 ) , mutabilityNot ) )
ListItem ( typedValue ( Moved , ty ( 27 ) , mutabilityMut ) )
ListItem ( newLocal ( ty ( 1 ) , mutabilityNot ) )
ListItem ( typedValue ( Moved , ty ( 16 ) , mutabilityMut ) )
diff --git a/kmir/src/tests/integration/data/prove-rs/box_heap_alloc-fail.rs b/kmir/src/tests/integration/data/prove-rs/box_heap_alloc-fail.rs
new file mode 100644
index 000000000..10988cbe7
--- /dev/null
+++ b/kmir/src/tests/integration/data/prove-rs/box_heap_alloc-fail.rs
@@ -0,0 +1,4 @@
+fn main() {
+ let a = Box::new(42i32);
+ assert!(*a == 42);
+}
diff --git a/kmir/src/tests/integration/data/prove-rs/float_arith.rs b/kmir/src/tests/integration/data/prove-rs/float_arith.rs
new file mode 100644
index 000000000..a8edf36d5
--- /dev/null
+++ b/kmir/src/tests/integration/data/prove-rs/float_arith.rs
@@ -0,0 +1,47 @@
+#![feature(f16)]
+#![feature(f128)]
+
+fn main() {
+ // f16
+ assert!(1.5_f16 + 2.5_f16 == 4.0_f16);
+ assert!(5.0_f16 - 1.5_f16 == 3.5_f16);
+ assert!(2.0_f16 * 3.0_f16 == 6.0_f16);
+ assert!(7.0_f16 / 2.0_f16 == 3.5_f16);
+
+ // f32
+ assert!(1.5_f32 + 2.5_f32 == 4.0_f32);
+ assert!(5.0_f32 - 1.5_f32 == 3.5_f32);
+ assert!(2.0_f32 * 3.0_f32 == 6.0_f32);
+ assert!(7.0_f32 / 2.0_f32 == 3.5_f32);
+
+ // f64
+ assert!(3.5_f64 + 1.5_f64 == 5.0_f64);
+ assert!(3.5_f64 - 1.5_f64 == 2.0_f64);
+ assert!(3.5_f64 * 1.5_f64 == 5.25_f64);
+ assert!(10.0_f64 / 2.0_f64 == 5.0_f64);
+
+ // f128
+ assert!(1.5_f128 + 2.5_f128 == 4.0_f128);
+ assert!(5.0_f128 - 1.5_f128 == 3.5_f128);
+ assert!(2.0_f128 * 3.0_f128 == 6.0_f128);
+ assert!(7.0_f128 / 2.0_f128 == 3.5_f128);
+
+ // Modulo (truncating)
+ assert!(7.0_f16 % 4.0_f16 == 3.0_f16);
+ assert!(7.0_f32 % 4.0_f32 == 3.0_f32);
+ assert!(7.0_f64 % 4.0_f64 == 3.0_f64);
+ assert!(7.0_f128 % 4.0_f128 == 3.0_f128);
+
+ // Subnormal constant literals
+ let sub_16: f16 = 5.96e-8_f16; // below f16 min normal (6.1e-5)
+ assert!(sub_16 + sub_16 == sub_16 * 2.0_f16);
+
+ let sub_32: f32 = 1.0e-45_f32; // below f32 min normal (1.2e-38)
+ assert!(sub_32 + sub_32 == sub_32 * 2.0_f32);
+
+ let sub_64: f64 = 5e-324_f64; // below f64 min normal (2.2e-308)
+ assert!(sub_64 + sub_64 == 1e-323_f64);
+
+ let sub_128: f128 = 1.0e-4933_f128; // below f128 min normal (~3.4e-4932)
+ assert!(sub_128 + sub_128 == sub_128 * 2.0_f128);
+}
diff --git a/kmir/src/tests/integration/data/prove-rs/float_cast.rs b/kmir/src/tests/integration/data/prove-rs/float_cast.rs
new file mode 100644
index 000000000..747c9a7da
--- /dev/null
+++ b/kmir/src/tests/integration/data/prove-rs/float_cast.rs
@@ -0,0 +1,22 @@
+#![feature(f16)]
+#![feature(f128)]
+
+fn main() {
+ // FloatToInt
+ assert!(3.14_f16 as i32 == 3);
+ assert!(3.14_f32 as i32 == 3);
+ assert!(3.14_f64 as i32 == 3);
+ assert!(3.14_f128 as i32 == 3);
+
+ // IntToFloat
+ assert!(42_i64 as f16 == 42.0_f16);
+ assert!(42_i64 as f32 == 42.0_f32);
+ assert!(42_i64 as f64 == 42.0_f64);
+ assert!(42_i64 as f128 == 42.0_f128);
+
+ // FloatToFloat
+ assert!(2.5_f32 as f64 == 2.5_f64);
+ assert!(2.5_f64 as f32 == 2.5_f32);
+ assert!(2.5_f16 as f64 == 2.5_f64);
+ assert!(2.5_f64 as f128 == 2.5_f128);
+}
diff --git a/kmir/src/tests/integration/data/prove-rs/float_cmp.rs b/kmir/src/tests/integration/data/prove-rs/float_cmp.rs
new file mode 100644
index 000000000..9144787a2
--- /dev/null
+++ b/kmir/src/tests/integration/data/prove-rs/float_cmp.rs
@@ -0,0 +1,38 @@
+#![feature(f16)]
+#![feature(f128)]
+
+fn main() {
+ // f16
+ assert!(1.0_f16 < 2.0_f16);
+ assert!(2.0_f16 >= 2.0_f16);
+ assert!(3.0_f16 > 1.0_f16);
+ assert!(1.0_f16 <= 1.0_f16);
+
+ // f32
+ assert!(1.0_f32 < 2.0_f32);
+ assert!(2.0_f32 >= 2.0_f32);
+ assert!(3.0_f32 > 1.0_f32);
+ assert!(1.0_f32 <= 1.0_f32);
+
+ // f64
+ assert!(1.0_f64 < 2.0_f64);
+ assert!(2.0_f64 >= 2.0_f64);
+ assert!(3.0_f64 > 1.0_f64);
+ assert!(1.0_f64 <= 1.0_f64);
+
+ // f128
+ assert!(1.0_f128 < 2.0_f128);
+ assert!(2.0_f128 >= 2.0_f128);
+ assert!(3.0_f128 > 1.0_f128);
+ assert!(1.0_f128 <= 1.0_f128);
+
+ // Negative values
+ assert!(-1.0_f16 < 0.0_f16);
+ assert!(-2.0_f16 < -1.0_f16);
+ assert!(-1.0_f32 < 0.0_f32);
+ assert!(-2.0_f32 < -1.0_f32);
+ assert!(-1.0_f64 < 0.0_f64);
+ assert!(-2.0_f64 < -1.0_f64);
+ assert!(-1.0_f128 < 0.0_f128);
+ assert!(-2.0_f128 < -1.0_f128);
+}
diff --git a/kmir/src/tests/integration/data/prove-rs/float_eq.rs b/kmir/src/tests/integration/data/prove-rs/float_eq.rs
new file mode 100644
index 000000000..3e0bbeb43
--- /dev/null
+++ b/kmir/src/tests/integration/data/prove-rs/float_eq.rs
@@ -0,0 +1,30 @@
+#![feature(f16)]
+#![feature(f128)]
+
+fn main() {
+ // f16
+ assert!(1.0_f16 == 1.0_f16);
+ assert!(0.0_f16 == 0.0_f16);
+ assert!(1.0_f16 != 2.0_f16);
+
+ // f32
+ assert!(1.0_f32 == 1.0_f32);
+ assert!(0.0_f32 == 0.0_f32);
+ assert!(1.0_f32 != 2.0_f32);
+
+ // f64
+ assert!(1.0_f64 == 1.0_f64);
+ assert!(0.0_f64 == 0.0_f64);
+ assert!(1.0_f64 != 2.0_f64);
+
+ // f128
+ assert!(1.0_f128 == 1.0_f128);
+ assert!(0.0_f128 == 0.0_f128);
+ assert!(1.0_f128 != 2.0_f128);
+
+ // Negative zero equals positive zero (IEEE 754)
+ assert!(-0.0_f16 == 0.0_f16);
+ assert!(-0.0_f32 == 0.0_f32);
+ assert!(-0.0_f64 == 0.0_f64);
+ assert!(-0.0_f128 == 0.0_f128);
+}
diff --git a/kmir/src/tests/integration/data/prove-rs/float_neg.rs b/kmir/src/tests/integration/data/prove-rs/float_neg.rs
new file mode 100644
index 000000000..065ff2bc2
--- /dev/null
+++ b/kmir/src/tests/integration/data/prove-rs/float_neg.rs
@@ -0,0 +1,30 @@
+#![feature(f16)]
+#![feature(f128)]
+
+fn main() {
+ // f16
+ let a16: f16 = 3.5;
+ assert!(-a16 == -3.5_f16);
+ assert!(-(-a16) == a16);
+
+ // f32
+ let a32: f32 = 3.5;
+ assert!(-a32 == -3.5_f32);
+ assert!(-(-a32) == a32);
+
+ // f64
+ let a64: f64 = 3.5;
+ assert!(-a64 == -3.5_f64);
+ assert!(-(-a64) == a64);
+
+ // f128
+ let a128: f128 = 3.5;
+ assert!(-a128 == -3.5_f128);
+ assert!(-(-a128) == a128);
+
+ // Negating zero
+ assert!(-0.0_f16 == 0.0_f16);
+ assert!(-0.0_f32 == 0.0_f32);
+ assert!(-0.0_f64 == 0.0_f64);
+ assert!(-0.0_f128 == 0.0_f128);
+}
diff --git a/kmir/src/tests/integration/data/prove-rs/float_special.rs b/kmir/src/tests/integration/data/prove-rs/float_special.rs
new file mode 100644
index 000000000..aa6449da2
--- /dev/null
+++ b/kmir/src/tests/integration/data/prove-rs/float_special.rs
@@ -0,0 +1,56 @@
+#![feature(f16)]
+#![feature(f128)]
+
+fn main() {
+ // f16 infinity
+ let inf_16: f16 = 1.0_f16 / 0.0_f16;
+ let neg_inf_16: f16 = -1.0_f16 / 0.0_f16;
+ assert!(inf_16 == inf_16);
+ assert!(neg_inf_16 == -inf_16);
+
+ // f16 NaN
+ let nan_16: f16 = 0.0_f16 / 0.0_f16;
+ assert!(nan_16 != nan_16);
+ assert!(!(nan_16 == nan_16));
+
+ // f32 infinity
+ let inf_32: f32 = 1.0_f32 / 0.0_f32;
+ let neg_inf_32: f32 = -1.0_f32 / 0.0_f32;
+ assert!(inf_32 == inf_32);
+ assert!(inf_32 > 1.0e38_f32);
+ assert!(neg_inf_32 < -1.0e38_f32);
+ assert!(neg_inf_32 == -inf_32);
+
+ // f32 NaN
+ let nan_32: f32 = 0.0_f32 / 0.0_f32;
+ assert!(nan_32 != nan_32);
+ assert!(!(nan_32 == nan_32));
+ assert!(!(nan_32 < 0.0_f32));
+ assert!(!(nan_32 > 0.0_f32));
+
+ // f64 infinity
+ let inf_64: f64 = 1.0_f64 / 0.0_f64;
+ let neg_inf_64: f64 = -1.0_f64 / 0.0_f64;
+ assert!(inf_64 == inf_64);
+ assert!(inf_64 > 1.0e308_f64);
+ assert!(neg_inf_64 < -1.0e308_f64);
+ assert!(neg_inf_64 == -inf_64);
+
+ // f64 NaN
+ let nan_64: f64 = 0.0_f64 / 0.0_f64;
+ assert!(nan_64 != nan_64);
+ assert!(!(nan_64 == nan_64));
+ assert!(!(nan_64 < 0.0_f64));
+ assert!(!(nan_64 > 0.0_f64));
+
+ // f128 infinity
+ let inf_128: f128 = 1.0_f128 / 0.0_f128;
+ let neg_inf_128: f128 = -1.0_f128 / 0.0_f128;
+ assert!(inf_128 == inf_128);
+ assert!(neg_inf_128 == -inf_128);
+
+ // f128 NaN
+ let nan_128: f128 = 0.0_f128 / 0.0_f128;
+ assert!(nan_128 != nan_128);
+ assert!(!(nan_128 == nan_128));
+}
diff --git a/kmir/src/tests/integration/data/prove-rs/ptr-cast-array-to-nested-wrapper-fail.rs b/kmir/src/tests/integration/data/prove-rs/ptr-cast-array-to-nested-wrapper-fail.rs
new file mode 100644
index 000000000..9df7ec0f8
--- /dev/null
+++ b/kmir/src/tests/integration/data/prove-rs/ptr-cast-array-to-nested-wrapper-fail.rs
@@ -0,0 +1,12 @@
+// [T; N] -> W2(W1([T; N])) - nested struct wrapping on target
+#[derive(Clone, Copy, PartialEq, Debug)]
+struct Inner([u8; 2]);
+
+#[derive(Clone, Copy, PartialEq, Debug)]
+struct Outer(Inner);
+
+fn main() {
+ let arr: [u8; 2] = [11, 22];
+ let o: Outer = unsafe { *((&arr) as *const [u8; 2] as *const Outer) };
+ assert_eq!(o.0 .0, [11, 22]);
+}
diff --git a/kmir/src/tests/integration/data/prove-rs/ptr-cast-array-to-singleton-wrapped-array-fail.rs b/kmir/src/tests/integration/data/prove-rs/ptr-cast-array-to-singleton-wrapped-array-fail.rs
new file mode 100644
index 000000000..1e40d8de2
--- /dev/null
+++ b/kmir/src/tests/integration/data/prove-rs/ptr-cast-array-to-singleton-wrapped-array-fail.rs
@@ -0,0 +1,9 @@
+// [T; N] -> W([[T; N]; 1]) - singleton-array wrapping array (in-wrapper) on target
+#[derive(Clone, Copy, PartialEq, Debug)]
+struct Wrapper([[u8; 2]; 1]);
+
+fn main() {
+ let arr: [u8; 2] = [11, 22];
+ let w: Wrapper = unsafe { *((&arr) as *const [u8; 2] as *const Wrapper) };
+ assert_eq!(w.0, [[11, 22]]);
+}
diff --git a/kmir/src/tests/integration/data/prove-rs/ptr-cast-array-to-wrapper-fail.rs b/kmir/src/tests/integration/data/prove-rs/ptr-cast-array-to-wrapper-fail.rs
new file mode 100644
index 000000000..ae40e8d8b
--- /dev/null
+++ b/kmir/src/tests/integration/data/prove-rs/ptr-cast-array-to-wrapper-fail.rs
@@ -0,0 +1,8 @@
+#[derive(Clone, Copy, PartialEq, Debug)]
+struct Wrapper([u8; 2]);
+
+fn main() {
+ let arr: [u8; 2] = [11, 22];
+ let w: Wrapper = unsafe { *((&arr) as *const [u8; 2] as *const Wrapper) };
+ assert_eq!(w.0, [11, 22]);
+}
diff --git a/kmir/src/tests/integration/data/prove-rs/ptr-cast-nested-wrapper-to-array.rs b/kmir/src/tests/integration/data/prove-rs/ptr-cast-nested-wrapper-to-array.rs
new file mode 100644
index 000000000..ca931f6cc
--- /dev/null
+++ b/kmir/src/tests/integration/data/prove-rs/ptr-cast-nested-wrapper-to-array.rs
@@ -0,0 +1,9 @@
+// W2(W1([T; N])) -> [T; N] - nested struct wrapping on source
+struct Inner([u8; 2]);
+struct Outer(Inner);
+
+fn main() {
+ let o = Outer(Inner([11, 22]));
+ let arr: [u8; 2] = unsafe { *((&o) as *const Outer as *const [u8; 2]) };
+ assert_eq!(arr, [11, 22]);
+}
diff --git a/kmir/src/tests/integration/data/prove-rs/ptr-cast-singleton-wrapped-array-to-array.rs b/kmir/src/tests/integration/data/prove-rs/ptr-cast-singleton-wrapped-array-to-array.rs
new file mode 100644
index 000000000..58aa5f74c
--- /dev/null
+++ b/kmir/src/tests/integration/data/prove-rs/ptr-cast-singleton-wrapped-array-to-array.rs
@@ -0,0 +1,8 @@
+// W([[T; N]; 1]) -> [T; N] - singleton-array wrapping array (in-wrapper) on source
+struct Wrapper([[u8; 2]; 1]);
+
+fn main() {
+ let w = Wrapper([[11, 22]]);
+ let arr: [u8; 2] = unsafe { *((&w) as *const Wrapper as *const [u8; 2]) };
+ assert_eq!(arr, [11, 22]);
+}
diff --git a/kmir/src/tests/integration/data/prove-rs/ptr-cast-wrapper-to-array.rs b/kmir/src/tests/integration/data/prove-rs/ptr-cast-wrapper-to-array.rs
new file mode 100644
index 000000000..dbc4625ac
--- /dev/null
+++ b/kmir/src/tests/integration/data/prove-rs/ptr-cast-wrapper-to-array.rs
@@ -0,0 +1,7 @@
+struct Wrapper([u8; 2]);
+
+fn main() {
+ let w = Wrapper([11, 22]);
+ let arr: [u8; 2] = unsafe { *((&w) as *const Wrapper as *const [u8; 2]) };
+ assert_eq!(arr, [11, 22]);
+}
diff --git a/kmir/src/tests/integration/data/prove-rs/show/box_heap_alloc-fail.main.expected b/kmir/src/tests/integration/data/prove-rs/show/box_heap_alloc-fail.main.expected
new file mode 100644
index 000000000..6b316872b
--- /dev/null
+++ b/kmir/src/tests/integration/data/prove-rs/show/box_heap_alloc-fail.main.expected
@@ -0,0 +1,15 @@
+
+┌─ 1 (root, init)
+│ #execTerminator ( terminator ( ... kind: terminatorKindCall ( ... func: operandC
+│ span: 0
+│
+│ (199 steps)
+└─ 3 (stuck, leaf)
+ #execIntrinsic ( IntrinsicFunction ( symbol ( "volatile_load" ) ) , operandConst
+ span: 32
+
+
+┌─ 2 (root, leaf, target, terminal)
+│ #EndProgram ~> .K
+
+
diff --git a/kmir/src/tests/integration/data/prove-rs/show/ptr-cast-array-to-nested-wrapper-fail.main.expected b/kmir/src/tests/integration/data/prove-rs/show/ptr-cast-array-to-nested-wrapper-fail.main.expected
new file mode 100644
index 000000000..7ea5e8b1e
--- /dev/null
+++ b/kmir/src/tests/integration/data/prove-rs/show/ptr-cast-array-to-nested-wrapper-fail.main.expected
@@ -0,0 +1,39 @@
+
+┌─ 1 (root, init)
+│ #execTerminator ( terminator ( ... kind: terminatorKindCall ( ... func: operandC
+│ span: 0
+│
+│ (128 steps)
+├─ 3
+│ #expect ( thunk ( #applyBinOp ( binOpEq , thunk ( #applyBinOp ( binOpBitAnd , th
+│ function: main
+┃
+┃ (1 step)
+┣━━┓
+┃ │
+┃ ├─ 4
+┃ │ AssertError ( assertMessageMisalignedPointerDereference ( ... required: operandC
+┃ │ function: main
+┃ │
+┃ │ (1 step)
+┃ └─ 6 (stuck, leaf)
+┃ #ProgramError ( AssertError ( assertMessageMisalignedPointerDereference ( ... re
+┃ function: main
+┃
+┗━━┓
+ │
+ ├─ 5
+ │ #execBlockIdx ( basicBlockIdx ( 4 ) ) ~> .K
+ │ function: main
+ │
+ │ (11 steps)
+ └─ 7 (stuck, leaf)
+ #traverseProjection ( toLocal ( 1 ) , Aggregate ( variantIdx ( 0 ) , ListItem (
+ function: main
+ span: 282
+
+
+┌─ 2 (root, leaf, target, terminal)
+│ #EndProgram ~> .K
+
+
diff --git a/kmir/src/tests/integration/data/prove-rs/show/ptr-cast-array-to-singleton-wrapped-array-fail.main.expected b/kmir/src/tests/integration/data/prove-rs/show/ptr-cast-array-to-singleton-wrapped-array-fail.main.expected
new file mode 100644
index 000000000..a7bbe1146
--- /dev/null
+++ b/kmir/src/tests/integration/data/prove-rs/show/ptr-cast-array-to-singleton-wrapped-array-fail.main.expected
@@ -0,0 +1,39 @@
+
+┌─ 1 (root, init)
+│ #execTerminator ( terminator ( ... kind: terminatorKindCall ( ... func: operandC
+│ span: 0
+│
+│ (128 steps)
+├─ 3
+│ #expect ( thunk ( #applyBinOp ( binOpEq , thunk ( #applyBinOp ( binOpBitAnd , th
+│ function: main
+┃
+┃ (1 step)
+┣━━┓
+┃ │
+┃ ├─ 4
+┃ │ AssertError ( assertMessageMisalignedPointerDereference ( ... required: operandC
+┃ │ function: main
+┃ │
+┃ │ (1 step)
+┃ └─ 6 (stuck, leaf)
+┃ #ProgramError ( AssertError ( assertMessageMisalignedPointerDereference ( ... re
+┃ function: main
+┃
+┗━━┓
+ │
+ ├─ 5
+ │ #execBlockIdx ( basicBlockIdx ( 4 ) ) ~> .K
+ │ function: main
+ │
+ │ (10 steps)
+ └─ 7 (stuck, leaf)
+ #traverseProjection ( toLocal ( 1 ) , Aggregate ( variantIdx ( 0 ) , ListItem (
+ function: main
+ span: 282
+
+
+┌─ 2 (root, leaf, target, terminal)
+│ #EndProgram ~> .K
+
+
diff --git a/kmir/src/tests/integration/data/prove-rs/show/ptr-cast-array-to-wrapper-fail.main.expected b/kmir/src/tests/integration/data/prove-rs/show/ptr-cast-array-to-wrapper-fail.main.expected
new file mode 100644
index 000000000..2a3653a3e
--- /dev/null
+++ b/kmir/src/tests/integration/data/prove-rs/show/ptr-cast-array-to-wrapper-fail.main.expected
@@ -0,0 +1,40 @@
+
+┌─ 1 (root, init)
+│ #execTerminator ( terminator ( ... kind: terminatorKindCall ( ... func: operandC
+│ span: 0
+│
+│ (128 steps)
+├─ 3
+│ #expect ( thunk ( #applyBinOp ( binOpEq , thunk ( #applyBinOp ( binOpBitAnd , th
+│ function: main
+┃
+┃ (1 step)
+┣━━┓
+┃ │
+┃ ├─ 4
+┃ │ AssertError ( assertMessageMisalignedPointerDereference ( ... required: operandC
+┃ │ function: main
+┃ │
+┃ │ (1 step)
+┃ └─ 6 (stuck, leaf)
+┃ #ProgramError ( AssertError ( assertMessageMisalignedPointerDereference ( ... re
+┃ function: main
+┃
+┗━━┓
+ │
+ ├─ 5
+ │ #execBlockIdx ( basicBlockIdx ( 4 ) ) ~> .K
+ │ function: main
+ │
+ │ (154 steps)
+ ├─ 7 (terminal)
+ │ #EndProgram ~> .K
+ │ function: main
+ │
+ ┊ constraint: true
+ ┊ subst: ...
+ └─ 2 (leaf, target, terminal)
+ #EndProgram ~> .K
+
+
+
diff --git a/kmir/src/tests/integration/data/prove-rs/show/volatile_load_static-fail.main.expected b/kmir/src/tests/integration/data/prove-rs/show/volatile_load_static-fail.main.expected
new file mode 100644
index 000000000..f792b03d2
--- /dev/null
+++ b/kmir/src/tests/integration/data/prove-rs/show/volatile_load_static-fail.main.expected
@@ -0,0 +1,16 @@
+
+┌─ 1 (root, init)
+│ #execTerminator ( terminator ( ... kind: terminatorKindCall ( ... func: operandC
+│ span: 0
+│
+│ (17 steps)
+└─ 3 (stuck, leaf)
+ #traverseProjection ( toLocal ( 3 ) , thunk ( #decodeConstant ( constantKindAllo
+ function: main
+ span: 53
+
+
+┌─ 2 (root, leaf, target, terminal)
+│ #EndProgram ~> .K
+
+
diff --git a/kmir/src/tests/integration/data/prove-rs/show/volatile_store_static-fail.main.expected b/kmir/src/tests/integration/data/prove-rs/show/volatile_store_static-fail.main.expected
new file mode 100644
index 000000000..0c9649cc5
--- /dev/null
+++ b/kmir/src/tests/integration/data/prove-rs/show/volatile_store_static-fail.main.expected
@@ -0,0 +1,16 @@
+
+┌─ 1 (root, init)
+│ #execTerminator ( terminator ( ... kind: terminatorKindCall ( ... func: operandC
+│ span: 0
+│
+│ (17 steps)
+└─ 3 (stuck, leaf)
+ #traverseProjection ( toLocal ( 4 ) , thunk ( #decodeConstant ( constantKindAllo
+ function: main
+ span: 57
+
+
+┌─ 2 (root, leaf, target, terminal)
+│ #EndProgram ~> .K
+
+
diff --git a/kmir/src/tests/integration/data/prove-rs/transmute_transparent_wrapper_down.rs b/kmir/src/tests/integration/data/prove-rs/transmute_transparent_wrapper_down.rs
new file mode 100644
index 000000000..7d48657eb
--- /dev/null
+++ b/kmir/src/tests/integration/data/prove-rs/transmute_transparent_wrapper_down.rs
@@ -0,0 +1,7 @@
+struct Wrapper(u64);
+
+fn main() {
+ let wrapped = Wrapper(42);
+ let val: u64 = unsafe { core::mem::transmute::<Wrapper, u64>(wrapped) };
+ assert!(val == 42);
+}
diff --git a/kmir/src/tests/integration/data/prove-rs/transmute_transparent_wrapper_up.rs b/kmir/src/tests/integration/data/prove-rs/transmute_transparent_wrapper_up.rs
new file mode 100644
index 000000000..043193722
--- /dev/null
+++ b/kmir/src/tests/integration/data/prove-rs/transmute_transparent_wrapper_up.rs
@@ -0,0 +1,7 @@
+struct Wrapper(u64);
+
+fn main() {
+ let val: u64 = 42;
+ let wrapped: Wrapper = unsafe { core::mem::transmute::<u64, Wrapper>(val) };
+ assert!(wrapped.0 == 42);
+}
diff --git a/kmir/src/tests/integration/data/prove-rs/volatile_load_static-fail.rs b/kmir/src/tests/integration/data/prove-rs/volatile_load_static-fail.rs
new file mode 100644
index 000000000..7704d6ae2
--- /dev/null
+++ b/kmir/src/tests/integration/data/prove-rs/volatile_load_static-fail.rs
@@ -0,0 +1,11 @@
+#![feature(core_intrinsics)]
+static A: i32 = 5555;
+
+fn main() {
+ let val: i32;
+ unsafe {
+ val = std::intrinsics::volatile_load(&A as *const i32);
+ }
+
+ assert!(val == 5555);
+}
diff --git a/kmir/src/tests/integration/data/prove-rs/volatile_store_static-fail.rs b/kmir/src/tests/integration/data/prove-rs/volatile_store_static-fail.rs
new file mode 100644
index 000000000..852d0a602
--- /dev/null
+++ b/kmir/src/tests/integration/data/prove-rs/volatile_store_static-fail.rs
@@ -0,0 +1,12 @@
+#![feature(core_intrinsics)]
+static mut A: i32 = 5555;
+
+fn main() {
+ unsafe {
+ std::intrinsics::volatile_store(&mut A as *mut i32, 7777);
+ }
+
+ unsafe {
+ assert!(A == 7777);
+ }
+}
diff --git a/kmir/src/tests/integration/test_integration.py b/kmir/src/tests/integration/test_integration.py
index 474303f1b..0f3680c7f 100644
--- a/kmir/src/tests/integration/test_integration.py
+++ b/kmir/src/tests/integration/test_integration.py
@@ -42,6 +42,9 @@
'spl-multisig-iter-eq-copied-next': ['repro'],
'spl-multisig-signer-index': ['repro'],
}
+PROVE_TERMINATE_ON_THUNK = [
+ 'closure-staged',
+]
PROVE_SHOW_SPECS = [
'local-raw-fail',
'interior-mut-fail',
@@ -67,6 +70,12 @@
'ref-ptr-cast-elem-fail',
'ref-ptr-cast-elem-offset-fail',
'spl-multisig-iter-eq-copied-next-fail',
+ 'volatile_store_static-fail',
+ 'volatile_load_static-fail',
+ 'box_heap_alloc-fail',
+ 'ptr-cast-array-to-wrapper-fail',
+ 'ptr-cast-array-to-nested-wrapper-fail',
+ 'ptr-cast-array-to-singleton-wrapped-array-fail',
]
@@ -83,7 +92,8 @@ def test_prove(rs_file: Path, kmir: KMIR, update_expected_output: bool) -> None:
if update_expected_output and not should_show:
pytest.skip()
- prove_opts = ProveOpts(rs_file, smir=is_smir)
+ should_terminate_on_thunk = rs_file.stem in PROVE_TERMINATE_ON_THUNK
+ prove_opts = ProveOpts(rs_file, smir=is_smir, terminate_on_thunk=should_terminate_on_thunk)
printer = PrettyPrinter(kmir.definition)
cterm_show = CTermShow(printer.print)
@@ -382,6 +392,17 @@ def test_crate_examples(main_crate: Path, kmir: KMIR, update_expected_output: bo
]
+# Tests containing float values that the pure kore-exec haskell backend cannot handle.
+# The haskell backend has no Float builtins (no Float.hs in kore/src/Kore/Builtin/),
+# so kore-exec crashes with "missing hook FLOAT.int2float" at Evaluator.hs:377.
+# The booster avoids this by delegating Float evaluation to the LLVM shared library
+# via simplifyTerm in booster/library/Booster/LLVM.hs.
+EXEC_SMIR_SKIP_HASKELL = {
+ 'structs-tuples',
+ 'struct-field-update',
+}
+
+
@pytest.mark.parametrize('symbolic', [False, True], ids=['llvm', 'haskell'])
@pytest.mark.parametrize(
'test_case',
@@ -394,7 +415,9 @@ def test_exec_smir(
update_expected_output: bool,
tmp_path: Path,
) -> None:
- _, input_json, output_kast, depth = test_case
+ name, input_json, output_kast, depth = test_case
+ if symbolic and name in EXEC_SMIR_SKIP_HASKELL:
+ pytest.skip('haskell-backend lacks FLOAT hooks')
smir_info = SMIRInfo.from_file(input_json)
kmir_backend = KMIR.from_kompiled_kore(smir_info, target_dir=tmp_path, symbolic=symbolic)
result = kmir_backend.run_smir(smir_info, depth=depth)
@@ -635,3 +658,40 @@ def test_schema_kapply_parse(
json_data, expected_term, expected_sort = test_case
assert parser.parse_mir_json(json_data, expected_sort.name) == (expected_term, expected_sort)
+
+
+ARITH_SMIR = PROVE_DIR / 'arith.smir.json'
+
+
+def test_reduce_standalone() -> None:
+ """Test that kmir reduce correctly prunes SMIR items by reachability."""
+ smir_data = json.loads(ARITH_SMIR.read_text())
+ info = SMIRInfo(smir_data)
+ assert len(info.items) == 11
+
+ # Single root 'add' — should keep 1 item
+ reduced_add = info.reduce_to('add')
+ assert len(reduced_add.items) == 1
+
+ # Single root 'mul' — should keep 1 item (independent from add)
+ reduced_mul = info.reduce_to('mul')
+ assert len(reduced_mul.items) == 1
+
+ # Multiple roots — union of both closures: one item per root, 2 total
+ reduced_multi = info.reduce_to(['add', 'mul'])
+ assert len(reduced_multi.items) == 2
+
+ # 'main' calls both add and mul — should keep all 3
+ reduced_main = info.reduce_to('main')
+ assert len(reduced_main.items) == 3
+
+ # Roundtrip: save reduced SMIR and reload it
+ with tempfile.NamedTemporaryFile(suffix='.smir.json', delete=False, mode='w') as f:
+ f.write(json.dumps(reduced_multi._smir))
+ reduced_path = Path(f.name)
+
+ try:
+ reloaded = SMIRInfo(json.loads(reduced_path.read_text()))
+ assert len(reloaded.items) == 2
+ finally:
+ reduced_path.unlink()
diff --git a/kmir/src/tests/unit/test_unit.py b/kmir/src/tests/unit/test_unit.py
index 61c88d773..87b6c8d7c 100644
--- a/kmir/src/tests/unit/test_unit.py
+++ b/kmir/src/tests/unit/test_unit.py
@@ -59,5 +59,5 @@
def test_compute_closure(test_case: tuple[str, Ty, dict[Ty, set[Ty]], list[Ty]]) -> None:
_, start, edges, expected = test_case
- result = compute_closure(start, edges)
+ result = compute_closure([start], edges)
assert result == expected