From 4d330f4d97a85e005942176a73512eea9e593c6f Mon Sep 17 00:00:00 2001 From: "Stefan J. Wernli" Date: Wed, 18 Mar 2026 07:40:38 -0700 Subject: [PATCH 1/5] Introduce QIR v2.1 Profile `Adaptive_RIFLA` This change introduces a new QIR profile, `Adaptive_RIFLA`, which targets the upcoming QIR v2.1 specification. This profile builds from the Integer and Float computations supported by `Adaptive_RIF` and adds flags for Loops (backwards branching) and Arrays. Note that this PR does not include actual codegen support for these features, but the flags are specified in the template in advance of that work. `Adaptive_RIFLA` is intended to be in preview/experimental and is not configured as a default (opt-in only). This PR includes the following changes: - Adds the `Adaptive_RIFLA` profile as a recognized profile for Q# and OpenQASM - Adds the profile to Python enum - Adds new v2 codegen for QIR that uses opaque pointers, updated LLVM patterns, and updated module flags - Updates the RIR data structures to introduce new `AdvancedInstr` struct to capture instructions that are only supported in the new profile. - Adds `Load` and `Alloca` advanced instructions to complement the existing `Store` instruction - Adds an RIR pass that prunes most extraneous store instructions to identify the required set of variables and values - Adds an RIR pass that adds the necessary `Load` and `Alloca` instructions to support stack-based variable management - Adds test cases for new RIR passes, v2 instructions, and integration tests verifying QIR generation and execution via qir-runner. 
--- source/compiler/qsc/src/interpret.rs | 6 +- .../qsc_circuit/src/rir_to_circuit.rs | 4 +- source/compiler/qsc_codegen/src/qir.rs | 770 +----------------- source/compiler/qsc_codegen/src/qir/v1.rs | 737 +++++++++++++++++ .../src/qir/{ => v1}/instruction_tests.rs | 0 .../qir/{ => v1}/instruction_tests/bool.rs | 2 +- .../qir/{ => v1}/instruction_tests/double.rs | 2 +- .../src/qir/{ => v1}/instruction_tests/int.rs | 2 +- .../qir/{ => v1}/instruction_tests/invalid.rs | 2 +- .../src/qir/{ => v1}/instruction_tests/phi.rs | 2 +- .../qsc_codegen/src/qir/{ => v1}/template.ll | 0 .../qsc_codegen/src/qir/{ => v1}/tests.rs | 0 source/compiler/qsc_codegen/src/qir/v2.rs | 669 +++++++++++++++ .../src/qir/v2/instruction_tests.rs | 10 + .../src/qir/v2/instruction_tests/alloca.rs | 38 + .../src/qir/v2/instruction_tests/bool.rs | 109 +++ .../src/qir/v2/instruction_tests/double.rs | 507 ++++++++++++ .../src/qir/v2/instruction_tests/int.rs | 587 +++++++++++++ .../src/qir/v2/instruction_tests/invalid.rs | 170 ++++ .../src/qir/v2/instruction_tests/load.rs | 42 + .../src/qir/v2/instruction_tests/store.rs | 51 ++ .../qsc_codegen/src/qir/v2/template.ll | 18 + .../compiler/qsc_codegen/src/qir/v2/tests.rs | 237 ++++++ .../qsc_data_structures/src/target.rs | 14 +- source/compiler/qsc_partial_eval/src/tests.rs | 8 +- .../qsc_partial_eval/src/tests/arrays.rs | 2 +- .../qsc_partial_eval/src/tests/intrinsics.rs | 6 +- .../qsc_partial_eval/src/tests/loops.rs | 2 +- .../src/tests/output_recording.rs | 16 +- source/compiler/qsc_rir/src/passes.rs | 36 +- .../qsc_rir/src/passes/insert_alloca_load.rs | 252 ++++++ .../src/passes/insert_alloca_load/tests.rs | 209 +++++ .../src/passes/prune_unneeded_stores.rs | 176 ++++ .../src/passes/prune_unneeded_stores/tests.rs | 198 +++++ .../compiler/qsc_rir/src/passes/ssa_check.rs | 4 + .../qsc_rir/src/passes/ssa_transform.rs | 122 +-- .../qsc_rir/src/passes/ssa_transform/tests.rs | 38 +- .../compiler/qsc_rir/src/passes/type_check.rs | 7 +- 
source/compiler/qsc_rir/src/rir.rs | 281 ++++--- source/compiler/qsc_rir/src/utils.rs | 141 +++- source/pip/qsharp/_native.pyi | 7 + source/pip/src/interpreter.rs | 9 +- .../adaptive_rifla/input/ArithmeticOps.qs | 34 + .../input/BernsteinVaziraniNISQ.qs | 153 ++++ .../adaptive_rifla/input/ConstantFolding.qs | 44 + .../input/CopyAndUpdateExpressions.qs | 32 + .../adaptive_rifla/input/DeutschJozsaNISQ.qs | 103 +++ .../resources/adaptive_rifla/input/Doubles.qs | 27 + .../adaptive_rifla/input/ExpandedTests.qs | 124 +++ .../adaptive_rifla/input/Functors.qs | 51 ++ .../adaptive_rifla/input/HiddenShiftNISQ.qs | 169 ++++ .../adaptive_rifla/input/IntegerComparison.qs | 21 + .../adaptive_rifla/input/IntrinsicCCNOT.qs | 30 + .../adaptive_rifla/input/IntrinsicCNOT.qs | 23 + .../adaptive_rifla/input/IntrinsicHIXYZ.qs | 47 ++ .../adaptive_rifla/input/IntrinsicM.qs | 19 + .../input/IntrinsicMeasureWithBitFlipCode.qs | 35 + .../IntrinsicMeasureWithPhaseFlipCode.qs | 38 + .../input/IntrinsicRotationsWithPeriod.qs | 63 ++ .../adaptive_rifla/input/IntrinsicSTSWAP.qs | 37 + .../adaptive_rifla/input/MeasureAndReuse.qs | 24 + .../input/MeasurementComparison.qs | 18 + .../adaptive_rifla/input/NestedBranching.qs | 143 ++++ .../adaptive_rifla/input/RandomBit.qs | 13 + .../adaptive_rifla/input/SampleTeleport.qs | 43 + .../input/ShortcuttingMeasurement.qs | 22 + .../resources/adaptive_rifla/input/Slicing.qs | 20 + .../adaptive_rifla/input/SuperdenseCoding.qs | 69 ++ .../adaptive_rifla/input/SwitchHandling.qs | 37 + .../input/ThreeQubitRepetitionCode.qs | 112 +++ .../adaptive_rifla/input/WithinApply.qs | 25 + .../adaptive_rifla/output/ArithmeticOps.ll | 150 ++++ .../adaptive_rifla/output/ArithmeticOps.out | 12 + .../output/BernsteinVaziraniNISQ.ll | 71 ++ .../output/BernsteinVaziraniNISQ.out | 13 + .../adaptive_rifla/output/ConstantFolding.ll | 76 ++ .../adaptive_rifla/output/ConstantFolding.out | 12 + .../output/CopyAndUpdateExpressions.ll | 65 ++ .../output/CopyAndUpdateExpressions.out | 16 
+ .../adaptive_rifla/output/DeutschJozsaNISQ.ll | 102 +++ .../output/DeutschJozsaNISQ.out | 18 + .../adaptive_rifla/output/Doubles.ll | 253 ++++++ .../adaptive_rifla/output/Doubles.out | 16 + .../adaptive_rifla/output/ExpandedTests.ll | 93 +++ .../adaptive_rifla/output/ExpandedTests.out | 12 + .../adaptive_rifla/output/Functors.ll | 131 +++ .../adaptive_rifla/output/Functors.out | 17 + .../adaptive_rifla/output/HiddenShiftNISQ.ll | 84 ++ .../adaptive_rifla/output/HiddenShiftNISQ.out | 14 + .../output/IntegerComparison.ll | 159 ++++ .../output/IntegerComparison.out | 11 + .../adaptive_rifla/output/IntrinsicCCNOT.ll | 88 ++ .../adaptive_rifla/output/IntrinsicCCNOT.out | 20 + .../adaptive_rifla/output/IntrinsicCNOT.ll | 63 ++ .../adaptive_rifla/output/IntrinsicCNOT.out | 14 + .../adaptive_rifla/output/IntrinsicHIXYZ.ll | 70 ++ .../adaptive_rifla/output/IntrinsicHIXYZ.out | 14 + .../adaptive_rifla/output/IntrinsicM.ll | 45 + .../adaptive_rifla/output/IntrinsicM.out | 10 + .../output/IntrinsicMeasureWithBitFlipCode.ll | 90 ++ .../IntrinsicMeasureWithBitFlipCode.out | 18 + .../IntrinsicMeasureWithPhaseFlipCode.ll | 94 +++ .../IntrinsicMeasureWithPhaseFlipCode.out | 18 + .../output/IntrinsicRotationsWithPeriod.ll | 130 +++ .../output/IntrinsicRotationsWithPeriod.out | 17 + .../adaptive_rifla/output/IntrinsicSTSWAP.ll | 70 ++ .../adaptive_rifla/output/IntrinsicSTSWAP.out | 13 + .../adaptive_rifla/output/MeasureAndReuse.ll | 63 ++ .../adaptive_rifla/output/MeasureAndReuse.out | 13 + .../output/MeasurementComparison.ll | 75 ++ .../output/MeasurementComparison.out | 12 + .../adaptive_rifla/output/NestedBranching.ll | 383 +++++++++ .../adaptive_rifla/output/NestedBranching.out | 21 + .../adaptive_rifla/output/RandomBit.ll | 37 + .../adaptive_rifla/output/RandomBit.out | 8 + .../adaptive_rifla/output/SampleTeleport.ll | 69 ++ .../adaptive_rifla/output/SampleTeleport.out | 8 + .../output/ShortcuttingMeasurement.ll | 68 ++ .../output/ShortcuttingMeasurement.out | 10 + 
.../adaptive_rifla/output/Slicing.ll | 66 ++ .../adaptive_rifla/output/Slicing.out | 13 + .../adaptive_rifla/output/SuperdenseCoding.ll | 98 +++ .../output/SuperdenseCoding.out | 14 + .../adaptive_rifla/output/SwitchHandling.ll | 98 +++ .../adaptive_rifla/output/SwitchHandling.out | 8 + .../output/ThreeQubitRepetitionCode.ll | 329 ++++++++ .../output/ThreeQubitRepetitionCode.out | 10 + .../adaptive_rifla/output/WithinApply.ll | 55 ++ .../adaptive_rifla/output/WithinApply.out | 11 + .../test_adaptive_rifla_qir.py | 49 ++ source/pip/tests/test_enums.py | 10 +- 131 files changed, 9662 insertions(+), 1034 deletions(-) create mode 100644 source/compiler/qsc_codegen/src/qir/v1.rs rename source/compiler/qsc_codegen/src/qir/{ => v1}/instruction_tests.rs (100%) rename source/compiler/qsc_codegen/src/qir/{ => v1}/instruction_tests/bool.rs (99%) rename source/compiler/qsc_codegen/src/qir/{ => v1}/instruction_tests/double.rs (99%) rename source/compiler/qsc_codegen/src/qir/{ => v1}/instruction_tests/int.rs (99%) rename source/compiler/qsc_codegen/src/qir/{ => v1}/instruction_tests/invalid.rs (99%) rename source/compiler/qsc_codegen/src/qir/{ => v1}/instruction_tests/phi.rs (98%) rename source/compiler/qsc_codegen/src/qir/{ => v1}/template.ll (100%) rename source/compiler/qsc_codegen/src/qir/{ => v1}/tests.rs (100%) create mode 100644 source/compiler/qsc_codegen/src/qir/v2.rs create mode 100644 source/compiler/qsc_codegen/src/qir/v2/instruction_tests.rs create mode 100644 source/compiler/qsc_codegen/src/qir/v2/instruction_tests/alloca.rs create mode 100644 source/compiler/qsc_codegen/src/qir/v2/instruction_tests/bool.rs create mode 100644 source/compiler/qsc_codegen/src/qir/v2/instruction_tests/double.rs create mode 100644 source/compiler/qsc_codegen/src/qir/v2/instruction_tests/int.rs create mode 100644 source/compiler/qsc_codegen/src/qir/v2/instruction_tests/invalid.rs create mode 100644 source/compiler/qsc_codegen/src/qir/v2/instruction_tests/load.rs create mode 100644 
source/compiler/qsc_codegen/src/qir/v2/instruction_tests/store.rs create mode 100644 source/compiler/qsc_codegen/src/qir/v2/template.ll create mode 100644 source/compiler/qsc_codegen/src/qir/v2/tests.rs create mode 100644 source/compiler/qsc_rir/src/passes/insert_alloca_load.rs create mode 100644 source/compiler/qsc_rir/src/passes/insert_alloca_load/tests.rs create mode 100644 source/compiler/qsc_rir/src/passes/prune_unneeded_stores.rs create mode 100644 source/compiler/qsc_rir/src/passes/prune_unneeded_stores/tests.rs create mode 100644 source/pip/tests-integration/resources/adaptive_rifla/input/ArithmeticOps.qs create mode 100644 source/pip/tests-integration/resources/adaptive_rifla/input/BernsteinVaziraniNISQ.qs create mode 100644 source/pip/tests-integration/resources/adaptive_rifla/input/ConstantFolding.qs create mode 100644 source/pip/tests-integration/resources/adaptive_rifla/input/CopyAndUpdateExpressions.qs create mode 100644 source/pip/tests-integration/resources/adaptive_rifla/input/DeutschJozsaNISQ.qs create mode 100644 source/pip/tests-integration/resources/adaptive_rifla/input/Doubles.qs create mode 100644 source/pip/tests-integration/resources/adaptive_rifla/input/ExpandedTests.qs create mode 100644 source/pip/tests-integration/resources/adaptive_rifla/input/Functors.qs create mode 100644 source/pip/tests-integration/resources/adaptive_rifla/input/HiddenShiftNISQ.qs create mode 100644 source/pip/tests-integration/resources/adaptive_rifla/input/IntegerComparison.qs create mode 100644 source/pip/tests-integration/resources/adaptive_rifla/input/IntrinsicCCNOT.qs create mode 100644 source/pip/tests-integration/resources/adaptive_rifla/input/IntrinsicCNOT.qs create mode 100644 source/pip/tests-integration/resources/adaptive_rifla/input/IntrinsicHIXYZ.qs create mode 100644 source/pip/tests-integration/resources/adaptive_rifla/input/IntrinsicM.qs create mode 100644 
source/pip/tests-integration/resources/adaptive_rifla/input/IntrinsicMeasureWithBitFlipCode.qs create mode 100644 source/pip/tests-integration/resources/adaptive_rifla/input/IntrinsicMeasureWithPhaseFlipCode.qs create mode 100644 source/pip/tests-integration/resources/adaptive_rifla/input/IntrinsicRotationsWithPeriod.qs create mode 100644 source/pip/tests-integration/resources/adaptive_rifla/input/IntrinsicSTSWAP.qs create mode 100644 source/pip/tests-integration/resources/adaptive_rifla/input/MeasureAndReuse.qs create mode 100644 source/pip/tests-integration/resources/adaptive_rifla/input/MeasurementComparison.qs create mode 100644 source/pip/tests-integration/resources/adaptive_rifla/input/NestedBranching.qs create mode 100644 source/pip/tests-integration/resources/adaptive_rifla/input/RandomBit.qs create mode 100644 source/pip/tests-integration/resources/adaptive_rifla/input/SampleTeleport.qs create mode 100644 source/pip/tests-integration/resources/adaptive_rifla/input/ShortcuttingMeasurement.qs create mode 100644 source/pip/tests-integration/resources/adaptive_rifla/input/Slicing.qs create mode 100644 source/pip/tests-integration/resources/adaptive_rifla/input/SuperdenseCoding.qs create mode 100644 source/pip/tests-integration/resources/adaptive_rifla/input/SwitchHandling.qs create mode 100644 source/pip/tests-integration/resources/adaptive_rifla/input/ThreeQubitRepetitionCode.qs create mode 100644 source/pip/tests-integration/resources/adaptive_rifla/input/WithinApply.qs create mode 100644 source/pip/tests-integration/resources/adaptive_rifla/output/ArithmeticOps.ll create mode 100644 source/pip/tests-integration/resources/adaptive_rifla/output/ArithmeticOps.out create mode 100644 source/pip/tests-integration/resources/adaptive_rifla/output/BernsteinVaziraniNISQ.ll create mode 100644 source/pip/tests-integration/resources/adaptive_rifla/output/BernsteinVaziraniNISQ.out create mode 100644 
source/pip/tests-integration/resources/adaptive_rifla/output/ConstantFolding.ll create mode 100644 source/pip/tests-integration/resources/adaptive_rifla/output/ConstantFolding.out create mode 100644 source/pip/tests-integration/resources/adaptive_rifla/output/CopyAndUpdateExpressions.ll create mode 100644 source/pip/tests-integration/resources/adaptive_rifla/output/CopyAndUpdateExpressions.out create mode 100644 source/pip/tests-integration/resources/adaptive_rifla/output/DeutschJozsaNISQ.ll create mode 100644 source/pip/tests-integration/resources/adaptive_rifla/output/DeutschJozsaNISQ.out create mode 100644 source/pip/tests-integration/resources/adaptive_rifla/output/Doubles.ll create mode 100644 source/pip/tests-integration/resources/adaptive_rifla/output/Doubles.out create mode 100644 source/pip/tests-integration/resources/adaptive_rifla/output/ExpandedTests.ll create mode 100644 source/pip/tests-integration/resources/adaptive_rifla/output/ExpandedTests.out create mode 100644 source/pip/tests-integration/resources/adaptive_rifla/output/Functors.ll create mode 100644 source/pip/tests-integration/resources/adaptive_rifla/output/Functors.out create mode 100644 source/pip/tests-integration/resources/adaptive_rifla/output/HiddenShiftNISQ.ll create mode 100644 source/pip/tests-integration/resources/adaptive_rifla/output/HiddenShiftNISQ.out create mode 100644 source/pip/tests-integration/resources/adaptive_rifla/output/IntegerComparison.ll create mode 100644 source/pip/tests-integration/resources/adaptive_rifla/output/IntegerComparison.out create mode 100644 source/pip/tests-integration/resources/adaptive_rifla/output/IntrinsicCCNOT.ll create mode 100644 source/pip/tests-integration/resources/adaptive_rifla/output/IntrinsicCCNOT.out create mode 100644 source/pip/tests-integration/resources/adaptive_rifla/output/IntrinsicCNOT.ll create mode 100644 source/pip/tests-integration/resources/adaptive_rifla/output/IntrinsicCNOT.out create mode 100644 
source/pip/tests-integration/resources/adaptive_rifla/output/IntrinsicHIXYZ.ll create mode 100644 source/pip/tests-integration/resources/adaptive_rifla/output/IntrinsicHIXYZ.out create mode 100644 source/pip/tests-integration/resources/adaptive_rifla/output/IntrinsicM.ll create mode 100644 source/pip/tests-integration/resources/adaptive_rifla/output/IntrinsicM.out create mode 100644 source/pip/tests-integration/resources/adaptive_rifla/output/IntrinsicMeasureWithBitFlipCode.ll create mode 100644 source/pip/tests-integration/resources/adaptive_rifla/output/IntrinsicMeasureWithBitFlipCode.out create mode 100644 source/pip/tests-integration/resources/adaptive_rifla/output/IntrinsicMeasureWithPhaseFlipCode.ll create mode 100644 source/pip/tests-integration/resources/adaptive_rifla/output/IntrinsicMeasureWithPhaseFlipCode.out create mode 100644 source/pip/tests-integration/resources/adaptive_rifla/output/IntrinsicRotationsWithPeriod.ll create mode 100644 source/pip/tests-integration/resources/adaptive_rifla/output/IntrinsicRotationsWithPeriod.out create mode 100644 source/pip/tests-integration/resources/adaptive_rifla/output/IntrinsicSTSWAP.ll create mode 100644 source/pip/tests-integration/resources/adaptive_rifla/output/IntrinsicSTSWAP.out create mode 100644 source/pip/tests-integration/resources/adaptive_rifla/output/MeasureAndReuse.ll create mode 100644 source/pip/tests-integration/resources/adaptive_rifla/output/MeasureAndReuse.out create mode 100644 source/pip/tests-integration/resources/adaptive_rifla/output/MeasurementComparison.ll create mode 100644 source/pip/tests-integration/resources/adaptive_rifla/output/MeasurementComparison.out create mode 100644 source/pip/tests-integration/resources/adaptive_rifla/output/NestedBranching.ll create mode 100644 source/pip/tests-integration/resources/adaptive_rifla/output/NestedBranching.out create mode 100644 source/pip/tests-integration/resources/adaptive_rifla/output/RandomBit.ll create mode 100644 
source/pip/tests-integration/resources/adaptive_rifla/output/RandomBit.out create mode 100644 source/pip/tests-integration/resources/adaptive_rifla/output/SampleTeleport.ll create mode 100644 source/pip/tests-integration/resources/adaptive_rifla/output/SampleTeleport.out create mode 100644 source/pip/tests-integration/resources/adaptive_rifla/output/ShortcuttingMeasurement.ll create mode 100644 source/pip/tests-integration/resources/adaptive_rifla/output/ShortcuttingMeasurement.out create mode 100644 source/pip/tests-integration/resources/adaptive_rifla/output/Slicing.ll create mode 100644 source/pip/tests-integration/resources/adaptive_rifla/output/Slicing.out create mode 100644 source/pip/tests-integration/resources/adaptive_rifla/output/SuperdenseCoding.ll create mode 100644 source/pip/tests-integration/resources/adaptive_rifla/output/SuperdenseCoding.out create mode 100644 source/pip/tests-integration/resources/adaptive_rifla/output/SwitchHandling.ll create mode 100644 source/pip/tests-integration/resources/adaptive_rifla/output/SwitchHandling.out create mode 100644 source/pip/tests-integration/resources/adaptive_rifla/output/ThreeQubitRepetitionCode.ll create mode 100644 source/pip/tests-integration/resources/adaptive_rifla/output/ThreeQubitRepetitionCode.out create mode 100644 source/pip/tests-integration/resources/adaptive_rifla/output/WithinApply.ll create mode 100644 source/pip/tests-integration/resources/adaptive_rifla/output/WithinApply.out create mode 100644 source/pip/tests-integration/test_adaptive_rifla_qir.py diff --git a/source/compiler/qsc/src/interpret.rs b/source/compiler/qsc/src/interpret.rs index eb7fca2fd3..a334751cbf 100644 --- a/source/compiler/qsc/src/interpret.rs +++ b/source/compiler/qsc/src/interpret.rs @@ -39,7 +39,7 @@ use qsc_data_structures::{ line_column::{Encoding, Range}, source::{Source, SourceMap}, span::Span, - target::TargetCapabilityFlags, + target::{Profile, TargetCapabilityFlags}, }; use qsc_eval::{ Env, ErrorBehavior, 
State, VariableInfo, @@ -1069,7 +1069,7 @@ impl Interpreter { entry_expr: Option<&str>, tracer_config: TracerConfig, ) -> std::result::Result> { - if self.capabilities == TargetCapabilityFlags::all() { + if self.capabilities > Profile::AdaptiveRIF.into() { return Err(vec![Error::UnsupportedRuntimeCapabilities]); } @@ -1089,7 +1089,7 @@ impl Interpreter { args: Value, tracer_config: TracerConfig, ) -> std::result::Result> { - if self.capabilities == TargetCapabilityFlags::all() { + if self.capabilities > Profile::AdaptiveRIF.into() { return Err(vec![Error::UnsupportedRuntimeCapabilities]); } diff --git a/source/compiler/qsc_circuit/src/rir_to_circuit.rs b/source/compiler/qsc_circuit/src/rir_to_circuit.rs index 0a5b36a6ac..ffb990a2f2 100644 --- a/source/compiler/qsc_circuit/src/rir_to_circuit.rs +++ b/source/compiler/qsc_circuit/src/rir_to_circuit.rs @@ -369,7 +369,9 @@ fn process_variables( let expr = expr_from_operand(&state.variables, operand)?; store_expr_in_variable(&mut state.variables, *variable, expr)?; } - instruction @ (Instruction::Store(..) | Instruction::BitwiseNot(..)) => { + instruction @ (Instruction::Store(..) + | Instruction::BitwiseNot(..) + | Instruction::Advanced(..)) => { return Err(Error::UnsupportedFeature(format!( "unsupported instruction in block: {instruction:?}" ))); diff --git a/source/compiler/qsc_codegen/src/qir.rs b/source/compiler/qsc_codegen/src/qir.rs index 59a307cade..95f3bfbd08 100644 --- a/source/compiler/qsc_codegen/src/qir.rs +++ b/source/compiler/qsc_codegen/src/qir.rs @@ -1,45 +1,16 @@ // Copyright (c) Microsoft Corporation. // Licensed under the MIT License. 
-#[cfg(test)] -mod instruction_tests; - -#[cfg(test)] -mod tests; - -use qsc_data_structures::{attrs::Attributes, target::TargetCapabilityFlags}; +use qsc_data_structures::target::{Profile, TargetCapabilityFlags}; use qsc_eval::val::Value; -use qsc_lowerer::map_hir_package_to_fir; use qsc_partial_eval::{ - PartialEvalConfig, ProgramEntry, partially_evaluate, partially_evaluate_call, + PartialEvalConfig, Program, ProgramEntry, partially_evaluate, partially_evaluate_call, }; use qsc_rca::PackageStoreComputeProperties; -use qsc_rir::{ - passes::check_and_transform, - rir::{self, ConditionCode, FcmpConditionCode, Program}, - utils::get_all_block_successors, -}; -use std::fmt::Write; - -fn lower_store(package_store: &qsc_frontend::compile::PackageStore) -> qsc_fir::fir::PackageStore { - let mut fir_store = qsc_fir::fir::PackageStore::new(); - for (id, unit) in package_store { - let package = qsc_lowerer::Lowerer::new().lower_package(&unit.package, &fir_store); - fir_store.insert(map_hir_package_to_fir(id), package); - } - fir_store -} +use qsc_rir::{passes::check_and_transform, rir}; -/// converts the given sources to QIR using the given language features. -pub fn hir_to_qir( - package_store: &qsc_frontend::compile::PackageStore, - capabilities: TargetCapabilityFlags, - compute_properties: Option, - entry: &ProgramEntry, -) -> Result { - let fir_store = lower_store(package_store); - fir_to_qir(&fir_store, capabilities, compute_properties, entry) -} +pub mod v1; +pub mod v2; /// converts the given sources to RIR using the given language features. pub fn fir_to_rir( @@ -78,7 +49,11 @@ pub fn fir_to_qir( }, )?; check_and_transform(&mut program); - Ok(ToQir::::to_qir(&program, &program)) + if capabilities <= Profile::AdaptiveRIF.into() { + Ok(v1::ToQir::::to_qir(&program, &program)) + } else { + Ok(v2::ToQir::::to_qir(&program, &program)) + } } /// converts the given callable to QIR using the given arguments and language features. 
@@ -105,7 +80,11 @@ pub fn fir_to_qir_from_callable( }, )?; check_and_transform(&mut program); - Ok(ToQir::::to_qir(&program, &program)) + if capabilities <= Profile::AdaptiveRIF.into() { + Ok(v1::ToQir::::to_qir(&program, &program)) + } else { + Ok(v2::ToQir::::to_qir(&program, &program)) + } } /// converts the given callable to RIR using the given arguments and language features. @@ -155,722 +134,3 @@ fn get_rir_from_compilation( partial_eval_config, ) } - -/// A trait for converting a type into QIR of type `T`. -/// This can be used to generate QIR strings or other representations. -pub trait ToQir { - fn to_qir(&self, program: &rir::Program) -> T; -} - -impl ToQir for rir::Literal { - fn to_qir(&self, _program: &rir::Program) -> String { - match self { - rir::Literal::Bool(b) => format!("i1 {b}"), - rir::Literal::Double(d) => { - if (d.floor() - d.ceil()).abs() < f64::EPSILON { - // The value is a whole number, which requires at least one decimal point - // to differentiate it from an integer value. 
- format!("double {d:.1}") - } else { - format!("double {d}") - } - } - rir::Literal::Integer(i) => format!("i64 {i}"), - rir::Literal::Pointer => "i8* null".to_string(), - rir::Literal::Qubit(q) => format!("%Qubit* inttoptr (i64 {q} to %Qubit*)"), - rir::Literal::Result(r) => format!("%Result* inttoptr (i64 {r} to %Result*)"), - rir::Literal::Tag(idx, len) => { - let len = len + 1; // +1 for the null terminator - format!( - "i8* getelementptr inbounds ([{len} x i8], [{len} x i8]* @{idx}, i64 0, i64 0)" - ) - } - } - } -} - -impl ToQir for rir::Ty { - fn to_qir(&self, _program: &rir::Program) -> String { - match self { - rir::Ty::Boolean => "i1".to_string(), - rir::Ty::Double => "double".to_string(), - rir::Ty::Integer => "i64".to_string(), - rir::Ty::Pointer => "i8*".to_string(), - rir::Ty::Qubit => "%Qubit*".to_string(), - rir::Ty::Result => "%Result*".to_string(), - } - } -} - -impl ToQir for Option { - fn to_qir(&self, program: &rir::Program) -> String { - match self { - Some(ty) => ToQir::::to_qir(ty, program), - None => "void".to_string(), - } - } -} - -impl ToQir for rir::VariableId { - fn to_qir(&self, _program: &rir::Program) -> String { - format!("%var_{}", self.0) - } -} - -impl ToQir for rir::Variable { - fn to_qir(&self, program: &rir::Program) -> String { - format!( - "{} {}", - ToQir::::to_qir(&self.ty, program), - ToQir::::to_qir(&self.variable_id, program) - ) - } -} - -impl ToQir for rir::Operand { - fn to_qir(&self, program: &rir::Program) -> String { - match self { - rir::Operand::Literal(lit) => ToQir::::to_qir(lit, program), - rir::Operand::Variable(var) => ToQir::::to_qir(var, program), - } - } -} - -impl ToQir for rir::FcmpConditionCode { - fn to_qir(&self, _program: &rir::Program) -> String { - match self { - rir::FcmpConditionCode::False => "false".to_string(), - rir::FcmpConditionCode::OrderedAndEqual => "oeq".to_string(), - rir::FcmpConditionCode::OrderedAndGreaterThan => "ogt".to_string(), - 
rir::FcmpConditionCode::OrderedAndGreaterThanOrEqual => "oge".to_string(), - rir::FcmpConditionCode::OrderedAndLessThan => "olt".to_string(), - rir::FcmpConditionCode::OrderedAndLessThanOrEqual => "ole".to_string(), - rir::FcmpConditionCode::OrderedAndNotEqual => "one".to_string(), - rir::FcmpConditionCode::Ordered => "ord".to_string(), - rir::FcmpConditionCode::UnorderedOrEqual => "ueq".to_string(), - rir::FcmpConditionCode::UnorderedOrGreaterThan => "ugt".to_string(), - rir::FcmpConditionCode::UnorderedOrGreaterThanOrEqual => "uge".to_string(), - rir::FcmpConditionCode::UnorderedOrLessThan => "ult".to_string(), - rir::FcmpConditionCode::UnorderedOrLessThanOrEqual => "ule".to_string(), - rir::FcmpConditionCode::UnorderedOrNotEqual => "une".to_string(), - rir::FcmpConditionCode::Unordered => "uno".to_string(), - rir::FcmpConditionCode::True => "true".to_string(), - } - } -} - -impl ToQir for rir::ConditionCode { - fn to_qir(&self, _program: &rir::Program) -> String { - match self { - rir::ConditionCode::Eq => "eq".to_string(), - rir::ConditionCode::Ne => "ne".to_string(), - rir::ConditionCode::Sgt => "sgt".to_string(), - rir::ConditionCode::Sge => "sge".to_string(), - rir::ConditionCode::Slt => "slt".to_string(), - rir::ConditionCode::Sle => "sle".to_string(), - } - } -} - -impl ToQir for rir::Instruction { - fn to_qir(&self, program: &rir::Program) -> String { - match self { - rir::Instruction::Add(lhs, rhs, variable) => { - binop_to_qir("add", lhs, rhs, *variable, program) - } - rir::Instruction::Ashr(lhs, rhs, variable) => { - binop_to_qir("ashr", lhs, rhs, *variable, program) - } - rir::Instruction::BitwiseAnd(lhs, rhs, variable) => { - simple_bitwise_to_qir("and", lhs, rhs, *variable, program) - } - rir::Instruction::BitwiseNot(value, variable) => { - bitwise_not_to_qir(value, *variable, program) - } - rir::Instruction::BitwiseOr(lhs, rhs, variable) => { - simple_bitwise_to_qir("or", lhs, rhs, *variable, program) - } - rir::Instruction::BitwiseXor(lhs, rhs, 
variable) => { - simple_bitwise_to_qir("xor", lhs, rhs, *variable, program) - } - rir::Instruction::Branch(cond, true_id, false_id, _) => { - format!( - " br {}, label %{}, label %{}", - ToQir::::to_qir(cond, program), - ToQir::::to_qir(true_id, program), - ToQir::::to_qir(false_id, program) - ) - } - rir::Instruction::Call(call_id, args, output, _) => { - call_to_qir(args, *call_id, *output, program) - } - rir::Instruction::Convert(operand, variable) => { - convert_to_qir(operand, *variable, program) - } - rir::Instruction::Fadd(lhs, rhs, variable) => { - fbinop_to_qir("fadd", lhs, rhs, *variable, program) - } - rir::Instruction::Fdiv(lhs, rhs, variable) => { - fbinop_to_qir("fdiv", lhs, rhs, *variable, program) - } - rir::Instruction::Fmul(lhs, rhs, variable) => { - fbinop_to_qir("fmul", lhs, rhs, *variable, program) - } - rir::Instruction::Fsub(lhs, rhs, variable) => { - fbinop_to_qir("fsub", lhs, rhs, *variable, program) - } - rir::Instruction::LogicalAnd(lhs, rhs, variable) => { - logical_binop_to_qir("and", lhs, rhs, *variable, program) - } - rir::Instruction::LogicalNot(value, variable) => { - logical_not_to_qir(value, *variable, program) - } - rir::Instruction::LogicalOr(lhs, rhs, variable) => { - logical_binop_to_qir("or", lhs, rhs, *variable, program) - } - rir::Instruction::Mul(lhs, rhs, variable) => { - binop_to_qir("mul", lhs, rhs, *variable, program) - } - rir::Instruction::Fcmp(op, lhs, rhs, variable) => { - fcmp_to_qir(*op, lhs, rhs, *variable, program) - } - rir::Instruction::Icmp(op, lhs, rhs, variable) => { - icmp_to_qir(*op, lhs, rhs, *variable, program) - } - rir::Instruction::Jump(block_id) => { - format!(" br label %{}", ToQir::::to_qir(block_id, program)) - } - rir::Instruction::Phi(args, variable) => phi_to_qir(args, *variable, program), - rir::Instruction::Return => " ret i64 0".to_string(), - rir::Instruction::Sdiv(lhs, rhs, variable) => { - binop_to_qir("sdiv", lhs, rhs, *variable, program) - } - rir::Instruction::Shl(lhs, rhs, variable) 
=> { - binop_to_qir("shl", lhs, rhs, *variable, program) - } - rir::Instruction::Srem(lhs, rhs, variable) => { - binop_to_qir("srem", lhs, rhs, *variable, program) - } - rir::Instruction::Store(_, _) => unimplemented!("store should be removed by pass"), - rir::Instruction::Sub(lhs, rhs, variable) => { - binop_to_qir("sub", lhs, rhs, *variable, program) - } - } - } -} - -fn convert_to_qir( - operand: &rir::Operand, - variable: rir::Variable, - program: &rir::Program, -) -> String { - let operand_ty = get_value_ty(operand); - let var_ty = get_variable_ty(variable); - assert_ne!( - operand_ty, var_ty, - "input/output types ({operand_ty}, {var_ty}) should not match in convert" - ); - - let convert_instr = match (operand_ty, var_ty) { - ("i64", "double") => "sitofp i64", - ("double", "i64") => "fptosi double", - _ => panic!("unsupported conversion from {operand_ty} to {var_ty} in convert instruction"), - }; - - format!( - " {} = {convert_instr} {} to {var_ty}", - ToQir::::to_qir(&variable.variable_id, program), - get_value_as_str(operand, program), - ) -} - -fn logical_not_to_qir( - value: &rir::Operand, - variable: rir::Variable, - program: &rir::Program, -) -> String { - let value_ty = get_value_ty(value); - let var_ty = get_variable_ty(variable); - assert_eq!( - value_ty, var_ty, - "mismatched input/output types ({value_ty}, {var_ty}) for not" - ); - assert_eq!(var_ty, "i1", "unsupported type {var_ty} for not"); - - format!( - " {} = xor i1 {}, true", - ToQir::::to_qir(&variable.variable_id, program), - get_value_as_str(value, program) - ) -} - -fn logical_binop_to_qir( - op: &str, - lhs: &rir::Operand, - rhs: &rir::Operand, - variable: rir::Variable, - program: &rir::Program, -) -> String { - let lhs_ty = get_value_ty(lhs); - let rhs_ty = get_value_ty(rhs); - let var_ty = get_variable_ty(variable); - assert_eq!( - lhs_ty, rhs_ty, - "mismatched input types ({lhs_ty}, {rhs_ty}) for {op}" - ); - assert_eq!( - lhs_ty, var_ty, - "mismatched input/output types ({lhs_ty}, 
{var_ty}) for {op}" - ); - assert_eq!(var_ty, "i1", "unsupported type {var_ty} for {op}"); - - format!( - " {} = {op} {var_ty} {}, {}", - ToQir::::to_qir(&variable.variable_id, program), - get_value_as_str(lhs, program), - get_value_as_str(rhs, program) - ) -} - -fn bitwise_not_to_qir( - value: &rir::Operand, - variable: rir::Variable, - program: &rir::Program, -) -> String { - let value_ty = get_value_ty(value); - let var_ty = get_variable_ty(variable); - assert_eq!( - value_ty, var_ty, - "mismatched input/output types ({value_ty}, {var_ty}) for not" - ); - assert_eq!(var_ty, "i64", "unsupported type {var_ty} for not"); - - format!( - " {} = xor {var_ty} {}, -1", - ToQir::::to_qir(&variable.variable_id, program), - get_value_as_str(value, program) - ) -} - -fn call_to_qir( - args: &[rir::Operand], - call_id: rir::CallableId, - output: Option, - program: &rir::Program, -) -> String { - let args = args - .iter() - .map(|arg| ToQir::::to_qir(arg, program)) - .collect::>() - .join(", "); - let callable = program.get_callable(call_id); - if let Some(output) = output { - format!( - " {} = call {} @{}({args})", - ToQir::::to_qir(&output.variable_id, program), - ToQir::::to_qir(&callable.output_type, program), - callable.name - ) - } else { - format!( - " call {} @{}({args})", - ToQir::::to_qir(&callable.output_type, program), - callable.name - ) - } -} - -fn fcmp_to_qir( - op: FcmpConditionCode, - lhs: &rir::Operand, - rhs: &rir::Operand, - variable: rir::Variable, - program: &rir::Program, -) -> String { - let lhs_ty = get_value_ty(lhs); - let rhs_ty = get_value_ty(rhs); - let var_ty = get_variable_ty(variable); - assert_eq!( - lhs_ty, rhs_ty, - "mismatched input types ({lhs_ty}, {rhs_ty}) for fcmp {op}" - ); - - assert_eq!(var_ty, "i1", "unsupported output type {var_ty} for fcmp"); - format!( - " {} = fcmp {} {lhs_ty} {}, {}", - ToQir::::to_qir(&variable.variable_id, program), - ToQir::::to_qir(&op, program), - get_value_as_str(lhs, program), - get_value_as_str(rhs, 
program) - ) -} - -fn icmp_to_qir( - op: ConditionCode, - lhs: &rir::Operand, - rhs: &rir::Operand, - variable: rir::Variable, - program: &rir::Program, -) -> String { - let lhs_ty = get_value_ty(lhs); - let rhs_ty = get_value_ty(rhs); - let var_ty = get_variable_ty(variable); - assert_eq!( - lhs_ty, rhs_ty, - "mismatched input types ({lhs_ty}, {rhs_ty}) for icmp {op}" - ); - - assert_eq!(var_ty, "i1", "unsupported output type {var_ty} for icmp"); - format!( - " {} = icmp {} {lhs_ty} {}, {}", - ToQir::::to_qir(&variable.variable_id, program), - ToQir::::to_qir(&op, program), - get_value_as_str(lhs, program), - get_value_as_str(rhs, program) - ) -} - -fn binop_to_qir( - op: &str, - lhs: &rir::Operand, - rhs: &rir::Operand, - variable: rir::Variable, - program: &rir::Program, -) -> String { - let lhs_ty = get_value_ty(lhs); - let rhs_ty = get_value_ty(rhs); - let var_ty = get_variable_ty(variable); - assert_eq!( - lhs_ty, rhs_ty, - "mismatched input types ({lhs_ty}, {rhs_ty}) for {op}" - ); - assert_eq!( - lhs_ty, var_ty, - "mismatched input/output types ({lhs_ty}, {var_ty}) for {op}" - ); - assert_eq!(var_ty, "i64", "unsupported type {var_ty} for {op}"); - - format!( - " {} = {op} {var_ty} {}, {}", - ToQir::::to_qir(&variable.variable_id, program), - get_value_as_str(lhs, program), - get_value_as_str(rhs, program) - ) -} - -fn fbinop_to_qir( - op: &str, - lhs: &rir::Operand, - rhs: &rir::Operand, - variable: rir::Variable, - program: &rir::Program, -) -> String { - let lhs_ty = get_value_ty(lhs); - let rhs_ty = get_value_ty(rhs); - let var_ty = get_variable_ty(variable); - assert_eq!( - lhs_ty, rhs_ty, - "mismatched input types ({lhs_ty}, {rhs_ty}) for {op}" - ); - assert_eq!( - lhs_ty, var_ty, - "mismatched input/output types ({lhs_ty}, {var_ty}) for {op}" - ); - assert_eq!(var_ty, "double", "unsupported type {var_ty} for {op}"); - - format!( - " {} = {op} {var_ty} {}, {}", - ToQir::::to_qir(&variable.variable_id, program), - get_value_as_str(lhs, program), - 
get_value_as_str(rhs, program) - ) -} - -fn simple_bitwise_to_qir( - op: &str, - lhs: &rir::Operand, - rhs: &rir::Operand, - variable: rir::Variable, - program: &rir::Program, -) -> String { - let lhs_ty = get_value_ty(lhs); - let rhs_ty = get_value_ty(rhs); - let var_ty = get_variable_ty(variable); - assert_eq!( - lhs_ty, rhs_ty, - "mismatched input types ({lhs_ty}, {rhs_ty}) for {op}" - ); - assert_eq!( - lhs_ty, var_ty, - "mismatched input/output types ({lhs_ty}, {var_ty}) for {op}" - ); - assert_eq!(var_ty, "i64", "unsupported type {var_ty} for {op}"); - - format!( - " {} = {op} {var_ty} {}, {}", - ToQir::::to_qir(&variable.variable_id, program), - get_value_as_str(lhs, program), - get_value_as_str(rhs, program) - ) -} - -fn phi_to_qir( - args: &[(rir::Operand, rir::BlockId)], - variable: rir::Variable, - program: &rir::Program, -) -> String { - assert!( - !args.is_empty(), - "phi instruction should have at least one argument" - ); - let var_ty = get_variable_ty(variable); - let args = args - .iter() - .map(|(arg, block_id)| { - let arg_ty = get_value_ty(arg); - assert_eq!( - arg_ty, var_ty, - "mismatched types ({var_ty} [... {arg_ty}]) for phi" - ); - format!( - "[{}, %{}]", - get_value_as_str(arg, program), - ToQir::::to_qir(block_id, program) - ) - }) - .collect::>() - .join(", "); - - format!( - " {} = phi {var_ty} {args}", - ToQir::::to_qir(&variable.variable_id, program) - ) -} - -fn get_value_as_str(value: &rir::Operand, program: &rir::Program) -> String { - match value { - rir::Operand::Literal(lit) => match lit { - rir::Literal::Bool(b) => format!("{b}"), - rir::Literal::Double(d) => { - if (d.floor() - d.ceil()).abs() < f64::EPSILON { - // The value is a whole number, which requires at least one decimal point - // to differentiate it from an integer value. 
- format!("{d:.1}") - } else { - format!("{d}") - } - } - rir::Literal::Integer(i) => format!("{i}"), - rir::Literal::Pointer => "null".to_string(), - rir::Literal::Qubit(q) => format!("{q}"), - rir::Literal::Result(r) => format!("{r}"), - rir::Literal::Tag(..) => panic!( - "tag literals should not be used as string values outside of output recording" - ), - }, - rir::Operand::Variable(var) => ToQir::::to_qir(&var.variable_id, program), - } -} - -fn get_value_ty(lhs: &rir::Operand) -> &str { - match lhs { - rir::Operand::Literal(lit) => match lit { - rir::Literal::Integer(_) => "i64", - rir::Literal::Bool(_) => "i1", - rir::Literal::Double(_) => get_f64_ty(), - rir::Literal::Qubit(_) => "%Qubit*", - rir::Literal::Result(_) => "%Result*", - rir::Literal::Pointer | rir::Literal::Tag(..) => "i8*", - }, - rir::Operand::Variable(var) => get_variable_ty(*var), - } -} - -fn get_variable_ty(variable: rir::Variable) -> &'static str { - match variable.ty { - rir::Ty::Integer => "i64", - rir::Ty::Boolean => "i1", - rir::Ty::Double => get_f64_ty(), - rir::Ty::Qubit => "%Qubit*", - rir::Ty::Result => "%Result*", - rir::Ty::Pointer => "i8*", - } -} - -/// phi only supports "Floating-Point Types" which are defined as: -/// - `half` (`f16`) -/// - `bfloat` -/// - `float` (`f32`) -/// - `double` (`f64`) -/// - `fp128` -/// -/// We only support `f64`, so we break the pattern used for integers -/// and have to use `double` here. -/// -/// This conflicts with the QIR spec which says f64. Need to follow up on this. 
-fn get_f64_ty() -> &'static str { - "double" -} - -impl ToQir for rir::BlockId { - fn to_qir(&self, _program: &rir::Program) -> String { - format!("block_{}", self.0) - } -} - -impl ToQir for rir::Block { - fn to_qir(&self, program: &rir::Program) -> String { - self.0 - .iter() - .map(|instr| ToQir::::to_qir(instr, program)) - .collect::>() - .join("\n") - } -} - -impl ToQir for rir::Callable { - fn to_qir(&self, program: &rir::Program) -> String { - let input_type = self - .input_type - .iter() - .map(|t| ToQir::::to_qir(t, program)) - .collect::>() - .join(", "); - let output_type = ToQir::::to_qir(&self.output_type, program); - let Some(entry_id) = self.body else { - return format!( - "declare {output_type} @{}({input_type}){}", - self.name, - match self.call_type { - rir::CallableType::Measurement | rir::CallableType::Reset => { - // These callables are a special case that need the irreversible attribute. - " #1" - } - rir::CallableType::NoiseIntrinsic => " #2", - _ => "", - } - ); - }; - let mut body = String::new(); - let mut all_blocks = vec![entry_id]; - all_blocks.extend(get_all_block_successors(entry_id, program)); - for block_id in all_blocks { - let block = program.get_block(block_id); - write!( - body, - "{}:\n{}\n", - ToQir::::to_qir(&block_id, program), - ToQir::::to_qir(block, program) - ) - .expect("writing to string should succeed"); - } - assert!( - input_type.is_empty(), - "entry point should not have an input" - ); - format!("define {output_type} @ENTRYPOINT__main() #0 {{\n{body}}}",) - } -} - -impl ToQir for rir::Program { - fn to_qir(&self, _program: &rir::Program) -> String { - let callables = self - .callables - .iter() - .map(|(_, callable)| ToQir::::to_qir(callable, self)) - .collect::>() - .join("\n\n"); - let profile = if self.config.is_base() { - "base_profile" - } else { - "adaptive_profile" - }; - let mut constants = String::default(); - for (idx, tag) in self.tags.iter().enumerate() { - // We need to add the tag as a global 
constant. - writeln!( - constants, - "@{idx} = internal constant [{} x i8] c\"{tag}\\00\"", - tag.len() + 1 - ) - .expect("writing to string should succeed"); - } - let body = format!( - include_str!("./qir/template.ll"), - constants, - callables, - profile, - self.num_qubits, - self.num_results, - get_additional_module_attributes(self) - ); - let flags = get_module_metadata(self); - body + "\n" + &flags - } -} - -fn get_additional_module_attributes(program: &rir::Program) -> String { - let mut attrs = String::new(); - if program.attrs.contains(Attributes::QdkNoise) { - attrs.push_str("\nattributes #2 = { \"qdk_noise\" }"); - } - - attrs -} - -/// Create the module metadata for the given program. -/// creating the `llvm.module.flags` and its associated values. -fn get_module_metadata(program: &rir::Program) -> String { - let mut flags = String::new(); - - // push the default attrs, we don't have any config values - // for now that would change any of them. - flags.push_str( - r#" -!0 = !{i32 1, !"qir_major_version", i32 1} -!1 = !{i32 7, !"qir_minor_version", i32 0} -!2 = !{i32 1, !"dynamic_qubit_management", i1 false} -!3 = !{i32 1, !"dynamic_result_management", i1 false} -"#, - ); - - let mut index = 4; - - // If we are not in the base profile, we need to add the capabilities - // associated with the adaptive profile. - if !program.config.is_base() { - // loop through the capabilities and add them to the metadata - // for values that we can generate. - for cap in program.config.capabilities.iter() { - match cap { - TargetCapabilityFlags::IntegerComputations => { - // Use `5` as the flag to signify "Append" mode. See https://llvm.org/docs/LangRef.html#module-flags-metadata - writeln!( - flags, - "!{index} = !{{i32 5, !\"int_computations\", !{{!\"i64\"}}}}", - ) - .expect("writing to string should succeed"); - index += 1; - } - TargetCapabilityFlags::FloatingPointComputations => { - // Use `5` as the flag to signify "Append" mode. 
See https://llvm.org/docs/LangRef.html#module-flags-metadata - writeln!( - flags, - "!{index} = !{{i32 5, !\"float_computations\", !{{!\"double\"}}}}", - ) - .expect("writing to string should succeed"); - index += 1; - } - _ => {} - } - } - } - - let mut metadata_def = String::new(); - metadata_def.push_str("!llvm.module.flags = !{"); - for i in 0..index - 1 { - write!(metadata_def, "!{i}, ").expect("writing to string should succeed"); - } - writeln!(metadata_def, "!{}}}", index - 1).expect("writing to string should succeed"); - metadata_def + &flags -} diff --git a/source/compiler/qsc_codegen/src/qir/v1.rs b/source/compiler/qsc_codegen/src/qir/v1.rs new file mode 100644 index 0000000000..aa44cd820e --- /dev/null +++ b/source/compiler/qsc_codegen/src/qir/v1.rs @@ -0,0 +1,737 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. + +#[cfg(test)] +mod instruction_tests; + +#[cfg(test)] +mod tests; + +use qsc_data_structures::{attrs::Attributes, target::TargetCapabilityFlags}; +use qsc_rir::{ + rir::{self, ConditionCode, FcmpConditionCode}, + utils::get_all_block_successors, +}; +use std::fmt::Write; + +/// A trait for converting a type into QIR of type `T`. +/// This can be used to generate QIR strings or other representations. +pub trait ToQir { + fn to_qir(&self, program: &rir::Program) -> T; +} + +impl ToQir for rir::Literal { + fn to_qir(&self, _program: &rir::Program) -> String { + match self { + rir::Literal::Bool(b) => format!("i1 {b}"), + rir::Literal::Double(d) => { + if (d.floor() - d.ceil()).abs() < f64::EPSILON { + // The value is a whole number, which requires at least one decimal point + // to differentiate it from an integer value. 
+ format!("double {d:.1}") + } else { + format!("double {d}") + } + } + rir::Literal::Integer(i) => format!("i64 {i}"), + rir::Literal::Pointer => "i8* null".to_string(), + rir::Literal::Qubit(q) => format!("%Qubit* inttoptr (i64 {q} to %Qubit*)"), + rir::Literal::Result(r) => format!("%Result* inttoptr (i64 {r} to %Result*)"), + rir::Literal::Tag(idx, len) => { + let len = len + 1; // +1 for the null terminator + format!( + "i8* getelementptr inbounds ([{len} x i8], [{len} x i8]* @{idx}, i64 0, i64 0)" + ) + } + } + } +} + +impl ToQir for rir::Ty { + fn to_qir(&self, _program: &rir::Program) -> String { + match self { + rir::Ty::Boolean => "i1".to_string(), + rir::Ty::Double => "double".to_string(), + rir::Ty::Integer => "i64".to_string(), + rir::Ty::Pointer => "i8*".to_string(), + rir::Ty::Qubit => "%Qubit*".to_string(), + rir::Ty::Result => "%Result*".to_string(), + } + } +} + +impl ToQir for Option { + fn to_qir(&self, program: &rir::Program) -> String { + match self { + Some(ty) => ToQir::::to_qir(ty, program), + None => "void".to_string(), + } + } +} + +impl ToQir for rir::VariableId { + fn to_qir(&self, _program: &rir::Program) -> String { + format!("%var_{}", self.0) + } +} + +impl ToQir for rir::Variable { + fn to_qir(&self, program: &rir::Program) -> String { + format!( + "{} {}", + ToQir::::to_qir(&self.ty, program), + ToQir::::to_qir(&self.variable_id, program) + ) + } +} + +impl ToQir for rir::Operand { + fn to_qir(&self, program: &rir::Program) -> String { + match self { + rir::Operand::Literal(lit) => ToQir::::to_qir(lit, program), + rir::Operand::Variable(var) => ToQir::::to_qir(var, program), + } + } +} + +impl ToQir for rir::FcmpConditionCode { + fn to_qir(&self, _program: &rir::Program) -> String { + match self { + rir::FcmpConditionCode::False => "false".to_string(), + rir::FcmpConditionCode::OrderedAndEqual => "oeq".to_string(), + rir::FcmpConditionCode::OrderedAndGreaterThan => "ogt".to_string(), + 
rir::FcmpConditionCode::OrderedAndGreaterThanOrEqual => "oge".to_string(), + rir::FcmpConditionCode::OrderedAndLessThan => "olt".to_string(), + rir::FcmpConditionCode::OrderedAndLessThanOrEqual => "ole".to_string(), + rir::FcmpConditionCode::OrderedAndNotEqual => "one".to_string(), + rir::FcmpConditionCode::Ordered => "ord".to_string(), + rir::FcmpConditionCode::UnorderedOrEqual => "ueq".to_string(), + rir::FcmpConditionCode::UnorderedOrGreaterThan => "ugt".to_string(), + rir::FcmpConditionCode::UnorderedOrGreaterThanOrEqual => "uge".to_string(), + rir::FcmpConditionCode::UnorderedOrLessThan => "ult".to_string(), + rir::FcmpConditionCode::UnorderedOrLessThanOrEqual => "ule".to_string(), + rir::FcmpConditionCode::UnorderedOrNotEqual => "une".to_string(), + rir::FcmpConditionCode::Unordered => "uno".to_string(), + rir::FcmpConditionCode::True => "true".to_string(), + } + } +} + +impl ToQir for rir::ConditionCode { + fn to_qir(&self, _program: &rir::Program) -> String { + match self { + rir::ConditionCode::Eq => "eq".to_string(), + rir::ConditionCode::Ne => "ne".to_string(), + rir::ConditionCode::Sgt => "sgt".to_string(), + rir::ConditionCode::Sge => "sge".to_string(), + rir::ConditionCode::Slt => "slt".to_string(), + rir::ConditionCode::Sle => "sle".to_string(), + } + } +} + +impl ToQir for rir::Instruction { + fn to_qir(&self, program: &rir::Program) -> String { + match self { + rir::Instruction::Add(lhs, rhs, variable) => { + binop_to_qir("add", lhs, rhs, *variable, program) + } + rir::Instruction::Ashr(lhs, rhs, variable) => { + binop_to_qir("ashr", lhs, rhs, *variable, program) + } + rir::Instruction::BitwiseAnd(lhs, rhs, variable) => { + simple_bitwise_to_qir("and", lhs, rhs, *variable, program) + } + rir::Instruction::BitwiseNot(value, variable) => { + bitwise_not_to_qir(value, *variable, program) + } + rir::Instruction::BitwiseOr(lhs, rhs, variable) => { + simple_bitwise_to_qir("or", lhs, rhs, *variable, program) + } + rir::Instruction::BitwiseXor(lhs, rhs, 
variable) => { + simple_bitwise_to_qir("xor", lhs, rhs, *variable, program) + } + rir::Instruction::Branch(cond, true_id, false_id, _) => { + format!( + " br {}, label %{}, label %{}", + ToQir::::to_qir(cond, program), + ToQir::::to_qir(true_id, program), + ToQir::::to_qir(false_id, program) + ) + } + rir::Instruction::Call(call_id, args, output, _) => { + call_to_qir(args, *call_id, *output, program) + } + rir::Instruction::Convert(operand, variable) => { + convert_to_qir(operand, *variable, program) + } + rir::Instruction::Fadd(lhs, rhs, variable) => { + fbinop_to_qir("fadd", lhs, rhs, *variable, program) + } + rir::Instruction::Fdiv(lhs, rhs, variable) => { + fbinop_to_qir("fdiv", lhs, rhs, *variable, program) + } + rir::Instruction::Fmul(lhs, rhs, variable) => { + fbinop_to_qir("fmul", lhs, rhs, *variable, program) + } + rir::Instruction::Fsub(lhs, rhs, variable) => { + fbinop_to_qir("fsub", lhs, rhs, *variable, program) + } + rir::Instruction::LogicalAnd(lhs, rhs, variable) => { + logical_binop_to_qir("and", lhs, rhs, *variable, program) + } + rir::Instruction::LogicalNot(value, variable) => { + logical_not_to_qir(value, *variable, program) + } + rir::Instruction::LogicalOr(lhs, rhs, variable) => { + logical_binop_to_qir("or", lhs, rhs, *variable, program) + } + rir::Instruction::Mul(lhs, rhs, variable) => { + binop_to_qir("mul", lhs, rhs, *variable, program) + } + rir::Instruction::Fcmp(op, lhs, rhs, variable) => { + fcmp_to_qir(*op, lhs, rhs, *variable, program) + } + rir::Instruction::Icmp(op, lhs, rhs, variable) => { + icmp_to_qir(*op, lhs, rhs, *variable, program) + } + rir::Instruction::Jump(block_id) => { + format!(" br label %{}", ToQir::::to_qir(block_id, program)) + } + rir::Instruction::Phi(args, variable) => phi_to_qir(args, *variable, program), + rir::Instruction::Return => " ret i64 0".to_string(), + rir::Instruction::Sdiv(lhs, rhs, variable) => { + binop_to_qir("sdiv", lhs, rhs, *variable, program) + } + rir::Instruction::Shl(lhs, rhs, variable) 
=> { + binop_to_qir("shl", lhs, rhs, *variable, program) + } + rir::Instruction::Srem(lhs, rhs, variable) => { + binop_to_qir("srem", lhs, rhs, *variable, program) + } + rir::Instruction::Store(_, _) => unimplemented!("store should be removed by pass"), + rir::Instruction::Sub(lhs, rhs, variable) => { + binop_to_qir("sub", lhs, rhs, *variable, program) + } + rir::Instruction::Advanced(..) => { + unimplemented!("advanced instructions are not supported in QIR v1 generation") + } + } + } +} + +fn convert_to_qir( + operand: &rir::Operand, + variable: rir::Variable, + program: &rir::Program, +) -> String { + let operand_ty = get_value_ty(operand); + let var_ty = get_variable_ty(variable); + assert_ne!( + operand_ty, var_ty, + "input/output types ({operand_ty}, {var_ty}) should not match in convert" + ); + + let convert_instr = match (operand_ty, var_ty) { + ("i64", "double") => "sitofp i64", + ("double", "i64") => "fptosi double", + _ => panic!("unsupported conversion from {operand_ty} to {var_ty} in convert instruction"), + }; + + format!( + " {} = {convert_instr} {} to {var_ty}", + ToQir::::to_qir(&variable.variable_id, program), + get_value_as_str(operand, program), + ) +} + +fn logical_not_to_qir( + value: &rir::Operand, + variable: rir::Variable, + program: &rir::Program, +) -> String { + let value_ty = get_value_ty(value); + let var_ty = get_variable_ty(variable); + assert_eq!( + value_ty, var_ty, + "mismatched input/output types ({value_ty}, {var_ty}) for not" + ); + assert_eq!(var_ty, "i1", "unsupported type {var_ty} for not"); + + format!( + " {} = xor i1 {}, true", + ToQir::::to_qir(&variable.variable_id, program), + get_value_as_str(value, program) + ) +} + +fn logical_binop_to_qir( + op: &str, + lhs: &rir::Operand, + rhs: &rir::Operand, + variable: rir::Variable, + program: &rir::Program, +) -> String { + let lhs_ty = get_value_ty(lhs); + let rhs_ty = get_value_ty(rhs); + let var_ty = get_variable_ty(variable); + assert_eq!( + lhs_ty, rhs_ty, + "mismatched 
input types ({lhs_ty}, {rhs_ty}) for {op}" + ); + assert_eq!( + lhs_ty, var_ty, + "mismatched input/output types ({lhs_ty}, {var_ty}) for {op}" + ); + assert_eq!(var_ty, "i1", "unsupported type {var_ty} for {op}"); + + format!( + " {} = {op} {var_ty} {}, {}", + ToQir::::to_qir(&variable.variable_id, program), + get_value_as_str(lhs, program), + get_value_as_str(rhs, program) + ) +} + +fn bitwise_not_to_qir( + value: &rir::Operand, + variable: rir::Variable, + program: &rir::Program, +) -> String { + let value_ty = get_value_ty(value); + let var_ty = get_variable_ty(variable); + assert_eq!( + value_ty, var_ty, + "mismatched input/output types ({value_ty}, {var_ty}) for not" + ); + assert_eq!(var_ty, "i64", "unsupported type {var_ty} for not"); + + format!( + " {} = xor {var_ty} {}, -1", + ToQir::::to_qir(&variable.variable_id, program), + get_value_as_str(value, program) + ) +} + +fn call_to_qir( + args: &[rir::Operand], + call_id: rir::CallableId, + output: Option, + program: &rir::Program, +) -> String { + let args = args + .iter() + .map(|arg| ToQir::::to_qir(arg, program)) + .collect::>() + .join(", "); + let callable = program.get_callable(call_id); + if let Some(output) = output { + format!( + " {} = call {} @{}({args})", + ToQir::::to_qir(&output.variable_id, program), + ToQir::::to_qir(&callable.output_type, program), + callable.name + ) + } else { + format!( + " call {} @{}({args})", + ToQir::::to_qir(&callable.output_type, program), + callable.name + ) + } +} + +fn fcmp_to_qir( + op: FcmpConditionCode, + lhs: &rir::Operand, + rhs: &rir::Operand, + variable: rir::Variable, + program: &rir::Program, +) -> String { + let lhs_ty = get_value_ty(lhs); + let rhs_ty = get_value_ty(rhs); + let var_ty = get_variable_ty(variable); + assert_eq!( + lhs_ty, rhs_ty, + "mismatched input types ({lhs_ty}, {rhs_ty}) for fcmp {op}" + ); + + assert_eq!(var_ty, "i1", "unsupported output type {var_ty} for fcmp"); + format!( + " {} = fcmp {} {lhs_ty} {}, {}", + 
ToQir::::to_qir(&variable.variable_id, program), + ToQir::::to_qir(&op, program), + get_value_as_str(lhs, program), + get_value_as_str(rhs, program) + ) +} + +fn icmp_to_qir( + op: ConditionCode, + lhs: &rir::Operand, + rhs: &rir::Operand, + variable: rir::Variable, + program: &rir::Program, +) -> String { + let lhs_ty = get_value_ty(lhs); + let rhs_ty = get_value_ty(rhs); + let var_ty = get_variable_ty(variable); + assert_eq!( + lhs_ty, rhs_ty, + "mismatched input types ({lhs_ty}, {rhs_ty}) for icmp {op}" + ); + + assert_eq!(var_ty, "i1", "unsupported output type {var_ty} for icmp"); + format!( + " {} = icmp {} {lhs_ty} {}, {}", + ToQir::::to_qir(&variable.variable_id, program), + ToQir::::to_qir(&op, program), + get_value_as_str(lhs, program), + get_value_as_str(rhs, program) + ) +} + +fn binop_to_qir( + op: &str, + lhs: &rir::Operand, + rhs: &rir::Operand, + variable: rir::Variable, + program: &rir::Program, +) -> String { + let lhs_ty = get_value_ty(lhs); + let rhs_ty = get_value_ty(rhs); + let var_ty = get_variable_ty(variable); + assert_eq!( + lhs_ty, rhs_ty, + "mismatched input types ({lhs_ty}, {rhs_ty}) for {op}" + ); + assert_eq!( + lhs_ty, var_ty, + "mismatched input/output types ({lhs_ty}, {var_ty}) for {op}" + ); + assert_eq!(var_ty, "i64", "unsupported type {var_ty} for {op}"); + + format!( + " {} = {op} {var_ty} {}, {}", + ToQir::::to_qir(&variable.variable_id, program), + get_value_as_str(lhs, program), + get_value_as_str(rhs, program) + ) +} + +fn fbinop_to_qir( + op: &str, + lhs: &rir::Operand, + rhs: &rir::Operand, + variable: rir::Variable, + program: &rir::Program, +) -> String { + let lhs_ty = get_value_ty(lhs); + let rhs_ty = get_value_ty(rhs); + let var_ty = get_variable_ty(variable); + assert_eq!( + lhs_ty, rhs_ty, + "mismatched input types ({lhs_ty}, {rhs_ty}) for {op}" + ); + assert_eq!( + lhs_ty, var_ty, + "mismatched input/output types ({lhs_ty}, {var_ty}) for {op}" + ); + assert_eq!(var_ty, "double", "unsupported type {var_ty} for 
{op}"); + + format!( + " {} = {op} {var_ty} {}, {}", + ToQir::::to_qir(&variable.variable_id, program), + get_value_as_str(lhs, program), + get_value_as_str(rhs, program) + ) +} + +fn simple_bitwise_to_qir( + op: &str, + lhs: &rir::Operand, + rhs: &rir::Operand, + variable: rir::Variable, + program: &rir::Program, +) -> String { + let lhs_ty = get_value_ty(lhs); + let rhs_ty = get_value_ty(rhs); + let var_ty = get_variable_ty(variable); + assert_eq!( + lhs_ty, rhs_ty, + "mismatched input types ({lhs_ty}, {rhs_ty}) for {op}" + ); + assert_eq!( + lhs_ty, var_ty, + "mismatched input/output types ({lhs_ty}, {var_ty}) for {op}" + ); + assert_eq!(var_ty, "i64", "unsupported type {var_ty} for {op}"); + + format!( + " {} = {op} {var_ty} {}, {}", + ToQir::::to_qir(&variable.variable_id, program), + get_value_as_str(lhs, program), + get_value_as_str(rhs, program) + ) +} + +fn phi_to_qir( + args: &[(rir::Operand, rir::BlockId)], + variable: rir::Variable, + program: &rir::Program, +) -> String { + assert!( + !args.is_empty(), + "phi instruction should have at least one argument" + ); + let var_ty = get_variable_ty(variable); + let args = args + .iter() + .map(|(arg, block_id)| { + let arg_ty = get_value_ty(arg); + assert_eq!( + arg_ty, var_ty, + "mismatched types ({var_ty} [... {arg_ty}]) for phi" + ); + format!( + "[{}, %{}]", + get_value_as_str(arg, program), + ToQir::::to_qir(block_id, program) + ) + }) + .collect::>() + .join(", "); + + format!( + " {} = phi {var_ty} {args}", + ToQir::::to_qir(&variable.variable_id, program) + ) +} + +fn get_value_as_str(value: &rir::Operand, program: &rir::Program) -> String { + match value { + rir::Operand::Literal(lit) => match lit { + rir::Literal::Bool(b) => format!("{b}"), + rir::Literal::Double(d) => { + if (d.floor() - d.ceil()).abs() < f64::EPSILON { + // The value is a whole number, which requires at least one decimal point + // to differentiate it from an integer value. 
+ format!("{d:.1}") + } else { + format!("{d}") + } + } + rir::Literal::Integer(i) => format!("{i}"), + rir::Literal::Pointer => "null".to_string(), + rir::Literal::Qubit(q) => format!("{q}"), + rir::Literal::Result(r) => format!("{r}"), + rir::Literal::Tag(..) => panic!( + "tag literals should not be used as string values outside of output recording" + ), + }, + rir::Operand::Variable(var) => ToQir::::to_qir(&var.variable_id, program), + } +} + +fn get_value_ty(lhs: &rir::Operand) -> &str { + match lhs { + rir::Operand::Literal(lit) => match lit { + rir::Literal::Integer(_) => "i64", + rir::Literal::Bool(_) => "i1", + rir::Literal::Double(_) => get_f64_ty(), + rir::Literal::Qubit(_) => "%Qubit*", + rir::Literal::Result(_) => "%Result*", + rir::Literal::Pointer | rir::Literal::Tag(..) => "i8*", + }, + rir::Operand::Variable(var) => get_variable_ty(*var), + } +} + +fn get_variable_ty(variable: rir::Variable) -> &'static str { + match variable.ty { + rir::Ty::Integer => "i64", + rir::Ty::Boolean => "i1", + rir::Ty::Double => get_f64_ty(), + rir::Ty::Qubit => "%Qubit*", + rir::Ty::Result => "%Result*", + rir::Ty::Pointer => "i8*", + } +} + +/// phi only supports "Floating-Point Types" which are defined as: +/// - `half` (`f16`) +/// - `bfloat` +/// - `float` (`f32`) +/// - `double` (`f64`) +/// - `fp128` +/// +/// We only support `f64`, so we break the pattern used for integers +/// and have to use `double` here. +/// +/// This conflicts with the QIR spec which says f64. Need to follow up on this. 
+fn get_f64_ty() -> &'static str { + "double" +} + +impl ToQir for rir::BlockId { + fn to_qir(&self, _program: &rir::Program) -> String { + format!("block_{}", self.0) + } +} + +impl ToQir for rir::Block { + fn to_qir(&self, program: &rir::Program) -> String { + self.0 + .iter() + .map(|instr| ToQir::::to_qir(instr, program)) + .collect::>() + .join("\n") + } +} + +impl ToQir for rir::Callable { + fn to_qir(&self, program: &rir::Program) -> String { + let input_type = self + .input_type + .iter() + .map(|t| ToQir::::to_qir(t, program)) + .collect::>() + .join(", "); + let output_type = ToQir::::to_qir(&self.output_type, program); + let Some(entry_id) = self.body else { + return format!( + "declare {output_type} @{}({input_type}){}", + self.name, + match self.call_type { + rir::CallableType::Measurement | rir::CallableType::Reset => { + // These callables are a special case that need the irreversible attribute. + " #1" + } + rir::CallableType::NoiseIntrinsic => " #2", + _ => "", + } + ); + }; + let mut body = String::new(); + let mut all_blocks = vec![entry_id]; + all_blocks.extend(get_all_block_successors(entry_id, program)); + for block_id in all_blocks { + let block = program.get_block(block_id); + write!( + body, + "{}:\n{}\n", + ToQir::::to_qir(&block_id, program), + ToQir::::to_qir(block, program) + ) + .expect("writing to string should succeed"); + } + assert!( + input_type.is_empty(), + "entry point should not have an input" + ); + format!("define {output_type} @ENTRYPOINT__main() #0 {{\n{body}}}",) + } +} + +impl ToQir for rir::Program { + fn to_qir(&self, _program: &rir::Program) -> String { + let callables = self + .callables + .iter() + .map(|(_, callable)| ToQir::::to_qir(callable, self)) + .collect::>() + .join("\n\n"); + let profile = if self.config.is_base() { + "base_profile" + } else { + "adaptive_profile" + }; + let mut constants = String::default(); + for (idx, tag) in self.tags.iter().enumerate() { + // We need to add the tag as a global 
constant. + writeln!( + constants, + "@{idx} = internal constant [{} x i8] c\"{tag}\\00\"", + tag.len() + 1 + ) + .expect("writing to string should succeed"); + } + let body = format!( + include_str!("./v1/template.ll"), + constants, + callables, + profile, + self.num_qubits, + self.num_results, + get_additional_module_attributes(self) + ); + let flags = get_module_metadata(self); + body + "\n" + &flags + } +} + +fn get_additional_module_attributes(program: &rir::Program) -> String { + let mut attrs = String::new(); + if program.attrs.contains(Attributes::QdkNoise) { + attrs.push_str("\nattributes #2 = { \"qdk_noise\" }"); + } + + attrs +} + +/// Create the module metadata for the given program. +/// creating the `llvm.module.flags` and its associated values. +fn get_module_metadata(program: &rir::Program) -> String { + let mut flags = String::new(); + + // push the default attrs, we don't have any config values + // for now that would change any of them. + flags.push_str( + r#" +!0 = !{i32 1, !"qir_major_version", i32 1} +!1 = !{i32 7, !"qir_minor_version", i32 0} +!2 = !{i32 1, !"dynamic_qubit_management", i1 false} +!3 = !{i32 1, !"dynamic_result_management", i1 false} +"#, + ); + + let mut index = 4; + + // If we are not in the base profile, we need to add the capabilities + // associated with the adaptive profile. + if !program.config.is_base() { + // loop through the capabilities and add them to the metadata + // for values that we can generate. + for cap in program.config.capabilities.iter() { + match cap { + TargetCapabilityFlags::IntegerComputations => { + // Use `5` as the flag to signify "Append" mode. See https://llvm.org/docs/LangRef.html#module-flags-metadata + writeln!( + flags, + "!{index} = !{{i32 5, !\"int_computations\", !{{!\"i64\"}}}}", + ) + .expect("writing to string should succeed"); + index += 1; + } + TargetCapabilityFlags::FloatingPointComputations => { + // Use `5` as the flag to signify "Append" mode. 
See https://llvm.org/docs/LangRef.html#module-flags-metadata + writeln!( + flags, + "!{index} = !{{i32 5, !\"float_computations\", !{{!\"double\"}}}}", + ) + .expect("writing to string should succeed"); + index += 1; + } + _ => {} + } + } + } + + let mut metadata_def = String::new(); + metadata_def.push_str("!llvm.module.flags = !{"); + for i in 0..index - 1 { + write!(metadata_def, "!{i}, ").expect("writing to string should succeed"); + } + writeln!(metadata_def, "!{}}}", index - 1).expect("writing to string should succeed"); + metadata_def + &flags +} diff --git a/source/compiler/qsc_codegen/src/qir/instruction_tests.rs b/source/compiler/qsc_codegen/src/qir/v1/instruction_tests.rs similarity index 100% rename from source/compiler/qsc_codegen/src/qir/instruction_tests.rs rename to source/compiler/qsc_codegen/src/qir/v1/instruction_tests.rs diff --git a/source/compiler/qsc_codegen/src/qir/instruction_tests/bool.rs b/source/compiler/qsc_codegen/src/qir/v1/instruction_tests/bool.rs similarity index 99% rename from source/compiler/qsc_codegen/src/qir/instruction_tests/bool.rs rename to source/compiler/qsc_codegen/src/qir/v1/instruction_tests/bool.rs index 84f9dc4fd1..a2968d9882 100644 --- a/source/compiler/qsc_codegen/src/qir/instruction_tests/bool.rs +++ b/source/compiler/qsc_codegen/src/qir/v1/instruction_tests/bool.rs @@ -1,7 +1,7 @@ // Copyright (c) Microsoft Corporation. // Licensed under the MIT License. 
-use crate::qir::ToQir; +use super::super::ToQir; use expect_test::expect; use qsc_rir::rir; diff --git a/source/compiler/qsc_codegen/src/qir/instruction_tests/double.rs b/source/compiler/qsc_codegen/src/qir/v1/instruction_tests/double.rs similarity index 99% rename from source/compiler/qsc_codegen/src/qir/instruction_tests/double.rs rename to source/compiler/qsc_codegen/src/qir/v1/instruction_tests/double.rs index bdcfdbaa43..649ce98f6e 100644 --- a/source/compiler/qsc_codegen/src/qir/instruction_tests/double.rs +++ b/source/compiler/qsc_codegen/src/qir/v1/instruction_tests/double.rs @@ -3,7 +3,7 @@ use core::f64::consts::{E, PI}; -use crate::qir::ToQir; +use super::super::ToQir; use expect_test::expect; use qsc_rir::rir::{ FcmpConditionCode, Instruction, Literal, Operand, Program, Ty, Variable, VariableId, diff --git a/source/compiler/qsc_codegen/src/qir/instruction_tests/int.rs b/source/compiler/qsc_codegen/src/qir/v1/instruction_tests/int.rs similarity index 99% rename from source/compiler/qsc_codegen/src/qir/instruction_tests/int.rs rename to source/compiler/qsc_codegen/src/qir/v1/instruction_tests/int.rs index 6c0031e55d..fe2c725544 100644 --- a/source/compiler/qsc_codegen/src/qir/instruction_tests/int.rs +++ b/source/compiler/qsc_codegen/src/qir/v1/instruction_tests/int.rs @@ -1,7 +1,7 @@ // Copyright (c) Microsoft Corporation. // Licensed under the MIT License. 
-use crate::qir::ToQir; +use super::super::ToQir; use expect_test::expect; use qsc_rir::rir; diff --git a/source/compiler/qsc_codegen/src/qir/instruction_tests/invalid.rs b/source/compiler/qsc_codegen/src/qir/v1/instruction_tests/invalid.rs similarity index 99% rename from source/compiler/qsc_codegen/src/qir/instruction_tests/invalid.rs rename to source/compiler/qsc_codegen/src/qir/v1/instruction_tests/invalid.rs index c0f8683596..62383253c0 100644 --- a/source/compiler/qsc_codegen/src/qir/instruction_tests/invalid.rs +++ b/source/compiler/qsc_codegen/src/qir/v1/instruction_tests/invalid.rs @@ -1,7 +1,7 @@ // Copyright (c) Microsoft Corporation. // Licensed under the MIT License. -use crate::qir::ToQir; +use super::super::ToQir; use qsc_rir::rir; #[test] diff --git a/source/compiler/qsc_codegen/src/qir/instruction_tests/phi.rs b/source/compiler/qsc_codegen/src/qir/v1/instruction_tests/phi.rs similarity index 98% rename from source/compiler/qsc_codegen/src/qir/instruction_tests/phi.rs rename to source/compiler/qsc_codegen/src/qir/v1/instruction_tests/phi.rs index dac4cf69b7..f06c346d80 100644 --- a/source/compiler/qsc_codegen/src/qir/instruction_tests/phi.rs +++ b/source/compiler/qsc_codegen/src/qir/v1/instruction_tests/phi.rs @@ -1,7 +1,7 @@ // Copyright (c) Microsoft Corporation. // Licensed under the MIT License. 
-use crate::qir::ToQir; +use super::super::ToQir; use expect_test::expect; use qsc_rir::rir; diff --git a/source/compiler/qsc_codegen/src/qir/template.ll b/source/compiler/qsc_codegen/src/qir/v1/template.ll similarity index 100% rename from source/compiler/qsc_codegen/src/qir/template.ll rename to source/compiler/qsc_codegen/src/qir/v1/template.ll diff --git a/source/compiler/qsc_codegen/src/qir/tests.rs b/source/compiler/qsc_codegen/src/qir/v1/tests.rs similarity index 100% rename from source/compiler/qsc_codegen/src/qir/tests.rs rename to source/compiler/qsc_codegen/src/qir/v1/tests.rs diff --git a/source/compiler/qsc_codegen/src/qir/v2.rs b/source/compiler/qsc_codegen/src/qir/v2.rs new file mode 100644 index 0000000000..c721c01ee6 --- /dev/null +++ b/source/compiler/qsc_codegen/src/qir/v2.rs @@ -0,0 +1,669 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. + +#[cfg(test)] +mod instruction_tests; + +#[cfg(test)] +mod tests; + +use qsc_data_structures::attrs::Attributes; +use qsc_rir::{ + rir::{self, ConditionCode, FcmpConditionCode}, + utils::get_all_block_successors, +}; +use std::fmt::Write; + +/// A trait for converting a type into QIR of type `T`. +/// This can be used to generate QIR strings or other representations. +pub trait ToQir { + fn to_qir(&self, program: &rir::Program) -> T; +} + +impl ToQir for rir::Literal { + fn to_qir(&self, _program: &rir::Program) -> String { + match self { + rir::Literal::Bool(b) => format!("i1 {b}"), + rir::Literal::Double(d) => { + if (d.floor() - d.ceil()).abs() < f64::EPSILON { + // The value is a whole number, which requires at least one decimal point + // to differentiate it from an integer value. 
+ format!("double {d:.1}") + } else { + format!("double {d}") + } + } + rir::Literal::Integer(i) => format!("i64 {i}"), + rir::Literal::Pointer => "ptr null".to_string(), + rir::Literal::Qubit(q) => format!("ptr inttoptr (i64 {q} to ptr)"), + rir::Literal::Result(r) => format!("ptr inttoptr (i64 {r} to ptr)"), + rir::Literal::Tag(idx, _) => format!("ptr @{idx}"), + } + } +} + +impl ToQir for rir::Ty { + fn to_qir(&self, _program: &rir::Program) -> String { + match self { + rir::Ty::Boolean => "i1".to_string(), + rir::Ty::Double => "double".to_string(), + rir::Ty::Integer => "i64".to_string(), + rir::Ty::Pointer | rir::Ty::Qubit | rir::Ty::Result => "ptr".to_string(), + } + } +} + +impl ToQir for Option { + fn to_qir(&self, program: &rir::Program) -> String { + match self { + Some(ty) => ToQir::::to_qir(ty, program), + None => "void".to_string(), + } + } +} + +impl ToQir for rir::VariableId { + fn to_qir(&self, _program: &rir::Program) -> String { + format!("%var_{}", self.0) + } +} + +impl ToQir for rir::Variable { + fn to_qir(&self, program: &rir::Program) -> String { + format!( + "{} {}", + ToQir::::to_qir(&self.ty, program), + ToQir::::to_qir(&self.variable_id, program) + ) + } +} + +impl ToQir for rir::Operand { + fn to_qir(&self, program: &rir::Program) -> String { + match self { + rir::Operand::Literal(lit) => ToQir::::to_qir(lit, program), + rir::Operand::Variable(var) => ToQir::::to_qir(var, program), + } + } +} + +impl ToQir for rir::FcmpConditionCode { + fn to_qir(&self, _program: &rir::Program) -> String { + match self { + rir::FcmpConditionCode::False => "false".to_string(), + rir::FcmpConditionCode::OrderedAndEqual => "oeq".to_string(), + rir::FcmpConditionCode::OrderedAndGreaterThan => "ogt".to_string(), + rir::FcmpConditionCode::OrderedAndGreaterThanOrEqual => "oge".to_string(), + rir::FcmpConditionCode::OrderedAndLessThan => "olt".to_string(), + rir::FcmpConditionCode::OrderedAndLessThanOrEqual => "ole".to_string(), + 
rir::FcmpConditionCode::OrderedAndNotEqual => "one".to_string(), + rir::FcmpConditionCode::Ordered => "ord".to_string(), + rir::FcmpConditionCode::UnorderedOrEqual => "ueq".to_string(), + rir::FcmpConditionCode::UnorderedOrGreaterThan => "ugt".to_string(), + rir::FcmpConditionCode::UnorderedOrGreaterThanOrEqual => "uge".to_string(), + rir::FcmpConditionCode::UnorderedOrLessThan => "ult".to_string(), + rir::FcmpConditionCode::UnorderedOrLessThanOrEqual => "ule".to_string(), + rir::FcmpConditionCode::UnorderedOrNotEqual => "une".to_string(), + rir::FcmpConditionCode::Unordered => "uno".to_string(), + rir::FcmpConditionCode::True => "true".to_string(), + } + } +} + +impl ToQir for rir::ConditionCode { + fn to_qir(&self, _program: &rir::Program) -> String { + match self { + rir::ConditionCode::Eq => "eq".to_string(), + rir::ConditionCode::Ne => "ne".to_string(), + rir::ConditionCode::Sgt => "sgt".to_string(), + rir::ConditionCode::Sge => "sge".to_string(), + rir::ConditionCode::Slt => "slt".to_string(), + rir::ConditionCode::Sle => "sle".to_string(), + } + } +} + +impl ToQir for rir::Instruction { + fn to_qir(&self, program: &rir::Program) -> String { + match self { + rir::Instruction::Add(lhs, rhs, variable) => { + binop_to_qir("add", lhs, rhs, *variable, program) + } + rir::Instruction::Ashr(lhs, rhs, variable) => { + binop_to_qir("ashr", lhs, rhs, *variable, program) + } + rir::Instruction::BitwiseAnd(lhs, rhs, variable) => { + simple_bitwise_to_qir("and", lhs, rhs, *variable, program) + } + rir::Instruction::BitwiseNot(value, variable) => { + bitwise_not_to_qir(value, *variable, program) + } + rir::Instruction::BitwiseOr(lhs, rhs, variable) => { + simple_bitwise_to_qir("or", lhs, rhs, *variable, program) + } + rir::Instruction::BitwiseXor(lhs, rhs, variable) => { + simple_bitwise_to_qir("xor", lhs, rhs, *variable, program) + } + rir::Instruction::Branch(cond, true_id, false_id, _) => { + format!( + " br {}, label %{}, label %{}", + ToQir::::to_qir(cond, program), + 
ToQir::::to_qir(true_id, program), + ToQir::::to_qir(false_id, program) + ) + } + rir::Instruction::Call(call_id, args, output, _) => { + call_to_qir(args, *call_id, *output, program) + } + rir::Instruction::Convert(operand, variable) => { + convert_to_qir(operand, *variable, program) + } + rir::Instruction::Fadd(lhs, rhs, variable) => { + fbinop_to_qir("fadd", lhs, rhs, *variable, program) + } + rir::Instruction::Fdiv(lhs, rhs, variable) => { + fbinop_to_qir("fdiv", lhs, rhs, *variable, program) + } + rir::Instruction::Fmul(lhs, rhs, variable) => { + fbinop_to_qir("fmul", lhs, rhs, *variable, program) + } + rir::Instruction::Fsub(lhs, rhs, variable) => { + fbinop_to_qir("fsub", lhs, rhs, *variable, program) + } + rir::Instruction::LogicalAnd(lhs, rhs, variable) => { + logical_binop_to_qir("and", lhs, rhs, *variable, program) + } + rir::Instruction::LogicalNot(value, variable) => { + logical_not_to_qir(value, *variable, program) + } + rir::Instruction::LogicalOr(lhs, rhs, variable) => { + logical_binop_to_qir("or", lhs, rhs, *variable, program) + } + rir::Instruction::Mul(lhs, rhs, variable) => { + binop_to_qir("mul", lhs, rhs, *variable, program) + } + rir::Instruction::Fcmp(op, lhs, rhs, variable) => { + fcmp_to_qir(*op, lhs, rhs, *variable, program) + } + rir::Instruction::Icmp(op, lhs, rhs, variable) => { + icmp_to_qir(*op, lhs, rhs, *variable, program) + } + rir::Instruction::Jump(block_id) => { + format!(" br label %{}", ToQir::::to_qir(block_id, program)) + } + rir::Instruction::Phi(..) 
=> { + unreachable!("phi nodes should not be inserted for QIR v2 generation") + } + rir::Instruction::Return => " ret i64 0".to_string(), + rir::Instruction::Sdiv(lhs, rhs, variable) => { + binop_to_qir("sdiv", lhs, rhs, *variable, program) + } + rir::Instruction::Shl(lhs, rhs, variable) => { + binop_to_qir("shl", lhs, rhs, *variable, program) + } + rir::Instruction::Srem(lhs, rhs, variable) => { + binop_to_qir("srem", lhs, rhs, *variable, program) + } + rir::Instruction::Store(operand, variable) => { + store_to_qir(*operand, *variable, program) + } + rir::Instruction::Sub(lhs, rhs, variable) => { + binop_to_qir("sub", lhs, rhs, *variable, program) + } + rir::Instruction::Advanced(instr) => ToQir::::to_qir(instr, program), + } + } +} + +impl ToQir for rir::AdvancedInstr { + fn to_qir(&self, program: &rir::Program) -> String { + match self { + rir::AdvancedInstr::Alloca(variable) => alloca_to_qir(*variable, program), + rir::AdvancedInstr::Load(var_from, var_to) => load_to_qir(*var_from, *var_to, program), + } + } +} + +fn convert_to_qir( + operand: &rir::Operand, + variable: rir::Variable, + program: &rir::Program, +) -> String { + let operand_ty = get_value_ty(operand); + let var_ty = get_variable_ty(variable); + assert_ne!( + operand_ty, var_ty, + "input/output types ({operand_ty}, {var_ty}) should not match in convert" + ); + + let convert_instr = match (operand_ty, var_ty) { + ("i64", "double") => "sitofp i64", + ("double", "i64") => "fptosi double", + _ => panic!("unsupported conversion from {operand_ty} to {var_ty} in convert instruction"), + }; + + format!( + " {} = {convert_instr} {} to {var_ty}", + ToQir::::to_qir(&variable.variable_id, program), + get_value_as_str(operand, program), + ) +} + +fn store_to_qir(operand: rir::Operand, variable: rir::Variable, program: &rir::Program) -> String { + let op_ty = get_value_ty(&operand); + format!( + " store {op_ty} {}, ptr {}", + get_value_as_str(&operand, program), + ToQir::::to_qir(&variable.variable_id, program) 
+ ) +} + +fn load_to_qir(var_from: rir::Variable, var_to: rir::Variable, program: &rir::Program) -> String { + let var_to_ty = get_variable_ty(var_to); + format!( + " {} = load {var_to_ty}, ptr {}", + ToQir::::to_qir(&var_to.variable_id, program), + ToQir::::to_qir(&var_from.variable_id, program) + ) +} + +fn alloca_to_qir(variable: rir::Variable, program: &rir::Program) -> String { + let variable_ty = get_variable_ty(variable); + format!( + " {} = alloca {variable_ty}", + ToQir::::to_qir(&variable.variable_id, program) + ) +} + +fn logical_not_to_qir( + value: &rir::Operand, + variable: rir::Variable, + program: &rir::Program, +) -> String { + let value_ty = get_value_ty(value); + let var_ty = get_variable_ty(variable); + assert_eq!( + value_ty, var_ty, + "mismatched input/output types ({value_ty}, {var_ty}) for not" + ); + assert_eq!(var_ty, "i1", "unsupported type {var_ty} for not"); + + format!( + " {} = xor i1 {}, true", + ToQir::::to_qir(&variable.variable_id, program), + get_value_as_str(value, program) + ) +} + +fn logical_binop_to_qir( + op: &str, + lhs: &rir::Operand, + rhs: &rir::Operand, + variable: rir::Variable, + program: &rir::Program, +) -> String { + let lhs_ty = get_value_ty(lhs); + let rhs_ty = get_value_ty(rhs); + let var_ty = get_variable_ty(variable); + assert_eq!( + lhs_ty, rhs_ty, + "mismatched input types ({lhs_ty}, {rhs_ty}) for {op}" + ); + assert_eq!( + lhs_ty, var_ty, + "mismatched input/output types ({lhs_ty}, {var_ty}) for {op}" + ); + assert_eq!(var_ty, "i1", "unsupported type {var_ty} for {op}"); + + format!( + " {} = {op} {var_ty} {}, {}", + ToQir::::to_qir(&variable.variable_id, program), + get_value_as_str(lhs, program), + get_value_as_str(rhs, program) + ) +} + +fn bitwise_not_to_qir( + value: &rir::Operand, + variable: rir::Variable, + program: &rir::Program, +) -> String { + let value_ty = get_value_ty(value); + let var_ty = get_variable_ty(variable); + assert_eq!( + value_ty, var_ty, + "mismatched input/output types 
({value_ty}, {var_ty}) for not" + ); + assert_eq!(var_ty, "i64", "unsupported type {var_ty} for not"); + + format!( + " {} = xor {var_ty} {}, -1", + ToQir::::to_qir(&variable.variable_id, program), + get_value_as_str(value, program) + ) +} + +fn call_to_qir( + args: &[rir::Operand], + call_id: rir::CallableId, + output: Option, + program: &rir::Program, +) -> String { + let args = args + .iter() + .map(|arg| ToQir::::to_qir(arg, program)) + .collect::>() + .join(", "); + let callable = program.get_callable(call_id); + if let Some(output) = output { + format!( + " {} = call {} @{}({args})", + ToQir::::to_qir(&output.variable_id, program), + ToQir::::to_qir(&callable.output_type, program), + callable.name + ) + } else { + format!( + " call {} @{}({args})", + ToQir::::to_qir(&callable.output_type, program), + callable.name + ) + } +} + +fn fcmp_to_qir( + op: FcmpConditionCode, + lhs: &rir::Operand, + rhs: &rir::Operand, + variable: rir::Variable, + program: &rir::Program, +) -> String { + let lhs_ty = get_value_ty(lhs); + let rhs_ty = get_value_ty(rhs); + let var_ty = get_variable_ty(variable); + assert_eq!( + lhs_ty, rhs_ty, + "mismatched input types ({lhs_ty}, {rhs_ty}) for fcmp {op}" + ); + + assert_eq!(var_ty, "i1", "unsupported output type {var_ty} for fcmp"); + format!( + " {} = fcmp {} {lhs_ty} {}, {}", + ToQir::::to_qir(&variable.variable_id, program), + ToQir::::to_qir(&op, program), + get_value_as_str(lhs, program), + get_value_as_str(rhs, program) + ) +} + +fn icmp_to_qir( + op: ConditionCode, + lhs: &rir::Operand, + rhs: &rir::Operand, + variable: rir::Variable, + program: &rir::Program, +) -> String { + let lhs_ty = get_value_ty(lhs); + let rhs_ty = get_value_ty(rhs); + let var_ty = get_variable_ty(variable); + assert_eq!( + lhs_ty, rhs_ty, + "mismatched input types ({lhs_ty}, {rhs_ty}) for icmp {op}" + ); + + assert_eq!(var_ty, "i1", "unsupported output type {var_ty} for icmp"); + format!( + " {} = icmp {} {lhs_ty} {}, {}", + 
ToQir::::to_qir(&variable.variable_id, program), + ToQir::::to_qir(&op, program), + get_value_as_str(lhs, program), + get_value_as_str(rhs, program) + ) +} + +fn binop_to_qir( + op: &str, + lhs: &rir::Operand, + rhs: &rir::Operand, + variable: rir::Variable, + program: &rir::Program, +) -> String { + let lhs_ty = get_value_ty(lhs); + let rhs_ty = get_value_ty(rhs); + let var_ty = get_variable_ty(variable); + assert_eq!( + lhs_ty, rhs_ty, + "mismatched input types ({lhs_ty}, {rhs_ty}) for {op}" + ); + assert_eq!( + lhs_ty, var_ty, + "mismatched input/output types ({lhs_ty}, {var_ty}) for {op}" + ); + assert_eq!(var_ty, "i64", "unsupported type {var_ty} for {op}"); + + format!( + " {} = {op} {var_ty} {}, {}", + ToQir::::to_qir(&variable.variable_id, program), + get_value_as_str(lhs, program), + get_value_as_str(rhs, program) + ) +} + +fn fbinop_to_qir( + op: &str, + lhs: &rir::Operand, + rhs: &rir::Operand, + variable: rir::Variable, + program: &rir::Program, +) -> String { + let lhs_ty = get_value_ty(lhs); + let rhs_ty = get_value_ty(rhs); + let var_ty = get_variable_ty(variable); + assert_eq!( + lhs_ty, rhs_ty, + "mismatched input types ({lhs_ty}, {rhs_ty}) for {op}" + ); + assert_eq!( + lhs_ty, var_ty, + "mismatched input/output types ({lhs_ty}, {var_ty}) for {op}" + ); + assert_eq!(var_ty, "double", "unsupported type {var_ty} for {op}"); + + format!( + " {} = {op} {var_ty} {}, {}", + ToQir::::to_qir(&variable.variable_id, program), + get_value_as_str(lhs, program), + get_value_as_str(rhs, program) + ) +} + +fn simple_bitwise_to_qir( + op: &str, + lhs: &rir::Operand, + rhs: &rir::Operand, + variable: rir::Variable, + program: &rir::Program, +) -> String { + let lhs_ty = get_value_ty(lhs); + let rhs_ty = get_value_ty(rhs); + let var_ty = get_variable_ty(variable); + assert_eq!( + lhs_ty, rhs_ty, + "mismatched input types ({lhs_ty}, {rhs_ty}) for {op}" + ); + assert_eq!( + lhs_ty, var_ty, + "mismatched input/output types ({lhs_ty}, {var_ty}) for {op}" + ); + 
assert_eq!(var_ty, "i64", "unsupported type {var_ty} for {op}"); + + format!( + " {} = {op} {var_ty} {}, {}", + ToQir::::to_qir(&variable.variable_id, program), + get_value_as_str(lhs, program), + get_value_as_str(rhs, program) + ) +} + +fn get_value_as_str(value: &rir::Operand, program: &rir::Program) -> String { + match value { + rir::Operand::Literal(lit) => match lit { + rir::Literal::Bool(b) => format!("{b}"), + rir::Literal::Double(d) => { + if (d.floor() - d.ceil()).abs() < f64::EPSILON { + // The value is a whole number, which requires at least one decimal point + // to differentiate it from an integer value. + format!("{d:.1}") + } else { + format!("{d}") + } + } + rir::Literal::Integer(i) => format!("{i}"), + rir::Literal::Pointer => "null".to_string(), + rir::Literal::Qubit(q) => format!("{q}"), + rir::Literal::Result(r) => format!("{r}"), + rir::Literal::Tag(..) => panic!( + "tag literals should not be used as string values outside of output recording" + ), + }, + rir::Operand::Variable(var) => ToQir::::to_qir(&var.variable_id, program), + } +} + +fn get_value_ty(lhs: &rir::Operand) -> &str { + match lhs { + rir::Operand::Literal(lit) => match lit { + rir::Literal::Integer(_) => "i64", + rir::Literal::Bool(_) => "i1", + rir::Literal::Double(_) => get_f64_ty(), + rir::Literal::Qubit(_) + | rir::Literal::Result(_) + | rir::Literal::Pointer + | rir::Literal::Tag(..) 
=> "ptr", + }, + rir::Operand::Variable(var) => get_variable_ty(*var), + } +} + +fn get_variable_ty(variable: rir::Variable) -> &'static str { + match variable.ty { + rir::Ty::Integer => "i64", + rir::Ty::Boolean => "i1", + rir::Ty::Double => get_f64_ty(), + rir::Ty::Qubit | rir::Ty::Result | rir::Ty::Pointer => "ptr", + } +} + +/// phi only supports "Floating-Point Types" which are defined as: +/// - `half` (`f16`) +/// - `bfloat` +/// - `float` (`f32`) +/// - `double` (`f64`) +/// - `fp128` +/// +/// We only support `f64`, so we break the pattern used for integers +/// and have to use `double` here. +/// +/// This conflicts with the QIR spec which says f64. Need to follow up on this. +fn get_f64_ty() -> &'static str { + "double" +} + +impl ToQir for rir::BlockId { + fn to_qir(&self, _program: &rir::Program) -> String { + format!("block_{}", self.0) + } +} + +impl ToQir for rir::Block { + fn to_qir(&self, program: &rir::Program) -> String { + self.0 + .iter() + .map(|instr| ToQir::::to_qir(instr, program)) + .collect::>() + .join("\n") + } +} + +impl ToQir for rir::Callable { + fn to_qir(&self, program: &rir::Program) -> String { + let input_type = self + .input_type + .iter() + .map(|t| ToQir::::to_qir(t, program)) + .collect::>() + .join(", "); + let output_type = ToQir::::to_qir(&self.output_type, program); + let Some(entry_id) = self.body else { + return format!( + "declare {output_type} @{}({input_type}){}", + self.name, + match self.call_type { + rir::CallableType::Measurement | rir::CallableType::Reset => { + // These callables are a special case that need the irreversible attribute. 
+ " #1" + } + rir::CallableType::NoiseIntrinsic => " #2", + _ => "", + } + ); + }; + let mut body = String::new(); + let mut all_blocks = vec![entry_id]; + all_blocks.extend(get_all_block_successors(entry_id, program)); + for block_id in all_blocks { + let block = program.get_block(block_id); + write!( + body, + "{}:\n{}\n", + ToQir::::to_qir(&block_id, program), + ToQir::::to_qir(block, program) + ) + .expect("writing to string should succeed"); + } + assert!( + input_type.is_empty(), + "entry point should not have an input" + ); + format!("define {output_type} @ENTRYPOINT__main() #0 {{\n{body}}}",) + } +} + +impl ToQir for rir::Program { + fn to_qir(&self, _program: &rir::Program) -> String { + let callables = self + .callables + .iter() + .map(|(_, callable)| ToQir::::to_qir(callable, self)) + .collect::>() + .join("\n\n"); + let mut constants = String::default(); + for (idx, tag) in self.tags.iter().enumerate() { + // We need to add the tag as a global constant. + writeln!( + constants, + "@{idx} = internal constant [{} x i8] c\"{tag}\\00\"", + tag.len() + 1 + ) + .expect("writing to string should succeed"); + } + let body = format!( + include_str!("./v2/template.ll"), + constants, + callables, + self.num_qubits, + self.num_results, + get_additional_module_attributes(self) + ); + body + } +} + +fn get_additional_module_attributes(program: &rir::Program) -> String { + let mut attrs = String::new(); + if program.attrs.contains(Attributes::QdkNoise) { + attrs.push_str("\nattributes #2 = { \"qdk_noise\" }"); + } + + attrs +} diff --git a/source/compiler/qsc_codegen/src/qir/v2/instruction_tests.rs b/source/compiler/qsc_codegen/src/qir/v2/instruction_tests.rs new file mode 100644 index 0000000000..581b070a12 --- /dev/null +++ b/source/compiler/qsc_codegen/src/qir/v2/instruction_tests.rs @@ -0,0 +1,10 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. 
+ +mod alloca; +mod bool; +mod double; +mod int; +mod invalid; +mod load; +mod store; diff --git a/source/compiler/qsc_codegen/src/qir/v2/instruction_tests/alloca.rs b/source/compiler/qsc_codegen/src/qir/v2/instruction_tests/alloca.rs new file mode 100644 index 0000000000..3823006e5f --- /dev/null +++ b/source/compiler/qsc_codegen/src/qir/v2/instruction_tests/alloca.rs @@ -0,0 +1,38 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. + +use crate::qir::v2::ToQir; +use expect_test::expect; +use qsc_rir::rir; + +#[test] +fn alloca_integer_without_size() { + let inst = rir::Instruction::Advanced(rir::AdvancedInstr::Alloca(rir::Variable::new_integer( + rir::VariableId(0), + ))); + expect![" %var_0 = alloca i64"].assert_eq(&inst.to_qir(&rir::Program::default())); +} + +#[test] +fn alloca_bool_without_size() { + let inst = rir::Instruction::Advanced(rir::AdvancedInstr::Alloca(rir::Variable::new_boolean( + rir::VariableId(0), + ))); + expect![" %var_0 = alloca i1"].assert_eq(&inst.to_qir(&rir::Program::default())); +} + +#[test] +fn alloca_double_without_size() { + let inst = rir::Instruction::Advanced(rir::AdvancedInstr::Alloca(rir::Variable::new_double( + rir::VariableId(0), + ))); + expect![" %var_0 = alloca double"].assert_eq(&inst.to_qir(&rir::Program::default())); +} + +#[test] +fn alloca_pointer_without_size() { + let inst = rir::Instruction::Advanced(rir::AdvancedInstr::Alloca(rir::Variable::new_ptr( + rir::VariableId(0), + ))); + expect![" %var_0 = alloca ptr"].assert_eq(&inst.to_qir(&rir::Program::default())); +} diff --git a/source/compiler/qsc_codegen/src/qir/v2/instruction_tests/bool.rs b/source/compiler/qsc_codegen/src/qir/v2/instruction_tests/bool.rs new file mode 100644 index 0000000000..a2968d9882 --- /dev/null +++ b/source/compiler/qsc_codegen/src/qir/v2/instruction_tests/bool.rs @@ -0,0 +1,109 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. 
+ +use super::super::ToQir; +use expect_test::expect; +use qsc_rir::rir; + +#[test] +fn logical_and_literals() { + let inst = rir::Instruction::LogicalAnd( + rir::Operand::Literal(rir::Literal::Bool(true)), + rir::Operand::Literal(rir::Literal::Bool(false)), + rir::Variable { + variable_id: rir::VariableId(0), + ty: rir::Ty::Boolean, + }, + ); + expect![" %var_0 = and i1 true, false"].assert_eq(&inst.to_qir(&rir::Program::default())); +} + +#[test] +fn logical_and_variables() { + let inst = rir::Instruction::LogicalAnd( + rir::Operand::Variable(rir::Variable { + variable_id: rir::VariableId(1), + ty: rir::Ty::Boolean, + }), + rir::Operand::Variable(rir::Variable { + variable_id: rir::VariableId(2), + ty: rir::Ty::Boolean, + }), + rir::Variable { + variable_id: rir::VariableId(0), + ty: rir::Ty::Boolean, + }, + ); + expect![" %var_0 = and i1 %var_1, %var_2"].assert_eq(&inst.to_qir(&rir::Program::default())); +} + +#[test] +fn logical_not_true_literal() { + let inst = rir::Instruction::LogicalNot( + rir::Operand::Literal(rir::Literal::Bool(true)), + rir::Variable { + variable_id: rir::VariableId(0), + ty: rir::Ty::Boolean, + }, + ); + expect![" %var_0 = xor i1 true, true"].assert_eq(&inst.to_qir(&rir::Program::default())); +} + +#[test] +fn logical_not_variables() { + let inst = rir::Instruction::LogicalNot( + rir::Operand::Variable(rir::Variable { + variable_id: rir::VariableId(1), + ty: rir::Ty::Boolean, + }), + rir::Variable { + variable_id: rir::VariableId(0), + ty: rir::Ty::Boolean, + }, + ); + expect![" %var_0 = xor i1 %var_1, true"].assert_eq(&inst.to_qir(&rir::Program::default())); +} + +#[test] +fn logical_not_false_literal() { + let inst = rir::Instruction::LogicalNot( + rir::Operand::Literal(rir::Literal::Bool(false)), + rir::Variable { + variable_id: rir::VariableId(0), + ty: rir::Ty::Boolean, + }, + ); + expect![" %var_0 = xor i1 false, true"].assert_eq(&inst.to_qir(&rir::Program::default())); +} + +#[test] +fn logical_or_literals() { + let inst = 
rir::Instruction::LogicalOr( + rir::Operand::Literal(rir::Literal::Bool(true)), + rir::Operand::Literal(rir::Literal::Bool(false)), + rir::Variable { + variable_id: rir::VariableId(0), + ty: rir::Ty::Boolean, + }, + ); + expect![" %var_0 = or i1 true, false"].assert_eq(&inst.to_qir(&rir::Program::default())); +} + +#[test] +fn logical_or_variables() { + let inst = rir::Instruction::LogicalOr( + rir::Operand::Variable(rir::Variable { + variable_id: rir::VariableId(1), + ty: rir::Ty::Boolean, + }), + rir::Operand::Variable(rir::Variable { + variable_id: rir::VariableId(2), + ty: rir::Ty::Boolean, + }), + rir::Variable { + variable_id: rir::VariableId(0), + ty: rir::Ty::Boolean, + }, + ); + expect![" %var_0 = or i1 %var_1, %var_2"].assert_eq(&inst.to_qir(&rir::Program::default())); +} diff --git a/source/compiler/qsc_codegen/src/qir/v2/instruction_tests/double.rs b/source/compiler/qsc_codegen/src/qir/v2/instruction_tests/double.rs new file mode 100644 index 0000000000..649ce98f6e --- /dev/null +++ b/source/compiler/qsc_codegen/src/qir/v2/instruction_tests/double.rs @@ -0,0 +1,507 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. 
+ +use core::f64::consts::{E, PI}; + +use super::super::ToQir; +use expect_test::expect; +use qsc_rir::rir::{ + FcmpConditionCode, Instruction, Literal, Operand, Program, Ty, Variable, VariableId, +}; + +#[test] +#[should_panic(expected = "unsupported type double for add")] +fn add_double_literals() { + let inst = Instruction::Add( + Operand::Literal(Literal::Double(PI)), + Operand::Literal(Literal::Double(E)), + Variable { + variable_id: VariableId(0), + ty: Ty::Double, + }, + ); + let _ = &inst.to_qir(&Program::default()); +} + +#[test] +#[should_panic(expected = "unsupported type double for sub")] +fn sub_double_literals() { + let inst = Instruction::Sub( + Operand::Literal(Literal::Double(PI)), + Operand::Literal(Literal::Double(E)), + Variable { + variable_id: VariableId(0), + ty: Ty::Double, + }, + ); + let _ = &inst.to_qir(&Program::default()); +} + +#[test] +#[should_panic(expected = "unsupported type double for mul")] +fn mul_double_literals() { + let inst = Instruction::Mul( + Operand::Literal(Literal::Double(PI)), + Operand::Literal(Literal::Double(E)), + Variable { + variable_id: VariableId(0), + ty: Ty::Double, + }, + ); + let _ = &inst.to_qir(&Program::default()); +} + +#[test] +#[should_panic(expected = "unsupported type double for sdiv")] +fn sdiv_double_literals() { + let inst = Instruction::Sdiv( + Operand::Literal(Literal::Double(PI)), + Operand::Literal(Literal::Double(E)), + Variable { + variable_id: VariableId(0), + ty: Ty::Double, + }, + ); + let _ = &inst.to_qir(&Program::default()); +} + +#[test] +fn fadd_double_literals() { + let inst = Instruction::Fadd( + Operand::Literal(Literal::Double(PI)), + Operand::Literal(Literal::Double(E)), + Variable { + variable_id: VariableId(0), + ty: Ty::Double, + }, + ); + expect![" %var_0 = fadd double 3.141592653589793, 2.718281828459045"] + .assert_eq(&inst.to_qir(&Program::default())); +} + +#[test] +#[should_panic(expected = "unsupported type double for ashr")] +fn ashr_double_literals() { + let inst 
= Instruction::Ashr( + Operand::Literal(Literal::Double(PI)), + Operand::Literal(Literal::Double(E)), + Variable { + variable_id: VariableId(0), + ty: Ty::Double, + }, + ); + let _ = &inst.to_qir(&Program::default()); +} + +#[test] +#[should_panic(expected = "unsupported type double for and")] +fn bitwise_and_double_literals() { + let inst = Instruction::BitwiseAnd( + Operand::Literal(Literal::Double(PI)), + Operand::Literal(Literal::Double(E)), + Variable { + variable_id: VariableId(0), + ty: Ty::Double, + }, + ); + let _ = &inst.to_qir(&Program::default()); +} + +#[test] +#[should_panic(expected = "unsupported type double for not")] +fn bitwise_not_double_literals() { + let inst = Instruction::BitwiseNot( + Operand::Literal(Literal::Double(PI)), + Variable { + variable_id: VariableId(0), + ty: Ty::Double, + }, + ); + let _ = &inst.to_qir(&Program::default()); +} + +#[test] +#[should_panic(expected = "unsupported type double for or")] +fn bitwise_or_double_literals() { + let inst = Instruction::BitwiseOr( + Operand::Literal(Literal::Double(PI)), + Operand::Literal(Literal::Double(E)), + Variable { + variable_id: VariableId(0), + ty: Ty::Double, + }, + ); + let _ = &inst.to_qir(&Program::default()); +} + +#[test] +#[should_panic(expected = "unsupported type double for xor")] +fn bitwise_xor_double_literals() { + let inst = Instruction::BitwiseXor( + Operand::Literal(Literal::Double(PI)), + Operand::Literal(Literal::Double(E)), + Variable { + variable_id: VariableId(0), + ty: Ty::Double, + }, + ); + let _ = &inst.to_qir(&Program::default()); +} + +#[test] +fn fadd_double_variables() { + let inst = Instruction::Fadd( + Operand::Variable(Variable { + variable_id: VariableId(1), + ty: Ty::Double, + }), + Operand::Variable(Variable { + variable_id: VariableId(2), + ty: Ty::Double, + }), + Variable { + variable_id: VariableId(0), + ty: Ty::Double, + }, + ); + expect![" %var_0 = fadd double %var_1, %var_2"].assert_eq(&inst.to_qir(&Program::default())); +} + +#[test] +fn 
fcmp_oeq_double_literals() { + let inst = Instruction::Fcmp( + FcmpConditionCode::OrderedAndEqual, + Operand::Literal(Literal::Double(PI)), + Operand::Literal(Literal::Double(E)), + Variable { + variable_id: VariableId(0), + ty: Ty::Boolean, + }, + ); + expect![" %var_0 = fcmp oeq double 3.141592653589793, 2.718281828459045"] + .assert_eq(&inst.to_qir(&Program::default())); +} + +#[test] +fn fcmp_oeq_double_variables() { + let inst = Instruction::Fcmp( + FcmpConditionCode::OrderedAndEqual, + Operand::Variable(Variable { + variable_id: VariableId(1), + ty: Ty::Double, + }), + Operand::Variable(Variable { + variable_id: VariableId(2), + ty: Ty::Double, + }), + Variable { + variable_id: VariableId(0), + ty: Ty::Boolean, + }, + ); + expect![" %var_0 = fcmp oeq double %var_1, %var_2"] + .assert_eq(&inst.to_qir(&Program::default())); +} + +#[test] +fn fcmp_one_double_literals() { + let inst = Instruction::Fcmp( + FcmpConditionCode::OrderedAndNotEqual, + Operand::Literal(Literal::Double(PI)), + Operand::Literal(Literal::Double(E)), + Variable { + variable_id: VariableId(0), + ty: Ty::Boolean, + }, + ); + expect![" %var_0 = fcmp one double 3.141592653589793, 2.718281828459045"] + .assert_eq(&inst.to_qir(&Program::default())); +} + +#[test] +fn fcmp_one_double_variables() { + let inst = Instruction::Fcmp( + FcmpConditionCode::OrderedAndNotEqual, + Operand::Variable(Variable { + variable_id: VariableId(1), + ty: Ty::Double, + }), + Operand::Variable(Variable { + variable_id: VariableId(2), + ty: Ty::Double, + }), + Variable { + variable_id: VariableId(0), + ty: Ty::Boolean, + }, + ); + expect![" %var_0 = fcmp one double %var_1, %var_2"] + .assert_eq(&inst.to_qir(&Program::default())); +} +#[test] +fn fcmp_olt_double_literals() { + let inst = Instruction::Fcmp( + FcmpConditionCode::OrderedAndLessThan, + Operand::Literal(Literal::Double(PI)), + Operand::Literal(Literal::Double(E)), + Variable { + variable_id: VariableId(0), + ty: Ty::Boolean, + }, + ); + expect![" %var_0 = 
fcmp olt double 3.141592653589793, 2.718281828459045"] + .assert_eq(&inst.to_qir(&Program::default())); +} + +#[test] +fn fcmp_olt_double_variables() { + let inst = Instruction::Fcmp( + FcmpConditionCode::OrderedAndLessThan, + Operand::Variable(Variable { + variable_id: VariableId(1), + ty: Ty::Double, + }), + Operand::Variable(Variable { + variable_id: VariableId(2), + ty: Ty::Double, + }), + Variable { + variable_id: VariableId(0), + ty: Ty::Boolean, + }, + ); + expect![" %var_0 = fcmp olt double %var_1, %var_2"] + .assert_eq(&inst.to_qir(&Program::default())); +} +#[test] +fn fcmp_ole_double_literals() { + let inst = Instruction::Fcmp( + FcmpConditionCode::OrderedAndLessThanOrEqual, + Operand::Literal(Literal::Double(PI)), + Operand::Literal(Literal::Double(E)), + Variable { + variable_id: VariableId(0), + ty: Ty::Boolean, + }, + ); + expect![" %var_0 = fcmp ole double 3.141592653589793, 2.718281828459045"] + .assert_eq(&inst.to_qir(&Program::default())); +} + +#[test] +fn fcmp_ole_double_variables() { + let inst = Instruction::Fcmp( + FcmpConditionCode::OrderedAndLessThanOrEqual, + Operand::Variable(Variable { + variable_id: VariableId(1), + ty: Ty::Double, + }), + Operand::Variable(Variable { + variable_id: VariableId(2), + ty: Ty::Double, + }), + Variable { + variable_id: VariableId(0), + ty: Ty::Boolean, + }, + ); + expect![" %var_0 = fcmp ole double %var_1, %var_2"] + .assert_eq(&inst.to_qir(&Program::default())); +} +#[test] +fn fcmp_ogt_double_literals() { + let inst = Instruction::Fcmp( + FcmpConditionCode::OrderedAndGreaterThan, + Operand::Literal(Literal::Double(PI)), + Operand::Literal(Literal::Double(E)), + Variable { + variable_id: VariableId(0), + ty: Ty::Boolean, + }, + ); + expect![" %var_0 = fcmp ogt double 3.141592653589793, 2.718281828459045"] + .assert_eq(&inst.to_qir(&Program::default())); +} + +#[test] +fn fcmp_ogt_double_variables() { + let inst = Instruction::Fcmp( + FcmpConditionCode::OrderedAndGreaterThan, + Operand::Variable(Variable { 
+ variable_id: VariableId(1), + ty: Ty::Double, + }), + Operand::Variable(Variable { + variable_id: VariableId(2), + ty: Ty::Double, + }), + Variable { + variable_id: VariableId(0), + ty: Ty::Boolean, + }, + ); + expect![" %var_0 = fcmp ogt double %var_1, %var_2"] + .assert_eq(&inst.to_qir(&Program::default())); +} +#[test] +fn fcmp_oge_double_literals() { + let inst = Instruction::Fcmp( + FcmpConditionCode::OrderedAndGreaterThanOrEqual, + Operand::Literal(Literal::Double(PI)), + Operand::Literal(Literal::Double(E)), + Variable { + variable_id: VariableId(0), + ty: Ty::Boolean, + }, + ); + expect![" %var_0 = fcmp oge double 3.141592653589793, 2.718281828459045"] + .assert_eq(&inst.to_qir(&Program::default())); +} + +#[test] +fn fcmp_oge_double_variables() { + let inst = Instruction::Fcmp( + FcmpConditionCode::OrderedAndGreaterThanOrEqual, + Operand::Variable(Variable { + variable_id: VariableId(1), + ty: Ty::Double, + }), + Operand::Variable(Variable { + variable_id: VariableId(2), + ty: Ty::Double, + }), + Variable { + variable_id: VariableId(0), + ty: Ty::Boolean, + }, + ); + expect![" %var_0 = fcmp oge double %var_1, %var_2"] + .assert_eq(&inst.to_qir(&Program::default())); +} + +#[test] +fn fmul_double_literals() { + let inst = Instruction::Fmul( + Operand::Literal(Literal::Double(PI)), + Operand::Literal(Literal::Double(E)), + Variable { + variable_id: VariableId(0), + ty: Ty::Double, + }, + ); + expect![" %var_0 = fmul double 3.141592653589793, 2.718281828459045"] + .assert_eq(&inst.to_qir(&Program::default())); +} + +#[test] +fn fmul_double_variables() { + let inst = Instruction::Fmul( + Operand::Variable(Variable { + variable_id: VariableId(1), + ty: Ty::Double, + }), + Operand::Variable(Variable { + variable_id: VariableId(2), + ty: Ty::Double, + }), + Variable { + variable_id: VariableId(0), + ty: Ty::Double, + }, + ); + expect![" %var_0 = fmul double %var_1, %var_2"].assert_eq(&inst.to_qir(&Program::default())); +} + +#[test] +fn fdiv_double_literals() { 
+ let inst = Instruction::Fdiv( + Operand::Literal(Literal::Double(PI)), + Operand::Literal(Literal::Double(E)), + Variable { + variable_id: VariableId(0), + ty: Ty::Double, + }, + ); + expect![" %var_0 = fdiv double 3.141592653589793, 2.718281828459045"] + .assert_eq(&inst.to_qir(&Program::default())); +} + +#[test] +fn fdiv_double_variables() { + let inst = Instruction::Fdiv( + Operand::Variable(Variable { + variable_id: VariableId(1), + ty: Ty::Double, + }), + Operand::Variable(Variable { + variable_id: VariableId(2), + ty: Ty::Double, + }), + Variable { + variable_id: VariableId(0), + ty: Ty::Double, + }, + ); + expect![" %var_0 = fdiv double %var_1, %var_2"].assert_eq(&inst.to_qir(&Program::default())); +} + +#[test] +fn fsub_double_literals() { + let inst = Instruction::Fsub( + Operand::Literal(Literal::Double(PI)), + Operand::Literal(Literal::Double(E)), + Variable { + variable_id: VariableId(0), + ty: Ty::Double, + }, + ); + expect![" %var_0 = fsub double 3.141592653589793, 2.718281828459045"] + .assert_eq(&inst.to_qir(&Program::default())); +} + +#[test] +fn fsub_double_variables() { + let inst = Instruction::Fsub( + Operand::Variable(Variable { + variable_id: VariableId(1), + ty: Ty::Double, + }), + Operand::Variable(Variable { + variable_id: VariableId(2), + ty: Ty::Double, + }), + Variable { + variable_id: VariableId(0), + ty: Ty::Double, + }, + ); + expect![" %var_0 = fsub double %var_1, %var_2"].assert_eq(&inst.to_qir(&Program::default())); +} + +#[test] +fn convert_double_literal_to_integer() { + let inst = Instruction::Convert( + Operand::Literal(Literal::Double(PI)), + Variable { + variable_id: VariableId(0), + ty: Ty::Integer, + }, + ); + expect![" %var_0 = fptosi double 3.141592653589793 to i64"] + .assert_eq(&inst.to_qir(&Program::default())); +} + +#[test] +fn convert_double_variable_to_integer() { + let inst = Instruction::Convert( + Operand::Variable(Variable { + variable_id: VariableId(1), + ty: Ty::Double, + }), + Variable { + variable_id: 
VariableId(0), + ty: Ty::Integer, + }, + ); + expect![" %var_0 = fptosi double %var_1 to i64"].assert_eq(&inst.to_qir(&Program::default())); +} diff --git a/source/compiler/qsc_codegen/src/qir/v2/instruction_tests/int.rs b/source/compiler/qsc_codegen/src/qir/v2/instruction_tests/int.rs new file mode 100644 index 0000000000..fe2c725544 --- /dev/null +++ b/source/compiler/qsc_codegen/src/qir/v2/instruction_tests/int.rs @@ -0,0 +1,587 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. + +use super::super::ToQir; +use expect_test::expect; +use qsc_rir::rir; + +#[test] +fn add_integer_literals() { + let inst = rir::Instruction::Add( + rir::Operand::Literal(rir::Literal::Integer(2)), + rir::Operand::Literal(rir::Literal::Integer(5)), + rir::Variable { + variable_id: rir::VariableId(0), + ty: rir::Ty::Integer, + }, + ); + expect![" %var_0 = add i64 2, 5"].assert_eq(&inst.to_qir(&rir::Program::default())); +} + +#[test] +fn add_integer_variables() { + let inst = rir::Instruction::Add( + rir::Operand::Variable(rir::Variable { + variable_id: rir::VariableId(1), + ty: rir::Ty::Integer, + }), + rir::Operand::Variable(rir::Variable { + variable_id: rir::VariableId(2), + ty: rir::Ty::Integer, + }), + rir::Variable { + variable_id: rir::VariableId(0), + ty: rir::Ty::Integer, + }, + ); + expect![" %var_0 = add i64 %var_1, %var_2"].assert_eq(&inst.to_qir(&rir::Program::default())); +} + +#[test] +fn ashr_integer_literals() { + let inst = rir::Instruction::Ashr( + rir::Operand::Literal(rir::Literal::Integer(2)), + rir::Operand::Literal(rir::Literal::Integer(5)), + rir::Variable { + variable_id: rir::VariableId(0), + ty: rir::Ty::Integer, + }, + ); + expect![" %var_0 = ashr i64 2, 5"].assert_eq(&inst.to_qir(&rir::Program::default())); +} + +#[test] +fn ashr_integer_variables() { + let inst = rir::Instruction::Ashr( + rir::Operand::Variable(rir::Variable { + variable_id: rir::VariableId(1), + ty: rir::Ty::Integer, + }), + 
rir::Operand::Variable(rir::Variable { + variable_id: rir::VariableId(2), + ty: rir::Ty::Integer, + }), + rir::Variable { + variable_id: rir::VariableId(0), + ty: rir::Ty::Integer, + }, + ); + expect![" %var_0 = ashr i64 %var_1, %var_2"].assert_eq(&inst.to_qir(&rir::Program::default())); +} + +#[test] +fn bitwise_and_integer_literals() { + let inst = rir::Instruction::BitwiseAnd( + rir::Operand::Literal(rir::Literal::Integer(2)), + rir::Operand::Literal(rir::Literal::Integer(5)), + rir::Variable { + variable_id: rir::VariableId(0), + ty: rir::Ty::Integer, + }, + ); + expect![" %var_0 = and i64 2, 5"].assert_eq(&inst.to_qir(&rir::Program::default())); +} + +#[test] +fn bitwise_add_integer_variables() { + let inst = rir::Instruction::BitwiseAnd( + rir::Operand::Variable(rir::Variable { + variable_id: rir::VariableId(1), + ty: rir::Ty::Integer, + }), + rir::Operand::Variable(rir::Variable { + variable_id: rir::VariableId(2), + ty: rir::Ty::Integer, + }), + rir::Variable { + variable_id: rir::VariableId(0), + ty: rir::Ty::Integer, + }, + ); + expect![" %var_0 = and i64 %var_1, %var_2"].assert_eq(&inst.to_qir(&rir::Program::default())); +} + +#[test] +fn bitwise_not_integer_literals() { + let inst = rir::Instruction::BitwiseNot( + rir::Operand::Literal(rir::Literal::Integer(2)), + rir::Variable { + variable_id: rir::VariableId(0), + ty: rir::Ty::Integer, + }, + ); + expect![" %var_0 = xor i64 2, -1"].assert_eq(&inst.to_qir(&rir::Program::default())); +} + +#[test] +fn bitwise_not_integer_variables() { + let inst = rir::Instruction::BitwiseNot( + rir::Operand::Variable(rir::Variable { + variable_id: rir::VariableId(1), + ty: rir::Ty::Integer, + }), + rir::Variable { + variable_id: rir::VariableId(0), + ty: rir::Ty::Integer, + }, + ); + expect![" %var_0 = xor i64 %var_1, -1"].assert_eq(&inst.to_qir(&rir::Program::default())); +} + +#[test] +fn bitwise_or_integer_literals() { + let inst = rir::Instruction::BitwiseOr( + rir::Operand::Literal(rir::Literal::Integer(2)), + 
rir::Operand::Literal(rir::Literal::Integer(5)), + rir::Variable { + variable_id: rir::VariableId(0), + ty: rir::Ty::Integer, + }, + ); + expect![" %var_0 = or i64 2, 5"].assert_eq(&inst.to_qir(&rir::Program::default())); +} + +#[test] +fn bitwise_or_integer_variables() { + let inst = rir::Instruction::BitwiseOr( + rir::Operand::Variable(rir::Variable { + variable_id: rir::VariableId(1), + ty: rir::Ty::Integer, + }), + rir::Operand::Variable(rir::Variable { + variable_id: rir::VariableId(2), + ty: rir::Ty::Integer, + }), + rir::Variable { + variable_id: rir::VariableId(0), + ty: rir::Ty::Integer, + }, + ); + expect![" %var_0 = or i64 %var_1, %var_2"].assert_eq(&inst.to_qir(&rir::Program::default())); +} + +#[test] +fn bitwise_xor_integer_literals() { + let inst = rir::Instruction::BitwiseXor( + rir::Operand::Literal(rir::Literal::Integer(2)), + rir::Operand::Literal(rir::Literal::Integer(5)), + rir::Variable { + variable_id: rir::VariableId(0), + ty: rir::Ty::Integer, + }, + ); + expect![" %var_0 = xor i64 2, 5"].assert_eq(&inst.to_qir(&rir::Program::default())); +} + +#[test] +fn bitwise_xor_integer_variables() { + let inst = rir::Instruction::BitwiseXor( + rir::Operand::Variable(rir::Variable { + variable_id: rir::VariableId(1), + ty: rir::Ty::Integer, + }), + rir::Operand::Variable(rir::Variable { + variable_id: rir::VariableId(2), + ty: rir::Ty::Integer, + }), + rir::Variable { + variable_id: rir::VariableId(0), + ty: rir::Ty::Integer, + }, + ); + expect![" %var_0 = xor i64 %var_1, %var_2"].assert_eq(&inst.to_qir(&rir::Program::default())); +} + +#[test] +fn icmp_eq_integer_literals() { + let inst = rir::Instruction::Icmp( + rir::ConditionCode::Eq, + rir::Operand::Literal(rir::Literal::Integer(2)), + rir::Operand::Literal(rir::Literal::Integer(5)), + rir::Variable { + variable_id: rir::VariableId(0), + ty: rir::Ty::Boolean, + }, + ); + expect![" %var_0 = icmp eq i64 2, 5"].assert_eq(&inst.to_qir(&rir::Program::default())); +} + +#[test] +fn 
icmp_eq_integer_variables() { + let inst = rir::Instruction::Icmp( + rir::ConditionCode::Eq, + rir::Operand::Variable(rir::Variable { + variable_id: rir::VariableId(1), + ty: rir::Ty::Integer, + }), + rir::Operand::Variable(rir::Variable { + variable_id: rir::VariableId(2), + ty: rir::Ty::Integer, + }), + rir::Variable { + variable_id: rir::VariableId(0), + ty: rir::Ty::Boolean, + }, + ); + expect![" %var_0 = icmp eq i64 %var_1, %var_2"] + .assert_eq(&inst.to_qir(&rir::Program::default())); +} + +#[test] +fn icmp_ne_integer_literals() { + let inst = rir::Instruction::Icmp( + rir::ConditionCode::Ne, + rir::Operand::Literal(rir::Literal::Integer(2)), + rir::Operand::Literal(rir::Literal::Integer(5)), + rir::Variable { + variable_id: rir::VariableId(0), + ty: rir::Ty::Boolean, + }, + ); + expect![" %var_0 = icmp ne i64 2, 5"].assert_eq(&inst.to_qir(&rir::Program::default())); +} + +#[test] +fn icmp_ne_integer_variables() { + let inst = rir::Instruction::Icmp( + rir::ConditionCode::Ne, + rir::Operand::Variable(rir::Variable { + variable_id: rir::VariableId(1), + ty: rir::Ty::Integer, + }), + rir::Operand::Variable(rir::Variable { + variable_id: rir::VariableId(2), + ty: rir::Ty::Integer, + }), + rir::Variable { + variable_id: rir::VariableId(0), + ty: rir::Ty::Boolean, + }, + ); + expect![" %var_0 = icmp ne i64 %var_1, %var_2"] + .assert_eq(&inst.to_qir(&rir::Program::default())); +} +#[test] +fn icmp_slt_integer_literals() { + let inst = rir::Instruction::Icmp( + rir::ConditionCode::Slt, + rir::Operand::Literal(rir::Literal::Integer(2)), + rir::Operand::Literal(rir::Literal::Integer(5)), + rir::Variable { + variable_id: rir::VariableId(0), + ty: rir::Ty::Boolean, + }, + ); + expect![" %var_0 = icmp slt i64 2, 5"].assert_eq(&inst.to_qir(&rir::Program::default())); +} + +#[test] +fn icmp_slt_integer_variables() { + let inst = rir::Instruction::Icmp( + rir::ConditionCode::Slt, + rir::Operand::Variable(rir::Variable { + variable_id: rir::VariableId(1), + ty: 
rir::Ty::Integer, + }), + rir::Operand::Variable(rir::Variable { + variable_id: rir::VariableId(2), + ty: rir::Ty::Integer, + }), + rir::Variable { + variable_id: rir::VariableId(0), + ty: rir::Ty::Boolean, + }, + ); + expect![" %var_0 = icmp slt i64 %var_1, %var_2"] + .assert_eq(&inst.to_qir(&rir::Program::default())); +} +#[test] +fn icmp_sle_integer_literals() { + let inst = rir::Instruction::Icmp( + rir::ConditionCode::Sle, + rir::Operand::Literal(rir::Literal::Integer(2)), + rir::Operand::Literal(rir::Literal::Integer(5)), + rir::Variable { + variable_id: rir::VariableId(0), + ty: rir::Ty::Boolean, + }, + ); + expect![" %var_0 = icmp sle i64 2, 5"].assert_eq(&inst.to_qir(&rir::Program::default())); +} + +#[test] +fn icmp_sle_integer_variables() { + let inst = rir::Instruction::Icmp( + rir::ConditionCode::Sle, + rir::Operand::Variable(rir::Variable { + variable_id: rir::VariableId(1), + ty: rir::Ty::Integer, + }), + rir::Operand::Variable(rir::Variable { + variable_id: rir::VariableId(2), + ty: rir::Ty::Integer, + }), + rir::Variable { + variable_id: rir::VariableId(0), + ty: rir::Ty::Boolean, + }, + ); + expect![" %var_0 = icmp sle i64 %var_1, %var_2"] + .assert_eq(&inst.to_qir(&rir::Program::default())); +} +#[test] +fn icmp_sgt_integer_literals() { + let inst = rir::Instruction::Icmp( + rir::ConditionCode::Sgt, + rir::Operand::Literal(rir::Literal::Integer(2)), + rir::Operand::Literal(rir::Literal::Integer(5)), + rir::Variable { + variable_id: rir::VariableId(0), + ty: rir::Ty::Boolean, + }, + ); + expect![" %var_0 = icmp sgt i64 2, 5"].assert_eq(&inst.to_qir(&rir::Program::default())); +} + +#[test] +fn icmp_sgt_integer_variables() { + let inst = rir::Instruction::Icmp( + rir::ConditionCode::Sgt, + rir::Operand::Variable(rir::Variable { + variable_id: rir::VariableId(1), + ty: rir::Ty::Integer, + }), + rir::Operand::Variable(rir::Variable { + variable_id: rir::VariableId(2), + ty: rir::Ty::Integer, + }), + rir::Variable { + variable_id: rir::VariableId(0), 
+ ty: rir::Ty::Boolean, + }, + ); + expect![" %var_0 = icmp sgt i64 %var_1, %var_2"] + .assert_eq(&inst.to_qir(&rir::Program::default())); +} +#[test] +fn icmp_sge_integer_literals() { + let inst = rir::Instruction::Icmp( + rir::ConditionCode::Sge, + rir::Operand::Literal(rir::Literal::Integer(2)), + rir::Operand::Literal(rir::Literal::Integer(5)), + rir::Variable { + variable_id: rir::VariableId(0), + ty: rir::Ty::Boolean, + }, + ); + expect![" %var_0 = icmp sge i64 2, 5"].assert_eq(&inst.to_qir(&rir::Program::default())); +} + +#[test] +fn icmp_sge_integer_variables() { + let inst = rir::Instruction::Icmp( + rir::ConditionCode::Sge, + rir::Operand::Variable(rir::Variable { + variable_id: rir::VariableId(1), + ty: rir::Ty::Integer, + }), + rir::Operand::Variable(rir::Variable { + variable_id: rir::VariableId(2), + ty: rir::Ty::Integer, + }), + rir::Variable { + variable_id: rir::VariableId(0), + ty: rir::Ty::Boolean, + }, + ); + expect![" %var_0 = icmp sge i64 %var_1, %var_2"] + .assert_eq(&inst.to_qir(&rir::Program::default())); +} + +#[test] +fn mul_integer_literals() { + let inst = rir::Instruction::Mul( + rir::Operand::Literal(rir::Literal::Integer(2)), + rir::Operand::Literal(rir::Literal::Integer(5)), + rir::Variable { + variable_id: rir::VariableId(0), + ty: rir::Ty::Integer, + }, + ); + expect![" %var_0 = mul i64 2, 5"].assert_eq(&inst.to_qir(&rir::Program::default())); +} + +#[test] +fn mul_integer_variables() { + let inst = rir::Instruction::Mul( + rir::Operand::Variable(rir::Variable { + variable_id: rir::VariableId(1), + ty: rir::Ty::Integer, + }), + rir::Operand::Variable(rir::Variable { + variable_id: rir::VariableId(2), + ty: rir::Ty::Integer, + }), + rir::Variable { + variable_id: rir::VariableId(0), + ty: rir::Ty::Integer, + }, + ); + expect![" %var_0 = mul i64 %var_1, %var_2"].assert_eq(&inst.to_qir(&rir::Program::default())); +} + +#[test] +fn sdiv_integer_literals() { + let inst = rir::Instruction::Sdiv( + 
rir::Operand::Literal(rir::Literal::Integer(2)), + rir::Operand::Literal(rir::Literal::Integer(5)), + rir::Variable { + variable_id: rir::VariableId(0), + ty: rir::Ty::Integer, + }, + ); + expect![" %var_0 = sdiv i64 2, 5"].assert_eq(&inst.to_qir(&rir::Program::default())); +} + +#[test] +fn sdiv_integer_variables() { + let inst = rir::Instruction::Sdiv( + rir::Operand::Variable(rir::Variable { + variable_id: rir::VariableId(1), + ty: rir::Ty::Integer, + }), + rir::Operand::Variable(rir::Variable { + variable_id: rir::VariableId(2), + ty: rir::Ty::Integer, + }), + rir::Variable { + variable_id: rir::VariableId(0), + ty: rir::Ty::Integer, + }, + ); + expect![" %var_0 = sdiv i64 %var_1, %var_2"].assert_eq(&inst.to_qir(&rir::Program::default())); +} + +#[test] +fn shl_integer_literals() { + let inst = rir::Instruction::Shl( + rir::Operand::Literal(rir::Literal::Integer(2)), + rir::Operand::Literal(rir::Literal::Integer(5)), + rir::Variable { + variable_id: rir::VariableId(0), + ty: rir::Ty::Integer, + }, + ); + expect![" %var_0 = shl i64 2, 5"].assert_eq(&inst.to_qir(&rir::Program::default())); +} + +#[test] +fn shl_integer_variables() { + let inst = rir::Instruction::Shl( + rir::Operand::Variable(rir::Variable { + variable_id: rir::VariableId(1), + ty: rir::Ty::Integer, + }), + rir::Operand::Variable(rir::Variable { + variable_id: rir::VariableId(2), + ty: rir::Ty::Integer, + }), + rir::Variable { + variable_id: rir::VariableId(0), + ty: rir::Ty::Integer, + }, + ); + expect![" %var_0 = shl i64 %var_1, %var_2"].assert_eq(&inst.to_qir(&rir::Program::default())); +} + +#[test] +fn srem_integer_literals() { + let inst = rir::Instruction::Srem( + rir::Operand::Literal(rir::Literal::Integer(2)), + rir::Operand::Literal(rir::Literal::Integer(5)), + rir::Variable { + variable_id: rir::VariableId(0), + ty: rir::Ty::Integer, + }, + ); + expect![" %var_0 = srem i64 2, 5"].assert_eq(&inst.to_qir(&rir::Program::default())); +} + +#[test] +fn srem_integer_variables() { + let inst 
= rir::Instruction::Srem( + rir::Operand::Variable(rir::Variable { + variable_id: rir::VariableId(1), + ty: rir::Ty::Integer, + }), + rir::Operand::Variable(rir::Variable { + variable_id: rir::VariableId(2), + ty: rir::Ty::Integer, + }), + rir::Variable { + variable_id: rir::VariableId(0), + ty: rir::Ty::Integer, + }, + ); + expect![" %var_0 = srem i64 %var_1, %var_2"].assert_eq(&inst.to_qir(&rir::Program::default())); +} + +#[test] +fn sub_integer_literals() { + let inst = rir::Instruction::Sub( + rir::Operand::Literal(rir::Literal::Integer(2)), + rir::Operand::Literal(rir::Literal::Integer(5)), + rir::Variable { + variable_id: rir::VariableId(0), + ty: rir::Ty::Integer, + }, + ); + expect![" %var_0 = sub i64 2, 5"].assert_eq(&inst.to_qir(&rir::Program::default())); +} + +#[test] +fn sub_integer_variables() { + let inst = rir::Instruction::Sub( + rir::Operand::Variable(rir::Variable { + variable_id: rir::VariableId(1), + ty: rir::Ty::Integer, + }), + rir::Operand::Variable(rir::Variable { + variable_id: rir::VariableId(2), + ty: rir::Ty::Integer, + }), + rir::Variable { + variable_id: rir::VariableId(0), + ty: rir::Ty::Integer, + }, + ); + expect![" %var_0 = sub i64 %var_1, %var_2"].assert_eq(&inst.to_qir(&rir::Program::default())); +} + +#[test] +fn convert_integer_literal_to_double() { + let inst = rir::Instruction::Convert( + rir::Operand::Literal(rir::Literal::Integer(2)), + rir::Variable { + variable_id: rir::VariableId(0), + ty: rir::Ty::Double, + }, + ); + expect![" %var_0 = sitofp i64 2 to double"].assert_eq(&inst.to_qir(&rir::Program::default())); +} + +#[test] +fn convert_integer_variable_to_double() { + let inst = rir::Instruction::Convert( + rir::Operand::Variable(rir::Variable { + variable_id: rir::VariableId(1), + ty: rir::Ty::Integer, + }), + rir::Variable { + variable_id: rir::VariableId(0), + ty: rir::Ty::Double, + }, + ); + expect![" %var_0 = sitofp i64 %var_1 to double"] + .assert_eq(&inst.to_qir(&rir::Program::default())); +} diff --git 
a/source/compiler/qsc_codegen/src/qir/v2/instruction_tests/invalid.rs b/source/compiler/qsc_codegen/src/qir/v2/instruction_tests/invalid.rs new file mode 100644 index 0000000000..54dd71713d --- /dev/null +++ b/source/compiler/qsc_codegen/src/qir/v2/instruction_tests/invalid.rs @@ -0,0 +1,170 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. + +use super::super::ToQir; +use qsc_rir::rir; + +#[test] +#[should_panic(expected = "mismatched input types (i64, double) for add")] +fn add_mismatched_literal_input_tys_should_panic() { + let inst = rir::Instruction::Add( + rir::Operand::Literal(rir::Literal::Integer(2)), + rir::Operand::Literal(rir::Literal::Double(1.0)), + rir::Variable { + variable_id: rir::VariableId(0), + ty: rir::Ty::Integer, + }, + ); + let _ = &inst.to_qir(&rir::Program::default()); +} + +#[test] +#[should_panic(expected = "mismatched input/output types (i64, double) for add")] +fn add_mismatched_literal_input_output_tys_should_panic() { + let inst = rir::Instruction::Add( + rir::Operand::Literal(rir::Literal::Integer(2)), + rir::Operand::Literal(rir::Literal::Integer(5)), + rir::Variable { + variable_id: rir::VariableId(0), + ty: rir::Ty::Double, + }, + ); + let _ = &inst.to_qir(&rir::Program::default()); +} + +#[test] +#[should_panic(expected = "mismatched input types (i64, double) for add")] +fn add_mismatched_variable_input_tys_should_panic() { + let inst = rir::Instruction::Add( + rir::Operand::Variable(rir::Variable { + variable_id: rir::VariableId(1), + ty: rir::Ty::Integer, + }), + rir::Operand::Variable(rir::Variable { + variable_id: rir::VariableId(2), + ty: rir::Ty::Double, + }), + rir::Variable { + variable_id: rir::VariableId(0), + ty: rir::Ty::Integer, + }, + ); + let _ = &inst.to_qir(&rir::Program::default()); +} + +#[test] +#[should_panic(expected = "mismatched input/output types (i64, double) for add")] +fn add_mismatched_variable_input_output_tys_should_panic() { + let inst = rir::Instruction::Add( + 
rir::Operand::Variable(rir::Variable { + variable_id: rir::VariableId(1), + ty: rir::Ty::Integer, + }), + rir::Operand::Variable(rir::Variable { + variable_id: rir::VariableId(2), + ty: rir::Ty::Integer, + }), + rir::Variable { + variable_id: rir::VariableId(0), + ty: rir::Ty::Double, + }, + ); + let _ = &inst.to_qir(&rir::Program::default()); +} + +#[test] +#[should_panic(expected = "mismatched input types (i64, double) for and")] +fn bitwise_and_mismatched_literal_input_tys_should_panic() { + let inst = rir::Instruction::BitwiseAnd( + rir::Operand::Literal(rir::Literal::Integer(2)), + rir::Operand::Literal(rir::Literal::Double(1.0)), + rir::Variable { + variable_id: rir::VariableId(0), + ty: rir::Ty::Integer, + }, + ); + let _ = &inst.to_qir(&rir::Program::default()); +} + +#[test] +#[should_panic(expected = "mismatched input/output types (i64, double) for and")] +fn bitwise_and_mismatched_literal_input_output_tys_should_panic() { + let inst = rir::Instruction::BitwiseAnd( + rir::Operand::Literal(rir::Literal::Integer(2)), + rir::Operand::Literal(rir::Literal::Integer(5)), + rir::Variable { + variable_id: rir::VariableId(0), + ty: rir::Ty::Double, + }, + ); + let _ = &inst.to_qir(&rir::Program::default()); +} + +#[test] +#[should_panic(expected = "mismatched input types (i64, double) for and")] +fn bitwise_and_mismatched_variable_input_tys_should_panic() { + let inst = rir::Instruction::BitwiseAnd( + rir::Operand::Variable(rir::Variable { + variable_id: rir::VariableId(1), + ty: rir::Ty::Integer, + }), + rir::Operand::Variable(rir::Variable { + variable_id: rir::VariableId(2), + ty: rir::Ty::Double, + }), + rir::Variable { + variable_id: rir::VariableId(0), + ty: rir::Ty::Integer, + }, + ); + let _ = &inst.to_qir(&rir::Program::default()); +} + +#[test] +#[should_panic(expected = "mismatched input/output types (i64, double) for and")] +fn bitwise_and_mismatched_variable_input_output_tys_should_panic() { + let inst = rir::Instruction::BitwiseAnd( + 
rir::Operand::Variable(rir::Variable { + variable_id: rir::VariableId(1), + ty: rir::Ty::Integer, + }), + rir::Operand::Variable(rir::Variable { + variable_id: rir::VariableId(2), + ty: rir::Ty::Integer, + }), + rir::Variable { + variable_id: rir::VariableId(0), + ty: rir::Ty::Double, + }, + ); + let _ = &inst.to_qir(&rir::Program::default()); +} + +#[test] +#[should_panic(expected = "unsupported type i1 for add")] +fn add_bool_should_panic() { + let inst = rir::Instruction::Add( + rir::Operand::Literal(rir::Literal::Bool(true)), + rir::Operand::Literal(rir::Literal::Bool(false)), + rir::Variable { + variable_id: rir::VariableId(0), + ty: rir::Ty::Boolean, + }, + ); + let _ = &inst.to_qir(&rir::Program::default()); +} + +#[test] +#[should_panic(expected = "unsupported output type i64 for icmp")] +fn icmp_with_non_boolean_result_var_should_panic() { + let inst = rir::Instruction::Icmp( + rir::ConditionCode::Eq, + rir::Operand::Literal(rir::Literal::Integer(2)), + rir::Operand::Literal(rir::Literal::Integer(5)), + rir::Variable { + variable_id: rir::VariableId(0), + ty: rir::Ty::Integer, + }, + ); + let _ = inst.to_qir(&rir::Program::default()); +} diff --git a/source/compiler/qsc_codegen/src/qir/v2/instruction_tests/load.rs b/source/compiler/qsc_codegen/src/qir/v2/instruction_tests/load.rs new file mode 100644 index 0000000000..4204d0788a --- /dev/null +++ b/source/compiler/qsc_codegen/src/qir/v2/instruction_tests/load.rs @@ -0,0 +1,42 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. 
+ +use crate::qir::v2::ToQir; +use expect_test::expect; +use qsc_rir::rir; + +#[test] +fn load_integer_from_pointer() { + let inst = rir::Instruction::Advanced(rir::AdvancedInstr::Load( + rir::Variable::new_ptr(rir::VariableId(1)), + rir::Variable::new_integer(rir::VariableId(0)), + )); + expect![" %var_0 = load i64, ptr %var_1"].assert_eq(&inst.to_qir(&rir::Program::default())); +} + +#[test] +fn load_bool_from_pointer() { + let inst = rir::Instruction::Advanced(rir::AdvancedInstr::Load( + rir::Variable::new_ptr(rir::VariableId(1)), + rir::Variable::new_boolean(rir::VariableId(0)), + )); + expect![" %var_0 = load i1, ptr %var_1"].assert_eq(&inst.to_qir(&rir::Program::default())); +} + +#[test] +fn load_double_from_pointer() { + let inst = rir::Instruction::Advanced(rir::AdvancedInstr::Load( + rir::Variable::new_ptr(rir::VariableId(1)), + rir::Variable::new_double(rir::VariableId(0)), + )); + expect![" %var_0 = load double, ptr %var_1"].assert_eq(&inst.to_qir(&rir::Program::default())); +} + +#[test] +fn load_pointer_from_pointer() { + let inst = rir::Instruction::Advanced(rir::AdvancedInstr::Load( + rir::Variable::new_ptr(rir::VariableId(1)), + rir::Variable::new_ptr(rir::VariableId(0)), + )); + expect![" %var_0 = load ptr, ptr %var_1"].assert_eq(&inst.to_qir(&rir::Program::default())); +} diff --git a/source/compiler/qsc_codegen/src/qir/v2/instruction_tests/store.rs b/source/compiler/qsc_codegen/src/qir/v2/instruction_tests/store.rs new file mode 100644 index 0000000000..7820f4ac12 --- /dev/null +++ b/source/compiler/qsc_codegen/src/qir/v2/instruction_tests/store.rs @@ -0,0 +1,51 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. 
+ +use crate::qir::v2::ToQir; +use expect_test::expect; +use qsc_rir::rir; + +#[test] +fn store_integer_literal_to_pointer() { + let inst = rir::Instruction::Store( + rir::Operand::Literal(rir::Literal::Integer(5)), + rir::Variable::new_ptr(rir::VariableId(0)), + ); + expect![" store i64 5, ptr %var_0"].assert_eq(&inst.to_qir(&rir::Program::default())); +} + +#[test] +fn store_integer_variable_to_pointer() { + let inst = rir::Instruction::Store( + rir::Operand::Variable(rir::Variable::new_integer(rir::VariableId(1))), + rir::Variable::new_ptr(rir::VariableId(0)), + ); + expect![" store i64 %var_1, ptr %var_0"].assert_eq(&inst.to_qir(&rir::Program::default())); +} + +#[test] +fn store_bool_literal_to_pointer() { + let inst = rir::Instruction::Store( + rir::Operand::Literal(rir::Literal::Bool(true)), + rir::Variable::new_ptr(rir::VariableId(0)), + ); + expect![" store i1 true, ptr %var_0"].assert_eq(&inst.to_qir(&rir::Program::default())); +} + +#[test] +fn store_double_literal_to_pointer() { + let inst = rir::Instruction::Store( + rir::Operand::Literal(rir::Literal::Double(2.5)), + rir::Variable::new_ptr(rir::VariableId(0)), + ); + expect![" store double 2.5, ptr %var_0"].assert_eq(&inst.to_qir(&rir::Program::default())); +} + +#[test] +fn store_pointer_literal_to_pointer() { + let inst = rir::Instruction::Store( + rir::Operand::Literal(rir::Literal::Pointer), + rir::Variable::new_ptr(rir::VariableId(0)), + ); + expect![" store ptr null, ptr %var_0"].assert_eq(&inst.to_qir(&rir::Program::default())); +} diff --git a/source/compiler/qsc_codegen/src/qir/v2/template.ll b/source/compiler/qsc_codegen/src/qir/v2/template.ll new file mode 100644 index 0000000000..e4c0ad408c --- /dev/null +++ b/source/compiler/qsc_codegen/src/qir/v2/template.ll @@ -0,0 +1,18 @@ +{} +{} + +attributes #0 = {{ "entry_point" "output_labeling_schema" "qir_profiles"="adaptive_profile" "required_num_qubits"="{}" "required_num_results"="{}" }} +attributes #1 = {{ "irreversible" }}{} + +; module 
flags + +!llvm.module.flags = !{{!0, !1, !2, !3, !4, !5, !6, !7}} + +!0 = !{{i32 1, !"qir_major_version", i32 2}} +!1 = !{{i32 7, !"qir_minor_version", i32 1}} +!2 = !{{i32 1, !"dynamic_qubit_management", i1 false}} +!3 = !{{i32 1, !"dynamic_result_management", i1 false}} +!4 = !{{i32 5, !"int_computations", !{{!"i64"}}}} +!5 = !{{i32 5, !"float_computations", !{{!"double"}}}} +!6 = !{{i32 7, !"backwards_branching", i2 3}} +!7 = !{{i32 1, !"arrays", i1 true}} diff --git a/source/compiler/qsc_codegen/src/qir/v2/tests.rs b/source/compiler/qsc_codegen/src/qir/v2/tests.rs new file mode 100644 index 0000000000..19facf0b9b --- /dev/null +++ b/source/compiler/qsc_codegen/src/qir/v2/tests.rs @@ -0,0 +1,237 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. + +use super::ToQir; +use expect_test::expect; +use qsc_rir::builder; +use qsc_rir::rir; + +#[test] +fn single_qubit_gate_decl_works() { + let decl = builder::x_decl(); + expect!["declare void @__quantum__qis__x__body(ptr)"] + .assert_eq(&decl.to_qir(&rir::Program::default())); +} + +#[test] +fn two_qubit_gate_decl_works() { + let decl = builder::cx_decl(); + expect!["declare void @__quantum__qis__cx__body(ptr, ptr)"] + .assert_eq(&decl.to_qir(&rir::Program::default())); +} + +#[test] +fn single_qubit_rotation_decl_works() { + let decl = builder::rx_decl(); + expect!["declare void @__quantum__qis__rx__body(double, ptr)"] + .assert_eq(&decl.to_qir(&rir::Program::default())); +} + +#[test] +fn measurement_decl_works() { + let decl = builder::m_decl(); + expect!["declare void @__quantum__qis__m__body(ptr, ptr) #1"] + .assert_eq(&decl.to_qir(&rir::Program::default())); +} + +#[test] +fn read_result_decl_works() { + let decl = builder::read_result_decl(); + expect!["declare i1 @__quantum__rt__read_result(ptr)"] + .assert_eq(&decl.to_qir(&rir::Program::default())); +} + +#[test] +fn result_record_decl_works() { + let decl = builder::result_record_decl(); + expect!["declare void 
@__quantum__rt__result_record_output(ptr, ptr)"] + .assert_eq(&decl.to_qir(&rir::Program::default())); +} + +#[test] +fn single_qubit_call() { + let mut program = rir::Program::default(); + program + .callables + .insert(rir::CallableId(0), builder::x_decl()); + let call = rir::Instruction::Call( + rir::CallableId(0), + vec![rir::Operand::Literal(rir::Literal::Qubit(0))], + None, + None, + ); + expect![" call void @__quantum__qis__x__body(ptr inttoptr (i64 0 to ptr))"] + .assert_eq(&call.to_qir(&program)); +} + +#[test] +fn qubit_rotation_call() { + let mut program = rir::Program::default(); + program + .callables + .insert(rir::CallableId(0), builder::rx_decl()); + let call = rir::Instruction::Call( + rir::CallableId(0), + vec![ + rir::Operand::Literal(rir::Literal::Double(std::f64::consts::PI)), + rir::Operand::Literal(rir::Literal::Qubit(0)), + ], + None, + None, + ); + expect![" call void @__quantum__qis__rx__body(double 3.141592653589793, ptr inttoptr (i64 0 to ptr))"] + .assert_eq(&call.to_qir(&program)); +} + +#[test] +fn qubit_rotation_round_number_call() { + let mut program = rir::Program::default(); + program + .callables + .insert(rir::CallableId(0), builder::rx_decl()); + let call = rir::Instruction::Call( + rir::CallableId(0), + vec![ + rir::Operand::Literal(rir::Literal::Double(3.0)), + rir::Operand::Literal(rir::Literal::Qubit(0)), + ], + None, + None, + ); + expect![" call void @__quantum__qis__rx__body(double 3.0, ptr inttoptr (i64 0 to ptr))"] + .assert_eq(&call.to_qir(&program)); +} + +#[test] +fn qubit_rotation_variable_angle_call() { + let mut program = rir::Program::default(); + program + .callables + .insert(rir::CallableId(0), builder::rx_decl()); + let call = rir::Instruction::Call( + rir::CallableId(0), + vec![ + rir::Operand::Variable(rir::Variable { + variable_id: rir::VariableId(0), + ty: rir::Ty::Double, + }), + rir::Operand::Literal(rir::Literal::Qubit(0)), + ], + None, + None, + ); + expect![" call void 
@__quantum__qis__rx__body(double %var_0, ptr inttoptr (i64 0 to ptr))"] + .assert_eq(&call.to_qir(&program)); +} + +#[test] +fn bell_program() { + let program = builder::bell_program(); + expect![[r#" + @0 = internal constant [4 x i8] c"0_a\00" + @1 = internal constant [6 x i8] c"1_a0r\00" + @2 = internal constant [6 x i8] c"2_a1r\00" + + declare void @__quantum__qis__h__body(ptr) + + declare void @__quantum__qis__cx__body(ptr, ptr) + + declare void @__quantum__qis__m__body(ptr, ptr) #1 + + declare void @__quantum__rt__array_record_output(i64, ptr) + + declare void @__quantum__rt__result_record_output(ptr, ptr) + + define i64 @ENTRYPOINT__main() #0 { + block_0: + call void @__quantum__qis__h__body(ptr inttoptr (i64 0 to ptr)) + call void @__quantum__qis__cx__body(ptr inttoptr (i64 0 to ptr), ptr inttoptr (i64 1 to ptr)) + call void @__quantum__qis__m__body(ptr inttoptr (i64 0 to ptr), ptr inttoptr (i64 0 to ptr)) + call void @__quantum__qis__m__body(ptr inttoptr (i64 1 to ptr), ptr inttoptr (i64 1 to ptr)) + call void @__quantum__rt__array_record_output(i64 2, ptr @0) + call void @__quantum__rt__result_record_output(ptr inttoptr (i64 0 to ptr), ptr @1) + call void @__quantum__rt__result_record_output(ptr inttoptr (i64 1 to ptr), ptr @2) + ret i64 0 + } + + attributes #0 = { "entry_point" "output_labeling_schema" "qir_profiles"="adaptive_profile" "required_num_qubits"="2" "required_num_results"="2" } + attributes #1 = { "irreversible" } + + ; module flags + + !llvm.module.flags = !{!0, !1, !2, !3, !4, !5, !6, !7} + + !0 = !{i32 1, !"qir_major_version", i32 2} + !1 = !{i32 7, !"qir_minor_version", i32 1} + !2 = !{i32 1, !"dynamic_qubit_management", i1 false} + !3 = !{i32 1, !"dynamic_result_management", i1 false} + !4 = !{i32 5, !"int_computations", !{!"i64"}} + !5 = !{i32 5, !"float_computations", !{!"double"}} + !6 = !{i32 7, !"backwards_branching", i2 3} + !7 = !{i32 1, !"arrays", i1 true} + "#]].assert_eq(&program.to_qir(&program)); +} + +#[test] +fn 
teleport_program() { + let program = builder::teleport_program(); + expect![[r#" + @0 = internal constant [4 x i8] c"0_r\00" + + declare void @__quantum__qis__h__body(ptr) + + declare void @__quantum__qis__z__body(ptr) + + declare void @__quantum__qis__x__body(ptr) + + declare void @__quantum__qis__cx__body(ptr, ptr) + + declare void @__quantum__qis__mresetz__body(ptr, ptr) #1 + + declare i1 @__quantum__rt__read_result(ptr) + + declare void @__quantum__rt__result_record_output(ptr, ptr) + + define i64 @ENTRYPOINT__main() #0 { + block_0: + call void @__quantum__qis__x__body(ptr inttoptr (i64 0 to ptr)) + call void @__quantum__qis__h__body(ptr inttoptr (i64 2 to ptr)) + call void @__quantum__qis__cx__body(ptr inttoptr (i64 2 to ptr), ptr inttoptr (i64 1 to ptr)) + call void @__quantum__qis__cx__body(ptr inttoptr (i64 0 to ptr), ptr inttoptr (i64 2 to ptr)) + call void @__quantum__qis__h__body(ptr inttoptr (i64 0 to ptr)) + call void @__quantum__qis__mresetz__body(ptr inttoptr (i64 0 to ptr), ptr inttoptr (i64 0 to ptr)) + %var_0 = call i1 @__quantum__rt__read_result(ptr inttoptr (i64 0 to ptr)) + br i1 %var_0, label %block_1, label %block_2 + block_1: + call void @__quantum__qis__z__body(ptr inttoptr (i64 1 to ptr)) + br label %block_2 + block_2: + call void @__quantum__qis__mresetz__body(ptr inttoptr (i64 2 to ptr), ptr inttoptr (i64 1 to ptr)) + %var_1 = call i1 @__quantum__rt__read_result(ptr inttoptr (i64 1 to ptr)) + br i1 %var_1, label %block_3, label %block_4 + block_3: + call void @__quantum__qis__x__body(ptr inttoptr (i64 1 to ptr)) + br label %block_4 + block_4: + call void @__quantum__qis__mresetz__body(ptr inttoptr (i64 1 to ptr), ptr inttoptr (i64 2 to ptr)) + call void @__quantum__rt__result_record_output(ptr inttoptr (i64 2 to ptr), ptr @0) + ret i64 0 + } + + attributes #0 = { "entry_point" "output_labeling_schema" "qir_profiles"="adaptive_profile" "required_num_qubits"="3" "required_num_results"="3" } + attributes #1 = { "irreversible" } + + ; module 
flags + + !llvm.module.flags = !{!0, !1, !2, !3, !4, !5, !6, !7} + + !0 = !{i32 1, !"qir_major_version", i32 2} + !1 = !{i32 7, !"qir_minor_version", i32 1} + !2 = !{i32 1, !"dynamic_qubit_management", i1 false} + !3 = !{i32 1, !"dynamic_result_management", i1 false} + !4 = !{i32 5, !"int_computations", !{!"i64"}} + !5 = !{i32 5, !"float_computations", !{!"double"}} + !6 = !{i32 7, !"backwards_branching", i2 3} + !7 = !{i32 1, !"arrays", i1 true} + "#]].assert_eq(&program.to_qir(&program)); +} diff --git a/source/compiler/qsc_data_structures/src/target.rs b/source/compiler/qsc_data_structures/src/target.rs index 4f3cf143a2..1349755fd8 100644 --- a/source/compiler/qsc_data_structures/src/target.rs +++ b/source/compiler/qsc_data_structures/src/target.rs @@ -10,7 +10,8 @@ bitflags! { const IntegerComputations = 0b0000_0010; const FloatingPointComputations = 0b0000_0100; const BackwardsBranching = 0b0000_1000; - const HigherLevelConstructs = 0b0001_0000; + const StaticSizedArrays = 0b0001_0000; + const HigherLevelConstructs = 0b1000_0000; } } @@ -24,6 +25,7 @@ impl std::str::FromStr for TargetCapabilityFlags { "IntegerComputations" => Ok(TargetCapabilityFlags::IntegerComputations), "FloatingPointComputations" => Ok(TargetCapabilityFlags::FloatingPointComputations), "BackwardsBranching" => Ok(TargetCapabilityFlags::BackwardsBranching), + "StaticSizedArrays" => Ok(TargetCapabilityFlags::StaticSizedArrays), "HigherLevelConstructs" => Ok(TargetCapabilityFlags::HigherLevelConstructs), "Unrestricted" => Ok(TargetCapabilityFlags::all()), _ => Err(()), @@ -45,6 +47,7 @@ pub enum Profile { Base, AdaptiveRI, AdaptiveRIF, + AdaptiveRIFLA, } impl Profile { @@ -55,6 +58,7 @@ impl Profile { Self::Base => "Base", Self::AdaptiveRI => "Adaptive_RI", Self::AdaptiveRIF => "Adaptive_RIF", + Self::AdaptiveRIFLA => "Adaptive_RIFLA", } } } @@ -68,6 +72,13 @@ impl From for TargetCapabilityFlags { Profile::AdaptiveRIF => { Self::Adaptive | Self::IntegerComputations | 
Self::FloatingPointComputations } + Profile::AdaptiveRIFLA => { + Self::Adaptive + | Self::IntegerComputations + | Self::FloatingPointComputations + | Self::BackwardsBranching + | Self::StaticSizedArrays + } } } } @@ -79,6 +90,7 @@ impl FromStr for Profile { match s.to_lowercase().as_str() { "adaptive_ri" => Ok(Self::AdaptiveRI), "adaptive_rif" => Ok(Self::AdaptiveRIF), + "adaptive_rifla" => Ok(Self::AdaptiveRIFLA), "base" => Ok(Self::Base), "unrestricted" => Ok(Self::Unrestricted), _ => Err(()), diff --git a/source/compiler/qsc_partial_eval/src/tests.rs b/source/compiler/qsc_partial_eval/src/tests.rs index 5bb38be81c..38dae7065c 100644 --- a/source/compiler/qsc_partial_eval/src/tests.rs +++ b/source/compiler/qsc_partial_eval/src/tests.rs @@ -22,7 +22,9 @@ use crate::{Error, PartialEvalConfig, ProgramEntry, partially_evaluate}; use expect_test::Expect; use qsc::{PackageType, incremental::Compiler}; use qsc_data_structures::{ - language_features::LanguageFeatures, source::SourceMap, target::TargetCapabilityFlags, + language_features::LanguageFeatures, + source::SourceMap, + target::{Profile, TargetCapabilityFlags}, }; use qsc_fir::fir::PackageStore; use qsc_frontend::compile::PackageStore as HirPackageStore; @@ -100,7 +102,7 @@ pub fn get_partial_evaluation_error_with_capabilities( pub fn get_rir_program(source: &str) -> Program { let maybe_program = compile_and_partially_evaluate( source, - TargetCapabilityFlags::all(), + Profile::AdaptiveRIF.into(), PartialEvalConfig { generate_debug_metadata: false, }, @@ -119,7 +121,7 @@ pub fn get_rir_program(source: &str) -> Program { pub fn get_rir_program_with_dbg_metadata(source: &str) -> Program { let maybe_program = compile_and_partially_evaluate( source, - TargetCapabilityFlags::all(), + Profile::AdaptiveRIF.into(), PartialEvalConfig { generate_debug_metadata: true, }, diff --git a/source/compiler/qsc_partial_eval/src/tests/arrays.rs b/source/compiler/qsc_partial_eval/src/tests/arrays.rs index bd75b34a10..a06035e6b4 
100644 --- a/source/compiler/qsc_partial_eval/src/tests/arrays.rs +++ b/source/compiler/qsc_partial_eval/src/tests/arrays.rs @@ -1102,7 +1102,7 @@ fn result_array_index_range_returns_length_as_end() { Call id(3), args( Integer(1), Tag(0, 3), ) Return config: Config: - capabilities: TargetCapabilityFlags(Adaptive | IntegerComputations | FloatingPointComputations | BackwardsBranching | HigherLevelConstructs) + capabilities: TargetCapabilityFlags(Adaptive | IntegerComputations | FloatingPointComputations) num_qubits: 2 num_results: 2 tags: diff --git a/source/compiler/qsc_partial_eval/src/tests/intrinsics.rs b/source/compiler/qsc_partial_eval/src/tests/intrinsics.rs index be5e66808c..6004bc44a9 100644 --- a/source/compiler/qsc_partial_eval/src/tests/intrinsics.rs +++ b/source/compiler/qsc_partial_eval/src/tests/intrinsics.rs @@ -1001,7 +1001,7 @@ fn call_to_check_zero_panics() { } #[test] -#[should_panic(expected = "`DrawRandomInt` is not a supported by partial evaluation")] +#[should_panic(expected = "NotAvailable(\"DrawRandomInt\", \"Std.Random.DrawRandomInt\"")] fn call_to_draw_random_int_panics() { _ = get_rir_program(indoc! { r#" @@ -1017,7 +1017,7 @@ fn call_to_draw_random_int_panics() { } #[test] -#[should_panic(expected = "`DrawRandomDouble` is not a supported by partial evaluation")] +#[should_panic(expected = "NotAvailable(\"DrawRandomDouble\", \"Std.Random.DrawRandomDouble\"")] fn call_to_draw_random_double_panics() { _ = get_rir_program(indoc! { r#" @@ -1033,7 +1033,7 @@ fn call_to_draw_random_double_panics() { } #[test] -#[should_panic(expected = "`DrawRandomBool` is not a supported by partial evaluation")] +#[should_panic(expected = "NotAvailable(\"DrawRandomBool\", \"Std.Random.DrawRandomBool\"")] fn call_to_draw_random_bool_panics() { _ = get_rir_program(indoc! 
{ r#" diff --git a/source/compiler/qsc_partial_eval/src/tests/loops.rs b/source/compiler/qsc_partial_eval/src/tests/loops.rs index bb93ccb35d..6fd0e95f0d 100644 --- a/source/compiler/qsc_partial_eval/src/tests/loops.rs +++ b/source/compiler/qsc_partial_eval/src/tests/loops.rs @@ -553,7 +553,7 @@ fn result_array_index_range_in_for_loop() { Variable(2, Integer) = Store Variable(8, Integer) Jump(3) config: Config: - capabilities: TargetCapabilityFlags(Adaptive | IntegerComputations | FloatingPointComputations | BackwardsBranching | HigherLevelConstructs) + capabilities: TargetCapabilityFlags(Adaptive | IntegerComputations | FloatingPointComputations) num_qubits: 2 num_results: 2 tags: diff --git a/source/compiler/qsc_partial_eval/src/tests/output_recording.rs b/source/compiler/qsc_partial_eval/src/tests/output_recording.rs index d79c23b5c9..9ba9a8edb9 100644 --- a/source/compiler/qsc_partial_eval/src/tests/output_recording.rs +++ b/source/compiler/qsc_partial_eval/src/tests/output_recording.rs @@ -88,7 +88,7 @@ fn output_recording_for_tuple_of_different_types() { Call id(6), args( Variable(1, Boolean), Tag(2, 5), ) Return config: Config: - capabilities: TargetCapabilityFlags(Adaptive | IntegerComputations | FloatingPointComputations | BackwardsBranching | HigherLevelConstructs) + capabilities: TargetCapabilityFlags(Adaptive | IntegerComputations | FloatingPointComputations) num_qubits: 1 num_results: 1 tags: @@ -187,7 +187,7 @@ fn output_recording_for_nested_tuples() { Call id(6), args( Variable(3, Boolean), Tag(6, 7), ) Return config: Config: - capabilities: TargetCapabilityFlags(Adaptive | IntegerComputations | FloatingPointComputations | BackwardsBranching | HigherLevelConstructs) + capabilities: TargetCapabilityFlags(Adaptive | IntegerComputations | FloatingPointComputations) num_qubits: 1 num_results: 1 tags: @@ -298,7 +298,7 @@ fn output_recording_for_tuple_of_arrays() { Call id(7), args( Variable(3, Boolean), Tag(4, 7), ) Return config: Config: - capabilities: 
TargetCapabilityFlags(Adaptive | IntegerComputations | FloatingPointComputations | BackwardsBranching | HigherLevelConstructs) + capabilities: TargetCapabilityFlags(Adaptive | IntegerComputations | FloatingPointComputations) num_qubits: 1 num_results: 1 tags: @@ -407,7 +407,7 @@ fn output_recording_for_array_of_tuples() { Call id(7), args( Variable(3, Boolean), Tag(6, 7), ) Return config: Config: - capabilities: TargetCapabilityFlags(Adaptive | IntegerComputations | FloatingPointComputations | BackwardsBranching | HigherLevelConstructs) + capabilities: TargetCapabilityFlags(Adaptive | IntegerComputations | FloatingPointComputations) num_qubits: 1 num_results: 1 tags: @@ -466,7 +466,7 @@ fn output_recording_for_literal_bool() { Call id(2), args( Bool(true), Tag(0, 3), ) Return config: Config: - capabilities: TargetCapabilityFlags(Adaptive | IntegerComputations | FloatingPointComputations | BackwardsBranching | HigherLevelConstructs) + capabilities: TargetCapabilityFlags(Adaptive | IntegerComputations | FloatingPointComputations) num_qubits: 0 num_results: 0 tags: @@ -519,7 +519,7 @@ fn output_recording_for_literal_double() { Call id(2), args( Double(42.1), Tag(0, 3), ) Return config: Config: - capabilities: TargetCapabilityFlags(Adaptive | IntegerComputations | FloatingPointComputations | BackwardsBranching | HigherLevelConstructs) + capabilities: TargetCapabilityFlags(Adaptive | IntegerComputations | FloatingPointComputations) num_qubits: 0 num_results: 0 tags: @@ -572,7 +572,7 @@ fn output_recording_for_literal_int() { Call id(2), args( Integer(42), Tag(0, 3), ) Return config: Config: - capabilities: TargetCapabilityFlags(Adaptive | IntegerComputations | FloatingPointComputations | BackwardsBranching | HigherLevelConstructs) + capabilities: TargetCapabilityFlags(Adaptive | IntegerComputations | FloatingPointComputations) num_qubits: 0 num_results: 0 tags: @@ -654,7 +654,7 @@ fn output_recording_for_mix_of_literal_and_variable() { Call id(5), args( Bool(true), 
Tag(2, 5), ) Return config: Config: - capabilities: TargetCapabilityFlags(Adaptive | IntegerComputations | FloatingPointComputations | BackwardsBranching | HigherLevelConstructs) + capabilities: TargetCapabilityFlags(Adaptive | IntegerComputations | FloatingPointComputations) num_qubits: 1 num_results: 1 tags: diff --git a/source/compiler/qsc_rir/src/passes.rs b/source/compiler/qsc_rir/src/passes.rs index 8ecceef0b4..ff696dbebf 100644 --- a/source/compiler/qsc_rir/src/passes.rs +++ b/source/compiler/qsc_rir/src/passes.rs @@ -3,6 +3,8 @@ mod build_dominator_graph; mod defer_meas; +mod insert_alloca_load; +mod prune_unneeded_stores; mod reindex_qubits; mod remap_block_ids; mod simplify_control_flow; @@ -13,7 +15,7 @@ mod unreachable_code_check; use build_dominator_graph::build_dominator_graph; use defer_meas::defer_measurements; -use qsc_data_structures::target::TargetCapabilityFlags; +use qsc_data_structures::target::{Profile, TargetCapabilityFlags}; use reindex_qubits::reindex_qubits; use remap_block_ids::remap_block_ids; use simplify_control_flow::simplify_control_flow; @@ -22,7 +24,13 @@ use ssa_transform::transform_to_ssa; pub use type_check::check_types; pub use unreachable_code_check::check_unreachable_code; -use crate::{rir::Program, utils::build_predecessors_map}; +use crate::{ + passes::{ + insert_alloca_load::insert_alloca_load_instrs, prune_unneeded_stores::prune_unneeded_stores, + }, + rir::Program, + utils::build_predecessors_map, +}; /// Run the default set of RIR check and transformation passes. 
/// This includes: @@ -39,15 +47,21 @@ pub fn check_and_transform(program: &mut Program) { check_unreachable_code(program); check_types(program); remap_block_ids(program); - let preds = build_predecessors_map(program); - transform_to_ssa(program, &preds); - let doms = build_dominator_graph(program, &preds); - check_ssa_form(program, &preds, &doms); - check_unreachable_code(program); - check_types(program); - if program.config.capabilities == TargetCapabilityFlags::empty() { - reindex_qubits(program); - defer_measurements(program); + if program.config.capabilities >= Profile::AdaptiveRIFLA.into() { + prune_unneeded_stores(program); + insert_alloca_load_instrs(program); + } else { + let preds = build_predecessors_map(program); + transform_to_ssa(program, &preds); + let doms = build_dominator_graph(program, &preds); + check_ssa_form(program, &preds, &doms); + check_unreachable_code(program); + check_types(program); + + if program.config.capabilities == TargetCapabilityFlags::empty() { + reindex_qubits(program); + defer_measurements(program); + } } } diff --git a/source/compiler/qsc_rir/src/passes/insert_alloca_load.rs b/source/compiler/qsc_rir/src/passes/insert_alloca_load.rs new file mode 100644 index 0000000000..e1811406ae --- /dev/null +++ b/source/compiler/qsc_rir/src/passes/insert_alloca_load.rs @@ -0,0 +1,252 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. + +#[cfg(test)] +mod tests; + +use qsc_data_structures::index_map::IndexMap; +use rustc_hash::{FxHashMap, FxHashSet}; + +use crate::{ + rir::{ + AdvancedInstr, BlockId, CallableId, Instruction, Operand, Program, Variable, VariableId, + }, + utils::{get_block_successors, get_variable_assignments}, +}; + +pub fn insert_alloca_load_instrs(program: &mut Program) { + // Get the next available variable ID for use in newly generated loads. 
+ let mut next_var_id = get_variable_assignments(program) + .iter() + .next_back() + .map(|(var_id, _)| var_id.successor()) + .unwrap_or_default(); + + for callable_id in program.all_callable_ids() { + process_callable(program, callable_id, &mut next_var_id); + } +} + +fn process_callable(program: &mut Program, callable_id: CallableId, next_var_id: &mut VariableId) { + let callable = program.get_callable(callable_id); + + let Some(entry_block_id) = callable.body else { + return; + }; + + let mut vars_to_alloca = IndexMap::default(); + let mut visited_blocks = FxHashSet::default(); + let mut blocks_to_visit = vec![entry_block_id]; + while let Some(block_id) = blocks_to_visit.pop() { + if visited_blocks.contains(&block_id) { + continue; + } + visited_blocks.insert(block_id); + add_alloca_load_to_block(program, block_id, &mut vars_to_alloca, next_var_id); + for successor_id in get_block_successors(program.get_block(block_id)) { + if !visited_blocks.contains(&successor_id) { + blocks_to_visit.push(successor_id); + } + } + } + + let mut alloca_instrs = Vec::new(); + for (_, variable) in vars_to_alloca.iter() { + alloca_instrs.push(AdvancedInstr::Alloca(*variable).into()); + } + let entry_block = program.get_block_mut(entry_block_id); + let new_instrs = alloca_instrs + .into_iter() + .chain(entry_block.0.drain(..)) + .collect(); + entry_block.0 = new_instrs; +} + +fn add_alloca_load_to_block( + program: &mut Program, + block_id: BlockId, + vars_to_alloca: &mut IndexMap, + next_var_id: &mut VariableId, +) { + let block = program.get_block_mut(block_id); + let instrs = block.0.drain(..).collect::>(); + let mut var_map = FxHashMap::default(); + for mut instr in instrs { + match &mut instr { + // Track that this is a value that needs to be allocated, and clear any previous loaded variables. 
+ Instruction::Store(operand, var) => { + vars_to_alloca.insert(var.variable_id, *var); + let new_operand = map_or_load_operand( + operand, + &mut var_map, + &mut block.0, + next_var_id, + should_load_operand(operand, vars_to_alloca), + ); + block.0.push(Instruction::Store(new_operand, *var)); + *next_var_id = next_var_id.successor(); + continue; + } + + // Replace any arguments with the new values of stored variables. + Instruction::Call(_, args, _, _) => { + *args = args + .iter() + .map(|arg| match arg { + Operand::Variable(var) => map_or_load_variable_to_operand( + *var, + &mut var_map, + &mut block.0, + next_var_id, + vars_to_alloca.contains_key(var.variable_id), + ), + Operand::Literal(_) => *arg, + }) + .collect(); + } + + // Replace the branch condition with the new value of the variable. + Instruction::Branch(var, _, _, _) => { + *var = map_or_load_variable( + *var, + &mut var_map, + &mut block.0, + next_var_id, + vars_to_alloca.contains_key(var.variable_id), + ); + } + + // Two variable instructions, replace left and right operands with new values. 
+ Instruction::Add(lhs, rhs, _) + | Instruction::Sub(lhs, rhs, _) + | Instruction::Mul(lhs, rhs, _) + | Instruction::Sdiv(lhs, rhs, _) + | Instruction::Srem(lhs, rhs, _) + | Instruction::Shl(lhs, rhs, _) + | Instruction::Ashr(lhs, rhs, _) + | Instruction::Fadd(lhs, rhs, _) + | Instruction::Fsub(lhs, rhs, _) + | Instruction::Fmul(lhs, rhs, _) + | Instruction::Fdiv(lhs, rhs, _) + | Instruction::Fcmp(_, lhs, rhs, _) + | Instruction::Icmp(_, lhs, rhs, _) + | Instruction::LogicalAnd(lhs, rhs, _) + | Instruction::LogicalOr(lhs, rhs, _) + | Instruction::BitwiseAnd(lhs, rhs, _) + | Instruction::BitwiseOr(lhs, rhs, _) + | Instruction::BitwiseXor(lhs, rhs, _) => { + *lhs = map_or_load_operand( + lhs, + &mut var_map, + &mut block.0, + next_var_id, + should_load_operand(lhs, vars_to_alloca), + ); + *rhs = map_or_load_operand( + rhs, + &mut var_map, + &mut block.0, + next_var_id, + should_load_operand(rhs, vars_to_alloca), + ); + } + + // Single variable instructions, replace operand with new value. + Instruction::BitwiseNot(operand, _) + | Instruction::LogicalNot(operand, _) + | Instruction::Convert(operand, _) => { + *operand = map_or_load_operand( + operand, + &mut var_map, + &mut block.0, + next_var_id, + should_load_operand(operand, vars_to_alloca), + ); + } + + // Phi nodes are handled separately in the SSA transformation, but need to be passed through + // like the unconditional terminators. + Instruction::Phi(..) | Instruction::Jump(..) 
| Instruction::Return => {} + + Instruction::Advanced(AdvancedInstr::Alloca(..)) => { + panic!("alloca not expected in alloca insertion") + } + Instruction::Advanced(AdvancedInstr::Load(..)) => { + panic!("load not expected in alloca insertion") + } + } + block.0.push(instr); + } +} + +fn should_load_operand(operand: &Operand, vars_to_alloca: &IndexMap) -> bool { + match operand { + Operand::Literal(_) => false, + Operand::Variable(var) => vars_to_alloca.contains_key(var.variable_id), + } +} + +fn map_or_load_operand( + operand: &Operand, + var_map: &mut FxHashMap, + instrs: &mut Vec, + next_var_id: &mut VariableId, + should_load: bool, +) -> Operand { + match operand { + Operand::Literal(_) => *operand, + Operand::Variable(var) => { + map_or_load_variable_to_operand(*var, var_map, instrs, next_var_id, should_load) + } + } +} + +fn map_or_load_variable_to_operand( + variable: Variable, + var_map: &mut FxHashMap, + instrs: &mut Vec, + next_var_id: &mut VariableId, + should_load: bool, +) -> Operand { + if let Some(operand) = var_map.get(&variable.variable_id) { + *operand + } else if should_load { + let new_var = Variable { + variable_id: *next_var_id, + ty: variable.ty, + }; + instrs.push(AdvancedInstr::Load(variable, new_var).into()); + var_map.insert(variable.variable_id, Operand::Variable(new_var)); + *next_var_id = next_var_id.successor(); + Operand::Variable(new_var) + } else { + Operand::Variable(variable) + } +} + +fn map_or_load_variable( + variable: Variable, + var_map: &mut FxHashMap, + instrs: &mut Vec, + next_var_id: &mut VariableId, + should_load: bool, +) -> Variable { + match var_map.get(&variable.variable_id) { + Some(Operand::Variable(var)) => *var, + Some(Operand::Literal(_)) => panic!("literal not expected in variable mapping"), + None => { + if should_load { + let new_var = Variable { + variable_id: *next_var_id, + ty: variable.ty, + }; + instrs.push(AdvancedInstr::Load(variable, new_var).into()); + var_map.insert(variable.variable_id, 
Operand::Variable(new_var)); + *next_var_id = next_var_id.successor(); + new_var + } else { + variable + } + } + } +} diff --git a/source/compiler/qsc_rir/src/passes/insert_alloca_load/tests.rs b/source/compiler/qsc_rir/src/passes/insert_alloca_load/tests.rs new file mode 100644 index 0000000000..ee37eee8f9 --- /dev/null +++ b/source/compiler/qsc_rir/src/passes/insert_alloca_load/tests.rs @@ -0,0 +1,209 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. + +use expect_test::expect; + +use crate::rir::{ + AdvancedInstr, Block, BlockId, CallableId, Instruction, Literal, Operand, Program, Variable, + VariableId, +}; + +use super::insert_alloca_load_instrs; + +#[test] +fn inserts_alloca_and_load_for_branch_and_call() { + let stored_var = Variable::new_boolean(VariableId(0)); + let mut program = Program::with_blocks(vec![ + ( + BlockId(0), + Block(vec![ + Instruction::Store(Operand::Literal(Literal::Bool(true)), stored_var), + Instruction::Call( + CallableId(1), + vec![Operand::Variable(stored_var)], + None, + None, + ), + Instruction::Branch(stored_var, BlockId(1), BlockId(2), None), + ]), + ), + (BlockId(1), Block(vec![Instruction::Return])), + (BlockId(2), Block(vec![Instruction::Return])), + ]); + + // Before + expect![[r#" + Block: + Variable(0, Boolean) = Store Bool(true) + Call id(1), args( Variable(0, Boolean), ) + Branch Variable(0, Boolean), 1, 2"#]] + .assert_eq(&program.get_block(BlockId(0)).to_string()); + + insert_alloca_load_instrs(&mut program); + + // After + expect![[r#" + Block: + Variable(0, Boolean) = Alloca + Variable(0, Boolean) = Store Bool(true) + Variable(2, Boolean) = Load Variable(0, Boolean) + Call id(1), args( Variable(2, Boolean), ) + Branch Variable(2, Boolean), 1, 2"#]] + .assert_eq(&program.get_block(BlockId(0)).to_string()); +} + +#[test] +fn reuses_single_load_within_block() { + let stored_var = Variable::new_integer(VariableId(0)); + let sum_var = Variable::new_integer(VariableId(1)); + let mut program = 
Program::with_blocks(vec![( + BlockId(0), + Block(vec![ + Instruction::Store(Operand::Literal(Literal::Integer(5)), stored_var), + Instruction::Add( + Operand::Variable(stored_var), + Operand::Variable(stored_var), + sum_var, + ), + Instruction::Call( + CallableId(1), + vec![Operand::Variable(stored_var)], + None, + None, + ), + Instruction::Return, + ]), + )]); + + // Before + expect![[r#" + Block: + Variable(0, Integer) = Store Integer(5) + Variable(1, Integer) = Add Variable(0, Integer), Variable(0, Integer) + Call id(1), args( Variable(0, Integer), ) + Return"#]] + .assert_eq(&program.get_block(BlockId(0)).to_string()); + + insert_alloca_load_instrs(&mut program); + + let block = program.get_block(BlockId(0)); + let load_count = block + .0 + .iter() + .filter(|instr| matches!(instr, Instruction::Advanced(AdvancedInstr::Load(..)))) + .count(); + assert_eq!( + load_count, 1, + "expected a single load for all uses within the block" + ); + + // After + expect![[r#" + Block: + Variable(0, Integer) = Alloca + Variable(0, Integer) = Store Integer(5) + Variable(3, Integer) = Load Variable(0, Integer) + Variable(1, Integer) = Add Variable(3, Integer), Variable(3, Integer) + Call id(1), args( Variable(3, Integer), ) + Return"#]] + .assert_eq(&block.to_string()); +} + +#[test] +fn inserts_load_in_successor_block() { + let stored_var = Variable::new_boolean(VariableId(0)); + let result_var = Variable::new_boolean(VariableId(1)); + let mut program = Program::with_blocks(vec![ + ( + BlockId(0), + Block(vec![ + Instruction::Store(Operand::Literal(Literal::Bool(false)), stored_var), + Instruction::Jump(BlockId(1)), + ]), + ), + ( + BlockId(1), + Block(vec![ + Instruction::LogicalNot(Operand::Variable(stored_var), result_var), + Instruction::Return, + ]), + ), + ]); + + // Before + expect![[r#" + Block: + Variable(0, Boolean) = Store Bool(false) + Jump(1)"#]] + .assert_eq(&program.get_block(BlockId(0)).to_string()); + expect![[r#" + Block: + Variable(1, Boolean) = LogicalNot 
Variable(0, Boolean) + Return"#]] + .assert_eq(&program.get_block(BlockId(1)).to_string()); + + insert_alloca_load_instrs(&mut program); + + // After block 0 + expect![[r#" + Block: + Variable(0, Boolean) = Alloca + Variable(0, Boolean) = Store Bool(false) + Jump(1)"#]] + .assert_eq(&program.get_block(BlockId(0)).to_string()); + + // After block 1 + expect![[r#" + Block: + Variable(3, Boolean) = Load Variable(0, Boolean) + Variable(1, Boolean) = LogicalNot Variable(3, Boolean) + Return"#]] + .assert_eq(&program.get_block(BlockId(1)).to_string()); +} + +#[test] +fn leaves_unrelated_operands_unloaded() { + let stored_var = Variable::new_boolean(VariableId(0)); + let unrelated_var = Variable::new_boolean(VariableId(1)); + let mut program = Program::with_blocks(vec![( + BlockId(0), + Block(vec![ + Instruction::Store(Operand::Literal(Literal::Bool(true)), stored_var), + Instruction::Call( + CallableId(1), + vec![Operand::Variable(unrelated_var)], + None, + None, + ), + Instruction::Return, + ]), + )]); + + // Before + expect![[r#" + Block: + Variable(0, Boolean) = Store Bool(true) + Call id(1), args( Variable(1, Boolean), ) + Return"#]] + .assert_eq(&program.get_block(BlockId(0)).to_string()); + + insert_alloca_load_instrs(&mut program); + + let block = program.get_block(BlockId(0)); + assert!( + block + .0 + .iter() + .all(|instr| !matches!(instr, Instruction::Advanced(AdvancedInstr::Load(..)))), + "no loads should be inserted for operands unrelated to stored variables", + ); + + // After + expect![[r#" + Block: + Variable(0, Boolean) = Alloca + Variable(0, Boolean) = Store Bool(true) + Call id(1), args( Variable(1, Boolean), ) + Return"#]] + .assert_eq(&block.to_string()); +} diff --git a/source/compiler/qsc_rir/src/passes/prune_unneeded_stores.rs b/source/compiler/qsc_rir/src/passes/prune_unneeded_stores.rs new file mode 100644 index 0000000000..3977c8aa5b --- /dev/null +++ b/source/compiler/qsc_rir/src/passes/prune_unneeded_stores.rs @@ -0,0 +1,176 @@ +// Copyright 
(c) Microsoft Corporation. +// Licensed under the MIT License. + +#[cfg(test)] +mod tests; + +use rustc_hash::{FxHashMap, FxHashSet}; + +use crate::{ + rir::{AdvancedInstr, CallableId, Instruction, Program, VariableId}, + utils::{get_block_successors, map_variable_use_in_block}, +}; + +pub fn prune_unneeded_stores(program: &mut Program) { + for callable_id in program.all_callable_ids() { + process_callable(program, callable_id); + } +} + +fn process_callable(program: &mut Program, callable_id: CallableId) { + let callable = program.get_callable(callable_id); + + let Some(entry_block_id) = callable.body else { + return; + }; + + // Walk all the blocks to track which variables are stored and which are used. + let mut stored_vars = FxHashSet::default(); + let mut used_vars = FxHashSet::default(); + let mut cross_block_used_vars = FxHashSet::default(); + let mut visited_blocks = FxHashSet::default(); + let mut blocks_to_visit = vec![entry_block_id]; + while let Some(block_id) = blocks_to_visit.pop() { + visited_blocks.insert(block_id); + let mut used_vars_in_block = FxHashSet::default(); + let stored_vars_before_block = stored_vars.clone(); + check_var_usage(program, block_id, &mut stored_vars, &mut used_vars_in_block); + + for var in used_vars_in_block { + if !used_vars.insert(var) || stored_vars_before_block.contains(&var) { + // This variable was already marked as used, which means it is used cross-block. + // Alternatively, the variable was stored before this block and is used here. + // Either means we shouldn't try to transform stores to this variable away. + cross_block_used_vars.insert(var); + } + } + + for successor_id in get_block_successors(program.get_block(block_id)) { + if !visited_blocks.contains(&successor_id) { + blocks_to_visit.push(successor_id); + } + } + } + + // Perform a intra-block-only version of the SSA transform to eliminate stores to variables that + // are only used within a single block. 
+ visited_blocks.clear(); + blocks_to_visit.push(entry_block_id); + while let Some(block_id) = blocks_to_visit.pop() { + visited_blocks.insert(block_id); + let block = program.get_block_mut(block_id); + let mut last_store_map = FxHashMap::default(); + map_variable_use_in_block(block, &mut last_store_map, &cross_block_used_vars); + + for successor_id in get_block_successors(program.get_block(block_id)) { + if !visited_blocks.contains(&successor_id) { + blocks_to_visit.push(successor_id); + } + } + } + + // Now that we know which variables are used, we can remove the stores to unused variables. + // Filtered stored_vars to only those that are used, then revisit the blocks to remove stores to unused variables. + stored_vars.retain(|var| used_vars.contains(var)); + visited_blocks.clear(); + blocks_to_visit.push(entry_block_id); + while let Some(block_id) = blocks_to_visit.pop() { + visited_blocks.insert(block_id); + let block = program.get_block_mut(block_id); + block.0.retain(|instr| match instr { + Instruction::Store(_, variable) => stored_vars.contains(&variable.variable_id), + _ => true, + }); + for successor_id in get_block_successors(program.get_block(block_id)) { + if !visited_blocks.contains(&successor_id) { + blocks_to_visit.push(successor_id); + } + } + } +} + +fn check_var_usage( + program: &mut Program, + block_id: crate::rir::BlockId, + stored_vars: &mut FxHashSet, + used_vars: &mut FxHashSet, +) { + let block = program.get_block(block_id); + for instr in &block.0 { + match instr { + Instruction::Store(operand, variable) => { + if let crate::rir::Operand::Variable(var) = operand { + used_vars.insert(var.variable_id); + } + stored_vars.insert(variable.variable_id); + } + + Instruction::Call(_, operands, variable, _) => { + if let Some(var) = variable + && stored_vars.contains(&var.variable_id) + { + panic!("calls should not use stored variables for capturing return values"); + } + for operand in operands { + if let crate::rir::Operand::Variable(var) = 
operand { + used_vars.insert(var.variable_id); + } + } + } + Instruction::Branch(variable, _, _, _) => { + used_vars.insert(variable.variable_id); + } + Instruction::Fcmp(_, operand0, operand1, variable) + | Instruction::Icmp(_, operand0, operand1, variable) + | Instruction::Add(operand0, operand1, variable) + | Instruction::Sub(operand0, operand1, variable) + | Instruction::Mul(operand0, operand1, variable) + | Instruction::Sdiv(operand0, operand1, variable) + | Instruction::Srem(operand0, operand1, variable) + | Instruction::Shl(operand0, operand1, variable) + | Instruction::Ashr(operand0, operand1, variable) + | Instruction::Fadd(operand0, operand1, variable) + | Instruction::Fsub(operand0, operand1, variable) + | Instruction::Fmul(operand0, operand1, variable) + | Instruction::Fdiv(operand0, operand1, variable) + | Instruction::LogicalAnd(operand0, operand1, variable) + | Instruction::LogicalOr(operand0, operand1, variable) + | Instruction::BitwiseAnd(operand0, operand1, variable) + | Instruction::BitwiseOr(operand0, operand1, variable) + | Instruction::BitwiseXor(operand0, operand1, variable) => { + for op in [operand0, operand1] { + if let crate::rir::Operand::Variable(var) = op { + used_vars.insert(var.variable_id); + } + } + assert!( + !stored_vars.contains(&variable.variable_id), + "arithmetic instructions should not use stored variables for capturing return values" + ); + used_vars.insert(variable.variable_id); + } + Instruction::LogicalNot(operand, variable) + | Instruction::BitwiseNot(operand, variable) + | Instruction::Convert(operand, variable) => { + if let crate::rir::Operand::Variable(var) = operand { + used_vars.insert(var.variable_id); + } + assert!( + !stored_vars.contains(&variable.variable_id), + "not instructions should not use stored variables for capturing return values" + ); + used_vars.insert(variable.variable_id); + } + + Instruction::Advanced(AdvancedInstr::Load(..)) => { + panic!("loads should not be present during store pruning") + } 
+ Instruction::Advanced(AdvancedInstr::Alloca(..)) => { + panic!("allocas should not be present during store pruning") + } + Instruction::Phi(..) => panic!("phis should not be present during store pruning"), + + Instruction::Return | Instruction::Jump(..) => {} + } + } +} diff --git a/source/compiler/qsc_rir/src/passes/prune_unneeded_stores/tests.rs b/source/compiler/qsc_rir/src/passes/prune_unneeded_stores/tests.rs new file mode 100644 index 0000000000..4a801230dc --- /dev/null +++ b/source/compiler/qsc_rir/src/passes/prune_unneeded_stores/tests.rs @@ -0,0 +1,198 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. + +use expect_test::expect; + +use crate::rir::{ + Block, BlockId, CallableId, Instruction, Literal, Operand, Program, Variable, VariableId, +}; + +use super::prune_unneeded_stores; + +#[test] +fn removes_store_without_use() { + let mut program = Program::with_blocks(vec![( + BlockId(0), + Block(vec![ + Instruction::Store( + Operand::Literal(Literal::Bool(true)), + Variable::new_boolean(VariableId(0)), + ), + Instruction::Return, + ]), + )]); + + // Before + expect![[r#" + Block: + Variable(0, Boolean) = Store Bool(true) + Return"#]] + .assert_eq(&program.get_block(BlockId(0)).to_string()); + + prune_unneeded_stores(&mut program); + + // After + expect![[r#" + Block: + Return"#]] + .assert_eq(&program.get_block(BlockId(0)).to_string()); +} + +#[test] +fn propagates_literal_within_block() { + let stored_var = Variable::new_boolean(VariableId(0)); + let mut program = Program::with_blocks(vec![( + BlockId(0), + Block(vec![ + Instruction::Store(Operand::Literal(Literal::Bool(false)), stored_var), + Instruction::LogicalNot( + Operand::Variable(stored_var), + Variable::new_boolean(VariableId(1)), + ), + Instruction::Return, + ]), + )]); + + // Before + expect![[r#" + Block: + Variable(0, Boolean) = Store Bool(false) + Variable(1, Boolean) = LogicalNot Variable(0, Boolean) + Return"#]] + 
.assert_eq(&program.get_block(BlockId(0)).to_string()); + + prune_unneeded_stores(&mut program); + + // After + expect![[r#" + Block: + Variable(1, Boolean) = LogicalNot Bool(false) + Return"#]] + .assert_eq(&program.get_block(BlockId(0)).to_string()); +} + +#[test] +fn keeps_store_for_cross_block_use() { + let stored_var = Variable::new_boolean(VariableId(0)); + let mut program = Program::with_blocks(vec![ + ( + BlockId(0), + Block(vec![ + Instruction::Store(Operand::Literal(Literal::Bool(true)), stored_var), + Instruction::Jump(BlockId(1)), + ]), + ), + ( + BlockId(1), + Block(vec![ + Instruction::LogicalNot( + Operand::Variable(stored_var), + Variable::new_boolean(VariableId(1)), + ), + Instruction::Return, + ]), + ), + ]); + + // Before + expect![[r#" + Block: + Variable(0, Boolean) = Store Bool(true) + Jump(1)"#]] + .assert_eq(&program.get_block(BlockId(0)).to_string()); + expect![[r#" + Block: + Variable(1, Boolean) = LogicalNot Variable(0, Boolean) + Return"#]] + .assert_eq(&program.get_block(BlockId(1)).to_string()); + + prune_unneeded_stores(&mut program); + + // After + expect![[r#" + Block: + Variable(0, Boolean) = Store Bool(true) + Jump(1)"#]] + .assert_eq(&program.get_block(BlockId(0)).to_string()); + + expect![[r#" + Block: + Variable(1, Boolean) = LogicalNot Variable(0, Boolean) + Return"#]] + .assert_eq(&program.get_block(BlockId(1)).to_string()); +} + +#[test] +fn removes_overwritten_store_and_keeps_last_value() { + let stored_var = Variable::new_boolean(VariableId(0)); + let mut program = Program::with_blocks(vec![( + BlockId(0), + Block(vec![ + Instruction::Store(Operand::Literal(Literal::Bool(true)), stored_var), + Instruction::Store(Operand::Literal(Literal::Bool(false)), stored_var), + Instruction::Call( + CallableId(1), + vec![Operand::Variable(stored_var)], + None, + None, + ), + Instruction::Return, + ]), + )]); + + // Before + expect![[r#" + Block: + Variable(0, Boolean) = Store Bool(true) + Variable(0, Boolean) = Store Bool(false) + Call 
id(1), args( Variable(0, Boolean), ) + Return"#]] + .assert_eq(&program.get_block(BlockId(0)).to_string()); + + prune_unneeded_stores(&mut program); + + // After + expect![[r#" + Block: + Call id(1), args( Bool(false), ) + Return"#]] + .assert_eq(&program.get_block(BlockId(0)).to_string()); +} + +#[test] +fn propagates_chained_stores() { + let source_var = Variable::new_boolean(VariableId(0)); + let alias_var = Variable::new_boolean(VariableId(1)); + let mut program = Program::with_blocks(vec![( + BlockId(0), + Block(vec![ + Instruction::Store(Operand::Literal(Literal::Bool(true)), source_var), + Instruction::Store(Operand::Variable(source_var), alias_var), + Instruction::Call( + CallableId(1), + vec![Operand::Variable(alias_var)], + None, + None, + ), + Instruction::Return, + ]), + )]); + + // Before + expect![[r#" + Block: + Variable(0, Boolean) = Store Bool(true) + Variable(1, Boolean) = Store Variable(0, Boolean) + Call id(1), args( Variable(1, Boolean), ) + Return"#]] + .assert_eq(&program.get_block(BlockId(0)).to_string()); + + prune_unneeded_stores(&mut program); + + // After + expect![[r#" + Block: + Call id(1), args( Bool(true), ) + Return"#]] + .assert_eq(&program.get_block(BlockId(0)).to_string()); +} diff --git a/source/compiler/qsc_rir/src/passes/ssa_check.rs b/source/compiler/qsc_rir/src/passes/ssa_check.rs index f94557a313..477334527c 100644 --- a/source/compiler/qsc_rir/src/passes/ssa_check.rs +++ b/source/compiler/qsc_rir/src/passes/ssa_check.rs @@ -226,6 +226,10 @@ fn get_variable_uses(program: &Program) -> IndexMap { panic!("Unexpected Store at {block_id:?}, instruction {idx}") } + + Instruction::Advanced(..) 
=> { + panic!("Unexpected Advanced instruction at {block_id:?}, instruction {idx}") + } } } } diff --git a/source/compiler/qsc_rir/src/passes/ssa_transform.rs b/source/compiler/qsc_rir/src/passes/ssa_transform.rs index 9992e70122..829b8eedc1 100644 --- a/source/compiler/qsc_rir/src/passes/ssa_transform.rs +++ b/source/compiler/qsc_rir/src/passes/ssa_transform.rs @@ -5,11 +5,11 @@ mod tests; use crate::{ - rir::{Block, BlockId, Instruction, Operand, Program, Variable, VariableId}, - utils::get_variable_assignments, + rir::{BlockId, Instruction, Operand, Program, Variable, VariableId}, + utils::{get_variable_assignments, map_variable_use_in_block}, }; use qsc_data_structures::index_map::IndexMap; -use rustc_hash::FxHashMap; +use rustc_hash::{FxHashMap, FxHashSet}; /// Transforms the program into Single Static Assignment (SSA) form by inserting phi nodes /// at the beginning of blocks where necessary, allowing the removal of store instructions. @@ -116,7 +116,7 @@ pub fn transform_to_ssa(program: &mut Program, preds: &IndexMap FxHashMap::default(), }; - map_variable_use_in_block(block, &mut var_map); + map_variable_use_in_block(block, &mut var_map, &FxHashSet::default()); block_var_map.insert(block_id, var_map); } block_var_map } - -// Propagates stored variables through a block, tracking the latest stored value and replacing -// usage of the variable with the stored value. -fn map_variable_use_in_block(block: &mut Block, var_map: &mut FxHashMap) { - let instrs = block.0.drain(..).collect::>(); - - for mut instr in instrs { - match &mut instr { - // Track the new value of the variable and omit the store instruction. - Instruction::Store(operand, var) => { - // Note this uses the mapped operand to make sure this variable points to whatever root literal or variable - // this operand corresponds to at this point in the block. This makes the new variable respect a point-in-time - // copy of the operand. 
- var_map.insert(var.variable_id, operand.mapped(var_map)); - continue; - } - - // Replace any arguments with the new values of stored variables. - Instruction::Call(_, args, _, _) => { - *args = args - .iter() - .map(|arg| match arg { - Operand::Variable(var) => { - // If the variable is not in the map, it is not something whose value has been updated via store in this block, - // so just fallback to use the `arg` value directly. - // `map_to_operand` does this automatically by returning `self`` when the variable is not in the map. - var.map_to_operand(var_map) - } - Operand::Literal(_) => *arg, - }) - .collect(); - } - - // Replace the branch condition with the new value of the variable. - Instruction::Branch(var, _, _, _) => { - *var = var.map_to_variable(var_map); - } - - // Two variable instructions, replace left and right operands with new values. - Instruction::Add(lhs, rhs, _) - | Instruction::Sub(lhs, rhs, _) - | Instruction::Mul(lhs, rhs, _) - | Instruction::Sdiv(lhs, rhs, _) - | Instruction::Srem(lhs, rhs, _) - | Instruction::Shl(lhs, rhs, _) - | Instruction::Ashr(lhs, rhs, _) - | Instruction::Fadd(lhs, rhs, _) - | Instruction::Fsub(lhs, rhs, _) - | Instruction::Fmul(lhs, rhs, _) - | Instruction::Fdiv(lhs, rhs, _) - | Instruction::Fcmp(_, lhs, rhs, _) - | Instruction::Icmp(_, lhs, rhs, _) - | Instruction::LogicalAnd(lhs, rhs, _) - | Instruction::LogicalOr(lhs, rhs, _) - | Instruction::BitwiseAnd(lhs, rhs, _) - | Instruction::BitwiseOr(lhs, rhs, _) - | Instruction::BitwiseXor(lhs, rhs, _) => { - *lhs = lhs.mapped(var_map); - *rhs = rhs.mapped(var_map); - } - - // Single variable instructions, replace operand with new value. 
- Instruction::BitwiseNot(operand, _) | Instruction::LogicalNot(operand, _) => { - *operand = operand.mapped(var_map); - } - - Instruction::Convert(operand, var) => { - *operand = operand.mapped(var_map); - *var = var.map_to_variable(var_map); - } - - // Phi nodes are handled separately in the SSA transformation, but need to be passed through - // like the unconditional terminators. - Instruction::Phi(..) | Instruction::Jump(..) | Instruction::Return => {} - } - block.0.push(instr); - } -} - -impl Operand { - fn mapped(&self, var_map: &FxHashMap) -> Operand { - match self { - Operand::Literal(_) => *self, - Operand::Variable(var) => var.map_to_operand(var_map), - } - } -} - -impl Variable { - fn map_to_operand(self, var_map: &FxHashMap) -> Operand { - let mut var = self; - while let Some(operand) = var_map.get(&var.variable_id) { - if let Operand::Variable(new_var) = operand { - var = *new_var; - } else { - return *operand; - } - } - Operand::Variable(var) - } - - fn map_to_variable(self, var_map: &FxHashMap) -> Variable { - let mut var = self; - while let Some(operand) = var_map.get(&var.variable_id) { - let Operand::Variable(new_var) = operand else { - panic!("literal not supported in this context"); - }; - var = *new_var; - } - var - } -} diff --git a/source/compiler/qsc_rir/src/passes/ssa_transform/tests.rs b/source/compiler/qsc_rir/src/passes/ssa_transform/tests.rs index ef6e1cf2b0..160b9ee862 100644 --- a/source/compiler/qsc_rir/src/passes/ssa_transform/tests.rs +++ b/source/compiler/qsc_rir/src/passes/ssa_transform/tests.rs @@ -4,7 +4,7 @@ #![allow(clippy::too_many_lines, clippy::needless_raw_string_hashes)] use expect_test::expect; -use qsc_data_structures::target::TargetCapabilityFlags; +use qsc_data_structures::target::Profile; use crate::{ builder::{bell_program, new_program, teleport_program}, @@ -15,14 +15,14 @@ use crate::{ }, }; fn transform_program(program: &mut Program) { - program.config.capabilities = TargetCapabilityFlags::all(); + 
program.config.capabilities = Profile::AdaptiveRIF.into(); check_and_transform(program); } #[test] fn ssa_transform_leaves_program_without_store_instruction_unchanged() { let mut program = bell_program(); - program.config.capabilities = TargetCapabilityFlags::all(); + program.config.capabilities = Profile::AdaptiveRIF.into(); let program_string_orignal = program.to_string(); transform_program(&mut program); @@ -32,7 +32,7 @@ fn ssa_transform_leaves_program_without_store_instruction_unchanged() { #[test] fn ssa_transform_leaves_branching_program_without_store_instruction_unchanged() { let mut program = teleport_program(); - program.config.capabilities = TargetCapabilityFlags::all(); + program.config.capabilities = Profile::AdaptiveRIF.into(); let program_string_orignal = program.to_string(); transform_program(&mut program); @@ -144,7 +144,7 @@ fn ssa_transform_removes_store_in_single_block_program() { Variable(2, Boolean) = LogicalNot Variable(0, Boolean) Return config: Config: - capabilities: TargetCapabilityFlags(Adaptive | IntegerComputations | FloatingPointComputations | BackwardsBranching | HigherLevelConstructs) + capabilities: TargetCapabilityFlags(Adaptive | IntegerComputations | FloatingPointComputations) num_qubits: 0 num_results: 0 tags: @@ -303,7 +303,7 @@ fn ssa_transform_removes_multiple_stores_in_single_block_program() { Variable(4, Boolean) = LogicalNot Variable(3, Boolean) Return config: Config: - capabilities: TargetCapabilityFlags(Adaptive | IntegerComputations | FloatingPointComputations | BackwardsBranching | HigherLevelConstructs) + capabilities: TargetCapabilityFlags(Adaptive | IntegerComputations | FloatingPointComputations) num_qubits: 0 num_results: 0 tags: @@ -478,7 +478,7 @@ fn ssa_transform_store_dominating_usage_propagates_to_successor_blocks() { Variable(4, Boolean) = LogicalNot Variable(0, Boolean) Return config: Config: - capabilities: TargetCapabilityFlags(Adaptive | IntegerComputations | FloatingPointComputations | 
BackwardsBranching | HigherLevelConstructs) + capabilities: TargetCapabilityFlags(Adaptive | IntegerComputations | FloatingPointComputations) num_qubits: 0 num_results: 0 tags: @@ -624,7 +624,7 @@ fn ssa_transform_store_dominating_usage_propagates_to_successor_blocks_without_i Variable(4, Boolean) = LogicalNot Variable(0, Boolean) Return config: Config: - capabilities: TargetCapabilityFlags(Adaptive | IntegerComputations | FloatingPointComputations | BackwardsBranching | HigherLevelConstructs) + capabilities: TargetCapabilityFlags(Adaptive | IntegerComputations | FloatingPointComputations) num_qubits: 0 num_results: 0 tags: @@ -822,7 +822,7 @@ fn ssa_transform_inserts_phi_for_store_not_dominating_usage() { Variable(4, Boolean) = LogicalNot Variable(5, Boolean) Return config: Config: - capabilities: TargetCapabilityFlags(Adaptive | IntegerComputations | FloatingPointComputations | BackwardsBranching | HigherLevelConstructs) + capabilities: TargetCapabilityFlags(Adaptive | IntegerComputations | FloatingPointComputations) num_qubits: 0 num_results: 0 tags: @@ -993,7 +993,7 @@ fn ssa_transform_inserts_phi_for_store_not_dominating_usage_in_one_branch() { Variable(4, Boolean) = LogicalNot Variable(5, Boolean) Return config: Config: - capabilities: TargetCapabilityFlags(Adaptive | IntegerComputations | FloatingPointComputations | BackwardsBranching | HigherLevelConstructs) + capabilities: TargetCapabilityFlags(Adaptive | IntegerComputations | FloatingPointComputations) num_qubits: 0 num_results: 0 tags: @@ -1260,7 +1260,7 @@ fn ssa_transform_inserts_phi_for_node_with_many_predecessors() { Variable(5, Boolean) = LogicalNot Variable(6, Boolean) Return config: Config: - capabilities: TargetCapabilityFlags(Adaptive | IntegerComputations | FloatingPointComputations | BackwardsBranching | HigherLevelConstructs) + capabilities: TargetCapabilityFlags(Adaptive | IntegerComputations | FloatingPointComputations) num_qubits: 0 num_results: 0 tags: @@ -1481,7 +1481,7 @@ fn 
ssa_transform_inserts_phi_for_multiple_stored_values() { Variable(6, Boolean) = LogicalNot Variable(8, Boolean) Return config: Config: - capabilities: TargetCapabilityFlags(Adaptive | IntegerComputations | FloatingPointComputations | BackwardsBranching | HigherLevelConstructs) + capabilities: TargetCapabilityFlags(Adaptive | IntegerComputations | FloatingPointComputations) num_qubits: 0 num_results: 0 tags: @@ -1817,7 +1817,7 @@ fn ssa_transform_inserts_phi_nodes_in_successive_blocks_for_chained_branches() { Variable(8, Boolean) = LogicalNot Variable(10, Boolean) Return config: Config: - capabilities: TargetCapabilityFlags(Adaptive | IntegerComputations | FloatingPointComputations | BackwardsBranching | HigherLevelConstructs) + capabilities: TargetCapabilityFlags(Adaptive | IntegerComputations | FloatingPointComputations) num_qubits: 0 num_results: 0 tags: @@ -2111,7 +2111,7 @@ fn ssa_transform_inerts_phi_nodes_for_early_return_graph_pattern() { Variable(4, Boolean) = LogicalNot Variable(9, Boolean) Return config: Config: - capabilities: TargetCapabilityFlags(Adaptive | IntegerComputations | FloatingPointComputations | BackwardsBranching | HigherLevelConstructs) + capabilities: TargetCapabilityFlags(Adaptive | IntegerComputations | FloatingPointComputations) num_qubits: 0 num_results: 0 tags: @@ -2285,7 +2285,7 @@ fn ssa_transform_propagates_updates_from_multiple_predecessors_to_later_single_s Block 4: Block: Return config: Config: - capabilities: TargetCapabilityFlags(Adaptive | IntegerComputations | FloatingPointComputations | BackwardsBranching | HigherLevelConstructs) + capabilities: TargetCapabilityFlags(Adaptive | IntegerComputations | FloatingPointComputations) num_qubits: 0 num_results: 0 tags: @@ -2408,7 +2408,7 @@ fn ssa_transform_maps_store_instrs_that_use_values_from_other_store_instrs() { Variable(3, Boolean) = LogicalNot Variable(0, Boolean) Return config: Config: - capabilities: TargetCapabilityFlags(Adaptive | IntegerComputations | 
FloatingPointComputations | BackwardsBranching | HigherLevelConstructs) + capabilities: TargetCapabilityFlags(Adaptive | IntegerComputations | FloatingPointComputations) num_qubits: 0 num_results: 0 tags: @@ -2568,7 +2568,7 @@ fn ssa_transform_maps_store_with_variable_from_store_in_conditional_to_phi_node( Variable(3, Boolean) = LogicalNot Variable(4, Boolean) Return config: Config: - capabilities: TargetCapabilityFlags(Adaptive | IntegerComputations | FloatingPointComputations | BackwardsBranching | HigherLevelConstructs) + capabilities: TargetCapabilityFlags(Adaptive | IntegerComputations | FloatingPointComputations) num_qubits: 0 num_results: 0 tags: @@ -2726,7 +2726,7 @@ fn ssa_transform_allows_point_in_time_copy_of_dynamic_variable() { Variable(5, Boolean) = LogicalNot Variable(3, Boolean) Return config: Config: - capabilities: TargetCapabilityFlags(Adaptive | IntegerComputations | FloatingPointComputations | BackwardsBranching | HigherLevelConstructs) + capabilities: TargetCapabilityFlags(Adaptive | IntegerComputations | FloatingPointComputations) num_qubits: 0 num_results: 0 tags: @@ -2952,7 +2952,7 @@ fn ssa_transform_propagates_phi_var_to_successor_blocks_across_sequential_branch Call id(2), args( Variable(4, Boolean), ) Return config: Config: - capabilities: TargetCapabilityFlags(Adaptive | IntegerComputations | FloatingPointComputations | BackwardsBranching | HigherLevelConstructs) + capabilities: TargetCapabilityFlags(Adaptive | IntegerComputations | FloatingPointComputations) num_qubits: 0 num_results: 0 tags: diff --git a/source/compiler/qsc_rir/src/passes/type_check.rs b/source/compiler/qsc_rir/src/passes/type_check.rs index ed7c61cf35..af77d19e2b 100644 --- a/source/compiler/qsc_rir/src/passes/type_check.rs +++ b/source/compiler/qsc_rir/src/passes/type_check.rs @@ -1,7 +1,7 @@ // Copyright (c) Microsoft Corporation. // Licensed under the MIT License. 
-use crate::rir::{Callable, Instruction, Operand, Program, Ty, Variable}; +use crate::rir::{AdvancedInstr, Callable, Instruction, Operand, Program, Ty, Variable}; #[cfg(test)] mod tests; @@ -59,7 +59,10 @@ fn check_instr_types(program: &Program, instr: &Instruction) { } } - Instruction::Convert(_, _) | Instruction::Jump(_) | Instruction::Return => {} + Instruction::Convert(_, _) + | Instruction::Jump(_) + | Instruction::Advanced(AdvancedInstr::Alloca(..) | AdvancedInstr::Load(..)) + | Instruction::Return => {} } } diff --git a/source/compiler/qsc_rir/src/rir.rs b/source/compiler/qsc_rir/src/rir.rs index d9a4320f6c..68e10b1e11 100644 --- a/source/compiler/qsc_rir/src/rir.rs +++ b/source/compiler/qsc_rir/src/rir.rs @@ -59,6 +59,40 @@ impl Program { Self::default() } + #[must_use] + pub fn with_blocks(blocks: Vec<(BlockId, Block)>) -> Self { + let mut program = Self::new(); + let mut entry_block_id = None; + for (id, block) in blocks { + if entry_block_id.is_none() { + entry_block_id = Some(id); + } + program.blocks.insert(id, block); + } + + if let Some(entry_block_id) = entry_block_id { + const ENTRY: CallableId = CallableId(0); + program.entry = ENTRY; + program.callables.insert( + ENTRY, + Callable { + name: "entry".into(), + input_type: vec![], + output_type: None, + body: Some(entry_block_id), + call_type: CallableType::Regular, + }, + ); + } + + program + } + + #[must_use] + pub fn all_callable_ids(&self) -> Vec { + self.callables.iter().map(|(id, _)| id).collect() + } + #[must_use] pub fn get_callable(&self, id: CallableId) -> &Callable { self.callables.get(id).expect("callable should be present") @@ -347,6 +381,7 @@ pub enum Instruction { BitwiseXor(Operand, Operand, Variable), Phi(Vec<(Operand, BlockId)>, Variable), Convert(Operand, Variable), + Advanced(AdvancedInstr), Return, } @@ -361,106 +396,7 @@ impl Instruction { } impl Display for Instruction { - #[allow(clippy::too_many_lines)] fn fmt(&self, f: &mut Formatter) -> fmt::Result { - fn 
write_binary_instruction( - f: &mut Formatter, - instruction: &str, - lhs: &Operand, - rhs: &Operand, - variable: Variable, - ) -> fmt::Result { - let mut indent = set_indentation(indented(f), 0); - write!(indent, "{variable} = {instruction} {lhs}, {rhs}")?; - Ok(()) - } - - fn write_branch( - f: &mut Formatter, - condition: Variable, - if_true: BlockId, - if_false: BlockId, - metadata: Option<&InstructionDbgMetadata>, - ) -> fmt::Result { - let mut indent = set_indentation(indented(f), 0); - write!(indent, "Branch {condition}, {}, {}", if_true.0, if_false.0)?; - if let Some(metadata) = metadata { - write!(f, " {metadata}")?; - } - Ok(()) - } - - fn write_call( - f: &mut Formatter, - callable_id: CallableId, - args: &[Operand], - variable: Option, - metadata: Option<&InstructionDbgMetadata>, - ) -> fmt::Result { - let mut indent = set_indentation(indented(f), 0); - if let Some(variable) = variable { - write!(indent, "{variable} = ")?; - } - write!(indent, "Call id({}), args( ", callable_id.0)?; - for arg in args { - write!(indent, "{arg}, ")?; - } - write!(indent, ")")?; - if let Some(metadata) = metadata { - write!(f, " {metadata}")?; - } - Ok(()) - } - - fn write_unary_instruction( - f: &mut Formatter, - instruction: &str, - value: &Operand, - variable: Variable, - ) -> fmt::Result { - let mut indent = set_indentation(indented(f), 0); - write!(indent, "{variable} = {instruction} {value}")?; - Ok(()) - } - - fn write_fcmp_instruction( - f: &mut Formatter, - condition: FcmpConditionCode, - lhs: &Operand, - rhs: &Operand, - variable: Variable, - ) -> fmt::Result { - let mut indent = set_indentation(indented(f), 0); - write!(indent, "{variable} = Fcmp {condition}, {lhs}, {rhs}")?; - Ok(()) - } - - fn write_icmp_instruction( - f: &mut Formatter, - condition: ConditionCode, - lhs: &Operand, - rhs: &Operand, - variable: Variable, - ) -> fmt::Result { - let mut indent = set_indentation(indented(f), 0); - write!(indent, "{variable} = Icmp {condition}, {lhs}, {rhs}")?; - 
Ok(()) - } - - fn write_phi_instruction( - f: &mut Formatter, - args: &[(Operand, BlockId)], - variable: Variable, - ) -> fmt::Result { - let mut indent = set_indentation(indented(f), 0); - write!(indent, "{variable} = Phi ( ")?; - for (val, block_id) in args { - write!(indent, "[{val}, {}], ", block_id.0)?; - } - write!(indent, ")")?; - Ok(()) - } - match &self { Self::Store(value, variable) => write_unary_instruction(f, "Store", value, *variable)?, Self::Jump(block_id) => write!(f, "Jump({})", block_id.0)?, @@ -537,12 +473,53 @@ impl Display for Instruction { let mut indent = set_indentation(indented(f), 0); write!(indent, "{variable} = Convert {operand}")?; } + Self::Advanced(instr) => { + write!(f, "{instr}")?; + } Self::Return => write!(f, "Return")?, } Ok(()) } } +#[derive(Clone, Copy, Debug, PartialEq, Eq)] +pub enum AdvancedInstr { + Load(Variable, Variable), + Alloca(Variable), +} + +impl From for Instruction { + fn from(instr: AdvancedInstr) -> Self { + Self::Advanced(instr) + } +} + +impl TryFrom for AdvancedInstr { + type Error = (); + + fn try_from(instr: Instruction) -> Result { + match instr { + Instruction::Advanced(adv_instr) => Ok(adv_instr), + _ => Err(()), + } + } +} + +impl Display for AdvancedInstr { + fn fmt(&self, f: &mut Formatter) -> fmt::Result { + match &self { + Self::Load(lhs, rhs) => { + write_unary_instruction(f, "Load", &Operand::Variable(*lhs), *rhs)?; + } + Self::Alloca(variable) => { + let mut indent = set_indentation(indented(f), 0); + write!(indent, "{variable} = Alloca")?; + } + } + Ok(()) + } +} + #[derive(Debug, Clone, Copy, Default, PartialEq, PartialOrd, Eq, Ord, Hash)] pub struct VariableId(pub u32); @@ -603,6 +580,14 @@ impl Variable { ty: Ty::Double, } } + + #[must_use] + pub fn new_ptr(id: VariableId) -> Self { + Self { + variable_id: id, + ty: Ty::Pointer, + } + } } #[derive(Clone, Copy, Debug, PartialEq, Eq)] @@ -752,3 +737,101 @@ fn set_indentation<'a, 'b>( _ => unimplemented!("indentation level not supported"), } } 
+ +fn write_binary_instruction( + f: &mut Formatter, + instruction: &str, + lhs: &Operand, + rhs: &Operand, + variable: Variable, +) -> fmt::Result { + let mut indent = set_indentation(indented(f), 0); + write!(indent, "{variable} = {instruction} {lhs}, {rhs}")?; + Ok(()) +} + +fn write_branch( + f: &mut Formatter, + condition: Variable, + if_true: BlockId, + if_false: BlockId, + metadata: Option<&InstructionDbgMetadata>, +) -> fmt::Result { + let mut indent = set_indentation(indented(f), 0); + write!(indent, "Branch {condition}, {}, {}", if_true.0, if_false.0)?; + if let Some(metadata) = metadata { + write!(f, " {metadata}")?; + } + Ok(()) +} + +fn write_call( + f: &mut Formatter, + callable_id: CallableId, + args: &[Operand], + variable: Option, + metadata: Option<&InstructionDbgMetadata>, +) -> fmt::Result { + let mut indent = set_indentation(indented(f), 0); + if let Some(variable) = variable { + write!(indent, "{variable} = ")?; + } + write!(indent, "Call id({}), args( ", callable_id.0)?; + for arg in args { + write!(indent, "{arg}, ")?; + } + write!(indent, ")")?; + if let Some(metadata) = metadata { + write!(f, " {metadata}")?; + } + Ok(()) +} + +fn write_unary_instruction( + f: &mut Formatter, + instruction: &str, + value: &Operand, + variable: Variable, +) -> fmt::Result { + let mut indent = set_indentation(indented(f), 0); + write!(indent, "{variable} = {instruction} {value}")?; + Ok(()) +} + +fn write_fcmp_instruction( + f: &mut Formatter, + condition: FcmpConditionCode, + lhs: &Operand, + rhs: &Operand, + variable: Variable, +) -> fmt::Result { + let mut indent = set_indentation(indented(f), 0); + write!(indent, "{variable} = Fcmp {condition}, {lhs}, {rhs}")?; + Ok(()) +} + +fn write_icmp_instruction( + f: &mut Formatter, + condition: ConditionCode, + lhs: &Operand, + rhs: &Operand, + variable: Variable, +) -> fmt::Result { + let mut indent = set_indentation(indented(f), 0); + write!(indent, "{variable} = Icmp {condition}, {lhs}, {rhs}")?; + Ok(()) +} + 
+fn write_phi_instruction( + f: &mut Formatter, + args: &[(Operand, BlockId)], + variable: Variable, +) -> fmt::Result { + let mut indent = set_indentation(indented(f), 0); + write!(indent, "{variable} = Phi ( ")?; + for (val, block_id) in args { + write!(indent, "[{val}, {}], ", block_id.0)?; + } + write!(indent, ")")?; + Ok(()) +} diff --git a/source/compiler/qsc_rir/src/utils.rs b/source/compiler/qsc_rir/src/utils.rs index 4a4f6aecbb..db16859449 100644 --- a/source/compiler/qsc_rir/src/utils.rs +++ b/source/compiler/qsc_rir/src/utils.rs @@ -1,9 +1,11 @@ // Copyright (c) Microsoft Corporation. // Licensed under the MIT License. -use crate::rir::{Block, BlockId, Instruction, Program, VariableId}; +use crate::rir::{ + AdvancedInstr, Block, BlockId, Instruction, Operand, Program, Variable, VariableId, +}; use qsc_data_structures::index_map::IndexMap; -use rustc_hash::FxHashSet; +use rustc_hash::{FxHashMap, FxHashSet}; /// Given a block, return the block IDs of its successors. #[must_use] @@ -108,7 +110,9 @@ pub fn get_variable_assignments(program: &Program) -> IndexMap { + Instruction::Store(_, var) + | Instruction::Advanced(AdvancedInstr::Alloca(var) | AdvancedInstr::Load(_, var)) => + { has_store = true; assignments.insert(var.variable_id, (block_id, idx)); } @@ -126,3 +130,134 @@ pub fn get_variable_assignments(program: &Program) -> IndexMap, + var_stor_to_keep: &FxHashSet, +) { + let instrs = block.0.drain(..).collect::>(); + + for mut instr in instrs { + match &mut instr { + // Track the new value of the variable and omit the store instruction. + Instruction::Store(operand, var) => { + if var_stor_to_keep.contains(&var.variable_id) { + // Only keep stores to variables that are in the set to keep. + *operand = operand.mapped(var_map); + } else { + // Note this uses the mapped operand to make sure this variable points to whatever root literal or variable + // this operand corresponds to at this point in the block. 
This makes the new variable respect a point-in-time
+                    // copy of the operand.
+                    var_map.insert(var.variable_id, operand.mapped(var_map));
+                    continue;
+                }
+            }
+
+            // Replace any arguments with the new values of stored variables.
+            Instruction::Call(_, args, _, _) => {
+                *args = args
+                    .iter()
+                    .map(|arg| match arg {
+                        Operand::Variable(var) => {
+                            // If the variable is not in the map, it is not something whose value has been updated via store in this block,
+                            // so just fallback to use the `arg` value directly.
+                            // `map_to_operand` does this automatically by returning `self` when the variable is not in the map.
+                            var.map_to_operand(var_map)
+                        }
+                        Operand::Literal(_) => *arg,
+                    })
+                    .collect();
+            }
+
+            // Replace the branch condition with the new value of the variable.
+            Instruction::Branch(var, _, _, _) => {
+                *var = var.map_to_variable(var_map);
+            }
+
+            Instruction::Convert(operand, var) => {
+                *operand = operand.mapped(var_map);
+                *var = var.map_to_variable(var_map);
+            }
+
+            // Two variable instructions, replace left and right operands with new values.
+            Instruction::Add(lhs, rhs, _)
+            | Instruction::Sub(lhs, rhs, _)
+            | Instruction::Mul(lhs, rhs, _)
+            | Instruction::Sdiv(lhs, rhs, _)
+            | Instruction::Srem(lhs, rhs, _)
+            | Instruction::Shl(lhs, rhs, _)
+            | Instruction::Ashr(lhs, rhs, _)
+            | Instruction::Fadd(lhs, rhs, _)
+            | Instruction::Fsub(lhs, rhs, _)
+            | Instruction::Fmul(lhs, rhs, _)
+            | Instruction::Fdiv(lhs, rhs, _)
+            | Instruction::Fcmp(_, lhs, rhs, _)
+            | Instruction::Icmp(_, lhs, rhs, _)
+            | Instruction::LogicalAnd(lhs, rhs, _)
+            | Instruction::LogicalOr(lhs, rhs, _)
+            | Instruction::BitwiseAnd(lhs, rhs, _)
+            | Instruction::BitwiseOr(lhs, rhs, _)
+            | Instruction::BitwiseXor(lhs, rhs, _) => {
+                *lhs = lhs.mapped(var_map);
+                *rhs = rhs.mapped(var_map);
+            }
+
+            // Single variable instructions, replace operand with new value.
+ Instruction::BitwiseNot(operand, _) | Instruction::LogicalNot(operand, _) => { + *operand = operand.mapped(var_map); + } + + // Phi nodes are handled separately in the SSA transformation, but need to be passed through + // like the unconditional terminators. + Instruction::Phi(..) | Instruction::Jump(..) | Instruction::Return => {} + + Instruction::Advanced(AdvancedInstr::Alloca(..)) => { + panic!("alloca not supported in ssa transformation") + } + Instruction::Advanced(AdvancedInstr::Load(..)) => { + panic!("load not supported in ssa transformation") + } + } + block.0.push(instr); + } +} + +impl Operand { + #[must_use] + pub fn mapped(&self, var_map: &FxHashMap) -> Operand { + match self { + Operand::Literal(_) => *self, + Operand::Variable(var) => var.map_to_operand(var_map), + } + } +} + +impl Variable { + #[must_use] + pub fn map_to_operand(self, var_map: &FxHashMap) -> Operand { + let mut var = self; + while let Some(operand) = var_map.get(&var.variable_id) { + if let Operand::Variable(new_var) = operand { + var = *new_var; + } else { + return *operand; + } + } + Operand::Variable(var) + } + + #[must_use] + pub fn map_to_variable(self, var_map: &FxHashMap) -> Variable { + let mut var = self; + while let Some(operand) = var_map.get(&var.variable_id) { + let Operand::Variable(new_var) = operand else { + panic!("literal not supported in this context"); + }; + var = *new_var; + } + var + } +} diff --git a/source/pip/qsharp/_native.pyi b/source/pip/qsharp/_native.pyi index 6dcb6079df..e89e6bce23 100644 --- a/source/pip/qsharp/_native.pyi +++ b/source/pip/qsharp/_native.pyi @@ -107,6 +107,13 @@ class TargetProfile(Enum): extension defined by the QIR specification. """ + Adaptive_RIFLA: TargetProfile + """ + Target supports the Adaptive profile with integer & floating-point + computation extensions as well as loop extension and statically-sized + arrays extension. 
+ """ + Unrestricted: TargetProfile """ Describes the unrestricted set of capabilities required to run any Q# program. diff --git a/source/pip/src/interpreter.rs b/source/pip/src/interpreter.rs index 5a3c5db75c..38b8fc031b 100644 --- a/source/pip/src/interpreter.rs +++ b/source/pip/src/interpreter.rs @@ -172,6 +172,10 @@ pub(crate) enum TargetProfile { /// capabilities, as well as the optional floating-point computation /// extension defined by the QIR specification. Adaptive_RIF, + /// Target supports the Adaptive profile with integer & floating-point + /// computation extensions as well as loop extension and statically-sized + /// arrays extension. + Adaptive_RIFLA, /// Target supports the full set of capabilities required to run any Q# program. /// /// This option maps to the Full Profile as defined by the QIR specification. @@ -200,7 +204,8 @@ impl TargetProfile { 0 => Self::Base, 1 => Self::Adaptive_RI, 2 => Self::Adaptive_RIF, - 3 => Self::Unrestricted, + 3 => Self::Adaptive_RIFLA, + 4 => Self::Unrestricted, _ => return Err(PyValueError::new_err("invalid state")), }; Ok(()) @@ -229,6 +234,7 @@ impl From for TargetProfile { Profile::Base => TargetProfile::Base, Profile::AdaptiveRI => TargetProfile::Adaptive_RI, Profile::AdaptiveRIF => TargetProfile::Adaptive_RIF, + Profile::AdaptiveRIFLA => TargetProfile::Adaptive_RIFLA, Profile::Unrestricted => TargetProfile::Unrestricted, } } @@ -240,6 +246,7 @@ impl From for Profile { TargetProfile::Base => Profile::Base, TargetProfile::Adaptive_RI => Profile::AdaptiveRI, TargetProfile::Adaptive_RIF => Profile::AdaptiveRIF, + TargetProfile::Adaptive_RIFLA => Profile::AdaptiveRIFLA, TargetProfile::Unrestricted => Profile::Unrestricted, } } diff --git a/source/pip/tests-integration/resources/adaptive_rifla/input/ArithmeticOps.qs b/source/pip/tests-integration/resources/adaptive_rifla/input/ArithmeticOps.qs new file mode 100644 index 0000000000..4dff49b1c0 --- /dev/null +++ 
b/source/pip/tests-integration/resources/adaptive_rifla/input/ArithmeticOps.qs @@ -0,0 +1,34 @@ +namespace Test { + + import Std.Intrinsic.*; + import Std.Measurement.*; + + // Demonstrates use of arithmetic operations on integers at runtime. + // Expected output: (5, 25, 0, 243) + @EntryPoint() + operation Main() : (Int, Int, Int, Int) { + mutable count = 0; + mutable countPos = 0; + mutable countNeg = 10; + mutable countMul = 1; + use qs = Qubit[5]; + for q in qs { + X(q); + } + for r in MeasureEachZ(qs) { + if r == One { + // Note that addition of a 1 will get optimized into a zext on the bool. + set count += 1; + + set countPos += 5; + + // Note that subtraction of 2 turns into add of -2... problem for providers without negative numbers? + set countNeg -= 2; + + set countMul *= 3; + } + } + ResetAll(qs); + return (count, countPos, countNeg, countMul); + } +} diff --git a/source/pip/tests-integration/resources/adaptive_rifla/input/BernsteinVaziraniNISQ.qs b/source/pip/tests-integration/resources/adaptive_rifla/input/BernsteinVaziraniNISQ.qs new file mode 100644 index 0000000000..361635f5e0 --- /dev/null +++ b/source/pip/tests-integration/resources/adaptive_rifla/input/BernsteinVaziraniNISQ.qs @@ -0,0 +1,153 @@ +/// # Sample +/// Bernstein-Vazirani algorithm +/// +/// # Description +/// The Bernstein-Vazirani algorithm determines the value of a bit string +/// encoded in a function. +/// +/// This Q# program implements the Bernstein-Vazirani algorithm. +namespace Test { + import Std.Arrays.*; + import Std.Convert.*; + import Std.Diagnostics.*; + import Std.Intrinsic.*; + import Std.Math.*; + import Std.Measurement.*; + + @EntryPoint() + operation Main() : Result[] { + // Consider a function 𝑓(𝑥⃗) on bitstrings 𝑥⃗ = (𝑥₀, …, 𝑥ₙ₋₁) of the form + // 𝑓(𝑥⃗) ≔ Σᵢ 𝑥ᵢ 𝑟ᵢ + // where 𝑟⃗ = (𝑟₀, …, 𝑟ₙ₋₁) is an unknown bit string that determines the + // parity of 𝑓. 
+ + // The Bernstein–Vazirani algorithm allows determining 𝑟 given a + // quantum operation that implements + // |𝑥〉|𝑦〉 ↦ |𝑥〉|𝑦 ⊕ 𝑓(𝑥)〉. + + // This entry point function of this program, `Main`, shows how to use + // the `BernsteinVazirani` operation to determine the value of bitstring + // 𝑟. + let secretBitString = SecretBitStringAsBoolArray(); + let parityOperation = EncodeBitStringAsParityOperation(secretBitString); + let decodedBitString = BernsteinVazirani( + parityOperation, + Length(secretBitString) + ); + + return decodedBitString; + } + + /// # Summary + /// This operation implements the Bernstein-Vazirani quantum algorithm. + /// This algorithm computes for a given Boolean function that is promised to + /// be a parity 𝑓(𝑥₀, …, 𝑥ₙ₋₁) = Σᵢ 𝑟ᵢ 𝑥ᵢ a result in the form of a bit + /// vector (𝑟₀, …, 𝑟ₙ₋₁) corresponding to the parity function. + /// Note that it is promised that the function is actually a parity + /// function. + /// + /// # Input + /// ## Uf + /// A quantum operation that implements |𝑥〉|𝑦〉 ↦ |𝑥〉|𝑦 ⊕ 𝑓(𝑥)〉, + /// where 𝑓 is a Boolean function that implements a parity Σᵢ 𝑟ᵢ 𝑥ᵢ. + /// ## n + /// The number of bits in the input register |𝑥〉. + /// + /// # Output + /// An array of type `Result[]` that contains the parity 𝑟⃗ = (𝑟₀, …, 𝑟ₙ₋₁). + /// + /// # See Also + /// - For details see Section 1.4.3 of Nielsen & Chuang. + /// + /// # References + /// - [ *Ethan Bernstein and Umesh Vazirani*, + /// SIAM J. Comput., 26(5), 1411–1473, 1997 ] + /// (https://doi.org/10.1137/S0097539796300921) + operation BernsteinVazirani(Uf : ((Qubit[], Qubit) => Unit), n : Int) : Result[] { + // We allocate n + 1 clean qubits. Note that the function parameter Uf is defined + // on inputs of the form (x, y), where x has n bits and y has 1 bit. 
+ use queryRegister = Qubit[n]; + use target = Qubit(); + + // The last qubit needs to be flipped so that a relative phase is + // introduced when we apply a Hadamard gate later on and we can use + // phase kickback when Uf is applied. + X(target); + + within { + // Now, a Hadamard transform is applied to each of the qubits. As + // the last step before the measurement, a Hadamard transform is + // applied to all qubits except the last one. We could also + // transform the last qubit, but this would not affect the + // final outcome. + // We use a within-apply block to ensure that the Hadamard transform + // is correctly inverted. + ApplyToEachA(H, queryRegister); + } apply { + H(target); + // We now apply Uf to the n+1 qubits, computing + // |x, y〉 ↦ |x, y ⊕ f(x)〉. + Uf(queryRegister, target); + } + + // Measure all qubits and reset them to the |0〉 state so that they can + // be safely deallocated at the end of the block. + let resultArray = MResetEachZ(queryRegister); + + // Finally, the last qubit, which held the y-register, is reset. + Reset(target); + + // The result is already contained in resultArray so no further + // post-processing is necessary. + return resultArray; + } + + /// # Summary + /// Given bit string 𝑟⃗ = (r₀, …, rₙ₋₁), represented as an array of Booleans, + /// this operation applies a unitary 𝑈 that acts on 𝑛 + 1 qubits as: + /// 𝑈 |𝑥〉|𝑦〉 = |𝑥〉|𝑦 ⊕ 𝑓(𝑥)〉 + /// where 𝑓(𝑥) = Σᵢ 𝑥ᵢ 𝑟ᵢ mod 2. + /// + /// # Input + /// ## bitStringAsBoolArray + /// A bit string 𝑟⃗, represented as an array of Booleans, used to define the + /// function 𝑓. + /// ## xRegister + /// Represents the |𝑥〉 register that 𝑈 acts on. + /// ## yQubit + /// Represents the |𝑦〉 qubit that 𝑈 acts on. + operation ApplyParityOperation( + bitStringAsBoolArray : Bool[], + xRegister : Qubit[], + yQubit : Qubit + ) : Unit { + // `xRegister` muts have enough qubits to represent the integer. 
+ let requiredBits = Length(bitStringAsBoolArray); + let availableQubits = Length(xRegister); + Fact( + availableQubits >= requiredBits, + $"The bitstring has {requiredBits} bits but the quantum register " + $"only has {availableQubits} qubits" + ); + + // Apply the quantum operations that encode the bit string. + for (index, bit) in Enumerated(bitStringAsBoolArray) { + if bit { + CNOT(xRegister[index], yQubit); + } + } + } + + /// # Summary + /// This is a higher-order operation which returns an operation (Qubit[], Qubit) => () of the form + /// U_f |𝑥〉|𝑦〉 = |𝑥〉|𝑦 ⊕ 𝑓(𝑥)〉. + /// We define 𝑓 by providing the bit string 𝑟⃗ as an integer. + operation EncodeBitStringAsParityOperation(bitStringAsBoolArray : Bool[]) : (Qubit[], Qubit) => Unit { + return ApplyParityOperation(bitStringAsBoolArray, _, _); + } + + /// # Summary + /// Returns a particular bit string as an array of Booleans. + function SecretBitStringAsBoolArray() : Bool[] { + return [true, false, true, false, true]; + } +} diff --git a/source/pip/tests-integration/resources/adaptive_rifla/input/ConstantFolding.qs b/source/pip/tests-integration/resources/adaptive_rifla/input/ConstantFolding.qs new file mode 100644 index 0000000000..1f02c93a2f --- /dev/null +++ b/source/pip/tests-integration/resources/adaptive_rifla/input/ConstantFolding.qs @@ -0,0 +1,44 @@ +namespace Test { + + import Std.Intrinsic.*; + import Std.Canon.*; + import Std.Measurement.*; + + // Verifies constant folding, unrolling, optimizing out of invalid array access, array concatenation. + // Expected output: [1, 1, 1, 1] + @EntryPoint() + operation Main() : Result[] { + let nQubits = 3; + let iterations = nQubits * 3; + let secondRun = true; + + use qs = Qubit[nQubits]; + X(qs[0]); + if nQubits > 1 { + for _ in 1..iterations { + for q in qs[1...] 
{ + CNOT(qs[0], q); + } + } + } + + let nQubits2 = 1; + let pi = Microsoft.Quantum.Math.PI() / 2.0; + use qs2 = Qubit[nQubits2]; + if secondRun { + Rx(pi * 2.0, qs2[0]); + if nQubits2 > 1 { + for _ in 1..iterations { + for q in qs2[1...] { + CNOT(qs2[0], q); + } + } + } + } + + let results = MeasureEachZ(qs) + MeasureEachZ(qs2); + ResetAll(qs); + ResetAll(qs2); + return results; + } +} diff --git a/source/pip/tests-integration/resources/adaptive_rifla/input/CopyAndUpdateExpressions.qs b/source/pip/tests-integration/resources/adaptive_rifla/input/CopyAndUpdateExpressions.qs new file mode 100644 index 0000000000..73f79df03f --- /dev/null +++ b/source/pip/tests-integration/resources/adaptive_rifla/input/CopyAndUpdateExpressions.qs @@ -0,0 +1,32 @@ +namespace Test { + + import Std.Intrinsic.*; + import Std.Measurement.*; + + // Demonstrates copy and update expressions. + // Expected output: ([1], [0], [1, 1, 1]) + @EntryPoint() + operation Main() : (Result[], Result[], Result[]) { + use qubitA = Qubit(); + X(qubitA); + let resultsA = [Zero] w/ 0 <- MResetZ(qubitA); + + // Simple concatenated copy and update expressions. + use qubitB = Qubit(); + let resultsB = [Zero] + w/ 0 <- One + w/ 0 <- MResetZ(qubitB); + + // Copy and update expression that make use of ranges. 
+ use registerC = Qubit[3]; + X(registerC[0]); + mutable resultsC = MeasureEachZ(registerC); + ApplyToEachCA(X, registerC[1..2]); + set resultsC w/= 1..2 <- MeasureEachZ(registerC[1..2]); + return ( + resultsA, + resultsB, + resultsC + ); + } +} diff --git a/source/pip/tests-integration/resources/adaptive_rifla/input/DeutschJozsaNISQ.qs b/source/pip/tests-integration/resources/adaptive_rifla/input/DeutschJozsaNISQ.qs new file mode 100644 index 0000000000..416f7b8263 --- /dev/null +++ b/source/pip/tests-integration/resources/adaptive_rifla/input/DeutschJozsaNISQ.qs @@ -0,0 +1,103 @@ +/// # Sample +/// Deutsch–Jozsa algorithm +/// +/// # Description +/// Deutsch–Jozsa is a quantum algorithm that determines whether a given Boolean +/// function 𝑓 is constant (0 on all inputs or 1 on all inputs) or balanced +/// (1 for exactly half of the input domain and 0 for the other half). +/// +/// This Q# program implements the Deutsch–Jozsa algorithm. +namespace Test { + + import Std.Intrinsic.*; + import Std.Measurement.*; + + @EntryPoint() + operation Main() : (Result[], Result[]) { + // A Boolean function is a function that maps bitstrings to a bit: + // 𝑓 : {0, 1}^n → {0, 1}. + + // We say that 𝑓 is constant if 𝑓(𝑥⃗) = 𝑓(𝑦⃗) for all bitstrings 𝑥⃗ and + // 𝑦⃗, and that 𝑓 is balanced if 𝑓 evaluates to true for exactly half of + // its inputs. + + // If we are given a function 𝑓 as a quantum operation 𝑈 |𝑥〉|𝑦〉 = + // |𝑥〉|𝑦 ⊕ 𝑓(𝑥)〉, and are promised that 𝑓 is either constant or is + // balanced, then the Deutsch–Jozsa algorithm decides between these + // cases with a single application of 𝑈. + + // Here, we demonstrate the use of the Deutsch-Jozsa algorithm by + // determining the type (constant or balanced) of a couple of functions. 
+ let balancedResults = DeutschJozsa(SimpleBalancedBoolF, 4); + let constantResults = DeutschJozsa(SimpleConstantBoolF, 4); + return (balancedResults, constantResults); + } + + /// # Summary + /// This operation implements the DeutschJozsa algorithm. + /// It returns the query register measurement results. If all the measurement + /// results are `Zero`, the function is constant. If at least one measurement + /// result is `One`, the function is balanced. + /// It is assumed that the function is either constant or balanced. + /// + /// # Input + /// ## Uf + /// A quantum operation that implements |𝑥〉|𝑦〉 ↦ |𝑥〉|𝑦 ⊕ 𝑓(𝑥)〉, where 𝑓 is a + /// Boolean function, 𝑥 is an 𝑛 bit register and 𝑦 is a single qubit. + /// ## n + /// The number of bits in the input register |𝑥〉. + /// + /// # Output + /// An array of measurement results for the query register. + /// All `Zero` measurement results indicate that the function is constant. + /// At least one `One` measurement result in the array indicates that the + /// function is balanced. + /// + /// # See Also + /// - For details see Section 1.4.3 of Nielsen & Chuang. + /// + /// # References + /// - [ *Michael A. Nielsen , Isaac L. Chuang*, + /// Quantum Computation and Quantum Information ] + /// (http://doi.org/10.1017/CBO9780511976667) + operation DeutschJozsa(Uf : ((Qubit[], Qubit) => Unit), n : Int) : Result[] { + // We allocate n + 1 clean qubits. Note that the function `Uf` is defined + // on inputs of the form (x, y), where x has n bits and y has 1 bit. + use queryRegister = Qubit[n]; + use target = Qubit(); + + // The last qubit needs to be flipped so that the function will actually + // be computed into the phase when Uf is applied. + X(target); + + // Now, a Hadamard transform is applied to each of the qubits. + H(target); + // We use a within-apply block to ensure that the Hadamard transform is + // correctly inverted on the |𝑥〉 register. 
+ within { + for q in queryRegister { + H(q); + } + } apply { + // We apply Uf to the n+1 qubits, computing |𝑥, 𝑦〉 ↦ |𝑥, 𝑦 ⊕ 𝑓(𝑥)〉. + Uf(queryRegister, target); + } + + // Measure the query register and reset all qubits so they can be safely + // deallocated. + let results = MeasureEachZ(queryRegister); + ResetAll(queryRegister); + Reset(target); + return results; + } + + // Simple constant Boolean function + operation SimpleConstantBoolF(args : Qubit[], target : Qubit) : Unit { + X(target); + } + + // Simple balanced Boolean function + operation SimpleBalancedBoolF(args : Qubit[], target : Qubit) : Unit { + CX(args[0], target); + } +} diff --git a/source/pip/tests-integration/resources/adaptive_rifla/input/Doubles.qs b/source/pip/tests-integration/resources/adaptive_rifla/input/Doubles.qs new file mode 100644 index 0000000000..ec5f89044c --- /dev/null +++ b/source/pip/tests-integration/resources/adaptive_rifla/input/Doubles.qs @@ -0,0 +1,27 @@ +namespace Test { + + import Std.Intrinsic.*; + + // Demonstrates use of double comparisons. 
+    // Expected output: (10.0, true, false, true, true, false, 10, 10.0)
+    @EntryPoint()
+    operation Main() : (Double, Bool, Bool, Bool, Bool, Bool, Int, Double) {
+        mutable count = 0.0;
+        use q = Qubit();
+        for _ in 1..10 {
+            X(q);
+            if M(q) == One {
+                X(q);
+                set count += 1.0;
+                set count *= 1.0;
+                set count -= 1.0;
+                set count /= 1.0;
+                set count += 1.0;
+            }
+        }
+        Reset(q);
+        let countInteger = Std.Math.Truncate(count);
+        let countIntegerAsDouble = Std.Convert.IntAsDouble(countInteger);
+        return (count, count > 5.0, count < 5.0, count >= 10.0, count == 10.0, count != 10.0, countInteger, countIntegerAsDouble);
+    }
+}
diff --git a/source/pip/tests-integration/resources/adaptive_rifla/input/ExpandedTests.qs b/source/pip/tests-integration/resources/adaptive_rifla/input/ExpandedTests.qs
new file mode 100644
index 0000000000..c993b60513
--- /dev/null
+++ b/source/pip/tests-integration/resources/adaptive_rifla/input/ExpandedTests.qs
@@ -0,0 +1,124 @@
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+namespace Test {
+    import Std.Intrinsic.*;
+    import Std.Convert.*;
+    import Std.Math.*;
+    import Std.Arrays.*;
+    import Std.Measurement.*;
+    import Std.Canon.*;
+
+    @EntryPoint()
+    operation Main() : (Result[], Result) {
+        return (SearchForMarkedInput(), VerifyCNOTfromExp());
+    }
+
+    operation VerifyCNOTfromExp() : Result {
+        use (control, target, paired) = (Qubit(), Qubit(), Qubit());
+
+        within {
+            H(paired);
+            CNOT(paired, target);
+            CNOT(paired, control);
+        } apply {
+            // CNOT
+            let theta = PI() / 4.0;
+            Rx(-2.0 * theta, target);
+            Rz(-2.0 * theta, control);
+            Adjoint Exp([PauliZ, PauliX], theta, [control, target]);
+
+            Adjoint CNOT(control, target);
+        }
+
+        return M(target);
+    }
+
+    /// # Summary
+    /// This operation applies Grover's algorithm to search all possible inputs
+    /// to an operation to find a particular marked state.
+ operation SearchForMarkedInput() : Result[] { + let nQubits = 2; + use qubits = Qubit[nQubits] { + // Initialize a uniform superposition over all possible inputs. + PrepareUniform(qubits); + // The search itself consists of repeatedly reflecting about the + // marked state and our start state, which we can write out in Q# + // as a for loop. + for idxIteration in 0..NIterations(nQubits) - 1 { + ReflectAboutMarked(qubits); + ReflectAboutUniform(qubits); + } + // Measure and return the answer. + return MResetEachZ(qubits); + } + } + + /// # Summary + /// Returns the number of Grover iterations needed to find a single marked + /// item, given the number of qubits in a register. + function NIterations(nQubits : Int) : Int { + let nItems = 1 <<< nQubits; // 2^numQubits + // compute number of iterations: + let angle = ArcSin(1. / Sqrt(IntAsDouble(nItems))); + let nIterations = Round(0.25 * PI() / angle - 0.5); + return nIterations; + } + + /// # Summary + /// Reflects about the basis state marked by alternating zeros and ones. + /// This operation defines what input we are trying to find in the main + /// search. + operation ReflectAboutMarked(inputQubits : Qubit[]) : Unit { + use outputQubit = Qubit() { + within { + // We initialize the outputQubit to (|0⟩ - |1⟩) / √2, + // so that toggling it results in a (-1) phase. + X(outputQubit); + H(outputQubit); + // Flip the outputQubit for marked states. + // Here, we get the state with alternating 0s and 1s by using + // the X instruction on every other qubit. + ApplyToEachA(X, inputQubits[...2...]); + } apply { + Controlled X(inputQubits, outputQubit); + } + } + } + + /// # Summary + /// Reflects about the uniform superposition state. + operation ReflectAboutUniform(inputQubits : Qubit[]) : Unit { + within { + // Transform the uniform superposition to all-zero. 
+ Adjoint PrepareUniform(inputQubits); + // Transform the all-zero state to all-ones + PrepareAllOnes(inputQubits); + } apply { + // Now that we've transformed the uniform superposition to the + // all-ones state, reflect about the all-ones state, then let + // the within/apply block transform us back. + ReflectAboutAllOnes(inputQubits); + } + } + + /// # Summary + /// Reflects about the all-ones state. + operation ReflectAboutAllOnes(inputQubits : Qubit[]) : Unit { + Controlled Z(Most(inputQubits), Tail(inputQubits)); + } + + /// # Summary + /// Given a register in the all-zeros state, prepares a uniform + /// superposition over all basis states. + operation PrepareUniform(inputQubits : Qubit[]) : Unit is Adj + Ctl { + ApplyToEachCA(H, inputQubits); + } + + /// # Summary + /// Given a register in the all-zeros state, prepares an all-ones state + /// by flipping every qubit. + operation PrepareAllOnes(inputQubits : Qubit[]) : Unit is Adj + Ctl { + ApplyToEachCA(X, inputQubits); + } +} diff --git a/source/pip/tests-integration/resources/adaptive_rifla/input/Functors.qs b/source/pip/tests-integration/resources/adaptive_rifla/input/Functors.qs new file mode 100644 index 0000000000..eb787841c0 --- /dev/null +++ b/source/pip/tests-integration/resources/adaptive_rifla/input/Functors.qs @@ -0,0 +1,51 @@ +namespace Test { + + import Std.Intrinsic.*; + import Std.Measurement.*; + + // Verifies use of Q# functors. 
+ // Expected simulation output: + // ([0, 0], [1, 0], [0, 0]) -> 0.5 + // ([0, 0], [1, 1], [0, 0]) -> 0.5 + @EntryPoint() + operation Main() : (Result[], Result[], Result[]) { + use targetsA = Qubit[2]; + Unitary(targetsA); + Adjoint Unitary(targetsA); + + use controls = Qubit[2]; + use targetsB = Qubit[2]; + within { + for q in controls { + X(q); + } + } apply { + Controlled Unitary(controls, targetsB); + } + + use targetsC = Qubit[2]; + within { + for q in controls { + X(q); + } + } apply { + Controlled Unitary(controls, targetsC); + Controlled Adjoint Unitary(controls, targetsC); + } + + let rA = MeasureEachZ(targetsA); + let rB = MeasureEachZ(targetsB); + let rC = MeasureEachZ(targetsC); + ResetAll(controls); + ResetAll(targetsA); + ResetAll(targetsB); + ResetAll(targetsC); + return (rA, rB, rC); + } + + operation Unitary(register : Qubit[]) : Unit is Adj + Ctl { + X(register[0]); + H(register[1]); + Z(register[1]); + } +} diff --git a/source/pip/tests-integration/resources/adaptive_rifla/input/HiddenShiftNISQ.qs b/source/pip/tests-integration/resources/adaptive_rifla/input/HiddenShiftNISQ.qs new file mode 100644 index 0000000000..ad0477ef68 --- /dev/null +++ b/source/pip/tests-integration/resources/adaptive_rifla/input/HiddenShiftNISQ.qs @@ -0,0 +1,169 @@ +/// # Sample +/// Hidden shift +/// +/// # Description +/// There is a family of problems known as hidden shift problems, in which it +/// is given that two Boolean functions 𝑓 and 𝑔 satisfy the relation +/// 𝑔(𝑥) = 𝑓(𝑥 ⊕ 𝑠) for all 𝑥 +/// where 𝑠 is a hidden bit string that we would like to find. +/// +/// This Q# program implements an algorithm to solve the hidden shift problem. +namespace Test { + import Std.Arrays.*; + import Std.Convert.*; + import Std.Diagnostics.*; + import Std.Intrinsic.*; + import Std.Measurement.*; + + @EntryPoint() + operation Main() : Result[] { + // Consider the case of finding a hidden shift 𝑠 between two Boolean + // functions 𝑓(𝑥) and 𝑔(𝑥) = 𝑓(𝑥 ⊕ 𝑠). 
+ // This problem can be solved on a quantum computer with one call to + // each of 𝑓 and 𝑔 in the special case that both functions are bent; + // that is, that they are as far from linear as possible. + + // Here, we find the hidden shift encoded in the following array of + // Booleans. + let shiftAsBoolArray = [true, false, false, false, false, true]; + let shiftAsInt = BoolArrayAsInt(shiftAsBoolArray); + let hiddenShiftBitString = FindHiddenShift( + BentFunction, + register => ShiftedBentFunction(shiftAsInt, register), + Length(shiftAsBoolArray) + ); + + return hiddenShiftBitString; + } + + /// # Summary + /// Implements a correlation-based algorithm to solve the hidden shift + /// problem for bent functions. + /// + /// # Description + /// Implements a solution for the hidden shift problem, which is to identify + /// an unknown shift 𝑠 of the arguments of two Boolean functions 𝑓 and 𝑔 + /// that are promised to satisfy the relation 𝑔(𝑥) = 𝑓(𝑥 ⊕ 𝑠) for all 𝑥. + /// + /// 𝑓 and 𝑔 are assumed to be bent functions. A Boolean function is bent if + /// it is as far from linear as possible. In particular, bent functions have + /// flat Fourier (Walsh–Hadamard) spectra. + /// + /// In this case, the Roetteler algorithm (see References, below) uses + /// black-box oracles for 𝑓^* and 𝑔, where 𝑓^* is the dual bent function to + /// 𝑓, and computes the hidden shift 𝑠 between 𝑓 and 𝑔. + /// + /// # Input + /// ## Ufstar + /// A quantum operation that implements + /// $U_f^*: |𝑥〉 ↦ (-1)^{f^*(x)} |𝑥〉$, + /// where $f^*$ is a Boolean function, 𝑥 is an $n$ bit register + /// ## Ug + /// A quantum operation that implements + /// $U_g:|𝑥〉 ↦ (-1)^{g(x)} |𝑥〉$, + /// where 𝑔 is a Boolean function that is shifted by unknown + /// 𝑠 from 𝑓, and 𝑥 is an $n$ bit register. + /// ## n + /// The number of bits of the input register |𝑥〉. + /// + /// # Output + /// An array of type `Result[]` which encodes the bit representation + /// of the hidden shift. 
+ /// + /// # References + /// - [*Martin Roetteler*, + /// Proc. SODA 2010, ACM, pp. 448-457, 2010] + /// (https://doi.org/10.1137/1.9781611973075.37) + operation FindHiddenShift( + Ufstar : (Qubit[] => Unit), + Ug : (Qubit[] => Unit), + n : Int + ) : Result[] { + // We allocate n clean qubits. Note that the function Ufstar and Ug are + // unitary operations on n qubits defined via phase encoding. + use qubits = Qubit[n]; + + // First, a Hadamard transform is applied to each of the qubits. + ApplyToEach(H, qubits); + + // We now apply the shifted function Ug to the n qubits, computing + // |x〉 -> (-1)^{g(x)} |x〉. + Ug(qubits); + + within { + // A Hadamard transform is applied to each of the n qubits. + ApplyToEachA(H, qubits); + } apply { + // we now apply the dual function of the unshifted function, i.e., + // Ufstar, to the n qubits, computing |x〉 -> (-1)^{fstar(x)} |x〉. + Ufstar(qubits); + } + + // Measure the n qubits and reset them to zero so that they can be + // safely deallocated at the end of the block. + return MResetEachZ(qubits); + } + + /// # Summary + /// Implements an oracle for a bent function constructed from the inner + /// product of Boolean functions. + /// + /// # Description + /// This operation defines the Boolean function IP(x_0, ..., x_{n-1}) which + /// is computed into the phase, i.e., a diagonal operator that maps + /// |x〉 -> (-1)^{IP(x)} |x〉, where x stands for x=(x_0, ..., x_{n-1}) and all + /// the x_i are binary. The IP function is defined as + /// IP(y, z) = y_0 z_0 + y_1 z_1 + ... y_{u-1} z_{u-1} where + /// y = (y_0, ..., y_{u-1}) and z = (z_0, ..., z_{u-1}) are two bit vectors + /// of length u. Notice that the function IP is a Boolean function on n = 2u + /// bits. IP is a special case of bent function. These are functions for + /// which the Walsh-Hadamard transform is perfectly flat (in absolute + /// value). 
+    /// Because of this flatness, the Walsh-Hadamard spectrum of any bent
+    /// function defines a +1/-1 function, i.e., gives rise to another Boolean
+    /// function, called the dual bent function. Moreover, for the case of the
+    /// IP function it can be shown that IP is equal to its own dual bent
+    /// function.
+    ///
+    /// # Remarks
+    /// Notice that a diagonal operator implementing IP between 2 variables y_0
+    /// and z_0 is nothing but the AND function between those variables, i.e.,
+    /// in phase encoding it is computed by a Controlled-Z gate.
+    /// Extending this to an XOR of the AND of more variables, as required in
+    /// the definition of the IP function can then be accomplished by applying
+    /// several Controlled-Z gates between the respective inputs.
+    operation BentFunction(register : Qubit[]) : Unit {
+        Fact(Length(register) % 2 == 0, "Length of register must be even.");
+        let u = Length(register) / 2;
+        let xs = register[0..u - 1];
+        let ys = register[u...];
+        for index in 0..u-1 {
+            CZ(xs[index], ys[index]);
+        }
+    }
+
+    /// # Summary
+    /// Implements a shifted bent function 𝑔(𝑥) = 𝑓(𝑥 ⊕ 𝑠).
+    ///
+    /// # Description
+    /// For the hidden shift problem we need another function g which is related
+    /// to IP via g(x) = IP(x + s), i.e., we have to shift the argument of the
+    /// IP function by a given shift. Notice that the '+' operation here is the
+    /// Boolean addition, i.e., a bit-wise operation. Notice further, that in
+    /// general a diagonal operation |x〉 -> (-1)^{f(x)} can be turned into a
+    /// shifted version by applying a bit flip to the |x〉 register first, then
+    /// applying the diagonal operation, and then undoing the bit flips to the
+    /// |x〉 register. We use this principle to define shifted versions of the IP
+    /// operation.
+ operation ShiftedBentFunction(shift : Int, register : Qubit[]) : Unit { + Fact(Length(register) % 2 == 0, "Length of register must be even."); + let u = Length(register) / 2; + within { + // Flips the bits in shift. + ApplyXorInPlace(shift, register); + } apply { + // Compute the IP function into the phase. + BentFunction(register); + } + } +} diff --git a/source/pip/tests-integration/resources/adaptive_rifla/input/IntegerComparison.qs b/source/pip/tests-integration/resources/adaptive_rifla/input/IntegerComparison.qs new file mode 100644 index 0000000000..1218558b29 --- /dev/null +++ b/source/pip/tests-integration/resources/adaptive_rifla/input/IntegerComparison.qs @@ -0,0 +1,21 @@ +namespace Test { + + import Std.Intrinsic.*; + + // Demonstrates use of integer comparisons. + // Expected output: (true, false, true) + @EntryPoint() + operation Main() : (Bool, Bool, Bool) { + mutable count = 0; + use q = Qubit(); + for _ in 1..10 { + X(q); + if M(q) == One { + X(q); + set count += 1; + } + } + Reset(q); + return (count > 5, count < 5, count == 10); + } +} diff --git a/source/pip/tests-integration/resources/adaptive_rifla/input/IntrinsicCCNOT.qs b/source/pip/tests-integration/resources/adaptive_rifla/input/IntrinsicCCNOT.qs new file mode 100644 index 0000000000..d6aeafb644 --- /dev/null +++ b/source/pip/tests-integration/resources/adaptive_rifla/input/IntrinsicCCNOT.qs @@ -0,0 +1,30 @@ +namespace Test { + + import Std.Intrinsic.*; + import Std.Measurement.*; + + // Verifies the use of the CCNOT quantum gate from Q#'s Microsoft.Quantum.Intrinsic namespace. + // Expected simulation output: ([0, 0, 0], [1, 0, 0], [1, 1, 1]). 
+ @EntryPoint() + operation Main() : (Result[], Result[], Result[]) { + use registerA = Qubit[3]; // |000⟩ + CCNOT(registerA[0], registerA[1], registerA[2]); // |000⟩ + let resultsA = MeasureEachZ(registerA); + ResetAll(registerA); + + use registerB = Qubit[3]; // |000⟩ + X(registerB[0]); // |100⟩ + CCNOT(registerB[0], registerB[1], registerB[2]); // |100⟩ + let resultsB = MeasureEachZ(registerB); + ResetAll(registerB); + + use registerC = Qubit[3]; // |000⟩ + X(registerC[0]); // |100⟩ + X(registerC[1]); // |110⟩ + CCNOT(registerC[0], registerC[1], registerC[2]); // |111⟩ + let resultsC = MeasureEachZ(registerC); + ResetAll(registerC); + + return (resultsA, resultsB, resultsC); + } +} diff --git a/source/pip/tests-integration/resources/adaptive_rifla/input/IntrinsicCNOT.qs b/source/pip/tests-integration/resources/adaptive_rifla/input/IntrinsicCNOT.qs new file mode 100644 index 0000000000..a5b783e1e1 --- /dev/null +++ b/source/pip/tests-integration/resources/adaptive_rifla/input/IntrinsicCNOT.qs @@ -0,0 +1,23 @@ +namespace Test { + + import Std.Intrinsic.*; + import Std.Measurement.*; + + // Verifies the use of the CNOT quantum gate from Q#'s Microsoft.Quantum.Intrinsic namespace. + // Expected simulation output: ([0, 0], [1, 1]). 
+ @EntryPoint() + operation Main() : (Result[], Result[]) { + use registerA = Qubit[2]; // |00⟩ + CNOT(registerA[0], registerA[1]); // |00⟩ + let resultsA = MeasureEachZ(registerA); + ResetAll(registerA); + + use registerB = Qubit[2]; // |00⟩ + X(registerB[0]); // |10⟩ + CNOT(registerB[0], registerB[1]); // |11⟩ + let resultsB = MeasureEachZ(registerB); + ResetAll(registerB); + + return (resultsA, resultsB); + } +} diff --git a/source/pip/tests-integration/resources/adaptive_rifla/input/IntrinsicHIXYZ.qs b/source/pip/tests-integration/resources/adaptive_rifla/input/IntrinsicHIXYZ.qs new file mode 100644 index 0000000000..467b53f822 --- /dev/null +++ b/source/pip/tests-integration/resources/adaptive_rifla/input/IntrinsicHIXYZ.qs @@ -0,0 +1,47 @@ +namespace Test { + + import Std.Intrinsic.*; + import Std.Measurement.*; + + // Verifies the I, H, X, Y, and Z quantum gates from Q#'s Microsoft.Quantum.Intrinsic namespace. + // Expected simulation output: (1, 1, 1, 1, 1, 1). + @EntryPoint() + operation Main() : (Result, Result, Result, Result, Result, Result) { + // Exercise H. + use hTarget = Qubit(); // |0⟩ + H(hTarget); // |+⟩ + Z(hTarget); // |-⟩ + H(hTarget); // |1⟩ + let hResult = MResetZ(hTarget); + + // Exercise I. + use iTarget = Qubit(); // |0⟩ + X(iTarget); // |1⟩ + I(iTarget); // |1⟩ + let iResult = MResetZ(iTarget); + + // Exercise X. + use xTarget = Qubit(); // |0⟩ + X(xTarget); // |1⟩ + let xResult = MResetZ(xTarget); + + // Exercise Y. + use yTargetA = Qubit(); // |0⟩ + Y(yTargetA); // i|1⟩ + let yResultA = MResetZ(yTargetA); + use yTargetB = Qubit(); // |0⟩ + H(yTargetB); // |+⟩ + Y(yTargetB); // -i|-⟩ + H(yTargetB); // -i|1⟩ + let yResultB = MResetZ(yTargetB); + + // Exercise Z. 
+ use zTarget = Qubit(); // |0⟩ + H(zTarget); // |+⟩ + Z(zTarget); // |-⟩ + H(zTarget); // |1⟩ + let zResult = MResetZ(zTarget); + + return (hResult, iResult, xResult, yResultA, yResultB, zResult); + } +} diff --git a/source/pip/tests-integration/resources/adaptive_rifla/input/IntrinsicM.qs b/source/pip/tests-integration/resources/adaptive_rifla/input/IntrinsicM.qs new file mode 100644 index 0000000000..cf46449e1f --- /dev/null +++ b/source/pip/tests-integration/resources/adaptive_rifla/input/IntrinsicM.qs @@ -0,0 +1,19 @@ +namespace Test { + + import Std.Intrinsic.*; + import Std.Measurement.*; + + // Verifies the use of the M gate from Q#'s Microsoft.Quantum.Intrinsic namespace. + // Expected simulation output: (0, 1). + @EntryPoint() + operation Main() : (Result, Result) { + use qubitA = Qubit(); + let resultA = M(qubitA); + use qubitB = Qubit(); + X(qubitB); + let resultB = M(qubitB); + Reset(qubitA); + Reset(qubitB); + return (resultA, resultB); + } +} diff --git a/source/pip/tests-integration/resources/adaptive_rifla/input/IntrinsicMeasureWithBitFlipCode.qs b/source/pip/tests-integration/resources/adaptive_rifla/input/IntrinsicMeasureWithBitFlipCode.qs new file mode 100644 index 0000000000..2a5779b8b5 --- /dev/null +++ b/source/pip/tests-integration/resources/adaptive_rifla/input/IntrinsicMeasureWithBitFlipCode.qs @@ -0,0 +1,35 @@ +namespace Test { + + import Std.Intrinsic.*; + import Std.Measurement.*; + + // Verifies the use of the Measure operation from Q#'s Microsoft.Quantum.Intrinsic namespace. + // Expected simulation output: ([0, 0], [0, 1], [0, 0, 1]). + @EntryPoint() + operation Main() : (Result[], Result[], Result[]) { + use register = Qubit[3]; + Encode(register); + + // Verify parity between qubits. + let firstParity01 = Measure([PauliZ, PauliZ, PauliI], register); + let firtsParity12 = Measure([PauliI, PauliZ, PauliZ], register); + + // Verify parity between qubits after flipping one of them. 
+ X(register[2]); + let secondParity01 = Measure([PauliZ, PauliZ, PauliI], register); + let secondParity12 = Measure([PauliI, PauliZ, PauliZ], register); + + // Decode. + Adjoint Encode(register); + return ( + [firstParity01, firtsParity12], + [secondParity01, secondParity12], + [MResetZ(register[0]), MResetZ(register[1]), MResetZ(register[2])] + ); + } + + operation Encode(register : Qubit[]) : Unit is Adj { + CNOT(register[0], register[1]); + CNOT(register[0], register[2]); + } +} diff --git a/source/pip/tests-integration/resources/adaptive_rifla/input/IntrinsicMeasureWithPhaseFlipCode.qs b/source/pip/tests-integration/resources/adaptive_rifla/input/IntrinsicMeasureWithPhaseFlipCode.qs new file mode 100644 index 0000000000..81e69a1632 --- /dev/null +++ b/source/pip/tests-integration/resources/adaptive_rifla/input/IntrinsicMeasureWithPhaseFlipCode.qs @@ -0,0 +1,38 @@ +namespace Test { + + import Std.Intrinsic.*; + import Std.Measurement.*; + + // Verifies the use of the Measure operation from Q#'s Microsoft.Quantum.Intrinsic namespace. + // Expected simulation output: ([0, 0], [1, 1], [0, 1, 0]). + @EntryPoint() + operation Main() : (Result[], Result[], Result[]) { + use register = Qubit[3]; + Encode(register); + + // Verify parity between qubits. + let firstParity01 = Measure([PauliX, PauliX, PauliI], register); + let firtsParity12 = Measure([PauliI, PauliX, PauliX], register); + + // Verify parity between qubits after flipping one of them. + Z(register[1]); + let secondParity01 = Measure([PauliX, PauliX, PauliI], register); + let secondParity12 = Measure([PauliI, PauliX, PauliX], register); + + // Decode. 
+ Adjoint Encode(register); + return ( + [firstParity01, firtsParity12], + [secondParity01, secondParity12], + [MResetZ(register[0]), MResetZ(register[1]), MResetZ(register[2])] + ); + } + + operation Encode(register : Qubit[]) : Unit is Adj { + CNOT(register[0], register[1]); + CNOT(register[0], register[2]); + H(register[0]); + H(register[1]); + H(register[2]); + } +} diff --git a/source/pip/tests-integration/resources/adaptive_rifla/input/IntrinsicRotationsWithPeriod.qs b/source/pip/tests-integration/resources/adaptive_rifla/input/IntrinsicRotationsWithPeriod.qs new file mode 100644 index 0000000000..e91e7364a5 --- /dev/null +++ b/source/pip/tests-integration/resources/adaptive_rifla/input/IntrinsicRotationsWithPeriod.qs @@ -0,0 +1,63 @@ +namespace Test { + + import Std.Intrinsic.*; + import Std.Math.*; + import Std.Measurement.*; + + // Verifies the use of the rotation quantum operations from Q#'s Microsoft.Quantum.Intrinsic namespace. + // Expected simulation output: ([1, 1], [1, 1], [1, 1]). + @EntryPoint() + operation Main() : (Result[], Result[], Result[]) { + // The period for each rotation operator is 4π. This test initializes all qubits in a known state, performs 8 + // π/2 rotations over each axis, and verifies that the qubit is back at the initial known state after these + // rotations have been performed. + use rxRegister = Qubit[2]; + use ryRegister = Qubit[2]; + use rzRegister = Qubit[2]; + InitializeRxRegister(rxRegister); // |11⟩ + InitializeRyRegister(ryRegister); // (i|1⟩)(i|1⟩) + InitializeRzRegister(rzRegister); // |11⟩ + let rotationPeriod = 4.0 * PI(); + let stepsDouble = 8.0; + let stepsInt = 8; + let theta = rotationPeriod / stepsDouble; + for _ in 1..stepsInt { + // Test both R and its corresponding axis rotation gate (Rx, Ry, Rz) using the same register. 
+ Rx(theta, rxRegister[0]); + R(PauliX, theta, rxRegister[1]); + Ry(theta, ryRegister[0]); + R(PauliY, theta, ryRegister[1]); + Rz(theta, rzRegister[0]); + R(PauliZ, theta, rzRegister[1]); + } + + let rxResult = MResetZ2Register(rxRegister); + let ryResult = MResetZ2Register(ryRegister); + let rzResult = MResetZ2Register(rzRegister); + return (rxResult, ryResult, rzResult); + } + + operation InitializeRxRegister(register : Qubit[]) : Unit { + for qubit in register { + X(qubit); + } + } + + operation InitializeRyRegister(register : Qubit[]) : Unit { + for qubit in register { + Y(qubit); + } + } + + operation InitializeRzRegister(register : Qubit[]) : Unit { + for qubit in register { + H(qubit); + Z(qubit); + H(qubit); + } + } + + operation MResetZ2Register(register : Qubit[]) : Result[] { + return [MResetZ(register[0]), MResetZ(register[1])]; + } +} diff --git a/source/pip/tests-integration/resources/adaptive_rifla/input/IntrinsicSTSWAP.qs b/source/pip/tests-integration/resources/adaptive_rifla/input/IntrinsicSTSWAP.qs new file mode 100644 index 0000000000..181633c50b --- /dev/null +++ b/source/pip/tests-integration/resources/adaptive_rifla/input/IntrinsicSTSWAP.qs @@ -0,0 +1,37 @@ +namespace Test { + + import Std.Intrinsic.*; + import Std.Measurement.*; + + // Verifies the use of the S, SWAP and T operations from Q#'s Microsoft.Quantum.Intrinsic namespace. + // Expected simulation output: (1, 1, [1, 0]). + @EntryPoint() + operation Main() : (Result, Result, Result[]) { + // Exercise S operation. + // N.B. The S operation is equivalent to sqrt(Z). + use sTarget = Qubit(); // |0⟩ + H(sTarget); // |+⟩ + S(sTarget); // sqrt(Z)|+⟩ + S(sTarget); // sqrt(Z)^2|+⟩ ≡ Z|+⟩ ≡ |-⟩ + H(sTarget); // |1⟩ + let sResult = MResetZ(sTarget); + + // Exercise T operation. + // N.B. The T operation is equivalent to sqrt(S). 
+ use tTarget = Qubit(); // |0⟩ + H(tTarget); // |+⟩ + T(tTarget); // sqrt(S)|+⟩ + T(tTarget); // sqrt(S)^2|+⟩ + T(tTarget); // sqrt(S)^3|+⟩ + T(tTarget); // sqrt(S)^4|+⟩ ≡ S^2|+⟩ ≡ sqrt(Z)^2|+⟩ ≡ Z|+⟩ ≡ |-⟩ + H(tTarget); // |1⟩ + let tResult = MResetZ(tTarget); + + // Exercise SWAP operation. + use swapRegister = Qubit[2]; + X(swapRegister[1]); + SWAP(swapRegister[0], swapRegister[1]); + let swapResults = [MResetZ(swapRegister[0]), MResetZ(swapRegister[1])]; + return (sResult, tResult, swapResults); + } +} diff --git a/source/pip/tests-integration/resources/adaptive_rifla/input/MeasureAndReuse.qs b/source/pip/tests-integration/resources/adaptive_rifla/input/MeasureAndReuse.qs new file mode 100644 index 0000000000..7870d64e7b --- /dev/null +++ b/source/pip/tests-integration/resources/adaptive_rifla/input/MeasureAndReuse.qs @@ -0,0 +1,24 @@ +namespace Test { + + import Std.Intrinsic.*; + import Std.Measurement.*; + + // Reusing a qubit after `M` should work on supported platforms, be replaced by entanglement with auxiliary on others. + // Reusing a qubit after `Reset` should work on supported platforms, be replaced by newly allocated qubit on others. + // Expected output: (0, 1, 1, 0, 0) + @EntryPoint() + operation Main() : (Result, Result, Result, Result, Result) { + use q = Qubit(); + let r1 = M(q); + X(q); + let r2 = M(q); + H(q); + let r3 = MResetX(q); + let r4 = MResetZ(q); + H(q); + Adjoint S(q); + H(q); + let r5 = MResetY(q); + return (r1, r2, r3, r4, r5); + } +} diff --git a/source/pip/tests-integration/resources/adaptive_rifla/input/MeasurementComparison.qs b/source/pip/tests-integration/resources/adaptive_rifla/input/MeasurementComparison.qs new file mode 100644 index 0000000000..aecc5b7a4b --- /dev/null +++ b/source/pip/tests-integration/resources/adaptive_rifla/input/MeasurementComparison.qs @@ -0,0 +1,18 @@ +namespace Test { + + import Std.Arrays.*; + import Std.Intrinsic.*; + + // Demonstrates use of measurement comparisons, including ternary. 
+ // Expected output: (true, false, true, true) + @EntryPoint() + operation Main() : (Bool, Bool, Bool, Bool) { + use (q0, q1) = (Qubit(), Qubit()); + X(q0); + CNOT(q0, q1); + let (r0, r1) = (M(q0), M(q1)); + Reset(q0); + Reset(q1); + return (r0 == One, r1 == Zero, r0 == r1, r0 == Zero ? false | true); + } +} diff --git a/source/pip/tests-integration/resources/adaptive_rifla/input/NestedBranching.qs b/source/pip/tests-integration/resources/adaptive_rifla/input/NestedBranching.qs new file mode 100644 index 0000000000..52108d70ee --- /dev/null +++ b/source/pip/tests-integration/resources/adaptive_rifla/input/NestedBranching.qs @@ -0,0 +1,143 @@ +namespace Test { + + import Std.Intrinsic.*; + import Std.Math.*; + import Std.Measurement.*; + + // Demonstrates nested branching. + // Expected output: (([1, 1, 0], 6), ([1, 1, 1, 0], true)) + @EntryPoint() + operation Main() : ((Result[], Int), (Result[], Bool)) { + // Nested branching using bool literals. + use registerA = Qubit[3]; + if true { + X(registerA[0]); + if true { + X(registerA[1]); + if false { + X(registerA[2]); + } + } + } + let registerAMeasurements = MeasureEachZ(registerA); + + // Nested branching using measurement results to control the values of integers with no top-level branching + // coming from elif instructions. 
+ mutable a = 0; + if registerAMeasurements[0] == Zero { + if registerAMeasurements[1] == Zero and registerAMeasurements[2] == Zero { + set a = 0; + } elif registerAMeasurements[1] == Zero and registerAMeasurements[2] == One { + set a = 1; + } elif registerAMeasurements[1] == One and registerAMeasurements[2] == Zero { + set a = 2; + } else { + set a = 3; + } + } else { + if registerAMeasurements[1] == Zero and registerAMeasurements[2] == Zero { + set a = 4; + } elif registerAMeasurements[1] == Zero and registerAMeasurements[2] == One { + set a = 5; + } elif registerAMeasurements[1] == One and registerAMeasurements[2] == Zero { + set a = 6; + } else { + set a = 7; + } + } + ResetAll(registerA); + + // Triple-nested branches with multiple quantum instructions inside using measurement results for conditions. + use registerB = Qubit[4]; + use target = Qubit(); + X(target); + SetIntToQubitRegister(7, registerB); + let registerBMeasurements = MeasureEachZ(registerB); + if registerBMeasurements[0] == Zero { + if registerBMeasurements[1] == Zero { + if registerBMeasurements[2] == Zero { + I(target); + I(target); + } else { + X(target); + X(target); + } + } else { + if registerBMeasurements[2] == Zero { + Y(target); + Y(target); + } else { + Z(target); + Z(target); + } + } + } elif registerBMeasurements[0] == Zero and registerBMeasurements[1] == One { + if registerBMeasurements[1] == Zero { + if registerBMeasurements[2] == Zero { + I(target); + I(target); + } else { + X(target); + X(target); + } + } else { + if registerBMeasurements[2] == Zero { + Y(target); + Y(target); + } else { + Z(target); + Z(target); + } + } + } elif registerBMeasurements[0] == One and registerBMeasurements[1] == Zero { + if registerBMeasurements[1] == Zero { + if registerBMeasurements[2] == Zero { + I(target); + I(target); + } else { + X(target); + X(target); + } + } else { + if registerBMeasurements[2] == Zero { + Y(target); + Y(target); + } else { + Z(target); + Z(target); + } + } + } else { + if 
registerBMeasurements[1] == Zero { + if registerBMeasurements[2] == Zero { + I(target); + I(target); + } else { + X(target); + X(target); + } + } else { + if registerBMeasurements[2] == Zero { + Y(target); + Y(target); + } else { + Z(target); + Z(target); + } + } + } + ResetAll(registerB); + + return ((registerAMeasurements, a), (registerBMeasurements, MResetZ(target) == One)); + } + + operation SetIntToQubitRegister(integer : Int, register : Qubit[]) : Unit { + mutable bits = integer; + for q in register { + if (bits &&& 1) == 1 { + X(q); + } + set bits = bits >>> 1; + } + } +} diff --git a/source/pip/tests-integration/resources/adaptive_rifla/input/RandomBit.qs b/source/pip/tests-integration/resources/adaptive_rifla/input/RandomBit.qs new file mode 100644 index 0000000000..b2ebaca677 --- /dev/null +++ b/source/pip/tests-integration/resources/adaptive_rifla/input/RandomBit.qs @@ -0,0 +1,13 @@ +namespace Test { + + import Std.Intrinsic.*; + + @EntryPoint() + operation Main() : Result { + use q = Qubit(); + H(q); + let r = M(q); + Reset(q); + return r; + } +} diff --git a/source/pip/tests-integration/resources/adaptive_rifla/input/SampleTeleport.qs b/source/pip/tests-integration/resources/adaptive_rifla/input/SampleTeleport.qs new file mode 100644 index 0000000000..59d639a46b --- /dev/null +++ b/source/pip/tests-integration/resources/adaptive_rifla/input/SampleTeleport.qs @@ -0,0 +1,43 @@ +namespace Test { + import Std.Intrinsic.*; + import Std.Arrays.*; + import Std.Measurement.*; + + @EntryPoint() + operation Main() : Result { + use bellPair = Qubit[2] { + H(bellPair[0]); + CNOT(bellPair[0], bellPair[1]); + use qubit = Qubit() { + EncodeValue(true, PauliX, qubit); + CNOT(qubit, bellPair[0]); + H(qubit); + if (M(bellPair[0]) == One) { + X(bellPair[1]); + } + if (MResetZ(qubit) == One) { + Z(bellPair[1]); + } + let mres = Measure([PauliX], [bellPair[1]]); + ResetAll(bellPair); + return mres; + } + } + } + + operation EncodeValue(value : Bool, basis : Pauli, qubit : 
Qubit) : Unit { + if (value) { + X(qubit); + } + PreparePauliEigenstate(basis, qubit); + } + + operation PreparePauliEigenstate(basis : Pauli, qubit : Qubit) : Unit { + if (basis == PauliX) { + H(qubit); + } elif (basis == PauliY) { + H(qubit); + S(qubit); + } + } +} diff --git a/source/pip/tests-integration/resources/adaptive_rifla/input/ShortcuttingMeasurement.qs b/source/pip/tests-integration/resources/adaptive_rifla/input/ShortcuttingMeasurement.qs new file mode 100644 index 0000000000..d54c7f0e2f --- /dev/null +++ b/source/pip/tests-integration/resources/adaptive_rifla/input/ShortcuttingMeasurement.qs @@ -0,0 +1,22 @@ +namespace Test { + + import Std.Intrinsic.*; + + // Demonstrates shortcutting of measurement ops in conditionals. + // Expected output: (0, 0) + @EntryPoint() + operation Main() : (Result, Result) { + use (q0, q1) = (Qubit(), Qubit()); + X(q0); + CNOT(q0, q1); + if M(q0) != Zero or M(q1) != Zero { + X(q0); + X(q1); + } + let (r0, r1) = (M(q0), M(q1)); + Reset(q0); + Reset(q1); + return (r0, r1); + } + +} diff --git a/source/pip/tests-integration/resources/adaptive_rifla/input/Slicing.qs b/source/pip/tests-integration/resources/adaptive_rifla/input/Slicing.qs new file mode 100644 index 0000000000..d8d40f92da --- /dev/null +++ b/source/pip/tests-integration/resources/adaptive_rifla/input/Slicing.qs @@ -0,0 +1,20 @@ +namespace Test { + + import Std.Canon.*; + import Std.Intrinsic.*; + import Std.Measurement.*; + + // Verifies loop over subset of index range, constant folding division of array length, array slicing, qubit reindexing, reverse iteration. + // Expected output: [1, 1, 1, 1, 1]. + @EntryPoint() + operation Main() : Result[] { + use qs = Qubit[10]; + for i in (Length(qs) - 1).. 
-1..(Length(qs) / 2) { + X(qs[i]); + } + let results = MeasureEachZ(qs[Length(qs) / 2...]); + ResetAll(qs); + return results; + } + +} diff --git a/source/pip/tests-integration/resources/adaptive_rifla/input/SuperdenseCoding.qs b/source/pip/tests-integration/resources/adaptive_rifla/input/SuperdenseCoding.qs new file mode 100644 index 0000000000..9e47b4921d --- /dev/null +++ b/source/pip/tests-integration/resources/adaptive_rifla/input/SuperdenseCoding.qs @@ -0,0 +1,69 @@ +/// # Sample +/// Superdense Coding +/// +/// # Description +/// Superdense coding is a quantum communication protocol to communicate a +/// number of classical bits of information by only transmitting a smaller +/// number of qubits. +/// +/// This Q# program implements superdense coding to send two classical bits of +/// information. +namespace Test { + @EntryPoint() + operation Main() : ((Bool, Bool), (Bool, Bool)) { + use (aliceQubit, bobQubit) = (Qubit(), Qubit()); + + // The protocol starts with the preparation of an entangled state, which + // is later shared between Alice and Bob. + CreateEntangledPair(aliceQubit, bobQubit); + + // Alice encodes 2 random bits in her qubit of the entangled pair. + let sourceBit1 = DrawRandomBit(); + let sourceBit2 = DrawRandomBit(); + SuperdenseEncode(sourceBit1, sourceBit2, aliceQubit); + + // "Send" Alice's qubit to Bob and let Bob decode two bits. + let (decodedBit1, decodedBit2) = SuperdenseDecode(aliceQubit, bobQubit); + + ResetAll([aliceQubit, bobQubit]); + return ((sourceBit1, sourceBit2), (decodedBit1, decodedBit2)); + } + + // Generates random bit using an auxiliary qubit + operation DrawRandomBit() : Bool { + use q = Qubit(); + H(q); + return MResetZ(q) == One; + } + + // Prepares an entangled state: 1/sqrt(2)(|00〉 + |11〉) + operation CreateEntangledPair(q1 : Qubit, q2 : Qubit) : Unit { + H(q1); + CNOT(q1, q2); + } + + // Encodes two bits of information in one qubit. The qubit is expected to + // be a half of an entangled pair. 
+ operation SuperdenseEncode(bit1 : Bool, bit2 : Bool, qubit : Qubit) : Unit { + if (bit1) { + Z(qubit); + } + if (bit2) { + X(qubit); + } + } + + // Decodes two bits of information from a joint state of two qubits. + operation SuperdenseDecode(qubit1 : Qubit, qubit2 : Qubit) : (Bool, Bool) { + // If bit1 in the encoding procedure was true we applied Z to the first + // qubit which anti-commutes with XX, therefore bit1 can be read out + // from XX measurement. + let bit1 = Measure([PauliX, PauliX], [qubit1, qubit2]) == One; + + // If bit2 in the encoding procedure was true we applied X to the first + // qubit which anti-commutes with ZZ, therefore bit2 can be read out + // from ZZ measurement. + let bit2 = Measure([PauliZ, PauliZ], [qubit1, qubit2]) == One; + return (bit1, bit2); + } +} diff --git a/source/pip/tests-integration/resources/adaptive_rifla/input/SwitchHandling.qs b/source/pip/tests-integration/resources/adaptive_rifla/input/SwitchHandling.qs new file mode 100644 index 0000000000..135dd93dfe --- /dev/null +++ b/source/pip/tests-integration/resources/adaptive_rifla/input/SwitchHandling.qs @@ -0,0 +1,37 @@ +namespace Test { + + import Std.Intrinsic.*; + import Std.Measurement.*; + import Std.Math.*; + + // Demonstrates using a computed integer to do a branch that gets turned into a switch instruction + // (should get transformed back into nested branches). 
+ // Expected output: 1 + @EntryPoint() + operation Main() : Result { + use qs = Qubit[2]; + for q in qs { + X(q); + } + mutable rand = 0; + for r in MeasureEachZ(qs) { + set rand <<<= 1; + if r == One { + set rand += 1; + } + } + ResetAll(qs); + + use q = Qubit(); + if rand == 0 { + R(PauliI, PI(), q); + } elif rand == 1 { + R(PauliY, PI(), q); + } elif rand == 2 { + R(PauliZ, PI(), q); + } else { + R(PauliX, PI(), q); + } + return MResetZ(q); + } +} diff --git a/source/pip/tests-integration/resources/adaptive_rifla/input/ThreeQubitRepetitionCode.qs b/source/pip/tests-integration/resources/adaptive_rifla/input/ThreeQubitRepetitionCode.qs new file mode 100644 index 0000000000..08088bc1af --- /dev/null +++ b/source/pip/tests-integration/resources/adaptive_rifla/input/ThreeQubitRepetitionCode.qs @@ -0,0 +1,112 @@ +namespace Test { + import Std.Math.*; + import Std.Diagnostics.*; + + @EntryPoint() + operation Main() : (Bool, Int) { + // A qubit register that will be used for encoding. + use encodedRegister = Qubit[3]; + + // Initialize the first qubit in the register to a |-〉 state. + H(encodedRegister[0]); + Z(encodedRegister[0]); + + // Apply several unitary operations to the encoded qubits + // performing bit flip detection and correction between each application. + mutable bitFlipCount = 0; + within { + // The 3 qubit register is used as a repetition code. + Encode(encodedRegister); + } apply { + let iterations = 5; + for _ in 1..iterations { + // Apply a unitary operation to the encoded register that should + // effectively perform an identity operation but may be noisy + // on the quantum hardware and introduce errors. + ApplyRotationalIdentity(encodedRegister); + + // Measure the bit flip error syndrome, revert the bit flip if needed, + // and increase the count if a bit flip occurred. 
+ let (parity01, parity12) = MeasureBitFlipSyndrome(encodedRegister); + let bitFlipReverted = RevertBitFlip(encodedRegister, parity01, parity12); + if (bitFlipReverted) { + set bitFlipCount += 1; + } + } + } + + // Transform the qubit to the |1〉 state and measure it in the computational basis. + H(encodedRegister[0]); + let result = MResetZ(encodedRegister[0]) == One; + // Note that the qubit at index 0 is already reset by MResetZ operation. + // There's no need to reset it again. Also, MResetZ operation is + // preferable to the measurement, which is followed by Reset as MResetZ + // may be directly implemented by the hardware. + ResetAll(encodedRegister[1...]); + + // The output of the program is a boolean-integer tuple where the boolean + // represents whether the qubit measurement result was the expected one + // and the integer represents the number of times bit flips occurred + // throughout the program. + return (result, bitFlipCount); + } + + /// # Summary + /// Apply four 𝜋/2 rotations about the x-axis to all qubits in the `register`. + /// + /// # Description + /// This operation implements an identity operation using rotations about the x-axis. + /// The Rx operation has a period of 2𝜋. Using it to apply four 𝜋/2 rotations + /// about the x-axis, effectively leaves the qubit register in its original state. + /// However it is likely to be very noisy on a quantum hardware. + operation ApplyRotationalIdentity(register : Qubit[]) : Unit is Adj { + let theta = PI() * 0.5; + for i in 1..4 { + for qubit in register { + Rx(theta, qubit); + } + } + } + + /// # Summary + /// Reverts bit flips in the `register` based on `parity01` and `parity12`. 
+ operation RevertBitFlip(register : Qubit[], parity01 : Result, parity12 : Result) : Bool { + mutable result = true; + if parity01 == One { + if parity12 == One { + X(register[1]); + } else { + X(register[0]); + } + } else { + if parity12 == One { + X(register[2]); + } else { + set result = false; + } + } + return result; + } + + operation Encode(register : Qubit[]) : Unit is Adj { + CNOT(register[0], register[1]); + CNOT(register[0], register[2]); + } + + /// # Summary + /// Measures the bit flip syndrome by checking the parities between + /// qubits 0 and 1, and between qubits 1 and 2. + operation MeasureBitFlipSyndrome(encodedRegister : Qubit[]) : (Result, Result) { + Fact(Length(encodedRegister) == 3, "Encoded register must be of length 3."); + use auxiliaryRegister = Qubit[2]; + + CNOT(encodedRegister[0], auxiliaryRegister[0]); + CNOT(encodedRegister[1], auxiliaryRegister[0]); + CNOT(encodedRegister[1], auxiliaryRegister[1]); + CNOT(encodedRegister[2], auxiliaryRegister[1]); + + let parity01 = MResetZ(auxiliaryRegister[0]); + let parity12 = MResetZ(auxiliaryRegister[1]); + return (parity01, parity12); + } +} diff --git a/source/pip/tests-integration/resources/adaptive_rifla/input/WithinApply.qs b/source/pip/tests-integration/resources/adaptive_rifla/input/WithinApply.qs new file mode 100644 index 0000000000..fa5a41c0bd --- /dev/null +++ b/source/pip/tests-integration/resources/adaptive_rifla/input/WithinApply.qs @@ -0,0 +1,25 @@ +namespace Test { + + import Std.Intrinsic.*; + import Std.Measurement.*; + + // Verifies use of Q# within apply construct. 
+ // Expected simulation output: [0, 0, 1] + @EntryPoint() + operation Main() : Result[] { + use target = Qubit(); + use controls = Qubit[2]; + within { + for q in controls { + X(q); + } + } apply { + Controlled X(controls, target); + } + + let results = MeasureEachZ(controls + [target]); + ResetAll(controls); + Reset(target); + return results; + } +} diff --git a/source/pip/tests-integration/resources/adaptive_rifla/output/ArithmeticOps.ll b/source/pip/tests-integration/resources/adaptive_rifla/output/ArithmeticOps.ll new file mode 100644 index 0000000000..4d38c5fd05 --- /dev/null +++ b/source/pip/tests-integration/resources/adaptive_rifla/output/ArithmeticOps.ll @@ -0,0 +1,150 @@ +@0 = internal constant [4 x i8] c"0_t\00" +@1 = internal constant [6 x i8] c"1_t0i\00" +@2 = internal constant [6 x i8] c"2_t1i\00" +@3 = internal constant [6 x i8] c"3_t2i\00" +@4 = internal constant [6 x i8] c"4_t3i\00" + +define i64 @ENTRYPOINT__main() #0 { +block_0: + %var_0 = alloca i64 + %var_1 = alloca i64 + %var_2 = alloca i64 + %var_3 = alloca i64 + call void @__quantum__rt__initialize(ptr null) + store i64 0, ptr %var_0 + store i64 0, ptr %var_1 + store i64 10, ptr %var_2 + store i64 1, ptr %var_3 + call void @__quantum__qis__x__body(ptr inttoptr (i64 0 to ptr)) + call void @__quantum__qis__x__body(ptr inttoptr (i64 1 to ptr)) + call void @__quantum__qis__x__body(ptr inttoptr (i64 2 to ptr)) + call void @__quantum__qis__x__body(ptr inttoptr (i64 3 to ptr)) + call void @__quantum__qis__x__body(ptr inttoptr (i64 4 to ptr)) + call void @__quantum__qis__m__body(ptr inttoptr (i64 0 to ptr), ptr inttoptr (i64 0 to ptr)) + call void @__quantum__qis__m__body(ptr inttoptr (i64 1 to ptr), ptr inttoptr (i64 1 to ptr)) + call void @__quantum__qis__m__body(ptr inttoptr (i64 2 to ptr), ptr inttoptr (i64 2 to ptr)) + call void @__quantum__qis__m__body(ptr inttoptr (i64 3 to ptr), ptr inttoptr (i64 3 to ptr)) + call void @__quantum__qis__m__body(ptr inttoptr (i64 4 to ptr), ptr inttoptr (i64 
4 to ptr)) + %var_8 = call i1 @__quantum__rt__read_result(ptr inttoptr (i64 0 to ptr)) + br i1 %var_8, label %block_1, label %block_2 +block_1: + store i64 1, ptr %var_0 + store i64 5, ptr %var_1 + store i64 8, ptr %var_2 + store i64 3, ptr %var_3 + br label %block_2 +block_2: + %var_10 = call i1 @__quantum__rt__read_result(ptr inttoptr (i64 1 to ptr)) + br i1 %var_10, label %block_3, label %block_4 +block_3: + %var_66 = load i64, ptr %var_0 + %var_12 = add i64 %var_66, 1 + store i64 %var_12, ptr %var_0 + %var_68 = load i64, ptr %var_1 + %var_13 = add i64 %var_68, 5 + store i64 %var_13, ptr %var_1 + %var_70 = load i64, ptr %var_2 + %var_14 = sub i64 %var_70, 2 + store i64 %var_14, ptr %var_2 + %var_72 = load i64, ptr %var_3 + %var_15 = mul i64 %var_72, 3 + store i64 %var_15, ptr %var_3 + br label %block_4 +block_4: + %var_16 = call i1 @__quantum__rt__read_result(ptr inttoptr (i64 2 to ptr)) + br i1 %var_16, label %block_5, label %block_6 +block_5: + %var_58 = load i64, ptr %var_0 + %var_18 = add i64 %var_58, 1 + store i64 %var_18, ptr %var_0 + %var_60 = load i64, ptr %var_1 + %var_19 = add i64 %var_60, 5 + store i64 %var_19, ptr %var_1 + %var_62 = load i64, ptr %var_2 + %var_20 = sub i64 %var_62, 2 + store i64 %var_20, ptr %var_2 + %var_64 = load i64, ptr %var_3 + %var_21 = mul i64 %var_64, 3 + store i64 %var_21, ptr %var_3 + br label %block_6 +block_6: + %var_22 = call i1 @__quantum__rt__read_result(ptr inttoptr (i64 3 to ptr)) + br i1 %var_22, label %block_7, label %block_8 +block_7: + %var_50 = load i64, ptr %var_0 + %var_24 = add i64 %var_50, 1 + store i64 %var_24, ptr %var_0 + %var_52 = load i64, ptr %var_1 + %var_25 = add i64 %var_52, 5 + store i64 %var_25, ptr %var_1 + %var_54 = load i64, ptr %var_2 + %var_26 = sub i64 %var_54, 2 + store i64 %var_26, ptr %var_2 + %var_56 = load i64, ptr %var_3 + %var_27 = mul i64 %var_56, 3 + store i64 %var_27, ptr %var_3 + br label %block_8 +block_8: + %var_28 = call i1 @__quantum__rt__read_result(ptr inttoptr (i64 4 to 
ptr)) + br i1 %var_28, label %block_9, label %block_10 +block_9: + %var_42 = load i64, ptr %var_0 + %var_30 = add i64 %var_42, 1 + store i64 %var_30, ptr %var_0 + %var_44 = load i64, ptr %var_1 + %var_31 = add i64 %var_44, 5 + store i64 %var_31, ptr %var_1 + %var_46 = load i64, ptr %var_2 + %var_32 = sub i64 %var_46, 2 + store i64 %var_32, ptr %var_2 + %var_48 = load i64, ptr %var_3 + %var_33 = mul i64 %var_48, 3 + store i64 %var_33, ptr %var_3 + br label %block_10 +block_10: + call void @__quantum__qis__reset__body(ptr inttoptr (i64 0 to ptr)) + call void @__quantum__qis__reset__body(ptr inttoptr (i64 1 to ptr)) + call void @__quantum__qis__reset__body(ptr inttoptr (i64 2 to ptr)) + call void @__quantum__qis__reset__body(ptr inttoptr (i64 3 to ptr)) + call void @__quantum__qis__reset__body(ptr inttoptr (i64 4 to ptr)) + call void @__quantum__rt__tuple_record_output(i64 4, ptr @0) + %var_38 = load i64, ptr %var_0 + call void @__quantum__rt__int_record_output(i64 %var_38, ptr @1) + %var_39 = load i64, ptr %var_1 + call void @__quantum__rt__int_record_output(i64 %var_39, ptr @2) + %var_40 = load i64, ptr %var_2 + call void @__quantum__rt__int_record_output(i64 %var_40, ptr @3) + %var_41 = load i64, ptr %var_3 + call void @__quantum__rt__int_record_output(i64 %var_41, ptr @4) + ret i64 0 +} + +declare void @__quantum__rt__initialize(ptr) + +declare void @__quantum__qis__x__body(ptr) + +declare void @__quantum__qis__m__body(ptr, ptr) #1 + +declare i1 @__quantum__rt__read_result(ptr) + +declare void @__quantum__qis__reset__body(ptr) #1 + +declare void @__quantum__rt__tuple_record_output(i64, ptr) + +declare void @__quantum__rt__int_record_output(i64, ptr) + +attributes #0 = { "entry_point" "output_labeling_schema" "qir_profiles"="adaptive_profile" "required_num_qubits"="5" "required_num_results"="5" } +attributes #1 = { "irreversible" } + +; module flags + +!llvm.module.flags = !{!0, !1, !2, !3, !4, !5, !6, !7} + +!0 = !{i32 1, !"qir_major_version", i32 2} +!1 = !{i32 
7, !"qir_minor_version", i32 1} +!2 = !{i32 1, !"dynamic_qubit_management", i1 false} +!3 = !{i32 1, !"dynamic_result_management", i1 false} +!4 = !{i32 5, !"int_computations", !{!"i64"}} +!5 = !{i32 5, !"float_computations", !{!"double"}} +!6 = !{i32 7, !"backwards_branching", i2 3} +!7 = !{i32 1, !"arrays", i1 true} diff --git a/source/pip/tests-integration/resources/adaptive_rifla/output/ArithmeticOps.out b/source/pip/tests-integration/resources/adaptive_rifla/output/ArithmeticOps.out new file mode 100644 index 0000000000..9ceaf2bfde --- /dev/null +++ b/source/pip/tests-integration/resources/adaptive_rifla/output/ArithmeticOps.out @@ -0,0 +1,12 @@ +START +METADATA entry_point +METADATA output_labeling_schema +METADATA qir_profiles adaptive_profile +METADATA required_num_qubits 5 +METADATA required_num_results 5 +OUTPUT TUPLE 4 0_t +OUTPUT INT 5 1_t0i +OUTPUT INT 25 2_t1i +OUTPUT INT 0 3_t2i +OUTPUT INT 243 4_t3i +END 0 diff --git a/source/pip/tests-integration/resources/adaptive_rifla/output/BernsteinVaziraniNISQ.ll b/source/pip/tests-integration/resources/adaptive_rifla/output/BernsteinVaziraniNISQ.ll new file mode 100644 index 0000000000..6f42df0635 --- /dev/null +++ b/source/pip/tests-integration/resources/adaptive_rifla/output/BernsteinVaziraniNISQ.ll @@ -0,0 +1,71 @@ +@0 = internal constant [4 x i8] c"0_a\00" +@1 = internal constant [6 x i8] c"1_a0r\00" +@2 = internal constant [6 x i8] c"2_a1r\00" +@3 = internal constant [6 x i8] c"3_a2r\00" +@4 = internal constant [6 x i8] c"4_a3r\00" +@5 = internal constant [6 x i8] c"5_a4r\00" + +define i64 @ENTRYPOINT__main() #0 { +block_0: + call void @__quantum__rt__initialize(ptr null) + call void @__quantum__qis__x__body(ptr inttoptr (i64 5 to ptr)) + call void @__quantum__qis__h__body(ptr inttoptr (i64 0 to ptr)) + call void @__quantum__qis__h__body(ptr inttoptr (i64 1 to ptr)) + call void @__quantum__qis__h__body(ptr inttoptr (i64 2 to ptr)) + call void @__quantum__qis__h__body(ptr inttoptr (i64 3 to ptr)) + call 
void @__quantum__qis__h__body(ptr inttoptr (i64 4 to ptr)) + call void @__quantum__qis__h__body(ptr inttoptr (i64 5 to ptr)) + call void @__quantum__qis__cx__body(ptr inttoptr (i64 0 to ptr), ptr inttoptr (i64 5 to ptr)) + call void @__quantum__qis__cx__body(ptr inttoptr (i64 2 to ptr), ptr inttoptr (i64 5 to ptr)) + call void @__quantum__qis__cx__body(ptr inttoptr (i64 4 to ptr), ptr inttoptr (i64 5 to ptr)) + call void @__quantum__qis__h__body(ptr inttoptr (i64 4 to ptr)) + call void @__quantum__qis__h__body(ptr inttoptr (i64 3 to ptr)) + call void @__quantum__qis__h__body(ptr inttoptr (i64 2 to ptr)) + call void @__quantum__qis__h__body(ptr inttoptr (i64 1 to ptr)) + call void @__quantum__qis__h__body(ptr inttoptr (i64 0 to ptr)) + call void @__quantum__qis__mresetz__body(ptr inttoptr (i64 0 to ptr), ptr inttoptr (i64 0 to ptr)) + call void @__quantum__qis__mresetz__body(ptr inttoptr (i64 1 to ptr), ptr inttoptr (i64 1 to ptr)) + call void @__quantum__qis__mresetz__body(ptr inttoptr (i64 2 to ptr), ptr inttoptr (i64 2 to ptr)) + call void @__quantum__qis__mresetz__body(ptr inttoptr (i64 3 to ptr), ptr inttoptr (i64 3 to ptr)) + call void @__quantum__qis__mresetz__body(ptr inttoptr (i64 4 to ptr), ptr inttoptr (i64 4 to ptr)) + call void @__quantum__qis__reset__body(ptr inttoptr (i64 5 to ptr)) + call void @__quantum__rt__array_record_output(i64 5, ptr @0) + call void @__quantum__rt__result_record_output(ptr inttoptr (i64 0 to ptr), ptr @1) + call void @__quantum__rt__result_record_output(ptr inttoptr (i64 1 to ptr), ptr @2) + call void @__quantum__rt__result_record_output(ptr inttoptr (i64 2 to ptr), ptr @3) + call void @__quantum__rt__result_record_output(ptr inttoptr (i64 3 to ptr), ptr @4) + call void @__quantum__rt__result_record_output(ptr inttoptr (i64 4 to ptr), ptr @5) + ret i64 0 +} + +declare void @__quantum__rt__initialize(ptr) + +declare void @__quantum__qis__x__body(ptr) + +declare void @__quantum__qis__h__body(ptr) + +declare void 
@__quantum__qis__cx__body(ptr, ptr) + +declare void @__quantum__qis__mresetz__body(ptr, ptr) #1 + +declare void @__quantum__qis__reset__body(ptr) #1 + +declare void @__quantum__rt__array_record_output(i64, ptr) + +declare void @__quantum__rt__result_record_output(ptr, ptr) + +attributes #0 = { "entry_point" "output_labeling_schema" "qir_profiles"="adaptive_profile" "required_num_qubits"="6" "required_num_results"="5" } +attributes #1 = { "irreversible" } + +; module flags + +!llvm.module.flags = !{!0, !1, !2, !3, !4, !5, !6, !7} + +!0 = !{i32 1, !"qir_major_version", i32 2} +!1 = !{i32 7, !"qir_minor_version", i32 1} +!2 = !{i32 1, !"dynamic_qubit_management", i1 false} +!3 = !{i32 1, !"dynamic_result_management", i1 false} +!4 = !{i32 5, !"int_computations", !{!"i64"}} +!5 = !{i32 5, !"float_computations", !{!"double"}} +!6 = !{i32 7, !"backwards_branching", i2 3} +!7 = !{i32 1, !"arrays", i1 true} diff --git a/source/pip/tests-integration/resources/adaptive_rifla/output/BernsteinVaziraniNISQ.out b/source/pip/tests-integration/resources/adaptive_rifla/output/BernsteinVaziraniNISQ.out new file mode 100644 index 0000000000..fc41056ddf --- /dev/null +++ b/source/pip/tests-integration/resources/adaptive_rifla/output/BernsteinVaziraniNISQ.out @@ -0,0 +1,13 @@ +START +METADATA entry_point +METADATA output_labeling_schema +METADATA qir_profiles adaptive_profile +METADATA required_num_qubits 6 +METADATA required_num_results 5 +OUTPUT ARRAY 5 0_a +OUTPUT RESULT 1 1_a0r +OUTPUT RESULT 0 2_a1r +OUTPUT RESULT 1 3_a2r +OUTPUT RESULT 0 4_a3r +OUTPUT RESULT 1 5_a4r +END 0 diff --git a/source/pip/tests-integration/resources/adaptive_rifla/output/ConstantFolding.ll b/source/pip/tests-integration/resources/adaptive_rifla/output/ConstantFolding.ll new file mode 100644 index 0000000000..b1a1bd3938 --- /dev/null +++ b/source/pip/tests-integration/resources/adaptive_rifla/output/ConstantFolding.ll @@ -0,0 +1,76 @@ +@0 = internal constant [4 x i8] c"0_a\00" +@1 = internal constant [6 x 
i8] c"1_a0r\00" +@2 = internal constant [6 x i8] c"2_a1r\00" +@3 = internal constant [6 x i8] c"3_a2r\00" +@4 = internal constant [6 x i8] c"4_a3r\00" + +define i64 @ENTRYPOINT__main() #0 { +block_0: + call void @__quantum__rt__initialize(ptr null) + call void @__quantum__qis__x__body(ptr inttoptr (i64 0 to ptr)) + call void @__quantum__qis__cx__body(ptr inttoptr (i64 0 to ptr), ptr inttoptr (i64 1 to ptr)) + call void @__quantum__qis__cx__body(ptr inttoptr (i64 0 to ptr), ptr inttoptr (i64 2 to ptr)) + call void @__quantum__qis__cx__body(ptr inttoptr (i64 0 to ptr), ptr inttoptr (i64 1 to ptr)) + call void @__quantum__qis__cx__body(ptr inttoptr (i64 0 to ptr), ptr inttoptr (i64 2 to ptr)) + call void @__quantum__qis__cx__body(ptr inttoptr (i64 0 to ptr), ptr inttoptr (i64 1 to ptr)) + call void @__quantum__qis__cx__body(ptr inttoptr (i64 0 to ptr), ptr inttoptr (i64 2 to ptr)) + call void @__quantum__qis__cx__body(ptr inttoptr (i64 0 to ptr), ptr inttoptr (i64 1 to ptr)) + call void @__quantum__qis__cx__body(ptr inttoptr (i64 0 to ptr), ptr inttoptr (i64 2 to ptr)) + call void @__quantum__qis__cx__body(ptr inttoptr (i64 0 to ptr), ptr inttoptr (i64 1 to ptr)) + call void @__quantum__qis__cx__body(ptr inttoptr (i64 0 to ptr), ptr inttoptr (i64 2 to ptr)) + call void @__quantum__qis__cx__body(ptr inttoptr (i64 0 to ptr), ptr inttoptr (i64 1 to ptr)) + call void @__quantum__qis__cx__body(ptr inttoptr (i64 0 to ptr), ptr inttoptr (i64 2 to ptr)) + call void @__quantum__qis__cx__body(ptr inttoptr (i64 0 to ptr), ptr inttoptr (i64 1 to ptr)) + call void @__quantum__qis__cx__body(ptr inttoptr (i64 0 to ptr), ptr inttoptr (i64 2 to ptr)) + call void @__quantum__qis__cx__body(ptr inttoptr (i64 0 to ptr), ptr inttoptr (i64 1 to ptr)) + call void @__quantum__qis__cx__body(ptr inttoptr (i64 0 to ptr), ptr inttoptr (i64 2 to ptr)) + call void @__quantum__qis__cx__body(ptr inttoptr (i64 0 to ptr), ptr inttoptr (i64 1 to ptr)) + call void @__quantum__qis__cx__body(ptr inttoptr 
(i64 0 to ptr), ptr inttoptr (i64 2 to ptr)) + call void @__quantum__qis__rx__body(double 3.141592653589793, ptr inttoptr (i64 3 to ptr)) + call void @__quantum__qis__m__body(ptr inttoptr (i64 0 to ptr), ptr inttoptr (i64 0 to ptr)) + call void @__quantum__qis__m__body(ptr inttoptr (i64 1 to ptr), ptr inttoptr (i64 1 to ptr)) + call void @__quantum__qis__m__body(ptr inttoptr (i64 2 to ptr), ptr inttoptr (i64 2 to ptr)) + call void @__quantum__qis__m__body(ptr inttoptr (i64 3 to ptr), ptr inttoptr (i64 3 to ptr)) + call void @__quantum__qis__reset__body(ptr inttoptr (i64 0 to ptr)) + call void @__quantum__qis__reset__body(ptr inttoptr (i64 1 to ptr)) + call void @__quantum__qis__reset__body(ptr inttoptr (i64 2 to ptr)) + call void @__quantum__qis__reset__body(ptr inttoptr (i64 3 to ptr)) + call void @__quantum__rt__array_record_output(i64 4, ptr @0) + call void @__quantum__rt__result_record_output(ptr inttoptr (i64 0 to ptr), ptr @1) + call void @__quantum__rt__result_record_output(ptr inttoptr (i64 1 to ptr), ptr @2) + call void @__quantum__rt__result_record_output(ptr inttoptr (i64 2 to ptr), ptr @3) + call void @__quantum__rt__result_record_output(ptr inttoptr (i64 3 to ptr), ptr @4) + ret i64 0 +} + +declare void @__quantum__rt__initialize(ptr) + +declare void @__quantum__qis__x__body(ptr) + +declare void @__quantum__qis__cx__body(ptr, ptr) + +declare void @__quantum__qis__rx__body(double, ptr) + +declare void @__quantum__qis__m__body(ptr, ptr) #1 + +declare void @__quantum__qis__reset__body(ptr) #1 + +declare void @__quantum__rt__array_record_output(i64, ptr) + +declare void @__quantum__rt__result_record_output(ptr, ptr) + +attributes #0 = { "entry_point" "output_labeling_schema" "qir_profiles"="adaptive_profile" "required_num_qubits"="4" "required_num_results"="4" } +attributes #1 = { "irreversible" } + +; module flags + +!llvm.module.flags = !{!0, !1, !2, !3, !4, !5, !6, !7} + +!0 = !{i32 1, !"qir_major_version", i32 2} +!1 = !{i32 7, !"qir_minor_version", 
i32 1} +!2 = !{i32 1, !"dynamic_qubit_management", i1 false} +!3 = !{i32 1, !"dynamic_result_management", i1 false} +!4 = !{i32 5, !"int_computations", !{!"i64"}} +!5 = !{i32 5, !"float_computations", !{!"double"}} +!6 = !{i32 7, !"backwards_branching", i2 3} +!7 = !{i32 1, !"arrays", i1 true} diff --git a/source/pip/tests-integration/resources/adaptive_rifla/output/ConstantFolding.out b/source/pip/tests-integration/resources/adaptive_rifla/output/ConstantFolding.out new file mode 100644 index 0000000000..1126ca83c5 --- /dev/null +++ b/source/pip/tests-integration/resources/adaptive_rifla/output/ConstantFolding.out @@ -0,0 +1,12 @@ +START +METADATA entry_point +METADATA output_labeling_schema +METADATA qir_profiles adaptive_profile +METADATA required_num_qubits 4 +METADATA required_num_results 4 +OUTPUT ARRAY 4 0_a +OUTPUT RESULT 1 1_a0r +OUTPUT RESULT 1 2_a1r +OUTPUT RESULT 1 3_a2r +OUTPUT RESULT 1 4_a3r +END 0 diff --git a/source/pip/tests-integration/resources/adaptive_rifla/output/CopyAndUpdateExpressions.ll b/source/pip/tests-integration/resources/adaptive_rifla/output/CopyAndUpdateExpressions.ll new file mode 100644 index 0000000000..6e360a9f87 --- /dev/null +++ b/source/pip/tests-integration/resources/adaptive_rifla/output/CopyAndUpdateExpressions.ll @@ -0,0 +1,65 @@ +@0 = internal constant [4 x i8] c"0_t\00" +@1 = internal constant [6 x i8] c"1_t0a\00" +@2 = internal constant [8 x i8] c"2_t0a0r\00" +@3 = internal constant [6 x i8] c"3_t1a\00" +@4 = internal constant [8 x i8] c"4_t1a0r\00" +@5 = internal constant [6 x i8] c"5_t2a\00" +@6 = internal constant [8 x i8] c"6_t2a0r\00" +@7 = internal constant [8 x i8] c"7_t2a1r\00" +@8 = internal constant [8 x i8] c"8_t2a2r\00" + +define i64 @ENTRYPOINT__main() #0 { +block_0: + call void @__quantum__rt__initialize(ptr null) + call void @__quantum__qis__x__body(ptr inttoptr (i64 0 to ptr)) + call void @__quantum__qis__mresetz__body(ptr inttoptr (i64 0 to ptr), ptr inttoptr (i64 0 to ptr)) + call void 
@__quantum__qis__mresetz__body(ptr inttoptr (i64 1 to ptr), ptr inttoptr (i64 1 to ptr)) + call void @__quantum__qis__x__body(ptr inttoptr (i64 2 to ptr)) + call void @__quantum__qis__m__body(ptr inttoptr (i64 2 to ptr), ptr inttoptr (i64 2 to ptr)) + call void @__quantum__qis__m__body(ptr inttoptr (i64 3 to ptr), ptr inttoptr (i64 3 to ptr)) + call void @__quantum__qis__m__body(ptr inttoptr (i64 4 to ptr), ptr inttoptr (i64 4 to ptr)) + call void @__quantum__qis__x__body(ptr inttoptr (i64 3 to ptr)) + call void @__quantum__qis__x__body(ptr inttoptr (i64 4 to ptr)) + call void @__quantum__qis__m__body(ptr inttoptr (i64 3 to ptr), ptr inttoptr (i64 5 to ptr)) + call void @__quantum__qis__m__body(ptr inttoptr (i64 4 to ptr), ptr inttoptr (i64 6 to ptr)) + call void @__quantum__rt__tuple_record_output(i64 3, ptr @0) + call void @__quantum__rt__array_record_output(i64 1, ptr @1) + call void @__quantum__rt__result_record_output(ptr inttoptr (i64 0 to ptr), ptr @2) + call void @__quantum__rt__array_record_output(i64 1, ptr @3) + call void @__quantum__rt__result_record_output(ptr inttoptr (i64 1 to ptr), ptr @4) + call void @__quantum__rt__array_record_output(i64 3, ptr @5) + call void @__quantum__rt__result_record_output(ptr inttoptr (i64 2 to ptr), ptr @6) + call void @__quantum__rt__result_record_output(ptr inttoptr (i64 5 to ptr), ptr @7) + call void @__quantum__rt__result_record_output(ptr inttoptr (i64 6 to ptr), ptr @8) + ret i64 0 +} + +declare void @__quantum__rt__initialize(ptr) + +declare void @__quantum__qis__x__body(ptr) + +declare void @__quantum__qis__mresetz__body(ptr, ptr) #1 + +declare void @__quantum__qis__m__body(ptr, ptr) #1 + +declare void @__quantum__rt__tuple_record_output(i64, ptr) + +declare void @__quantum__rt__array_record_output(i64, ptr) + +declare void @__quantum__rt__result_record_output(ptr, ptr) + +attributes #0 = { "entry_point" "output_labeling_schema" "qir_profiles"="adaptive_profile" "required_num_qubits"="5" 
"required_num_results"="7" } +attributes #1 = { "irreversible" } + +; module flags + +!llvm.module.flags = !{!0, !1, !2, !3, !4, !5, !6, !7} + +!0 = !{i32 1, !"qir_major_version", i32 2} +!1 = !{i32 7, !"qir_minor_version", i32 1} +!2 = !{i32 1, !"dynamic_qubit_management", i1 false} +!3 = !{i32 1, !"dynamic_result_management", i1 false} +!4 = !{i32 5, !"int_computations", !{!"i64"}} +!5 = !{i32 5, !"float_computations", !{!"double"}} +!6 = !{i32 7, !"backwards_branching", i2 3} +!7 = !{i32 1, !"arrays", i1 true} diff --git a/source/pip/tests-integration/resources/adaptive_rifla/output/CopyAndUpdateExpressions.out b/source/pip/tests-integration/resources/adaptive_rifla/output/CopyAndUpdateExpressions.out new file mode 100644 index 0000000000..425f537742 --- /dev/null +++ b/source/pip/tests-integration/resources/adaptive_rifla/output/CopyAndUpdateExpressions.out @@ -0,0 +1,16 @@ +START +METADATA entry_point +METADATA output_labeling_schema +METADATA qir_profiles adaptive_profile +METADATA required_num_qubits 5 +METADATA required_num_results 7 +OUTPUT TUPLE 3 0_t +OUTPUT ARRAY 1 1_t0a +OUTPUT RESULT 1 2_t0a0r +OUTPUT ARRAY 1 3_t1a +OUTPUT RESULT 0 4_t1a0r +OUTPUT ARRAY 3 5_t2a +OUTPUT RESULT 1 6_t2a0r +OUTPUT RESULT 1 7_t2a1r +OUTPUT RESULT 1 8_t2a2r +END 0 diff --git a/source/pip/tests-integration/resources/adaptive_rifla/output/DeutschJozsaNISQ.ll b/source/pip/tests-integration/resources/adaptive_rifla/output/DeutschJozsaNISQ.ll new file mode 100644 index 0000000000..9e66d28f7e --- /dev/null +++ b/source/pip/tests-integration/resources/adaptive_rifla/output/DeutschJozsaNISQ.ll @@ -0,0 +1,102 @@ +@0 = internal constant [4 x i8] c"0_t\00" +@1 = internal constant [6 x i8] c"1_t0a\00" +@2 = internal constant [8 x i8] c"2_t0a0r\00" +@3 = internal constant [8 x i8] c"3_t0a1r\00" +@4 = internal constant [8 x i8] c"4_t0a2r\00" +@5 = internal constant [8 x i8] c"5_t0a3r\00" +@6 = internal constant [6 x i8] c"6_t1a\00" +@7 = internal constant [8 x i8] c"7_t1a0r\00" +@8 = 
internal constant [8 x i8] c"8_t1a1r\00" +@9 = internal constant [8 x i8] c"9_t1a2r\00" +@10 = internal constant [9 x i8] c"10_t1a3r\00" + +define i64 @ENTRYPOINT__main() #0 { +block_0: + call void @__quantum__rt__initialize(ptr null) + call void @__quantum__qis__x__body(ptr inttoptr (i64 4 to ptr)) + call void @__quantum__qis__h__body(ptr inttoptr (i64 4 to ptr)) + call void @__quantum__qis__h__body(ptr inttoptr (i64 0 to ptr)) + call void @__quantum__qis__h__body(ptr inttoptr (i64 1 to ptr)) + call void @__quantum__qis__h__body(ptr inttoptr (i64 2 to ptr)) + call void @__quantum__qis__h__body(ptr inttoptr (i64 3 to ptr)) + call void @__quantum__qis__cx__body(ptr inttoptr (i64 0 to ptr), ptr inttoptr (i64 4 to ptr)) + call void @__quantum__qis__h__body(ptr inttoptr (i64 3 to ptr)) + call void @__quantum__qis__h__body(ptr inttoptr (i64 2 to ptr)) + call void @__quantum__qis__h__body(ptr inttoptr (i64 1 to ptr)) + call void @__quantum__qis__h__body(ptr inttoptr (i64 0 to ptr)) + call void @__quantum__qis__m__body(ptr inttoptr (i64 0 to ptr), ptr inttoptr (i64 0 to ptr)) + call void @__quantum__qis__m__body(ptr inttoptr (i64 1 to ptr), ptr inttoptr (i64 1 to ptr)) + call void @__quantum__qis__m__body(ptr inttoptr (i64 2 to ptr), ptr inttoptr (i64 2 to ptr)) + call void @__quantum__qis__m__body(ptr inttoptr (i64 3 to ptr), ptr inttoptr (i64 3 to ptr)) + call void @__quantum__qis__reset__body(ptr inttoptr (i64 0 to ptr)) + call void @__quantum__qis__reset__body(ptr inttoptr (i64 1 to ptr)) + call void @__quantum__qis__reset__body(ptr inttoptr (i64 2 to ptr)) + call void @__quantum__qis__reset__body(ptr inttoptr (i64 3 to ptr)) + call void @__quantum__qis__reset__body(ptr inttoptr (i64 4 to ptr)) + call void @__quantum__qis__x__body(ptr inttoptr (i64 4 to ptr)) + call void @__quantum__qis__h__body(ptr inttoptr (i64 4 to ptr)) + call void @__quantum__qis__h__body(ptr inttoptr (i64 0 to ptr)) + call void @__quantum__qis__h__body(ptr inttoptr (i64 1 to ptr)) + call void 
@__quantum__qis__h__body(ptr inttoptr (i64 2 to ptr)) + call void @__quantum__qis__h__body(ptr inttoptr (i64 3 to ptr)) + call void @__quantum__qis__x__body(ptr inttoptr (i64 4 to ptr)) + call void @__quantum__qis__h__body(ptr inttoptr (i64 3 to ptr)) + call void @__quantum__qis__h__body(ptr inttoptr (i64 2 to ptr)) + call void @__quantum__qis__h__body(ptr inttoptr (i64 1 to ptr)) + call void @__quantum__qis__h__body(ptr inttoptr (i64 0 to ptr)) + call void @__quantum__qis__m__body(ptr inttoptr (i64 0 to ptr), ptr inttoptr (i64 4 to ptr)) + call void @__quantum__qis__m__body(ptr inttoptr (i64 1 to ptr), ptr inttoptr (i64 5 to ptr)) + call void @__quantum__qis__m__body(ptr inttoptr (i64 2 to ptr), ptr inttoptr (i64 6 to ptr)) + call void @__quantum__qis__m__body(ptr inttoptr (i64 3 to ptr), ptr inttoptr (i64 7 to ptr)) + call void @__quantum__qis__reset__body(ptr inttoptr (i64 0 to ptr)) + call void @__quantum__qis__reset__body(ptr inttoptr (i64 1 to ptr)) + call void @__quantum__qis__reset__body(ptr inttoptr (i64 2 to ptr)) + call void @__quantum__qis__reset__body(ptr inttoptr (i64 3 to ptr)) + call void @__quantum__qis__reset__body(ptr inttoptr (i64 4 to ptr)) + call void @__quantum__rt__tuple_record_output(i64 2, ptr @0) + call void @__quantum__rt__array_record_output(i64 4, ptr @1) + call void @__quantum__rt__result_record_output(ptr inttoptr (i64 0 to ptr), ptr @2) + call void @__quantum__rt__result_record_output(ptr inttoptr (i64 1 to ptr), ptr @3) + call void @__quantum__rt__result_record_output(ptr inttoptr (i64 2 to ptr), ptr @4) + call void @__quantum__rt__result_record_output(ptr inttoptr (i64 3 to ptr), ptr @5) + call void @__quantum__rt__array_record_output(i64 4, ptr @6) + call void @__quantum__rt__result_record_output(ptr inttoptr (i64 4 to ptr), ptr @7) + call void @__quantum__rt__result_record_output(ptr inttoptr (i64 5 to ptr), ptr @8) + call void @__quantum__rt__result_record_output(ptr inttoptr (i64 6 to ptr), ptr @9) + call void 
@__quantum__rt__result_record_output(ptr inttoptr (i64 7 to ptr), ptr @10) + ret i64 0 +} + +declare void @__quantum__rt__initialize(ptr) + +declare void @__quantum__qis__x__body(ptr) + +declare void @__quantum__qis__h__body(ptr) + +declare void @__quantum__qis__cx__body(ptr, ptr) + +declare void @__quantum__qis__m__body(ptr, ptr) #1 + +declare void @__quantum__qis__reset__body(ptr) #1 + +declare void @__quantum__rt__tuple_record_output(i64, ptr) + +declare void @__quantum__rt__array_record_output(i64, ptr) + +declare void @__quantum__rt__result_record_output(ptr, ptr) + +attributes #0 = { "entry_point" "output_labeling_schema" "qir_profiles"="adaptive_profile" "required_num_qubits"="5" "required_num_results"="8" } +attributes #1 = { "irreversible" } + +; module flags + +!llvm.module.flags = !{!0, !1, !2, !3, !4, !5, !6, !7} + +!0 = !{i32 1, !"qir_major_version", i32 2} +!1 = !{i32 7, !"qir_minor_version", i32 1} +!2 = !{i32 1, !"dynamic_qubit_management", i1 false} +!3 = !{i32 1, !"dynamic_result_management", i1 false} +!4 = !{i32 5, !"int_computations", !{!"i64"}} +!5 = !{i32 5, !"float_computations", !{!"double"}} +!6 = !{i32 7, !"backwards_branching", i2 3} +!7 = !{i32 1, !"arrays", i1 true} diff --git a/source/pip/tests-integration/resources/adaptive_rifla/output/DeutschJozsaNISQ.out b/source/pip/tests-integration/resources/adaptive_rifla/output/DeutschJozsaNISQ.out new file mode 100644 index 0000000000..47d4685e7f --- /dev/null +++ b/source/pip/tests-integration/resources/adaptive_rifla/output/DeutschJozsaNISQ.out @@ -0,0 +1,18 @@ +START +METADATA entry_point +METADATA output_labeling_schema +METADATA qir_profiles adaptive_profile +METADATA required_num_qubits 5 +METADATA required_num_results 8 +OUTPUT TUPLE 2 0_t +OUTPUT ARRAY 4 1_t0a +OUTPUT RESULT 1 2_t0a0r +OUTPUT RESULT 0 3_t0a1r +OUTPUT RESULT 0 4_t0a2r +OUTPUT RESULT 0 5_t0a3r +OUTPUT ARRAY 4 6_t1a +OUTPUT RESULT 0 7_t1a0r +OUTPUT RESULT 0 8_t1a1r +OUTPUT RESULT 0 9_t1a2r +OUTPUT RESULT 0 10_t1a3r +END 
0 diff --git a/source/pip/tests-integration/resources/adaptive_rifla/output/Doubles.ll b/source/pip/tests-integration/resources/adaptive_rifla/output/Doubles.ll new file mode 100644 index 0000000000..8cdefdc1e9 --- /dev/null +++ b/source/pip/tests-integration/resources/adaptive_rifla/output/Doubles.ll @@ -0,0 +1,253 @@ +@0 = internal constant [4 x i8] c"0_t\00" +@1 = internal constant [6 x i8] c"1_t0d\00" +@2 = internal constant [6 x i8] c"2_t1b\00" +@3 = internal constant [6 x i8] c"3_t2b\00" +@4 = internal constant [6 x i8] c"4_t3b\00" +@5 = internal constant [6 x i8] c"5_t4b\00" +@6 = internal constant [6 x i8] c"6_t5b\00" +@7 = internal constant [6 x i8] c"7_t6i\00" +@8 = internal constant [6 x i8] c"8_t7d\00" + +define i64 @ENTRYPOINT__main() #0 { +block_0: + %var_0 = alloca double + call void @__quantum__rt__initialize(ptr null) + store double 0.0, ptr %var_0 + call void @__quantum__qis__x__body(ptr inttoptr (i64 0 to ptr)) + call void @__quantum__qis__m__body(ptr inttoptr (i64 0 to ptr), ptr inttoptr (i64 0 to ptr)) + %var_2 = call i1 @__quantum__rt__read_result(ptr inttoptr (i64 0 to ptr)) + br i1 %var_2, label %block_1, label %block_2 +block_1: + call void @__quantum__qis__x__body(ptr inttoptr (i64 0 to ptr)) + store double 1.0, ptr %var_0 + store double 1.0, ptr %var_0 + store double 0.0, ptr %var_0 + store double 0.0, ptr %var_0 + store double 1.0, ptr %var_0 + br label %block_2 +block_2: + call void @__quantum__qis__x__body(ptr inttoptr (i64 0 to ptr)) + call void @__quantum__qis__m__body(ptr inttoptr (i64 0 to ptr), ptr inttoptr (i64 1 to ptr)) + %var_4 = call i1 @__quantum__rt__read_result(ptr inttoptr (i64 1 to ptr)) + br i1 %var_4, label %block_3, label %block_4 +block_3: + call void @__quantum__qis__x__body(ptr inttoptr (i64 0 to ptr)) + %var_126 = load double, ptr %var_0 + %var_6 = fadd double %var_126, 1.0 + store double %var_6, ptr %var_0 + %var_7 = fmul double %var_126, 1.0 + store double %var_7, ptr %var_0 + %var_8 = fsub double %var_126, 1.0 
+ store double %var_8, ptr %var_0 + %var_9 = fdiv double %var_126, 1.0 + store double %var_9, ptr %var_0 + %var_10 = fadd double %var_126, 1.0 + store double %var_10, ptr %var_0 + br label %block_4 +block_4: + call void @__quantum__qis__x__body(ptr inttoptr (i64 0 to ptr)) + call void @__quantum__qis__m__body(ptr inttoptr (i64 0 to ptr), ptr inttoptr (i64 2 to ptr)) + %var_11 = call i1 @__quantum__rt__read_result(ptr inttoptr (i64 2 to ptr)) + br i1 %var_11, label %block_5, label %block_6 +block_5: + call void @__quantum__qis__x__body(ptr inttoptr (i64 0 to ptr)) + %var_120 = load double, ptr %var_0 + %var_13 = fadd double %var_120, 1.0 + store double %var_13, ptr %var_0 + %var_14 = fmul double %var_120, 1.0 + store double %var_14, ptr %var_0 + %var_15 = fsub double %var_120, 1.0 + store double %var_15, ptr %var_0 + %var_16 = fdiv double %var_120, 1.0 + store double %var_16, ptr %var_0 + %var_17 = fadd double %var_120, 1.0 + store double %var_17, ptr %var_0 + br label %block_6 +block_6: + call void @__quantum__qis__x__body(ptr inttoptr (i64 0 to ptr)) + call void @__quantum__qis__m__body(ptr inttoptr (i64 0 to ptr), ptr inttoptr (i64 3 to ptr)) + %var_18 = call i1 @__quantum__rt__read_result(ptr inttoptr (i64 3 to ptr)) + br i1 %var_18, label %block_7, label %block_8 +block_7: + call void @__quantum__qis__x__body(ptr inttoptr (i64 0 to ptr)) + %var_114 = load double, ptr %var_0 + %var_20 = fadd double %var_114, 1.0 + store double %var_20, ptr %var_0 + %var_21 = fmul double %var_114, 1.0 + store double %var_21, ptr %var_0 + %var_22 = fsub double %var_114, 1.0 + store double %var_22, ptr %var_0 + %var_23 = fdiv double %var_114, 1.0 + store double %var_23, ptr %var_0 + %var_24 = fadd double %var_114, 1.0 + store double %var_24, ptr %var_0 + br label %block_8 +block_8: + call void @__quantum__qis__x__body(ptr inttoptr (i64 0 to ptr)) + call void @__quantum__qis__m__body(ptr inttoptr (i64 0 to ptr), ptr inttoptr (i64 4 to ptr)) + %var_25 = call i1 
@__quantum__rt__read_result(ptr inttoptr (i64 4 to ptr)) + br i1 %var_25, label %block_9, label %block_10 +block_9: + call void @__quantum__qis__x__body(ptr inttoptr (i64 0 to ptr)) + %var_108 = load double, ptr %var_0 + %var_27 = fadd double %var_108, 1.0 + store double %var_27, ptr %var_0 + %var_28 = fmul double %var_108, 1.0 + store double %var_28, ptr %var_0 + %var_29 = fsub double %var_108, 1.0 + store double %var_29, ptr %var_0 + %var_30 = fdiv double %var_108, 1.0 + store double %var_30, ptr %var_0 + %var_31 = fadd double %var_108, 1.0 + store double %var_31, ptr %var_0 + br label %block_10 +block_10: + call void @__quantum__qis__x__body(ptr inttoptr (i64 0 to ptr)) + call void @__quantum__qis__m__body(ptr inttoptr (i64 0 to ptr), ptr inttoptr (i64 5 to ptr)) + %var_32 = call i1 @__quantum__rt__read_result(ptr inttoptr (i64 5 to ptr)) + br i1 %var_32, label %block_11, label %block_12 +block_11: + call void @__quantum__qis__x__body(ptr inttoptr (i64 0 to ptr)) + %var_102 = load double, ptr %var_0 + %var_34 = fadd double %var_102, 1.0 + store double %var_34, ptr %var_0 + %var_35 = fmul double %var_102, 1.0 + store double %var_35, ptr %var_0 + %var_36 = fsub double %var_102, 1.0 + store double %var_36, ptr %var_0 + %var_37 = fdiv double %var_102, 1.0 + store double %var_37, ptr %var_0 + %var_38 = fadd double %var_102, 1.0 + store double %var_38, ptr %var_0 + br label %block_12 +block_12: + call void @__quantum__qis__x__body(ptr inttoptr (i64 0 to ptr)) + call void @__quantum__qis__m__body(ptr inttoptr (i64 0 to ptr), ptr inttoptr (i64 6 to ptr)) + %var_39 = call i1 @__quantum__rt__read_result(ptr inttoptr (i64 6 to ptr)) + br i1 %var_39, label %block_13, label %block_14 +block_13: + call void @__quantum__qis__x__body(ptr inttoptr (i64 0 to ptr)) + %var_96 = load double, ptr %var_0 + %var_41 = fadd double %var_96, 1.0 + store double %var_41, ptr %var_0 + %var_42 = fmul double %var_96, 1.0 + store double %var_42, ptr %var_0 + %var_43 = fsub double %var_96, 1.0 + 
store double %var_43, ptr %var_0 + %var_44 = fdiv double %var_96, 1.0 + store double %var_44, ptr %var_0 + %var_45 = fadd double %var_96, 1.0 + store double %var_45, ptr %var_0 + br label %block_14 +block_14: + call void @__quantum__qis__x__body(ptr inttoptr (i64 0 to ptr)) + call void @__quantum__qis__m__body(ptr inttoptr (i64 0 to ptr), ptr inttoptr (i64 7 to ptr)) + %var_46 = call i1 @__quantum__rt__read_result(ptr inttoptr (i64 7 to ptr)) + br i1 %var_46, label %block_15, label %block_16 +block_15: + call void @__quantum__qis__x__body(ptr inttoptr (i64 0 to ptr)) + %var_90 = load double, ptr %var_0 + %var_48 = fadd double %var_90, 1.0 + store double %var_48, ptr %var_0 + %var_49 = fmul double %var_90, 1.0 + store double %var_49, ptr %var_0 + %var_50 = fsub double %var_90, 1.0 + store double %var_50, ptr %var_0 + %var_51 = fdiv double %var_90, 1.0 + store double %var_51, ptr %var_0 + %var_52 = fadd double %var_90, 1.0 + store double %var_52, ptr %var_0 + br label %block_16 +block_16: + call void @__quantum__qis__x__body(ptr inttoptr (i64 0 to ptr)) + call void @__quantum__qis__m__body(ptr inttoptr (i64 0 to ptr), ptr inttoptr (i64 8 to ptr)) + %var_53 = call i1 @__quantum__rt__read_result(ptr inttoptr (i64 8 to ptr)) + br i1 %var_53, label %block_17, label %block_18 +block_17: + call void @__quantum__qis__x__body(ptr inttoptr (i64 0 to ptr)) + %var_84 = load double, ptr %var_0 + %var_55 = fadd double %var_84, 1.0 + store double %var_55, ptr %var_0 + %var_56 = fmul double %var_84, 1.0 + store double %var_56, ptr %var_0 + %var_57 = fsub double %var_84, 1.0 + store double %var_57, ptr %var_0 + %var_58 = fdiv double %var_84, 1.0 + store double %var_58, ptr %var_0 + %var_59 = fadd double %var_84, 1.0 + store double %var_59, ptr %var_0 + br label %block_18 +block_18: + call void @__quantum__qis__x__body(ptr inttoptr (i64 0 to ptr)) + call void @__quantum__qis__m__body(ptr inttoptr (i64 0 to ptr), ptr inttoptr (i64 9 to ptr)) + %var_60 = call i1 
@__quantum__rt__read_result(ptr inttoptr (i64 9 to ptr)) + br i1 %var_60, label %block_19, label %block_20 +block_19: + call void @__quantum__qis__x__body(ptr inttoptr (i64 0 to ptr)) + %var_78 = load double, ptr %var_0 + %var_62 = fadd double %var_78, 1.0 + store double %var_62, ptr %var_0 + %var_63 = fmul double %var_78, 1.0 + store double %var_63, ptr %var_0 + %var_64 = fsub double %var_78, 1.0 + store double %var_64, ptr %var_0 + %var_65 = fdiv double %var_78, 1.0 + store double %var_65, ptr %var_0 + %var_66 = fadd double %var_78, 1.0 + store double %var_66, ptr %var_0 + br label %block_20 +block_20: + call void @__quantum__qis__reset__body(ptr inttoptr (i64 0 to ptr)) + %var_77 = load double, ptr %var_0 + %var_67 = fptosi double %var_77 to i64 + %var_69 = sitofp i64 %var_67 to double + %var_71 = fcmp ogt double %var_77, 5.0 + %var_72 = fcmp olt double %var_77, 5.0 + %var_73 = fcmp oge double %var_77, 10.0 + %var_74 = fcmp oeq double %var_77, 10.0 + %var_75 = fcmp one double %var_77, 10.0 + call void @__quantum__rt__tuple_record_output(i64 8, ptr @0) + call void @__quantum__rt__double_record_output(double %var_77, ptr @1) + call void @__quantum__rt__bool_record_output(i1 %var_71, ptr @2) + call void @__quantum__rt__bool_record_output(i1 %var_72, ptr @3) + call void @__quantum__rt__bool_record_output(i1 %var_73, ptr @4) + call void @__quantum__rt__bool_record_output(i1 %var_74, ptr @5) + call void @__quantum__rt__bool_record_output(i1 %var_75, ptr @6) + call void @__quantum__rt__int_record_output(i64 %var_67, ptr @7) + call void @__quantum__rt__double_record_output(double %var_69, ptr @8) + ret i64 0 +} + +declare void @__quantum__rt__initialize(ptr) + +declare void @__quantum__qis__x__body(ptr) + +declare void @__quantum__qis__m__body(ptr, ptr) #1 + +declare i1 @__quantum__rt__read_result(ptr) + +declare void @__quantum__qis__reset__body(ptr) #1 + +declare void @__quantum__rt__tuple_record_output(i64, ptr) + +declare void 
@__quantum__rt__double_record_output(double, ptr) + +declare void @__quantum__rt__bool_record_output(i1, ptr) + +declare void @__quantum__rt__int_record_output(i64, ptr) + +attributes #0 = { "entry_point" "output_labeling_schema" "qir_profiles"="adaptive_profile" "required_num_qubits"="1" "required_num_results"="10" } +attributes #1 = { "irreversible" } + +; module flags + +!llvm.module.flags = !{!0, !1, !2, !3, !4, !5, !6, !7} + +!0 = !{i32 1, !"qir_major_version", i32 2} +!1 = !{i32 7, !"qir_minor_version", i32 1} +!2 = !{i32 1, !"dynamic_qubit_management", i1 false} +!3 = !{i32 1, !"dynamic_result_management", i1 false} +!4 = !{i32 5, !"int_computations", !{!"i64"}} +!5 = !{i32 5, !"float_computations", !{!"double"}} +!6 = !{i32 7, !"backwards_branching", i2 3} +!7 = !{i32 1, !"arrays", i1 true} diff --git a/source/pip/tests-integration/resources/adaptive_rifla/output/Doubles.out b/source/pip/tests-integration/resources/adaptive_rifla/output/Doubles.out new file mode 100644 index 0000000000..1e75619beb --- /dev/null +++ b/source/pip/tests-integration/resources/adaptive_rifla/output/Doubles.out @@ -0,0 +1,16 @@ +START +METADATA entry_point +METADATA output_labeling_schema +METADATA qir_profiles adaptive_profile +METADATA required_num_qubits 1 +METADATA required_num_results 10 +OUTPUT TUPLE 8 0_t +OUTPUT DOUBLE 10.0 1_t0d +OUTPUT BOOL true 2_t1b +OUTPUT BOOL false 3_t2b +OUTPUT BOOL true 4_t3b +OUTPUT BOOL true 5_t4b +OUTPUT BOOL false 6_t5b +OUTPUT INT 10 7_t6i +OUTPUT DOUBLE 10.0 8_t7d +END 0 diff --git a/source/pip/tests-integration/resources/adaptive_rifla/output/ExpandedTests.ll b/source/pip/tests-integration/resources/adaptive_rifla/output/ExpandedTests.ll new file mode 100644 index 0000000000..6e5083c457 --- /dev/null +++ b/source/pip/tests-integration/resources/adaptive_rifla/output/ExpandedTests.ll @@ -0,0 +1,93 @@ +@0 = internal constant [4 x i8] c"0_t\00" +@1 = internal constant [6 x i8] c"1_t0a\00" +@2 = internal constant [8 x i8] c"2_t0a0r\00" +@3 = 
internal constant [8 x i8] c"3_t0a1r\00" +@4 = internal constant [6 x i8] c"4_t1r\00" + +define i64 @ENTRYPOINT__main() #0 { +block_0: + call void @__quantum__rt__initialize(ptr null) + call void @__quantum__qis__h__body(ptr inttoptr (i64 0 to ptr)) + call void @__quantum__qis__h__body(ptr inttoptr (i64 1 to ptr)) + call void @__quantum__qis__x__body(ptr inttoptr (i64 2 to ptr)) + call void @__quantum__qis__h__body(ptr inttoptr (i64 2 to ptr)) + call void @__quantum__qis__x__body(ptr inttoptr (i64 0 to ptr)) + call void @__quantum__qis__ccx__body(ptr inttoptr (i64 0 to ptr), ptr inttoptr (i64 1 to ptr), ptr inttoptr (i64 2 to ptr)) + call void @__quantum__qis__x__body(ptr inttoptr (i64 0 to ptr)) + call void @__quantum__qis__h__body(ptr inttoptr (i64 2 to ptr)) + call void @__quantum__qis__x__body(ptr inttoptr (i64 2 to ptr)) + call void @__quantum__qis__h__body(ptr inttoptr (i64 1 to ptr)) + call void @__quantum__qis__h__body(ptr inttoptr (i64 0 to ptr)) + call void @__quantum__qis__x__body(ptr inttoptr (i64 0 to ptr)) + call void @__quantum__qis__x__body(ptr inttoptr (i64 1 to ptr)) + call void @__quantum__qis__cz__body(ptr inttoptr (i64 0 to ptr), ptr inttoptr (i64 1 to ptr)) + call void @__quantum__qis__x__body(ptr inttoptr (i64 1 to ptr)) + call void @__quantum__qis__x__body(ptr inttoptr (i64 0 to ptr)) + call void @__quantum__qis__h__body(ptr inttoptr (i64 0 to ptr)) + call void @__quantum__qis__h__body(ptr inttoptr (i64 1 to ptr)) + call void @__quantum__qis__mresetz__body(ptr inttoptr (i64 0 to ptr), ptr inttoptr (i64 0 to ptr)) + call void @__quantum__qis__mresetz__body(ptr inttoptr (i64 1 to ptr), ptr inttoptr (i64 1 to ptr)) + call void @__quantum__qis__h__body(ptr inttoptr (i64 2 to ptr)) + call void @__quantum__qis__cx__body(ptr inttoptr (i64 2 to ptr), ptr inttoptr (i64 1 to ptr)) + call void @__quantum__qis__cx__body(ptr inttoptr (i64 2 to ptr), ptr inttoptr (i64 0 to ptr)) + call void @__quantum__qis__rx__body(double -1.5707963267948966, ptr 
inttoptr (i64 1 to ptr)) + call void @__quantum__qis__rz__body(double -1.5707963267948966, ptr inttoptr (i64 0 to ptr)) + call void @__quantum__qis__h__body(ptr inttoptr (i64 1 to ptr)) + call void @__quantum__qis__rzz__body(double 1.5707963267948966, ptr inttoptr (i64 0 to ptr), ptr inttoptr (i64 1 to ptr)) + call void @__quantum__qis__h__body(ptr inttoptr (i64 1 to ptr)) + call void @__quantum__qis__cx__body(ptr inttoptr (i64 0 to ptr), ptr inttoptr (i64 1 to ptr)) + call void @__quantum__qis__cx__body(ptr inttoptr (i64 2 to ptr), ptr inttoptr (i64 0 to ptr)) + call void @__quantum__qis__cx__body(ptr inttoptr (i64 2 to ptr), ptr inttoptr (i64 1 to ptr)) + call void @__quantum__qis__h__body(ptr inttoptr (i64 2 to ptr)) + call void @__quantum__qis__m__body(ptr inttoptr (i64 1 to ptr), ptr inttoptr (i64 2 to ptr)) + call void @__quantum__rt__tuple_record_output(i64 2, ptr @0) + call void @__quantum__rt__array_record_output(i64 2, ptr @1) + call void @__quantum__rt__result_record_output(ptr inttoptr (i64 0 to ptr), ptr @2) + call void @__quantum__rt__result_record_output(ptr inttoptr (i64 1 to ptr), ptr @3) + call void @__quantum__rt__result_record_output(ptr inttoptr (i64 2 to ptr), ptr @4) + ret i64 0 +} + +declare void @__quantum__rt__initialize(ptr) + +declare void @__quantum__qis__h__body(ptr) + +declare void @__quantum__qis__x__body(ptr) + +declare void @__quantum__qis__ccx__body(ptr, ptr, ptr) + +declare void @__quantum__qis__cz__body(ptr, ptr) + +declare void @__quantum__qis__mresetz__body(ptr, ptr) #1 + +declare void @__quantum__qis__cx__body(ptr, ptr) + +declare void @__quantum__qis__rx__body(double, ptr) + +declare void @__quantum__qis__rz__body(double, ptr) + +declare void @__quantum__qis__rzz__body(double, ptr, ptr) + +declare void @__quantum__qis__m__body(ptr, ptr) #1 + +declare void @__quantum__rt__tuple_record_output(i64, ptr) + +declare void @__quantum__rt__array_record_output(i64, ptr) + +declare void @__quantum__rt__result_record_output(ptr, ptr) + 
+attributes #0 = { "entry_point" "output_labeling_schema" "qir_profiles"="adaptive_profile" "required_num_qubits"="3" "required_num_results"="3" } +attributes #1 = { "irreversible" } + +; module flags + +!llvm.module.flags = !{!0, !1, !2, !3, !4, !5, !6, !7} + +!0 = !{i32 1, !"qir_major_version", i32 2} +!1 = !{i32 7, !"qir_minor_version", i32 1} +!2 = !{i32 1, !"dynamic_qubit_management", i1 false} +!3 = !{i32 1, !"dynamic_result_management", i1 false} +!4 = !{i32 5, !"int_computations", !{!"i64"}} +!5 = !{i32 5, !"float_computations", !{!"double"}} +!6 = !{i32 7, !"backwards_branching", i2 3} +!7 = !{i32 1, !"arrays", i1 true} diff --git a/source/pip/tests-integration/resources/adaptive_rifla/output/ExpandedTests.out b/source/pip/tests-integration/resources/adaptive_rifla/output/ExpandedTests.out new file mode 100644 index 0000000000..d720206f0c --- /dev/null +++ b/source/pip/tests-integration/resources/adaptive_rifla/output/ExpandedTests.out @@ -0,0 +1,12 @@ +START +METADATA entry_point +METADATA output_labeling_schema +METADATA qir_profiles adaptive_profile +METADATA required_num_qubits 3 +METADATA required_num_results 3 +OUTPUT TUPLE 2 0_t +OUTPUT ARRAY 2 1_t0a +OUTPUT RESULT 0 2_t0a0r +OUTPUT RESULT 1 3_t0a1r +OUTPUT RESULT 0 4_t1r +END 0 diff --git a/source/pip/tests-integration/resources/adaptive_rifla/output/Functors.ll b/source/pip/tests-integration/resources/adaptive_rifla/output/Functors.ll new file mode 100644 index 0000000000..42f5728042 --- /dev/null +++ b/source/pip/tests-integration/resources/adaptive_rifla/output/Functors.ll @@ -0,0 +1,131 @@ +@0 = internal constant [4 x i8] c"0_t\00" +@1 = internal constant [6 x i8] c"1_t0a\00" +@2 = internal constant [8 x i8] c"2_t0a0r\00" +@3 = internal constant [8 x i8] c"3_t0a1r\00" +@4 = internal constant [6 x i8] c"4_t1a\00" +@5 = internal constant [8 x i8] c"5_t1a0r\00" +@6 = internal constant [8 x i8] c"6_t1a1r\00" +@7 = internal constant [6 x i8] c"7_t2a\00" +@8 = internal constant [8 x i8] c"8_t2a0r\00" 
+@9 = internal constant [8 x i8] c"9_t2a1r\00" + +define i64 @ENTRYPOINT__main() #0 { +block_0: + call void @__quantum__rt__initialize(ptr null) + call void @__quantum__qis__x__body(ptr inttoptr (i64 0 to ptr)) + call void @__quantum__qis__h__body(ptr inttoptr (i64 1 to ptr)) + call void @__quantum__qis__z__body(ptr inttoptr (i64 1 to ptr)) + call void @__quantum__qis__z__body(ptr inttoptr (i64 1 to ptr)) + call void @__quantum__qis__h__body(ptr inttoptr (i64 1 to ptr)) + call void @__quantum__qis__x__body(ptr inttoptr (i64 0 to ptr)) + call void @__quantum__qis__x__body(ptr inttoptr (i64 2 to ptr)) + call void @__quantum__qis__x__body(ptr inttoptr (i64 3 to ptr)) + call void @__quantum__qis__ccx__body(ptr inttoptr (i64 2 to ptr), ptr inttoptr (i64 3 to ptr), ptr inttoptr (i64 4 to ptr)) + call void @__quantum__qis__s__body(ptr inttoptr (i64 5 to ptr)) + call void @__quantum__qis__h__body(ptr inttoptr (i64 5 to ptr)) + call void @__quantum__qis__t__body(ptr inttoptr (i64 5 to ptr)) + call void @__quantum__qis__ccx__body(ptr inttoptr (i64 2 to ptr), ptr inttoptr (i64 3 to ptr), ptr inttoptr (i64 5 to ptr)) + call void @__quantum__qis__t__adj(ptr inttoptr (i64 5 to ptr)) + call void @__quantum__qis__h__body(ptr inttoptr (i64 5 to ptr)) + call void @__quantum__qis__s__adj(ptr inttoptr (i64 5 to ptr)) + call void @__quantum__qis__h__body(ptr inttoptr (i64 5 to ptr)) + call void @__quantum__qis__ccx__body(ptr inttoptr (i64 2 to ptr), ptr inttoptr (i64 3 to ptr), ptr inttoptr (i64 5 to ptr)) + call void @__quantum__qis__h__body(ptr inttoptr (i64 5 to ptr)) + call void @__quantum__qis__x__body(ptr inttoptr (i64 3 to ptr)) + call void @__quantum__qis__x__body(ptr inttoptr (i64 2 to ptr)) + call void @__quantum__qis__x__body(ptr inttoptr (i64 2 to ptr)) + call void @__quantum__qis__x__body(ptr inttoptr (i64 3 to ptr)) + call void @__quantum__qis__ccx__body(ptr inttoptr (i64 2 to ptr), ptr inttoptr (i64 3 to ptr), ptr inttoptr (i64 6 to ptr)) + call void 
@__quantum__qis__s__body(ptr inttoptr (i64 7 to ptr)) + call void @__quantum__qis__h__body(ptr inttoptr (i64 7 to ptr)) + call void @__quantum__qis__t__body(ptr inttoptr (i64 7 to ptr)) + call void @__quantum__qis__ccx__body(ptr inttoptr (i64 2 to ptr), ptr inttoptr (i64 3 to ptr), ptr inttoptr (i64 7 to ptr)) + call void @__quantum__qis__t__adj(ptr inttoptr (i64 7 to ptr)) + call void @__quantum__qis__h__body(ptr inttoptr (i64 7 to ptr)) + call void @__quantum__qis__s__adj(ptr inttoptr (i64 7 to ptr)) + call void @__quantum__qis__h__body(ptr inttoptr (i64 7 to ptr)) + call void @__quantum__qis__ccx__body(ptr inttoptr (i64 2 to ptr), ptr inttoptr (i64 3 to ptr), ptr inttoptr (i64 7 to ptr)) + call void @__quantum__qis__h__body(ptr inttoptr (i64 7 to ptr)) + call void @__quantum__qis__h__body(ptr inttoptr (i64 7 to ptr)) + call void @__quantum__qis__ccx__body(ptr inttoptr (i64 2 to ptr), ptr inttoptr (i64 3 to ptr), ptr inttoptr (i64 7 to ptr)) + call void @__quantum__qis__h__body(ptr inttoptr (i64 7 to ptr)) + call void @__quantum__qis__s__body(ptr inttoptr (i64 7 to ptr)) + call void @__quantum__qis__h__body(ptr inttoptr (i64 7 to ptr)) + call void @__quantum__qis__t__body(ptr inttoptr (i64 7 to ptr)) + call void @__quantum__qis__ccx__body(ptr inttoptr (i64 2 to ptr), ptr inttoptr (i64 3 to ptr), ptr inttoptr (i64 7 to ptr)) + call void @__quantum__qis__t__adj(ptr inttoptr (i64 7 to ptr)) + call void @__quantum__qis__h__body(ptr inttoptr (i64 7 to ptr)) + call void @__quantum__qis__s__adj(ptr inttoptr (i64 7 to ptr)) + call void @__quantum__qis__ccx__body(ptr inttoptr (i64 2 to ptr), ptr inttoptr (i64 3 to ptr), ptr inttoptr (i64 6 to ptr)) + call void @__quantum__qis__x__body(ptr inttoptr (i64 3 to ptr)) + call void @__quantum__qis__x__body(ptr inttoptr (i64 2 to ptr)) + call void @__quantum__qis__m__body(ptr inttoptr (i64 0 to ptr), ptr inttoptr (i64 0 to ptr)) + call void @__quantum__qis__m__body(ptr inttoptr (i64 1 to ptr), ptr inttoptr (i64 1 to ptr)) + call 
void @__quantum__qis__m__body(ptr inttoptr (i64 4 to ptr), ptr inttoptr (i64 2 to ptr)) + call void @__quantum__qis__m__body(ptr inttoptr (i64 5 to ptr), ptr inttoptr (i64 3 to ptr)) + call void @__quantum__qis__m__body(ptr inttoptr (i64 6 to ptr), ptr inttoptr (i64 4 to ptr)) + call void @__quantum__qis__m__body(ptr inttoptr (i64 7 to ptr), ptr inttoptr (i64 5 to ptr)) + call void @__quantum__qis__reset__body(ptr inttoptr (i64 2 to ptr)) + call void @__quantum__qis__reset__body(ptr inttoptr (i64 3 to ptr)) + call void @__quantum__qis__reset__body(ptr inttoptr (i64 0 to ptr)) + call void @__quantum__qis__reset__body(ptr inttoptr (i64 1 to ptr)) + call void @__quantum__qis__reset__body(ptr inttoptr (i64 4 to ptr)) + call void @__quantum__qis__reset__body(ptr inttoptr (i64 5 to ptr)) + call void @__quantum__qis__reset__body(ptr inttoptr (i64 6 to ptr)) + call void @__quantum__qis__reset__body(ptr inttoptr (i64 7 to ptr)) + call void @__quantum__rt__tuple_record_output(i64 3, ptr @0) + call void @__quantum__rt__array_record_output(i64 2, ptr @1) + call void @__quantum__rt__result_record_output(ptr inttoptr (i64 0 to ptr), ptr @2) + call void @__quantum__rt__result_record_output(ptr inttoptr (i64 1 to ptr), ptr @3) + call void @__quantum__rt__array_record_output(i64 2, ptr @4) + call void @__quantum__rt__result_record_output(ptr inttoptr (i64 2 to ptr), ptr @5) + call void @__quantum__rt__result_record_output(ptr inttoptr (i64 3 to ptr), ptr @6) + call void @__quantum__rt__array_record_output(i64 2, ptr @7) + call void @__quantum__rt__result_record_output(ptr inttoptr (i64 4 to ptr), ptr @8) + call void @__quantum__rt__result_record_output(ptr inttoptr (i64 5 to ptr), ptr @9) + ret i64 0 +} + +declare void @__quantum__rt__initialize(ptr) + +declare void @__quantum__qis__x__body(ptr) + +declare void @__quantum__qis__h__body(ptr) + +declare void @__quantum__qis__z__body(ptr) + +declare void @__quantum__qis__ccx__body(ptr, ptr, ptr) + +declare void 
@__quantum__qis__s__body(ptr) + +declare void @__quantum__qis__t__body(ptr) + +declare void @__quantum__qis__t__adj(ptr) + +declare void @__quantum__qis__s__adj(ptr) + +declare void @__quantum__qis__m__body(ptr, ptr) #1 + +declare void @__quantum__qis__reset__body(ptr) #1 + +declare void @__quantum__rt__tuple_record_output(i64, ptr) + +declare void @__quantum__rt__array_record_output(i64, ptr) + +declare void @__quantum__rt__result_record_output(ptr, ptr) + +attributes #0 = { "entry_point" "output_labeling_schema" "qir_profiles"="adaptive_profile" "required_num_qubits"="8" "required_num_results"="6" } +attributes #1 = { "irreversible" } + +; module flags + +!llvm.module.flags = !{!0, !1, !2, !3, !4, !5, !6, !7} + +!0 = !{i32 1, !"qir_major_version", i32 2} +!1 = !{i32 7, !"qir_minor_version", i32 1} +!2 = !{i32 1, !"dynamic_qubit_management", i1 false} +!3 = !{i32 1, !"dynamic_result_management", i1 false} +!4 = !{i32 5, !"int_computations", !{!"i64"}} +!5 = !{i32 5, !"float_computations", !{!"double"}} +!6 = !{i32 7, !"backwards_branching", i2 3} +!7 = !{i32 1, !"arrays", i1 true} diff --git a/source/pip/tests-integration/resources/adaptive_rifla/output/Functors.out b/source/pip/tests-integration/resources/adaptive_rifla/output/Functors.out new file mode 100644 index 0000000000..098e167cb5 --- /dev/null +++ b/source/pip/tests-integration/resources/adaptive_rifla/output/Functors.out @@ -0,0 +1,17 @@ +START +METADATA entry_point +METADATA output_labeling_schema +METADATA qir_profiles adaptive_profile +METADATA required_num_qubits 8 +METADATA required_num_results 6 +OUTPUT TUPLE 3 0_t +OUTPUT ARRAY 2 1_t0a +OUTPUT RESULT 0 2_t0a0r +OUTPUT RESULT 0 3_t0a1r +OUTPUT ARRAY 2 4_t1a +OUTPUT RESULT 1 5_t1a0r +OUTPUT RESULT 1 6_t1a1r +OUTPUT ARRAY 2 7_t2a +OUTPUT RESULT 0 8_t2a0r +OUTPUT RESULT 0 9_t2a1r +END 0 diff --git a/source/pip/tests-integration/resources/adaptive_rifla/output/HiddenShiftNISQ.ll 
b/source/pip/tests-integration/resources/adaptive_rifla/output/HiddenShiftNISQ.ll new file mode 100644 index 0000000000..004d20bc58 --- /dev/null +++ b/source/pip/tests-integration/resources/adaptive_rifla/output/HiddenShiftNISQ.ll @@ -0,0 +1,84 @@ +@0 = internal constant [4 x i8] c"0_a\00" +@1 = internal constant [6 x i8] c"1_a0r\00" +@2 = internal constant [6 x i8] c"2_a1r\00" +@3 = internal constant [6 x i8] c"3_a2r\00" +@4 = internal constant [6 x i8] c"4_a3r\00" +@5 = internal constant [6 x i8] c"5_a4r\00" +@6 = internal constant [6 x i8] c"6_a5r\00" + +define i64 @ENTRYPOINT__main() #0 { +block_0: + call void @__quantum__rt__initialize(ptr null) + call void @__quantum__qis__h__body(ptr inttoptr (i64 0 to ptr)) + call void @__quantum__qis__h__body(ptr inttoptr (i64 1 to ptr)) + call void @__quantum__qis__h__body(ptr inttoptr (i64 2 to ptr)) + call void @__quantum__qis__h__body(ptr inttoptr (i64 3 to ptr)) + call void @__quantum__qis__h__body(ptr inttoptr (i64 4 to ptr)) + call void @__quantum__qis__h__body(ptr inttoptr (i64 5 to ptr)) + call void @__quantum__qis__x__body(ptr inttoptr (i64 0 to ptr)) + call void @__quantum__qis__x__body(ptr inttoptr (i64 5 to ptr)) + call void @__quantum__qis__cz__body(ptr inttoptr (i64 0 to ptr), ptr inttoptr (i64 3 to ptr)) + call void @__quantum__qis__cz__body(ptr inttoptr (i64 1 to ptr), ptr inttoptr (i64 4 to ptr)) + call void @__quantum__qis__cz__body(ptr inttoptr (i64 2 to ptr), ptr inttoptr (i64 5 to ptr)) + call void @__quantum__qis__x__body(ptr inttoptr (i64 0 to ptr)) + call void @__quantum__qis__x__body(ptr inttoptr (i64 5 to ptr)) + call void @__quantum__qis__h__body(ptr inttoptr (i64 0 to ptr)) + call void @__quantum__qis__h__body(ptr inttoptr (i64 1 to ptr)) + call void @__quantum__qis__h__body(ptr inttoptr (i64 2 to ptr)) + call void @__quantum__qis__h__body(ptr inttoptr (i64 3 to ptr)) + call void @__quantum__qis__h__body(ptr inttoptr (i64 4 to ptr)) + call void @__quantum__qis__h__body(ptr inttoptr (i64 5 to 
ptr)) + call void @__quantum__qis__cz__body(ptr inttoptr (i64 0 to ptr), ptr inttoptr (i64 3 to ptr)) + call void @__quantum__qis__cz__body(ptr inttoptr (i64 1 to ptr), ptr inttoptr (i64 4 to ptr)) + call void @__quantum__qis__cz__body(ptr inttoptr (i64 2 to ptr), ptr inttoptr (i64 5 to ptr)) + call void @__quantum__qis__h__body(ptr inttoptr (i64 5 to ptr)) + call void @__quantum__qis__h__body(ptr inttoptr (i64 4 to ptr)) + call void @__quantum__qis__h__body(ptr inttoptr (i64 3 to ptr)) + call void @__quantum__qis__h__body(ptr inttoptr (i64 2 to ptr)) + call void @__quantum__qis__h__body(ptr inttoptr (i64 1 to ptr)) + call void @__quantum__qis__h__body(ptr inttoptr (i64 0 to ptr)) + call void @__quantum__qis__mresetz__body(ptr inttoptr (i64 0 to ptr), ptr inttoptr (i64 0 to ptr)) + call void @__quantum__qis__mresetz__body(ptr inttoptr (i64 1 to ptr), ptr inttoptr (i64 1 to ptr)) + call void @__quantum__qis__mresetz__body(ptr inttoptr (i64 2 to ptr), ptr inttoptr (i64 2 to ptr)) + call void @__quantum__qis__mresetz__body(ptr inttoptr (i64 3 to ptr), ptr inttoptr (i64 3 to ptr)) + call void @__quantum__qis__mresetz__body(ptr inttoptr (i64 4 to ptr), ptr inttoptr (i64 4 to ptr)) + call void @__quantum__qis__mresetz__body(ptr inttoptr (i64 5 to ptr), ptr inttoptr (i64 5 to ptr)) + call void @__quantum__rt__array_record_output(i64 6, ptr @0) + call void @__quantum__rt__result_record_output(ptr inttoptr (i64 0 to ptr), ptr @1) + call void @__quantum__rt__result_record_output(ptr inttoptr (i64 1 to ptr), ptr @2) + call void @__quantum__rt__result_record_output(ptr inttoptr (i64 2 to ptr), ptr @3) + call void @__quantum__rt__result_record_output(ptr inttoptr (i64 3 to ptr), ptr @4) + call void @__quantum__rt__result_record_output(ptr inttoptr (i64 4 to ptr), ptr @5) + call void @__quantum__rt__result_record_output(ptr inttoptr (i64 5 to ptr), ptr @6) + ret i64 0 +} + +declare void @__quantum__rt__initialize(ptr) + +declare void @__quantum__qis__h__body(ptr) + +declare void 
@__quantum__qis__x__body(ptr) + +declare void @__quantum__qis__cz__body(ptr, ptr) + +declare void @__quantum__qis__mresetz__body(ptr, ptr) #1 + +declare void @__quantum__rt__array_record_output(i64, ptr) + +declare void @__quantum__rt__result_record_output(ptr, ptr) + +attributes #0 = { "entry_point" "output_labeling_schema" "qir_profiles"="adaptive_profile" "required_num_qubits"="6" "required_num_results"="6" } +attributes #1 = { "irreversible" } + +; module flags + +!llvm.module.flags = !{!0, !1, !2, !3, !4, !5, !6, !7} + +!0 = !{i32 1, !"qir_major_version", i32 2} +!1 = !{i32 7, !"qir_minor_version", i32 1} +!2 = !{i32 1, !"dynamic_qubit_management", i1 false} +!3 = !{i32 1, !"dynamic_result_management", i1 false} +!4 = !{i32 5, !"int_computations", !{!"i64"}} +!5 = !{i32 5, !"float_computations", !{!"double"}} +!6 = !{i32 7, !"backwards_branching", i2 3} +!7 = !{i32 1, !"arrays", i1 true} diff --git a/source/pip/tests-integration/resources/adaptive_rifla/output/HiddenShiftNISQ.out b/source/pip/tests-integration/resources/adaptive_rifla/output/HiddenShiftNISQ.out new file mode 100644 index 0000000000..f49932ad46 --- /dev/null +++ b/source/pip/tests-integration/resources/adaptive_rifla/output/HiddenShiftNISQ.out @@ -0,0 +1,14 @@ +START +METADATA entry_point +METADATA output_labeling_schema +METADATA qir_profiles adaptive_profile +METADATA required_num_qubits 6 +METADATA required_num_results 6 +OUTPUT ARRAY 6 0_a +OUTPUT RESULT 1 1_a0r +OUTPUT RESULT 0 2_a1r +OUTPUT RESULT 0 3_a2r +OUTPUT RESULT 0 4_a3r +OUTPUT RESULT 0 5_a4r +OUTPUT RESULT 1 6_a5r +END 0 diff --git a/source/pip/tests-integration/resources/adaptive_rifla/output/IntegerComparison.ll b/source/pip/tests-integration/resources/adaptive_rifla/output/IntegerComparison.ll new file mode 100644 index 0000000000..4816357964 --- /dev/null +++ b/source/pip/tests-integration/resources/adaptive_rifla/output/IntegerComparison.ll @@ -0,0 +1,159 @@ +@0 = internal constant [4 x i8] c"0_t\00" +@1 = internal constant 
[6 x i8] c"1_t0b\00" +@2 = internal constant [6 x i8] c"2_t1b\00" +@3 = internal constant [6 x i8] c"3_t2b\00" + +define i64 @ENTRYPOINT__main() #0 { +block_0: + %var_0 = alloca i64 + call void @__quantum__rt__initialize(ptr null) + store i64 0, ptr %var_0 + call void @__quantum__qis__x__body(ptr inttoptr (i64 0 to ptr)) + call void @__quantum__qis__m__body(ptr inttoptr (i64 0 to ptr), ptr inttoptr (i64 0 to ptr)) + %var_2 = call i1 @__quantum__rt__read_result(ptr inttoptr (i64 0 to ptr)) + br i1 %var_2, label %block_1, label %block_2 +block_1: + call void @__quantum__qis__x__body(ptr inttoptr (i64 0 to ptr)) + store i64 1, ptr %var_0 + br label %block_2 +block_2: + call void @__quantum__qis__x__body(ptr inttoptr (i64 0 to ptr)) + call void @__quantum__qis__m__body(ptr inttoptr (i64 0 to ptr), ptr inttoptr (i64 1 to ptr)) + %var_4 = call i1 @__quantum__rt__read_result(ptr inttoptr (i64 1 to ptr)) + br i1 %var_4, label %block_3, label %block_4 +block_3: + call void @__quantum__qis__x__body(ptr inttoptr (i64 0 to ptr)) + %var_52 = load i64, ptr %var_0 + %var_6 = add i64 %var_52, 1 + store i64 %var_6, ptr %var_0 + br label %block_4 +block_4: + call void @__quantum__qis__x__body(ptr inttoptr (i64 0 to ptr)) + call void @__quantum__qis__m__body(ptr inttoptr (i64 0 to ptr), ptr inttoptr (i64 2 to ptr)) + %var_7 = call i1 @__quantum__rt__read_result(ptr inttoptr (i64 2 to ptr)) + br i1 %var_7, label %block_5, label %block_6 +block_5: + call void @__quantum__qis__x__body(ptr inttoptr (i64 0 to ptr)) + %var_50 = load i64, ptr %var_0 + %var_9 = add i64 %var_50, 1 + store i64 %var_9, ptr %var_0 + br label %block_6 +block_6: + call void @__quantum__qis__x__body(ptr inttoptr (i64 0 to ptr)) + call void @__quantum__qis__m__body(ptr inttoptr (i64 0 to ptr), ptr inttoptr (i64 3 to ptr)) + %var_10 = call i1 @__quantum__rt__read_result(ptr inttoptr (i64 3 to ptr)) + br i1 %var_10, label %block_7, label %block_8 +block_7: + call void @__quantum__qis__x__body(ptr inttoptr (i64 0 to 
ptr)) + %var_48 = load i64, ptr %var_0 + %var_12 = add i64 %var_48, 1 + store i64 %var_12, ptr %var_0 + br label %block_8 +block_8: + call void @__quantum__qis__x__body(ptr inttoptr (i64 0 to ptr)) + call void @__quantum__qis__m__body(ptr inttoptr (i64 0 to ptr), ptr inttoptr (i64 4 to ptr)) + %var_13 = call i1 @__quantum__rt__read_result(ptr inttoptr (i64 4 to ptr)) + br i1 %var_13, label %block_9, label %block_10 +block_9: + call void @__quantum__qis__x__body(ptr inttoptr (i64 0 to ptr)) + %var_46 = load i64, ptr %var_0 + %var_15 = add i64 %var_46, 1 + store i64 %var_15, ptr %var_0 + br label %block_10 +block_10: + call void @__quantum__qis__x__body(ptr inttoptr (i64 0 to ptr)) + call void @__quantum__qis__m__body(ptr inttoptr (i64 0 to ptr), ptr inttoptr (i64 5 to ptr)) + %var_16 = call i1 @__quantum__rt__read_result(ptr inttoptr (i64 5 to ptr)) + br i1 %var_16, label %block_11, label %block_12 +block_11: + call void @__quantum__qis__x__body(ptr inttoptr (i64 0 to ptr)) + %var_44 = load i64, ptr %var_0 + %var_18 = add i64 %var_44, 1 + store i64 %var_18, ptr %var_0 + br label %block_12 +block_12: + call void @__quantum__qis__x__body(ptr inttoptr (i64 0 to ptr)) + call void @__quantum__qis__m__body(ptr inttoptr (i64 0 to ptr), ptr inttoptr (i64 6 to ptr)) + %var_19 = call i1 @__quantum__rt__read_result(ptr inttoptr (i64 6 to ptr)) + br i1 %var_19, label %block_13, label %block_14 +block_13: + call void @__quantum__qis__x__body(ptr inttoptr (i64 0 to ptr)) + %var_42 = load i64, ptr %var_0 + %var_21 = add i64 %var_42, 1 + store i64 %var_21, ptr %var_0 + br label %block_14 +block_14: + call void @__quantum__qis__x__body(ptr inttoptr (i64 0 to ptr)) + call void @__quantum__qis__m__body(ptr inttoptr (i64 0 to ptr), ptr inttoptr (i64 7 to ptr)) + %var_22 = call i1 @__quantum__rt__read_result(ptr inttoptr (i64 7 to ptr)) + br i1 %var_22, label %block_15, label %block_16 +block_15: + call void @__quantum__qis__x__body(ptr inttoptr (i64 0 to ptr)) + %var_40 = load i64, ptr 
%var_0 + %var_24 = add i64 %var_40, 1 + store i64 %var_24, ptr %var_0 + br label %block_16 +block_16: + call void @__quantum__qis__x__body(ptr inttoptr (i64 0 to ptr)) + call void @__quantum__qis__m__body(ptr inttoptr (i64 0 to ptr), ptr inttoptr (i64 8 to ptr)) + %var_25 = call i1 @__quantum__rt__read_result(ptr inttoptr (i64 8 to ptr)) + br i1 %var_25, label %block_17, label %block_18 +block_17: + call void @__quantum__qis__x__body(ptr inttoptr (i64 0 to ptr)) + %var_38 = load i64, ptr %var_0 + %var_27 = add i64 %var_38, 1 + store i64 %var_27, ptr %var_0 + br label %block_18 +block_18: + call void @__quantum__qis__x__body(ptr inttoptr (i64 0 to ptr)) + call void @__quantum__qis__m__body(ptr inttoptr (i64 0 to ptr), ptr inttoptr (i64 9 to ptr)) + %var_28 = call i1 @__quantum__rt__read_result(ptr inttoptr (i64 9 to ptr)) + br i1 %var_28, label %block_19, label %block_20 +block_19: + call void @__quantum__qis__x__body(ptr inttoptr (i64 0 to ptr)) + %var_36 = load i64, ptr %var_0 + %var_30 = add i64 %var_36, 1 + store i64 %var_30, ptr %var_0 + br label %block_20 +block_20: + call void @__quantum__qis__reset__body(ptr inttoptr (i64 0 to ptr)) + %var_35 = load i64, ptr %var_0 + %var_31 = icmp sgt i64 %var_35, 5 + %var_32 = icmp slt i64 %var_35, 5 + %var_33 = icmp eq i64 %var_35, 10 + call void @__quantum__rt__tuple_record_output(i64 3, ptr @0) + call void @__quantum__rt__bool_record_output(i1 %var_31, ptr @1) + call void @__quantum__rt__bool_record_output(i1 %var_32, ptr @2) + call void @__quantum__rt__bool_record_output(i1 %var_33, ptr @3) + ret i64 0 +} + +declare void @__quantum__rt__initialize(ptr) + +declare void @__quantum__qis__x__body(ptr) + +declare void @__quantum__qis__m__body(ptr, ptr) #1 + +declare i1 @__quantum__rt__read_result(ptr) + +declare void @__quantum__qis__reset__body(ptr) #1 + +declare void @__quantum__rt__tuple_record_output(i64, ptr) + +declare void @__quantum__rt__bool_record_output(i1, ptr) + +attributes #0 = { "entry_point" 
"output_labeling_schema" "qir_profiles"="adaptive_profile" "required_num_qubits"="1" "required_num_results"="10" } +attributes #1 = { "irreversible" } + +; module flags + +!llvm.module.flags = !{!0, !1, !2, !3, !4, !5, !6, !7} + +!0 = !{i32 1, !"qir_major_version", i32 2} +!1 = !{i32 7, !"qir_minor_version", i32 1} +!2 = !{i32 1, !"dynamic_qubit_management", i1 false} +!3 = !{i32 1, !"dynamic_result_management", i1 false} +!4 = !{i32 5, !"int_computations", !{!"i64"}} +!5 = !{i32 5, !"float_computations", !{!"double"}} +!6 = !{i32 7, !"backwards_branching", i2 3} +!7 = !{i32 1, !"arrays", i1 true} diff --git a/source/pip/tests-integration/resources/adaptive_rifla/output/IntegerComparison.out b/source/pip/tests-integration/resources/adaptive_rifla/output/IntegerComparison.out new file mode 100644 index 0000000000..08d89be312 --- /dev/null +++ b/source/pip/tests-integration/resources/adaptive_rifla/output/IntegerComparison.out @@ -0,0 +1,11 @@ +START +METADATA entry_point +METADATA output_labeling_schema +METADATA qir_profiles adaptive_profile +METADATA required_num_qubits 1 +METADATA required_num_results 10 +OUTPUT TUPLE 3 0_t +OUTPUT BOOL true 1_t0b +OUTPUT BOOL false 2_t1b +OUTPUT BOOL true 3_t2b +END 0 diff --git a/source/pip/tests-integration/resources/adaptive_rifla/output/IntrinsicCCNOT.ll b/source/pip/tests-integration/resources/adaptive_rifla/output/IntrinsicCCNOT.ll new file mode 100644 index 0000000000..c3cbb3657c --- /dev/null +++ b/source/pip/tests-integration/resources/adaptive_rifla/output/IntrinsicCCNOT.ll @@ -0,0 +1,88 @@ +@0 = internal constant [4 x i8] c"0_t\00" +@1 = internal constant [6 x i8] c"1_t0a\00" +@2 = internal constant [8 x i8] c"2_t0a0r\00" +@3 = internal constant [8 x i8] c"3_t0a1r\00" +@4 = internal constant [8 x i8] c"4_t0a2r\00" +@5 = internal constant [6 x i8] c"5_t1a\00" +@6 = internal constant [8 x i8] c"6_t1a0r\00" +@7 = internal constant [8 x i8] c"7_t1a1r\00" +@8 = internal constant [8 x i8] c"8_t1a2r\00" +@9 = internal 
constant [6 x i8] c"9_t2a\00" +@10 = internal constant [9 x i8] c"10_t2a0r\00" +@11 = internal constant [9 x i8] c"11_t2a1r\00" +@12 = internal constant [9 x i8] c"12_t2a2r\00" + +define i64 @ENTRYPOINT__main() #0 { +block_0: + call void @__quantum__rt__initialize(ptr null) + call void @__quantum__qis__ccx__body(ptr inttoptr (i64 0 to ptr), ptr inttoptr (i64 1 to ptr), ptr inttoptr (i64 2 to ptr)) + call void @__quantum__qis__m__body(ptr inttoptr (i64 0 to ptr), ptr inttoptr (i64 0 to ptr)) + call void @__quantum__qis__m__body(ptr inttoptr (i64 1 to ptr), ptr inttoptr (i64 1 to ptr)) + call void @__quantum__qis__m__body(ptr inttoptr (i64 2 to ptr), ptr inttoptr (i64 2 to ptr)) + call void @__quantum__qis__reset__body(ptr inttoptr (i64 0 to ptr)) + call void @__quantum__qis__reset__body(ptr inttoptr (i64 1 to ptr)) + call void @__quantum__qis__reset__body(ptr inttoptr (i64 2 to ptr)) + call void @__quantum__qis__x__body(ptr inttoptr (i64 3 to ptr)) + call void @__quantum__qis__ccx__body(ptr inttoptr (i64 3 to ptr), ptr inttoptr (i64 4 to ptr), ptr inttoptr (i64 5 to ptr)) + call void @__quantum__qis__m__body(ptr inttoptr (i64 3 to ptr), ptr inttoptr (i64 3 to ptr)) + call void @__quantum__qis__m__body(ptr inttoptr (i64 4 to ptr), ptr inttoptr (i64 4 to ptr)) + call void @__quantum__qis__m__body(ptr inttoptr (i64 5 to ptr), ptr inttoptr (i64 5 to ptr)) + call void @__quantum__qis__reset__body(ptr inttoptr (i64 3 to ptr)) + call void @__quantum__qis__reset__body(ptr inttoptr (i64 4 to ptr)) + call void @__quantum__qis__reset__body(ptr inttoptr (i64 5 to ptr)) + call void @__quantum__qis__x__body(ptr inttoptr (i64 6 to ptr)) + call void @__quantum__qis__x__body(ptr inttoptr (i64 7 to ptr)) + call void @__quantum__qis__ccx__body(ptr inttoptr (i64 6 to ptr), ptr inttoptr (i64 7 to ptr), ptr inttoptr (i64 8 to ptr)) + call void @__quantum__qis__m__body(ptr inttoptr (i64 6 to ptr), ptr inttoptr (i64 6 to ptr)) + call void @__quantum__qis__m__body(ptr inttoptr (i64 7 to 
ptr), ptr inttoptr (i64 7 to ptr)) + call void @__quantum__qis__m__body(ptr inttoptr (i64 8 to ptr), ptr inttoptr (i64 8 to ptr)) + call void @__quantum__qis__reset__body(ptr inttoptr (i64 6 to ptr)) + call void @__quantum__qis__reset__body(ptr inttoptr (i64 7 to ptr)) + call void @__quantum__qis__reset__body(ptr inttoptr (i64 8 to ptr)) + call void @__quantum__rt__tuple_record_output(i64 3, ptr @0) + call void @__quantum__rt__array_record_output(i64 3, ptr @1) + call void @__quantum__rt__result_record_output(ptr inttoptr (i64 0 to ptr), ptr @2) + call void @__quantum__rt__result_record_output(ptr inttoptr (i64 1 to ptr), ptr @3) + call void @__quantum__rt__result_record_output(ptr inttoptr (i64 2 to ptr), ptr @4) + call void @__quantum__rt__array_record_output(i64 3, ptr @5) + call void @__quantum__rt__result_record_output(ptr inttoptr (i64 3 to ptr), ptr @6) + call void @__quantum__rt__result_record_output(ptr inttoptr (i64 4 to ptr), ptr @7) + call void @__quantum__rt__result_record_output(ptr inttoptr (i64 5 to ptr), ptr @8) + call void @__quantum__rt__array_record_output(i64 3, ptr @9) + call void @__quantum__rt__result_record_output(ptr inttoptr (i64 6 to ptr), ptr @10) + call void @__quantum__rt__result_record_output(ptr inttoptr (i64 7 to ptr), ptr @11) + call void @__quantum__rt__result_record_output(ptr inttoptr (i64 8 to ptr), ptr @12) + ret i64 0 +} + +declare void @__quantum__rt__initialize(ptr) + +declare void @__quantum__qis__ccx__body(ptr, ptr, ptr) + +declare void @__quantum__qis__m__body(ptr, ptr) #1 + +declare void @__quantum__qis__reset__body(ptr) #1 + +declare void @__quantum__qis__x__body(ptr) + +declare void @__quantum__rt__tuple_record_output(i64, ptr) + +declare void @__quantum__rt__array_record_output(i64, ptr) + +declare void @__quantum__rt__result_record_output(ptr, ptr) + +attributes #0 = { "entry_point" "output_labeling_schema" "qir_profiles"="adaptive_profile" "required_num_qubits"="9" "required_num_results"="9" } +attributes #1 = { 
"irreversible" } + +; module flags + +!llvm.module.flags = !{!0, !1, !2, !3, !4, !5, !6, !7} + +!0 = !{i32 1, !"qir_major_version", i32 2} +!1 = !{i32 7, !"qir_minor_version", i32 1} +!2 = !{i32 1, !"dynamic_qubit_management", i1 false} +!3 = !{i32 1, !"dynamic_result_management", i1 false} +!4 = !{i32 5, !"int_computations", !{!"i64"}} +!5 = !{i32 5, !"float_computations", !{!"double"}} +!6 = !{i32 7, !"backwards_branching", i2 3} +!7 = !{i32 1, !"arrays", i1 true} diff --git a/source/pip/tests-integration/resources/adaptive_rifla/output/IntrinsicCCNOT.out b/source/pip/tests-integration/resources/adaptive_rifla/output/IntrinsicCCNOT.out new file mode 100644 index 0000000000..e7ac61ebf9 --- /dev/null +++ b/source/pip/tests-integration/resources/adaptive_rifla/output/IntrinsicCCNOT.out @@ -0,0 +1,20 @@ +START +METADATA entry_point +METADATA output_labeling_schema +METADATA qir_profiles adaptive_profile +METADATA required_num_qubits 9 +METADATA required_num_results 9 +OUTPUT TUPLE 3 0_t +OUTPUT ARRAY 3 1_t0a +OUTPUT RESULT 0 2_t0a0r +OUTPUT RESULT 0 3_t0a1r +OUTPUT RESULT 0 4_t0a2r +OUTPUT ARRAY 3 5_t1a +OUTPUT RESULT 1 6_t1a0r +OUTPUT RESULT 0 7_t1a1r +OUTPUT RESULT 0 8_t1a2r +OUTPUT ARRAY 3 9_t2a +OUTPUT RESULT 1 10_t2a0r +OUTPUT RESULT 1 11_t2a1r +OUTPUT RESULT 1 12_t2a2r +END 0 diff --git a/source/pip/tests-integration/resources/adaptive_rifla/output/IntrinsicCNOT.ll b/source/pip/tests-integration/resources/adaptive_rifla/output/IntrinsicCNOT.ll new file mode 100644 index 0000000000..286d489abe --- /dev/null +++ b/source/pip/tests-integration/resources/adaptive_rifla/output/IntrinsicCNOT.ll @@ -0,0 +1,63 @@ +@0 = internal constant [4 x i8] c"0_t\00" +@1 = internal constant [6 x i8] c"1_t0a\00" +@2 = internal constant [8 x i8] c"2_t0a0r\00" +@3 = internal constant [8 x i8] c"3_t0a1r\00" +@4 = internal constant [6 x i8] c"4_t1a\00" +@5 = internal constant [8 x i8] c"5_t1a0r\00" +@6 = internal constant [8 x i8] c"6_t1a1r\00" + +define i64 @ENTRYPOINT__main() #0 { 
+block_0: + call void @__quantum__rt__initialize(ptr null) + call void @__quantum__qis__cx__body(ptr inttoptr (i64 0 to ptr), ptr inttoptr (i64 1 to ptr)) + call void @__quantum__qis__m__body(ptr inttoptr (i64 0 to ptr), ptr inttoptr (i64 0 to ptr)) + call void @__quantum__qis__m__body(ptr inttoptr (i64 1 to ptr), ptr inttoptr (i64 1 to ptr)) + call void @__quantum__qis__reset__body(ptr inttoptr (i64 0 to ptr)) + call void @__quantum__qis__reset__body(ptr inttoptr (i64 1 to ptr)) + call void @__quantum__qis__x__body(ptr inttoptr (i64 2 to ptr)) + call void @__quantum__qis__cx__body(ptr inttoptr (i64 2 to ptr), ptr inttoptr (i64 3 to ptr)) + call void @__quantum__qis__m__body(ptr inttoptr (i64 2 to ptr), ptr inttoptr (i64 2 to ptr)) + call void @__quantum__qis__m__body(ptr inttoptr (i64 3 to ptr), ptr inttoptr (i64 3 to ptr)) + call void @__quantum__qis__reset__body(ptr inttoptr (i64 2 to ptr)) + call void @__quantum__qis__reset__body(ptr inttoptr (i64 3 to ptr)) + call void @__quantum__rt__tuple_record_output(i64 2, ptr @0) + call void @__quantum__rt__array_record_output(i64 2, ptr @1) + call void @__quantum__rt__result_record_output(ptr inttoptr (i64 0 to ptr), ptr @2) + call void @__quantum__rt__result_record_output(ptr inttoptr (i64 1 to ptr), ptr @3) + call void @__quantum__rt__array_record_output(i64 2, ptr @4) + call void @__quantum__rt__result_record_output(ptr inttoptr (i64 2 to ptr), ptr @5) + call void @__quantum__rt__result_record_output(ptr inttoptr (i64 3 to ptr), ptr @6) + ret i64 0 +} + +declare void @__quantum__rt__initialize(ptr) + +declare void @__quantum__qis__cx__body(ptr, ptr) + +declare void @__quantum__qis__m__body(ptr, ptr) #1 + +declare void @__quantum__qis__reset__body(ptr) #1 + +declare void @__quantum__qis__x__body(ptr) + +declare void @__quantum__rt__tuple_record_output(i64, ptr) + +declare void @__quantum__rt__array_record_output(i64, ptr) + +declare void @__quantum__rt__result_record_output(ptr, ptr) + +attributes #0 = { "entry_point" 
"output_labeling_schema" "qir_profiles"="adaptive_profile" "required_num_qubits"="4" "required_num_results"="4" } +attributes #1 = { "irreversible" } + +; module flags + +!llvm.module.flags = !{!0, !1, !2, !3, !4, !5, !6, !7} + +!0 = !{i32 1, !"qir_major_version", i32 2} +!1 = !{i32 7, !"qir_minor_version", i32 1} +!2 = !{i32 1, !"dynamic_qubit_management", i1 false} +!3 = !{i32 1, !"dynamic_result_management", i1 false} +!4 = !{i32 5, !"int_computations", !{!"i64"}} +!5 = !{i32 5, !"float_computations", !{!"double"}} +!6 = !{i32 7, !"backwards_branching", i2 3} +!7 = !{i32 1, !"arrays", i1 true} diff --git a/source/pip/tests-integration/resources/adaptive_rifla/output/IntrinsicCNOT.out b/source/pip/tests-integration/resources/adaptive_rifla/output/IntrinsicCNOT.out new file mode 100644 index 0000000000..87fd181987 --- /dev/null +++ b/source/pip/tests-integration/resources/adaptive_rifla/output/IntrinsicCNOT.out @@ -0,0 +1,14 @@ +START +METADATA entry_point +METADATA output_labeling_schema +METADATA qir_profiles adaptive_profile +METADATA required_num_qubits 4 +METADATA required_num_results 4 +OUTPUT TUPLE 2 0_t +OUTPUT ARRAY 2 1_t0a +OUTPUT RESULT 0 2_t0a0r +OUTPUT RESULT 0 3_t0a1r +OUTPUT ARRAY 2 4_t1a +OUTPUT RESULT 1 5_t1a0r +OUTPUT RESULT 1 6_t1a1r +END 0 diff --git a/source/pip/tests-integration/resources/adaptive_rifla/output/IntrinsicHIXYZ.ll b/source/pip/tests-integration/resources/adaptive_rifla/output/IntrinsicHIXYZ.ll new file mode 100644 index 0000000000..11fad4dd20 --- /dev/null +++ b/source/pip/tests-integration/resources/adaptive_rifla/output/IntrinsicHIXYZ.ll @@ -0,0 +1,70 @@ +@0 = internal constant [4 x i8] c"0_t\00" +@1 = internal constant [6 x i8] c"1_t0r\00" +@2 = internal constant [6 x i8] c"2_t1r\00" +@3 = internal constant [6 x i8] c"3_t2r\00" +@4 = internal constant [6 x i8] c"4_t3r\00" +@5 = internal constant [6 x i8] c"5_t4r\00" +@6 = internal constant [6 x i8] c"6_t5r\00" + +define i64 @ENTRYPOINT__main() #0 { +block_0: + call void 
@__quantum__rt__initialize(ptr null) + call void @__quantum__qis__h__body(ptr inttoptr (i64 0 to ptr)) + call void @__quantum__qis__z__body(ptr inttoptr (i64 0 to ptr)) + call void @__quantum__qis__h__body(ptr inttoptr (i64 0 to ptr)) + call void @__quantum__qis__mresetz__body(ptr inttoptr (i64 0 to ptr), ptr inttoptr (i64 0 to ptr)) + call void @__quantum__qis__x__body(ptr inttoptr (i64 1 to ptr)) + call void @__quantum__qis__mresetz__body(ptr inttoptr (i64 1 to ptr), ptr inttoptr (i64 1 to ptr)) + call void @__quantum__qis__x__body(ptr inttoptr (i64 2 to ptr)) + call void @__quantum__qis__mresetz__body(ptr inttoptr (i64 2 to ptr), ptr inttoptr (i64 2 to ptr)) + call void @__quantum__qis__y__body(ptr inttoptr (i64 3 to ptr)) + call void @__quantum__qis__mresetz__body(ptr inttoptr (i64 3 to ptr), ptr inttoptr (i64 3 to ptr)) + call void @__quantum__qis__h__body(ptr inttoptr (i64 4 to ptr)) + call void @__quantum__qis__y__body(ptr inttoptr (i64 4 to ptr)) + call void @__quantum__qis__h__body(ptr inttoptr (i64 4 to ptr)) + call void @__quantum__qis__mresetz__body(ptr inttoptr (i64 4 to ptr), ptr inttoptr (i64 4 to ptr)) + call void @__quantum__qis__h__body(ptr inttoptr (i64 5 to ptr)) + call void @__quantum__qis__z__body(ptr inttoptr (i64 5 to ptr)) + call void @__quantum__qis__h__body(ptr inttoptr (i64 5 to ptr)) + call void @__quantum__qis__mresetz__body(ptr inttoptr (i64 5 to ptr), ptr inttoptr (i64 5 to ptr)) + call void @__quantum__rt__tuple_record_output(i64 6, ptr @0) + call void @__quantum__rt__result_record_output(ptr inttoptr (i64 0 to ptr), ptr @1) + call void @__quantum__rt__result_record_output(ptr inttoptr (i64 1 to ptr), ptr @2) + call void @__quantum__rt__result_record_output(ptr inttoptr (i64 2 to ptr), ptr @3) + call void @__quantum__rt__result_record_output(ptr inttoptr (i64 3 to ptr), ptr @4) + call void @__quantum__rt__result_record_output(ptr inttoptr (i64 4 to ptr), ptr @5) + call void @__quantum__rt__result_record_output(ptr inttoptr (i64 5 to 
ptr), ptr @6) + ret i64 0 +} + +declare void @__quantum__rt__initialize(ptr) + +declare void @__quantum__qis__h__body(ptr) + +declare void @__quantum__qis__z__body(ptr) + +declare void @__quantum__qis__mresetz__body(ptr, ptr) #1 + +declare void @__quantum__qis__x__body(ptr) + +declare void @__quantum__qis__y__body(ptr) + +declare void @__quantum__rt__tuple_record_output(i64, ptr) + +declare void @__quantum__rt__result_record_output(ptr, ptr) + +attributes #0 = { "entry_point" "output_labeling_schema" "qir_profiles"="adaptive_profile" "required_num_qubits"="6" "required_num_results"="6" } +attributes #1 = { "irreversible" } + +; module flags + +!llvm.module.flags = !{!0, !1, !2, !3, !4, !5, !6, !7} + +!0 = !{i32 1, !"qir_major_version", i32 2} +!1 = !{i32 7, !"qir_minor_version", i32 1} +!2 = !{i32 1, !"dynamic_qubit_management", i1 false} +!3 = !{i32 1, !"dynamic_result_management", i1 false} +!4 = !{i32 5, !"int_computations", !{!"i64"}} +!5 = !{i32 5, !"float_computations", !{!"double"}} +!6 = !{i32 7, !"backwards_branching", i2 3} +!7 = !{i32 1, !"arrays", i1 true} diff --git a/source/pip/tests-integration/resources/adaptive_rifla/output/IntrinsicHIXYZ.out b/source/pip/tests-integration/resources/adaptive_rifla/output/IntrinsicHIXYZ.out new file mode 100644 index 0000000000..4baac546a6 --- /dev/null +++ b/source/pip/tests-integration/resources/adaptive_rifla/output/IntrinsicHIXYZ.out @@ -0,0 +1,14 @@ +START +METADATA entry_point +METADATA output_labeling_schema +METADATA qir_profiles adaptive_profile +METADATA required_num_qubits 6 +METADATA required_num_results 6 +OUTPUT TUPLE 6 0_t +OUTPUT RESULT 1 1_t0r +OUTPUT RESULT 1 2_t1r +OUTPUT RESULT 1 3_t2r +OUTPUT RESULT 1 4_t3r +OUTPUT RESULT 1 5_t4r +OUTPUT RESULT 1 6_t5r +END 0 diff --git a/source/pip/tests-integration/resources/adaptive_rifla/output/IntrinsicM.ll b/source/pip/tests-integration/resources/adaptive_rifla/output/IntrinsicM.ll new file mode 100644 index 0000000000..fcb9f69398 --- /dev/null +++ 
b/source/pip/tests-integration/resources/adaptive_rifla/output/IntrinsicM.ll @@ -0,0 +1,45 @@ +@0 = internal constant [4 x i8] c"0_t\00" +@1 = internal constant [6 x i8] c"1_t0r\00" +@2 = internal constant [6 x i8] c"2_t1r\00" + +define i64 @ENTRYPOINT__main() #0 { +block_0: + call void @__quantum__rt__initialize(ptr null) + call void @__quantum__qis__m__body(ptr inttoptr (i64 0 to ptr), ptr inttoptr (i64 0 to ptr)) + call void @__quantum__qis__x__body(ptr inttoptr (i64 1 to ptr)) + call void @__quantum__qis__m__body(ptr inttoptr (i64 1 to ptr), ptr inttoptr (i64 1 to ptr)) + call void @__quantum__qis__reset__body(ptr inttoptr (i64 0 to ptr)) + call void @__quantum__qis__reset__body(ptr inttoptr (i64 1 to ptr)) + call void @__quantum__rt__tuple_record_output(i64 2, ptr @0) + call void @__quantum__rt__result_record_output(ptr inttoptr (i64 0 to ptr), ptr @1) + call void @__quantum__rt__result_record_output(ptr inttoptr (i64 1 to ptr), ptr @2) + ret i64 0 +} + +declare void @__quantum__rt__initialize(ptr) + +declare void @__quantum__qis__m__body(ptr, ptr) #1 + +declare void @__quantum__qis__x__body(ptr) + +declare void @__quantum__qis__reset__body(ptr) #1 + +declare void @__quantum__rt__tuple_record_output(i64, ptr) + +declare void @__quantum__rt__result_record_output(ptr, ptr) + +attributes #0 = { "entry_point" "output_labeling_schema" "qir_profiles"="adaptive_profile" "required_num_qubits"="2" "required_num_results"="2" } +attributes #1 = { "irreversible" } + +; module flags + +!llvm.module.flags = !{!0, !1, !2, !3, !4, !5, !6, !7} + +!0 = !{i32 1, !"qir_major_version", i32 2} +!1 = !{i32 7, !"qir_minor_version", i32 1} +!2 = !{i32 1, !"dynamic_qubit_management", i1 false} +!3 = !{i32 1, !"dynamic_result_management", i1 false} +!4 = !{i32 5, !"int_computations", !{!"i64"}} +!5 = !{i32 5, !"float_computations", !{!"double"}} +!6 = !{i32 7, !"backwards_branching", i2 3} +!7 = !{i32 1, !"arrays", i1 true} diff --git 
a/source/pip/tests-integration/resources/adaptive_rifla/output/IntrinsicM.out b/source/pip/tests-integration/resources/adaptive_rifla/output/IntrinsicM.out new file mode 100644 index 0000000000..4322e6240a --- /dev/null +++ b/source/pip/tests-integration/resources/adaptive_rifla/output/IntrinsicM.out @@ -0,0 +1,10 @@ +START +METADATA entry_point +METADATA output_labeling_schema +METADATA qir_profiles adaptive_profile +METADATA required_num_qubits 2 +METADATA required_num_results 2 +OUTPUT TUPLE 2 0_t +OUTPUT RESULT 0 1_t0r +OUTPUT RESULT 1 2_t1r +END 0 diff --git a/source/pip/tests-integration/resources/adaptive_rifla/output/IntrinsicMeasureWithBitFlipCode.ll b/source/pip/tests-integration/resources/adaptive_rifla/output/IntrinsicMeasureWithBitFlipCode.ll new file mode 100644 index 0000000000..dde7713e3b --- /dev/null +++ b/source/pip/tests-integration/resources/adaptive_rifla/output/IntrinsicMeasureWithBitFlipCode.ll @@ -0,0 +1,90 @@ +@0 = internal constant [4 x i8] c"0_t\00" +@1 = internal constant [6 x i8] c"1_t0a\00" +@2 = internal constant [8 x i8] c"2_t0a0r\00" +@3 = internal constant [8 x i8] c"3_t0a1r\00" +@4 = internal constant [6 x i8] c"4_t1a\00" +@5 = internal constant [8 x i8] c"5_t1a0r\00" +@6 = internal constant [8 x i8] c"6_t1a1r\00" +@7 = internal constant [6 x i8] c"7_t2a\00" +@8 = internal constant [8 x i8] c"8_t2a0r\00" +@9 = internal constant [8 x i8] c"9_t2a1r\00" +@10 = internal constant [9 x i8] c"10_t2a2r\00" + +define i64 @ENTRYPOINT__main() #0 { +block_0: + call void @__quantum__rt__initialize(ptr null) + call void @__quantum__qis__cx__body(ptr inttoptr (i64 0 to ptr), ptr inttoptr (i64 1 to ptr)) + call void @__quantum__qis__cx__body(ptr inttoptr (i64 0 to ptr), ptr inttoptr (i64 2 to ptr)) + call void @__quantum__qis__h__body(ptr inttoptr (i64 3 to ptr)) + call void @__quantum__qis__cz__body(ptr inttoptr (i64 3 to ptr), ptr inttoptr (i64 0 to ptr)) + call void @__quantum__qis__cz__body(ptr inttoptr (i64 3 to ptr), ptr inttoptr (i64 1 to 
ptr)) + call void @__quantum__qis__h__body(ptr inttoptr (i64 3 to ptr)) + call void @__quantum__qis__mresetz__body(ptr inttoptr (i64 3 to ptr), ptr inttoptr (i64 0 to ptr)) + call void @__quantum__qis__h__body(ptr inttoptr (i64 3 to ptr)) + call void @__quantum__qis__cz__body(ptr inttoptr (i64 3 to ptr), ptr inttoptr (i64 1 to ptr)) + call void @__quantum__qis__cz__body(ptr inttoptr (i64 3 to ptr), ptr inttoptr (i64 2 to ptr)) + call void @__quantum__qis__h__body(ptr inttoptr (i64 3 to ptr)) + call void @__quantum__qis__mresetz__body(ptr inttoptr (i64 3 to ptr), ptr inttoptr (i64 1 to ptr)) + call void @__quantum__qis__x__body(ptr inttoptr (i64 2 to ptr)) + call void @__quantum__qis__h__body(ptr inttoptr (i64 3 to ptr)) + call void @__quantum__qis__cz__body(ptr inttoptr (i64 3 to ptr), ptr inttoptr (i64 0 to ptr)) + call void @__quantum__qis__cz__body(ptr inttoptr (i64 3 to ptr), ptr inttoptr (i64 1 to ptr)) + call void @__quantum__qis__h__body(ptr inttoptr (i64 3 to ptr)) + call void @__quantum__qis__mresetz__body(ptr inttoptr (i64 3 to ptr), ptr inttoptr (i64 2 to ptr)) + call void @__quantum__qis__h__body(ptr inttoptr (i64 3 to ptr)) + call void @__quantum__qis__cz__body(ptr inttoptr (i64 3 to ptr), ptr inttoptr (i64 1 to ptr)) + call void @__quantum__qis__cz__body(ptr inttoptr (i64 3 to ptr), ptr inttoptr (i64 2 to ptr)) + call void @__quantum__qis__h__body(ptr inttoptr (i64 3 to ptr)) + call void @__quantum__qis__mresetz__body(ptr inttoptr (i64 3 to ptr), ptr inttoptr (i64 3 to ptr)) + call void @__quantum__qis__cx__body(ptr inttoptr (i64 0 to ptr), ptr inttoptr (i64 2 to ptr)) + call void @__quantum__qis__cx__body(ptr inttoptr (i64 0 to ptr), ptr inttoptr (i64 1 to ptr)) + call void @__quantum__qis__mresetz__body(ptr inttoptr (i64 0 to ptr), ptr inttoptr (i64 4 to ptr)) + call void @__quantum__qis__mresetz__body(ptr inttoptr (i64 1 to ptr), ptr inttoptr (i64 5 to ptr)) + call void @__quantum__qis__mresetz__body(ptr inttoptr (i64 2 to ptr), ptr inttoptr (i64 6 
to ptr)) + call void @__quantum__rt__tuple_record_output(i64 3, ptr @0) + call void @__quantum__rt__array_record_output(i64 2, ptr @1) + call void @__quantum__rt__result_record_output(ptr inttoptr (i64 0 to ptr), ptr @2) + call void @__quantum__rt__result_record_output(ptr inttoptr (i64 1 to ptr), ptr @3) + call void @__quantum__rt__array_record_output(i64 2, ptr @4) + call void @__quantum__rt__result_record_output(ptr inttoptr (i64 2 to ptr), ptr @5) + call void @__quantum__rt__result_record_output(ptr inttoptr (i64 3 to ptr), ptr @6) + call void @__quantum__rt__array_record_output(i64 3, ptr @7) + call void @__quantum__rt__result_record_output(ptr inttoptr (i64 4 to ptr), ptr @8) + call void @__quantum__rt__result_record_output(ptr inttoptr (i64 5 to ptr), ptr @9) + call void @__quantum__rt__result_record_output(ptr inttoptr (i64 6 to ptr), ptr @10) + ret i64 0 +} + +declare void @__quantum__rt__initialize(ptr) + +declare void @__quantum__qis__cx__body(ptr, ptr) + +declare void @__quantum__qis__h__body(ptr) + +declare void @__quantum__qis__cz__body(ptr, ptr) + +declare void @__quantum__qis__mresetz__body(ptr, ptr) #1 + +declare void @__quantum__qis__x__body(ptr) + +declare void @__quantum__rt__tuple_record_output(i64, ptr) + +declare void @__quantum__rt__array_record_output(i64, ptr) + +declare void @__quantum__rt__result_record_output(ptr, ptr) + +attributes #0 = { "entry_point" "output_labeling_schema" "qir_profiles"="adaptive_profile" "required_num_qubits"="4" "required_num_results"="7" } +attributes #1 = { "irreversible" } + +; module flags + +!llvm.module.flags = !{!0, !1, !2, !3, !4, !5, !6, !7} + +!0 = !{i32 1, !"qir_major_version", i32 2} +!1 = !{i32 7, !"qir_minor_version", i32 1} +!2 = !{i32 1, !"dynamic_qubit_management", i1 false} +!3 = !{i32 1, !"dynamic_result_management", i1 false} +!4 = !{i32 5, !"int_computations", !{!"i64"}} +!5 = !{i32 5, !"float_computations", !{!"double"}} +!6 = !{i32 7, !"backwards_branching", i2 3} +!7 = !{i32 1, !"arrays", 
i1 true} diff --git a/source/pip/tests-integration/resources/adaptive_rifla/output/IntrinsicMeasureWithBitFlipCode.out b/source/pip/tests-integration/resources/adaptive_rifla/output/IntrinsicMeasureWithBitFlipCode.out new file mode 100644 index 0000000000..431e1a2b83 --- /dev/null +++ b/source/pip/tests-integration/resources/adaptive_rifla/output/IntrinsicMeasureWithBitFlipCode.out @@ -0,0 +1,18 @@ +START +METADATA entry_point +METADATA output_labeling_schema +METADATA qir_profiles adaptive_profile +METADATA required_num_qubits 4 +METADATA required_num_results 7 +OUTPUT TUPLE 3 0_t +OUTPUT ARRAY 2 1_t0a +OUTPUT RESULT 0 2_t0a0r +OUTPUT RESULT 0 3_t0a1r +OUTPUT ARRAY 2 4_t1a +OUTPUT RESULT 0 5_t1a0r +OUTPUT RESULT 1 6_t1a1r +OUTPUT ARRAY 3 7_t2a +OUTPUT RESULT 0 8_t2a0r +OUTPUT RESULT 0 9_t2a1r +OUTPUT RESULT 1 10_t2a2r +END 0 diff --git a/source/pip/tests-integration/resources/adaptive_rifla/output/IntrinsicMeasureWithPhaseFlipCode.ll b/source/pip/tests-integration/resources/adaptive_rifla/output/IntrinsicMeasureWithPhaseFlipCode.ll new file mode 100644 index 0000000000..7348326371 --- /dev/null +++ b/source/pip/tests-integration/resources/adaptive_rifla/output/IntrinsicMeasureWithPhaseFlipCode.ll @@ -0,0 +1,94 @@ +@0 = internal constant [4 x i8] c"0_t\00" +@1 = internal constant [6 x i8] c"1_t0a\00" +@2 = internal constant [8 x i8] c"2_t0a0r\00" +@3 = internal constant [8 x i8] c"3_t0a1r\00" +@4 = internal constant [6 x i8] c"4_t1a\00" +@5 = internal constant [8 x i8] c"5_t1a0r\00" +@6 = internal constant [8 x i8] c"6_t1a1r\00" +@7 = internal constant [6 x i8] c"7_t2a\00" +@8 = internal constant [8 x i8] c"8_t2a0r\00" +@9 = internal constant [8 x i8] c"9_t2a1r\00" +@10 = internal constant [9 x i8] c"10_t2a2r\00" + +define i64 @ENTRYPOINT__main() #0 { +block_0: + call void @__quantum__rt__initialize(ptr null) + call void @__quantum__qis__cx__body(ptr inttoptr (i64 0 to ptr), ptr inttoptr (i64 1 to ptr)) + call void @__quantum__qis__cx__body(ptr inttoptr (i64 0 to 
ptr), ptr inttoptr (i64 2 to ptr)) + call void @__quantum__qis__h__body(ptr inttoptr (i64 0 to ptr)) + call void @__quantum__qis__h__body(ptr inttoptr (i64 1 to ptr)) + call void @__quantum__qis__h__body(ptr inttoptr (i64 2 to ptr)) + call void @__quantum__qis__h__body(ptr inttoptr (i64 3 to ptr)) + call void @__quantum__qis__cx__body(ptr inttoptr (i64 3 to ptr), ptr inttoptr (i64 0 to ptr)) + call void @__quantum__qis__cx__body(ptr inttoptr (i64 3 to ptr), ptr inttoptr (i64 1 to ptr)) + call void @__quantum__qis__h__body(ptr inttoptr (i64 3 to ptr)) + call void @__quantum__qis__mresetz__body(ptr inttoptr (i64 3 to ptr), ptr inttoptr (i64 0 to ptr)) + call void @__quantum__qis__h__body(ptr inttoptr (i64 3 to ptr)) + call void @__quantum__qis__cx__body(ptr inttoptr (i64 3 to ptr), ptr inttoptr (i64 1 to ptr)) + call void @__quantum__qis__cx__body(ptr inttoptr (i64 3 to ptr), ptr inttoptr (i64 2 to ptr)) + call void @__quantum__qis__h__body(ptr inttoptr (i64 3 to ptr)) + call void @__quantum__qis__mresetz__body(ptr inttoptr (i64 3 to ptr), ptr inttoptr (i64 1 to ptr)) + call void @__quantum__qis__z__body(ptr inttoptr (i64 1 to ptr)) + call void @__quantum__qis__h__body(ptr inttoptr (i64 3 to ptr)) + call void @__quantum__qis__cx__body(ptr inttoptr (i64 3 to ptr), ptr inttoptr (i64 0 to ptr)) + call void @__quantum__qis__cx__body(ptr inttoptr (i64 3 to ptr), ptr inttoptr (i64 1 to ptr)) + call void @__quantum__qis__h__body(ptr inttoptr (i64 3 to ptr)) + call void @__quantum__qis__mresetz__body(ptr inttoptr (i64 3 to ptr), ptr inttoptr (i64 2 to ptr)) + call void @__quantum__qis__h__body(ptr inttoptr (i64 3 to ptr)) + call void @__quantum__qis__cx__body(ptr inttoptr (i64 3 to ptr), ptr inttoptr (i64 1 to ptr)) + call void @__quantum__qis__cx__body(ptr inttoptr (i64 3 to ptr), ptr inttoptr (i64 2 to ptr)) + call void @__quantum__qis__h__body(ptr inttoptr (i64 3 to ptr)) + call void @__quantum__qis__mresetz__body(ptr inttoptr (i64 3 to ptr), ptr inttoptr (i64 3 to ptr)) 
+ call void @__quantum__qis__h__body(ptr inttoptr (i64 2 to ptr)) + call void @__quantum__qis__h__body(ptr inttoptr (i64 1 to ptr)) + call void @__quantum__qis__h__body(ptr inttoptr (i64 0 to ptr)) + call void @__quantum__qis__cx__body(ptr inttoptr (i64 0 to ptr), ptr inttoptr (i64 2 to ptr)) + call void @__quantum__qis__cx__body(ptr inttoptr (i64 0 to ptr), ptr inttoptr (i64 1 to ptr)) + call void @__quantum__qis__mresetz__body(ptr inttoptr (i64 0 to ptr), ptr inttoptr (i64 4 to ptr)) + call void @__quantum__qis__mresetz__body(ptr inttoptr (i64 1 to ptr), ptr inttoptr (i64 5 to ptr)) + call void @__quantum__qis__mresetz__body(ptr inttoptr (i64 2 to ptr), ptr inttoptr (i64 6 to ptr)) + call void @__quantum__rt__tuple_record_output(i64 3, ptr @0) + call void @__quantum__rt__array_record_output(i64 2, ptr @1) + call void @__quantum__rt__result_record_output(ptr inttoptr (i64 0 to ptr), ptr @2) + call void @__quantum__rt__result_record_output(ptr inttoptr (i64 1 to ptr), ptr @3) + call void @__quantum__rt__array_record_output(i64 2, ptr @4) + call void @__quantum__rt__result_record_output(ptr inttoptr (i64 2 to ptr), ptr @5) + call void @__quantum__rt__result_record_output(ptr inttoptr (i64 3 to ptr), ptr @6) + call void @__quantum__rt__array_record_output(i64 3, ptr @7) + call void @__quantum__rt__result_record_output(ptr inttoptr (i64 4 to ptr), ptr @8) + call void @__quantum__rt__result_record_output(ptr inttoptr (i64 5 to ptr), ptr @9) + call void @__quantum__rt__result_record_output(ptr inttoptr (i64 6 to ptr), ptr @10) + ret i64 0 +} + +declare void @__quantum__rt__initialize(ptr) + +declare void @__quantum__qis__cx__body(ptr, ptr) + +declare void @__quantum__qis__h__body(ptr) + +declare void @__quantum__qis__mresetz__body(ptr, ptr) #1 + +declare void @__quantum__qis__z__body(ptr) + +declare void @__quantum__rt__tuple_record_output(i64, ptr) + +declare void @__quantum__rt__array_record_output(i64, ptr) + +declare void @__quantum__rt__result_record_output(ptr, 
ptr) + +attributes #0 = { "entry_point" "output_labeling_schema" "qir_profiles"="adaptive_profile" "required_num_qubits"="4" "required_num_results"="7" } +attributes #1 = { "irreversible" } + +; module flags + +!llvm.module.flags = !{!0, !1, !2, !3, !4, !5, !6, !7} + +!0 = !{i32 1, !"qir_major_version", i32 2} +!1 = !{i32 7, !"qir_minor_version", i32 1} +!2 = !{i32 1, !"dynamic_qubit_management", i1 false} +!3 = !{i32 1, !"dynamic_result_management", i1 false} +!4 = !{i32 5, !"int_computations", !{!"i64"}} +!5 = !{i32 5, !"float_computations", !{!"double"}} +!6 = !{i32 7, !"backwards_branching", i2 3} +!7 = !{i32 1, !"arrays", i1 true} diff --git a/source/pip/tests-integration/resources/adaptive_rifla/output/IntrinsicMeasureWithPhaseFlipCode.out b/source/pip/tests-integration/resources/adaptive_rifla/output/IntrinsicMeasureWithPhaseFlipCode.out new file mode 100644 index 0000000000..1ddefa701c --- /dev/null +++ b/source/pip/tests-integration/resources/adaptive_rifla/output/IntrinsicMeasureWithPhaseFlipCode.out @@ -0,0 +1,18 @@ +START +METADATA entry_point +METADATA output_labeling_schema +METADATA qir_profiles adaptive_profile +METADATA required_num_qubits 4 +METADATA required_num_results 7 +OUTPUT TUPLE 3 0_t +OUTPUT ARRAY 2 1_t0a +OUTPUT RESULT 0 2_t0a0r +OUTPUT RESULT 0 3_t0a1r +OUTPUT ARRAY 2 4_t1a +OUTPUT RESULT 1 5_t1a0r +OUTPUT RESULT 1 6_t1a1r +OUTPUT ARRAY 3 7_t2a +OUTPUT RESULT 0 8_t2a0r +OUTPUT RESULT 1 9_t2a1r +OUTPUT RESULT 0 10_t2a2r +END 0 diff --git a/source/pip/tests-integration/resources/adaptive_rifla/output/IntrinsicRotationsWithPeriod.ll b/source/pip/tests-integration/resources/adaptive_rifla/output/IntrinsicRotationsWithPeriod.ll new file mode 100644 index 0000000000..54eca58683 --- /dev/null +++ b/source/pip/tests-integration/resources/adaptive_rifla/output/IntrinsicRotationsWithPeriod.ll @@ -0,0 +1,130 @@ +@0 = internal constant [4 x i8] c"0_t\00" +@1 = internal constant [6 x i8] c"1_t0a\00" +@2 = internal constant [8 x i8] c"2_t0a0r\00" +@3 
= internal constant [8 x i8] c"3_t0a1r\00" +@4 = internal constant [6 x i8] c"4_t1a\00" +@5 = internal constant [8 x i8] c"5_t1a0r\00" +@6 = internal constant [8 x i8] c"6_t1a1r\00" +@7 = internal constant [6 x i8] c"7_t2a\00" +@8 = internal constant [8 x i8] c"8_t2a0r\00" +@9 = internal constant [8 x i8] c"9_t2a1r\00" + +define i64 @ENTRYPOINT__main() #0 { +block_0: + call void @__quantum__rt__initialize(ptr null) + call void @__quantum__qis__x__body(ptr inttoptr (i64 0 to ptr)) + call void @__quantum__qis__x__body(ptr inttoptr (i64 1 to ptr)) + call void @__quantum__qis__y__body(ptr inttoptr (i64 2 to ptr)) + call void @__quantum__qis__y__body(ptr inttoptr (i64 3 to ptr)) + call void @__quantum__qis__h__body(ptr inttoptr (i64 4 to ptr)) + call void @__quantum__qis__z__body(ptr inttoptr (i64 4 to ptr)) + call void @__quantum__qis__h__body(ptr inttoptr (i64 4 to ptr)) + call void @__quantum__qis__h__body(ptr inttoptr (i64 5 to ptr)) + call void @__quantum__qis__z__body(ptr inttoptr (i64 5 to ptr)) + call void @__quantum__qis__h__body(ptr inttoptr (i64 5 to ptr)) + call void @__quantum__qis__rx__body(double 1.5707963267948966, ptr inttoptr (i64 0 to ptr)) + call void @__quantum__qis__rx__body(double 1.5707963267948966, ptr inttoptr (i64 1 to ptr)) + call void @__quantum__qis__ry__body(double 1.5707963267948966, ptr inttoptr (i64 2 to ptr)) + call void @__quantum__qis__ry__body(double 1.5707963267948966, ptr inttoptr (i64 3 to ptr)) + call void @__quantum__qis__rz__body(double 1.5707963267948966, ptr inttoptr (i64 4 to ptr)) + call void @__quantum__qis__rz__body(double 1.5707963267948966, ptr inttoptr (i64 5 to ptr)) + call void @__quantum__qis__rx__body(double 1.5707963267948966, ptr inttoptr (i64 0 to ptr)) + call void @__quantum__qis__rx__body(double 1.5707963267948966, ptr inttoptr (i64 1 to ptr)) + call void @__quantum__qis__ry__body(double 1.5707963267948966, ptr inttoptr (i64 2 to ptr)) + call void @__quantum__qis__ry__body(double 1.5707963267948966, ptr 
inttoptr (i64 3 to ptr)) + call void @__quantum__qis__rz__body(double 1.5707963267948966, ptr inttoptr (i64 4 to ptr)) + call void @__quantum__qis__rz__body(double 1.5707963267948966, ptr inttoptr (i64 5 to ptr)) + call void @__quantum__qis__rx__body(double 1.5707963267948966, ptr inttoptr (i64 0 to ptr)) + call void @__quantum__qis__rx__body(double 1.5707963267948966, ptr inttoptr (i64 1 to ptr)) + call void @__quantum__qis__ry__body(double 1.5707963267948966, ptr inttoptr (i64 2 to ptr)) + call void @__quantum__qis__ry__body(double 1.5707963267948966, ptr inttoptr (i64 3 to ptr)) + call void @__quantum__qis__rz__body(double 1.5707963267948966, ptr inttoptr (i64 4 to ptr)) + call void @__quantum__qis__rz__body(double 1.5707963267948966, ptr inttoptr (i64 5 to ptr)) + call void @__quantum__qis__rx__body(double 1.5707963267948966, ptr inttoptr (i64 0 to ptr)) + call void @__quantum__qis__rx__body(double 1.5707963267948966, ptr inttoptr (i64 1 to ptr)) + call void @__quantum__qis__ry__body(double 1.5707963267948966, ptr inttoptr (i64 2 to ptr)) + call void @__quantum__qis__ry__body(double 1.5707963267948966, ptr inttoptr (i64 3 to ptr)) + call void @__quantum__qis__rz__body(double 1.5707963267948966, ptr inttoptr (i64 4 to ptr)) + call void @__quantum__qis__rz__body(double 1.5707963267948966, ptr inttoptr (i64 5 to ptr)) + call void @__quantum__qis__rx__body(double 1.5707963267948966, ptr inttoptr (i64 0 to ptr)) + call void @__quantum__qis__rx__body(double 1.5707963267948966, ptr inttoptr (i64 1 to ptr)) + call void @__quantum__qis__ry__body(double 1.5707963267948966, ptr inttoptr (i64 2 to ptr)) + call void @__quantum__qis__ry__body(double 1.5707963267948966, ptr inttoptr (i64 3 to ptr)) + call void @__quantum__qis__rz__body(double 1.5707963267948966, ptr inttoptr (i64 4 to ptr)) + call void @__quantum__qis__rz__body(double 1.5707963267948966, ptr inttoptr (i64 5 to ptr)) + call void @__quantum__qis__rx__body(double 1.5707963267948966, ptr inttoptr (i64 0 to ptr)) 
+ call void @__quantum__qis__rx__body(double 1.5707963267948966, ptr inttoptr (i64 1 to ptr)) + call void @__quantum__qis__ry__body(double 1.5707963267948966, ptr inttoptr (i64 2 to ptr)) + call void @__quantum__qis__ry__body(double 1.5707963267948966, ptr inttoptr (i64 3 to ptr)) + call void @__quantum__qis__rz__body(double 1.5707963267948966, ptr inttoptr (i64 4 to ptr)) + call void @__quantum__qis__rz__body(double 1.5707963267948966, ptr inttoptr (i64 5 to ptr)) + call void @__quantum__qis__rx__body(double 1.5707963267948966, ptr inttoptr (i64 0 to ptr)) + call void @__quantum__qis__rx__body(double 1.5707963267948966, ptr inttoptr (i64 1 to ptr)) + call void @__quantum__qis__ry__body(double 1.5707963267948966, ptr inttoptr (i64 2 to ptr)) + call void @__quantum__qis__ry__body(double 1.5707963267948966, ptr inttoptr (i64 3 to ptr)) + call void @__quantum__qis__rz__body(double 1.5707963267948966, ptr inttoptr (i64 4 to ptr)) + call void @__quantum__qis__rz__body(double 1.5707963267948966, ptr inttoptr (i64 5 to ptr)) + call void @__quantum__qis__rx__body(double 1.5707963267948966, ptr inttoptr (i64 0 to ptr)) + call void @__quantum__qis__rx__body(double 1.5707963267948966, ptr inttoptr (i64 1 to ptr)) + call void @__quantum__qis__ry__body(double 1.5707963267948966, ptr inttoptr (i64 2 to ptr)) + call void @__quantum__qis__ry__body(double 1.5707963267948966, ptr inttoptr (i64 3 to ptr)) + call void @__quantum__qis__rz__body(double 1.5707963267948966, ptr inttoptr (i64 4 to ptr)) + call void @__quantum__qis__rz__body(double 1.5707963267948966, ptr inttoptr (i64 5 to ptr)) + call void @__quantum__qis__mresetz__body(ptr inttoptr (i64 0 to ptr), ptr inttoptr (i64 0 to ptr)) + call void @__quantum__qis__mresetz__body(ptr inttoptr (i64 1 to ptr), ptr inttoptr (i64 1 to ptr)) + call void @__quantum__qis__mresetz__body(ptr inttoptr (i64 2 to ptr), ptr inttoptr (i64 2 to ptr)) + call void @__quantum__qis__mresetz__body(ptr inttoptr (i64 3 to ptr), ptr inttoptr (i64 3 to 
ptr)) + call void @__quantum__qis__mresetz__body(ptr inttoptr (i64 4 to ptr), ptr inttoptr (i64 4 to ptr)) + call void @__quantum__qis__mresetz__body(ptr inttoptr (i64 5 to ptr), ptr inttoptr (i64 5 to ptr)) + call void @__quantum__rt__tuple_record_output(i64 3, ptr @0) + call void @__quantum__rt__array_record_output(i64 2, ptr @1) + call void @__quantum__rt__result_record_output(ptr inttoptr (i64 0 to ptr), ptr @2) + call void @__quantum__rt__result_record_output(ptr inttoptr (i64 1 to ptr), ptr @3) + call void @__quantum__rt__array_record_output(i64 2, ptr @4) + call void @__quantum__rt__result_record_output(ptr inttoptr (i64 2 to ptr), ptr @5) + call void @__quantum__rt__result_record_output(ptr inttoptr (i64 3 to ptr), ptr @6) + call void @__quantum__rt__array_record_output(i64 2, ptr @7) + call void @__quantum__rt__result_record_output(ptr inttoptr (i64 4 to ptr), ptr @8) + call void @__quantum__rt__result_record_output(ptr inttoptr (i64 5 to ptr), ptr @9) + ret i64 0 +} + +declare void @__quantum__rt__initialize(ptr) + +declare void @__quantum__qis__x__body(ptr) + +declare void @__quantum__qis__y__body(ptr) + +declare void @__quantum__qis__h__body(ptr) + +declare void @__quantum__qis__z__body(ptr) + +declare void @__quantum__qis__rx__body(double, ptr) + +declare void @__quantum__qis__ry__body(double, ptr) + +declare void @__quantum__qis__rz__body(double, ptr) + +declare void @__quantum__qis__mresetz__body(ptr, ptr) #1 + +declare void @__quantum__rt__tuple_record_output(i64, ptr) + +declare void @__quantum__rt__array_record_output(i64, ptr) + +declare void @__quantum__rt__result_record_output(ptr, ptr) + +attributes #0 = { "entry_point" "output_labeling_schema" "qir_profiles"="adaptive_profile" "required_num_qubits"="6" "required_num_results"="6" } +attributes #1 = { "irreversible" } + +; module flags + +!llvm.module.flags = !{!0, !1, !2, !3, !4, !5, !6, !7} + +!0 = !{i32 1, !"qir_major_version", i32 2} +!1 = !{i32 7, !"qir_minor_version", i32 1} +!2 = !{i32 
1, !"dynamic_qubit_management", i1 false} +!3 = !{i32 1, !"dynamic_result_management", i1 false} +!4 = !{i32 5, !"int_computations", !{!"i64"}} +!5 = !{i32 5, !"float_computations", !{!"double"}} +!6 = !{i32 7, !"backwards_branching", i2 3} +!7 = !{i32 1, !"arrays", i1 true} diff --git a/source/pip/tests-integration/resources/adaptive_rifla/output/IntrinsicRotationsWithPeriod.out b/source/pip/tests-integration/resources/adaptive_rifla/output/IntrinsicRotationsWithPeriod.out new file mode 100644 index 0000000000..c40d12d5ea --- /dev/null +++ b/source/pip/tests-integration/resources/adaptive_rifla/output/IntrinsicRotationsWithPeriod.out @@ -0,0 +1,17 @@ +START +METADATA entry_point +METADATA output_labeling_schema +METADATA qir_profiles adaptive_profile +METADATA required_num_qubits 6 +METADATA required_num_results 6 +OUTPUT TUPLE 3 0_t +OUTPUT ARRAY 2 1_t0a +OUTPUT RESULT 1 2_t0a0r +OUTPUT RESULT 1 3_t0a1r +OUTPUT ARRAY 2 4_t1a +OUTPUT RESULT 1 5_t1a0r +OUTPUT RESULT 1 6_t1a1r +OUTPUT ARRAY 2 7_t2a +OUTPUT RESULT 1 8_t2a0r +OUTPUT RESULT 1 9_t2a1r +END 0 diff --git a/source/pip/tests-integration/resources/adaptive_rifla/output/IntrinsicSTSWAP.ll b/source/pip/tests-integration/resources/adaptive_rifla/output/IntrinsicSTSWAP.ll new file mode 100644 index 0000000000..0435a1ad46 --- /dev/null +++ b/source/pip/tests-integration/resources/adaptive_rifla/output/IntrinsicSTSWAP.ll @@ -0,0 +1,70 @@ +@0 = internal constant [4 x i8] c"0_t\00" +@1 = internal constant [6 x i8] c"1_t0r\00" +@2 = internal constant [6 x i8] c"2_t1r\00" +@3 = internal constant [6 x i8] c"3_t2a\00" +@4 = internal constant [8 x i8] c"4_t2a0r\00" +@5 = internal constant [8 x i8] c"5_t2a1r\00" + +define i64 @ENTRYPOINT__main() #0 { +block_0: + call void @__quantum__rt__initialize(ptr null) + call void @__quantum__qis__h__body(ptr inttoptr (i64 0 to ptr)) + call void @__quantum__qis__s__body(ptr inttoptr (i64 0 to ptr)) + call void @__quantum__qis__s__body(ptr inttoptr (i64 0 to ptr)) + call void 
@__quantum__qis__h__body(ptr inttoptr (i64 0 to ptr)) + call void @__quantum__qis__mresetz__body(ptr inttoptr (i64 0 to ptr), ptr inttoptr (i64 0 to ptr)) + call void @__quantum__qis__h__body(ptr inttoptr (i64 1 to ptr)) + call void @__quantum__qis__t__body(ptr inttoptr (i64 1 to ptr)) + call void @__quantum__qis__t__body(ptr inttoptr (i64 1 to ptr)) + call void @__quantum__qis__t__body(ptr inttoptr (i64 1 to ptr)) + call void @__quantum__qis__t__body(ptr inttoptr (i64 1 to ptr)) + call void @__quantum__qis__h__body(ptr inttoptr (i64 1 to ptr)) + call void @__quantum__qis__mresetz__body(ptr inttoptr (i64 1 to ptr), ptr inttoptr (i64 1 to ptr)) + call void @__quantum__qis__x__body(ptr inttoptr (i64 3 to ptr)) + call void @__quantum__qis__swap__body(ptr inttoptr (i64 2 to ptr), ptr inttoptr (i64 3 to ptr)) + call void @__quantum__qis__mresetz__body(ptr inttoptr (i64 2 to ptr), ptr inttoptr (i64 2 to ptr)) + call void @__quantum__qis__mresetz__body(ptr inttoptr (i64 3 to ptr), ptr inttoptr (i64 3 to ptr)) + call void @__quantum__rt__tuple_record_output(i64 3, ptr @0) + call void @__quantum__rt__result_record_output(ptr inttoptr (i64 0 to ptr), ptr @1) + call void @__quantum__rt__result_record_output(ptr inttoptr (i64 1 to ptr), ptr @2) + call void @__quantum__rt__array_record_output(i64 2, ptr @3) + call void @__quantum__rt__result_record_output(ptr inttoptr (i64 2 to ptr), ptr @4) + call void @__quantum__rt__result_record_output(ptr inttoptr (i64 3 to ptr), ptr @5) + ret i64 0 +} + +declare void @__quantum__rt__initialize(ptr) + +declare void @__quantum__qis__h__body(ptr) + +declare void @__quantum__qis__s__body(ptr) + +declare void @__quantum__qis__mresetz__body(ptr, ptr) #1 + +declare void @__quantum__qis__t__body(ptr) + +declare void @__quantum__qis__x__body(ptr) + +declare void @__quantum__qis__swap__body(ptr, ptr) + +declare void @__quantum__rt__tuple_record_output(i64, ptr) + +declare void @__quantum__rt__result_record_output(ptr, ptr) + +declare void 
@__quantum__rt__array_record_output(i64, ptr) + +attributes #0 = { "entry_point" "output_labeling_schema" "qir_profiles"="adaptive_profile" "required_num_qubits"="4" "required_num_results"="4" } +attributes #1 = { "irreversible" } + +; module flags + +!llvm.module.flags = !{!0, !1, !2, !3, !4, !5, !6, !7} + +!0 = !{i32 1, !"qir_major_version", i32 2} +!1 = !{i32 7, !"qir_minor_version", i32 1} +!2 = !{i32 1, !"dynamic_qubit_management", i1 false} +!3 = !{i32 1, !"dynamic_result_management", i1 false} +!4 = !{i32 5, !"int_computations", !{!"i64"}} +!5 = !{i32 5, !"float_computations", !{!"double"}} +!6 = !{i32 7, !"backwards_branching", i2 3} +!7 = !{i32 1, !"arrays", i1 true} diff --git a/source/pip/tests-integration/resources/adaptive_rifla/output/IntrinsicSTSWAP.out b/source/pip/tests-integration/resources/adaptive_rifla/output/IntrinsicSTSWAP.out new file mode 100644 index 0000000000..b571a02b76 --- /dev/null +++ b/source/pip/tests-integration/resources/adaptive_rifla/output/IntrinsicSTSWAP.out @@ -0,0 +1,13 @@ +START +METADATA entry_point +METADATA output_labeling_schema +METADATA qir_profiles adaptive_profile +METADATA required_num_qubits 4 +METADATA required_num_results 4 +OUTPUT TUPLE 3 0_t +OUTPUT RESULT 1 1_t0r +OUTPUT RESULT 1 2_t1r +OUTPUT ARRAY 2 3_t2a +OUTPUT RESULT 1 4_t2a0r +OUTPUT RESULT 0 5_t2a1r +END 0 diff --git a/source/pip/tests-integration/resources/adaptive_rifla/output/MeasureAndReuse.ll b/source/pip/tests-integration/resources/adaptive_rifla/output/MeasureAndReuse.ll new file mode 100644 index 0000000000..aeed9d45f3 --- /dev/null +++ b/source/pip/tests-integration/resources/adaptive_rifla/output/MeasureAndReuse.ll @@ -0,0 +1,63 @@ +@0 = internal constant [4 x i8] c"0_t\00" +@1 = internal constant [6 x i8] c"1_t0r\00" +@2 = internal constant [6 x i8] c"2_t1r\00" +@3 = internal constant [6 x i8] c"3_t2r\00" +@4 = internal constant [6 x i8] c"4_t3r\00" +@5 = internal constant [6 x i8] c"5_t4r\00" + +define i64 @ENTRYPOINT__main() #0 { 
+block_0: + call void @__quantum__rt__initialize(ptr null) + call void @__quantum__qis__m__body(ptr inttoptr (i64 0 to ptr), ptr inttoptr (i64 0 to ptr)) + call void @__quantum__qis__x__body(ptr inttoptr (i64 0 to ptr)) + call void @__quantum__qis__m__body(ptr inttoptr (i64 0 to ptr), ptr inttoptr (i64 1 to ptr)) + call void @__quantum__qis__h__body(ptr inttoptr (i64 0 to ptr)) + call void @__quantum__qis__h__body(ptr inttoptr (i64 0 to ptr)) + call void @__quantum__qis__mresetz__body(ptr inttoptr (i64 0 to ptr), ptr inttoptr (i64 2 to ptr)) + call void @__quantum__qis__mresetz__body(ptr inttoptr (i64 0 to ptr), ptr inttoptr (i64 3 to ptr)) + call void @__quantum__qis__h__body(ptr inttoptr (i64 0 to ptr)) + call void @__quantum__qis__s__adj(ptr inttoptr (i64 0 to ptr)) + call void @__quantum__qis__h__body(ptr inttoptr (i64 0 to ptr)) + call void @__quantum__qis__s__adj(ptr inttoptr (i64 0 to ptr)) + call void @__quantum__qis__h__body(ptr inttoptr (i64 0 to ptr)) + call void @__quantum__qis__mresetz__body(ptr inttoptr (i64 0 to ptr), ptr inttoptr (i64 4 to ptr)) + call void @__quantum__rt__tuple_record_output(i64 5, ptr @0) + call void @__quantum__rt__result_record_output(ptr inttoptr (i64 0 to ptr), ptr @1) + call void @__quantum__rt__result_record_output(ptr inttoptr (i64 1 to ptr), ptr @2) + call void @__quantum__rt__result_record_output(ptr inttoptr (i64 2 to ptr), ptr @3) + call void @__quantum__rt__result_record_output(ptr inttoptr (i64 3 to ptr), ptr @4) + call void @__quantum__rt__result_record_output(ptr inttoptr (i64 4 to ptr), ptr @5) + ret i64 0 +} + +declare void @__quantum__rt__initialize(ptr) + +declare void @__quantum__qis__m__body(ptr, ptr) #1 + +declare void @__quantum__qis__x__body(ptr) + +declare void @__quantum__qis__h__body(ptr) + +declare void @__quantum__qis__mresetz__body(ptr, ptr) #1 + +declare void @__quantum__qis__s__adj(ptr) + +declare void @__quantum__rt__tuple_record_output(i64, ptr) + +declare void 
@__quantum__rt__result_record_output(ptr, ptr) + +attributes #0 = { "entry_point" "output_labeling_schema" "qir_profiles"="adaptive_profile" "required_num_qubits"="1" "required_num_results"="5" } +attributes #1 = { "irreversible" } + +; module flags + +!llvm.module.flags = !{!0, !1, !2, !3, !4, !5, !6, !7} + +!0 = !{i32 1, !"qir_major_version", i32 2} +!1 = !{i32 7, !"qir_minor_version", i32 1} +!2 = !{i32 1, !"dynamic_qubit_management", i1 false} +!3 = !{i32 1, !"dynamic_result_management", i1 false} +!4 = !{i32 5, !"int_computations", !{!"i64"}} +!5 = !{i32 5, !"float_computations", !{!"double"}} +!6 = !{i32 7, !"backwards_branching", i2 3} +!7 = !{i32 1, !"arrays", i1 true} diff --git a/source/pip/tests-integration/resources/adaptive_rifla/output/MeasureAndReuse.out b/source/pip/tests-integration/resources/adaptive_rifla/output/MeasureAndReuse.out new file mode 100644 index 0000000000..8736ee6fdd --- /dev/null +++ b/source/pip/tests-integration/resources/adaptive_rifla/output/MeasureAndReuse.out @@ -0,0 +1,13 @@ +START +METADATA entry_point +METADATA output_labeling_schema +METADATA qir_profiles adaptive_profile +METADATA required_num_qubits 1 +METADATA required_num_results 5 +OUTPUT TUPLE 5 0_t +OUTPUT RESULT 0 1_t0r +OUTPUT RESULT 1 2_t1r +OUTPUT RESULT 1 3_t2r +OUTPUT RESULT 0 4_t3r +OUTPUT RESULT 0 5_t4r +END 0 diff --git a/source/pip/tests-integration/resources/adaptive_rifla/output/MeasurementComparison.ll b/source/pip/tests-integration/resources/adaptive_rifla/output/MeasurementComparison.ll new file mode 100644 index 0000000000..bbd16a44ec --- /dev/null +++ b/source/pip/tests-integration/resources/adaptive_rifla/output/MeasurementComparison.ll @@ -0,0 +1,75 @@ +@0 = internal constant [4 x i8] c"0_t\00" +@1 = internal constant [6 x i8] c"1_t0b\00" +@2 = internal constant [6 x i8] c"2_t1b\00" +@3 = internal constant [6 x i8] c"3_t2b\00" +@4 = internal constant [6 x i8] c"4_t3b\00" + +define i64 @ENTRYPOINT__main() #0 { +block_0: + %var_1 = alloca i1 + 
%var_9 = alloca i1 + call void @__quantum__rt__initialize(ptr null) + call void @__quantum__qis__x__body(ptr inttoptr (i64 0 to ptr)) + call void @__quantum__qis__cx__body(ptr inttoptr (i64 0 to ptr), ptr inttoptr (i64 1 to ptr)) + call void @__quantum__qis__m__body(ptr inttoptr (i64 0 to ptr), ptr inttoptr (i64 0 to ptr)) + call void @__quantum__qis__m__body(ptr inttoptr (i64 1 to ptr), ptr inttoptr (i64 1 to ptr)) + call void @__quantum__qis__reset__body(ptr inttoptr (i64 0 to ptr)) + call void @__quantum__qis__reset__body(ptr inttoptr (i64 1 to ptr)) + %var_0 = call i1 @__quantum__rt__read_result(ptr inttoptr (i64 0 to ptr)) + store i1 %var_0, ptr %var_1 + %var_2 = call i1 @__quantum__rt__read_result(ptr inttoptr (i64 1 to ptr)) + %var_3 = icmp eq i1 %var_2, false + %var_4 = call i1 @__quantum__rt__read_result(ptr inttoptr (i64 0 to ptr)) + %var_5 = call i1 @__quantum__rt__read_result(ptr inttoptr (i64 1 to ptr)) + %var_6 = icmp eq i1 %var_4, %var_5 + %var_7 = call i1 @__quantum__rt__read_result(ptr inttoptr (i64 0 to ptr)) + %var_8 = icmp eq i1 %var_7, false + br i1 %var_8, label %block_1, label %block_2 +block_1: + store i1 false, ptr %var_9 + br label %block_3 +block_2: + store i1 true, ptr %var_9 + br label %block_3 +block_3: + call void @__quantum__rt__tuple_record_output(i64 4, ptr @0) + %var_12 = load i1, ptr %var_1 + call void @__quantum__rt__bool_record_output(i1 %var_12, ptr @1) + call void @__quantum__rt__bool_record_output(i1 %var_3, ptr @2) + call void @__quantum__rt__bool_record_output(i1 %var_6, ptr @3) + %var_13 = load i1, ptr %var_9 + call void @__quantum__rt__bool_record_output(i1 %var_13, ptr @4) + ret i64 0 +} + +declare void @__quantum__rt__initialize(ptr) + +declare void @__quantum__qis__x__body(ptr) + +declare void @__quantum__qis__cx__body(ptr, ptr) + +declare void @__quantum__qis__m__body(ptr, ptr) #1 + +declare void @__quantum__qis__reset__body(ptr) #1 + +declare i1 @__quantum__rt__read_result(ptr) + +declare void 
@__quantum__rt__tuple_record_output(i64, ptr) + +declare void @__quantum__rt__bool_record_output(i1, ptr) + +attributes #0 = { "entry_point" "output_labeling_schema" "qir_profiles"="adaptive_profile" "required_num_qubits"="2" "required_num_results"="2" } +attributes #1 = { "irreversible" } + +; module flags + +!llvm.module.flags = !{!0, !1, !2, !3, !4, !5, !6, !7} + +!0 = !{i32 1, !"qir_major_version", i32 2} +!1 = !{i32 7, !"qir_minor_version", i32 1} +!2 = !{i32 1, !"dynamic_qubit_management", i1 false} +!3 = !{i32 1, !"dynamic_result_management", i1 false} +!4 = !{i32 5, !"int_computations", !{!"i64"}} +!5 = !{i32 5, !"float_computations", !{!"double"}} +!6 = !{i32 7, !"backwards_branching", i2 3} +!7 = !{i32 1, !"arrays", i1 true} diff --git a/source/pip/tests-integration/resources/adaptive_rifla/output/MeasurementComparison.out b/source/pip/tests-integration/resources/adaptive_rifla/output/MeasurementComparison.out new file mode 100644 index 0000000000..e1120c99da --- /dev/null +++ b/source/pip/tests-integration/resources/adaptive_rifla/output/MeasurementComparison.out @@ -0,0 +1,12 @@ +START +METADATA entry_point +METADATA output_labeling_schema +METADATA qir_profiles adaptive_profile +METADATA required_num_qubits 2 +METADATA required_num_results 2 +OUTPUT TUPLE 4 0_t +OUTPUT BOOL true 1_t0b +OUTPUT BOOL false 2_t1b +OUTPUT BOOL true 3_t2b +OUTPUT BOOL true 4_t3b +END 0 diff --git a/source/pip/tests-integration/resources/adaptive_rifla/output/NestedBranching.ll b/source/pip/tests-integration/resources/adaptive_rifla/output/NestedBranching.ll new file mode 100644 index 0000000000..6127a52e2d --- /dev/null +++ b/source/pip/tests-integration/resources/adaptive_rifla/output/NestedBranching.ll @@ -0,0 +1,383 @@ +@0 = internal constant [4 x i8] c"0_t\00" +@1 = internal constant [6 x i8] c"1_t0t\00" +@2 = internal constant [8 x i8] c"2_t0t0a\00" +@3 = internal constant [10 x i8] c"3_t0t0a0r\00" +@4 = internal constant [10 x i8] c"4_t0t0a1r\00" +@5 = internal 
constant [10 x i8] c"5_t0t0a2r\00" +@6 = internal constant [8 x i8] c"6_t0t1i\00" +@7 = internal constant [6 x i8] c"7_t1t\00" +@8 = internal constant [8 x i8] c"8_t1t0a\00" +@9 = internal constant [10 x i8] c"9_t1t0a0r\00" +@10 = internal constant [11 x i8] c"10_t1t0a1r\00" +@11 = internal constant [11 x i8] c"11_t1t0a2r\00" +@12 = internal constant [11 x i8] c"12_t1t0a3r\00" +@13 = internal constant [9 x i8] c"13_t1t1b\00" + +define i64 @ENTRYPOINT__main() #0 { +block_0: + %var_2 = alloca i64 + %var_7 = alloca i1 + %var_12 = alloca i1 + %var_17 = alloca i1 + %var_22 = alloca i1 + %var_27 = alloca i1 + %var_32 = alloca i1 + %var_50 = alloca i1 + %var_61 = alloca i1 + call void @__quantum__rt__initialize(ptr null) + call void @__quantum__qis__x__body(ptr inttoptr (i64 0 to ptr)) + call void @__quantum__qis__x__body(ptr inttoptr (i64 1 to ptr)) + call void @__quantum__qis__m__body(ptr inttoptr (i64 0 to ptr), ptr inttoptr (i64 0 to ptr)) + call void @__quantum__qis__m__body(ptr inttoptr (i64 1 to ptr), ptr inttoptr (i64 1 to ptr)) + call void @__quantum__qis__m__body(ptr inttoptr (i64 2 to ptr), ptr inttoptr (i64 2 to ptr)) + store i64 0, ptr %var_2 + %var_3 = call i1 @__quantum__rt__read_result(ptr inttoptr (i64 0 to ptr)) + %var_4 = icmp eq i1 %var_3, false + br i1 %var_4, label %block_1, label %block_2 +block_1: + %var_5 = call i1 @__quantum__rt__read_result(ptr inttoptr (i64 1 to ptr)) + %var_6 = icmp eq i1 %var_5, false + store i1 false, ptr %var_7 + br i1 %var_6, label %block_3, label %block_5 +block_2: + %var_20 = call i1 @__quantum__rt__read_result(ptr inttoptr (i64 1 to ptr)) + %var_21 = icmp eq i1 %var_20, false + store i1 false, ptr %var_22 + br i1 %var_21, label %block_4, label %block_6 +block_3: + %var_8 = call i1 @__quantum__rt__read_result(ptr inttoptr (i64 2 to ptr)) + %var_9 = icmp eq i1 %var_8, false + store i1 %var_9, ptr %var_7 + br label %block_5 +block_4: + %var_23 = call i1 @__quantum__rt__read_result(ptr inttoptr (i64 2 to ptr)) + %var_24 = 
icmp eq i1 %var_23, false + store i1 %var_24, ptr %var_22 + br label %block_6 +block_5: + %var_100 = load i1, ptr %var_7 + br i1 %var_100, label %block_7, label %block_8 +block_6: + %var_80 = load i1, ptr %var_22 + br i1 %var_80, label %block_9, label %block_10 +block_7: + store i64 0, ptr %var_2 + br label %block_31 +block_8: + %var_10 = call i1 @__quantum__rt__read_result(ptr inttoptr (i64 1 to ptr)) + %var_11 = icmp eq i1 %var_10, false + store i1 false, ptr %var_12 + br i1 %var_11, label %block_11, label %block_13 +block_9: + store i64 4, ptr %var_2 + br label %block_32 +block_10: + %var_25 = call i1 @__quantum__rt__read_result(ptr inttoptr (i64 1 to ptr)) + %var_26 = icmp eq i1 %var_25, false + store i1 false, ptr %var_27 + br i1 %var_26, label %block_12, label %block_14 +block_11: + %var_13 = call i1 @__quantum__rt__read_result(ptr inttoptr (i64 2 to ptr)) + store i1 %var_13, ptr %var_12 + br label %block_13 +block_12: + %var_28 = call i1 @__quantum__rt__read_result(ptr inttoptr (i64 2 to ptr)) + store i1 %var_28, ptr %var_27 + br label %block_14 +block_13: + %var_102 = load i1, ptr %var_12 + br i1 %var_102, label %block_15, label %block_16 +block_14: + %var_82 = load i1, ptr %var_27 + br i1 %var_82, label %block_17, label %block_18 +block_15: + store i64 1, ptr %var_2 + br label %block_29 +block_16: + %var_15 = call i1 @__quantum__rt__read_result(ptr inttoptr (i64 1 to ptr)) + store i1 false, ptr %var_17 + br i1 %var_15, label %block_19, label %block_21 +block_17: + store i64 5, ptr %var_2 + br label %block_30 +block_18: + %var_30 = call i1 @__quantum__rt__read_result(ptr inttoptr (i64 1 to ptr)) + store i1 false, ptr %var_32 + br i1 %var_30, label %block_20, label %block_22 +block_19: + %var_18 = call i1 @__quantum__rt__read_result(ptr inttoptr (i64 2 to ptr)) + %var_19 = icmp eq i1 %var_18, false + store i1 %var_19, ptr %var_17 + br label %block_21 +block_20: + %var_33 = call i1 @__quantum__rt__read_result(ptr inttoptr (i64 2 to ptr)) + %var_34 = icmp eq 
i1 %var_33, false + store i1 %var_34, ptr %var_32 + br label %block_22 +block_21: + %var_104 = load i1, ptr %var_17 + br i1 %var_104, label %block_23, label %block_24 +block_22: + %var_84 = load i1, ptr %var_32 + br i1 %var_84, label %block_25, label %block_26 +block_23: + store i64 2, ptr %var_2 + br label %block_27 +block_24: + store i64 3, ptr %var_2 + br label %block_27 +block_25: + store i64 6, ptr %var_2 + br label %block_28 +block_26: + store i64 7, ptr %var_2 + br label %block_28 +block_27: + br label %block_29 +block_28: + br label %block_30 +block_29: + br label %block_31 +block_30: + br label %block_32 +block_31: + br label %block_33 +block_32: + br label %block_33 +block_33: + call void @__quantum__qis__reset__body(ptr inttoptr (i64 0 to ptr)) + call void @__quantum__qis__reset__body(ptr inttoptr (i64 1 to ptr)) + call void @__quantum__qis__reset__body(ptr inttoptr (i64 2 to ptr)) + call void @__quantum__qis__x__body(ptr inttoptr (i64 7 to ptr)) + call void @__quantum__qis__x__body(ptr inttoptr (i64 3 to ptr)) + call void @__quantum__qis__x__body(ptr inttoptr (i64 4 to ptr)) + call void @__quantum__qis__x__body(ptr inttoptr (i64 5 to ptr)) + call void @__quantum__qis__m__body(ptr inttoptr (i64 3 to ptr), ptr inttoptr (i64 3 to ptr)) + call void @__quantum__qis__m__body(ptr inttoptr (i64 4 to ptr), ptr inttoptr (i64 4 to ptr)) + call void @__quantum__qis__m__body(ptr inttoptr (i64 5 to ptr), ptr inttoptr (i64 5 to ptr)) + call void @__quantum__qis__m__body(ptr inttoptr (i64 6 to ptr), ptr inttoptr (i64 6 to ptr)) + %var_40 = call i1 @__quantum__rt__read_result(ptr inttoptr (i64 3 to ptr)) + %var_41 = icmp eq i1 %var_40, false + br i1 %var_41, label %block_34, label %block_35 +block_34: + %var_42 = call i1 @__quantum__rt__read_result(ptr inttoptr (i64 4 to ptr)) + %var_43 = icmp eq i1 %var_42, false + br i1 %var_43, label %block_36, label %block_37 +block_35: + %var_48 = call i1 @__quantum__rt__read_result(ptr inttoptr (i64 3 to ptr)) + %var_49 = icmp eq 
i1 %var_48, false + store i1 false, ptr %var_50 + br i1 %var_49, label %block_38, label %block_43 +block_36: + %var_44 = call i1 @__quantum__rt__read_result(ptr inttoptr (i64 5 to ptr)) + %var_45 = icmp eq i1 %var_44, false + br i1 %var_45, label %block_39, label %block_40 +block_37: + %var_46 = call i1 @__quantum__rt__read_result(ptr inttoptr (i64 5 to ptr)) + %var_47 = icmp eq i1 %var_46, false + br i1 %var_47, label %block_41, label %block_42 +block_38: + %var_51 = call i1 @__quantum__rt__read_result(ptr inttoptr (i64 4 to ptr)) + store i1 %var_51, ptr %var_50 + br label %block_43 +block_39: + br label %block_44 +block_40: + call void @__quantum__qis__x__body(ptr inttoptr (i64 7 to ptr)) + call void @__quantum__qis__x__body(ptr inttoptr (i64 7 to ptr)) + br label %block_44 +block_41: + call void @__quantum__qis__y__body(ptr inttoptr (i64 7 to ptr)) + call void @__quantum__qis__y__body(ptr inttoptr (i64 7 to ptr)) + br label %block_45 +block_42: + call void @__quantum__qis__z__body(ptr inttoptr (i64 7 to ptr)) + call void @__quantum__qis__z__body(ptr inttoptr (i64 7 to ptr)) + br label %block_45 +block_43: + %var_87 = load i1, ptr %var_50 + br i1 %var_87, label %block_46, label %block_47 +block_44: + br label %block_48 +block_45: + br label %block_48 +block_46: + %var_53 = call i1 @__quantum__rt__read_result(ptr inttoptr (i64 4 to ptr)) + %var_54 = icmp eq i1 %var_53, false + br i1 %var_54, label %block_49, label %block_50 +block_47: + %var_59 = call i1 @__quantum__rt__read_result(ptr inttoptr (i64 3 to ptr)) + store i1 false, ptr %var_61 + br i1 %var_59, label %block_51, label %block_56 +block_48: + br label %block_82 +block_49: + %var_55 = call i1 @__quantum__rt__read_result(ptr inttoptr (i64 5 to ptr)) + %var_56 = icmp eq i1 %var_55, false + br i1 %var_56, label %block_52, label %block_53 +block_50: + %var_57 = call i1 @__quantum__rt__read_result(ptr inttoptr (i64 5 to ptr)) + %var_58 = icmp eq i1 %var_57, false + br i1 %var_58, label %block_54, label 
%block_55 +block_51: + %var_62 = call i1 @__quantum__rt__read_result(ptr inttoptr (i64 4 to ptr)) + %var_63 = icmp eq i1 %var_62, false + store i1 %var_63, ptr %var_61 + br label %block_56 +block_52: + br label %block_57 +block_53: + call void @__quantum__qis__x__body(ptr inttoptr (i64 7 to ptr)) + call void @__quantum__qis__x__body(ptr inttoptr (i64 7 to ptr)) + br label %block_57 +block_54: + call void @__quantum__qis__y__body(ptr inttoptr (i64 7 to ptr)) + call void @__quantum__qis__y__body(ptr inttoptr (i64 7 to ptr)) + br label %block_58 +block_55: + call void @__quantum__qis__z__body(ptr inttoptr (i64 7 to ptr)) + call void @__quantum__qis__z__body(ptr inttoptr (i64 7 to ptr)) + br label %block_58 +block_56: + %var_89 = load i1, ptr %var_61 + br i1 %var_89, label %block_59, label %block_60 +block_57: + br label %block_61 +block_58: + br label %block_61 +block_59: + %var_64 = call i1 @__quantum__rt__read_result(ptr inttoptr (i64 4 to ptr)) + %var_65 = icmp eq i1 %var_64, false + br i1 %var_65, label %block_62, label %block_63 +block_60: + %var_70 = call i1 @__quantum__rt__read_result(ptr inttoptr (i64 4 to ptr)) + %var_71 = icmp eq i1 %var_70, false + br i1 %var_71, label %block_64, label %block_65 +block_61: + br label %block_81 +block_62: + %var_66 = call i1 @__quantum__rt__read_result(ptr inttoptr (i64 5 to ptr)) + %var_67 = icmp eq i1 %var_66, false + br i1 %var_67, label %block_66, label %block_67 +block_63: + %var_68 = call i1 @__quantum__rt__read_result(ptr inttoptr (i64 5 to ptr)) + %var_69 = icmp eq i1 %var_68, false + br i1 %var_69, label %block_68, label %block_69 +block_64: + %var_72 = call i1 @__quantum__rt__read_result(ptr inttoptr (i64 5 to ptr)) + %var_73 = icmp eq i1 %var_72, false + br i1 %var_73, label %block_70, label %block_71 +block_65: + %var_74 = call i1 @__quantum__rt__read_result(ptr inttoptr (i64 5 to ptr)) + %var_75 = icmp eq i1 %var_74, false + br i1 %var_75, label %block_72, label %block_73 +block_66: + br label %block_74 
+block_67: + call void @__quantum__qis__x__body(ptr inttoptr (i64 7 to ptr)) + call void @__quantum__qis__x__body(ptr inttoptr (i64 7 to ptr)) + br label %block_74 +block_68: + call void @__quantum__qis__y__body(ptr inttoptr (i64 7 to ptr)) + call void @__quantum__qis__y__body(ptr inttoptr (i64 7 to ptr)) + br label %block_75 +block_69: + call void @__quantum__qis__z__body(ptr inttoptr (i64 7 to ptr)) + call void @__quantum__qis__z__body(ptr inttoptr (i64 7 to ptr)) + br label %block_75 +block_70: + br label %block_76 +block_71: + call void @__quantum__qis__x__body(ptr inttoptr (i64 7 to ptr)) + call void @__quantum__qis__x__body(ptr inttoptr (i64 7 to ptr)) + br label %block_76 +block_72: + call void @__quantum__qis__y__body(ptr inttoptr (i64 7 to ptr)) + call void @__quantum__qis__y__body(ptr inttoptr (i64 7 to ptr)) + br label %block_77 +block_73: + call void @__quantum__qis__z__body(ptr inttoptr (i64 7 to ptr)) + call void @__quantum__qis__z__body(ptr inttoptr (i64 7 to ptr)) + br label %block_77 +block_74: + br label %block_78 +block_75: + br label %block_78 +block_76: + br label %block_79 +block_77: + br label %block_79 +block_78: + br label %block_80 +block_79: + br label %block_80 +block_80: + br label %block_81 +block_81: + br label %block_82 +block_82: + call void @__quantum__qis__reset__body(ptr inttoptr (i64 3 to ptr)) + call void @__quantum__qis__reset__body(ptr inttoptr (i64 4 to ptr)) + call void @__quantum__qis__reset__body(ptr inttoptr (i64 5 to ptr)) + call void @__quantum__qis__reset__body(ptr inttoptr (i64 6 to ptr)) + call void @__quantum__qis__mresetz__body(ptr inttoptr (i64 7 to ptr), ptr inttoptr (i64 7 to ptr)) + %var_77 = call i1 @__quantum__rt__read_result(ptr inttoptr (i64 7 to ptr)) + call void @__quantum__rt__tuple_record_output(i64 2, ptr @0) + call void @__quantum__rt__tuple_record_output(i64 2, ptr @1) + call void @__quantum__rt__array_record_output(i64 3, ptr @2) + call void @__quantum__rt__result_record_output(ptr inttoptr (i64 0 
to ptr), ptr @3) + call void @__quantum__rt__result_record_output(ptr inttoptr (i64 1 to ptr), ptr @4) + call void @__quantum__rt__result_record_output(ptr inttoptr (i64 2 to ptr), ptr @5) + %var_90 = load i64, ptr %var_2 + call void @__quantum__rt__int_record_output(i64 %var_90, ptr @6) + call void @__quantum__rt__tuple_record_output(i64 2, ptr @7) + call void @__quantum__rt__array_record_output(i64 4, ptr @8) + call void @__quantum__rt__result_record_output(ptr inttoptr (i64 3 to ptr), ptr @9) + call void @__quantum__rt__result_record_output(ptr inttoptr (i64 4 to ptr), ptr @10) + call void @__quantum__rt__result_record_output(ptr inttoptr (i64 5 to ptr), ptr @11) + call void @__quantum__rt__result_record_output(ptr inttoptr (i64 6 to ptr), ptr @12) + call void @__quantum__rt__bool_record_output(i1 %var_77, ptr @13) + ret i64 0 +} + +declare void @__quantum__rt__initialize(ptr) + +declare void @__quantum__qis__x__body(ptr) + +declare void @__quantum__qis__m__body(ptr, ptr) #1 + +declare i1 @__quantum__rt__read_result(ptr) + +declare void @__quantum__qis__reset__body(ptr) #1 + +declare void @__quantum__qis__y__body(ptr) + +declare void @__quantum__qis__z__body(ptr) + +declare void @__quantum__qis__mresetz__body(ptr, ptr) #1 + +declare void @__quantum__rt__tuple_record_output(i64, ptr) + +declare void @__quantum__rt__array_record_output(i64, ptr) + +declare void @__quantum__rt__result_record_output(ptr, ptr) + +declare void @__quantum__rt__int_record_output(i64, ptr) + +declare void @__quantum__rt__bool_record_output(i1, ptr) + +attributes #0 = { "entry_point" "output_labeling_schema" "qir_profiles"="adaptive_profile" "required_num_qubits"="8" "required_num_results"="8" } +attributes #1 = { "irreversible" } + +; module flags + +!llvm.module.flags = !{!0, !1, !2, !3, !4, !5, !6, !7} + +!0 = !{i32 1, !"qir_major_version", i32 2} +!1 = !{i32 7, !"qir_minor_version", i32 1} +!2 = !{i32 1, !"dynamic_qubit_management", i1 false} +!3 = !{i32 1, 
!"dynamic_result_management", i1 false} +!4 = !{i32 5, !"int_computations", !{!"i64"}} +!5 = !{i32 5, !"float_computations", !{!"double"}} +!6 = !{i32 7, !"backwards_branching", i2 3} +!7 = !{i32 1, !"arrays", i1 true} diff --git a/source/pip/tests-integration/resources/adaptive_rifla/output/NestedBranching.out b/source/pip/tests-integration/resources/adaptive_rifla/output/NestedBranching.out new file mode 100644 index 0000000000..220d5cad3a --- /dev/null +++ b/source/pip/tests-integration/resources/adaptive_rifla/output/NestedBranching.out @@ -0,0 +1,21 @@ +START +METADATA entry_point +METADATA output_labeling_schema +METADATA qir_profiles adaptive_profile +METADATA required_num_qubits 8 +METADATA required_num_results 8 +OUTPUT TUPLE 2 0_t +OUTPUT TUPLE 2 1_t0t +OUTPUT ARRAY 3 2_t0t0a +OUTPUT RESULT 1 3_t0t0a0r +OUTPUT RESULT 1 4_t0t0a1r +OUTPUT RESULT 0 5_t0t0a2r +OUTPUT INT 6 6_t0t1i +OUTPUT TUPLE 2 7_t1t +OUTPUT ARRAY 4 8_t1t0a +OUTPUT RESULT 1 9_t1t0a0r +OUTPUT RESULT 1 10_t1t0a1r +OUTPUT RESULT 1 11_t1t0a2r +OUTPUT RESULT 0 12_t1t0a3r +OUTPUT BOOL true 13_t1t1b +END 0 diff --git a/source/pip/tests-integration/resources/adaptive_rifla/output/RandomBit.ll b/source/pip/tests-integration/resources/adaptive_rifla/output/RandomBit.ll new file mode 100644 index 0000000000..6c00ba169c --- /dev/null +++ b/source/pip/tests-integration/resources/adaptive_rifla/output/RandomBit.ll @@ -0,0 +1,37 @@ +@0 = internal constant [4 x i8] c"0_r\00" + +define i64 @ENTRYPOINT__main() #0 { +block_0: + call void @__quantum__rt__initialize(ptr null) + call void @__quantum__qis__h__body(ptr inttoptr (i64 0 to ptr)) + call void @__quantum__qis__m__body(ptr inttoptr (i64 0 to ptr), ptr inttoptr (i64 0 to ptr)) + call void @__quantum__qis__reset__body(ptr inttoptr (i64 0 to ptr)) + call void @__quantum__rt__result_record_output(ptr inttoptr (i64 0 to ptr), ptr @0) + ret i64 0 +} + +declare void @__quantum__rt__initialize(ptr) + +declare void @__quantum__qis__h__body(ptr) + +declare void 
@__quantum__qis__m__body(ptr, ptr) #1 + +declare void @__quantum__qis__reset__body(ptr) #1 + +declare void @__quantum__rt__result_record_output(ptr, ptr) + +attributes #0 = { "entry_point" "output_labeling_schema" "qir_profiles"="adaptive_profile" "required_num_qubits"="1" "required_num_results"="1" } +attributes #1 = { "irreversible" } + +; module flags + +!llvm.module.flags = !{!0, !1, !2, !3, !4, !5, !6, !7} + +!0 = !{i32 1, !"qir_major_version", i32 2} +!1 = !{i32 7, !"qir_minor_version", i32 1} +!2 = !{i32 1, !"dynamic_qubit_management", i1 false} +!3 = !{i32 1, !"dynamic_result_management", i1 false} +!4 = !{i32 5, !"int_computations", !{!"i64"}} +!5 = !{i32 5, !"float_computations", !{!"double"}} +!6 = !{i32 7, !"backwards_branching", i2 3} +!7 = !{i32 1, !"arrays", i1 true} diff --git a/source/pip/tests-integration/resources/adaptive_rifla/output/RandomBit.out b/source/pip/tests-integration/resources/adaptive_rifla/output/RandomBit.out new file mode 100644 index 0000000000..f3b3a065a9 --- /dev/null +++ b/source/pip/tests-integration/resources/adaptive_rifla/output/RandomBit.out @@ -0,0 +1,8 @@ +START +METADATA entry_point +METADATA output_labeling_schema +METADATA qir_profiles adaptive_profile +METADATA required_num_qubits 1 +METADATA required_num_results 1 +OUTPUT RESULT 0 0_r +END 0 diff --git a/source/pip/tests-integration/resources/adaptive_rifla/output/SampleTeleport.ll b/source/pip/tests-integration/resources/adaptive_rifla/output/SampleTeleport.ll new file mode 100644 index 0000000000..4a07c88276 --- /dev/null +++ b/source/pip/tests-integration/resources/adaptive_rifla/output/SampleTeleport.ll @@ -0,0 +1,69 @@ +@0 = internal constant [4 x i8] c"0_r\00" + +define i64 @ENTRYPOINT__main() #0 { +block_0: + call void @__quantum__rt__initialize(ptr null) + call void @__quantum__qis__h__body(ptr inttoptr (i64 0 to ptr)) + call void @__quantum__qis__cx__body(ptr inttoptr (i64 0 to ptr), ptr inttoptr (i64 1 to ptr)) + call void @__quantum__qis__x__body(ptr 
inttoptr (i64 2 to ptr)) + call void @__quantum__qis__h__body(ptr inttoptr (i64 2 to ptr)) + call void @__quantum__qis__cx__body(ptr inttoptr (i64 2 to ptr), ptr inttoptr (i64 0 to ptr)) + call void @__quantum__qis__h__body(ptr inttoptr (i64 2 to ptr)) + call void @__quantum__qis__m__body(ptr inttoptr (i64 0 to ptr), ptr inttoptr (i64 0 to ptr)) + %var_1 = call i1 @__quantum__rt__read_result(ptr inttoptr (i64 0 to ptr)) + br i1 %var_1, label %block_1, label %block_2 +block_1: + call void @__quantum__qis__x__body(ptr inttoptr (i64 1 to ptr)) + br label %block_2 +block_2: + call void @__quantum__qis__mresetz__body(ptr inttoptr (i64 2 to ptr), ptr inttoptr (i64 1 to ptr)) + %var_3 = call i1 @__quantum__rt__read_result(ptr inttoptr (i64 1 to ptr)) + br i1 %var_3, label %block_3, label %block_4 +block_3: + call void @__quantum__qis__z__body(ptr inttoptr (i64 1 to ptr)) + br label %block_4 +block_4: + call void @__quantum__qis__h__body(ptr inttoptr (i64 1 to ptr)) + call void @__quantum__qis__m__body(ptr inttoptr (i64 1 to ptr), ptr inttoptr (i64 2 to ptr)) + call void @__quantum__qis__h__body(ptr inttoptr (i64 1 to ptr)) + call void @__quantum__qis__reset__body(ptr inttoptr (i64 0 to ptr)) + call void @__quantum__qis__reset__body(ptr inttoptr (i64 1 to ptr)) + call void @__quantum__rt__result_record_output(ptr inttoptr (i64 2 to ptr), ptr @0) + ret i64 0 +} + +declare void @__quantum__rt__initialize(ptr) + +declare void @__quantum__qis__h__body(ptr) + +declare void @__quantum__qis__cx__body(ptr, ptr) + +declare void @__quantum__qis__x__body(ptr) + +declare void @__quantum__qis__m__body(ptr, ptr) #1 + +declare i1 @__quantum__rt__read_result(ptr) + +declare void @__quantum__qis__mresetz__body(ptr, ptr) #1 + +declare void @__quantum__qis__z__body(ptr) + +declare void @__quantum__qis__reset__body(ptr) #1 + +declare void @__quantum__rt__result_record_output(ptr, ptr) + +attributes #0 = { "entry_point" "output_labeling_schema" "qir_profiles"="adaptive_profile" 
"required_num_qubits"="3" "required_num_results"="3" } +attributes #1 = { "irreversible" } + +; module flags + +!llvm.module.flags = !{!0, !1, !2, !3, !4, !5, !6, !7} + +!0 = !{i32 1, !"qir_major_version", i32 2} +!1 = !{i32 7, !"qir_minor_version", i32 1} +!2 = !{i32 1, !"dynamic_qubit_management", i1 false} +!3 = !{i32 1, !"dynamic_result_management", i1 false} +!4 = !{i32 5, !"int_computations", !{!"i64"}} +!5 = !{i32 5, !"float_computations", !{!"double"}} +!6 = !{i32 7, !"backwards_branching", i2 3} +!7 = !{i32 1, !"arrays", i1 true} diff --git a/source/pip/tests-integration/resources/adaptive_rifla/output/SampleTeleport.out b/source/pip/tests-integration/resources/adaptive_rifla/output/SampleTeleport.out new file mode 100644 index 0000000000..64b941c5f9 --- /dev/null +++ b/source/pip/tests-integration/resources/adaptive_rifla/output/SampleTeleport.out @@ -0,0 +1,8 @@ +START +METADATA entry_point +METADATA output_labeling_schema +METADATA qir_profiles adaptive_profile +METADATA required_num_qubits 3 +METADATA required_num_results 3 +OUTPUT RESULT 1 0_r +END 0 diff --git a/source/pip/tests-integration/resources/adaptive_rifla/output/ShortcuttingMeasurement.ll b/source/pip/tests-integration/resources/adaptive_rifla/output/ShortcuttingMeasurement.ll new file mode 100644 index 0000000000..96e167e51c --- /dev/null +++ b/source/pip/tests-integration/resources/adaptive_rifla/output/ShortcuttingMeasurement.ll @@ -0,0 +1,68 @@ +@0 = internal constant [4 x i8] c"0_t\00" +@1 = internal constant [6 x i8] c"1_t0r\00" +@2 = internal constant [6 x i8] c"2_t1r\00" + +define i64 @ENTRYPOINT__main() #0 { +block_0: + %var_2 = alloca i1 + call void @__quantum__rt__initialize(ptr null) + call void @__quantum__qis__x__body(ptr inttoptr (i64 0 to ptr)) + call void @__quantum__qis__cx__body(ptr inttoptr (i64 0 to ptr), ptr inttoptr (i64 1 to ptr)) + call void @__quantum__qis__m__body(ptr inttoptr (i64 0 to ptr), ptr inttoptr (i64 0 to ptr)) + %var_0 = call i1 
@__quantum__rt__read_result(ptr inttoptr (i64 0 to ptr)) + store i1 true, ptr %var_2 + br i1 %var_0, label %block_2, label %block_1 +block_1: + call void @__quantum__qis__m__body(ptr inttoptr (i64 1 to ptr), ptr inttoptr (i64 1 to ptr)) + %var_3 = call i1 @__quantum__rt__read_result(ptr inttoptr (i64 1 to ptr)) + store i1 %var_3, ptr %var_2 + br label %block_2 +block_2: + %var_6 = load i1, ptr %var_2 + br i1 %var_6, label %block_3, label %block_4 +block_3: + call void @__quantum__qis__x__body(ptr inttoptr (i64 0 to ptr)) + call void @__quantum__qis__x__body(ptr inttoptr (i64 1 to ptr)) + br label %block_4 +block_4: + call void @__quantum__qis__m__body(ptr inttoptr (i64 0 to ptr), ptr inttoptr (i64 2 to ptr)) + call void @__quantum__qis__m__body(ptr inttoptr (i64 1 to ptr), ptr inttoptr (i64 3 to ptr)) + call void @__quantum__qis__reset__body(ptr inttoptr (i64 0 to ptr)) + call void @__quantum__qis__reset__body(ptr inttoptr (i64 1 to ptr)) + call void @__quantum__rt__tuple_record_output(i64 2, ptr @0) + call void @__quantum__rt__result_record_output(ptr inttoptr (i64 2 to ptr), ptr @1) + call void @__quantum__rt__result_record_output(ptr inttoptr (i64 3 to ptr), ptr @2) + ret i64 0 +} + +declare void @__quantum__rt__initialize(ptr) + +declare void @__quantum__qis__x__body(ptr) + +declare void @__quantum__qis__cx__body(ptr, ptr) + +declare void @__quantum__qis__m__body(ptr, ptr) #1 + +declare i1 @__quantum__rt__read_result(ptr) + +declare void @__quantum__qis__reset__body(ptr) #1 + +declare void @__quantum__rt__tuple_record_output(i64, ptr) + +declare void @__quantum__rt__result_record_output(ptr, ptr) + +attributes #0 = { "entry_point" "output_labeling_schema" "qir_profiles"="adaptive_profile" "required_num_qubits"="2" "required_num_results"="4" } +attributes #1 = { "irreversible" } + +; module flags + +!llvm.module.flags = !{!0, !1, !2, !3, !4, !5, !6, !7} + +!0 = !{i32 1, !"qir_major_version", i32 2} +!1 = !{i32 7, !"qir_minor_version", i32 1} +!2 = !{i32 1, 
!"dynamic_qubit_management", i1 false} +!3 = !{i32 1, !"dynamic_result_management", i1 false} +!4 = !{i32 5, !"int_computations", !{!"i64"}} +!5 = !{i32 5, !"float_computations", !{!"double"}} +!6 = !{i32 7, !"backwards_branching", i2 3} +!7 = !{i32 1, !"arrays", i1 true} diff --git a/source/pip/tests-integration/resources/adaptive_rifla/output/ShortcuttingMeasurement.out b/source/pip/tests-integration/resources/adaptive_rifla/output/ShortcuttingMeasurement.out new file mode 100644 index 0000000000..e05d421fe1 --- /dev/null +++ b/source/pip/tests-integration/resources/adaptive_rifla/output/ShortcuttingMeasurement.out @@ -0,0 +1,10 @@ +START +METADATA entry_point +METADATA output_labeling_schema +METADATA qir_profiles adaptive_profile +METADATA required_num_qubits 2 +METADATA required_num_results 4 +OUTPUT TUPLE 2 0_t +OUTPUT RESULT 0 1_t0r +OUTPUT RESULT 0 2_t1r +END 0 diff --git a/source/pip/tests-integration/resources/adaptive_rifla/output/Slicing.ll b/source/pip/tests-integration/resources/adaptive_rifla/output/Slicing.ll new file mode 100644 index 0000000000..a5aa49581d --- /dev/null +++ b/source/pip/tests-integration/resources/adaptive_rifla/output/Slicing.ll @@ -0,0 +1,66 @@ +@0 = internal constant [4 x i8] c"0_a\00" +@1 = internal constant [6 x i8] c"1_a0r\00" +@2 = internal constant [6 x i8] c"2_a1r\00" +@3 = internal constant [6 x i8] c"3_a2r\00" +@4 = internal constant [6 x i8] c"4_a3r\00" +@5 = internal constant [6 x i8] c"5_a4r\00" + +define i64 @ENTRYPOINT__main() #0 { +block_0: + call void @__quantum__rt__initialize(ptr null) + call void @__quantum__qis__x__body(ptr inttoptr (i64 9 to ptr)) + call void @__quantum__qis__x__body(ptr inttoptr (i64 8 to ptr)) + call void @__quantum__qis__x__body(ptr inttoptr (i64 7 to ptr)) + call void @__quantum__qis__x__body(ptr inttoptr (i64 6 to ptr)) + call void @__quantum__qis__x__body(ptr inttoptr (i64 5 to ptr)) + call void @__quantum__qis__m__body(ptr inttoptr (i64 5 to ptr), ptr inttoptr (i64 0 to ptr)) + call 
void @__quantum__qis__m__body(ptr inttoptr (i64 6 to ptr), ptr inttoptr (i64 1 to ptr)) + call void @__quantum__qis__m__body(ptr inttoptr (i64 7 to ptr), ptr inttoptr (i64 2 to ptr)) + call void @__quantum__qis__m__body(ptr inttoptr (i64 8 to ptr), ptr inttoptr (i64 3 to ptr)) + call void @__quantum__qis__m__body(ptr inttoptr (i64 9 to ptr), ptr inttoptr (i64 4 to ptr)) + call void @__quantum__qis__reset__body(ptr inttoptr (i64 0 to ptr)) + call void @__quantum__qis__reset__body(ptr inttoptr (i64 1 to ptr)) + call void @__quantum__qis__reset__body(ptr inttoptr (i64 2 to ptr)) + call void @__quantum__qis__reset__body(ptr inttoptr (i64 3 to ptr)) + call void @__quantum__qis__reset__body(ptr inttoptr (i64 4 to ptr)) + call void @__quantum__qis__reset__body(ptr inttoptr (i64 5 to ptr)) + call void @__quantum__qis__reset__body(ptr inttoptr (i64 6 to ptr)) + call void @__quantum__qis__reset__body(ptr inttoptr (i64 7 to ptr)) + call void @__quantum__qis__reset__body(ptr inttoptr (i64 8 to ptr)) + call void @__quantum__qis__reset__body(ptr inttoptr (i64 9 to ptr)) + call void @__quantum__rt__array_record_output(i64 5, ptr @0) + call void @__quantum__rt__result_record_output(ptr inttoptr (i64 0 to ptr), ptr @1) + call void @__quantum__rt__result_record_output(ptr inttoptr (i64 1 to ptr), ptr @2) + call void @__quantum__rt__result_record_output(ptr inttoptr (i64 2 to ptr), ptr @3) + call void @__quantum__rt__result_record_output(ptr inttoptr (i64 3 to ptr), ptr @4) + call void @__quantum__rt__result_record_output(ptr inttoptr (i64 4 to ptr), ptr @5) + ret i64 0 +} + +declare void @__quantum__rt__initialize(ptr) + +declare void @__quantum__qis__x__body(ptr) + +declare void @__quantum__qis__m__body(ptr, ptr) #1 + +declare void @__quantum__qis__reset__body(ptr) #1 + +declare void @__quantum__rt__array_record_output(i64, ptr) + +declare void @__quantum__rt__result_record_output(ptr, ptr) + +attributes #0 = { "entry_point" "output_labeling_schema" 
"qir_profiles"="adaptive_profile" "required_num_qubits"="10" "required_num_results"="5" } +attributes #1 = { "irreversible" } + +; module flags + +!llvm.module.flags = !{!0, !1, !2, !3, !4, !5, !6, !7} + +!0 = !{i32 1, !"qir_major_version", i32 2} +!1 = !{i32 7, !"qir_minor_version", i32 1} +!2 = !{i32 1, !"dynamic_qubit_management", i1 false} +!3 = !{i32 1, !"dynamic_result_management", i1 false} +!4 = !{i32 5, !"int_computations", !{!"i64"}} +!5 = !{i32 5, !"float_computations", !{!"double"}} +!6 = !{i32 7, !"backwards_branching", i2 3} +!7 = !{i32 1, !"arrays", i1 true} diff --git a/source/pip/tests-integration/resources/adaptive_rifla/output/Slicing.out b/source/pip/tests-integration/resources/adaptive_rifla/output/Slicing.out new file mode 100644 index 0000000000..ad00511727 --- /dev/null +++ b/source/pip/tests-integration/resources/adaptive_rifla/output/Slicing.out @@ -0,0 +1,13 @@ +START +METADATA entry_point +METADATA output_labeling_schema +METADATA qir_profiles adaptive_profile +METADATA required_num_qubits 10 +METADATA required_num_results 5 +OUTPUT ARRAY 5 0_a +OUTPUT RESULT 1 1_a0r +OUTPUT RESULT 1 2_a1r +OUTPUT RESULT 1 3_a2r +OUTPUT RESULT 1 4_a3r +OUTPUT RESULT 1 5_a4r +END 0 diff --git a/source/pip/tests-integration/resources/adaptive_rifla/output/SuperdenseCoding.ll b/source/pip/tests-integration/resources/adaptive_rifla/output/SuperdenseCoding.ll new file mode 100644 index 0000000000..e632bd350c --- /dev/null +++ b/source/pip/tests-integration/resources/adaptive_rifla/output/SuperdenseCoding.ll @@ -0,0 +1,98 @@ +@0 = internal constant [4 x i8] c"0_t\00" +@1 = internal constant [6 x i8] c"1_t0t\00" +@2 = internal constant [8 x i8] c"2_t0t0b\00" +@3 = internal constant [8 x i8] c"3_t0t1b\00" +@4 = internal constant [6 x i8] c"4_t1t\00" +@5 = internal constant [8 x i8] c"5_t1t0b\00" +@6 = internal constant [8 x i8] c"6_t1t1b\00" + +define i64 @ENTRYPOINT__main() #0 { +block_0: + %var_3 = alloca i1 + %var_7 = alloca i1 + call void 
@__quantum__rt__initialize(ptr null) + call void @__quantum__qis__h__body(ptr inttoptr (i64 0 to ptr)) + call void @__quantum__qis__cx__body(ptr inttoptr (i64 0 to ptr), ptr inttoptr (i64 1 to ptr)) + call void @__quantum__qis__h__body(ptr inttoptr (i64 2 to ptr)) + call void @__quantum__qis__mresetz__body(ptr inttoptr (i64 2 to ptr), ptr inttoptr (i64 0 to ptr)) + %var_0 = call i1 @__quantum__rt__read_result(ptr inttoptr (i64 0 to ptr)) + store i1 %var_0, ptr %var_3 + call void @__quantum__qis__h__body(ptr inttoptr (i64 2 to ptr)) + call void @__quantum__qis__mresetz__body(ptr inttoptr (i64 2 to ptr), ptr inttoptr (i64 1 to ptr)) + %var_4 = call i1 @__quantum__rt__read_result(ptr inttoptr (i64 1 to ptr)) + store i1 %var_4, ptr %var_7 + %var_16 = load i1, ptr %var_3 + br i1 %var_16, label %block_1, label %block_2 +block_1: + call void @__quantum__qis__z__body(ptr inttoptr (i64 0 to ptr)) + br label %block_2 +block_2: + %var_17 = load i1, ptr %var_7 + br i1 %var_17, label %block_3, label %block_4 +block_3: + call void @__quantum__qis__x__body(ptr inttoptr (i64 0 to ptr)) + br label %block_4 +block_4: + call void @__quantum__qis__h__body(ptr inttoptr (i64 2 to ptr)) + call void @__quantum__qis__cx__body(ptr inttoptr (i64 2 to ptr), ptr inttoptr (i64 0 to ptr)) + call void @__quantum__qis__cx__body(ptr inttoptr (i64 2 to ptr), ptr inttoptr (i64 1 to ptr)) + call void @__quantum__qis__h__body(ptr inttoptr (i64 2 to ptr)) + call void @__quantum__qis__mresetz__body(ptr inttoptr (i64 2 to ptr), ptr inttoptr (i64 2 to ptr)) + %var_9 = call i1 @__quantum__rt__read_result(ptr inttoptr (i64 2 to ptr)) + call void @__quantum__qis__h__body(ptr inttoptr (i64 2 to ptr)) + call void @__quantum__qis__cz__body(ptr inttoptr (i64 2 to ptr), ptr inttoptr (i64 0 to ptr)) + call void @__quantum__qis__cz__body(ptr inttoptr (i64 2 to ptr), ptr inttoptr (i64 1 to ptr)) + call void @__quantum__qis__h__body(ptr inttoptr (i64 2 to ptr)) + call void @__quantum__qis__mresetz__body(ptr inttoptr 
(i64 2 to ptr), ptr inttoptr (i64 3 to ptr)) + %var_13 = call i1 @__quantum__rt__read_result(ptr inttoptr (i64 3 to ptr)) + call void @__quantum__qis__reset__body(ptr inttoptr (i64 0 to ptr)) + call void @__quantum__qis__reset__body(ptr inttoptr (i64 1 to ptr)) + call void @__quantum__rt__tuple_record_output(i64 2, ptr @0) + call void @__quantum__rt__tuple_record_output(i64 2, ptr @1) + %var_18 = load i1, ptr %var_3 + call void @__quantum__rt__bool_record_output(i1 %var_18, ptr @2) + %var_19 = load i1, ptr %var_7 + call void @__quantum__rt__bool_record_output(i1 %var_19, ptr @3) + call void @__quantum__rt__tuple_record_output(i64 2, ptr @4) + call void @__quantum__rt__bool_record_output(i1 %var_9, ptr @5) + call void @__quantum__rt__bool_record_output(i1 %var_13, ptr @6) + ret i64 0 +} + +declare void @__quantum__rt__initialize(ptr) + +declare void @__quantum__qis__h__body(ptr) + +declare void @__quantum__qis__cx__body(ptr, ptr) + +declare void @__quantum__qis__mresetz__body(ptr, ptr) #1 + +declare i1 @__quantum__rt__read_result(ptr) + +declare void @__quantum__qis__z__body(ptr) + +declare void @__quantum__qis__x__body(ptr) + +declare void @__quantum__qis__cz__body(ptr, ptr) + +declare void @__quantum__qis__reset__body(ptr) #1 + +declare void @__quantum__rt__tuple_record_output(i64, ptr) + +declare void @__quantum__rt__bool_record_output(i1, ptr) + +attributes #0 = { "entry_point" "output_labeling_schema" "qir_profiles"="adaptive_profile" "required_num_qubits"="3" "required_num_results"="4" } +attributes #1 = { "irreversible" } + +; module flags + +!llvm.module.flags = !{!0, !1, !2, !3, !4, !5, !6, !7} + +!0 = !{i32 1, !"qir_major_version", i32 2} +!1 = !{i32 7, !"qir_minor_version", i32 1} +!2 = !{i32 1, !"dynamic_qubit_management", i1 false} +!3 = !{i32 1, !"dynamic_result_management", i1 false} +!4 = !{i32 5, !"int_computations", !{!"i64"}} +!5 = !{i32 5, !"float_computations", !{!"double"}} +!6 = !{i32 7, !"backwards_branching", i2 3} +!7 = !{i32 1, !"arrays", 
i1 true} diff --git a/source/pip/tests-integration/resources/adaptive_rifla/output/SuperdenseCoding.out b/source/pip/tests-integration/resources/adaptive_rifla/output/SuperdenseCoding.out new file mode 100644 index 0000000000..c2ea4843a7 --- /dev/null +++ b/source/pip/tests-integration/resources/adaptive_rifla/output/SuperdenseCoding.out @@ -0,0 +1,14 @@ +START +METADATA entry_point +METADATA output_labeling_schema +METADATA qir_profiles adaptive_profile +METADATA required_num_qubits 3 +METADATA required_num_results 4 +OUTPUT TUPLE 2 0_t +OUTPUT TUPLE 2 1_t0t +OUTPUT BOOL false 2_t0t0b +OUTPUT BOOL false 3_t0t1b +OUTPUT TUPLE 2 4_t1t +OUTPUT BOOL false 5_t1t0b +OUTPUT BOOL false 6_t1t1b +END 0 diff --git a/source/pip/tests-integration/resources/adaptive_rifla/output/SwitchHandling.ll b/source/pip/tests-integration/resources/adaptive_rifla/output/SwitchHandling.ll new file mode 100644 index 0000000000..f35bcaf1ab --- /dev/null +++ b/source/pip/tests-integration/resources/adaptive_rifla/output/SwitchHandling.ll @@ -0,0 +1,98 @@ +@0 = internal constant [4 x i8] c"0_r\00" + +define i64 @ENTRYPOINT__main() #0 { +block_0: + %var_2 = alloca i64 + call void @__quantum__rt__initialize(ptr null) + call void @__quantum__qis__x__body(ptr inttoptr (i64 0 to ptr)) + call void @__quantum__qis__x__body(ptr inttoptr (i64 1 to ptr)) + store i64 0, ptr %var_2 + call void @__quantum__qis__m__body(ptr inttoptr (i64 0 to ptr), ptr inttoptr (i64 0 to ptr)) + call void @__quantum__qis__m__body(ptr inttoptr (i64 1 to ptr), ptr inttoptr (i64 1 to ptr)) + store i64 0, ptr %var_2 + %var_5 = call i1 @__quantum__rt__read_result(ptr inttoptr (i64 0 to ptr)) + br i1 %var_5, label %block_1, label %block_2 +block_1: + store i64 1, ptr %var_2 + br label %block_2 +block_2: + %var_17 = load i64, ptr %var_2 + %var_7 = shl i64 %var_17, 1 + store i64 %var_7, ptr %var_2 + %var_8 = call i1 @__quantum__rt__read_result(ptr inttoptr (i64 1 to ptr)) + br i1 %var_8, label %block_3, label %block_4 +block_3: + 
%var_22 = load i64, ptr %var_2 + %var_10 = add i64 %var_22, 1 + store i64 %var_10, ptr %var_2 + br label %block_4 +block_4: + call void @__quantum__qis__reset__body(ptr inttoptr (i64 0 to ptr)) + call void @__quantum__qis__reset__body(ptr inttoptr (i64 1 to ptr)) + %var_19 = load i64, ptr %var_2 + %var_12 = icmp eq i64 %var_19, 0 + br i1 %var_12, label %block_5, label %block_6 +block_5: + br label %block_13 +block_6: + %var_20 = load i64, ptr %var_2 + %var_13 = icmp eq i64 %var_20, 1 + br i1 %var_13, label %block_7, label %block_8 +block_7: + call void @__quantum__qis__ry__body(double 3.141592653589793, ptr inttoptr (i64 2 to ptr)) + br label %block_12 +block_8: + %var_21 = load i64, ptr %var_2 + %var_14 = icmp eq i64 %var_21, 2 + br i1 %var_14, label %block_9, label %block_10 +block_9: + call void @__quantum__qis__rz__body(double 3.141592653589793, ptr inttoptr (i64 2 to ptr)) + br label %block_11 +block_10: + call void @__quantum__qis__rx__body(double 3.141592653589793, ptr inttoptr (i64 2 to ptr)) + br label %block_11 +block_11: + br label %block_12 +block_12: + br label %block_13 +block_13: + call void @__quantum__qis__mresetz__body(ptr inttoptr (i64 2 to ptr), ptr inttoptr (i64 2 to ptr)) + call void @__quantum__rt__result_record_output(ptr inttoptr (i64 2 to ptr), ptr @0) + ret i64 0 +} + +declare void @__quantum__rt__initialize(ptr) + +declare void @__quantum__qis__x__body(ptr) + +declare void @__quantum__qis__m__body(ptr, ptr) #1 + +declare i1 @__quantum__rt__read_result(ptr) + +declare void @__quantum__qis__reset__body(ptr) #1 + +declare void @__quantum__qis__ry__body(double, ptr) + +declare void @__quantum__qis__rz__body(double, ptr) + +declare void @__quantum__qis__rx__body(double, ptr) + +declare void @__quantum__qis__mresetz__body(ptr, ptr) #1 + +declare void @__quantum__rt__result_record_output(ptr, ptr) + +attributes #0 = { "entry_point" "output_labeling_schema" "qir_profiles"="adaptive_profile" "required_num_qubits"="3" "required_num_results"="3" } 
+attributes #1 = { "irreversible" } + +; module flags + +!llvm.module.flags = !{!0, !1, !2, !3, !4, !5, !6, !7} + +!0 = !{i32 1, !"qir_major_version", i32 2} +!1 = !{i32 7, !"qir_minor_version", i32 1} +!2 = !{i32 1, !"dynamic_qubit_management", i1 false} +!3 = !{i32 1, !"dynamic_result_management", i1 false} +!4 = !{i32 5, !"int_computations", !{!"i64"}} +!5 = !{i32 5, !"float_computations", !{!"double"}} +!6 = !{i32 7, !"backwards_branching", i2 3} +!7 = !{i32 1, !"arrays", i1 true} diff --git a/source/pip/tests-integration/resources/adaptive_rifla/output/SwitchHandling.out b/source/pip/tests-integration/resources/adaptive_rifla/output/SwitchHandling.out new file mode 100644 index 0000000000..64b941c5f9 --- /dev/null +++ b/source/pip/tests-integration/resources/adaptive_rifla/output/SwitchHandling.out @@ -0,0 +1,8 @@ +START +METADATA entry_point +METADATA output_labeling_schema +METADATA qir_profiles adaptive_profile +METADATA required_num_qubits 3 +METADATA required_num_results 3 +OUTPUT RESULT 1 0_r +END 0 diff --git a/source/pip/tests-integration/resources/adaptive_rifla/output/ThreeQubitRepetitionCode.ll b/source/pip/tests-integration/resources/adaptive_rifla/output/ThreeQubitRepetitionCode.ll new file mode 100644 index 0000000000..6dec076955 --- /dev/null +++ b/source/pip/tests-integration/resources/adaptive_rifla/output/ThreeQubitRepetitionCode.ll @@ -0,0 +1,329 @@ +@0 = internal constant [4 x i8] c"0_t\00" +@1 = internal constant [6 x i8] c"1_t0b\00" +@2 = internal constant [6 x i8] c"2_t1i\00" + +define i64 @ENTRYPOINT__main() #0 { +block_0: + %var_1 = alloca i64 + %var_10 = alloca i1 + %var_25 = alloca i1 + %var_41 = alloca i1 + %var_57 = alloca i1 + %var_73 = alloca i1 + call void @__quantum__rt__initialize(ptr null) + call void @__quantum__qis__h__body(ptr inttoptr (i64 0 to ptr)) + call void @__quantum__qis__z__body(ptr inttoptr (i64 0 to ptr)) + store i64 0, ptr %var_1 + call void @__quantum__qis__cx__body(ptr inttoptr (i64 0 to ptr), ptr inttoptr 
(i64 1 to ptr)) + call void @__quantum__qis__cx__body(ptr inttoptr (i64 0 to ptr), ptr inttoptr (i64 2 to ptr)) + call void @__quantum__qis__rx__body(double 1.5707963267948966, ptr inttoptr (i64 0 to ptr)) + call void @__quantum__qis__rx__body(double 1.5707963267948966, ptr inttoptr (i64 1 to ptr)) + call void @__quantum__qis__rx__body(double 1.5707963267948966, ptr inttoptr (i64 2 to ptr)) + call void @__quantum__qis__rx__body(double 1.5707963267948966, ptr inttoptr (i64 0 to ptr)) + call void @__quantum__qis__rx__body(double 1.5707963267948966, ptr inttoptr (i64 1 to ptr)) + call void @__quantum__qis__rx__body(double 1.5707963267948966, ptr inttoptr (i64 2 to ptr)) + call void @__quantum__qis__rx__body(double 1.5707963267948966, ptr inttoptr (i64 0 to ptr)) + call void @__quantum__qis__rx__body(double 1.5707963267948966, ptr inttoptr (i64 1 to ptr)) + call void @__quantum__qis__rx__body(double 1.5707963267948966, ptr inttoptr (i64 2 to ptr)) + call void @__quantum__qis__rx__body(double 1.5707963267948966, ptr inttoptr (i64 0 to ptr)) + call void @__quantum__qis__rx__body(double 1.5707963267948966, ptr inttoptr (i64 1 to ptr)) + call void @__quantum__qis__rx__body(double 1.5707963267948966, ptr inttoptr (i64 2 to ptr)) + call void @__quantum__qis__cx__body(ptr inttoptr (i64 0 to ptr), ptr inttoptr (i64 3 to ptr)) + call void @__quantum__qis__cx__body(ptr inttoptr (i64 1 to ptr), ptr inttoptr (i64 3 to ptr)) + call void @__quantum__qis__cx__body(ptr inttoptr (i64 1 to ptr), ptr inttoptr (i64 4 to ptr)) + call void @__quantum__qis__cx__body(ptr inttoptr (i64 2 to ptr), ptr inttoptr (i64 4 to ptr)) + call void @__quantum__qis__mresetz__body(ptr inttoptr (i64 3 to ptr), ptr inttoptr (i64 0 to ptr)) + call void @__quantum__qis__mresetz__body(ptr inttoptr (i64 4 to ptr), ptr inttoptr (i64 1 to ptr)) + store i1 true, ptr %var_10 + %var_11 = call i1 @__quantum__rt__read_result(ptr inttoptr (i64 0 to ptr)) + br i1 %var_11, label %block_1, label %block_2 +block_1: + %var_13 
= call i1 @__quantum__rt__read_result(ptr inttoptr (i64 1 to ptr)) + br i1 %var_13, label %block_3, label %block_4 +block_2: + %var_15 = call i1 @__quantum__rt__read_result(ptr inttoptr (i64 1 to ptr)) + br i1 %var_15, label %block_5, label %block_6 +block_3: + call void @__quantum__qis__x__body(ptr inttoptr (i64 1 to ptr)) + br label %block_7 +block_4: + call void @__quantum__qis__x__body(ptr inttoptr (i64 0 to ptr)) + br label %block_7 +block_5: + call void @__quantum__qis__x__body(ptr inttoptr (i64 2 to ptr)) + br label %block_8 +block_6: + store i1 false, ptr %var_10 + br label %block_8 +block_7: + br label %block_9 +block_8: + br label %block_9 +block_9: + %var_86 = load i1, ptr %var_10 + br i1 %var_86, label %block_10, label %block_11 +block_10: + store i64 1, ptr %var_1 + br label %block_11 +block_11: + call void @__quantum__qis__rx__body(double 1.5707963267948966, ptr inttoptr (i64 0 to ptr)) + call void @__quantum__qis__rx__body(double 1.5707963267948966, ptr inttoptr (i64 1 to ptr)) + call void @__quantum__qis__rx__body(double 1.5707963267948966, ptr inttoptr (i64 2 to ptr)) + call void @__quantum__qis__rx__body(double 1.5707963267948966, ptr inttoptr (i64 0 to ptr)) + call void @__quantum__qis__rx__body(double 1.5707963267948966, ptr inttoptr (i64 1 to ptr)) + call void @__quantum__qis__rx__body(double 1.5707963267948966, ptr inttoptr (i64 2 to ptr)) + call void @__quantum__qis__rx__body(double 1.5707963267948966, ptr inttoptr (i64 0 to ptr)) + call void @__quantum__qis__rx__body(double 1.5707963267948966, ptr inttoptr (i64 1 to ptr)) + call void @__quantum__qis__rx__body(double 1.5707963267948966, ptr inttoptr (i64 2 to ptr)) + call void @__quantum__qis__rx__body(double 1.5707963267948966, ptr inttoptr (i64 0 to ptr)) + call void @__quantum__qis__rx__body(double 1.5707963267948966, ptr inttoptr (i64 1 to ptr)) + call void @__quantum__qis__rx__body(double 1.5707963267948966, ptr inttoptr (i64 2 to ptr)) + call void @__quantum__qis__cx__body(ptr inttoptr 
(i64 0 to ptr), ptr inttoptr (i64 3 to ptr)) + call void @__quantum__qis__cx__body(ptr inttoptr (i64 1 to ptr), ptr inttoptr (i64 3 to ptr)) + call void @__quantum__qis__cx__body(ptr inttoptr (i64 1 to ptr), ptr inttoptr (i64 4 to ptr)) + call void @__quantum__qis__cx__body(ptr inttoptr (i64 2 to ptr), ptr inttoptr (i64 4 to ptr)) + call void @__quantum__qis__mresetz__body(ptr inttoptr (i64 3 to ptr), ptr inttoptr (i64 2 to ptr)) + call void @__quantum__qis__mresetz__body(ptr inttoptr (i64 4 to ptr), ptr inttoptr (i64 3 to ptr)) + store i1 true, ptr %var_25 + %var_26 = call i1 @__quantum__rt__read_result(ptr inttoptr (i64 2 to ptr)) + br i1 %var_26, label %block_12, label %block_13 +block_12: + %var_28 = call i1 @__quantum__rt__read_result(ptr inttoptr (i64 3 to ptr)) + br i1 %var_28, label %block_14, label %block_15 +block_13: + %var_30 = call i1 @__quantum__rt__read_result(ptr inttoptr (i64 3 to ptr)) + br i1 %var_30, label %block_16, label %block_17 +block_14: + call void @__quantum__qis__x__body(ptr inttoptr (i64 1 to ptr)) + br label %block_18 +block_15: + call void @__quantum__qis__x__body(ptr inttoptr (i64 0 to ptr)) + br label %block_18 +block_16: + call void @__quantum__qis__x__body(ptr inttoptr (i64 2 to ptr)) + br label %block_19 +block_17: + store i1 false, ptr %var_25 + br label %block_19 +block_18: + br label %block_20 +block_19: + br label %block_20 +block_20: + %var_89 = load i1, ptr %var_25 + br i1 %var_89, label %block_21, label %block_22 +block_21: + %var_106 = load i64, ptr %var_1 + %var_33 = add i64 %var_106, 1 + store i64 %var_33, ptr %var_1 + br label %block_22 +block_22: + call void @__quantum__qis__rx__body(double 1.5707963267948966, ptr inttoptr (i64 0 to ptr)) + call void @__quantum__qis__rx__body(double 1.5707963267948966, ptr inttoptr (i64 1 to ptr)) + call void @__quantum__qis__rx__body(double 1.5707963267948966, ptr inttoptr (i64 2 to ptr)) + call void @__quantum__qis__rx__body(double 1.5707963267948966, ptr inttoptr (i64 0 to ptr)) + 
call void @__quantum__qis__rx__body(double 1.5707963267948966, ptr inttoptr (i64 1 to ptr)) + call void @__quantum__qis__rx__body(double 1.5707963267948966, ptr inttoptr (i64 2 to ptr)) + call void @__quantum__qis__rx__body(double 1.5707963267948966, ptr inttoptr (i64 0 to ptr)) + call void @__quantum__qis__rx__body(double 1.5707963267948966, ptr inttoptr (i64 1 to ptr)) + call void @__quantum__qis__rx__body(double 1.5707963267948966, ptr inttoptr (i64 2 to ptr)) + call void @__quantum__qis__rx__body(double 1.5707963267948966, ptr inttoptr (i64 0 to ptr)) + call void @__quantum__qis__rx__body(double 1.5707963267948966, ptr inttoptr (i64 1 to ptr)) + call void @__quantum__qis__rx__body(double 1.5707963267948966, ptr inttoptr (i64 2 to ptr)) + call void @__quantum__qis__cx__body(ptr inttoptr (i64 0 to ptr), ptr inttoptr (i64 3 to ptr)) + call void @__quantum__qis__cx__body(ptr inttoptr (i64 1 to ptr), ptr inttoptr (i64 3 to ptr)) + call void @__quantum__qis__cx__body(ptr inttoptr (i64 1 to ptr), ptr inttoptr (i64 4 to ptr)) + call void @__quantum__qis__cx__body(ptr inttoptr (i64 2 to ptr), ptr inttoptr (i64 4 to ptr)) + call void @__quantum__qis__mresetz__body(ptr inttoptr (i64 3 to ptr), ptr inttoptr (i64 4 to ptr)) + call void @__quantum__qis__mresetz__body(ptr inttoptr (i64 4 to ptr), ptr inttoptr (i64 5 to ptr)) + store i1 true, ptr %var_41 + %var_42 = call i1 @__quantum__rt__read_result(ptr inttoptr (i64 4 to ptr)) + br i1 %var_42, label %block_23, label %block_24 +block_23: + %var_44 = call i1 @__quantum__rt__read_result(ptr inttoptr (i64 5 to ptr)) + br i1 %var_44, label %block_25, label %block_26 +block_24: + %var_46 = call i1 @__quantum__rt__read_result(ptr inttoptr (i64 5 to ptr)) + br i1 %var_46, label %block_27, label %block_28 +block_25: + call void @__quantum__qis__x__body(ptr inttoptr (i64 1 to ptr)) + br label %block_29 +block_26: + call void @__quantum__qis__x__body(ptr inttoptr (i64 0 to ptr)) + br label %block_29 +block_27: + call void 
@__quantum__qis__x__body(ptr inttoptr (i64 2 to ptr)) + br label %block_30 +block_28: + store i1 false, ptr %var_41 + br label %block_30 +block_29: + br label %block_31 +block_30: + br label %block_31 +block_31: + %var_92 = load i1, ptr %var_41 + br i1 %var_92, label %block_32, label %block_33 +block_32: + %var_104 = load i64, ptr %var_1 + %var_49 = add i64 %var_104, 1 + store i64 %var_49, ptr %var_1 + br label %block_33 +block_33: + call void @__quantum__qis__rx__body(double 1.5707963267948966, ptr inttoptr (i64 0 to ptr)) + call void @__quantum__qis__rx__body(double 1.5707963267948966, ptr inttoptr (i64 1 to ptr)) + call void @__quantum__qis__rx__body(double 1.5707963267948966, ptr inttoptr (i64 2 to ptr)) + call void @__quantum__qis__rx__body(double 1.5707963267948966, ptr inttoptr (i64 0 to ptr)) + call void @__quantum__qis__rx__body(double 1.5707963267948966, ptr inttoptr (i64 1 to ptr)) + call void @__quantum__qis__rx__body(double 1.5707963267948966, ptr inttoptr (i64 2 to ptr)) + call void @__quantum__qis__rx__body(double 1.5707963267948966, ptr inttoptr (i64 0 to ptr)) + call void @__quantum__qis__rx__body(double 1.5707963267948966, ptr inttoptr (i64 1 to ptr)) + call void @__quantum__qis__rx__body(double 1.5707963267948966, ptr inttoptr (i64 2 to ptr)) + call void @__quantum__qis__rx__body(double 1.5707963267948966, ptr inttoptr (i64 0 to ptr)) + call void @__quantum__qis__rx__body(double 1.5707963267948966, ptr inttoptr (i64 1 to ptr)) + call void @__quantum__qis__rx__body(double 1.5707963267948966, ptr inttoptr (i64 2 to ptr)) + call void @__quantum__qis__cx__body(ptr inttoptr (i64 0 to ptr), ptr inttoptr (i64 3 to ptr)) + call void @__quantum__qis__cx__body(ptr inttoptr (i64 1 to ptr), ptr inttoptr (i64 3 to ptr)) + call void @__quantum__qis__cx__body(ptr inttoptr (i64 1 to ptr), ptr inttoptr (i64 4 to ptr)) + call void @__quantum__qis__cx__body(ptr inttoptr (i64 2 to ptr), ptr inttoptr (i64 4 to ptr)) + call void @__quantum__qis__mresetz__body(ptr 
inttoptr (i64 3 to ptr), ptr inttoptr (i64 6 to ptr)) + call void @__quantum__qis__mresetz__body(ptr inttoptr (i64 4 to ptr), ptr inttoptr (i64 7 to ptr)) + store i1 true, ptr %var_57 + %var_58 = call i1 @__quantum__rt__read_result(ptr inttoptr (i64 6 to ptr)) + br i1 %var_58, label %block_34, label %block_35 +block_34: + %var_60 = call i1 @__quantum__rt__read_result(ptr inttoptr (i64 7 to ptr)) + br i1 %var_60, label %block_36, label %block_37 +block_35: + %var_62 = call i1 @__quantum__rt__read_result(ptr inttoptr (i64 7 to ptr)) + br i1 %var_62, label %block_38, label %block_39 +block_36: + call void @__quantum__qis__x__body(ptr inttoptr (i64 1 to ptr)) + br label %block_40 +block_37: + call void @__quantum__qis__x__body(ptr inttoptr (i64 0 to ptr)) + br label %block_40 +block_38: + call void @__quantum__qis__x__body(ptr inttoptr (i64 2 to ptr)) + br label %block_41 +block_39: + store i1 false, ptr %var_57 + br label %block_41 +block_40: + br label %block_42 +block_41: + br label %block_42 +block_42: + %var_95 = load i1, ptr %var_57 + br i1 %var_95, label %block_43, label %block_44 +block_43: + %var_102 = load i64, ptr %var_1 + %var_65 = add i64 %var_102, 1 + store i64 %var_65, ptr %var_1 + br label %block_44 +block_44: + call void @__quantum__qis__rx__body(double 1.5707963267948966, ptr inttoptr (i64 0 to ptr)) + call void @__quantum__qis__rx__body(double 1.5707963267948966, ptr inttoptr (i64 1 to ptr)) + call void @__quantum__qis__rx__body(double 1.5707963267948966, ptr inttoptr (i64 2 to ptr)) + call void @__quantum__qis__rx__body(double 1.5707963267948966, ptr inttoptr (i64 0 to ptr)) + call void @__quantum__qis__rx__body(double 1.5707963267948966, ptr inttoptr (i64 1 to ptr)) + call void @__quantum__qis__rx__body(double 1.5707963267948966, ptr inttoptr (i64 2 to ptr)) + call void @__quantum__qis__rx__body(double 1.5707963267948966, ptr inttoptr (i64 0 to ptr)) + call void @__quantum__qis__rx__body(double 1.5707963267948966, ptr inttoptr (i64 1 to ptr)) + 
call void @__quantum__qis__rx__body(double 1.5707963267948966, ptr inttoptr (i64 2 to ptr)) + call void @__quantum__qis__rx__body(double 1.5707963267948966, ptr inttoptr (i64 0 to ptr)) + call void @__quantum__qis__rx__body(double 1.5707963267948966, ptr inttoptr (i64 1 to ptr)) + call void @__quantum__qis__rx__body(double 1.5707963267948966, ptr inttoptr (i64 2 to ptr)) + call void @__quantum__qis__cx__body(ptr inttoptr (i64 0 to ptr), ptr inttoptr (i64 3 to ptr)) + call void @__quantum__qis__cx__body(ptr inttoptr (i64 1 to ptr), ptr inttoptr (i64 3 to ptr)) + call void @__quantum__qis__cx__body(ptr inttoptr (i64 1 to ptr), ptr inttoptr (i64 4 to ptr)) + call void @__quantum__qis__cx__body(ptr inttoptr (i64 2 to ptr), ptr inttoptr (i64 4 to ptr)) + call void @__quantum__qis__mresetz__body(ptr inttoptr (i64 3 to ptr), ptr inttoptr (i64 8 to ptr)) + call void @__quantum__qis__mresetz__body(ptr inttoptr (i64 4 to ptr), ptr inttoptr (i64 9 to ptr)) + store i1 true, ptr %var_73 + %var_74 = call i1 @__quantum__rt__read_result(ptr inttoptr (i64 8 to ptr)) + br i1 %var_74, label %block_45, label %block_46 +block_45: + %var_76 = call i1 @__quantum__rt__read_result(ptr inttoptr (i64 9 to ptr)) + br i1 %var_76, label %block_47, label %block_48 +block_46: + %var_78 = call i1 @__quantum__rt__read_result(ptr inttoptr (i64 9 to ptr)) + br i1 %var_78, label %block_49, label %block_50 +block_47: + call void @__quantum__qis__x__body(ptr inttoptr (i64 1 to ptr)) + br label %block_51 +block_48: + call void @__quantum__qis__x__body(ptr inttoptr (i64 0 to ptr)) + br label %block_51 +block_49: + call void @__quantum__qis__x__body(ptr inttoptr (i64 2 to ptr)) + br label %block_52 +block_50: + store i1 false, ptr %var_73 + br label %block_52 +block_51: + br label %block_53 +block_52: + br label %block_53 +block_53: + %var_98 = load i1, ptr %var_73 + br i1 %var_98, label %block_54, label %block_55 +block_54: + %var_100 = load i64, ptr %var_1 + %var_81 = add i64 %var_100, 1 + store i64 
%var_81, ptr %var_1 + br label %block_55 +block_55: + call void @__quantum__qis__cx__body(ptr inttoptr (i64 0 to ptr), ptr inttoptr (i64 2 to ptr)) + call void @__quantum__qis__cx__body(ptr inttoptr (i64 0 to ptr), ptr inttoptr (i64 1 to ptr)) + call void @__quantum__qis__h__body(ptr inttoptr (i64 0 to ptr)) + call void @__quantum__qis__mresetz__body(ptr inttoptr (i64 0 to ptr), ptr inttoptr (i64 10 to ptr)) + %var_82 = call i1 @__quantum__rt__read_result(ptr inttoptr (i64 10 to ptr)) + call void @__quantum__qis__reset__body(ptr inttoptr (i64 1 to ptr)) + call void @__quantum__qis__reset__body(ptr inttoptr (i64 2 to ptr)) + call void @__quantum__rt__tuple_record_output(i64 2, ptr @0) + call void @__quantum__rt__bool_record_output(i1 %var_82, ptr @1) + %var_99 = load i64, ptr %var_1 + call void @__quantum__rt__int_record_output(i64 %var_99, ptr @2) + ret i64 0 +} + +declare void @__quantum__rt__initialize(ptr) + +declare void @__quantum__qis__h__body(ptr) + +declare void @__quantum__qis__z__body(ptr) + +declare void @__quantum__qis__cx__body(ptr, ptr) + +declare void @__quantum__qis__rx__body(double, ptr) + +declare void @__quantum__qis__mresetz__body(ptr, ptr) #1 + +declare i1 @__quantum__rt__read_result(ptr) + +declare void @__quantum__qis__x__body(ptr) + +declare void @__quantum__qis__reset__body(ptr) #1 + +declare void @__quantum__rt__tuple_record_output(i64, ptr) + +declare void @__quantum__rt__bool_record_output(i1, ptr) + +declare void @__quantum__rt__int_record_output(i64, ptr) + +attributes #0 = { "entry_point" "output_labeling_schema" "qir_profiles"="adaptive_profile" "required_num_qubits"="5" "required_num_results"="11" } +attributes #1 = { "irreversible" } + +; module flags + +!llvm.module.flags = !{!0, !1, !2, !3, !4, !5, !6, !7} + +!0 = !{i32 1, !"qir_major_version", i32 2} +!1 = !{i32 7, !"qir_minor_version", i32 1} +!2 = !{i32 1, !"dynamic_qubit_management", i1 false} +!3 = !{i32 1, !"dynamic_result_management", i1 false} +!4 = !{i32 5, 
!"int_computations", !{!"i64"}} +!5 = !{i32 5, !"float_computations", !{!"double"}} +!6 = !{i32 7, !"backwards_branching", i2 3} +!7 = !{i32 1, !"arrays", i1 true} diff --git a/source/pip/tests-integration/resources/adaptive_rifla/output/ThreeQubitRepetitionCode.out b/source/pip/tests-integration/resources/adaptive_rifla/output/ThreeQubitRepetitionCode.out new file mode 100644 index 0000000000..44e0bb438a --- /dev/null +++ b/source/pip/tests-integration/resources/adaptive_rifla/output/ThreeQubitRepetitionCode.out @@ -0,0 +1,10 @@ +START +METADATA entry_point +METADATA output_labeling_schema +METADATA qir_profiles adaptive_profile +METADATA required_num_qubits 5 +METADATA required_num_results 11 +OUTPUT TUPLE 2 0_t +OUTPUT BOOL true 1_t0b +OUTPUT INT 0 2_t1i +END 0 diff --git a/source/pip/tests-integration/resources/adaptive_rifla/output/WithinApply.ll b/source/pip/tests-integration/resources/adaptive_rifla/output/WithinApply.ll new file mode 100644 index 0000000000..63fea7a987 --- /dev/null +++ b/source/pip/tests-integration/resources/adaptive_rifla/output/WithinApply.ll @@ -0,0 +1,55 @@ +@0 = internal constant [4 x i8] c"0_a\00" +@1 = internal constant [6 x i8] c"1_a0r\00" +@2 = internal constant [6 x i8] c"2_a1r\00" +@3 = internal constant [6 x i8] c"3_a2r\00" + +define i64 @ENTRYPOINT__main() #0 { +block_0: + call void @__quantum__rt__initialize(ptr null) + call void @__quantum__qis__x__body(ptr inttoptr (i64 1 to ptr)) + call void @__quantum__qis__x__body(ptr inttoptr (i64 2 to ptr)) + call void @__quantum__qis__ccx__body(ptr inttoptr (i64 1 to ptr), ptr inttoptr (i64 2 to ptr), ptr inttoptr (i64 0 to ptr)) + call void @__quantum__qis__x__body(ptr inttoptr (i64 2 to ptr)) + call void @__quantum__qis__x__body(ptr inttoptr (i64 1 to ptr)) + call void @__quantum__qis__m__body(ptr inttoptr (i64 1 to ptr), ptr inttoptr (i64 0 to ptr)) + call void @__quantum__qis__m__body(ptr inttoptr (i64 2 to ptr), ptr inttoptr (i64 1 to ptr)) + call void 
@__quantum__qis__m__body(ptr inttoptr (i64 0 to ptr), ptr inttoptr (i64 2 to ptr)) + call void @__quantum__qis__reset__body(ptr inttoptr (i64 1 to ptr)) + call void @__quantum__qis__reset__body(ptr inttoptr (i64 2 to ptr)) + call void @__quantum__qis__reset__body(ptr inttoptr (i64 0 to ptr)) + call void @__quantum__rt__array_record_output(i64 3, ptr @0) + call void @__quantum__rt__result_record_output(ptr inttoptr (i64 0 to ptr), ptr @1) + call void @__quantum__rt__result_record_output(ptr inttoptr (i64 1 to ptr), ptr @2) + call void @__quantum__rt__result_record_output(ptr inttoptr (i64 2 to ptr), ptr @3) + ret i64 0 +} + +declare void @__quantum__rt__initialize(ptr) + +declare void @__quantum__qis__x__body(ptr) + +declare void @__quantum__qis__ccx__body(ptr, ptr, ptr) + +declare void @__quantum__qis__m__body(ptr, ptr) #1 + +declare void @__quantum__qis__reset__body(ptr) #1 + +declare void @__quantum__rt__array_record_output(i64, ptr) + +declare void @__quantum__rt__result_record_output(ptr, ptr) + +attributes #0 = { "entry_point" "output_labeling_schema" "qir_profiles"="adaptive_profile" "required_num_qubits"="3" "required_num_results"="3" } +attributes #1 = { "irreversible" } + +; module flags + +!llvm.module.flags = !{!0, !1, !2, !3, !4, !5, !6, !7} + +!0 = !{i32 1, !"qir_major_version", i32 2} +!1 = !{i32 7, !"qir_minor_version", i32 1} +!2 = !{i32 1, !"dynamic_qubit_management", i1 false} +!3 = !{i32 1, !"dynamic_result_management", i1 false} +!4 = !{i32 5, !"int_computations", !{!"i64"}} +!5 = !{i32 5, !"float_computations", !{!"double"}} +!6 = !{i32 7, !"backwards_branching", i2 3} +!7 = !{i32 1, !"arrays", i1 true} diff --git a/source/pip/tests-integration/resources/adaptive_rifla/output/WithinApply.out b/source/pip/tests-integration/resources/adaptive_rifla/output/WithinApply.out new file mode 100644 index 0000000000..da892c6ba9 --- /dev/null +++ b/source/pip/tests-integration/resources/adaptive_rifla/output/WithinApply.out @@ -0,0 +1,11 @@ +START 
+METADATA entry_point +METADATA output_labeling_schema +METADATA qir_profiles adaptive_profile +METADATA required_num_qubits 3 +METADATA required_num_results 3 +OUTPUT ARRAY 3 0_a +OUTPUT RESULT 0 1_a0r +OUTPUT RESULT 0 2_a1r +OUTPUT RESULT 1 3_a2r +END 0 diff --git a/source/pip/tests-integration/test_adaptive_rifla_qir.py b/source/pip/tests-integration/test_adaptive_rifla_qir.py new file mode 100644 index 0000000000..ae07e4af44 --- /dev/null +++ b/source/pip/tests-integration/test_adaptive_rifla_qir.py @@ -0,0 +1,49 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. + + +import pytest + +from qsharp import TargetProfile +from utils import ( + assert_strings_equal_ignore_line_endings, + compile_qsharp, + get_input_files, + get_output_ll_file, + get_output_out_file, + QIR_RUNNER_AVAILABLE, + read_file, + save_qir_to_temp_file_and_execute, + SKIP_REASON, +) + +TARGET_PROFILE = TargetProfile.Adaptive_RIFLA + + +# This function is used to generate the expected output files for the tests +# Rename the function to start with test_ to generate the expected output files +def generate_test_outputs(): + from utils import generate_test_outputs + + generate_test_outputs(TARGET_PROFILE) + + +@pytest.mark.parametrize("file_path", get_input_files(TARGET_PROFILE)) +@pytest.mark.skipif(not QIR_RUNNER_AVAILABLE, reason=SKIP_REASON) +def test_adaptive_rifla_qir(file_path: str) -> None: + source = read_file(file_path, TARGET_PROFILE) + ll_file_path = get_output_ll_file(file_path, TARGET_PROFILE) + expected_qir = read_file(ll_file_path, TARGET_PROFILE) + actual_qir = compile_qsharp(source, TARGET_PROFILE) + assert actual_qir == expected_qir + + +@pytest.mark.parametrize("file_path", get_input_files(TARGET_PROFILE)) +@pytest.mark.skipif(not QIR_RUNNER_AVAILABLE, reason=SKIP_REASON) +def test_adaptive_rifla_output(file_path: str) -> None: + source = read_file(file_path, TARGET_PROFILE) + qir = compile_qsharp(source, TARGET_PROFILE) + output_file_path = 
get_output_out_file(file_path, TARGET_PROFILE) + expected_output = read_file(output_file_path, TARGET_PROFILE) + actual_output = save_qir_to_temp_file_and_execute(qir) + assert_strings_equal_ignore_line_endings(actual_output, expected_output) diff --git a/source/pip/tests/test_enums.py b/source/pip/tests/test_enums.py index 839075b36f..0fb36f5e4f 100644 --- a/source/pip/tests/test_enums.py +++ b/source/pip/tests/test_enums.py @@ -10,6 +10,7 @@ import io from qsharp import TargetProfile + # pull in from native module for tests so that we don't have to install qiskit # using the interop module from qsharp._native import OutputSemantics, ProgramType @@ -19,7 +20,8 @@ def test_target_profile_int_values_match_enum_values() -> None: assert 0 == TargetProfile.Base assert 1 == TargetProfile.Adaptive_RI assert 2 == TargetProfile.Adaptive_RIF - assert 3 == TargetProfile.Unrestricted + assert 3 == TargetProfile.Adaptive_RIFLA + assert 4 == TargetProfile.Unrestricted def test_target_profile_serialization() -> None: @@ -46,6 +48,9 @@ def test_target_profile_str_values_match_enum_values() -> None: target_profile = TargetProfile.Adaptive_RIF str_value = str(target_profile) assert str_value == "Adaptive_RIF" + target_profile = TargetProfile.Adaptive_RIFLA + str_value = str(target_profile) + assert str_value == "Adaptive_RIFLA" target_profile = TargetProfile.Unrestricted str_value = str(target_profile) assert str_value == "Unrestricted" @@ -61,6 +66,9 @@ def test_target_profile_from_str_match_enum_values() -> None: target_profile = TargetProfile.Adaptive_RIF str_value = str(target_profile) assert TargetProfile.from_str(str_value) == target_profile + target_profile = TargetProfile.Adaptive_RIFLA + str_value = str(target_profile) + assert TargetProfile.from_str(str_value) == target_profile target_profile = TargetProfile.Unrestricted str_value = str(target_profile) assert TargetProfile.from_str(str_value) == target_profile From bcae97f2ca3124c82e2833bca554789f11ab58b9 Mon Sep 17 
00:00:00 2001 From: "Stefan J. Wernli" Date: Fri, 20 Mar 2026 15:17:13 -0700 Subject: [PATCH 2/5] Add comments to target profiles and capabilities flags --- .../qsc_data_structures/src/target.rs | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) diff --git a/source/compiler/qsc_data_structures/src/target.rs b/source/compiler/qsc_data_structures/src/target.rs index 1349755fd8..93804f5f7d 100644 --- a/source/compiler/qsc_data_structures/src/target.rs +++ b/source/compiler/qsc_data_structures/src/target.rs @@ -4,13 +4,25 @@ use bitflags::bitflags; bitflags! { + /// These flags describe the capabilities of the target execution environment. They are used to determine which language features we can + /// emit into generated code for that target, and correlate to which design-time errors are surfaced by the capabilities check pass. + /// Note that the flags are in increasing order of capability and are expected to be largely additive. The code uses this "well-ordered" + /// property to perform some checks using comparison operators, inaddition to the user bitwise membership checks offered by bitflags. + /// Empty bitflags (0) corresponds to a "Base profile" target where no branching or classical computations are supported. #[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] pub struct TargetCapabilityFlags: u32 { + /// Supports forward branching based on measurement results and reuse of qubits after measurement. const Adaptive = 0b0000_0001; + /// Supports classical computations on integers (e.g. addition, multiplication). const IntegerComputations = 0b0000_0010; + /// Supports classical computations on floating point numbers (e.g. addition, multiplication). const FloatingPointComputations = 0b0000_0100; + /// Supports backward branching based on measurement results aka loops. const BackwardsBranching = 0b0000_1000; + /// Supports statically sized arrays (i.e. array literals and array indexing with non-constant indices). 
const StaticSizedArrays = 0b0001_0000; + /// Catch-all for high level language constructs not covered by other flags. New flags should be added above this one, + /// such that this flag is reserved for the "all capabilities" targets that can run anything the langauge can express. const HigherLevelConstructs = 0b1000_0000; } } @@ -41,12 +53,19 @@ impl Default for TargetCapabilityFlags { use std::str::FromStr; +/// The profile of the target environment, which formalizes the combined set of capabilities that target supports. +/// Most user-facing APIs work in terms of profiles. #[derive(Clone, Copy, Debug, PartialEq)] pub enum Profile { + /// Corresponds to a target with no limitations on supported language features. Unrestricted, + /// Corresponds to a target with support only for gate operations on qubits with all measurements at the end of the program.. Base, + /// Corresponds to a target with support for forward branching, qubit reuse, and integer computations. AdaptiveRI, + /// Corresponds to a target with support for forward branching, qubit reuse, integer computations, and floating point computations. AdaptiveRIF, + /// Corresponds to a target with support for forward branching, qubit reuse, integer computations, floating point computations, loops, and static sized arrays. AdaptiveRIFLA, } From 16f3b8457827caf8e6f351269cf5ae6f62203257 Mon Sep 17 00:00:00 2001 From: "Stefan J. 
Wernli" Date: Sat, 21 Mar 2026 15:02:33 -0700 Subject: [PATCH 3/5] set adaptive_rifla in python config --- source/pip/qsharp/_qsharp.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/source/pip/qsharp/_qsharp.py b/source/pip/qsharp/_qsharp.py index 57b122f279..fe6411a09e 100644 --- a/source/pip/qsharp/_qsharp.py +++ b/source/pip/qsharp/_qsharp.py @@ -156,8 +156,10 @@ def __init__( ): if target_profile == TargetProfile.Adaptive_RI: self._config = {"targetProfile": "adaptive_ri"} - if target_profile == TargetProfile.Adaptive_RIF: + elif target_profile == TargetProfile.Adaptive_RIF: self._config = {"targetProfile": "adaptive_rif"} + elif target_profile == TargetProfile.Adaptive_RIFLA: + self._config = {"targetProfile": "adaptive_rifla"} elif target_profile == TargetProfile.Base: self._config = {"targetProfile": "base"} elif target_profile == TargetProfile.Unrestricted: From 01f0fb9f537260719667d26aa4448be7395b07a1 Mon Sep 17 00:00:00 2001 From: "Stefan J. Wernli" Date: Mon, 23 Mar 2026 16:07:11 -0700 Subject: [PATCH 4/5] Flatten instruction enum --- .../qsc_circuit/src/rir_to_circuit.rs | 3 +- source/compiler/qsc_codegen/src/qir/v1.rs | 2 +- source/compiler/qsc_codegen/src/qir/v2.rs | 12 +----- .../src/qir/v2/instruction_tests/alloca.rs | 16 ++------ .../src/qir/v2/instruction_tests/load.rs | 16 ++++---- .../qsc_rir/src/passes/insert_alloca_load.rs | 14 +++---- .../src/passes/insert_alloca_load/tests.rs | 7 ++-- .../src/passes/prune_unneeded_stores.rs | 6 +-- .../compiler/qsc_rir/src/passes/ssa_check.rs | 4 +- .../compiler/qsc_rir/src/passes/type_check.rs | 5 ++- source/compiler/qsc_rir/src/rir.rs | 39 ++----------------- source/compiler/qsc_rir/src/utils.rs | 12 +++--- 12 files changed, 42 insertions(+), 94 deletions(-) diff --git a/source/compiler/qsc_circuit/src/rir_to_circuit.rs b/source/compiler/qsc_circuit/src/rir_to_circuit.rs index ffb990a2f2..6d7eefab03 100644 --- a/source/compiler/qsc_circuit/src/rir_to_circuit.rs +++ 
b/source/compiler/qsc_circuit/src/rir_to_circuit.rs @@ -371,7 +371,8 @@ fn process_variables( } instruction @ (Instruction::Store(..) | Instruction::BitwiseNot(..) - | Instruction::Advanced(..)) => { + | Instruction::Alloca(..) + | Instruction::Load(..)) => { return Err(Error::UnsupportedFeature(format!( "unsupported instruction in block: {instruction:?}" ))); diff --git a/source/compiler/qsc_codegen/src/qir/v1.rs b/source/compiler/qsc_codegen/src/qir/v1.rs index aa44cd820e..8ed60ba29b 100644 --- a/source/compiler/qsc_codegen/src/qir/v1.rs +++ b/source/compiler/qsc_codegen/src/qir/v1.rs @@ -213,7 +213,7 @@ impl ToQir for rir::Instruction { rir::Instruction::Sub(lhs, rhs, variable) => { binop_to_qir("sub", lhs, rhs, *variable, program) } - rir::Instruction::Advanced(..) => { + rir::Instruction::Alloca(..) | rir::Instruction::Load(..) => { unimplemented!("advanced instructions are not supported in QIR v1 generation") } } diff --git a/source/compiler/qsc_codegen/src/qir/v2.rs b/source/compiler/qsc_codegen/src/qir/v2.rs index c721c01ee6..9a4a02f833 100644 --- a/source/compiler/qsc_codegen/src/qir/v2.rs +++ b/source/compiler/qsc_codegen/src/qir/v2.rs @@ -210,16 +210,8 @@ impl ToQir for rir::Instruction { rir::Instruction::Sub(lhs, rhs, variable) => { binop_to_qir("sub", lhs, rhs, *variable, program) } - rir::Instruction::Advanced(instr) => ToQir::::to_qir(instr, program), - } - } -} - -impl ToQir for rir::AdvancedInstr { - fn to_qir(&self, program: &rir::Program) -> String { - match self { - rir::AdvancedInstr::Alloca(variable) => alloca_to_qir(*variable, program), - rir::AdvancedInstr::Load(var_from, var_to) => load_to_qir(*var_from, *var_to, program), + rir::Instruction::Alloca(variable) => alloca_to_qir(*variable, program), + rir::Instruction::Load(var_from, var_to) => load_to_qir(*var_from, *var_to, program), } } } diff --git a/source/compiler/qsc_codegen/src/qir/v2/instruction_tests/alloca.rs b/source/compiler/qsc_codegen/src/qir/v2/instruction_tests/alloca.rs 
index 3823006e5f..bbed29d762 100644 --- a/source/compiler/qsc_codegen/src/qir/v2/instruction_tests/alloca.rs +++ b/source/compiler/qsc_codegen/src/qir/v2/instruction_tests/alloca.rs @@ -7,32 +7,24 @@ use qsc_rir::rir; #[test] fn alloca_integer_without_size() { - let inst = rir::Instruction::Advanced(rir::AdvancedInstr::Alloca(rir::Variable::new_integer( - rir::VariableId(0), - ))); + let inst = rir::Instruction::Alloca(rir::Variable::new_integer(rir::VariableId(0))); expect![" %var_0 = alloca i64"].assert_eq(&inst.to_qir(&rir::Program::default())); } #[test] fn alloca_bool_without_size() { - let inst = rir::Instruction::Advanced(rir::AdvancedInstr::Alloca(rir::Variable::new_boolean( - rir::VariableId(0), - ))); + let inst = rir::Instruction::Alloca(rir::Variable::new_boolean(rir::VariableId(0))); expect![" %var_0 = alloca i1"].assert_eq(&inst.to_qir(&rir::Program::default())); } #[test] fn alloca_double_without_size() { - let inst = rir::Instruction::Advanced(rir::AdvancedInstr::Alloca(rir::Variable::new_double( - rir::VariableId(0), - ))); + let inst = rir::Instruction::Alloca(rir::Variable::new_double(rir::VariableId(0))); expect![" %var_0 = alloca double"].assert_eq(&inst.to_qir(&rir::Program::default())); } #[test] fn alloca_pointer_without_size() { - let inst = rir::Instruction::Advanced(rir::AdvancedInstr::Alloca(rir::Variable::new_ptr( - rir::VariableId(0), - ))); + let inst = rir::Instruction::Alloca(rir::Variable::new_ptr(rir::VariableId(0))); expect![" %var_0 = alloca ptr"].assert_eq(&inst.to_qir(&rir::Program::default())); } diff --git a/source/compiler/qsc_codegen/src/qir/v2/instruction_tests/load.rs b/source/compiler/qsc_codegen/src/qir/v2/instruction_tests/load.rs index 4204d0788a..65b4fe7ec5 100644 --- a/source/compiler/qsc_codegen/src/qir/v2/instruction_tests/load.rs +++ b/source/compiler/qsc_codegen/src/qir/v2/instruction_tests/load.rs @@ -7,36 +7,36 @@ use qsc_rir::rir; #[test] fn load_integer_from_pointer() { - let inst = 
rir::Instruction::Advanced(rir::AdvancedInstr::Load( + let inst = rir::Instruction::Load( rir::Variable::new_ptr(rir::VariableId(1)), rir::Variable::new_integer(rir::VariableId(0)), - )); + ); expect![" %var_0 = load i64, ptr %var_1"].assert_eq(&inst.to_qir(&rir::Program::default())); } #[test] fn load_bool_from_pointer() { - let inst = rir::Instruction::Advanced(rir::AdvancedInstr::Load( + let inst = rir::Instruction::Load( rir::Variable::new_ptr(rir::VariableId(1)), rir::Variable::new_boolean(rir::VariableId(0)), - )); + ); expect![" %var_0 = load i1, ptr %var_1"].assert_eq(&inst.to_qir(&rir::Program::default())); } #[test] fn load_double_from_pointer() { - let inst = rir::Instruction::Advanced(rir::AdvancedInstr::Load( + let inst = rir::Instruction::Load( rir::Variable::new_ptr(rir::VariableId(1)), rir::Variable::new_double(rir::VariableId(0)), - )); + ); expect![" %var_0 = load double, ptr %var_1"].assert_eq(&inst.to_qir(&rir::Program::default())); } #[test] fn load_pointer_from_pointer() { - let inst = rir::Instruction::Advanced(rir::AdvancedInstr::Load( + let inst = rir::Instruction::Load( rir::Variable::new_ptr(rir::VariableId(1)), rir::Variable::new_ptr(rir::VariableId(0)), - )); + ); expect![" %var_0 = load ptr, ptr %var_1"].assert_eq(&inst.to_qir(&rir::Program::default())); } diff --git a/source/compiler/qsc_rir/src/passes/insert_alloca_load.rs b/source/compiler/qsc_rir/src/passes/insert_alloca_load.rs index e1811406ae..319f1aded4 100644 --- a/source/compiler/qsc_rir/src/passes/insert_alloca_load.rs +++ b/source/compiler/qsc_rir/src/passes/insert_alloca_load.rs @@ -8,9 +8,7 @@ use qsc_data_structures::index_map::IndexMap; use rustc_hash::{FxHashMap, FxHashSet}; use crate::{ - rir::{ - AdvancedInstr, BlockId, CallableId, Instruction, Operand, Program, Variable, VariableId, - }, + rir::{BlockId, CallableId, Instruction, Operand, Program, Variable, VariableId}, utils::{get_block_successors, get_variable_assignments}, }; @@ -52,7 +50,7 @@ fn 
process_callable(program: &mut Program, callable_id: CallableId, next_var_id: let mut alloca_instrs = Vec::new(); for (_, variable) in vars_to_alloca.iter() { - alloca_instrs.push(AdvancedInstr::Alloca(*variable).into()); + alloca_instrs.push(Instruction::Alloca(*variable)); } let entry_block = program.get_block_mut(entry_block_id); let new_instrs = alloca_instrs @@ -168,10 +166,10 @@ fn add_alloca_load_to_block( // like the unconditional terminators. Instruction::Phi(..) | Instruction::Jump(..) | Instruction::Return => {} - Instruction::Advanced(AdvancedInstr::Alloca(..)) => { + Instruction::Alloca(..) => { panic!("alloca not expected in alloca insertion") } - Instruction::Advanced(AdvancedInstr::Load(..)) => { + Instruction::Load(..) => { panic!("load not expected in alloca insertion") } } @@ -215,7 +213,7 @@ fn map_or_load_variable_to_operand( variable_id: *next_var_id, ty: variable.ty, }; - instrs.push(AdvancedInstr::Load(variable, new_var).into()); + instrs.push(Instruction::Load(variable, new_var)); var_map.insert(variable.variable_id, Operand::Variable(new_var)); *next_var_id = next_var_id.successor(); Operand::Variable(new_var) @@ -240,7 +238,7 @@ fn map_or_load_variable( variable_id: *next_var_id, ty: variable.ty, }; - instrs.push(AdvancedInstr::Load(variable, new_var).into()); + instrs.push(Instruction::Load(variable, new_var)); var_map.insert(variable.variable_id, Operand::Variable(new_var)); *next_var_id = next_var_id.successor(); new_var diff --git a/source/compiler/qsc_rir/src/passes/insert_alloca_load/tests.rs b/source/compiler/qsc_rir/src/passes/insert_alloca_load/tests.rs index ee37eee8f9..9e1ff74694 100644 --- a/source/compiler/qsc_rir/src/passes/insert_alloca_load/tests.rs +++ b/source/compiler/qsc_rir/src/passes/insert_alloca_load/tests.rs @@ -4,8 +4,7 @@ use expect_test::expect; use crate::rir::{ - AdvancedInstr, Block, BlockId, CallableId, Instruction, Literal, Operand, Program, Variable, - VariableId, + Block, BlockId, CallableId, 
Instruction, Literal, Operand, Program, Variable, VariableId, }; use super::insert_alloca_load_instrs; @@ -90,7 +89,7 @@ fn reuses_single_load_within_block() { let load_count = block .0 .iter() - .filter(|instr| matches!(instr, Instruction::Advanced(AdvancedInstr::Load(..)))) + .filter(|instr| matches!(instr, Instruction::Load(..))) .count(); assert_eq!( load_count, 1, @@ -194,7 +193,7 @@ fn leaves_unrelated_operands_unloaded() { block .0 .iter() - .all(|instr| !matches!(instr, Instruction::Advanced(AdvancedInstr::Load(..)))), + .all(|instr| !matches!(instr, Instruction::Load(..))), "no loads should be inserted for operands unrelated to stored variables", ); diff --git a/source/compiler/qsc_rir/src/passes/prune_unneeded_stores.rs b/source/compiler/qsc_rir/src/passes/prune_unneeded_stores.rs index 3977c8aa5b..c3e42f1dad 100644 --- a/source/compiler/qsc_rir/src/passes/prune_unneeded_stores.rs +++ b/source/compiler/qsc_rir/src/passes/prune_unneeded_stores.rs @@ -7,7 +7,7 @@ mod tests; use rustc_hash::{FxHashMap, FxHashSet}; use crate::{ - rir::{AdvancedInstr, CallableId, Instruction, Program, VariableId}, + rir::{CallableId, Instruction, Program, VariableId}, utils::{get_block_successors, map_variable_use_in_block}, }; @@ -162,10 +162,10 @@ fn check_var_usage( used_vars.insert(variable.variable_id); } - Instruction::Advanced(AdvancedInstr::Load(..)) => { + Instruction::Load(..) => { panic!("loads should not be present during store pruning") } - Instruction::Advanced(AdvancedInstr::Alloca(..)) => { + Instruction::Alloca(..) => { panic!("allocas should not be present during store pruning") } Instruction::Phi(..) 
=> panic!("phis should not be present during store pruning"), diff --git a/source/compiler/qsc_rir/src/passes/ssa_check.rs b/source/compiler/qsc_rir/src/passes/ssa_check.rs index 477334527c..efb7b95c52 100644 --- a/source/compiler/qsc_rir/src/passes/ssa_check.rs +++ b/source/compiler/qsc_rir/src/passes/ssa_check.rs @@ -227,8 +227,8 @@ fn get_variable_uses(program: &Program) -> IndexMap { - panic!("Unexpected Advanced instruction at {block_id:?}, instruction {idx}") + Instruction::Alloca(..) | Instruction::Load(..) => { + panic!("Unexpected advanced instruction at {block_id:?}, instruction {idx}") } } } diff --git a/source/compiler/qsc_rir/src/passes/type_check.rs b/source/compiler/qsc_rir/src/passes/type_check.rs index af77d19e2b..e5d048ef21 100644 --- a/source/compiler/qsc_rir/src/passes/type_check.rs +++ b/source/compiler/qsc_rir/src/passes/type_check.rs @@ -1,7 +1,7 @@ // Copyright (c) Microsoft Corporation. // Licensed under the MIT License. -use crate::rir::{AdvancedInstr, Callable, Instruction, Operand, Program, Ty, Variable}; +use crate::rir::{Callable, Instruction, Operand, Program, Ty, Variable}; #[cfg(test)] mod tests; @@ -61,7 +61,8 @@ fn check_instr_types(program: &Program, instr: &Instruction) { Instruction::Convert(_, _) | Instruction::Jump(_) - | Instruction::Advanced(AdvancedInstr::Alloca(..) | AdvancedInstr::Load(..)) + | Instruction::Alloca(..) + | Instruction::Load(..) 
| Instruction::Return => {} } } diff --git a/source/compiler/qsc_rir/src/rir.rs b/source/compiler/qsc_rir/src/rir.rs index 68e10b1e11..c2321d7900 100644 --- a/source/compiler/qsc_rir/src/rir.rs +++ b/source/compiler/qsc_rir/src/rir.rs @@ -381,7 +381,8 @@ pub enum Instruction { BitwiseXor(Operand, Operand, Variable), Phi(Vec<(Operand, BlockId)>, Variable), Convert(Operand, Variable), - Advanced(AdvancedInstr), + Load(Variable, Variable), + Alloca(Variable), Return, } @@ -473,41 +474,6 @@ impl Display for Instruction { let mut indent = set_indentation(indented(f), 0); write!(indent, "{variable} = Convert {operand}")?; } - Self::Advanced(instr) => { - write!(f, "{instr}")?; - } - Self::Return => write!(f, "Return")?, - } - Ok(()) - } -} - -#[derive(Clone, Copy, Debug, PartialEq, Eq)] -pub enum AdvancedInstr { - Load(Variable, Variable), - Alloca(Variable), -} - -impl From for Instruction { - fn from(instr: AdvancedInstr) -> Self { - Self::Advanced(instr) - } -} - -impl TryFrom for AdvancedInstr { - type Error = (); - - fn try_from(instr: Instruction) -> Result { - match instr { - Instruction::Advanced(adv_instr) => Ok(adv_instr), - _ => Err(()), - } - } -} - -impl Display for AdvancedInstr { - fn fmt(&self, f: &mut Formatter) -> fmt::Result { - match &self { Self::Load(lhs, rhs) => { write_unary_instruction(f, "Load", &Operand::Variable(*lhs), *rhs)?; } @@ -515,6 +481,7 @@ impl Display for AdvancedInstr { let mut indent = set_indentation(indented(f), 0); write!(indent, "{variable} = Alloca")?; } + Self::Return => write!(f, "Return")?, } Ok(()) } diff --git a/source/compiler/qsc_rir/src/utils.rs b/source/compiler/qsc_rir/src/utils.rs index db16859449..f5e2420c15 100644 --- a/source/compiler/qsc_rir/src/utils.rs +++ b/source/compiler/qsc_rir/src/utils.rs @@ -1,9 +1,7 @@ // Copyright (c) Microsoft Corporation. // Licensed under the MIT License. 
-use crate::rir::{ - AdvancedInstr, Block, BlockId, Instruction, Operand, Program, Variable, VariableId, -}; +use crate::rir::{Block, BlockId, Instruction, Operand, Program, Variable, VariableId}; use qsc_data_structures::index_map::IndexMap; use rustc_hash::{FxHashMap, FxHashSet}; @@ -111,8 +109,8 @@ pub fn get_variable_assignments(program: &Program) -> IndexMap - { + | Instruction::Alloca(var) + | Instruction::Load(_, var) => { has_store = true; assignments.insert(var.variable_id, (block_id, idx)); } @@ -214,10 +212,10 @@ pub(crate) fn map_variable_use_in_block( // like the unconditional terminators. Instruction::Phi(..) | Instruction::Jump(..) | Instruction::Return => {} - Instruction::Advanced(AdvancedInstr::Alloca(..)) => { + Instruction::Alloca(..) => { panic!("alloca not supported in ssa transformation") } - Instruction::Advanced(AdvancedInstr::Load(..)) => { + Instruction::Load(..) => { panic!("load not supported in ssa transformation") } } From d3520dc73b761bd6028a3e4aefc37a6ac6fa4a90 Mon Sep 17 00:00:00 2001 From: "Stefan J. Wernli" Date: Tue, 31 Mar 2026 14:46:50 -0700 Subject: [PATCH 5/5] typos --- source/compiler/qsc_data_structures/src/target.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/source/compiler/qsc_data_structures/src/target.rs b/source/compiler/qsc_data_structures/src/target.rs index 93804f5f7d..771c301943 100644 --- a/source/compiler/qsc_data_structures/src/target.rs +++ b/source/compiler/qsc_data_structures/src/target.rs @@ -7,7 +7,7 @@ bitflags! { /// These flags describe the capabilities of the target execution environment. They are used to determine which language features we can /// emit into generated code for that target, and correlate to which design-time errors are surfaced by the capabilities check pass. /// Note that the flags are in increasing order of capability and are expected to be largely additive. 
The code uses this "well-ordered" - /// property to perform some checks using comparison operators, inaddition to the user bitwise membership checks offered by bitflags. + /// property to perform some checks using comparison operators, in addition to the bitwise membership checks offered by bitflags. /// Empty bitflags (0) corresponds to a "Base profile" target where no branching or classical computations are supported. #[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] pub struct TargetCapabilityFlags: u32 { @@ -22,7 +22,7 @@ bitflags! { /// Supports statically sized arrays (i.e. array literals and array indexing with non-constant indices). const StaticSizedArrays = 0b0001_0000; /// Catch-all for high level language constructs not covered by other flags. New flags should be added above this one, - /// such that this flag is reserved for the "all capabilities" targets that can run anything the langauge can express. + /// such that this flag is reserved for the "all capabilities" targets that can run anything the language can express. const HigherLevelConstructs = 0b1000_0000; } }