From 0dcb9dee44eccc10f699b0b364c3727ebd1cd8e0 Mon Sep 17 00:00:00 2001 From: "Stefan J. Wernli" Date: Thu, 19 Mar 2026 23:40:53 -0700 Subject: [PATCH 1/2] Support emission of loops in `Adaptive_RIFLA` This change adds logic to support emission of loops into QIR when using the `Adaptive_RIFLA` profile. The strategy employed uses changes to both RCA and Partial Evaluation to accomplish this. In RCA, we add a new check that identifies loops whose condition is marked as static and speculatively marks them as dynamic to compute what capabilities they would require at runtime. If the resulting capabilities are compatible with the current target, the loop remains dynamic, otherwise the RCA state is reset and the loop is marked as static, as originally analyzed. In Partial Evaluation, new support for dynamic loops is added, where a loop marked as dynamic is emitted as a sequence of blocks with appropriate backward branches to capture the while-loop structure. This change includes new unit tests to verify analysis and emission of loop structures into RIR and new integration tests to verify the whole program structure and execution of QIR v2 with loops via qir-runner. 
--- source/compiler/qsc/benches/rca.rs | 9 +- source/compiler/qsc_codegen/src/qir.rs | 6 +- source/compiler/qsc_partial_eval/src/lib.rs | 108 +++- source/compiler/qsc_partial_eval/src/tests.rs | 30 +- .../qsc_partial_eval/src/tests/loops.rs | 597 +++++++++++++++++- .../compiler/qsc_passes/src/capabilitiesck.rs | 2 +- .../src/capabilitiesck/tests_common.rs | 20 +- source/compiler/qsc_rca/src/analyzer.rs | 16 +- source/compiler/qsc_rca/src/applications.rs | 16 +- source/compiler/qsc_rca/src/core.rs | 190 ++++-- source/compiler/qsc_rca/src/tests.rs | 11 +- .../compiler/qsc_rca/src/tests/intrinsics.rs | 75 --- source/compiler/qsc_rca/src/tests/loops.rs | 89 +++ .../resources/adaptive_rifla/input/RUS.qs | 14 + .../adaptive_rifla/output/ArithmeticOps.ll | 144 +++-- .../adaptive_rifla/output/ConstantFolding.ll | 37 +- .../adaptive_rifla/output/Doubles.ll | 244 ++----- .../adaptive_rifla/output/Doubles.out | 2 +- .../adaptive_rifla/output/ExpandedTests.ll | 21 + .../output/IntegerComparison.ll | 144 +---- .../output/IntegerComparison.out | 2 +- .../output/IntrinsicRotationsWithPeriod.ll | 63 +- .../resources/adaptive_rifla/output/RUS.ll | 70 ++ .../resources/adaptive_rifla/output/RUS.out | 10 + .../adaptive_rifla/output/SwitchHandling.ll | 38 +- .../output/ThreeQubitRepetitionCode.ll | 319 +++------- .../output/ThreeQubitRepetitionCode.out | 2 +- 27 files changed, 1394 insertions(+), 885 deletions(-) create mode 100644 source/pip/tests-integration/resources/adaptive_rifla/input/RUS.qs create mode 100644 source/pip/tests-integration/resources/adaptive_rifla/output/RUS.ll create mode 100644 source/pip/tests-integration/resources/adaptive_rifla/output/RUS.out diff --git a/source/compiler/qsc/benches/rca.rs b/source/compiler/qsc/benches/rca.rs index 9187dcf467..85f666f763 100644 --- a/source/compiler/qsc/benches/rca.rs +++ b/source/compiler/qsc/benches/rca.rs @@ -92,7 +92,7 @@ impl CompilationContext { } fn analyze_all(&mut self) { - let analyzer = 
Analyzer::init(&self.fir_store); + let analyzer = Analyzer::init(&self.fir_store, TargetCapabilityFlags::all()); let compute_properties = analyzer.analyze_all(); self.compute_properties = Some(compute_properties); } @@ -108,8 +108,11 @@ impl CompilationContext { package_compute_properties.clear(); // Analyze the open package without re-analyzing the other packages. - let analyzer = - Analyzer::init_with_compute_properties(&self.fir_store, compute_properties.clone()); + let analyzer = Analyzer::init_with_compute_properties( + &self.fir_store, + TargetCapabilityFlags::all(), + compute_properties.clone(), + ); self.compute_properties = Some(analyzer.analyze_package(open_package_id)); } diff --git a/source/compiler/qsc_codegen/src/qir.rs b/source/compiler/qsc_codegen/src/qir.rs index 95f3bfbd08..5efb55f353 100644 --- a/source/compiler/qsc_codegen/src/qir.rs +++ b/source/compiler/qsc_codegen/src/qir.rs @@ -65,7 +65,7 @@ pub fn fir_to_qir_from_callable( args: Value, ) -> Result { let compute_properties = compute_properties.unwrap_or_else(|| { - let analyzer = qsc_rca::Analyzer::init(fir_store); + let analyzer = qsc_rca::Analyzer::init(fir_store, capabilities); analyzer.analyze_all() }); @@ -97,7 +97,7 @@ pub fn fir_to_rir_from_callable( partial_eval_config: PartialEvalConfig, ) -> Result<(Program, Program), qsc_partial_eval::Error> { let compute_properties = compute_properties.unwrap_or_else(|| { - let analyzer = qsc_rca::Analyzer::init(fir_store); + let analyzer = qsc_rca::Analyzer::init(fir_store, capabilities); analyzer.analyze_all() }); @@ -122,7 +122,7 @@ fn get_rir_from_compilation( partial_eval_config: PartialEvalConfig, ) -> Result { let compute_properties = compute_properties.unwrap_or_else(|| { - let analyzer = qsc_rca::Analyzer::init(fir_store); + let analyzer = qsc_rca::Analyzer::init(fir_store, capabilities); analyzer.analyze_all() }); diff --git a/source/compiler/qsc_partial_eval/src/lib.rs b/source/compiler/qsc_partial_eval/src/lib.rs index 
da773d12f3..c8f912e0c0 100644 --- a/source/compiler/qsc_partial_eval/src/lib.rs +++ b/source/compiler/qsc_partial_eval/src/lib.rs @@ -2315,11 +2315,18 @@ impl<'a> PartialEvaluator<'a> { .get_current_scope() .get_hybrid_local_value(*local_var_id); - // Check whether the bound value is a mutable variable, and if so, return its value directly rather than - // the variable if it is static at this moment. + // Check whether the bound value is a mutable variable and we are not currently evaluating a branch. + // If so, return its value directly rather than the variable if it is static at this moment. if let Value::Var(var) = bound_value { let current_scope = self.eval_context.get_current_scope(); - if let Some(literal) = current_scope.get_static_value(var.id.into()) { + if let Some(literal) = current_scope.get_static_value(var.id.into()) + && (!current_scope.is_currently_evaluating_branch() + || !self + .program + .config + .capabilities + .contains(TargetCapabilityFlags::BackwardsBranching)) + { map_rir_literal_to_eval_value(*literal) } else { bound_value.clone() @@ -2337,6 +2344,18 @@ impl<'a> PartialEvaluator<'a> { condition_expr_id: ExprId, body_block_id: BlockId, ) -> Result { + if self + .program + .config + .capabilities + .contains(TargetCapabilityFlags::BackwardsBranching) + && !self.is_static_expr(condition_expr_id) + { + // If backwards branching is supported and the loop condition is not static, + // we can generate a while loop structure in RIR without unrolling the loop. + return self.eval_expr_emit_while(loop_expr_id, condition_expr_id, body_block_id); + } + // Verify assumptions: the condition expression must either static (such that it can be fully evaluated) or // dynamic but constant at runtime (such that it can be partially evaluated to a known value). 
assert!( @@ -2393,6 +2412,89 @@ impl<'a> PartialEvaluator<'a> { Ok(EvalControlFlow::Continue(Value::unit())) } + fn eval_expr_emit_while( + &mut self, + loop_expr_id: ExprId, + condition_expr_id: ExprId, + body_block_id: BlockId, + ) -> Result { + // Pop the current block node and create the necessary block nodes for the loop structure. + let current_block_node = self.eval_context.pop_block_node(); + let conditional_block_node_id = self.create_program_block(); + let conditional_block_node = BlockNode { + id: conditional_block_node_id, + successor: current_block_node.successor, + }; + let continuation_block_node_id = self.create_program_block(); + let continuation_block_node = BlockNode { + id: continuation_block_node_id, + successor: current_block_node.successor, + }; + self.eval_context.push_block_node(continuation_block_node); + + // Insert the jump instruction to the conditional block from the current block. + let jump_to_condition_ins = Instruction::Jump(conditional_block_node_id); + self.get_program_block_mut(current_block_node.id) + .0 + .push(jump_to_condition_ins); + + // In the conditional block, evaluate the condition expression and generate the branch instruction. + self.eval_context.push_block_node(conditional_block_node); + let condition_control_flow = self.try_eval_expr(condition_expr_id)?; + if condition_control_flow.is_return() { + return Err(Error::Unexpected( + "embedded return in loop condition".to_string(), + self.get_expr_package_span(condition_expr_id), + )); + } + let condition_value = condition_control_flow.into_value(); + + if let Value::Bool(false) = condition_value { + // If the condition is statically false, jump directly to the continuation block. 
+ let jump_to_continuation_ins = Instruction::Jump(continuation_block_node_id); + self.get_current_rir_block_mut() + .0 + .push(jump_to_continuation_ins); + let _ = self.eval_context.pop_block_node(); + return Ok(EvalControlFlow::Continue(Value::unit())); + } + + // Otherwise, branch to either the body block or the continuation block. + let body_block_node_id = self.create_program_block(); + let body_block_node = BlockNode { + id: body_block_node_id, + successor: Some(conditional_block_node_id), + }; + let condition_value_var = condition_value.unwrap_var(); + let condition_rir_var = map_eval_var_to_rir_var(condition_value_var); + let metadata = self.metadata_from_expr(loop_expr_id); + let branch_ins = Instruction::Branch( + condition_rir_var, + body_block_node_id, + continuation_block_node_id, + metadata, + ); + self.get_current_rir_block_mut().0.push(branch_ins); + let _ = self.eval_context.pop_block_node(); + + // In the body block, evaluate the loop body and jump back to the conditional block. 
+ self.eval_context.push_block_node(body_block_node); + let body_control_flow = self.try_eval_block(body_block_id)?; + if body_control_flow.is_return() { + return Err(Error::Unexpected( + "embedded return in loop body".to_string(), + self.get_expr_package_span(condition_expr_id), + )); + } + let jump_to_condition_ins = Instruction::Jump(conditional_block_node_id); + self.get_current_rir_block_mut() + .0 + .push(jump_to_condition_ins); + let _ = self.eval_context.pop_block_node(); + + Ok(EvalControlFlow::Continue(Value::unit())) + } + fn eval_result_as_bool_operand(&mut self, result: val::Result) -> Operand { match result { val::Result::Id(id) => { diff --git a/source/compiler/qsc_partial_eval/src/tests.rs b/source/compiler/qsc_partial_eval/src/tests.rs index 38dae7065c..d32db7eb31 100644 --- a/source/compiler/qsc_partial_eval/src/tests.rs +++ b/source/compiler/qsc_partial_eval/src/tests.rs @@ -67,17 +67,7 @@ pub fn assert_error(error: &Error, expected_error: &Expect) { #[must_use] pub fn get_partial_evaluation_error(source: &str) -> Error { - let maybe_program = compile_and_partially_evaluate( - source, - TargetCapabilityFlags::all(), - PartialEvalConfig { - generate_debug_metadata: false, - }, - ); - match maybe_program { - Ok(_) => panic!("partial evaluation succeeded"), - Err(error) => error, - } + get_partial_evaluation_error_with_capabilities(source, Profile::AdaptiveRIF.into()) } #[must_use] @@ -100,21 +90,7 @@ pub fn get_partial_evaluation_error_with_capabilities( #[must_use] pub fn get_rir_program(source: &str) -> Program { - let maybe_program = compile_and_partially_evaluate( - source, - Profile::AdaptiveRIF.into(), - PartialEvalConfig { - generate_debug_metadata: false, - }, - ); - match maybe_program { - Ok(program) => { - // Verify the program can go through transformations. 
- check_and_transform(&mut program.clone()); - program - } - Err(error) => panic!("partial evaluation failed: {error:?}"), - } + get_rir_program_with_capabilities(source, Profile::AdaptiveRIF.into()) } #[must_use] @@ -242,7 +218,7 @@ impl CompilationContext { .expect("should be able to create a new compiler"); let package_id = map_hir_package_to_fir(compiler.source_package_id()); let fir_store = lower_hir_package_store(compiler.package_store()); - let analyzer = Analyzer::init(&fir_store); + let analyzer = Analyzer::init(&fir_store, capabilities); let compute_properties = analyzer.analyze_all(); let package = fir_store.get(package_id); let entry = ProgramEntry { diff --git a/source/compiler/qsc_partial_eval/src/tests/loops.rs b/source/compiler/qsc_partial_eval/src/tests/loops.rs index 6fd0e95f0d..efb26b73a6 100644 --- a/source/compiler/qsc_partial_eval/src/tests/loops.rs +++ b/source/compiler/qsc_partial_eval/src/tests/loops.rs @@ -1,13 +1,16 @@ // Copyright (c) Microsoft Corporation. // Licensed under the MIT License. +use crate::tests::get_rir_program_with_capabilities; + use super::{assert_block_instructions, assert_blocks, assert_callable, get_rir_program}; use expect_test::expect; use indoc::indoc; +use qsc_data_structures::target::TargetCapabilityFlags; use qsc_rir::rir::{BlockId, CallableId}; #[test] -fn unitary_call_within_a_for_loop() { +fn unitary_call_within_a_for_loop_unrolled() { let program = get_rir_program(indoc! { r#" namespace Test { @@ -55,7 +58,70 @@ fn unitary_call_within_a_for_loop() { } #[test] -fn unitary_call_within_a_while_loop() { +fn unitary_call_within_a_for_loop() { + let program = get_rir_program_with_capabilities( + indoc! 
{ + r#" + namespace Test { + operation op(q : Qubit) : Unit { body intrinsic; } + @EntryPoint() + operation Main() : Unit { + use q = Qubit(); + for _ in 1..3 { + op(q); + } + } + } + "#, + }, + TargetCapabilityFlags::Adaptive + | TargetCapabilityFlags::IntegerComputations + | TargetCapabilityFlags::BackwardsBranching, + ); + + let op_callable_id = CallableId(1); + assert_callable( + &program, + op_callable_id, + &expect![[r#" + Callable: + name: __quantum__rt__initialize + call_type: Regular + input_type: + [0]: Pointer + output_type: + body: "#]], + ); + assert_blocks( + &program, + &expect![[r#" + Blocks: + Block 0:Block: + Call id(1), args( Pointer, ) + Variable(0, Integer) = Store Integer(1) + Jump(1) + Block 1:Block: + Variable(1, Boolean) = Icmp Sle, Variable(0, Integer), Integer(3) + Variable(2, Boolean) = Store Bool(true) + Branch Variable(1, Boolean), 3, 4 + Block 2:Block: + Call id(3), args( Integer(0), Tag(0, 3), ) + Return + Block 3:Block: + Branch Variable(2, Boolean), 5, 2 + Block 4:Block: + Variable(2, Boolean) = Store Bool(false) + Jump(3) + Block 5:Block: + Call id(2), args( Qubit(0), ) + Variable(3, Integer) = Add Variable(0, Integer), Integer(1) + Variable(0, Integer) = Store Variable(3, Integer) + Jump(1)"#]], + ); +} + +#[test] +fn unitary_call_within_a_while_loop_unrolled() { let program = get_rir_program(indoc! { r#" namespace Test { @@ -105,7 +171,66 @@ fn unitary_call_within_a_while_loop() { } #[test] -fn unitary_call_within_a_repeat_until_loop() { +fn unitary_call_within_a_while_loop() { + let program = get_rir_program_with_capabilities( + indoc! 
{ + r#" + namespace Test { + operation op(q : Qubit) : Unit { body intrinsic; } + @EntryPoint() + operation Main() : Unit { + use q = Qubit(); + mutable idx = 0; + while idx < 3 { + op(q); + set idx += 1; + } + } + } + "#, + }, + TargetCapabilityFlags::Adaptive + | TargetCapabilityFlags::IntegerComputations + | TargetCapabilityFlags::BackwardsBranching, + ); + + let rotation_callable_id = CallableId(1); + assert_callable( + &program, + rotation_callable_id, + &expect![[r#" + Callable: + name: __quantum__rt__initialize + call_type: Regular + input_type: + [0]: Pointer + output_type: + body: "#]], + ); + assert_blocks( + &program, + &expect![[r#" + Blocks: + Block 0:Block: + Call id(1), args( Pointer, ) + Variable(0, Integer) = Store Integer(0) + Jump(1) + Block 1:Block: + Variable(1, Boolean) = Icmp Slt, Variable(0, Integer), Integer(3) + Branch Variable(1, Boolean), 3, 2 + Block 2:Block: + Call id(3), args( Integer(0), Tag(0, 3), ) + Return + Block 3:Block: + Call id(2), args( Qubit(0), ) + Variable(2, Integer) = Add Variable(0, Integer), Integer(1) + Variable(0, Integer) = Store Variable(2, Integer) + Jump(1)"#]], + ); +} + +#[test] +fn unitary_call_within_a_repeat_until_loop_unrolled() { let program = get_rir_program(indoc! { r#" namespace Test { @@ -159,7 +284,69 @@ fn unitary_call_within_a_repeat_until_loop() { } #[test] -fn rotation_call_within_a_for_loop() { +fn unitary_call_within_a_repeat_until_loop() { + let program = get_rir_program_with_capabilities( + indoc! 
{ + r#" + namespace Test { + operation op(q : Qubit) : Unit { body intrinsic; } + @EntryPoint() + operation Main() : Unit { + use q = Qubit(); + mutable idx = 0; + repeat { + op(q); + set idx += 1; + } until idx >= 3; + } + } + "#, + }, + TargetCapabilityFlags::Adaptive + | TargetCapabilityFlags::IntegerComputations + | TargetCapabilityFlags::BackwardsBranching, + ); + + let op_callable_id = CallableId(1); + assert_callable( + &program, + op_callable_id, + &expect![[r#" + Callable: + name: __quantum__rt__initialize + call_type: Regular + input_type: + [0]: Pointer + output_type: + body: "#]], + ); + assert_blocks( + &program, + &expect![[r#" + Blocks: + Block 0:Block: + Call id(1), args( Pointer, ) + Variable(0, Integer) = Store Integer(0) + Variable(1, Boolean) = Store Bool(true) + Jump(1) + Block 1:Block: + Branch Variable(1, Boolean), 3, 2 + Block 2:Block: + Call id(3), args( Integer(0), Tag(0, 3), ) + Return + Block 3:Block: + Call id(2), args( Qubit(0), ) + Variable(2, Integer) = Add Variable(0, Integer), Integer(1) + Variable(0, Integer) = Store Variable(2, Integer) + Variable(3, Boolean) = Icmp Sge, Variable(0, Integer), Integer(3) + Variable(4, Boolean) = LogicalNot Variable(3, Boolean) + Variable(1, Boolean) = Store Variable(4, Boolean) + Jump(1)"#]], + ); +} + +#[test] +fn rotation_call_within_a_for_loop_unrolled() { let program = get_rir_program(indoc! { r#" namespace Test { @@ -207,7 +394,7 @@ fn rotation_call_within_a_for_loop() { } #[test] -fn rotation_call_within_a_while_loop() { +fn rotation_call_within_a_while_loop_unrolled() { let program = get_rir_program(indoc! { r#" namespace Test { @@ -258,7 +445,7 @@ fn rotation_call_within_a_while_loop() { } #[test] -fn rotation_call_within_a_repeat_until_loop() { +fn rotation_call_within_a_repeat_until_loop_unrolled() { let program = get_rir_program(indoc! 
{ r#" namespace Test { @@ -313,7 +500,7 @@ fn rotation_call_within_a_repeat_until_loop() { } #[test] -fn mutable_bool_updated_in_loop() { +fn mutable_bool_updated_in_loop_unrolled() { let program = get_rir_program(indoc! { r#" namespace Test { @@ -350,7 +537,68 @@ fn mutable_bool_updated_in_loop() { } #[test] -fn mutable_int_updated_in_loop() { +fn mutable_bool_updated_in_loop() { + let program = get_rir_program_with_capabilities( + indoc! { + r#" + namespace Test { + @EntryPoint() + operation Main() : Unit { + use q = Qubit(); + mutable flag = false; + for _ in 1..3 { + if not flag { + set flag = MResetZ(q) == One; + } + } + } + } + "#, + }, + TargetCapabilityFlags::Adaptive + | TargetCapabilityFlags::IntegerComputations + | TargetCapabilityFlags::BackwardsBranching, + ); + + assert_blocks( + &program, + &expect![[r#" + Blocks: + Block 0:Block: + Call id(1), args( Pointer, ) + Variable(0, Boolean) = Store Bool(false) + Variable(1, Integer) = Store Integer(1) + Jump(1) + Block 1:Block: + Variable(2, Boolean) = Icmp Sle, Variable(1, Integer), Integer(3) + Variable(3, Boolean) = Store Bool(true) + Branch Variable(2, Boolean), 3, 4 + Block 2:Block: + Call id(4), args( Integer(0), Tag(0, 3), ) + Return + Block 3:Block: + Branch Variable(3, Boolean), 5, 2 + Block 4:Block: + Variable(3, Boolean) = Store Bool(false) + Jump(3) + Block 5:Block: + Variable(4, Boolean) = LogicalNot Variable(0, Boolean) + Branch Variable(4, Boolean), 7, 6 + Block 6:Block: + Variable(7, Integer) = Add Variable(1, Integer), Integer(1) + Variable(1, Integer) = Store Variable(7, Integer) + Jump(1) + Block 7:Block: + Call id(2), args( Qubit(0), Result(0), ) + Variable(5, Boolean) = Call id(3), args( Result(0), ) + Variable(6, Boolean) = Store Variable(5, Boolean) + Variable(0, Boolean) = Store Variable(6, Boolean) + Jump(6)"#]], + ); +} + +#[test] +fn mutable_int_updated_in_loop_unrolled() { let program = get_rir_program(indoc! 
{ r#" namespace Test { @@ -384,7 +632,75 @@ fn mutable_int_updated_in_loop() { } #[test] -fn mutable_double_updated_in_loop() { +fn mutable_int_updated_in_loop() { + let program = get_rir_program_with_capabilities( + indoc! { + r#" + namespace Test { + @EntryPoint() + operation Main() : Unit { + use q = Qubit(); + mutable count = 1; + for _ in 1..3 { + if count > 0 and MResetZ(q) == One { + set count = -count; + } + } + } + } + "#, + }, + TargetCapabilityFlags::Adaptive + | TargetCapabilityFlags::IntegerComputations + | TargetCapabilityFlags::BackwardsBranching, + ); + + assert_blocks( + &program, + &expect![[r#" + Blocks: + Block 0:Block: + Call id(1), args( Pointer, ) + Variable(0, Integer) = Store Integer(1) + Variable(1, Integer) = Store Integer(1) + Jump(1) + Block 1:Block: + Variable(2, Boolean) = Icmp Sle, Variable(1, Integer), Integer(3) + Variable(3, Boolean) = Store Bool(true) + Branch Variable(2, Boolean), 3, 4 + Block 2:Block: + Call id(4), args( Integer(0), Tag(0, 3), ) + Return + Block 3:Block: + Branch Variable(3, Boolean), 5, 2 + Block 4:Block: + Variable(3, Boolean) = Store Bool(false) + Jump(3) + Block 5:Block: + Variable(4, Boolean) = Icmp Sgt, Variable(0, Integer), Integer(0) + Variable(5, Boolean) = Store Bool(false) + Branch Variable(4, Boolean), 7, 6 + Block 6:Block: + Branch Variable(5, Boolean), 9, 8 + Block 7:Block: + Call id(2), args( Qubit(0), Result(0), ) + Variable(6, Boolean) = Call id(3), args( Result(0), ) + Variable(7, Boolean) = Store Variable(6, Boolean) + Variable(5, Boolean) = Store Variable(7, Boolean) + Jump(6) + Block 8:Block: + Variable(9, Integer) = Add Variable(1, Integer), Integer(1) + Variable(1, Integer) = Store Variable(9, Integer) + Jump(1) + Block 9:Block: + Variable(8, Integer) = Mul Integer(-1), Variable(0, Integer) + Variable(0, Integer) = Store Variable(8, Integer) + Jump(8)"#]], + ); +} + +#[test] +fn mutable_double_updated_in_loop_unrolled() { let program = get_rir_program(indoc! 
{ r#" namespace Test { @@ -459,7 +775,76 @@ fn mutable_double_updated_in_loop() { } #[test] -fn result_array_index_range_in_for_loop() { +fn mutable_double_updated_in_loop() { + let program = get_rir_program_with_capabilities( + indoc! { + r#" + namespace Test { + @EntryPoint() + operation Main() : Unit { + use q = Qubit(); + mutable count = 1.1; + for _ in 1..3 { + if count > 0.1 and MResetZ(q) == One { + set count = -count; + } + } + } + } + "#, + }, + TargetCapabilityFlags::Adaptive + | TargetCapabilityFlags::IntegerComputations + | TargetCapabilityFlags::FloatingPointComputations + | TargetCapabilityFlags::BackwardsBranching, + ); + + assert_blocks( + &program, + &expect![[r#" + Blocks: + Block 0:Block: + Call id(1), args( Pointer, ) + Variable(0, Double) = Store Double(1.1) + Variable(1, Integer) = Store Integer(1) + Jump(1) + Block 1:Block: + Variable(2, Boolean) = Icmp Sle, Variable(1, Integer), Integer(3) + Variable(3, Boolean) = Store Bool(true) + Branch Variable(2, Boolean), 3, 4 + Block 2:Block: + Call id(4), args( Integer(0), Tag(0, 3), ) + Return + Block 3:Block: + Branch Variable(3, Boolean), 5, 2 + Block 4:Block: + Variable(3, Boolean) = Store Bool(false) + Jump(3) + Block 5:Block: + Variable(4, Boolean) = Fcmp Ogt, Variable(0, Double), Double(0.1) + Variable(5, Boolean) = Store Bool(false) + Branch Variable(4, Boolean), 7, 6 + Block 6:Block: + Branch Variable(5, Boolean), 9, 8 + Block 7:Block: + Call id(2), args( Qubit(0), Result(0), ) + Variable(6, Boolean) = Call id(3), args( Result(0), ) + Variable(7, Boolean) = Store Variable(6, Boolean) + Variable(5, Boolean) = Store Variable(7, Boolean) + Jump(6) + Block 8:Block: + Variable(9, Integer) = Add Variable(1, Integer), Integer(1) + Variable(1, Integer) = Store Variable(9, Integer) + Jump(1) + Block 9:Block: + Variable(8, Double) = Fmul Double(-1), Variable(0, Double) + Variable(0, Double) = Store Variable(8, Double) + Jump(8)"#]], + ); +} + +#[test] +fn 
result_array_index_range_in_for_loop_unrolled() { let program = get_rir_program(indoc! {r#" namespace Test { @EntryPoint() @@ -560,3 +945,195 @@ fn result_array_index_range_in_for_loop() { [0]: 0_i "#]].assert_eq(&program.to_string()); } + +#[test] +fn dynamic_while_loop() { + let program = get_rir_program_with_capabilities( + indoc! { + r#" + namespace Test { + @EntryPoint() + operation Main() : Unit { + use q = Qubit(); + while MResetX(q) == One {} + } + } + "#, + }, + TargetCapabilityFlags::Adaptive | TargetCapabilityFlags::BackwardsBranching, + ); + + assert_blocks( + &program, + &expect![[r#" + Blocks: + Block 0:Block: + Call id(1), args( Pointer, ) + Jump(1) + Block 1:Block: + Call id(2), args( Qubit(0), ) + Call id(3), args( Qubit(0), Result(0), ) + Variable(0, Boolean) = Call id(4), args( Result(0), ) + Variable(1, Boolean) = Store Variable(0, Boolean) + Branch Variable(1, Boolean), 3, 2 + Block 2:Block: + Call id(5), args( Integer(0), Tag(0, 3), ) + Return + Block 3:Block: + Jump(1)"#]], + ); +} + +#[test] +fn dynamic_repeat_until_loop() { + let program = get_rir_program_with_capabilities( + indoc! 
{ + r#" + namespace Test { + @EntryPoint() + operation Main() : Unit { + use q = Qubit(); + repeat { + H(q); + } until MResetZ(q) == One; + } + } + "#, + }, + TargetCapabilityFlags::Adaptive | TargetCapabilityFlags::BackwardsBranching, + ); + + assert_blocks( + &program, + &expect![[r#" + Blocks: + Block 0:Block: + Call id(1), args( Pointer, ) + Variable(0, Boolean) = Store Bool(true) + Jump(1) + Block 1:Block: + Branch Variable(0, Boolean), 3, 2 + Block 2:Block: + Call id(5), args( Integer(0), Tag(0, 3), ) + Return + Block 3:Block: + Call id(2), args( Qubit(0), ) + Call id(3), args( Qubit(0), Result(0), ) + Variable(1, Boolean) = Call id(4), args( Result(0), ) + Variable(2, Boolean) = Store Variable(1, Boolean) + Variable(3, Boolean) = LogicalNot Variable(2, Boolean) + Variable(0, Boolean) = Store Variable(3, Boolean) + Jump(1)"#]], + ); +} + +#[test] +fn dynamic_repeat_until_fixup_loop() { + let program = get_rir_program_with_capabilities( + indoc! { + r#" + namespace Test { + @EntryPoint() + operation Main() : Unit { + use q = Qubit(); + repeat { + H(q); + } until M(q) == Zero + fixup { + X(q); + } + } + } + "#, + }, + TargetCapabilityFlags::Adaptive | TargetCapabilityFlags::BackwardsBranching, + ); + + assert_blocks( + &program, + &expect![[r#" + Blocks: + Block 0:Block: + Call id(1), args( Pointer, ) + Variable(0, Boolean) = Store Bool(true) + Jump(1) + Block 1:Block: + Branch Variable(0, Boolean), 3, 2 + Block 2:Block: + Call id(6), args( Integer(0), Tag(0, 3), ) + Return + Block 3:Block: + Call id(2), args( Qubit(0), ) + Call id(3), args( Qubit(0), Result(0), ) + Variable(1, Boolean) = Call id(4), args( Result(0), ) + Variable(2, Boolean) = Icmp Eq, Variable(1, Boolean), Bool(false) + Variable(3, Boolean) = LogicalNot Variable(2, Boolean) + Variable(0, Boolean) = Store Variable(3, Boolean) + Branch Variable(0, Boolean), 5, 4 + Block 4:Block: + Jump(1) + Block 5:Block: + Call id(5), args( Qubit(0), ) + Jump(4)"#]], + ); +} + +#[test] +fn dynamic_nested_loop() 
{ + let program = get_rir_program_with_capabilities( + indoc! { + r#" + namespace Test { + @EntryPoint() + operation Main() : Unit { + use q = Qubit(); + repeat { + while MResetX(q) == One {} + } until M(q) == Zero + fixup { + X(q); + } + } + } + "#, + }, + TargetCapabilityFlags::Adaptive | TargetCapabilityFlags::BackwardsBranching, + ); + + assert_blocks( + &program, + &expect![[r#" + Blocks: + Block 0:Block: + Call id(1), args( Pointer, ) + Variable(0, Boolean) = Store Bool(true) + Jump(1) + Block 1:Block: + Branch Variable(0, Boolean), 3, 2 + Block 2:Block: + Call id(7), args( Integer(0), Tag(0, 3), ) + Return + Block 3:Block: + Jump(4) + Block 4:Block: + Call id(2), args( Qubit(0), ) + Call id(3), args( Qubit(0), Result(0), ) + Variable(1, Boolean) = Call id(4), args( Result(0), ) + Variable(2, Boolean) = Store Variable(1, Boolean) + Branch Variable(2, Boolean), 6, 5 + Block 5:Block: + Call id(5), args( Qubit(0), Result(1), ) + Variable(3, Boolean) = Call id(4), args( Result(1), ) + Variable(4, Boolean) = Icmp Eq, Variable(3, Boolean), Bool(false) + Variable(5, Boolean) = LogicalNot Variable(4, Boolean) + Variable(0, Boolean) = Store Variable(5, Boolean) + Branch Variable(0, Boolean), 8, 7 + Block 6:Block: + Jump(4) + Block 7:Block: + Jump(1) + Block 8:Block: + Call id(6), args( Qubit(0), ) + Jump(7)"#]], + ); +} diff --git a/source/compiler/qsc_passes/src/capabilitiesck.rs b/source/compiler/qsc_passes/src/capabilitiesck.rs index 70448758b2..2bf707903d 100644 --- a/source/compiler/qsc_passes/src/capabilitiesck.rs +++ b/source/compiler/qsc_passes/src/capabilitiesck.rs @@ -53,7 +53,7 @@ pub fn run_rca_pass( package_id: qsc_fir::fir::PackageId, capabilities: TargetCapabilityFlags, ) -> Result> { - let analyzer = Analyzer::init(fir_store); + let analyzer = Analyzer::init(fir_store, capabilities); let compute_properties = analyzer.analyze_all(); let fir_package = fir_store.get(package_id); diff --git a/source/compiler/qsc_passes/src/capabilitiesck/tests_common.rs 
b/source/compiler/qsc_passes/src/capabilitiesck/tests_common.rs index 7d3371fcc6..0d557b02c3 100644 --- a/source/compiler/qsc_passes/src/capabilitiesck/tests_common.rs +++ b/source/compiler/qsc_passes/src/capabilitiesck/tests_common.rs @@ -14,7 +14,7 @@ use qsc_lowerer::{Lowerer, map_hir_package_to_fir}; use qsc_rca::{Analyzer, PackageComputeProperties, PackageStoreComputeProperties}; pub fn check(source: &str, expect: &Expect, capabilities: TargetCapabilityFlags) { - let compilation_context = CompilationContext::new(source); + let compilation_context = CompilationContext::new(source, capabilities); let (package, compute_properties) = compilation_context.get_package_compute_properties_tuple(); let errors = check_supported_capabilities( package, @@ -26,7 +26,7 @@ pub fn check(source: &str, expect: &Expect, capabilities: TargetCapabilityFlags) } pub fn check_for_exe(source: &str, expect: &Expect, capabilities: TargetCapabilityFlags) { - let compilation_context = CompilationContext::new_for_exe(source); + let compilation_context = CompilationContext::new_for_exe(source, capabilities); let (package, compute_properties) = compilation_context.get_package_compute_properties_tuple(); let errors = check_supported_capabilities( package, @@ -56,13 +56,13 @@ struct CompilationContext { } impl CompilationContext { - fn new(source: &str) -> Self { + fn new(source: &str, capabilities: TargetCapabilityFlags) -> Self { let mut store = qsc::PackageStore::new(qsc::compile::core()); - let std_id = store.insert(qsc::compile::std(&store, TargetCapabilityFlags::all())); + let std_id = store.insert(qsc::compile::std(&store, capabilities)); let mut compiler = Compiler::new( SourceMap::default(), PackageType::Lib, - TargetCapabilityFlags::all(), + capabilities, LanguageFeatures::default(), store, &[(std_id, None)], @@ -75,7 +75,7 @@ impl CompilationContext { compiler.update(increment); let mut lowerer = Lowerer::new(); let fir_store = lower_hir_package_store(&mut lowerer, 
compiler.package_store()); - let analyzer = Analyzer::init(&fir_store); + let analyzer = Analyzer::init(&fir_store, capabilities); let compute_properties = analyzer.analyze_all(); Self { fir_store, @@ -84,12 +84,12 @@ impl CompilationContext { } } - fn new_for_exe(source: &str) -> Self { - let (std_id, store) = qsc::compile::package_store_with_stdlib(TargetCapabilityFlags::all()); + fn new_for_exe(source: &str, capabilities: TargetCapabilityFlags) -> Self { + let (std_id, store) = qsc::compile::package_store_with_stdlib(capabilities); let compiler = Compiler::new( SourceMap::new([("test".into(), source.into())], Some("".into())), PackageType::Exe, - TargetCapabilityFlags::all(), + capabilities, LanguageFeatures::default(), store, &[(std_id, None)], @@ -98,7 +98,7 @@ impl CompilationContext { let package_id = map_hir_package_to_fir(compiler.source_package_id()); let mut lowerer = Lowerer::new(); let fir_store = lower_hir_package_store(&mut lowerer, compiler.package_store()); - let analyzer = Analyzer::init(&fir_store); + let analyzer = Analyzer::init(&fir_store, capabilities); let compute_properties = analyzer.analyze_all(); Self { fir_store, diff --git a/source/compiler/qsc_rca/src/analyzer.rs b/source/compiler/qsc_rca/src/analyzer.rs index 30b50b24a9..94f9d68cd6 100644 --- a/source/compiler/qsc_rca/src/analyzer.rs +++ b/source/compiler/qsc_rca/src/analyzer.rs @@ -5,31 +5,39 @@ use crate::{ PackageStoreComputeProperties, core, cyclic_callables, overrider::Overrider, scaffolding::InternalPackageStoreComputeProperties, }; +use qsc_data_structures::target::TargetCapabilityFlags; use qsc_fir::fir::{PackageId, PackageStore}; /// A runtime capabilities analyzer. 
pub struct Analyzer<'a> { package_store: &'a PackageStore, scaffolding: InternalPackageStoreComputeProperties, + target_capabilities: TargetCapabilityFlags, } impl<'a> Analyzer<'a> { #[must_use] - pub fn init(package_store: &'a PackageStore) -> Self { + pub fn init( + package_store: &'a PackageStore, + target_capabilities: TargetCapabilityFlags, + ) -> Self { Self { package_store, scaffolding: InternalPackageStoreComputeProperties::init(package_store), + target_capabilities, } } #[must_use] pub fn init_with_compute_properties( package_store: &'a PackageStore, + target_capabilities: TargetCapabilityFlags, package_store_compute_properties: PackageStoreComputeProperties, ) -> Self { Self { package_store, scaffolding: package_store_compute_properties.into(), + target_capabilities, } } @@ -46,7 +54,8 @@ impl<'a> Analyzer<'a> { let scaffolding = cyclic_callables_analyzer.analyze_all(); // Now we can safely analyze the rest of the items. - let core_analyzer = core::Analyzer::new(self.package_store, scaffolding); + let core_analyzer = + core::Analyzer::new(self.package_store, scaffolding, self.target_capabilities); core_analyzer.analyze_all().into() } @@ -57,7 +66,8 @@ impl<'a> Analyzer<'a> { let cyclic_callables_analyzer = cyclic_callables::Analyzer::new(self.package_store, self.scaffolding); let scaffolding = cyclic_callables_analyzer.analyze_package(package_id); - let core_analyzer = core::Analyzer::new(self.package_store, scaffolding); + let core_analyzer = + core::Analyzer::new(self.package_store, scaffolding, self.target_capabilities); core_analyzer.analyze_package(package_id).into() } } diff --git a/source/compiler/qsc_rca/src/applications.rs b/source/compiler/qsc_rca/src/applications.rs index aacd0417e5..be1a3786af 100644 --- a/source/compiler/qsc_rca/src/applications.rs +++ b/source/compiler/qsc_rca/src/applications.rs @@ -433,14 +433,26 @@ impl ApplicationInstance { self.blocks.insert(id, value); } + pub fn reset_block_compute_kind(&mut self, id: BlockId) { + 
self.blocks.remove(&id); + } + pub fn insert_expr_compute_kind(&mut self, id: ExprId, value: ComputeKind) { self.exprs.insert(id, value); } + pub fn reset_expr_compute_kind(&mut self, id: ExprId) { + self.exprs.remove(&id); + } + pub fn insert_stmt_compute_kind(&mut self, id: StmtId, value: ComputeKind) { self.stmts.insert(id, value); } + pub fn reset_stmt_compute_kind(&mut self, id: StmtId) { + self.stmts.remove(&id); + } + fn new( input_params: &Vec, controls: Option<&Local>, @@ -556,7 +568,7 @@ impl ApplicationInstance { } } -#[derive(Debug, Default)] +#[derive(Debug, Default, Clone)] pub struct LocalsComputeKindMap(IndexMap); impl LocalsLookup for LocalsComputeKindMap { @@ -585,7 +597,7 @@ impl LocalsComputeKindMap { } } -#[derive(Debug)] +#[derive(Debug, Clone)] pub struct LocalComputeKind { pub local: Local, pub compute_kind: ComputeKind, diff --git a/source/compiler/qsc_rca/src/core.rs b/source/compiler/qsc_rca/src/core.rs index 3ecc4ec58b..52f250f317 100644 --- a/source/compiler/qsc_rca/src/core.rs +++ b/source/compiler/qsc_rca/src/core.rs @@ -9,9 +9,12 @@ use crate::{ AssignmentStmtCounter, Callee, FunctorAppExt, GlobalSpecId, Local, LocalKind, try_resolve_callee, }, + errors::get_missing_runtime_features, scaffolding::{InternalItemComputeProperties, InternalPackageStoreComputeProperties}, }; -use qsc_data_structures::{functors::FunctorApp, index_map::IndexMap}; +use qsc_data_structures::{ + functors::FunctorApp, index_map::IndexMap, target::TargetCapabilityFlags, +}; use qsc_fir::{ extensions::InputParam, fir::{ @@ -22,24 +25,27 @@ use qsc_fir::{ StringComponent, }, ty::{Arrow, FunctorSetValue, Prim, Ty}, - visit::{Visitor, walk_stmt}, + visit::{Visitor, walk_block, walk_expr, walk_stmt}, }; pub struct Analyzer<'a> { package_store: &'a PackageStore, package_store_compute_properties: InternalPackageStoreComputeProperties, active_contexts: Vec, + target_capabilities: TargetCapabilityFlags, } impl<'a> Analyzer<'a> { pub fn new( package_store: &'a 
PackageStore, package_store_compute_properties: InternalPackageStoreComputeProperties, + target_capabilities: TargetCapabilityFlags, ) -> Self { Self { package_store, package_store_compute_properties, active_contexts: Vec::::default(), + target_capabilities, } } @@ -1054,61 +1060,99 @@ impl<'a> Analyzer<'a> { } fn analyze_expr_while(&mut self, condition_expr_id: ExprId, block_id: BlockId) -> ComputeKind { - // Visit the condition expression to determine its initial compute kind. - self.visit_expr(condition_expr_id); - let application_instance = self.get_current_application_instance_mut(); - let mut condition_expr_compute_kind = - *application_instance.get_expr_compute_kind(condition_expr_id); + let mut should_emit_classical_loop = self.should_emit_classical_loops(); + let mut cached_locals_map = if should_emit_classical_loop { + Some(self.get_current_application_instance().locals_map.clone()) + } else { + None + }; - // We analyze both the condition expression and the block N times, where N is the analysis stabilization limit. - // The reason why we need a stabilization limit is because there can be up-to N levels of indirection for the - // condition due to variable assignments. - // The number of statements with assignments in the condition expression and the loop block is a good proxy for - // the worst case scenario regarding the propagation of properties throughout variables. Because of this, we use - // it as the stabilization limit. - let package_id = self.get_current_package_id(); - let package = self.package_store.get(package_id); - let stabilization_limit = AssignmentStmtCounter::new(package).count_in_block(block_id) - + AssignmentStmtCounter::new(package).count_in_expr(condition_expr_id); - for _ in 0..=stabilization_limit { - // If the condition expression is dynamic, we push a new dynamic scope before visiting the block. 
+ let (condition_expr_compute_kind, mut compute_kind) = loop { + // Visit the condition expression to determine its initial compute kind. + self.visit_expr(condition_expr_id); let application_instance = self.get_current_application_instance_mut(); - condition_expr_compute_kind = + let mut condition_expr_compute_kind = *application_instance.get_expr_compute_kind(condition_expr_id); - let within_dynamic_scope = condition_expr_compute_kind.is_variable_value_kind(); - if within_dynamic_scope { - application_instance - .active_dynamic_scopes - .push(condition_expr_id); - } - self.visit_expr(condition_expr_id); - self.visit_block(block_id); - if within_dynamic_scope { + + let package_id = self.get_current_package_id(); + let package = self.package_store.get(package_id); + + // We analyze both the condition expression and the block N times, where N is the analysis stabilization limit. + // The reason why we need a stabilization limit is because there can be up-to N levels of indirection for the + // condition due to variable assignments. + // The number of statements with assignments in the condition expression and the loop block is a good proxy for + // the worst case scenario regarding the propagation of properties throughout variables. Because of this, we use + // it as the stabilization limit. + let stabilization_limit = AssignmentStmtCounter::new(package).count_in_block(block_id) + + AssignmentStmtCounter::new(package).count_in_expr(condition_expr_id); + for _ in 0..=stabilization_limit { + // If the condition expression is a variable value kind + // OR + // we are trying to emit loops and the block is dynamic, + // then we push a new dynamic scope before visiting the block. 
let application_instance = self.get_current_application_instance_mut(); - let dynamic_scope_expr_id = application_instance - .active_dynamic_scopes - .pop() - .expect("at least one dynamic scope should exist"); - assert!(dynamic_scope_expr_id == condition_expr_id); + condition_expr_compute_kind = + *application_instance.get_expr_compute_kind(condition_expr_id); + let within_dynamic_scope = condition_expr_compute_kind.is_variable_value_kind() + || (should_emit_classical_loop + && application_instance + .find_block_compute_kind(block_id) + .is_some_and(|k| !matches!(k, ComputeKind::Static))); + if within_dynamic_scope { + application_instance + .active_dynamic_scopes + .push(condition_expr_id); + } + self.visit_expr(condition_expr_id); + self.visit_block(block_id); + if within_dynamic_scope { + let application_instance = self.get_current_application_instance_mut(); + let dynamic_scope_expr_id = application_instance + .active_dynamic_scopes + .pop() + .expect("at least one dynamic scope should exist"); + assert!(dynamic_scope_expr_id == condition_expr_id); + } } - } - // Return the aggregated runtime features of the condition expression and the block. - let application_instance = self.get_current_application_instance(); - let block_compute_kind = *application_instance.get_block_compute_kind(block_id); - let mut compute_kind = ComputeKind::Static; - compute_kind = compute_kind - .aggregate_runtime_features(condition_expr_compute_kind, ValueKind::Constant); - compute_kind = - compute_kind.aggregate_runtime_features(block_compute_kind, ValueKind::Constant); + // Return the aggregated runtime features of the condition expression and the block. 
+ let application_instance = self.get_current_application_instance(); + let block_compute_kind = *application_instance.get_block_compute_kind(block_id); + let default_value_kind = ValueKind::Constant; + let mut compute_kind = ComputeKind::Static; + compute_kind = compute_kind + .aggregate_runtime_features(condition_expr_compute_kind, default_value_kind); + compute_kind = + compute_kind.aggregate_runtime_features(block_compute_kind, default_value_kind); + + // If the resulting compute kind with dynamic loops would produce errors with the current target capabilities, + // we need to fall back to marking the loop as classical and re-analyzing. + if should_emit_classical_loop + && let ComputeKind::Dynamic { + runtime_features, .. + } = &compute_kind + && !get_missing_runtime_features(*runtime_features, self.target_capabilities) + .is_empty() + { + // Revert the calculated compute kinds and re-analyze marking the loop. + ClearComputeKinds::new(self).visit_expr(condition_expr_id); + ClearComputeKinds::new(self).visit_block(block_id); + self.get_current_application_instance_mut().locals_map = cached_locals_map.take().expect( + "cached locals map should exist when re-analyzing while loop with classical emission", + ); + should_emit_classical_loop = false; + } else { + break (condition_expr_compute_kind, compute_kind); + } + }; - // If the condition is variable, we require an additional runtime feature. - if condition_expr_compute_kind.is_variable_value_kind() { + // If the condition is dynamic, we require an additional runtime feature. + if !matches!(condition_expr_compute_kind, ComputeKind::Static) { let ComputeKind::Dynamic { runtime_features, .. 
} = &mut compute_kind else { - panic!("if the loop condition is variable, the loop expression must be dynamic"); + panic!("if the loop condition is quantum, the loop expression must be quantum too"); }; *runtime_features |= RuntimeFeatureFlags::LoopWithDynamicCondition; } @@ -1681,6 +1725,11 @@ impl<'a> Analyzer<'a> { && item.current_spec_context.as_ref().map(|s| s.functor_set_value) == Some(callee.functor_app.functor_set_value())) }) } + + fn should_emit_classical_loops(&self) -> bool { + self.target_capabilities + .contains(TargetCapabilityFlags::BackwardsBranching) + } } fn update_features_for_type( @@ -2552,3 +2601,52 @@ fn is_any_result(t: &Ty) -> bool { _ => false, } } + +struct ClearComputeKinds<'a, 'b> { + analyzer: &'a mut Analyzer<'b>, +} + +impl<'a, 'b> ClearComputeKinds<'a, 'b> { + pub fn new(analyzer: &'a mut Analyzer<'b>) -> Self { + Self { analyzer } + } +} + +impl<'a> Visitor<'a> for ClearComputeKinds<'a, '_> { + fn get_block(&self, id: BlockId) -> &'a Block { + self.analyzer.get_block(id) + } + + fn get_expr(&self, id: ExprId) -> &'a Expr { + self.analyzer.get_expr(id) + } + + fn get_pat(&self, id: PatId) -> &'a Pat { + self.analyzer.get_pat(id) + } + + fn get_stmt(&self, id: StmtId) -> &'a Stmt { + self.analyzer.get_stmt(id) + } + + fn visit_expr(&mut self, expr_id: ExprId) { + self.analyzer + .get_current_application_instance_mut() + .reset_expr_compute_kind(expr_id); + walk_expr(self, expr_id); + } + + fn visit_stmt(&mut self, stmt_id: StmtId) { + self.analyzer + .get_current_application_instance_mut() + .reset_stmt_compute_kind(stmt_id); + walk_stmt(self, stmt_id); + } + + fn visit_block(&mut self, block: BlockId) { + self.analyzer + .get_current_application_instance_mut() + .reset_block_compute_kind(block); + walk_block(self, block); + } +} diff --git a/source/compiler/qsc_rca/src/tests.rs b/source/compiler/qsc_rca/src/tests.rs index 0b19d09986..2057229e68 100644 --- a/source/compiler/qsc_rca/src/tests.rs +++ 
b/source/compiler/qsc_rca/src/tests.rs @@ -25,7 +25,9 @@ use crate::{Analyzer, ComputePropertiesLookup, PackageStoreComputeProperties}; use expect_test::Expect; use qsc::incremental::Compiler; use qsc_data_structures::{ - language_features::LanguageFeatures, source::SourceMap, target::TargetCapabilityFlags, + language_features::LanguageFeatures, + source::SourceMap, + target::{Profile, TargetCapabilityFlags}, }; use qsc_fir::fir::{ItemKind, LocalItemId, Package, PackageStore, StoreItemId}; use qsc_frontend::compile::PackageStore as HirPackageStore; @@ -37,6 +39,7 @@ pub struct CompilationContext { pub fir_store: PackageStore, pub compute_properties: PackageStoreComputeProperties, lowerer: Lowerer, + capabilities: TargetCapabilityFlags, } impl CompilationContext { @@ -53,13 +56,14 @@ impl CompilationContext { ) .expect("should be able to create a new compiler"); let fir_store = lower_hir_package_store(compiler.package_store()); - let analyzer = Analyzer::init(&fir_store); + let analyzer = Analyzer::init(&fir_store, capabilities); let compute_properties = analyzer.analyze_all(); Self { compiler, fir_store, compute_properties, lowerer: Lowerer::new(), + capabilities, } } @@ -84,6 +88,7 @@ impl CompilationContext { package_compute_properties.clear(); let analyzer = Analyzer::init_with_compute_properties( &self.fir_store, + self.capabilities, self.compute_properties.clone(), ); self.compute_properties = analyzer.analyze_package(package_id); @@ -92,7 +97,7 @@ impl CompilationContext { impl Default for CompilationContext { fn default() -> Self { - Self::new(TargetCapabilityFlags::all()) + Self::new(Profile::AdaptiveRIF.into()) } } diff --git a/source/compiler/qsc_rca/src/tests/intrinsics.rs b/source/compiler/qsc_rca/src/tests/intrinsics.rs index c6141d0723..166ef6e4d1 100644 --- a/source/compiler/qsc_rca/src/tests/intrinsics.rs +++ b/source/compiler/qsc_rca/src/tests/intrinsics.rs @@ -1024,81 +1024,6 @@ fn check_rca_for_quantum_qis_reset_body() { ); } -#[test] -fn 
check_rca_for_draw_random_int() { - let compilation_context = CompilationContext::default(); - check_callable_compute_properties( - &compilation_context.fir_store, - compilation_context.get_compute_properties(), - "DrawRandomInt", - &expect![[r#" - Callable: CallableComputeProperties: - body: ApplicationsGeneratorSet: - inherent: Dynamic: - runtime_features: RuntimeFeatureFlags(0x0) - value_kind: Variable - dynamic_param_applications: - [0]: [Parameter Type Element] Dynamic: - runtime_features: RuntimeFeatureFlags(UseOfDynamicInt) - value_kind: Variable - [1]: [Parameter Type Element] Dynamic: - runtime_features: RuntimeFeatureFlags(UseOfDynamicInt) - value_kind: Variable - adj: - ctl: - ctl-adj: "#]], - ); -} - -#[test] -fn check_rca_for_draw_random_double() { - let compilation_context = CompilationContext::default(); - check_callable_compute_properties( - &compilation_context.fir_store, - compilation_context.get_compute_properties(), - "DrawRandomDouble", - &expect![[r#" - Callable: CallableComputeProperties: - body: ApplicationsGeneratorSet: - inherent: Dynamic: - runtime_features: RuntimeFeatureFlags(0x0) - value_kind: Variable - dynamic_param_applications: - [0]: [Parameter Type Element] Dynamic: - runtime_features: RuntimeFeatureFlags(UseOfDynamicDouble) - value_kind: Variable - [1]: [Parameter Type Element] Dynamic: - runtime_features: RuntimeFeatureFlags(UseOfDynamicDouble) - value_kind: Variable - adj: - ctl: - ctl-adj: "#]], - ); -} - -#[test] -fn check_rca_for_draw_random_bool() { - let compilation_context = CompilationContext::default(); - check_callable_compute_properties( - &compilation_context.fir_store, - compilation_context.get_compute_properties(), - "DrawRandomBool", - &expect![[r#" - Callable: CallableComputeProperties: - body: ApplicationsGeneratorSet: - inherent: Dynamic: - runtime_features: RuntimeFeatureFlags(0x0) - value_kind: Variable - dynamic_param_applications: - [0]: [Parameter Type Element] Dynamic: - runtime_features: 
RuntimeFeatureFlags(UseOfDynamicDouble) - value_kind: Variable - adj: - ctl: - ctl-adj: "#]], - ); -} - #[test] fn check_rca_for_begin_estimate_caching() { let compilation_context = CompilationContext::default(); diff --git a/source/compiler/qsc_rca/src/tests/loops.rs b/source/compiler/qsc_rca/src/tests/loops.rs index 3cd33ea16a..76d743d187 100644 --- a/source/compiler/qsc_rca/src/tests/loops.rs +++ b/source/compiler/qsc_rca/src/tests/loops.rs @@ -3,6 +3,7 @@ use super::{CompilationContext, check_last_statement_compute_properties}; use expect_test::expect; +use qsc_data_structures::target::Profile; #[test] fn check_rca_for_classical_for_loop() { @@ -314,3 +315,91 @@ fn check_rca_for_dynamic_while_loop_with_assignments_in_both_the_condition_and_t dynamic_param_applications: "#]], ); } + +#[test] +fn check_rca_for_static_for_loop() { + let mut compilation_context = CompilationContext::new(Profile::AdaptiveRIF.into()); + compilation_context.update( + r#" + use q = Qubit(); + let loop = + for i in 0..5 { X(q); }; + loop"#, + ); + let package_store_compute_properties = compilation_context.get_compute_properties(); + check_last_statement_compute_properties( + package_store_compute_properties, + &expect![[r#" + ApplicationsGeneratorSet: + inherent: Dynamic: + runtime_features: RuntimeFeatureFlags(0x0) + value_kind: Constant + dynamic_param_applications: "#]], + ); +} + +#[test] +fn check_rca_for_static_for_loop_with_loop_and_array_support() { + let mut compilation_context = CompilationContext::new(Profile::AdaptiveRIFLA.into()); + compilation_context.update( + r#" + use q = Qubit(); + let loop = + for i in 0..5 { X(q); }; + loop"#, + ); + let package_store_compute_properties = compilation_context.get_compute_properties(); + check_last_statement_compute_properties( + package_store_compute_properties, + &expect![[r#" + ApplicationsGeneratorSet: + inherent: Dynamic: + runtime_features: RuntimeFeatureFlags(UseOfDynamicInt) + value_kind: Constant + dynamic_param_applications: 
"#]], + ); +} + +#[test] +fn check_rca_for_static_for_loop_over_array() { + let mut compilation_context = CompilationContext::new(Profile::AdaptiveRIF.into()); + compilation_context.update( + r#" + use q = Qubit(); + let loop = + for a in [1.0, 2.0, 3.0] { Rx(a, q); }; + loop"#, + ); + let package_store_compute_properties = compilation_context.get_compute_properties(); + check_last_statement_compute_properties( + package_store_compute_properties, + &expect![[r#" + ApplicationsGeneratorSet: + inherent: Dynamic: + runtime_features: RuntimeFeatureFlags(0x0) + value_kind: Constant + dynamic_param_applications: "#]], + ); +} + +#[test] +fn check_rca_for_static_for_loop_over_array_with_loop_and_array_support() { + let mut compilation_context = CompilationContext::new(Profile::AdaptiveRIFLA.into()); + compilation_context.update( + r#" + use q = Qubit(); + let loop = + for a in [1.0, 2.0, 3.0] { Rx(a, q); }; + loop"#, + ); + let package_store_compute_properties = compilation_context.get_compute_properties(); + check_last_statement_compute_properties( + package_store_compute_properties, + &expect![[r#" + ApplicationsGeneratorSet: + inherent: Dynamic: + runtime_features: RuntimeFeatureFlags(0x0) + value_kind: Constant + dynamic_param_applications: "#]], + ); +} diff --git a/source/pip/tests-integration/resources/adaptive_rifla/input/RUS.qs b/source/pip/tests-integration/resources/adaptive_rifla/input/RUS.qs new file mode 100644 index 0000000000..aa4755ef77 --- /dev/null +++ b/source/pip/tests-integration/resources/adaptive_rifla/input/RUS.qs @@ -0,0 +1,14 @@ +namespace Test { + operation Main() : Result[] { + use qs = Qubit[2]; + use anc = Qubit(); + repeat { + ApplyToEach(H, qs); + Controlled X(qs, anc); + } until MResetZ(anc) == Zero + fixup { + ResetAll(qs); + } + MResetEachZ(qs) + } +} diff --git a/source/pip/tests-integration/resources/adaptive_rifla/output/ArithmeticOps.ll b/source/pip/tests-integration/resources/adaptive_rifla/output/ArithmeticOps.ll index 
4d38c5fd05..c9c84d5ad5 100644 --- a/source/pip/tests-integration/resources/adaptive_rifla/output/ArithmeticOps.ll +++ b/source/pip/tests-integration/resources/adaptive_rifla/output/ArithmeticOps.ll @@ -28,78 +28,86 @@ block_0: %var_8 = call i1 @__quantum__rt__read_result(ptr inttoptr (i64 0 to ptr)) br i1 %var_8, label %block_1, label %block_2 block_1: - store i64 1, ptr %var_0 - store i64 5, ptr %var_1 - store i64 8, ptr %var_2 - store i64 3, ptr %var_3 + %var_78 = load i64, ptr %var_0 + %var_10 = add i64 %var_78, 1 + store i64 %var_10, ptr %var_0 + %var_80 = load i64, ptr %var_1 + %var_11 = add i64 %var_80, 5 + store i64 %var_11, ptr %var_1 + %var_82 = load i64, ptr %var_2 + %var_12 = sub i64 %var_82, 2 + store i64 %var_12, ptr %var_2 + %var_84 = load i64, ptr %var_3 + %var_13 = mul i64 %var_84, 3 + store i64 %var_13, ptr %var_3 br label %block_2 block_2: - %var_10 = call i1 @__quantum__rt__read_result(ptr inttoptr (i64 1 to ptr)) - br i1 %var_10, label %block_3, label %block_4 + %var_14 = call i1 @__quantum__rt__read_result(ptr inttoptr (i64 1 to ptr)) + br i1 %var_14, label %block_3, label %block_4 block_3: - %var_66 = load i64, ptr %var_0 - %var_12 = add i64 %var_66, 1 - store i64 %var_12, ptr %var_0 - %var_68 = load i64, ptr %var_1 - %var_13 = add i64 %var_68, 5 - store i64 %var_13, ptr %var_1 - %var_70 = load i64, ptr %var_2 - %var_14 = sub i64 %var_70, 2 - store i64 %var_14, ptr %var_2 - %var_72 = load i64, ptr %var_3 - %var_15 = mul i64 %var_72, 3 - store i64 %var_15, ptr %var_3 + %var_70 = load i64, ptr %var_0 + %var_16 = add i64 %var_70, 1 + store i64 %var_16, ptr %var_0 + %var_72 = load i64, ptr %var_1 + %var_17 = add i64 %var_72, 5 + store i64 %var_17, ptr %var_1 + %var_74 = load i64, ptr %var_2 + %var_18 = sub i64 %var_74, 2 + store i64 %var_18, ptr %var_2 + %var_76 = load i64, ptr %var_3 + %var_19 = mul i64 %var_76, 3 + store i64 %var_19, ptr %var_3 br label %block_4 block_4: - %var_16 = call i1 @__quantum__rt__read_result(ptr inttoptr (i64 2 to 
ptr)) - br i1 %var_16, label %block_5, label %block_6 + %var_20 = call i1 @__quantum__rt__read_result(ptr inttoptr (i64 2 to ptr)) + br i1 %var_20, label %block_5, label %block_6 block_5: - %var_58 = load i64, ptr %var_0 - %var_18 = add i64 %var_58, 1 - store i64 %var_18, ptr %var_0 - %var_60 = load i64, ptr %var_1 - %var_19 = add i64 %var_60, 5 - store i64 %var_19, ptr %var_1 - %var_62 = load i64, ptr %var_2 - %var_20 = sub i64 %var_62, 2 - store i64 %var_20, ptr %var_2 - %var_64 = load i64, ptr %var_3 - %var_21 = mul i64 %var_64, 3 - store i64 %var_21, ptr %var_3 + %var_62 = load i64, ptr %var_0 + %var_22 = add i64 %var_62, 1 + store i64 %var_22, ptr %var_0 + %var_64 = load i64, ptr %var_1 + %var_23 = add i64 %var_64, 5 + store i64 %var_23, ptr %var_1 + %var_66 = load i64, ptr %var_2 + %var_24 = sub i64 %var_66, 2 + store i64 %var_24, ptr %var_2 + %var_68 = load i64, ptr %var_3 + %var_25 = mul i64 %var_68, 3 + store i64 %var_25, ptr %var_3 br label %block_6 block_6: - %var_22 = call i1 @__quantum__rt__read_result(ptr inttoptr (i64 3 to ptr)) - br i1 %var_22, label %block_7, label %block_8 + %var_26 = call i1 @__quantum__rt__read_result(ptr inttoptr (i64 3 to ptr)) + br i1 %var_26, label %block_7, label %block_8 block_7: - %var_50 = load i64, ptr %var_0 - %var_24 = add i64 %var_50, 1 - store i64 %var_24, ptr %var_0 - %var_52 = load i64, ptr %var_1 - %var_25 = add i64 %var_52, 5 - store i64 %var_25, ptr %var_1 - %var_54 = load i64, ptr %var_2 - %var_26 = sub i64 %var_54, 2 - store i64 %var_26, ptr %var_2 - %var_56 = load i64, ptr %var_3 - %var_27 = mul i64 %var_56, 3 - store i64 %var_27, ptr %var_3 + %var_54 = load i64, ptr %var_0 + %var_28 = add i64 %var_54, 1 + store i64 %var_28, ptr %var_0 + %var_56 = load i64, ptr %var_1 + %var_29 = add i64 %var_56, 5 + store i64 %var_29, ptr %var_1 + %var_58 = load i64, ptr %var_2 + %var_30 = sub i64 %var_58, 2 + store i64 %var_30, ptr %var_2 + %var_60 = load i64, ptr %var_3 + %var_31 = mul i64 %var_60, 3 + store i64 %var_31, 
ptr %var_3 br label %block_8 block_8: - %var_28 = call i1 @__quantum__rt__read_result(ptr inttoptr (i64 4 to ptr)) - br i1 %var_28, label %block_9, label %block_10 + %var_32 = call i1 @__quantum__rt__read_result(ptr inttoptr (i64 4 to ptr)) + br i1 %var_32, label %block_9, label %block_10 block_9: - %var_42 = load i64, ptr %var_0 - %var_30 = add i64 %var_42, 1 - store i64 %var_30, ptr %var_0 - %var_44 = load i64, ptr %var_1 - %var_31 = add i64 %var_44, 5 - store i64 %var_31, ptr %var_1 - %var_46 = load i64, ptr %var_2 - %var_32 = sub i64 %var_46, 2 - store i64 %var_32, ptr %var_2 - %var_48 = load i64, ptr %var_3 - %var_33 = mul i64 %var_48, 3 - store i64 %var_33, ptr %var_3 + %var_46 = load i64, ptr %var_0 + %var_34 = add i64 %var_46, 1 + store i64 %var_34, ptr %var_0 + %var_48 = load i64, ptr %var_1 + %var_35 = add i64 %var_48, 5 + store i64 %var_35, ptr %var_1 + %var_50 = load i64, ptr %var_2 + %var_36 = sub i64 %var_50, 2 + store i64 %var_36, ptr %var_2 + %var_52 = load i64, ptr %var_3 + %var_37 = mul i64 %var_52, 3 + store i64 %var_37, ptr %var_3 br label %block_10 block_10: call void @__quantum__qis__reset__body(ptr inttoptr (i64 0 to ptr)) @@ -108,14 +116,14 @@ block_10: call void @__quantum__qis__reset__body(ptr inttoptr (i64 3 to ptr)) call void @__quantum__qis__reset__body(ptr inttoptr (i64 4 to ptr)) call void @__quantum__rt__tuple_record_output(i64 4, ptr @0) - %var_38 = load i64, ptr %var_0 - call void @__quantum__rt__int_record_output(i64 %var_38, ptr @1) - %var_39 = load i64, ptr %var_1 - call void @__quantum__rt__int_record_output(i64 %var_39, ptr @2) - %var_40 = load i64, ptr %var_2 - call void @__quantum__rt__int_record_output(i64 %var_40, ptr @3) - %var_41 = load i64, ptr %var_3 - call void @__quantum__rt__int_record_output(i64 %var_41, ptr @4) + %var_42 = load i64, ptr %var_0 + call void @__quantum__rt__int_record_output(i64 %var_42, ptr @1) + %var_43 = load i64, ptr %var_1 + call void @__quantum__rt__int_record_output(i64 %var_43, ptr @2) + 
%var_44 = load i64, ptr %var_2 + call void @__quantum__rt__int_record_output(i64 %var_44, ptr @3) + %var_45 = load i64, ptr %var_3 + call void @__quantum__rt__int_record_output(i64 %var_45, ptr @4) ret i64 0 } diff --git a/source/pip/tests-integration/resources/adaptive_rifla/output/ConstantFolding.ll b/source/pip/tests-integration/resources/adaptive_rifla/output/ConstantFolding.ll index b1a1bd3938..0f7edbef37 100644 --- a/source/pip/tests-integration/resources/adaptive_rifla/output/ConstantFolding.ll +++ b/source/pip/tests-integration/resources/adaptive_rifla/output/ConstantFolding.ll @@ -6,26 +6,31 @@ define i64 @ENTRYPOINT__main() #0 { block_0: + %var_1 = alloca i64 + %var_3 = alloca i1 call void @__quantum__rt__initialize(ptr null) call void @__quantum__qis__x__body(ptr inttoptr (i64 0 to ptr)) + store i64 1, ptr %var_1 + br label %block_1 +block_1: + %var_7 = load i64, ptr %var_1 + %var_2 = icmp sle i64 %var_7, 9 + store i1 true, ptr %var_3 + br i1 %var_2, label %block_2, label %block_3 +block_2: + %var_10 = load i1, ptr %var_3 + br i1 %var_10, label %block_4, label %block_5 +block_3: + store i1 false, ptr %var_3 + br label %block_2 +block_4: call void @__quantum__qis__cx__body(ptr inttoptr (i64 0 to ptr), ptr inttoptr (i64 1 to ptr)) call void @__quantum__qis__cx__body(ptr inttoptr (i64 0 to ptr), ptr inttoptr (i64 2 to ptr)) - call void @__quantum__qis__cx__body(ptr inttoptr (i64 0 to ptr), ptr inttoptr (i64 1 to ptr)) - call void @__quantum__qis__cx__body(ptr inttoptr (i64 0 to ptr), ptr inttoptr (i64 2 to ptr)) - call void @__quantum__qis__cx__body(ptr inttoptr (i64 0 to ptr), ptr inttoptr (i64 1 to ptr)) - call void @__quantum__qis__cx__body(ptr inttoptr (i64 0 to ptr), ptr inttoptr (i64 2 to ptr)) - call void @__quantum__qis__cx__body(ptr inttoptr (i64 0 to ptr), ptr inttoptr (i64 1 to ptr)) - call void @__quantum__qis__cx__body(ptr inttoptr (i64 0 to ptr), ptr inttoptr (i64 2 to ptr)) - call void @__quantum__qis__cx__body(ptr inttoptr (i64 0 to ptr), 
ptr inttoptr (i64 1 to ptr)) - call void @__quantum__qis__cx__body(ptr inttoptr (i64 0 to ptr), ptr inttoptr (i64 2 to ptr)) - call void @__quantum__qis__cx__body(ptr inttoptr (i64 0 to ptr), ptr inttoptr (i64 1 to ptr)) - call void @__quantum__qis__cx__body(ptr inttoptr (i64 0 to ptr), ptr inttoptr (i64 2 to ptr)) - call void @__quantum__qis__cx__body(ptr inttoptr (i64 0 to ptr), ptr inttoptr (i64 1 to ptr)) - call void @__quantum__qis__cx__body(ptr inttoptr (i64 0 to ptr), ptr inttoptr (i64 2 to ptr)) - call void @__quantum__qis__cx__body(ptr inttoptr (i64 0 to ptr), ptr inttoptr (i64 1 to ptr)) - call void @__quantum__qis__cx__body(ptr inttoptr (i64 0 to ptr), ptr inttoptr (i64 2 to ptr)) - call void @__quantum__qis__cx__body(ptr inttoptr (i64 0 to ptr), ptr inttoptr (i64 1 to ptr)) - call void @__quantum__qis__cx__body(ptr inttoptr (i64 0 to ptr), ptr inttoptr (i64 2 to ptr)) + %var_11 = load i64, ptr %var_1 + %var_5 = add i64 %var_11, 1 + store i64 %var_5, ptr %var_1 + br label %block_1 +block_5: call void @__quantum__qis__rx__body(double 3.141592653589793, ptr inttoptr (i64 3 to ptr)) call void @__quantum__qis__m__body(ptr inttoptr (i64 0 to ptr), ptr inttoptr (i64 0 to ptr)) call void @__quantum__qis__m__body(ptr inttoptr (i64 1 to ptr), ptr inttoptr (i64 1 to ptr)) diff --git a/source/pip/tests-integration/resources/adaptive_rifla/output/Doubles.ll b/source/pip/tests-integration/resources/adaptive_rifla/output/Doubles.ll index 8cdefdc1e9..6ccfc39d8c 100644 --- a/source/pip/tests-integration/resources/adaptive_rifla/output/Doubles.ll +++ b/source/pip/tests-integration/resources/adaptive_rifla/output/Doubles.ll @@ -11,211 +11,67 @@ define i64 @ENTRYPOINT__main() #0 { block_0: %var_0 = alloca double + %var_1 = alloca i64 + %var_3 = alloca i1 call void @__quantum__rt__initialize(ptr null) store double 0.0, ptr %var_0 - call void @__quantum__qis__x__body(ptr inttoptr (i64 0 to ptr)) - call void @__quantum__qis__m__body(ptr inttoptr (i64 0 to ptr), ptr inttoptr 
(i64 0 to ptr)) - %var_2 = call i1 @__quantum__rt__read_result(ptr inttoptr (i64 0 to ptr)) - br i1 %var_2, label %block_1, label %block_2 + store i64 1, ptr %var_1 + br label %block_1 block_1: - call void @__quantum__qis__x__body(ptr inttoptr (i64 0 to ptr)) - store double 1.0, ptr %var_0 - store double 1.0, ptr %var_0 - store double 0.0, ptr %var_0 - store double 0.0, ptr %var_0 - store double 1.0, ptr %var_0 - br label %block_2 + %var_23 = load i64, ptr %var_1 + %var_2 = icmp sle i64 %var_23, 10 + store i1 true, ptr %var_3 + br i1 %var_2, label %block_2, label %block_3 block_2: - call void @__quantum__qis__x__body(ptr inttoptr (i64 0 to ptr)) - call void @__quantum__qis__m__body(ptr inttoptr (i64 0 to ptr), ptr inttoptr (i64 1 to ptr)) - %var_4 = call i1 @__quantum__rt__read_result(ptr inttoptr (i64 1 to ptr)) - br i1 %var_4, label %block_3, label %block_4 + %var_26 = load i1, ptr %var_3 + br i1 %var_26, label %block_4, label %block_5 block_3: + store i1 false, ptr %var_3 + br label %block_2 +block_4: call void @__quantum__qis__x__body(ptr inttoptr (i64 0 to ptr)) - %var_126 = load double, ptr %var_0 - %var_6 = fadd double %var_126, 1.0 + call void @__quantum__qis__m__body(ptr inttoptr (i64 0 to ptr), ptr inttoptr (i64 0 to ptr)) + %var_4 = call i1 @__quantum__rt__read_result(ptr inttoptr (i64 0 to ptr)) + br i1 %var_4, label %block_6, label %block_7 +block_5: + call void @__quantum__qis__reset__body(ptr inttoptr (i64 0 to ptr)) + %var_27 = load double, ptr %var_0 + %var_12 = fptosi double %var_27 to i64 + %var_14 = sitofp i64 %var_12 to double + %var_16 = fcmp ogt double %var_27, 5.0 + %var_17 = fcmp olt double %var_27, 5.0 + %var_18 = fcmp oge double %var_27, 10.0 + %var_19 = fcmp oeq double %var_27, 10.0 + %var_20 = fcmp one double %var_27, 10.0 + call void @__quantum__rt__tuple_record_output(i64 8, ptr @0) + call void @__quantum__rt__double_record_output(double %var_27, ptr @1) + call void @__quantum__rt__bool_record_output(i1 %var_16, ptr @2) + call void 
@__quantum__rt__bool_record_output(i1 %var_17, ptr @3) + call void @__quantum__rt__bool_record_output(i1 %var_18, ptr @4) + call void @__quantum__rt__bool_record_output(i1 %var_19, ptr @5) + call void @__quantum__rt__bool_record_output(i1 %var_20, ptr @6) + call void @__quantum__rt__int_record_output(i64 %var_12, ptr @7) + call void @__quantum__rt__double_record_output(double %var_14, ptr @8) + ret i64 0 +block_6: + call void @__quantum__qis__x__body(ptr inttoptr (i64 0 to ptr)) + %var_30 = load double, ptr %var_0 + %var_6 = fadd double %var_30, 1.0 store double %var_6, ptr %var_0 - %var_7 = fmul double %var_126, 1.0 + %var_7 = fmul double %var_30, 1.0 store double %var_7, ptr %var_0 - %var_8 = fsub double %var_126, 1.0 + %var_8 = fsub double %var_30, 1.0 store double %var_8, ptr %var_0 - %var_9 = fdiv double %var_126, 1.0 + %var_9 = fdiv double %var_30, 1.0 store double %var_9, ptr %var_0 - %var_10 = fadd double %var_126, 1.0 + %var_10 = fadd double %var_30, 1.0 store double %var_10, ptr %var_0 - br label %block_4 -block_4: - call void @__quantum__qis__x__body(ptr inttoptr (i64 0 to ptr)) - call void @__quantum__qis__m__body(ptr inttoptr (i64 0 to ptr), ptr inttoptr (i64 2 to ptr)) - %var_11 = call i1 @__quantum__rt__read_result(ptr inttoptr (i64 2 to ptr)) - br i1 %var_11, label %block_5, label %block_6 -block_5: - call void @__quantum__qis__x__body(ptr inttoptr (i64 0 to ptr)) - %var_120 = load double, ptr %var_0 - %var_13 = fadd double %var_120, 1.0 - store double %var_13, ptr %var_0 - %var_14 = fmul double %var_120, 1.0 - store double %var_14, ptr %var_0 - %var_15 = fsub double %var_120, 1.0 - store double %var_15, ptr %var_0 - %var_16 = fdiv double %var_120, 1.0 - store double %var_16, ptr %var_0 - %var_17 = fadd double %var_120, 1.0 - store double %var_17, ptr %var_0 - br label %block_6 -block_6: - call void @__quantum__qis__x__body(ptr inttoptr (i64 0 to ptr)) - call void @__quantum__qis__m__body(ptr inttoptr (i64 0 to ptr), ptr inttoptr (i64 3 to ptr)) - 
%var_18 = call i1 @__quantum__rt__read_result(ptr inttoptr (i64 3 to ptr)) - br i1 %var_18, label %block_7, label %block_8 + br label %block_7 block_7: - call void @__quantum__qis__x__body(ptr inttoptr (i64 0 to ptr)) - %var_114 = load double, ptr %var_0 - %var_20 = fadd double %var_114, 1.0 - store double %var_20, ptr %var_0 - %var_21 = fmul double %var_114, 1.0 - store double %var_21, ptr %var_0 - %var_22 = fsub double %var_114, 1.0 - store double %var_22, ptr %var_0 - %var_23 = fdiv double %var_114, 1.0 - store double %var_23, ptr %var_0 - %var_24 = fadd double %var_114, 1.0 - store double %var_24, ptr %var_0 - br label %block_8 -block_8: - call void @__quantum__qis__x__body(ptr inttoptr (i64 0 to ptr)) - call void @__quantum__qis__m__body(ptr inttoptr (i64 0 to ptr), ptr inttoptr (i64 4 to ptr)) - %var_25 = call i1 @__quantum__rt__read_result(ptr inttoptr (i64 4 to ptr)) - br i1 %var_25, label %block_9, label %block_10 -block_9: - call void @__quantum__qis__x__body(ptr inttoptr (i64 0 to ptr)) - %var_108 = load double, ptr %var_0 - %var_27 = fadd double %var_108, 1.0 - store double %var_27, ptr %var_0 - %var_28 = fmul double %var_108, 1.0 - store double %var_28, ptr %var_0 - %var_29 = fsub double %var_108, 1.0 - store double %var_29, ptr %var_0 - %var_30 = fdiv double %var_108, 1.0 - store double %var_30, ptr %var_0 - %var_31 = fadd double %var_108, 1.0 - store double %var_31, ptr %var_0 - br label %block_10 -block_10: - call void @__quantum__qis__x__body(ptr inttoptr (i64 0 to ptr)) - call void @__quantum__qis__m__body(ptr inttoptr (i64 0 to ptr), ptr inttoptr (i64 5 to ptr)) - %var_32 = call i1 @__quantum__rt__read_result(ptr inttoptr (i64 5 to ptr)) - br i1 %var_32, label %block_11, label %block_12 -block_11: - call void @__quantum__qis__x__body(ptr inttoptr (i64 0 to ptr)) - %var_102 = load double, ptr %var_0 - %var_34 = fadd double %var_102, 1.0 - store double %var_34, ptr %var_0 - %var_35 = fmul double %var_102, 1.0 - store double %var_35, ptr %var_0 - 
%var_36 = fsub double %var_102, 1.0 - store double %var_36, ptr %var_0 - %var_37 = fdiv double %var_102, 1.0 - store double %var_37, ptr %var_0 - %var_38 = fadd double %var_102, 1.0 - store double %var_38, ptr %var_0 - br label %block_12 -block_12: - call void @__quantum__qis__x__body(ptr inttoptr (i64 0 to ptr)) - call void @__quantum__qis__m__body(ptr inttoptr (i64 0 to ptr), ptr inttoptr (i64 6 to ptr)) - %var_39 = call i1 @__quantum__rt__read_result(ptr inttoptr (i64 6 to ptr)) - br i1 %var_39, label %block_13, label %block_14 -block_13: - call void @__quantum__qis__x__body(ptr inttoptr (i64 0 to ptr)) - %var_96 = load double, ptr %var_0 - %var_41 = fadd double %var_96, 1.0 - store double %var_41, ptr %var_0 - %var_42 = fmul double %var_96, 1.0 - store double %var_42, ptr %var_0 - %var_43 = fsub double %var_96, 1.0 - store double %var_43, ptr %var_0 - %var_44 = fdiv double %var_96, 1.0 - store double %var_44, ptr %var_0 - %var_45 = fadd double %var_96, 1.0 - store double %var_45, ptr %var_0 - br label %block_14 -block_14: - call void @__quantum__qis__x__body(ptr inttoptr (i64 0 to ptr)) - call void @__quantum__qis__m__body(ptr inttoptr (i64 0 to ptr), ptr inttoptr (i64 7 to ptr)) - %var_46 = call i1 @__quantum__rt__read_result(ptr inttoptr (i64 7 to ptr)) - br i1 %var_46, label %block_15, label %block_16 -block_15: - call void @__quantum__qis__x__body(ptr inttoptr (i64 0 to ptr)) - %var_90 = load double, ptr %var_0 - %var_48 = fadd double %var_90, 1.0 - store double %var_48, ptr %var_0 - %var_49 = fmul double %var_90, 1.0 - store double %var_49, ptr %var_0 - %var_50 = fsub double %var_90, 1.0 - store double %var_50, ptr %var_0 - %var_51 = fdiv double %var_90, 1.0 - store double %var_51, ptr %var_0 - %var_52 = fadd double %var_90, 1.0 - store double %var_52, ptr %var_0 - br label %block_16 -block_16: - call void @__quantum__qis__x__body(ptr inttoptr (i64 0 to ptr)) - call void @__quantum__qis__m__body(ptr inttoptr (i64 0 to ptr), ptr inttoptr (i64 8 to ptr)) - 
%var_53 = call i1 @__quantum__rt__read_result(ptr inttoptr (i64 8 to ptr)) - br i1 %var_53, label %block_17, label %block_18 -block_17: - call void @__quantum__qis__x__body(ptr inttoptr (i64 0 to ptr)) - %var_84 = load double, ptr %var_0 - %var_55 = fadd double %var_84, 1.0 - store double %var_55, ptr %var_0 - %var_56 = fmul double %var_84, 1.0 - store double %var_56, ptr %var_0 - %var_57 = fsub double %var_84, 1.0 - store double %var_57, ptr %var_0 - %var_58 = fdiv double %var_84, 1.0 - store double %var_58, ptr %var_0 - %var_59 = fadd double %var_84, 1.0 - store double %var_59, ptr %var_0 - br label %block_18 -block_18: - call void @__quantum__qis__x__body(ptr inttoptr (i64 0 to ptr)) - call void @__quantum__qis__m__body(ptr inttoptr (i64 0 to ptr), ptr inttoptr (i64 9 to ptr)) - %var_60 = call i1 @__quantum__rt__read_result(ptr inttoptr (i64 9 to ptr)) - br i1 %var_60, label %block_19, label %block_20 -block_19: - call void @__quantum__qis__x__body(ptr inttoptr (i64 0 to ptr)) - %var_78 = load double, ptr %var_0 - %var_62 = fadd double %var_78, 1.0 - store double %var_62, ptr %var_0 - %var_63 = fmul double %var_78, 1.0 - store double %var_63, ptr %var_0 - %var_64 = fsub double %var_78, 1.0 - store double %var_64, ptr %var_0 - %var_65 = fdiv double %var_78, 1.0 - store double %var_65, ptr %var_0 - %var_66 = fadd double %var_78, 1.0 - store double %var_66, ptr %var_0 - br label %block_20 -block_20: - call void @__quantum__qis__reset__body(ptr inttoptr (i64 0 to ptr)) - %var_77 = load double, ptr %var_0 - %var_67 = fptosi double %var_77 to i64 - %var_69 = sitofp i64 %var_67 to double - %var_71 = fcmp ogt double %var_77, 5.0 - %var_72 = fcmp olt double %var_77, 5.0 - %var_73 = fcmp oge double %var_77, 10.0 - %var_74 = fcmp oeq double %var_77, 10.0 - %var_75 = fcmp one double %var_77, 10.0 - call void @__quantum__rt__tuple_record_output(i64 8, ptr @0) - call void @__quantum__rt__double_record_output(double %var_77, ptr @1) - call void 
@__quantum__rt__bool_record_output(i1 %var_71, ptr @2) - call void @__quantum__rt__bool_record_output(i1 %var_72, ptr @3) - call void @__quantum__rt__bool_record_output(i1 %var_73, ptr @4) - call void @__quantum__rt__bool_record_output(i1 %var_74, ptr @5) - call void @__quantum__rt__bool_record_output(i1 %var_75, ptr @6) - call void @__quantum__rt__int_record_output(i64 %var_67, ptr @7) - call void @__quantum__rt__double_record_output(double %var_69, ptr @8) - ret i64 0 + %var_28 = load i64, ptr %var_1 + %var_11 = add i64 %var_28, 1 + store i64 %var_11, ptr %var_1 + br label %block_1 } declare void @__quantum__rt__initialize(ptr) @@ -236,7 +92,7 @@ declare void @__quantum__rt__bool_record_output(i1, ptr) declare void @__quantum__rt__int_record_output(i64, ptr) -attributes #0 = { "entry_point" "output_labeling_schema" "qir_profiles"="adaptive_profile" "required_num_qubits"="1" "required_num_results"="10" } +attributes #0 = { "entry_point" "output_labeling_schema" "qir_profiles"="adaptive_profile" "required_num_qubits"="1" "required_num_results"="1" } attributes #1 = { "irreversible" } ; module flags diff --git a/source/pip/tests-integration/resources/adaptive_rifla/output/Doubles.out b/source/pip/tests-integration/resources/adaptive_rifla/output/Doubles.out index 1e75619beb..bc12cf362b 100644 --- a/source/pip/tests-integration/resources/adaptive_rifla/output/Doubles.out +++ b/source/pip/tests-integration/resources/adaptive_rifla/output/Doubles.out @@ -3,7 +3,7 @@ METADATA entry_point METADATA output_labeling_schema METADATA qir_profiles adaptive_profile METADATA required_num_qubits 1 -METADATA required_num_results 10 +METADATA required_num_results 1 OUTPUT TUPLE 8 0_t OUTPUT DOUBLE 10.0 1_t0d OUTPUT BOOL true 2_t1b diff --git a/source/pip/tests-integration/resources/adaptive_rifla/output/ExpandedTests.ll b/source/pip/tests-integration/resources/adaptive_rifla/output/ExpandedTests.ll index 6e5083c457..dfe90bcc3e 100644 --- 
a/source/pip/tests-integration/resources/adaptive_rifla/output/ExpandedTests.ll +++ b/source/pip/tests-integration/resources/adaptive_rifla/output/ExpandedTests.ll @@ -6,9 +6,25 @@ define i64 @ENTRYPOINT__main() #0 { block_0: + %var_2 = alloca i64 + %var_4 = alloca i1 call void @__quantum__rt__initialize(ptr null) call void @__quantum__qis__h__body(ptr inttoptr (i64 0 to ptr)) call void @__quantum__qis__h__body(ptr inttoptr (i64 1 to ptr)) + store i64 0, ptr %var_2 + br label %block_1 +block_1: + %var_13 = load i64, ptr %var_2 + %var_3 = icmp sle i64 %var_13, 0 + store i1 true, ptr %var_4 + br i1 %var_3, label %block_2, label %block_3 +block_2: + %var_16 = load i1, ptr %var_4 + br i1 %var_16, label %block_4, label %block_5 +block_3: + store i1 false, ptr %var_4 + br label %block_2 +block_4: call void @__quantum__qis__x__body(ptr inttoptr (i64 2 to ptr)) call void @__quantum__qis__h__body(ptr inttoptr (i64 2 to ptr)) call void @__quantum__qis__x__body(ptr inttoptr (i64 0 to ptr)) @@ -25,6 +41,11 @@ block_0: call void @__quantum__qis__x__body(ptr inttoptr (i64 0 to ptr)) call void @__quantum__qis__h__body(ptr inttoptr (i64 0 to ptr)) call void @__quantum__qis__h__body(ptr inttoptr (i64 1 to ptr)) + %var_17 = load i64, ptr %var_2 + %var_11 = add i64 %var_17, 1 + store i64 %var_11, ptr %var_2 + br label %block_1 +block_5: call void @__quantum__qis__mresetz__body(ptr inttoptr (i64 0 to ptr), ptr inttoptr (i64 0 to ptr)) call void @__quantum__qis__mresetz__body(ptr inttoptr (i64 1 to ptr), ptr inttoptr (i64 1 to ptr)) call void @__quantum__qis__h__body(ptr inttoptr (i64 2 to ptr)) diff --git a/source/pip/tests-integration/resources/adaptive_rifla/output/IntegerComparison.ll b/source/pip/tests-integration/resources/adaptive_rifla/output/IntegerComparison.ll index 4816357964..e8c2c982dc 100644 --- a/source/pip/tests-integration/resources/adaptive_rifla/output/IntegerComparison.ll +++ b/source/pip/tests-integration/resources/adaptive_rifla/output/IntegerComparison.ll @@ 
-6,126 +6,50 @@ define i64 @ENTRYPOINT__main() #0 { block_0: %var_0 = alloca i64 + %var_1 = alloca i64 + %var_3 = alloca i1 call void @__quantum__rt__initialize(ptr null) store i64 0, ptr %var_0 - call void @__quantum__qis__x__body(ptr inttoptr (i64 0 to ptr)) - call void @__quantum__qis__m__body(ptr inttoptr (i64 0 to ptr), ptr inttoptr (i64 0 to ptr)) - %var_2 = call i1 @__quantum__rt__read_result(ptr inttoptr (i64 0 to ptr)) - br i1 %var_2, label %block_1, label %block_2 + store i64 1, ptr %var_1 + br label %block_1 block_1: - call void @__quantum__qis__x__body(ptr inttoptr (i64 0 to ptr)) - store i64 1, ptr %var_0 - br label %block_2 + %var_13 = load i64, ptr %var_1 + %var_2 = icmp sle i64 %var_13, 10 + store i1 true, ptr %var_3 + br i1 %var_2, label %block_2, label %block_3 block_2: - call void @__quantum__qis__x__body(ptr inttoptr (i64 0 to ptr)) - call void @__quantum__qis__m__body(ptr inttoptr (i64 0 to ptr), ptr inttoptr (i64 1 to ptr)) - %var_4 = call i1 @__quantum__rt__read_result(ptr inttoptr (i64 1 to ptr)) - br i1 %var_4, label %block_3, label %block_4 + %var_16 = load i1, ptr %var_3 + br i1 %var_16, label %block_4, label %block_5 block_3: - call void @__quantum__qis__x__body(ptr inttoptr (i64 0 to ptr)) - %var_52 = load i64, ptr %var_0 - %var_6 = add i64 %var_52, 1 - store i64 %var_6, ptr %var_0 - br label %block_4 + store i1 false, ptr %var_3 + br label %block_2 block_4: call void @__quantum__qis__x__body(ptr inttoptr (i64 0 to ptr)) - call void @__quantum__qis__m__body(ptr inttoptr (i64 0 to ptr), ptr inttoptr (i64 2 to ptr)) - %var_7 = call i1 @__quantum__rt__read_result(ptr inttoptr (i64 2 to ptr)) - br i1 %var_7, label %block_5, label %block_6 + call void @__quantum__qis__m__body(ptr inttoptr (i64 0 to ptr), ptr inttoptr (i64 0 to ptr)) + %var_4 = call i1 @__quantum__rt__read_result(ptr inttoptr (i64 0 to ptr)) + br i1 %var_4, label %block_6, label %block_7 block_5: - call void @__quantum__qis__x__body(ptr inttoptr (i64 0 to ptr)) - %var_50 = 
load i64, ptr %var_0 - %var_9 = add i64 %var_50, 1 - store i64 %var_9, ptr %var_0 - br label %block_6 -block_6: - call void @__quantum__qis__x__body(ptr inttoptr (i64 0 to ptr)) - call void @__quantum__qis__m__body(ptr inttoptr (i64 0 to ptr), ptr inttoptr (i64 3 to ptr)) - %var_10 = call i1 @__quantum__rt__read_result(ptr inttoptr (i64 3 to ptr)) - br i1 %var_10, label %block_7, label %block_8 -block_7: - call void @__quantum__qis__x__body(ptr inttoptr (i64 0 to ptr)) - %var_48 = load i64, ptr %var_0 - %var_12 = add i64 %var_48, 1 - store i64 %var_12, ptr %var_0 - br label %block_8 -block_8: - call void @__quantum__qis__x__body(ptr inttoptr (i64 0 to ptr)) - call void @__quantum__qis__m__body(ptr inttoptr (i64 0 to ptr), ptr inttoptr (i64 4 to ptr)) - %var_13 = call i1 @__quantum__rt__read_result(ptr inttoptr (i64 4 to ptr)) - br i1 %var_13, label %block_9, label %block_10 -block_9: - call void @__quantum__qis__x__body(ptr inttoptr (i64 0 to ptr)) - %var_46 = load i64, ptr %var_0 - %var_15 = add i64 %var_46, 1 - store i64 %var_15, ptr %var_0 - br label %block_10 -block_10: - call void @__quantum__qis__x__body(ptr inttoptr (i64 0 to ptr)) - call void @__quantum__qis__m__body(ptr inttoptr (i64 0 to ptr), ptr inttoptr (i64 5 to ptr)) - %var_16 = call i1 @__quantum__rt__read_result(ptr inttoptr (i64 5 to ptr)) - br i1 %var_16, label %block_11, label %block_12 -block_11: - call void @__quantum__qis__x__body(ptr inttoptr (i64 0 to ptr)) - %var_44 = load i64, ptr %var_0 - %var_18 = add i64 %var_44, 1 - store i64 %var_18, ptr %var_0 - br label %block_12 -block_12: - call void @__quantum__qis__x__body(ptr inttoptr (i64 0 to ptr)) - call void @__quantum__qis__m__body(ptr inttoptr (i64 0 to ptr), ptr inttoptr (i64 6 to ptr)) - %var_19 = call i1 @__quantum__rt__read_result(ptr inttoptr (i64 6 to ptr)) - br i1 %var_19, label %block_13, label %block_14 -block_13: - call void @__quantum__qis__x__body(ptr inttoptr (i64 0 to ptr)) - %var_42 = load i64, ptr %var_0 - %var_21 = add 
i64 %var_42, 1 - store i64 %var_21, ptr %var_0 - br label %block_14 -block_14: - call void @__quantum__qis__x__body(ptr inttoptr (i64 0 to ptr)) - call void @__quantum__qis__m__body(ptr inttoptr (i64 0 to ptr), ptr inttoptr (i64 7 to ptr)) - %var_22 = call i1 @__quantum__rt__read_result(ptr inttoptr (i64 7 to ptr)) - br i1 %var_22, label %block_15, label %block_16 -block_15: - call void @__quantum__qis__x__body(ptr inttoptr (i64 0 to ptr)) - %var_40 = load i64, ptr %var_0 - %var_24 = add i64 %var_40, 1 - store i64 %var_24, ptr %var_0 - br label %block_16 -block_16: - call void @__quantum__qis__x__body(ptr inttoptr (i64 0 to ptr)) - call void @__quantum__qis__m__body(ptr inttoptr (i64 0 to ptr), ptr inttoptr (i64 8 to ptr)) - %var_25 = call i1 @__quantum__rt__read_result(ptr inttoptr (i64 8 to ptr)) - br i1 %var_25, label %block_17, label %block_18 -block_17: - call void @__quantum__qis__x__body(ptr inttoptr (i64 0 to ptr)) - %var_38 = load i64, ptr %var_0 - %var_27 = add i64 %var_38, 1 - store i64 %var_27, ptr %var_0 - br label %block_18 -block_18: - call void @__quantum__qis__x__body(ptr inttoptr (i64 0 to ptr)) - call void @__quantum__qis__m__body(ptr inttoptr (i64 0 to ptr), ptr inttoptr (i64 9 to ptr)) - %var_28 = call i1 @__quantum__rt__read_result(ptr inttoptr (i64 9 to ptr)) - br i1 %var_28, label %block_19, label %block_20 -block_19: - call void @__quantum__qis__x__body(ptr inttoptr (i64 0 to ptr)) - %var_36 = load i64, ptr %var_0 - %var_30 = add i64 %var_36, 1 - store i64 %var_30, ptr %var_0 - br label %block_20 -block_20: call void @__quantum__qis__reset__body(ptr inttoptr (i64 0 to ptr)) - %var_35 = load i64, ptr %var_0 - %var_31 = icmp sgt i64 %var_35, 5 - %var_32 = icmp slt i64 %var_35, 5 - %var_33 = icmp eq i64 %var_35, 10 + %var_17 = load i64, ptr %var_0 + %var_8 = icmp sgt i64 %var_17, 5 + %var_9 = icmp slt i64 %var_17, 5 + %var_10 = icmp eq i64 %var_17, 10 call void @__quantum__rt__tuple_record_output(i64 3, ptr @0) - call void 
@__quantum__rt__bool_record_output(i1 %var_31, ptr @1) - call void @__quantum__rt__bool_record_output(i1 %var_32, ptr @2) - call void @__quantum__rt__bool_record_output(i1 %var_33, ptr @3) + call void @__quantum__rt__bool_record_output(i1 %var_8, ptr @1) + call void @__quantum__rt__bool_record_output(i1 %var_9, ptr @2) + call void @__quantum__rt__bool_record_output(i1 %var_10, ptr @3) ret i64 0 +block_6: + call void @__quantum__qis__x__body(ptr inttoptr (i64 0 to ptr)) + %var_20 = load i64, ptr %var_0 + %var_6 = add i64 %var_20, 1 + store i64 %var_6, ptr %var_0 + br label %block_7 +block_7: + %var_18 = load i64, ptr %var_1 + %var_7 = add i64 %var_18, 1 + store i64 %var_7, ptr %var_1 + br label %block_1 } declare void @__quantum__rt__initialize(ptr) @@ -142,7 +66,7 @@ declare void @__quantum__rt__tuple_record_output(i64, ptr) declare void @__quantum__rt__bool_record_output(i1, ptr) -attributes #0 = { "entry_point" "output_labeling_schema" "qir_profiles"="adaptive_profile" "required_num_qubits"="1" "required_num_results"="10" } +attributes #0 = { "entry_point" "output_labeling_schema" "qir_profiles"="adaptive_profile" "required_num_qubits"="1" "required_num_results"="1" } attributes #1 = { "irreversible" } ; module flags diff --git a/source/pip/tests-integration/resources/adaptive_rifla/output/IntegerComparison.out b/source/pip/tests-integration/resources/adaptive_rifla/output/IntegerComparison.out index 08d89be312..11f8013272 100644 --- a/source/pip/tests-integration/resources/adaptive_rifla/output/IntegerComparison.out +++ b/source/pip/tests-integration/resources/adaptive_rifla/output/IntegerComparison.out @@ -3,7 +3,7 @@ METADATA entry_point METADATA output_labeling_schema METADATA qir_profiles adaptive_profile METADATA required_num_qubits 1 -METADATA required_num_results 10 +METADATA required_num_results 1 OUTPUT TUPLE 3 0_t OUTPUT BOOL true 1_t0b OUTPUT BOOL false 2_t1b diff --git 
a/source/pip/tests-integration/resources/adaptive_rifla/output/IntrinsicRotationsWithPeriod.ll b/source/pip/tests-integration/resources/adaptive_rifla/output/IntrinsicRotationsWithPeriod.ll index 54eca58683..c1b4f252a7 100644 --- a/source/pip/tests-integration/resources/adaptive_rifla/output/IntrinsicRotationsWithPeriod.ll +++ b/source/pip/tests-integration/resources/adaptive_rifla/output/IntrinsicRotationsWithPeriod.ll @@ -11,6 +11,8 @@ define i64 @ENTRYPOINT__main() #0 { block_0: + %var_6 = alloca i64 + %var_8 = alloca i1 call void @__quantum__rt__initialize(ptr null) call void @__quantum__qis__x__body(ptr inttoptr (i64 0 to ptr)) call void @__quantum__qis__x__body(ptr inttoptr (i64 1 to ptr)) @@ -22,54 +24,31 @@ block_0: call void @__quantum__qis__h__body(ptr inttoptr (i64 5 to ptr)) call void @__quantum__qis__z__body(ptr inttoptr (i64 5 to ptr)) call void @__quantum__qis__h__body(ptr inttoptr (i64 5 to ptr)) + store i64 1, ptr %var_6 + br label %block_1 +block_1: + %var_11 = load i64, ptr %var_6 + %var_7 = icmp sle i64 %var_11, 8 + store i1 true, ptr %var_8 + br i1 %var_7, label %block_2, label %block_3 +block_2: + %var_14 = load i1, ptr %var_8 + br i1 %var_14, label %block_4, label %block_5 +block_3: + store i1 false, ptr %var_8 + br label %block_2 +block_4: call void @__quantum__qis__rx__body(double 1.5707963267948966, ptr inttoptr (i64 0 to ptr)) call void @__quantum__qis__rx__body(double 1.5707963267948966, ptr inttoptr (i64 1 to ptr)) call void @__quantum__qis__ry__body(double 1.5707963267948966, ptr inttoptr (i64 2 to ptr)) call void @__quantum__qis__ry__body(double 1.5707963267948966, ptr inttoptr (i64 3 to ptr)) call void @__quantum__qis__rz__body(double 1.5707963267948966, ptr inttoptr (i64 4 to ptr)) call void @__quantum__qis__rz__body(double 1.5707963267948966, ptr inttoptr (i64 5 to ptr)) - call void @__quantum__qis__rx__body(double 1.5707963267948966, ptr inttoptr (i64 0 to ptr)) - call void @__quantum__qis__rx__body(double 1.5707963267948966, ptr 
inttoptr (i64 1 to ptr)) - call void @__quantum__qis__ry__body(double 1.5707963267948966, ptr inttoptr (i64 2 to ptr)) - call void @__quantum__qis__ry__body(double 1.5707963267948966, ptr inttoptr (i64 3 to ptr)) - call void @__quantum__qis__rz__body(double 1.5707963267948966, ptr inttoptr (i64 4 to ptr)) - call void @__quantum__qis__rz__body(double 1.5707963267948966, ptr inttoptr (i64 5 to ptr)) - call void @__quantum__qis__rx__body(double 1.5707963267948966, ptr inttoptr (i64 0 to ptr)) - call void @__quantum__qis__rx__body(double 1.5707963267948966, ptr inttoptr (i64 1 to ptr)) - call void @__quantum__qis__ry__body(double 1.5707963267948966, ptr inttoptr (i64 2 to ptr)) - call void @__quantum__qis__ry__body(double 1.5707963267948966, ptr inttoptr (i64 3 to ptr)) - call void @__quantum__qis__rz__body(double 1.5707963267948966, ptr inttoptr (i64 4 to ptr)) - call void @__quantum__qis__rz__body(double 1.5707963267948966, ptr inttoptr (i64 5 to ptr)) - call void @__quantum__qis__rx__body(double 1.5707963267948966, ptr inttoptr (i64 0 to ptr)) - call void @__quantum__qis__rx__body(double 1.5707963267948966, ptr inttoptr (i64 1 to ptr)) - call void @__quantum__qis__ry__body(double 1.5707963267948966, ptr inttoptr (i64 2 to ptr)) - call void @__quantum__qis__ry__body(double 1.5707963267948966, ptr inttoptr (i64 3 to ptr)) - call void @__quantum__qis__rz__body(double 1.5707963267948966, ptr inttoptr (i64 4 to ptr)) - call void @__quantum__qis__rz__body(double 1.5707963267948966, ptr inttoptr (i64 5 to ptr)) - call void @__quantum__qis__rx__body(double 1.5707963267948966, ptr inttoptr (i64 0 to ptr)) - call void @__quantum__qis__rx__body(double 1.5707963267948966, ptr inttoptr (i64 1 to ptr)) - call void @__quantum__qis__ry__body(double 1.5707963267948966, ptr inttoptr (i64 2 to ptr)) - call void @__quantum__qis__ry__body(double 1.5707963267948966, ptr inttoptr (i64 3 to ptr)) - call void @__quantum__qis__rz__body(double 1.5707963267948966, ptr inttoptr (i64 4 to ptr)) 
- call void @__quantum__qis__rz__body(double 1.5707963267948966, ptr inttoptr (i64 5 to ptr)) - call void @__quantum__qis__rx__body(double 1.5707963267948966, ptr inttoptr (i64 0 to ptr)) - call void @__quantum__qis__rx__body(double 1.5707963267948966, ptr inttoptr (i64 1 to ptr)) - call void @__quantum__qis__ry__body(double 1.5707963267948966, ptr inttoptr (i64 2 to ptr)) - call void @__quantum__qis__ry__body(double 1.5707963267948966, ptr inttoptr (i64 3 to ptr)) - call void @__quantum__qis__rz__body(double 1.5707963267948966, ptr inttoptr (i64 4 to ptr)) - call void @__quantum__qis__rz__body(double 1.5707963267948966, ptr inttoptr (i64 5 to ptr)) - call void @__quantum__qis__rx__body(double 1.5707963267948966, ptr inttoptr (i64 0 to ptr)) - call void @__quantum__qis__rx__body(double 1.5707963267948966, ptr inttoptr (i64 1 to ptr)) - call void @__quantum__qis__ry__body(double 1.5707963267948966, ptr inttoptr (i64 2 to ptr)) - call void @__quantum__qis__ry__body(double 1.5707963267948966, ptr inttoptr (i64 3 to ptr)) - call void @__quantum__qis__rz__body(double 1.5707963267948966, ptr inttoptr (i64 4 to ptr)) - call void @__quantum__qis__rz__body(double 1.5707963267948966, ptr inttoptr (i64 5 to ptr)) - call void @__quantum__qis__rx__body(double 1.5707963267948966, ptr inttoptr (i64 0 to ptr)) - call void @__quantum__qis__rx__body(double 1.5707963267948966, ptr inttoptr (i64 1 to ptr)) - call void @__quantum__qis__ry__body(double 1.5707963267948966, ptr inttoptr (i64 2 to ptr)) - call void @__quantum__qis__ry__body(double 1.5707963267948966, ptr inttoptr (i64 3 to ptr)) - call void @__quantum__qis__rz__body(double 1.5707963267948966, ptr inttoptr (i64 4 to ptr)) - call void @__quantum__qis__rz__body(double 1.5707963267948966, ptr inttoptr (i64 5 to ptr)) + %var_15 = load i64, ptr %var_6 + %var_9 = add i64 %var_15, 1 + store i64 %var_9, ptr %var_6 + br label %block_1 +block_5: call void @__quantum__qis__mresetz__body(ptr inttoptr (i64 0 to ptr), ptr inttoptr (i64 0 
to ptr)) call void @__quantum__qis__mresetz__body(ptr inttoptr (i64 1 to ptr), ptr inttoptr (i64 1 to ptr)) call void @__quantum__qis__mresetz__body(ptr inttoptr (i64 2 to ptr), ptr inttoptr (i64 2 to ptr)) diff --git a/source/pip/tests-integration/resources/adaptive_rifla/output/RUS.ll b/source/pip/tests-integration/resources/adaptive_rifla/output/RUS.ll new file mode 100644 index 0000000000..ec5b579896 --- /dev/null +++ b/source/pip/tests-integration/resources/adaptive_rifla/output/RUS.ll @@ -0,0 +1,70 @@ +@0 = internal constant [4 x i8] c"0_a\00" +@1 = internal constant [6 x i8] c"1_a0r\00" +@2 = internal constant [6 x i8] c"2_a1r\00" + +define i64 @ENTRYPOINT__main() #0 { +block_0: + %var_1 = alloca i1 + call void @__quantum__rt__initialize(ptr null) + store i1 true, ptr %var_1 + br label %block_1 +block_1: + %var_7 = load i1, ptr %var_1 + br i1 %var_7, label %block_2, label %block_3 +block_2: + call void @__quantum__qis__h__body(ptr inttoptr (i64 0 to ptr)) + call void @__quantum__qis__h__body(ptr inttoptr (i64 1 to ptr)) + call void @__quantum__qis__ccx__body(ptr inttoptr (i64 0 to ptr), ptr inttoptr (i64 1 to ptr), ptr inttoptr (i64 2 to ptr)) + call void @__quantum__qis__mresetz__body(ptr inttoptr (i64 2 to ptr), ptr inttoptr (i64 0 to ptr)) + %var_3 = call i1 @__quantum__rt__read_result(ptr inttoptr (i64 0 to ptr)) + %var_4 = icmp eq i1 %var_3, false + %var_5 = xor i1 %var_4, true + store i1 %var_5, ptr %var_1 + %var_9 = load i1, ptr %var_1 + br i1 %var_9, label %block_4, label %block_5 +block_3: + call void @__quantum__qis__mresetz__body(ptr inttoptr (i64 0 to ptr), ptr inttoptr (i64 1 to ptr)) + call void @__quantum__qis__mresetz__body(ptr inttoptr (i64 1 to ptr), ptr inttoptr (i64 2 to ptr)) + call void @__quantum__rt__array_record_output(i64 2, ptr @0) + call void @__quantum__rt__result_record_output(ptr inttoptr (i64 1 to ptr), ptr @1) + call void @__quantum__rt__result_record_output(ptr inttoptr (i64 2 to ptr), ptr @2) + ret i64 0 +block_4: + call 
void @__quantum__qis__reset__body(ptr inttoptr (i64 0 to ptr)) + call void @__quantum__qis__reset__body(ptr inttoptr (i64 1 to ptr)) + br label %block_5 +block_5: + br label %block_1 +} + +declare void @__quantum__rt__initialize(ptr) + +declare void @__quantum__qis__h__body(ptr) + +declare void @__quantum__qis__ccx__body(ptr, ptr, ptr) + +declare void @__quantum__qis__mresetz__body(ptr, ptr) #1 + +declare i1 @__quantum__rt__read_result(ptr) + +declare void @__quantum__qis__reset__body(ptr) #1 + +declare void @__quantum__rt__array_record_output(i64, ptr) + +declare void @__quantum__rt__result_record_output(ptr, ptr) + +attributes #0 = { "entry_point" "output_labeling_schema" "qir_profiles"="adaptive_profile" "required_num_qubits"="3" "required_num_results"="3" } +attributes #1 = { "irreversible" } + +; module flags + +!llvm.module.flags = !{!0, !1, !2, !3, !4, !5, !6, !7} + +!0 = !{i32 1, !"qir_major_version", i32 2} +!1 = !{i32 7, !"qir_minor_version", i32 1} +!2 = !{i32 1, !"dynamic_qubit_management", i1 false} +!3 = !{i32 1, !"dynamic_result_management", i1 false} +!4 = !{i32 5, !"int_computations", !{!"i64"}} +!5 = !{i32 5, !"float_computations", !{!"double"}} +!6 = !{i32 7, !"backwards_branching", i2 3} +!7 = !{i32 1, !"arrays", i1 true} diff --git a/source/pip/tests-integration/resources/adaptive_rifla/output/RUS.out b/source/pip/tests-integration/resources/adaptive_rifla/output/RUS.out new file mode 100644 index 0000000000..22c1c6699d --- /dev/null +++ b/source/pip/tests-integration/resources/adaptive_rifla/output/RUS.out @@ -0,0 +1,10 @@ +START +METADATA entry_point +METADATA output_labeling_schema +METADATA qir_profiles adaptive_profile +METADATA required_num_qubits 3 +METADATA required_num_results 3 +OUTPUT ARRAY 2 0_a +OUTPUT RESULT 0 1_a0r +OUTPUT RESULT 0 2_a1r +END 0 diff --git a/source/pip/tests-integration/resources/adaptive_rifla/output/SwitchHandling.ll b/source/pip/tests-integration/resources/adaptive_rifla/output/SwitchHandling.ll index 
f35bcaf1ab..c0c031303e 100644 --- a/source/pip/tests-integration/resources/adaptive_rifla/output/SwitchHandling.ll +++ b/source/pip/tests-integration/resources/adaptive_rifla/output/SwitchHandling.ll @@ -13,38 +13,40 @@ block_0: %var_5 = call i1 @__quantum__rt__read_result(ptr inttoptr (i64 0 to ptr)) br i1 %var_5, label %block_1, label %block_2 block_1: - store i64 1, ptr %var_2 + %var_25 = load i64, ptr %var_2 + %var_7 = add i64 %var_25, 1 + store i64 %var_7, ptr %var_2 br label %block_2 block_2: - %var_17 = load i64, ptr %var_2 - %var_7 = shl i64 %var_17, 1 - store i64 %var_7, ptr %var_2 - %var_8 = call i1 @__quantum__rt__read_result(ptr inttoptr (i64 1 to ptr)) - br i1 %var_8, label %block_3, label %block_4 + %var_18 = load i64, ptr %var_2 + %var_8 = shl i64 %var_18, 1 + store i64 %var_8, ptr %var_2 + %var_9 = call i1 @__quantum__rt__read_result(ptr inttoptr (i64 1 to ptr)) + br i1 %var_9, label %block_3, label %block_4 block_3: - %var_22 = load i64, ptr %var_2 - %var_10 = add i64 %var_22, 1 - store i64 %var_10, ptr %var_2 + %var_23 = load i64, ptr %var_2 + %var_11 = add i64 %var_23, 1 + store i64 %var_11, ptr %var_2 br label %block_4 block_4: call void @__quantum__qis__reset__body(ptr inttoptr (i64 0 to ptr)) call void @__quantum__qis__reset__body(ptr inttoptr (i64 1 to ptr)) - %var_19 = load i64, ptr %var_2 - %var_12 = icmp eq i64 %var_19, 0 - br i1 %var_12, label %block_5, label %block_6 + %var_20 = load i64, ptr %var_2 + %var_13 = icmp eq i64 %var_20, 0 + br i1 %var_13, label %block_5, label %block_6 block_5: br label %block_13 block_6: - %var_20 = load i64, ptr %var_2 - %var_13 = icmp eq i64 %var_20, 1 - br i1 %var_13, label %block_7, label %block_8 + %var_21 = load i64, ptr %var_2 + %var_14 = icmp eq i64 %var_21, 1 + br i1 %var_14, label %block_7, label %block_8 block_7: call void @__quantum__qis__ry__body(double 3.141592653589793, ptr inttoptr (i64 2 to ptr)) br label %block_12 block_8: - %var_21 = load i64, ptr %var_2 - %var_14 = icmp eq i64 %var_21, 2 
- br i1 %var_14, label %block_9, label %block_10 + %var_22 = load i64, ptr %var_2 + %var_15 = icmp eq i64 %var_22, 2 + br i1 %var_15, label %block_9, label %block_10 block_9: call void @__quantum__qis__rz__body(double 3.141592653589793, ptr inttoptr (i64 2 to ptr)) br label %block_11 diff --git a/source/pip/tests-integration/resources/adaptive_rifla/output/ThreeQubitRepetitionCode.ll b/source/pip/tests-integration/resources/adaptive_rifla/output/ThreeQubitRepetitionCode.ll index 6dec076955..dc985de3e3 100644 --- a/source/pip/tests-integration/resources/adaptive_rifla/output/ThreeQubitRepetitionCode.ll +++ b/source/pip/tests-integration/resources/adaptive_rifla/output/ThreeQubitRepetitionCode.ll @@ -5,287 +5,110 @@ define i64 @ENTRYPOINT__main() #0 { block_0: %var_1 = alloca i64 - %var_10 = alloca i1 - %var_25 = alloca i1 - %var_41 = alloca i1 - %var_57 = alloca i1 - %var_73 = alloca i1 + %var_2 = alloca i64 + %var_4 = alloca i1 + %var_5 = alloca i64 + %var_7 = alloca i1 + %var_12 = alloca i1 call void @__quantum__rt__initialize(ptr null) call void @__quantum__qis__h__body(ptr inttoptr (i64 0 to ptr)) call void @__quantum__qis__z__body(ptr inttoptr (i64 0 to ptr)) store i64 0, ptr %var_1 call void @__quantum__qis__cx__body(ptr inttoptr (i64 0 to ptr), ptr inttoptr (i64 1 to ptr)) call void @__quantum__qis__cx__body(ptr inttoptr (i64 0 to ptr), ptr inttoptr (i64 2 to ptr)) - call void @__quantum__qis__rx__body(double 1.5707963267948966, ptr inttoptr (i64 0 to ptr)) - call void @__quantum__qis__rx__body(double 1.5707963267948966, ptr inttoptr (i64 1 to ptr)) - call void @__quantum__qis__rx__body(double 1.5707963267948966, ptr inttoptr (i64 2 to ptr)) - call void @__quantum__qis__rx__body(double 1.5707963267948966, ptr inttoptr (i64 0 to ptr)) - call void @__quantum__qis__rx__body(double 1.5707963267948966, ptr inttoptr (i64 1 to ptr)) - call void @__quantum__qis__rx__body(double 1.5707963267948966, ptr inttoptr (i64 2 to ptr)) - call void 
@__quantum__qis__rx__body(double 1.5707963267948966, ptr inttoptr (i64 0 to ptr)) - call void @__quantum__qis__rx__body(double 1.5707963267948966, ptr inttoptr (i64 1 to ptr)) - call void @__quantum__qis__rx__body(double 1.5707963267948966, ptr inttoptr (i64 2 to ptr)) - call void @__quantum__qis__rx__body(double 1.5707963267948966, ptr inttoptr (i64 0 to ptr)) - call void @__quantum__qis__rx__body(double 1.5707963267948966, ptr inttoptr (i64 1 to ptr)) - call void @__quantum__qis__rx__body(double 1.5707963267948966, ptr inttoptr (i64 2 to ptr)) - call void @__quantum__qis__cx__body(ptr inttoptr (i64 0 to ptr), ptr inttoptr (i64 3 to ptr)) - call void @__quantum__qis__cx__body(ptr inttoptr (i64 1 to ptr), ptr inttoptr (i64 3 to ptr)) - call void @__quantum__qis__cx__body(ptr inttoptr (i64 1 to ptr), ptr inttoptr (i64 4 to ptr)) - call void @__quantum__qis__cx__body(ptr inttoptr (i64 2 to ptr), ptr inttoptr (i64 4 to ptr)) - call void @__quantum__qis__mresetz__body(ptr inttoptr (i64 3 to ptr), ptr inttoptr (i64 0 to ptr)) - call void @__quantum__qis__mresetz__body(ptr inttoptr (i64 4 to ptr), ptr inttoptr (i64 1 to ptr)) - store i1 true, ptr %var_10 - %var_11 = call i1 @__quantum__rt__read_result(ptr inttoptr (i64 0 to ptr)) - br i1 %var_11, label %block_1, label %block_2 + store i64 1, ptr %var_2 + br label %block_1 block_1: - %var_13 = call i1 @__quantum__rt__read_result(ptr inttoptr (i64 1 to ptr)) - br i1 %var_13, label %block_3, label %block_4 + %var_25 = load i64, ptr %var_2 + %var_3 = icmp sle i64 %var_25, 5 + store i1 true, ptr %var_4 + br i1 %var_3, label %block_2, label %block_3 block_2: - %var_15 = call i1 @__quantum__rt__read_result(ptr inttoptr (i64 1 to ptr)) - br i1 %var_15, label %block_5, label %block_6 + %var_28 = load i1, ptr %var_4 + br i1 %var_28, label %block_4, label %block_5 block_3: - call void @__quantum__qis__x__body(ptr inttoptr (i64 1 to ptr)) - br label %block_7 + store i1 false, ptr %var_4 + br label %block_2 block_4: - call void 
@__quantum__qis__x__body(ptr inttoptr (i64 0 to ptr)) - br label %block_7 + store i64 1, ptr %var_5 + br label %block_6 block_5: - call void @__quantum__qis__x__body(ptr inttoptr (i64 2 to ptr)) - br label %block_8 + call void @__quantum__qis__cx__body(ptr inttoptr (i64 0 to ptr), ptr inttoptr (i64 2 to ptr)) + call void @__quantum__qis__cx__body(ptr inttoptr (i64 0 to ptr), ptr inttoptr (i64 1 to ptr)) + call void @__quantum__qis__h__body(ptr inttoptr (i64 0 to ptr)) + call void @__quantum__qis__mresetz__body(ptr inttoptr (i64 0 to ptr), ptr inttoptr (i64 2 to ptr)) + %var_22 = call i1 @__quantum__rt__read_result(ptr inttoptr (i64 2 to ptr)) + call void @__quantum__qis__reset__body(ptr inttoptr (i64 1 to ptr)) + call void @__quantum__qis__reset__body(ptr inttoptr (i64 2 to ptr)) + call void @__quantum__rt__tuple_record_output(i64 2, ptr @0) + call void @__quantum__rt__bool_record_output(i1 %var_22, ptr @1) + %var_29 = load i64, ptr %var_1 + call void @__quantum__rt__int_record_output(i64 %var_29, ptr @2) + ret i64 0 block_6: - store i1 false, ptr %var_10 - br label %block_8 + %var_31 = load i64, ptr %var_5 + %var_6 = icmp sle i64 %var_31, 4 + store i1 true, ptr %var_7 + br i1 %var_6, label %block_7, label %block_8 block_7: - br label %block_9 + %var_34 = load i1, ptr %var_7 + br i1 %var_34, label %block_9, label %block_10 block_8: - br label %block_9 + store i1 false, ptr %var_7 + br label %block_7 block_9: - %var_86 = load i1, ptr %var_10 - br i1 %var_86, label %block_10, label %block_11 -block_10: - store i64 1, ptr %var_1 - br label %block_11 -block_11: - call void @__quantum__qis__rx__body(double 1.5707963267948966, ptr inttoptr (i64 0 to ptr)) - call void @__quantum__qis__rx__body(double 1.5707963267948966, ptr inttoptr (i64 1 to ptr)) - call void @__quantum__qis__rx__body(double 1.5707963267948966, ptr inttoptr (i64 2 to ptr)) - call void @__quantum__qis__rx__body(double 1.5707963267948966, ptr inttoptr (i64 0 to ptr)) - call void 
@__quantum__qis__rx__body(double 1.5707963267948966, ptr inttoptr (i64 1 to ptr)) - call void @__quantum__qis__rx__body(double 1.5707963267948966, ptr inttoptr (i64 2 to ptr)) - call void @__quantum__qis__rx__body(double 1.5707963267948966, ptr inttoptr (i64 0 to ptr)) - call void @__quantum__qis__rx__body(double 1.5707963267948966, ptr inttoptr (i64 1 to ptr)) - call void @__quantum__qis__rx__body(double 1.5707963267948966, ptr inttoptr (i64 2 to ptr)) call void @__quantum__qis__rx__body(double 1.5707963267948966, ptr inttoptr (i64 0 to ptr)) call void @__quantum__qis__rx__body(double 1.5707963267948966, ptr inttoptr (i64 1 to ptr)) call void @__quantum__qis__rx__body(double 1.5707963267948966, ptr inttoptr (i64 2 to ptr)) + %var_42 = load i64, ptr %var_5 + %var_9 = add i64 %var_42, 1 + store i64 %var_9, ptr %var_5 + br label %block_6 +block_10: call void @__quantum__qis__cx__body(ptr inttoptr (i64 0 to ptr), ptr inttoptr (i64 3 to ptr)) call void @__quantum__qis__cx__body(ptr inttoptr (i64 1 to ptr), ptr inttoptr (i64 3 to ptr)) call void @__quantum__qis__cx__body(ptr inttoptr (i64 1 to ptr), ptr inttoptr (i64 4 to ptr)) call void @__quantum__qis__cx__body(ptr inttoptr (i64 2 to ptr), ptr inttoptr (i64 4 to ptr)) - call void @__quantum__qis__mresetz__body(ptr inttoptr (i64 3 to ptr), ptr inttoptr (i64 2 to ptr)) - call void @__quantum__qis__mresetz__body(ptr inttoptr (i64 4 to ptr), ptr inttoptr (i64 3 to ptr)) - store i1 true, ptr %var_25 - %var_26 = call i1 @__quantum__rt__read_result(ptr inttoptr (i64 2 to ptr)) - br i1 %var_26, label %block_12, label %block_13 + call void @__quantum__qis__mresetz__body(ptr inttoptr (i64 3 to ptr), ptr inttoptr (i64 0 to ptr)) + call void @__quantum__qis__mresetz__body(ptr inttoptr (i64 4 to ptr), ptr inttoptr (i64 1 to ptr)) + store i1 true, ptr %var_12 + %var_13 = call i1 @__quantum__rt__read_result(ptr inttoptr (i64 0 to ptr)) + br i1 %var_13, label %block_11, label %block_12 +block_11: + %var_15 = call i1 
@__quantum__rt__read_result(ptr inttoptr (i64 1 to ptr)) + br i1 %var_15, label %block_13, label %block_14 block_12: - %var_28 = call i1 @__quantum__rt__read_result(ptr inttoptr (i64 3 to ptr)) - br i1 %var_28, label %block_14, label %block_15 + %var_17 = call i1 @__quantum__rt__read_result(ptr inttoptr (i64 1 to ptr)) + br i1 %var_17, label %block_15, label %block_16 block_13: - %var_30 = call i1 @__quantum__rt__read_result(ptr inttoptr (i64 3 to ptr)) - br i1 %var_30, label %block_16, label %block_17 -block_14: call void @__quantum__qis__x__body(ptr inttoptr (i64 1 to ptr)) - br label %block_18 -block_15: + br label %block_17 +block_14: call void @__quantum__qis__x__body(ptr inttoptr (i64 0 to ptr)) + br label %block_17 +block_15: + call void @__quantum__qis__x__body(ptr inttoptr (i64 2 to ptr)) br label %block_18 block_16: - call void @__quantum__qis__x__body(ptr inttoptr (i64 2 to ptr)) - br label %block_19 + store i1 false, ptr %var_12 + br label %block_18 block_17: - store i1 false, ptr %var_25 br label %block_19 block_18: - br label %block_20 + br label %block_19 block_19: - br label %block_20 + %var_37 = load i1, ptr %var_12 + br i1 %var_37, label %block_20, label %block_21 block_20: - %var_89 = load i1, ptr %var_25 - br i1 %var_89, label %block_21, label %block_22 + %var_40 = load i64, ptr %var_1 + %var_20 = add i64 %var_40, 1 + store i64 %var_20, ptr %var_1 + br label %block_21 block_21: - %var_106 = load i64, ptr %var_1 - %var_33 = add i64 %var_106, 1 - store i64 %var_33, ptr %var_1 - br label %block_22 -block_22: - call void @__quantum__qis__rx__body(double 1.5707963267948966, ptr inttoptr (i64 0 to ptr)) - call void @__quantum__qis__rx__body(double 1.5707963267948966, ptr inttoptr (i64 1 to ptr)) - call void @__quantum__qis__rx__body(double 1.5707963267948966, ptr inttoptr (i64 2 to ptr)) - call void @__quantum__qis__rx__body(double 1.5707963267948966, ptr inttoptr (i64 0 to ptr)) - call void @__quantum__qis__rx__body(double 1.5707963267948966, ptr 
inttoptr (i64 1 to ptr)) - call void @__quantum__qis__rx__body(double 1.5707963267948966, ptr inttoptr (i64 2 to ptr)) - call void @__quantum__qis__rx__body(double 1.5707963267948966, ptr inttoptr (i64 0 to ptr)) - call void @__quantum__qis__rx__body(double 1.5707963267948966, ptr inttoptr (i64 1 to ptr)) - call void @__quantum__qis__rx__body(double 1.5707963267948966, ptr inttoptr (i64 2 to ptr)) - call void @__quantum__qis__rx__body(double 1.5707963267948966, ptr inttoptr (i64 0 to ptr)) - call void @__quantum__qis__rx__body(double 1.5707963267948966, ptr inttoptr (i64 1 to ptr)) - call void @__quantum__qis__rx__body(double 1.5707963267948966, ptr inttoptr (i64 2 to ptr)) - call void @__quantum__qis__cx__body(ptr inttoptr (i64 0 to ptr), ptr inttoptr (i64 3 to ptr)) - call void @__quantum__qis__cx__body(ptr inttoptr (i64 1 to ptr), ptr inttoptr (i64 3 to ptr)) - call void @__quantum__qis__cx__body(ptr inttoptr (i64 1 to ptr), ptr inttoptr (i64 4 to ptr)) - call void @__quantum__qis__cx__body(ptr inttoptr (i64 2 to ptr), ptr inttoptr (i64 4 to ptr)) - call void @__quantum__qis__mresetz__body(ptr inttoptr (i64 3 to ptr), ptr inttoptr (i64 4 to ptr)) - call void @__quantum__qis__mresetz__body(ptr inttoptr (i64 4 to ptr), ptr inttoptr (i64 5 to ptr)) - store i1 true, ptr %var_41 - %var_42 = call i1 @__quantum__rt__read_result(ptr inttoptr (i64 4 to ptr)) - br i1 %var_42, label %block_23, label %block_24 -block_23: - %var_44 = call i1 @__quantum__rt__read_result(ptr inttoptr (i64 5 to ptr)) - br i1 %var_44, label %block_25, label %block_26 -block_24: - %var_46 = call i1 @__quantum__rt__read_result(ptr inttoptr (i64 5 to ptr)) - br i1 %var_46, label %block_27, label %block_28 -block_25: - call void @__quantum__qis__x__body(ptr inttoptr (i64 1 to ptr)) - br label %block_29 -block_26: - call void @__quantum__qis__x__body(ptr inttoptr (i64 0 to ptr)) - br label %block_29 -block_27: - call void @__quantum__qis__x__body(ptr inttoptr (i64 2 to ptr)) - br label %block_30 
-block_28: - store i1 false, ptr %var_41 - br label %block_30 -block_29: - br label %block_31 -block_30: - br label %block_31 -block_31: - %var_92 = load i1, ptr %var_41 - br i1 %var_92, label %block_32, label %block_33 -block_32: - %var_104 = load i64, ptr %var_1 - %var_49 = add i64 %var_104, 1 - store i64 %var_49, ptr %var_1 - br label %block_33 -block_33: - call void @__quantum__qis__rx__body(double 1.5707963267948966, ptr inttoptr (i64 0 to ptr)) - call void @__quantum__qis__rx__body(double 1.5707963267948966, ptr inttoptr (i64 1 to ptr)) - call void @__quantum__qis__rx__body(double 1.5707963267948966, ptr inttoptr (i64 2 to ptr)) - call void @__quantum__qis__rx__body(double 1.5707963267948966, ptr inttoptr (i64 0 to ptr)) - call void @__quantum__qis__rx__body(double 1.5707963267948966, ptr inttoptr (i64 1 to ptr)) - call void @__quantum__qis__rx__body(double 1.5707963267948966, ptr inttoptr (i64 2 to ptr)) - call void @__quantum__qis__rx__body(double 1.5707963267948966, ptr inttoptr (i64 0 to ptr)) - call void @__quantum__qis__rx__body(double 1.5707963267948966, ptr inttoptr (i64 1 to ptr)) - call void @__quantum__qis__rx__body(double 1.5707963267948966, ptr inttoptr (i64 2 to ptr)) - call void @__quantum__qis__rx__body(double 1.5707963267948966, ptr inttoptr (i64 0 to ptr)) - call void @__quantum__qis__rx__body(double 1.5707963267948966, ptr inttoptr (i64 1 to ptr)) - call void @__quantum__qis__rx__body(double 1.5707963267948966, ptr inttoptr (i64 2 to ptr)) - call void @__quantum__qis__cx__body(ptr inttoptr (i64 0 to ptr), ptr inttoptr (i64 3 to ptr)) - call void @__quantum__qis__cx__body(ptr inttoptr (i64 1 to ptr), ptr inttoptr (i64 3 to ptr)) - call void @__quantum__qis__cx__body(ptr inttoptr (i64 1 to ptr), ptr inttoptr (i64 4 to ptr)) - call void @__quantum__qis__cx__body(ptr inttoptr (i64 2 to ptr), ptr inttoptr (i64 4 to ptr)) - call void @__quantum__qis__mresetz__body(ptr inttoptr (i64 3 to ptr), ptr inttoptr (i64 6 to ptr)) - call void 
@__quantum__qis__mresetz__body(ptr inttoptr (i64 4 to ptr), ptr inttoptr (i64 7 to ptr)) - store i1 true, ptr %var_57 - %var_58 = call i1 @__quantum__rt__read_result(ptr inttoptr (i64 6 to ptr)) - br i1 %var_58, label %block_34, label %block_35 -block_34: - %var_60 = call i1 @__quantum__rt__read_result(ptr inttoptr (i64 7 to ptr)) - br i1 %var_60, label %block_36, label %block_37 -block_35: - %var_62 = call i1 @__quantum__rt__read_result(ptr inttoptr (i64 7 to ptr)) - br i1 %var_62, label %block_38, label %block_39 -block_36: - call void @__quantum__qis__x__body(ptr inttoptr (i64 1 to ptr)) - br label %block_40 -block_37: - call void @__quantum__qis__x__body(ptr inttoptr (i64 0 to ptr)) - br label %block_40 -block_38: - call void @__quantum__qis__x__body(ptr inttoptr (i64 2 to ptr)) - br label %block_41 -block_39: - store i1 false, ptr %var_57 - br label %block_41 -block_40: - br label %block_42 -block_41: - br label %block_42 -block_42: - %var_95 = load i1, ptr %var_57 - br i1 %var_95, label %block_43, label %block_44 -block_43: - %var_102 = load i64, ptr %var_1 - %var_65 = add i64 %var_102, 1 - store i64 %var_65, ptr %var_1 - br label %block_44 -block_44: - call void @__quantum__qis__rx__body(double 1.5707963267948966, ptr inttoptr (i64 0 to ptr)) - call void @__quantum__qis__rx__body(double 1.5707963267948966, ptr inttoptr (i64 1 to ptr)) - call void @__quantum__qis__rx__body(double 1.5707963267948966, ptr inttoptr (i64 2 to ptr)) - call void @__quantum__qis__rx__body(double 1.5707963267948966, ptr inttoptr (i64 0 to ptr)) - call void @__quantum__qis__rx__body(double 1.5707963267948966, ptr inttoptr (i64 1 to ptr)) - call void @__quantum__qis__rx__body(double 1.5707963267948966, ptr inttoptr (i64 2 to ptr)) - call void @__quantum__qis__rx__body(double 1.5707963267948966, ptr inttoptr (i64 0 to ptr)) - call void @__quantum__qis__rx__body(double 1.5707963267948966, ptr inttoptr (i64 1 to ptr)) - call void @__quantum__qis__rx__body(double 1.5707963267948966, ptr 
inttoptr (i64 2 to ptr)) - call void @__quantum__qis__rx__body(double 1.5707963267948966, ptr inttoptr (i64 0 to ptr)) - call void @__quantum__qis__rx__body(double 1.5707963267948966, ptr inttoptr (i64 1 to ptr)) - call void @__quantum__qis__rx__body(double 1.5707963267948966, ptr inttoptr (i64 2 to ptr)) - call void @__quantum__qis__cx__body(ptr inttoptr (i64 0 to ptr), ptr inttoptr (i64 3 to ptr)) - call void @__quantum__qis__cx__body(ptr inttoptr (i64 1 to ptr), ptr inttoptr (i64 3 to ptr)) - call void @__quantum__qis__cx__body(ptr inttoptr (i64 1 to ptr), ptr inttoptr (i64 4 to ptr)) - call void @__quantum__qis__cx__body(ptr inttoptr (i64 2 to ptr), ptr inttoptr (i64 4 to ptr)) - call void @__quantum__qis__mresetz__body(ptr inttoptr (i64 3 to ptr), ptr inttoptr (i64 8 to ptr)) - call void @__quantum__qis__mresetz__body(ptr inttoptr (i64 4 to ptr), ptr inttoptr (i64 9 to ptr)) - store i1 true, ptr %var_73 - %var_74 = call i1 @__quantum__rt__read_result(ptr inttoptr (i64 8 to ptr)) - br i1 %var_74, label %block_45, label %block_46 -block_45: - %var_76 = call i1 @__quantum__rt__read_result(ptr inttoptr (i64 9 to ptr)) - br i1 %var_76, label %block_47, label %block_48 -block_46: - %var_78 = call i1 @__quantum__rt__read_result(ptr inttoptr (i64 9 to ptr)) - br i1 %var_78, label %block_49, label %block_50 -block_47: - call void @__quantum__qis__x__body(ptr inttoptr (i64 1 to ptr)) - br label %block_51 -block_48: - call void @__quantum__qis__x__body(ptr inttoptr (i64 0 to ptr)) - br label %block_51 -block_49: - call void @__quantum__qis__x__body(ptr inttoptr (i64 2 to ptr)) - br label %block_52 -block_50: - store i1 false, ptr %var_73 - br label %block_52 -block_51: - br label %block_53 -block_52: - br label %block_53 -block_53: - %var_98 = load i1, ptr %var_73 - br i1 %var_98, label %block_54, label %block_55 -block_54: - %var_100 = load i64, ptr %var_1 - %var_81 = add i64 %var_100, 1 - store i64 %var_81, ptr %var_1 - br label %block_55 -block_55: - call void 
@__quantum__qis__cx__body(ptr inttoptr (i64 0 to ptr), ptr inttoptr (i64 2 to ptr)) - call void @__quantum__qis__cx__body(ptr inttoptr (i64 0 to ptr), ptr inttoptr (i64 1 to ptr)) - call void @__quantum__qis__h__body(ptr inttoptr (i64 0 to ptr)) - call void @__quantum__qis__mresetz__body(ptr inttoptr (i64 0 to ptr), ptr inttoptr (i64 10 to ptr)) - %var_82 = call i1 @__quantum__rt__read_result(ptr inttoptr (i64 10 to ptr)) - call void @__quantum__qis__reset__body(ptr inttoptr (i64 1 to ptr)) - call void @__quantum__qis__reset__body(ptr inttoptr (i64 2 to ptr)) - call void @__quantum__rt__tuple_record_output(i64 2, ptr @0) - call void @__quantum__rt__bool_record_output(i1 %var_82, ptr @1) - %var_99 = load i64, ptr %var_1 - call void @__quantum__rt__int_record_output(i64 %var_99, ptr @2) - ret i64 0 + %var_38 = load i64, ptr %var_2 + %var_21 = add i64 %var_38, 1 + store i64 %var_21, ptr %var_2 + br label %block_1 } declare void @__quantum__rt__initialize(ptr) @@ -312,7 +135,7 @@ declare void @__quantum__rt__bool_record_output(i1, ptr) declare void @__quantum__rt__int_record_output(i64, ptr) -attributes #0 = { "entry_point" "output_labeling_schema" "qir_profiles"="adaptive_profile" "required_num_qubits"="5" "required_num_results"="11" } +attributes #0 = { "entry_point" "output_labeling_schema" "qir_profiles"="adaptive_profile" "required_num_qubits"="5" "required_num_results"="3" } attributes #1 = { "irreversible" } ; module flags diff --git a/source/pip/tests-integration/resources/adaptive_rifla/output/ThreeQubitRepetitionCode.out b/source/pip/tests-integration/resources/adaptive_rifla/output/ThreeQubitRepetitionCode.out index 44e0bb438a..deb1ede0a7 100644 --- a/source/pip/tests-integration/resources/adaptive_rifla/output/ThreeQubitRepetitionCode.out +++ b/source/pip/tests-integration/resources/adaptive_rifla/output/ThreeQubitRepetitionCode.out @@ -3,7 +3,7 @@ METADATA entry_point METADATA output_labeling_schema METADATA qir_profiles adaptive_profile METADATA 
required_num_qubits 5 -METADATA required_num_results 11 +METADATA required_num_results 3 OUTPUT TUPLE 2 0_t OUTPUT BOOL true 1_t0b OUTPUT INT 0 2_t1i From bbb040b2af5963e427ca5e85f4417f22ab3cbe1c Mon Sep 17 00:00:00 2001 From: "Stefan J. Wernli" Date: Fri, 3 Apr 2026 14:54:31 -0700 Subject: [PATCH 2/2] Disallow dynamic exponentiation until follow up PR --- source/compiler/qsc_rca/src/lib.rs | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/source/compiler/qsc_rca/src/lib.rs b/source/compiler/qsc_rca/src/lib.rs index 49391cbb5c..cfa86f5372 100644 --- a/source/compiler/qsc_rca/src/lib.rs +++ b/source/compiler/qsc_rca/src/lib.rs @@ -756,7 +756,11 @@ impl RuntimeFeatureFlags { capabilities |= TargetCapabilityFlags::HigherLevelConstructs; } if self.contains(RuntimeFeatureFlags::UseOfDynamicExponent) { - capabilities |= TargetCapabilityFlags::BackwardsBranching; + // capabilities |= TargetCapabilityFlags::BackwardsBranching; + + // For now, we are mapping use of a dynamic exponent to higher level constructs + // until we support emiting the equivalent loop. + capabilities |= TargetCapabilityFlags::HigherLevelConstructs; } if self.contains(RuntimeFeatureFlags::UseOfDynamicResult) { capabilities |= TargetCapabilityFlags::HigherLevelConstructs;