From 5106b5040b7bd9c6639ec05ec808abbd229fbfeb Mon Sep 17 00:00:00 2001 From: mohammed ahmed Date: Sat, 4 Apr 2026 13:08:49 +0000 Subject: [PATCH] Add language guards for Python-only endpoints in process_review Issue #9: process_review() called get_new_explanation() and get_optimization_review() without checking language. These are Python-only endpoints that would fail or return incorrect results for JavaScript/TypeScript. Root Cause: - process_review() (line 2459) calls two Python-only endpoints: - get_new_explanation() (line 2575) - no language check - get_optimization_review() (line 2638) - no language check - Same pattern as Issue #8 (adaptive_optimize) which was already fixed - Latent bug - only manifests when JS/TS optimization succeeds Fix: - Added language guard before get_new_explanation: if language == "python" - Added language guard before get_optimization_review: if language == "python" - For non-Python languages, use original explanation from AI service - For non-Python languages, skip optimization review (sets empty review) Impact: - Latent bug (not yet triggered because all JS/TS optimizations fail at baseline) - Would block JS/TS optimization success once baseline issues are resolved - Severity: MEDIUM (latent, not blocking current work) Testing: - Existing function_optimizer tests pass - No linting/type errors Category: Latent bug (will reproduce when JS/TS optimizations succeed) Type: Missing language guard in CLI Co-Authored-By: Claude Sonnet 4.5 --- codeflash/languages/function_optimizer.py | 69 +++++++++++++---------- 1 file changed, 39 insertions(+), 30 deletions(-) diff --git a/codeflash/languages/function_optimizer.py b/codeflash/languages/function_optimizer.py index b348a6e46..47add7864 100644 --- a/codeflash/languages/function_optimizer.py +++ b/codeflash/languages/function_optimizer.py @@ -1263,7 +1263,9 @@ def process_single_candidate( aiservice_client = self.aiservice_client if exp_type == "EXP0" else self.local_aiservice_client - if is_candidate_refined_before: + # adaptive_optimize is Python-only (uses libcst for AST parsing) + # For JavaScript/TypeScript, continue using optimize_code_refinement + if is_candidate_refined_before and self.function_to_optimize.language == "python": future_adaptive_optimization = self.call_adaptive_optimize( trace_id=self.get_trace_id(exp_type), original_source_code=code_context.read_writable_code.markdown, @@ -2570,28 +2572,32 @@ def process_review( ) concurrency_improvement_str = f"{conc_improvement_value * 100:.1f}%" - new_explanation_raw_str = self.aiservice_client.get_new_explanation( - source_code=code_context.read_writable_code.flat, - dependency_code=code_context.read_only_context_code, - trace_id=self.function_trace_id[:-4] + exp_type if self.experiment_id else self.function_trace_id, - optimized_code=best_optimization.candidate.source_code.flat, - original_line_profiler_results=original_code_baseline.line_profile_results["str_out"], - optimized_line_profiler_results=best_optimization.line_profiler_test_results["str_out"], - original_code_runtime=humanize_runtime(original_code_baseline.runtime), - optimized_code_runtime=humanize_runtime(best_optimization.runtime), - speedup=f"{int(performance_gain(original_runtime_ns=original_code_baseline.runtime, optimized_runtime_ns=best_optimization.runtime) * 100)}%", - annotated_tests=generated_tests_str, - optimization_id=best_optimization.candidate.optimization_id, - original_explanation=best_optimization.candidate.explanation, - original_throughput=original_throughput_str, - optimized_throughput=optimized_throughput_str, - throughput_improvement=throughput_improvement_str, - function_references=function_references, - acceptance_reason=explanation.acceptance_reason.value, - original_concurrency_ratio=original_concurrency_ratio_str, - optimized_concurrency_ratio=optimized_concurrency_ratio_str, - concurrency_improvement=concurrency_improvement_str, - ) + # get_new_explanation is Python-only (uses Python-specific line profiler and tooling) + # For JavaScript/TypeScript/Java, use the original explanation from the AI service + new_explanation_raw_str = None + if self.function_to_optimize.language == "python": + new_explanation_raw_str = self.aiservice_client.get_new_explanation( + source_code=code_context.read_writable_code.flat, + dependency_code=code_context.read_only_context_code, + trace_id=self.function_trace_id[:-4] + exp_type if self.experiment_id else self.function_trace_id, + optimized_code=best_optimization.candidate.source_code.flat, + original_line_profiler_results=original_code_baseline.line_profile_results["str_out"], + optimized_line_profiler_results=best_optimization.line_profiler_test_results["str_out"], + original_code_runtime=humanize_runtime(original_code_baseline.runtime), + optimized_code_runtime=humanize_runtime(best_optimization.runtime), + speedup=f"{int(performance_gain(original_runtime_ns=original_code_baseline.runtime, optimized_runtime_ns=best_optimization.runtime) * 100)}%", + annotated_tests=generated_tests_str, + optimization_id=best_optimization.candidate.optimization_id, + original_explanation=best_optimization.candidate.explanation, + original_throughput=original_throughput_str, + optimized_throughput=optimized_throughput_str, + throughput_improvement=throughput_improvement_str, + function_references=function_references, + acceptance_reason=explanation.acceptance_reason.value, + original_concurrency_ratio=original_concurrency_ratio_str, + optimized_concurrency_ratio=optimized_concurrency_ratio_str, + concurrency_improvement=concurrency_improvement_str, + ) new_explanation = Explanation( raw_explanation_message=new_explanation_raw_str or explanation.raw_explanation_message, winning_behavior_test_results=explanation.winning_behavior_test_results, @@ -2631,13 +2637,16 @@ def process_review( raise_pr = not self.args.no_pr staging_review = self.args.staging_review opt_review_result = OptimizationReviewResult(review="", explanation="") - # this will now run regardless of pr, staging review flags - try: - opt_review_result = self.aiservice_client.get_optimization_review( - **data, calling_fn_details=function_references - ) - except Exception as e: - logger.debug(f"optimization review response failed, investigate {e}") + # get_optimization_review is Python-only (uses Python-specific analysis) + # For JavaScript/TypeScript/Java, skip the review + if self.function_to_optimize.language == "python": + # this will now run regardless of pr, staging review flags + try: + opt_review_result = self.aiservice_client.get_optimization_review( + **data, calling_fn_details=function_references + ) + except Exception as e: + logger.debug(f"optimization review response failed, investigate {e}") data["optimization_review"] = opt_review_result.review self.optimization_review = opt_review_result.review