Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
77 changes: 47 additions & 30 deletions codeflash/languages/function_optimizer.py
Original file line number Diff line number Diff line change
Expand Up @@ -1263,7 +1263,9 @@ def process_single_candidate(

aiservice_client = self.aiservice_client if exp_type == "EXP0" else self.local_aiservice_client

if is_candidate_refined_before:
# adaptive_optimize is Python-only (uses libcst for AST parsing)
# For JavaScript/TypeScript, continue using optimize_code_refinement
if is_candidate_refined_before and self.function_to_optimize.language == "python":
future_adaptive_optimization = self.call_adaptive_optimize(
trace_id=self.get_trace_id(exp_type),
original_source_code=code_context.read_writable_code.markdown,
Expand Down Expand Up @@ -2570,28 +2572,32 @@ def process_review(
)
concurrency_improvement_str = f"{conc_improvement_value * 100:.1f}%"

new_explanation_raw_str = self.aiservice_client.get_new_explanation(
source_code=code_context.read_writable_code.flat,
dependency_code=code_context.read_only_context_code,
trace_id=self.function_trace_id[:-4] + exp_type if self.experiment_id else self.function_trace_id,
optimized_code=best_optimization.candidate.source_code.flat,
original_line_profiler_results=original_code_baseline.line_profile_results["str_out"],
optimized_line_profiler_results=best_optimization.line_profiler_test_results["str_out"],
original_code_runtime=humanize_runtime(original_code_baseline.runtime),
optimized_code_runtime=humanize_runtime(best_optimization.runtime),
speedup=f"{int(performance_gain(original_runtime_ns=original_code_baseline.runtime, optimized_runtime_ns=best_optimization.runtime) * 100)}%",
annotated_tests=generated_tests_str,
optimization_id=best_optimization.candidate.optimization_id,
original_explanation=best_optimization.candidate.explanation,
original_throughput=original_throughput_str,
optimized_throughput=optimized_throughput_str,
throughput_improvement=throughput_improvement_str,
function_references=function_references,
acceptance_reason=explanation.acceptance_reason.value,
original_concurrency_ratio=original_concurrency_ratio_str,
optimized_concurrency_ratio=optimized_concurrency_ratio_str,
concurrency_improvement=concurrency_improvement_str,
)
# get_new_explanation is Python-only (uses Python-specific line profiler and tooling)
# For JavaScript/TypeScript/Java, use the original explanation from the AI service
new_explanation_raw_str = None
if self.function_to_optimize.language == "python":
new_explanation_raw_str = self.aiservice_client.get_new_explanation(
source_code=code_context.read_writable_code.flat,
dependency_code=code_context.read_only_context_code,
trace_id=self.function_trace_id[:-4] + exp_type if self.experiment_id else self.function_trace_id,
optimized_code=best_optimization.candidate.source_code.flat,
original_line_profiler_results=original_code_baseline.line_profile_results["str_out"],
optimized_line_profiler_results=best_optimization.line_profiler_test_results["str_out"],
original_code_runtime=humanize_runtime(original_code_baseline.runtime),
optimized_code_runtime=humanize_runtime(best_optimization.runtime),
speedup=f"{int(performance_gain(original_runtime_ns=original_code_baseline.runtime, optimized_runtime_ns=best_optimization.runtime) * 100)}%",
annotated_tests=generated_tests_str,
optimization_id=best_optimization.candidate.optimization_id,
original_explanation=best_optimization.candidate.explanation,
original_throughput=original_throughput_str,
optimized_throughput=optimized_throughput_str,
throughput_improvement=throughput_improvement_str,
function_references=function_references,
acceptance_reason=explanation.acceptance_reason.value,
original_concurrency_ratio=original_concurrency_ratio_str,
optimized_concurrency_ratio=optimized_concurrency_ratio_str,
concurrency_improvement=concurrency_improvement_str,
)
new_explanation = Explanation(
raw_explanation_message=new_explanation_raw_str or explanation.raw_explanation_message,
winning_behavior_test_results=explanation.winning_behavior_test_results,
Expand Down Expand Up @@ -2631,13 +2637,16 @@ def process_review(
raise_pr = not self.args.no_pr
staging_review = self.args.staging_review
opt_review_result = OptimizationReviewResult(review="", explanation="")
# this will now run regardless of pr, staging review flags
try:
opt_review_result = self.aiservice_client.get_optimization_review(
**data, calling_fn_details=function_references
)
except Exception as e:
logger.debug(f"optimization review response failed, investigate {e}")
# get_optimization_review is Python-only (uses Python-specific analysis)
# For JavaScript/TypeScript/Java, skip the review
if self.function_to_optimize.language == "python":
# this will now run regardless of pr, staging review flags
try:
opt_review_result = self.aiservice_client.get_optimization_review(
**data, calling_fn_details=function_references
)
except Exception as e:
logger.debug(f"optimization review response failed, investigate {e}")
data["optimization_review"] = opt_review_result.review
self.optimization_review = opt_review_result.review

Expand Down Expand Up @@ -2938,6 +2947,14 @@ def repair_if_possible(
logger.debug(f"Result unmatched percentage is {result_unmatched_perc * 100}%, skipping repair")
return

# code_repair is Python-only (uses libcst for AST parsing and Python-specific analysis)
# For JavaScript/TypeScript/Java, skip repair
if self.function_to_optimize.language != "python":
logger.debug(
f"Skipping repair for {self.function_to_optimize.language} (code_repair is Python-only)"
)
return

logger.debug(
f"Adding a candidate for repair, with {len(diffs)} diffs, ({result_unmatched_perc * 100}% unmatched)"
)
Expand Down
40 changes: 40 additions & 0 deletions tests/test_languages/test_code_repair_language_guard.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,40 @@
"""Test that code_repair is only called for Python, not JS/TS.

This tests the language guard added to prevent calling Python-only /ai/code_repair
endpoint for JavaScript/TypeScript functions.
"""

from codeflash.languages.language_enum import Language


def test_code_repair_should_only_run_for_python():
    """
    Verify that the language check logic correctly identifies when code_repair should run.

    Code repair uses Python-specific tools (libcst) and should only run for Python code.
    """
    # This test documents the expected behavior:
    # code_repair should only be attempted for Python functions

    # Language is presumably a str-valued enum (members compare equal to
    # plain strings) — these asserts pin that assumption, since the
    # production guard compares against the literal "python".
    assert Language.PYTHON == "python"
    assert Language.JAVASCRIPT == "javascript"
    assert Language.TYPESCRIPT == "typescript"

    # NOTE(review): the guard in the accompanying diff was added in
    # repair_if_possible(), not maybe_repair_optimization():
    #     if self.function_to_optimize.language != "python":
    #         return
    # TODO confirm which entry point is the intended home for the check.

    # For non-Python languages, repair should be skipped
    # This test serves as documentation of the intended behavior


def test_language_enum_values():
    """Check that the Language enum exposes the members the guard relies on."""
    # Every member the language guard references must exist on the enum.
    for member_name in ("PYTHON", "JAVASCRIPT", "TYPESCRIPT"):
        assert hasattr(Language, member_name)

    # The production guard compares against the plain string "python",
    # so only the Python member may compare equal to it.
    assert Language.PYTHON == "python"
    assert Language.JAVASCRIPT != "python"
    assert Language.TYPESCRIPT != "python"
Loading