|
4 | 4 | import os |
5 | 5 | import platform |
6 | 6 | import time |
7 | | -from typing import TYPE_CHECKING, Any |
| 7 | +from pathlib import Path |
| 8 | +from typing import TYPE_CHECKING, Any, cast |
8 | 9 |
|
9 | 10 | import requests |
10 | 11 | from pydantic.json import pydantic_encoder |
11 | 12 |
|
12 | 13 | from codeflash.cli_cmds.console import console, logger |
| 14 | +from codeflash.code_utils.code_replacer import is_zero_diff |
| 15 | +from codeflash.code_utils.code_utils import unified_diff_strings |
13 | 16 | from codeflash.code_utils.config_consts import N_CANDIDATES_EFFECTIVE, N_CANDIDATES_LP_EFFECTIVE |
14 | 17 | from codeflash.code_utils.env_utils import get_codeflash_api_key |
15 | 18 | from codeflash.code_utils.git_utils import get_last_commit_author_if_pr_exists, get_repo_owner_and_name |
| 19 | +from codeflash.code_utils.time_utils import humanize_runtime |
16 | 20 | from codeflash.lsp.helpers import is_LSP_enabled |
17 | 21 | from codeflash.models.ExperimentMetadata import ExperimentMetadata |
18 | 22 | from codeflash.models.models import AIServiceRefinerRequest, CodeStringsMarkdown, OptimizedCandidate |
19 | 23 | from codeflash.telemetry.posthog_cf import ph |
20 | 24 | from codeflash.version import __version__ as codeflash_version |
21 | 25 |
|
22 | 26 | if TYPE_CHECKING: |
23 | | -    from pathlib import Path |
24 | | - |
25 | 27 |     from codeflash.discovery.functions_to_optimize import FunctionToOptimize |
26 | 28 |     from codeflash.models.ExperimentMetadata import ExperimentMetadata |
27 | 29 |     from codeflash.models.models import AIServiceRefinerRequest |
| 30 | +    from codeflash.result.explanation import Explanation |
28 | 31 |
|
29 | 32 |
|
30 | 33 | class AiServiceClient: |
@@ -529,6 +532,85 @@ def generate_regression_tests( # noqa: D417 |
529 | 532 |         ph("cli-testgen-error-response", {"response_status_code": response.status_code, "error": response.text}) |
530 | 533 |         return None |
531 | 534 |
|
| 535 | +    def get_optimization_impact( |
| 536 | +        self, |
| 537 | +        original_code: dict[Path, str], |
| 538 | +        new_code: dict[Path, str], |
| 539 | +        explanation: Explanation, |
| 540 | +        existing_tests_source: str, |
| 541 | +        generated_original_test_source: str, |
| 542 | +        function_trace_id: str, |
| 543 | +        coverage_message: str, |
| 544 | +        replay_tests: str, |
| 545 | +        root_dir: Path, |
| 546 | +        concolic_tests: str,  # noqa: ARG002 |
| 547 | +    ) -> str: |
| 548 | +        """Compute the optimization impact of the current pull request. |
| 549 | +
|
| 550 | +        Args: |
| 551 | +            original_code: dict -> mapping of file paths to the original function definitions |
| 552 | +            new_code: dict -> mapping of file paths to the optimized function definitions |
| 553 | +            explanation: Explanation -> runtime and speedup details for the optimization |
| 554 | +            existing_tests_source: str -> table of existing tests |
| 555 | +            generated_original_test_source: str -> annotated generated tests |
| 556 | +            function_trace_id: str -> trace id of the function |
| 557 | +            coverage_message: str -> coverage information |
| 558 | +            replay_tests: str -> table of replay tests |
| 559 | +            root_dir: Path -> root of the git repository, used to relativize file paths in the diff |
| 560 | +            concolic_tests: str -> concolic tests (unused) |
| 561 | +
|
| 562 | +        Returns: |
| 563 | +        ------- |
| 564 | +            - 'high' or 'low' optimization impact |
| 565 | +
|
| 566 | +        """ |
| 567 | +        diff_str = "\n".join( |
| 568 | +            [ |
| 569 | +                unified_diff_strings( |
| 570 | +                    code1=original_code[p], |
| 571 | +                    code2=new_code[p], |
| 572 | +                    fromfile=Path(p).relative_to(root_dir).as_posix(), |
| 573 | +                    tofile=Path(p).relative_to(root_dir).as_posix(), |
| 574 | +                ) |
| 575 | +                for p in original_code |
| 576 | +                if not is_zero_diff(original_code[p], new_code[p]) |
| 577 | +            ] |
| 578 | +        ) |
| 579 | +        code_diff = f"```diff\n{diff_str}\n```" |
| 580 | +        logger.info("!lsp|Computing Optimization Impact…") |
| 581 | +        payload = { |
| 582 | +            "code_diff": code_diff, |
| 583 | +            "explanation": explanation.raw_explanation_message, |
| 584 | +            "existing_tests": existing_tests_source, |
| 585 | +            "generated_tests": generated_original_test_source, |
| 586 | +            "trace_id": function_trace_id, |
| 587 | +            "coverage_message": coverage_message, |
| 588 | +            "replay_tests": replay_tests, |
| 589 | +            "speedup": f"{(100 * float(explanation.speedup)):.2f}%", |
| 590 | +            "loop_count": explanation.winning_benchmarking_test_results.number_of_loops(), |
| 591 | +            "benchmark_details": explanation.benchmark_details if explanation.benchmark_details else None, |
| 592 | +            "optimized_runtime": humanize_runtime(explanation.best_runtime_ns), |
| 593 | +            "original_runtime": humanize_runtime(explanation.original_runtime_ns), |
| 594 | +        } |
| 595 | +        console.rule() |
| 596 | +        try: |
| 597 | +            response = self.make_ai_service_request("/optimization_impact", payload=payload, timeout=600) |
| 598 | +        except requests.exceptions.RequestException as e: |
| 599 | +            logger.exception(f"Error computing optimization impact: {e}") |
| 600 | +            ph("cli-optimize-error-caught", {"error": str(e)}) |
| 601 | +            return "" |
| 602 | + |
| 603 | +        if response.status_code == 200: |
| 604 | +            return cast("str", response.json()["impact"]) |
| 605 | +        try: |
| 606 | +            error = cast("str", response.json()["error"]) |
| 607 | +        except Exception: |
| 608 | +            error = response.text |
| 609 | +        logger.error(f"Error computing optimization impact: {response.status_code} - {error}") |
| 610 | +        ph("cli-optimize-error-response", {"response_status_code": response.status_code, "error": error}) |
| 611 | +        console.rule() |
| 612 | +        return "" |
| 613 | + |
532 | 614 |
|
533 | 615 | class LocalAiServiceClient(AiServiceClient): |
534 | 616 | """Client for interacting with the local AI service.""" |
|
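For context, here is a minimal, self-contained sketch of what the `code_diff` payload field assembled in `get_optimization_impact` boils down to. It uses only the standard library; `build_code_diff` is a hypothetical helper name, the plain string-equality check is a stand-in for `is_zero_diff`, and `unified_diff_strings` is assumed to be a thin wrapper around `difflib.unified_diff`:

```python
import difflib
from pathlib import Path


def build_code_diff(original_code: dict[Path, str], new_code: dict[Path, str], root_dir: Path) -> str:
    """Join per-file unified diffs, skipping files whose contents did not change."""
    diffs = []
    for path, original in original_code.items():
        optimized = new_code[path]
        if original == optimized:  # simplified stand-in for is_zero_diff()
            continue
        rel = path.relative_to(root_dir).as_posix()
        file_diff = "".join(
            difflib.unified_diff(
                original.splitlines(keepends=True),
                optimized.splitlines(keepends=True),
                fromfile=rel,
                tofile=rel,
            )
        )
        diffs.append(file_diff)
    # The AI service receives the concatenated diffs wrapped in a Markdown ```diff fence.
    return "```diff\n" + "\n".join(diffs) + "\n```"
```

In the method itself, this diff is combined with the runtime, speedup, coverage, and test information before being POSTed to the `/optimization_impact` endpoint, which is expected to return an `impact` value of `'high'` or `'low'`.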