|
| 1 | +"""CLI entrypoint for roast-my-code.""" |
| 2 | + |
from __future__ import annotations

import logging
import os
import tempfile
from contextlib import nullcontext
from pathlib import Path
from urllib.parse import urlparse

import typer
from rich.console import Console
from rich.panel import Panel
from rich.progress import Progress, SpinnerColumn, TextColumn

from roast.analyzer import analyze
from roast.reporter import export_html_report, render_terminal_report
from roast.roaster import generate_roast
from roast.scanner import scan_repo
| 20 | + |
# Typer application: help text shown when invoked with no args,
# shell completion generation disabled.
app = typer.Typer(
    help="Brutally honest AI-powered code quality roaster.",
    add_completion=False,
    no_args_is_help=True,
)
# Shared Rich console for all user-facing output.
console = Console()
# Module logger; logging is configured inside the `roast` command.
LOGGER = logging.getLogger(__name__)
| 28 | + |
| 29 | + |
| 30 | +def _is_github_url(value: str) -> bool: |
| 31 | + return value.startswith("https://github.com") |
| 32 | + |
| 33 | + |
def _resolve_scan_target(path_or_url: str) -> tuple[Path, tempfile.TemporaryDirectory[str] | None]:
    """Resolve *path_or_url* into a directory ready for scanning.

    Returns the directory together with the ``TemporaryDirectory`` that owns
    it when the target was cloned from GitHub (``None`` for plain local
    paths); the caller is responsible for cleaning the temp dir up.

    Raises:
        RuntimeError: if cloning fails, or the local path is missing or is
            not a directory.
    """
    if not _is_github_url(path_or_url):
        local_path = Path(path_or_url).expanduser().resolve()
        if not local_path.exists():
            raise RuntimeError(f"Path does not exist: {local_path}")
        if not local_path.is_dir():
            raise RuntimeError(f"Path is not a directory: {local_path}")
        return local_path, None

    # GitPython is imported lazily so local scans never require git.
    from git import Repo
    from git.exc import GitCommandError

    clone_dir = tempfile.TemporaryDirectory(prefix="roast-my-code-")
    try:
        Repo.clone_from(path_or_url, clone_dir.name)
    except GitCommandError as exc:
        # Don't leak the temp dir when the clone fails.
        clone_dir.cleanup()
        raise RuntimeError(f"Failed to clone GitHub URL: {exc}") from exc
    return Path(clone_dir.name), clone_dir
| 53 | + |
| 54 | + |
| 55 | +def _parse_extensions(raw_extensions: str) -> list[str]: |
| 56 | + parsed = [ext.strip() for ext in raw_extensions.split(",") if ext.strip()] |
| 57 | + return parsed or ["py", "js", "ts", "jsx", "tsx"] |
| 58 | + |
| 59 | + |
@app.command()
def roast(
    path_or_url: str = typer.Argument(..., metavar="PATH_OR_URL"),
    output: str = typer.Option(
        "./roast-report.html",
        "--output",
        "-o",
        help="Save HTML report to this path.",
    ),
    model: str = typer.Option("gpt-4o-mini", "--model", help="LLM model to use."),
    no_llm: bool = typer.Option(False, "--no-llm", help="Run static analysis only, skip LLM roast."),
    extensions: str = typer.Option(
        "py,js,ts,jsx,tsx",
        "--extensions",
        help="Comma-separated file extensions to scan.",
    ),
    max_files: int = typer.Option(50, "--max-files", help="Max files to scan."),
) -> None:
    """Roast a local repository path or GitHub URL.

    Scans matching files, runs static analysis, generates a roast (via the
    LLM unless --no-llm is set, with a static fallback on LLM failure), and
    writes both an HTML report and a terminal report.
    """
    logging.basicConfig(level=logging.WARNING, format="%(levelname)s: %(message)s")
    ext_list = _parse_extensions(extensions)

    # Fail fast on a missing API key unless the user opted out of the LLM.
    if not no_llm and not os.getenv("OPENAI_API_KEY"):
        console.print(
            Panel(
                "[bold red]OPENAI_API_KEY is not set.[/]\n"
                "Set it first, for example:\n"
                "[cyan]export OPENAI_API_KEY='your-key-here'[/]\n"
                "Or run with [cyan]--no-llm[/] to skip AI roast generation.",
                title="Configuration Error",
                border_style="red",
            )
        )
        raise typer.Exit(code=1)

    try:
        target_path, temp_dir = _resolve_scan_target(path_or_url)
    except RuntimeError as exc:
        console.print(Panel(str(exc), title="Input Error", border_style="red"))
        # Chain the cause (ruff B904) so debug tracebacks keep the original error.
        raise typer.Exit(code=1) from exc

    # A cloned repo lives in a TemporaryDirectory that must be cleaned up on
    # exit; a local path needs no cleanup, hence the nullcontext stand-in.
    cleanup_ctx = nullcontext() if temp_dir is None else temp_dir

    with cleanup_ctx:
        with Progress(SpinnerColumn(), TextColumn("[bold cyan]{task.description}"), transient=True) as progress:
            task_id = progress.add_task("Scanning repository...", total=None)
            files = scan_repo(target_path, ext_list, max_files=max_files)
            progress.update(task_id, description=f"Running static analysis on {len(files)} files...")
            report = analyze(files)

        if not files:
            console.print("[yellow]No matching readable files were found. Report will be mostly empty.[/yellow]")

        if no_llm:
            roast_result = generate_roast(report, files, model=model, no_llm=True)
        else:
            with Progress(SpinnerColumn(), TextColumn("[bold magenta]{task.description}"), transient=True) as progress:
                progress.add_task("Calling LLM for roast generation...", total=None)
                try:
                    roast_result = generate_roast(report, files, model=model, no_llm=False)
                except Exception as exc:  # noqa: BLE001 -- any provider/network failure triggers fallback
                    LOGGER.warning("LLM call failed (%s). Falling back to --no-llm mode.", exc)
                    console.print(
                        "[yellow]LLM roast failed. Falling back to static roast mode (--no-llm).[/yellow]"
                    )
                    roast_result = generate_roast(report, files, model=model, no_llm=True)

        # Write the HTML file first, then summarize (with the output path) on screen.
        export_html_report(report, roast_result, output_path=output)
        render_terminal_report(report, roast_result, output_path=output, console=console)
| 129 | + |
| 130 | + |
| 131 | +if __name__ == "__main__": |
| 132 | + app() |
0 commit comments