diff --git a/.github/workflows/coverage_check.yml b/.github/workflows/coverage_check.yml new file mode 100644 index 0000000..9fa717b --- /dev/null +++ b/.github/workflows/coverage_check.yml @@ -0,0 +1,82 @@ +name: Java Test Coverage Check + +on: + push: + branches: [ main, master ] # Adjust branches as needed + pull_request: + branches: [ main, master ] # Adjust branches as needed + +jobs: + test-coverage-check: + runs-on: ubuntu-latest + + env: + # SPRING_BOOT_PROJECT_ROOT: ${{ github.workspace }}/your-spring-boot-project-dir # If SB project is in a subdir + SPRING_BOOT_PROJECT_ROOT: ${{ github.workspace }} # Assuming SB project is at the root of the repo + # GOOGLE_API_KEY: ${{ secrets.GOOGLE_API_KEY }} # Required by TestCaseGenerator + # MAX_ITERATIONS: 3 # Optional: Override default max iterations + # TARGET_COVERAGE: 0.9 # Optional: Override default target coverage + # BUILD_TOOL: "maven" # Optional: specify maven or gradle, defaults to maven in scripts + + steps: + - name: Checkout repository + uses: actions/checkout@v4 + + - name: Set up JDK + uses: actions/setup-java@v4 + with: + java-version: '17' # Specify the Java version required by the Spring Boot project + distribution: 'temurin' # Or any other distribution + + - name: Set up Python + uses: actions/setup-python@v5 + with: + python-version: '3.10' # Specify Python version used for the test generator scripts + + - name: Install Python dependencies + run: | + python -m pip install --upgrade pip + if [ -f requirements.txt ]; then pip install -r requirements.txt; fi + + - name: Set up Maven (if using Maven) # Conditional step + if: env.BUILD_TOOL == 'maven' || env.BUILD_TOOL == '' # Default to maven if not set + run: | + # Maven is usually pre-installed on ubuntu-latest runners. This step can ensure specific version or settings if needed. + mvn --version + + - name: Set up Gradle (if using Gradle) # Conditional step + if: env.BUILD_TOOL == 'gradle' + run: | + # Gradle might need to be set up or ensured it's available + # Ensure gradlew is executable if it exists + if [ -f "./gradlew" ]; then chmod +x ./gradlew; fi + ./gradlew --version # Check if gradlew wrapper is present and executable + + - name: Run Test Generation and Coverage Check Pipeline + run: | + # Ensure SPRING_BOOT_PROJECT_ROOT is correctly set if it's a subdirectory + # export SPRING_BOOT_PROJECT_ROOT=${{ github.workspace }}/path-to-your-java-project + # Ensure GOOGLE_API_KEY is available if your LLM calls are not mocked + if [ -z "$GOOGLE_API_KEY" ]; then + echo "Warning: GOOGLE_API_KEY is not set. LLM calls might fail if not mocked." + # You might want to fail here if the key is essential: + # echo "Error: GOOGLE_API_KEY is required." + # exit 1 + fi + + echo "Running main pipeline from src/main.py..." + python src/main.py + env: + # Pass the Google API Key as an environment variable to the script + GOOGLE_API_KEY: ${{ secrets.GOOGLE_API_KEY }} + + # Optional: Upload JaCoCo reports as artifacts + - name: Upload JaCoCo Report + if: always() # Run this step even if the pipeline fails, to get the last report + uses: actions/upload-artifact@v4 + with: + name: jacoco-report + path: | # Adjust paths based on build tool and actual output + ${{ env.SPRING_BOOT_PROJECT_ROOT }}/target/site/jacoco/jacoco.xml + ${{ env.SPRING_BOOT_PROJECT_ROOT }}/build/reports/jacoco/test/jacocoTestReport.xml + if-no-files-found: ignore # Don't fail if a path is not found (e.g. 
Maven vs Gradle) diff --git a/README.md b/README.md index 43b09e9..845a30c 100644 --- a/README.md +++ b/README.md @@ -4,46 +4,136 @@ ## 🚀 Project Overview -Welcome to the **Java Test Generation Suite**! This project aims to automate the creation of comprehensive **JUnit 5** test cases for Java Spring Boot services and controllers, leveraging **Mockito** for mocking and **MockMvc** for integration testing. Our goal is to significantly reduce the manual effort involved in writing tests, thereby accelerating development cycles and improving code quality. +Welcome to the **Java Test Generation Suite**! This project automates the creation of JUnit 5 test cases for Java Spring Boot applications. It leverages Large Language Models (LLMs) to generate tests and iteratively improves test coverage based on feedback from JaCoCo code coverage reports. The core idea is to intelligently identify and target under-tested areas of the codebase, dynamically adjusting prompts to the LLM to generate more effective tests over multiple iterations. --- -## 💡 The Vision: Automated Test Generation +## 📈 Key Features -Our suite follows a structured approach to dynamically generate relevant test cases. Here's a high-level look at the project's flow: +* **Automated Test Generation**: Utilizes LLMs (specifically Gemini 1.5 Flash via Google API) to generate JUnit 5 test cases for Spring Boot services and controllers. +* **Iterative Coverage Improvement**: Employs an iterative process where JaCoCo code coverage reports are analyzed after each round of test generation and execution. +* **Targeted Prompt Engineering**: Dynamically adjusts prompts to the LLM to focus on methods and classes that have low coverage, aiming to improve specific areas. +* **Build Tool Integration**: Supports both Maven and Gradle based Spring Boot projects for building the project and generating JaCoCo reports. +* **Vector Database for Context**: Uses ChromaDB with Hugging Face embeddings (BAAI/bge-small-en-v1.5) to store and retrieve relevant code chunks, providing context to the LLM. +* **Configurable Pipeline**: Key parameters like maximum iterations, target coverage percentage, and build tool can be configured via environment variables. +* **GitHub Actions Workflow**: Includes a CI workflow (`.github/workflows/coverage_check.yml`) to automate the test generation and coverage checking process, suitable for integration into development pipelines. +* **Modular Design**: The system is broken down into distinct Python scripts/modules for pre-processing, code analysis, chunking, embedding, LLM interaction, and test execution. -![Project Flow](https://github.com/user-attachments/assets/c51a2223-bcdc-45ea-a8fc-11449e504b86) +--- + +## 🌊 Project Flow + +The system operates in an iterative loop: + +1. **Pre-processing**: Java source files are processed to remove comments, empty lines, etc. +2. **Code Analysis**: The Spring Boot application is analyzed to identify target classes (services, controllers) for test generation. +3. **Chunking & Embedding**: The relevant Java source code (target classes and their dependencies) is split into semantic chunks and stored in a ChromaDB vector database. +4. **Test Case Generation**: For each target class (or focused methods within a class): + * Relevant code chunks are retrieved from ChromaDB. + * An LLM (Gemini 1.5 Flash) is prompted with the code context and specific instructions (including focusing on low-coverage methods in later iterations) to generate a JUnit 5 test class. 
+ * The generated test is saved to the appropriate test directory in the target Spring Boot project. +5. **Test Execution & Coverage Analysis**: + * The target Spring Boot project is built using the configured build tool (Maven/Gradle). + * Tests (including newly generated ones) are executed. + * A JaCoCo code coverage report (XML) is generated. +6. **Coverage Evaluation & Iteration**: + * The JaCoCo report is parsed to determine overall line coverage and method-level coverage. + * If the overall coverage meets the `TARGET_COVERAGE` or `MAX_ITERATIONS` is reached, the process stops. + * Otherwise, methods with coverage below the target are identified. + * The list of these low-coverage methods is fed back into the Test Case Generation step for the next iteration, guiding the LLM to focus on these areas. + +*(Note: A visual diagram illustrating this iterative flow would be beneficial here but is out of scope for this text-based update.)* --- -## 📈 Current Progress +## 🛠️ How to Run + +Follow these steps to set up and run the Java Test Generation Suite: + +**1. Prerequisites:** + +* **Python**: Version 3.9 or higher. +* **Java Development Kit (JDK)**: Version 11, 17, or as required by your Spring Boot project. +* **Build Tool**: Apache Maven or Gradle installed and configured for your Spring Boot project. +* **Git**: For cloning the repository. +* **Python Dependencies**: Install using `pip install -r requirements.txt` (ensure this file is present and up-to-date in the repository). + +**2. Environment Variables (Crucial):** + +You **must** set the following environment variables: + +* `SPRING_BOOT_PROJECT_ROOT`: The absolute path to the root directory of your target Java Spring Boot project. + * Example: `export SPRING_BOOT_PROJECT_ROOT="/home/user/dev/my-spring-app"` +* `GOOGLE_API_KEY`: Your Google API key for accessing the Gemini LLM. + * Example: `export GOOGLE_API_KEY="AIzaSy..."` + +Optionally, you can also set these to override defaults: + +* `MAX_ITERATIONS`: Maximum number of test generation iterations. Defaults to `5` (as set in `src/main.py`). + * Example: `export MAX_ITERATIONS=3` +* `TARGET_COVERAGE`: Desired overall line coverage percentage (0.0 to 1.0). Defaults to `0.9` (i.e., 90%) (as set in `src/main.py`). + * Example: `export TARGET_COVERAGE=0.85` +* `BUILD_TOOL`: Specify "maven" or "gradle". Defaults to "maven" if not set (this default is handled by `src/test_runner/java_test_runner.py` and `src/main.py`). + * Example: `export BUILD_TOOL="gradle"` -I have laid a strong foundation for the core functionalities of the test generation suite: +**3. Execution:** -1. **Semantic Chunker Developed:** Successfully built a robust chunker that creates **semantic chunks** from source code. These chunks are enriched with ample metadata, crucial for the LLM's understanding and context retention. -2. **Embedder Functionality Implemented:** Created a function to embed these semantic chunks into our vector database, enabling efficient similarity searches. -3. **Retrieval QA Chain Initiated:** Set up the initial **LangChain QA retrieval chain**. This foundational step allows us to fetch relevant documents (currently hardcoded for proof of concept) based on queries. +It is recommended to use the provided shell script to run the pipeline: + +* **Using `run.sh` (Recommended):** + 1. Make the script executable: `chmod +x run.sh` + 2. **Important**: Edit `run.sh` to set your `SPRING_BOOT_PROJECT_ROOT` and `GOOGLE_API_KEY` values. You can also uncomment and set optional variables. + 3. 
Execute the script: `./run.sh` + + The `run.sh` script includes sanity checks for the required environment variables. + +* **Directly using `python src/main.py`:** + 1. Ensure all required environment variables are exported in your current shell session. + 2. Run the main script: `python3 src/main.py` (or `python src/main.py` depending on your Python installation). + +**4. Output:** + +* The pipeline will log its progress to the console. +* Generated test files will be saved directly into the `src/test/java/...` directory of your `SPRING_BOOT_PROJECT_ROOT`. +* JaCoCo reports will be generated in the standard output directories of your build tool (e.g., `target/site/jacoco/jacoco.xml` for Maven). --- -## 🎯 Next Week's Goals +## ⚙️ GitHub Workflow for CI - immediate focus is on bringing dynamic intelligence and robustness to the system: +This project includes a GitHub Actions workflow defined in `.github/workflows/coverage_check.yml`. This workflow automates the execution of the test generation and coverage analysis pipeline on pushes and pull requests to the `main` or `master` branches. -1. **Complete Dynamic Retrieval QA Chain:** Fully implement the dynamic retrieval process to intelligently fetch context for test case generation. -2. **Develop CodeAnalyser Function:** Build out the `CodeAnalyser` function. This crucial component will parse Java source files and leverage **JaCoCo reports** to dynamically adjust prompts, ensuring the generated test cases are highly relevant and target areas needing coverage. -3. **Optimize LLM API Usage:** Devise strategies to run the entire chain efficiently, specifically addressing and mitigating **rate limiting** issues with the Gemini or OpenAI API keys. +Key aspects of the workflow: +* Sets up Java and Python environments. +* Installs Python dependencies. +* Runs `src/main.py`. +* Requires `GOOGLE_API_KEY` to be set as a repository secret in GitHub Actions settings. +* The `SPRING_BOOT_PROJECT_ROOT` is assumed to be the root of the checkout repository by default but can be configured. +* The workflow's success or failure is determined by the exit code of `src/main.py` (i.e., whether the target coverage was achieved). +* It can optionally upload JaCoCo reports as build artifacts. --- -## 🛠️ Tech Stack +## 💻 Tech Stack -This project is built using a powerful combination of modern technologies: +* **Orchestration & Logic**: Python 3 +* **LLM Interaction**: LangChain, Google Generative AI (for Gemini 1.5 Flash) +* **Vector Database**: ChromaDB +* **Embeddings**: Hugging Face BAAI/bge-small-en-v1.5 +* **Java Build & Coverage**: Maven or Gradle, JaCoCo +* **CI/CD**: GitHub Actions -* **Python:** The primary programming language orchestrating the entire suite. -* **LangChain:** Utilized for advanced capabilities like semantic chunking and constructing the QA retrieval chain. -* **ChromaDB:** Our chosen Vector Database for storing and retrieving embedded code chunks efficiently. -* **Hugging Face (BAAI/bge-small-en-v1.5):** The embedding model used for converting code chunks into high-quality vector representations. -* **LLM Model - Groq Llama (llama3-8b-8192):** Currently leveraging this fast and efficient LLM for test case generation (exploring alternatives for scalability). +--- + +## ☁️ Future Enhancements / TODO + +* Refine prompt engineering for even more precise test generation. +* Optimize context retrieval from ChromaDB. +* Allow selection of specific classes/packages to target from `code_analyzer.py` output. 
+* More sophisticated error handling and recovery within the pipeline. +* UI for easier configuration and monitoring (potentially). +* Support for other types of tests (e.g., performance, security) if feasible. + +--- -* GOOD NEWS - INTEGRATION WITH Gemini-1.5-flash was sucessfull and now will be used to generate testcases . :)) +*This README provides a guide to understanding, running, and contributing to the Java Test Generation Suite.* diff --git a/run.sh b/run.sh old mode 100755 new mode 100644 index 9c7bb73..a815a15 --- a/run.sh +++ b/run.sh @@ -1,24 +1,49 @@ #!/usr/bin/env bash -set -e - -echo "[Pipeline] Starting preprocessing..." - -python3 pre-processing/processing.py - -echo "[Pipeline] Starting Spring Boot application analysis..." - -python3 src/analyzer/code_analyzer.py - -echo "[Pipeline] Starting code chunking..." - -python3 scripts/chunker.py - -echo "[Pipeline] Starting embedding chunks into ChromaDB..." - -python3 scripts/embed_chunks.py - -echo "[Pipeline] Starting test case generation..." - -python3 src/llm/test_case_generator.py - -echo "[Pipeline] All steps completed. Test generation pipeline finished." \ No newline at end of file +set -e # Exit immediately if a command exits with a non-zero status. + +# --- Configuration --- +# REQUIRED: Set the absolute path to your Spring Boot project +export SPRING_BOOT_PROJECT_ROOT="/path/to/your/spring-boot-project" + +# REQUIRED: Set your Google API Key for the LLM +export GOOGLE_API_KEY="YOUR_GOOGLE_API_KEY" + +# OPTIONAL: Override default settings from src/main.py +# export MAX_ITERATIONS=3 # Default is 5 +# export TARGET_COVERAGE=0.85 # Default is 0.9 (90%) +# export BUILD_TOOL="gradle" # Default is "maven" + +# --- Sanity Checks --- +if [ -z "$SPRING_BOOT_PROJECT_ROOT" ] || [ "$SPRING_BOOT_PROJECT_ROOT" == "/path/to/your/spring-boot-project" ]; then + echo "ERROR: SPRING_BOOT_PROJECT_ROOT is not set or is still the placeholder value." + echo "Please edit run.sh and set it to the absolute path of your Spring Boot project." + exit 1 +fi + +if [ -z "$GOOGLE_API_KEY" ] || [ "$GOOGLE_API_KEY" == "YOUR_GOOGLE_API_KEY" ]; then + echo "ERROR: GOOGLE_API_KEY is not set or is still the placeholder value." + echo "Please edit run.sh and set your Google API key." + exit 1 +fi + +if [ ! -d "$SPRING_BOOT_PROJECT_ROOT" ]; then + echo "ERROR: SPRING_BOOT_PROJECT_ROOT directory does not exist: $SPRING_BOOT_PROJECT_ROOT" + exit 1 +fi + +# --- Run the main pipeline --- +echo "[Pipeline] Starting Java Test Generation Suite via src/main.py..." +echo "Spring Boot Project: $SPRING_BOOT_PROJECT_ROOT" +echo "Build Tool: ${BUILD_TOOL:-maven}" # Print default if not set +echo "Max Iterations: ${MAX_ITERATIONS:-5}" +echo "Target Coverage: ${TARGET_COVERAGE:-0.9}" + +# Ensure src/main.py is executable or called with python +if [ -f "src/main.py" ]; then + python3 src/main.py +else + echo "ERROR: src/main.py not found!" + exit 1 +fi + +echo "[Pipeline] Execution finished." 
diff --git a/src/analyzer/code_analyzer.py b/src/analyzer/code_analyzer.py index a32c1b6..ccfc511 100644 --- a/src/analyzer/code_analyzer.py +++ b/src/analyzer/code_analyzer.py @@ -5,7 +5,11 @@ #imports TESTGEN_AUTOMATION_ROOT = Path(__file__).parent.parent.parent -SPRING_BOOT_PROJECT_ROOT = Path("/Users/tanmay/Desktop/AMRIT/BeneficiaryID-Generation-API") +import os +SPRING_BOOT_PROJECT_ROOT_STR = os.getenv("SPRING_BOOT_PROJECT_ROOT") +if not SPRING_BOOT_PROJECT_ROOT_STR: + raise ValueError("Environment variable SPRING_BOOT_PROJECT_ROOT not set.") +SPRING_BOOT_PROJECT_ROOT = Path(SPRING_BOOT_PROJECT_ROOT_STR) SPRING_BOOT_MAIN_JAVA_DIR = SPRING_BOOT_PROJECT_ROOT / "src" / "main" / "java" PROCESSED_OUTPUT_ROOT = TESTGEN_AUTOMATION_ROOT / "processed_output" TESTGEN_AUTOMATION_SRC_DIR = TESTGEN_AUTOMATION_ROOT / "src" diff --git a/src/llm/test_case_generator.py b/src/llm/test_case_generator.py index fa1052a..c84fc1d 100644 --- a/src/llm/test_case_generator.py +++ b/src/llm/test_case_generator.py @@ -5,7 +5,11 @@ from typing import List, Dict, Any, Union TESTGEN_AUTOMATION_ROOT = Path(__file__).parent.parent.parent -SPRING_BOOT_PROJECT_ROOT = Path("/Users/tanmay/Desktop/AMRIT/BeneficiaryID-Generation-API") +import os +SPRING_BOOT_PROJECT_ROOT_STR = os.getenv("SPRING_BOOT_PROJECT_ROOT") +if not SPRING_BOOT_PROJECT_ROOT_STR: + raise ValueError("Environment variable SPRING_BOOT_PROJECT_ROOT not set.") +SPRING_BOOT_PROJECT_ROOT = Path(SPRING_BOOT_PROJECT_ROOT_STR) SPRING_BOOT_MAIN_JAVA_DIR = SPRING_BOOT_PROJECT_ROOT / "src" / "main" / "java" PROCESSED_OUTPUT_ROOT = TESTGEN_AUTOMATION_ROOT / "processed_output" TESTGEN_AUTOMATION_SRC_DIR = TESTGEN_AUTOMATION_ROOT / "src" @@ -22,7 +26,8 @@ from langchain_google_genai import ChatGoogleGenerativeAI from langchain.chains import RetrievalQA from langchain.prompts import PromptTemplate -import torch +import torch +import logging # Added for logger # --- Google API Configuration --- @@ -96,6 +101,7 @@ def __init__(self, collection_name: str = "code_chunks_collection", build_tool: self.java_test_runner = JavaTestRunner(project_root=SPRING_BOOT_PROJECT_ROOT, build_tool=build_tool) # Initialize the test runner self.last_test_run_results = None # Initialize to store feedback + self.logger = logging.getLogger(__name__) # Added logger instance def _instantiate_llm(self) -> ChatGoogleGenerativeAI: if not GOOGLE_API_KEY: @@ -170,6 +176,7 @@ def generate_test_case(self, custom_imports: List[str], relevant_java_files_for_context: List[str], test_output_file_path: Path, # Added to save generated code + focused_methods: List[Dict[str, Any]] | None = None, # New parameter additional_query_instructions: str = "") -> str: """ Generates a JUnit 5 test case by querying the RetrievalQA chain, @@ -177,8 +184,50 @@ def generate_test_case(self, """ self._update_retriever_filter(relevant_java_files_for_context) + current_additional_instructions = additional_query_instructions + + if focused_methods: + # Assuming class_name in focused_methods might be fully qualified or simple. + # target_class_name is simple. + # A more robust check would be to compare against fqcn if available in focused_methods. + relevant_focused_methods_for_this_class = [ + fm for fm in focused_methods + if fm.get("class_name","").endswith(target_class_name) + ] + if relevant_focused_methods_for_this_class: + instruction_intro = ( + "\n\n--- FOCUSED COVERAGE IMPROVEMENT ---\n" + "The following methods in this class have been identified as needing better test coverage. 
" + "Please prioritize generating tests that specifically exercise these methods and improve their line coverage. " + "Pay close attention to their logic, branches, and edge cases:\n" + ) + methods_str_parts = [] + # Ensure TARGET_COVERAGE is accessible, e.g. via os.getenv or passed in + # For simplicity here, directly using a default or assuming it's part of the broader context. + # If TestCaseGenerator is meant to be more isolated, this might need to be passed. + target_coverage_str = os.getenv('TARGET_COVERAGE', '0.9') + try: + target_coverage_val = float(target_coverage_str) + except ValueError: + target_coverage_val = 0.9 # Default if env var is invalid + + for fm_info in relevant_focused_methods_for_this_class: + methods_str_parts.append( + f"- Method: `{fm_info.get('method_name', 'N/A')}` (Signature: `{fm_info.get('method_signature', 'N/A')}` " + f"Current Coverage: {fm_info.get('line_coverage', 0.0)*100:.1f}%, " + f"Target: >{target_coverage_val*100:.0f}%)" + ) + focused_instruction = instruction_intro + "\n".join(methods_str_parts) + "\n--- END FOCUSED COVERAGE IMPROVEMENT ---\n" + + if current_additional_instructions: + current_additional_instructions += "\n" + focused_instruction + else: + current_additional_instructions = focused_instruction + + self.logger.info(f"Added focused coverage instructions for {len(relevant_focused_methods_for_this_class)} methods in {target_class_name}.") + base_template = self._get_base_prompt_template( - target_class_name, target_package_name, custom_imports, additional_query_instructions + target_class_name, target_package_name, custom_imports, current_additional_instructions ) generated_code = "" diff --git a/src/main.py b/src/main.py index e69de29..0202925 100644 --- a/src/main.py +++ b/src/main.py @@ -0,0 +1,259 @@ +import os +import sys +from pathlib import Path +import subprocess +import logging +import json # Added for loading analysis results + +# Setup logging +logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s') + +# --- Configuration --- +# Add TESTGEN_AUTOMATION_ROOT to sys.path to allow importing sibling modules +TESTGEN_AUTOMATION_ROOT = Path(__file__).parent.parent +sys.path.insert(0, str(TESTGEN_AUTOMATION_ROOT)) # src.module can now be imported + +# Import TestCaseGenerator and get_test_paths after sys.path modification +from src.llm.test_case_generator import TestCaseGenerator, get_test_paths +from src.test_runner.java_test_runner import JavaTestRunner # Added for coverage analysis + + +SPRING_BOOT_PROJECT_ROOT_STR = os.getenv("SPRING_BOOT_PROJECT_ROOT") +if not SPRING_BOOT_PROJECT_ROOT_STR: + logging.error("Environment variable SPRING_BOOT_PROJECT_ROOT not set.") + sys.exit(1) +SPRING_BOOT_PROJECT_ROOT = Path(SPRING_BOOT_PROJECT_ROOT_STR) + +MAX_ITERATIONS = int(os.getenv("MAX_ITERATIONS", "5")) # Default to 5 iterations +TARGET_COVERAGE = float(os.getenv("TARGET_COVERAGE", "0.9")) # Default to 90% +methods_to_target = None # Initially, target all discovered classes/methods + +# Define paths to scripts - adjust if these scripts are refactored into functions later +PRE_PROCESSING_SCRIPT = TESTGEN_AUTOMATION_ROOT / "pre-processing" / "processing.py" +CODE_ANALYZER_SCRIPT = TESTGEN_AUTOMATION_ROOT / "src" / "analyzer" / "code_analyzer.py" +CHUNKER_SCRIPT = TESTGEN_AUTOMATION_ROOT / "scripts" / "chunker.py" +EMBED_CHUNKS_SCRIPT = TESTGEN_AUTOMATION_ROOT / "scripts" / "embed_chunks.py" + +# Output directory for analysis results (consistent with code_analyzer.py) +ANALYSIS_RESULTS_DIR = 
TESTGEN_AUTOMATION_ROOT / "analysis_results" +ANALYSIS_RESULTS_FILE = ANALYSIS_RESULTS_DIR / "spring_boot_targets.json" + + +def run_script(script_path: Path, description: str) -> bool: + """Runs a python script and logs its execution status.""" + logging.info(f"Starting: {description}...") + try: + process = subprocess.run(['python3', str(script_path)], capture_output=True, text=True, check=True) + logging.info(f"Output from {description}:\n{process.stdout}") + if process.stderr: + logging.warning(f"Stderr from {description}:\n{process.stderr}") + logging.info(f"Successfully completed: {description}.") + return True + except subprocess.CalledProcessError as e: + logging.error(f"Error during: {description}.") + logging.error(f"Return code: {e.returncode}") + logging.error(f"Stdout: {e.stdout}") + logging.error(f"Stderr: {e.stderr}") + return False + except FileNotFoundError: + logging.error(f"Error: Script not found at {script_path}") + return False + +def main_pipeline(): + global methods_to_target # Reassigned later in this function; declared global so the first read below does not raise UnboundLocalError. + logging.info("Starting main test generation pipeline...") + logging.info(f"Spring Boot project root: {SPRING_BOOT_PROJECT_ROOT}") + logging.info(f"Max iterations: {MAX_ITERATIONS}") + logging.info(f"Target coverage: {TARGET_COVERAGE*100}%") + + # 1. Pre-processing + if not run_script(PRE_PROCESSING_SCRIPT, "Pre-processing Java code"): + logging.error("Pre-processing failed. Exiting pipeline.") + sys.exit(1) + + # 2. Code Analysis + # Ensure the analysis results directory exists (code_analyzer.py should also do this) + ANALYSIS_RESULTS_DIR.mkdir(parents=True, exist_ok=True) + if not run_script(CODE_ANALYZER_SCRIPT, "Analyzing Spring Boot application"): + logging.error("Code analysis failed. Exiting pipeline.") + sys.exit(1) + + if not ANALYSIS_RESULTS_FILE.exists(): + logging.error(f"Code analysis did not produce the expected output file: {ANALYSIS_RESULTS_FILE}. Exiting.") + sys.exit(1) + logging.info(f"Code analysis results available at: {ANALYSIS_RESULTS_FILE}") + + # --- Main Loop (to be detailed in subsequent steps) --- + current_coverage = 0.0 + for iteration in range(1, MAX_ITERATIONS + 1): + logging.info(f"--- Starting Iteration {iteration}/{MAX_ITERATIONS} ---") + + # 3. Langchain/ChromaDB Setup (Chunking and Embedding) + logging.info("Running chunker script...") + if not run_script(CHUNKER_SCRIPT, "Chunking source files"): + logging.warning("Chunking script failed. This might impact test generation quality.") + # Potentially skip this iteration or exit if critical + # continue + + logging.info("Running embed chunks script...") + if not run_script(EMBED_CHUNKS_SCRIPT, "Embedding chunks into ChromaDB"): + logging.warning("Embedding script failed. This might impact test generation quality.") + # Potentially skip this iteration or exit if critical + # continue + + # 4. Test Case Generation + logging.info("Initializing Test Case Generator...") + try: + if not ANALYSIS_RESULTS_FILE.exists(): + logging.error(f"Analysis results file not found: {ANALYSIS_RESULTS_FILE}. Cannot proceed with test generation.") + break # Break the loop for this iteration + + with open(ANALYSIS_RESULTS_FILE, 'r', encoding='utf-8') as f: + discovered_targets_metadata = json.load(f) + + if not discovered_targets_metadata: + logging.info("No targets found in analysis results. Skipping test generation for this iteration.") + break # Break the loop + + # Determine build tool (e.g. 
from env var, default to maven) + build_tool = os.getenv("BUILD_TOOL", "maven").lower() + logging.info(f"Using build tool: {build_tool} for JavaTestRunner initialization.") + + test_generator = TestCaseGenerator(collection_name="code_chunks_collection", build_tool=build_tool) + + logging.info(f"Loaded {len(discovered_targets_metadata)} targets for test generation.") + + for target_info in discovered_targets_metadata: + java_file_path_abs = Path(target_info['java_file_path_abs']) + relative_processed_txt_path = Path(target_info['relative_processed_txt_path']) + target_class_name = target_info['class_name'] + target_package_name = target_info['package_name'] + identified_dependencies_filenames = target_info['dependent_filenames'] + custom_imports_list = target_info['custom_imports'] + + relevant_java_files_for_context = [java_file_path_abs.name] + identified_dependencies_filenames + relevant_java_files_for_context = list(set(relevant_java_files_for_context)) + + paths = get_test_paths(str(relative_processed_txt_path), SPRING_BOOT_PROJECT_ROOT) + test_output_dir = paths["test_output_dir"] + test_output_file_path = paths["test_output_file_path"] + + # Ensure test output directory exists + test_output_dir.mkdir(parents=True, exist_ok=True) + + logging.info(f"Generating test for: {target_class_name} ({target_package_name})") + + generated_test_code = test_generator.generate_test_case( + target_class_name=target_class_name, + target_package_name=target_package_name, + custom_imports=custom_imports_list, + relevant_java_files_for_context=relevant_java_files_for_context, + test_output_file_path=test_output_file_path, + focused_methods=methods_to_target, # New parameter + additional_query_instructions="" + ) + + if generated_test_code and test_generator.last_test_run_results and test_generator.last_test_run_results.get("status") == "SUCCESS": + logging.info(f"Successfully generated and passed test for {target_class_name}.") + else: + logging.warning(f"Failed to generate a passing test for {target_class_name} after retries. See logs from TestCaseGenerator.") + + except ImportError as e: + logging.error(f"Failed to import necessary modules for test generation: {e}") + logging.error("Ensure 'src' is in PYTHONPATH or sys.path is configured correctly.") + break # Critical error, break the loop + except Exception as e: + logging.error(f"An error occurred during test case generation setup or execution: {e}", exc_info=True) + break # Critical error, break the loop + + # 5. 
Build and JaCoCo Analysis + logging.info("Initializing Java Test Runner for coverage analysis...") + try: + # BUILD_TOOL is already defined from TestCaseGenerator section, or define again if needed + build_tool = os.getenv("BUILD_TOOL", "maven").lower() + java_test_runner = JavaTestRunner(project_root=SPRING_BOOT_PROJECT_ROOT, build_tool=build_tool) + + logging.info("Attempting to run build and get JaCoCo coverage...") + coverage_data = java_test_runner.get_coverage() + + if coverage_data: + current_coverage = coverage_data.get("overall_line_coverage", 0.0) + logging.info(f"Overall line coverage: {current_coverage * 100:.2f}%") + + methods_below_threshold_current_iteration = [] + if current_coverage < TARGET_COVERAGE: + logging.info(f"Coverage {current_coverage * 100:.2f}% is below target {TARGET_COVERAGE * 100}%.") + for method_info in coverage_data.get("methods", []): + if method_info.get("line_coverage", 0.0) < TARGET_COVERAGE: + methods_below_threshold_current_iteration.append(method_info) + + if methods_below_threshold_current_iteration: + logging.info(f"{len(methods_below_threshold_current_iteration)} methods found below target coverage. These will be focused on in the next iteration.") + methods_to_target = methods_below_threshold_current_iteration + else: + logging.info("Overall coverage is low, but no specific methods identified as below threshold in this iteration. Will revert to general targeting if this isn't the last iteration.") + methods_to_target = None + else: + logging.info(f"Target coverage of {TARGET_COVERAGE * 100}% reached!") + methods_to_target = None + else: + logging.error("Failed to get coverage data. Assuming 0% coverage for this iteration.") + current_coverage = 0.0 + methods_to_target = None + + except ImportError as e: + logging.error(f"Failed to import JavaTestRunner: {e}", exc_info=True) + current_coverage = 0.0 # Assume failure + methods_to_target = None + # break # Critical error + except Exception as e: + logging.error(f"An error occurred during build and coverage analysis: {e}", exc_info=True) + current_coverage = 0.0 # Assume failure + methods_to_target = None + # break # Critical error + + # --- Full Loop Logic --- + if current_coverage >= TARGET_COVERAGE: + logging.info(f"Target coverage of {TARGET_COVERAGE*100:.2f}% reached or exceeded. Stopping iterations.") + break # Exit loop + + if iteration == MAX_ITERATIONS: + logging.warning(f"Reached max iterations ({MAX_ITERATIONS}) without achieving target coverage.") + break # Exit loop + + if current_coverage < TARGET_COVERAGE: # This condition is implicitly met if loop continues + logging.info(f"Coverage is {current_coverage*100:.2f}%. Preparing for next iteration.") + if methods_to_target: + logging.info(f"Next iteration will focus on {len(methods_to_target)} specific methods.") + else: + logging.info("Next iteration will use general targeting due to no specific low-coverage methods identified or an issue in coverage data.") + # TODO: Implement logic to use 'methods_to_target' to guide the next round of test generation. + # This involves: + # 1. Modifying the targets for TestCaseGenerator for the next iteration if methods_to_target is not None. + # If methods_to_target IS None but coverage is low, it means we couldn't pinpoint methods, + # so TestCaseGenerator should proceed as it did in the first iteration (general targeting). + # 2. Adjusting prompts in TestCaseGenerator to focus on these specific methods. + # 3. 
Potentially re-running chunking/embedding if the context needs to be refined for these methods. + pass # Explicitly pass for now + + # --- End of Main Loop --- + + if current_coverage >= TARGET_COVERAGE: + logging.info("Pipeline completed successfully. Target coverage achieved.") + else: + logging.warning(f"Pipeline completed. Target coverage of {TARGET_COVERAGE*100}% was NOT achieved after {MAX_ITERATIONS} iterations. Final coverage: {current_coverage * 100:.2f}%.") + + # Determine exit code based on coverage + if current_coverage >= TARGET_COVERAGE: + sys.exit(0) # Success + else: + sys.exit(1) # Failure (for GitHub Actions) + + +if __name__ == "__main__": + # Ensure SPRING_BOOT_PROJECT_ROOT is set before running + if not SPRING_BOOT_PROJECT_ROOT_STR: + print("ERROR: The environment variable SPRING_BOOT_PROJECT_ROOT must be set.") + print("Example: export SPRING_BOOT_PROJECT_ROOT=/path/to/your/spring-boot-project") + sys.exit(1) + + main_pipeline() diff --git a/src/test_runner/java_test_runner.py b/src/test_runner/java_test_runner.py index be2c5bf..3b89fa3 100644 --- a/src/test_runner/java_test_runner.py +++ b/src/test_runner/java_test_runner.py @@ -1,100 +1,423 @@ import subprocess +import xml.etree.ElementTree as ET +import logging from pathlib import Path -from typing import Dict, Any, Optional +from typing import Dict, Any # Retained: Dict/Any are still used in the type hints below +import os # For env vars if used in main example +import re # For parsing test output from single test run -class JavaTestRunner: + +# Setup basic logging if no handlers are configured +if not logging.getLogger().handlers: + logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s') +class JavaTestRunner: def __init__(self, project_root: Path, build_tool: str = "maven"): self.project_root = project_root self.build_tool = build_tool.lower() + self.logger = logging.getLogger(__name__) # Use a named logger - if self.build_tool not in ["maven", "gradle"]: - raise ValueError("Unsupported build tool. Please use 'maven' or 'gradle'.") + if self.build_tool == "maven": + self.jacoco_report_path = self.project_root / "target" / "site" / "jacoco" / "jacoco.xml" + # `install` is more likely to trigger all phases including JaCoCo report generation + # `verify` might also work if the lifecycle is configured correctly. + self.build_command = ["mvn", "clean", "install"] + self.single_test_command_prefix = ["mvn", "test"] # `-Dtest=...` will be added + elif self.build_tool == "gradle": + self.jacoco_report_path = self.project_root / "build" / "reports" / "jacoco" / "test" / "jacocoTestReport.xml" + # `build` usually includes `test` and `jacocoTestReport` if configured. + # `jacocoTestReport` explicitly runs the JaCoCo report task. + self.build_command = ["./gradlew", "clean", "build", "jacocoTestReport"] + self.single_test_command_prefix = ["./gradlew", "test"] # `--tests ...` will be added + else: + raise ValueError(f"Unsupported build tool: {self.build_tool}. 
Supported: 'maven', 'gradle'.") - print(f"Initialized JavaTestRunner with build tool: {self.build_tool} for project: {self.project_root}") + self.logger.info(f"Initialized JavaTestRunner with build tool: {self.build_tool} for project: {self.project_root}") + self.logger.info(f"JaCoCo report path set to: {self.jacoco_report_path}") + self.logger.info(f"Build command set to: {' '.join(self.build_command)}") - def _run_maven_test(self, test_file_path: Path) -> Dict[str, Any]: - relative_test_path = test_file_path.relative_to(self.project_root) - test_class_name = str(relative_test_path).replace(".java", "").replace(os.sep, ".") - command = [ - "mvn", - "test", - f"-Dtest={test_class_name}", - "-DfailIfNoTests=false" - ] + def _run_maven_single_test(self, test_file_path: Path) -> Dict[str, Any]: + relative_test_path = test_file_path.relative_to(self.project_root / "src" / "test" / "java") + test_class_name = str(relative_test_path).replace(".java", "").replace(os.sep, ".") - print(f"Executing Maven command: {' '.join(command)} in {self.project_root}") + command = self.single_test_command_prefix + [f"-Dtest={test_class_name}", "-DfailIfNoTests=false"] + + self.logger.info(f"Executing Maven single test command: {' '.join(command)} in {self.project_root}") try: process = subprocess.run( command, - cwd=self.project_root, # Run Maven from the project root + cwd=self.project_root, capture_output=True, text=True, - check=False # Do not raise an exception for non-zero exit codes (we want to check stderr/stdout) + check=False ) stdout = process.stdout stderr = process.stderr return_code = process.returncode - # Simple parsing for success/failure (can be improved) if return_code == 0 and "BUILD SUCCESS" in stdout: - # Look for test results summary if "Tests run: " in stdout: test_summary_match = re.search(r"Tests run: (\d+), Failures: (\d+), Errors: (\d+), Skipped: (\d+)", stdout) if test_summary_match: - total = int(test_summary_match.group(1)) failures = int(test_summary_match.group(2)) errors = int(test_summary_match.group(3)) - skipped = int(test_summary_match.group(4)) - if failures == 0 and errors == 0: - return {"status": "SUCCESS", "message": "Tests passed successfully.", "stdout": stdout, "stderr": stderr} + return {"status": "SUCCESS", "message": "Test passed.", "stdout": stdout, "stderr": stderr} else: - return {"status": "FAILED", "message": "Tests failed or had errors.", "stdout": stdout, "stderr": stderr} - else: - return {"status": "UNKNOWN", "message": "Maven build successful but no test summary found.", "stdout": stdout, "stderr": stderr} + return {"status": "FAILED", "message": "Test failed or had errors.", "stdout": stdout, "stderr": stderr} + return {"status": "UNKNOWN", "message": "Maven build successful but no test summary found.", "stdout": stdout, "stderr": stderr} else: - return {"status": "ERROR", "message": f"Maven build failed with exit code {return_code}.", "stdout": stdout, "stderr": stderr} + return {"status": "ERROR", "message": f"Maven test execution failed with exit code {return_code}.", "stdout": stdout, "stderr": stderr} + except FileNotFoundError: + return {"status": "ERROR", "message": "Maven (mvn) command not found.", "stdout": "", "stderr": "Maven not found."} + except Exception as e: + self.logger.error(f"Unexpected error during Maven single test execution: {e}", exc_info=True) + return {"status": "ERROR", "message": f"An unexpected error occurred: {e}", "stdout": "", "stderr": str(e)} + + def _run_gradle_single_test(self, test_file_path: Path) -> Dict[str, Any]: 
+ relative_test_path = test_file_path.relative_to(self.project_root / "src" / "test" / "java") + test_class_name = str(relative_test_path).replace(".java", "").replace(os.sep, ".") + + command = self.single_test_command_prefix + [f"--tests", test_class_name] + + self.logger.info(f"Executing Gradle single test command: {' '.join(command)} in {self.project_root}") + + try: + process = subprocess.run( + command, + cwd=self.project_root, + capture_output=True, + text=True, + check=False + ) + stdout = process.stdout + stderr = process.stderr + return_code = process.returncode + + if return_code == 0 and "BUILD SUCCESSFUL" in stdout: # Gradle success message + # Gradle's output for test results is less standardized in basic console output than Maven's. + # Often, a separate HTML report is the primary source of detailed results. + # For simplicity, we'll assume if the build is successful and tests were run, it's a pass. + # More robust parsing would require looking for "X tests completed, Y failed" patterns. + if f"> Task :test" in stdout and "FAILED" not in stdout: # Basic check + return {"status": "SUCCESS", "message": "Test passed.", "stdout": stdout, "stderr": stderr} + elif "FAILED" in stdout: # If FAILED appears anywhere + return {"status": "FAILED", "message": "Test failed or had errors based on 'FAILED' in output.", "stdout": stdout, "stderr": stderr} + else: # If no clear failure, but also no clear success signal for tests. + return {"status": "UNKNOWN", "message": "Gradle build successful, but test pass/fail status unclear from stdout.", "stdout": stdout, "stderr": stderr} + elif "NO-SOURCE" in stdout or "No tests found for given includes" in stdout : # Gradle specific for no tests + return {"status": "SUCCESS", "message": "No tests found or executed, but build did not fail.", "stdout": stdout, "stderr": stderr} + + else: # Build failed + return {"status": "ERROR", "message": f"Gradle test execution failed with exit code {return_code}.", "stdout": stdout, "stderr": stderr} except FileNotFoundError: - return {"status": "ERROR", "message": "Maven (mvn) command not found. Please ensure Maven is installed and in your PATH.", "stdout": "", "stderr": "Maven not found."} + return {"status": "ERROR", "message": "Gradle (./gradlew) command not found.", "stdout": "", "stderr": "Gradle not found."} except Exception as e: - return {"status": "ERROR", "message": f"An unexpected error occurred during Maven execution: {e}", "stdout": "", "stderr": str(e)} + self.logger.error(f"Unexpected error during Gradle single test execution: {e}", exc_info=True) + return {"status": "ERROR", "message": f"An unexpected error occurred: {e}", "stdout": "", "stderr": str(e)} - def run_test(self, test_file_path: Path) -> Dict[str, Any]: - + """ + Runs a single test file using the configured build tool. + This is used for quick feedback by TestCaseGenerator. 
+ """ if not test_file_path.exists(): return {"status": "ERROR", "message": f"Test file not found: {test_file_path}", "stdout": "", "stderr": ""} - print(f"\nRunning test: {test_file_path}") - + self.logger.info(f"Running single test: {test_file_path} using {self.build_tool}") if self.build_tool == "maven": - return self._run_maven_test(test_file_path) + return self._run_maven_single_test(test_file_path) elif self.build_tool == "gradle": - return self._run_gradle_test(test_file_path) - else: + return self._run_gradle_single_test(test_file_path) + else: # Should have been caught in __init__ return {"status": "ERROR", "message": "Invalid build tool configured.", "stdout": "", "stderr": ""} -if __name__ == "__main__": - project_root = Path("/Users/tanmay/Desktop/AMRIT/BeneficiaryID-Generation-API") - example_test_file = project_root / "src" / "test" / "java" / "com" / "iemr" / "common" / "bengen" / "service" / "GenerateBeneficiaryServiceTest.java" # Replace with a real test file + def run_build_and_generate_jacoco_report(self) -> bool: + self.logger.info(f"Running full build with command: {' '.join(self.build_command)} in {self.project_root}") + try: + process = subprocess.run( + self.build_command, + cwd=self.project_root, + capture_output=True, + text=True, + check=False + ) + # Log stdout/stderr regardless of success for debugging + if process.stdout: + self.logger.info(f"Build process stdout:\n{process.stdout}") + if process.stderr: + self.logger.warning(f"Build process stderr:\n{process.stderr}") + + if process.returncode == 0: + self.logger.info(f"Build command completed successfully.") + return True + else: + self.logger.error(f"Build command failed with return code {process.returncode}.") + return False + except FileNotFoundError: + self.logger.error(f"Build command '{self.build_command[0]}' not found. Ensure '{self.build_tool}' is installed and in PATH (or gradlew is executable).") + return False + except Exception as e: + self.logger.error(f"An unexpected error occurred during build: {e}", exc_info=True) + return False + + def parse_jacoco_report(self) -> dict | None: + if not self.jacoco_report_path.exists(): + self.logger.error(f"JaCoCo report not found at: {self.jacoco_report_path}. Ensure your build tool is configured to generate it.") + return None + + self.logger.info(f"Parsing JaCoCo report: {self.jacoco_report_path}") + try: + tree = ET.parse(self.jacoco_report_path) + root = tree.getroot() + + coverage_data = { + "overall_line_coverage": 0.0, + "methods": [] # To store method-level coverage details + } + + # Overall line coverage from the first 'report > counter[type="LINE"]' + report_line_counter = root.find('./counter[@type="LINE"]') + if report_line_counter is not None: + missed_lines = int(report_line_counter.get('missed', 0)) + covered_lines = int(report_line_counter.get('covered', 0)) + total_lines = missed_lines + covered_lines + if total_lines > 0: + coverage_data["overall_line_coverage"] = covered_lines / total_lines + else: + self.logger.warning("Could not find overall LINE counter in JaCoCo report root. 
Looking for session totals if any.") + # Attempt to sum up all counters if no root counter (less common for standard reports) + all_line_counters = root.findall('.//counter[@type="LINE"]') + total_missed = sum(int(c.get('missed',0)) for c in all_line_counters) + total_covered = sum(int(c.get('covered',0)) for c in all_line_counters) + grand_total = total_missed + total_covered + if grand_total > 0: + coverage_data["overall_line_coverage"] = total_covered / grand_total + else: + self.logger.warning("No LINE counters found anywhere in the report.") + + + # Method-level coverage: report > package > class > method > counter[@type="LINE"] + for package_element in root.findall('./package'): + package_name = package_element.get('name').replace('/', '.') + for class_element in package_element.findall('./class'): + # class_name = class_element.get('name').split('/')[-1] # Simple name + # For full_class_name, JaCoCo often provides it with slashes, convert to dots + full_class_name_from_report = package_name + '.' + class_element.get('name').replace('/', '.').split('.')[-1] + + for method_element in class_element.findall('./method'): + method_name = method_element.get('name') + method_desc = method_element.get('desc') # Signature + line_num_str = method_element.get('line') # Starting line number + + method_line_counter = method_element.find('./counter[@type="LINE"]') + if method_line_counter is not None: + missed = int(method_line_counter.get('missed', 0)) + covered = int(method_line_counter.get('covered', 0)) + method_total_lines = missed + covered + method_coverage = 0.0 + if method_total_lines > 0: + method_coverage = covered / method_total_lines + + coverage_data["methods"].append({ + "class_name": full_class_name_from_report, + "method_name": method_name, + "method_signature": method_desc, + "start_line": int(line_num_str) if line_num_str and line_num_str.isdigit() else 0, + "line_coverage": method_coverage, + "covered_lines": covered, + "missed_lines": missed, + "total_lines": method_total_lines + }) + self.logger.info(f"Successfully parsed JaCoCo report. Overall line coverage: {coverage_data['overall_line_coverage']:.2%}") + return coverage_data + except ET.ParseError as e: + self.logger.error(f"Error parsing JaCoCo XML report: {e}", exc_info=True) + return None + except Exception as e: + self.logger.error(f"An unexpected error occurred during JaCoCo report parsing: {e}", exc_info=True) + return None + + def get_coverage(self) -> dict | None: + self.logger.info("Attempting to run build, generate JaCoCo report, and parse coverage...") + build_success = self.run_build_and_generate_jacoco_report() + if build_success: + self.logger.info("Build successful. Proceeding to parse JaCoCo report.") + return self.parse_jacoco_report() + else: + self.logger.error("Build failed, cannot get coverage. 
Check build logs for details.") + return None + +if __name__ == '__main__': + # Ensure basicConfig is called for the main block if running standalone for testing + logging.basicConfig(level=logging.DEBUG, format='%(asctime)s - %(levelname)s - [%(name)s] - %(message)s') + logger_main = logging.getLogger("JavaTestRunnerExample") # Specific logger for this block + + try: + PROJECT_ROOT_STR = os.getenv("SPRING_BOOT_PROJECT_ROOT") + if not PROJECT_ROOT_STR: + raise ValueError("SPRING_BOOT_PROJECT_ROOT environment variable not set for example execution.") + + project_root_path = Path(PROJECT_ROOT_STR) + + build_tool_env = os.getenv("BUILD_TOOL", "maven").lower() # Default to maven for example + logger_main.info(f"Using build tool: {build_tool_env} for JavaTestRunner example from SPRING_BOOT_PROJECT_ROOT: {project_root_path}") + + if not project_root_path.exists(): + raise FileNotFoundError(f"Project root path does not exist: {project_root_path}") + + runner = JavaTestRunner(project_root=project_root_path, build_tool=build_tool_env) + + # --- Example: Running a single test (if you have a specific test to run) --- + # This part is more for illustration, as TestCaseGenerator uses run_test internally. + # You'd need an actual test file path. + # example_test_file_str = os.getenv("EXAMPLE_TEST_FILE_PATH") + # if example_test_file_str: + # example_test_file = Path(example_test_file_str) + # if example_test_file.exists(): + # logger_main.info(f"Attempting to run single test: {example_test_file}") + # single_test_results = runner.run_test(example_test_file) + # logger_main.info(f"Single test run results: {single_test_results}") + # else: + # logger_main.warning(f"EXAMPLE_TEST_FILE_PATH '{example_test_file_str}' does not exist. Skipping single test run example.") + # else: + # logger_main.info("EXAMPLE_TEST_FILE_PATH not set. 
Skipping single test run example.") + + + # --- Example: Get overall coverage --- + logger_main.info("Attempting to get coverage information...") + coverage_info = runner.get_coverage() + + if coverage_info: + logger_main.info(f"Overall Line Coverage: {coverage_info['overall_line_coverage']:.2%}") + + low_coverage_methods = [m for m in coverage_info.get("methods", []) if m['line_coverage'] < 0.90 and m['total_lines'] > 0] + if low_coverage_methods: + logger_main.info("\nMethods with < 90% coverage:") + for method_cov in low_coverage_methods: + logger_main.info( + f" Class: {method_cov['class_name']}, Method: {method_cov['method_name']}{method_cov['method_signature']}" + f" - Coverage: {method_cov['line_coverage']:.2%} ({method_cov['covered_lines']}/{method_cov['total_lines']} lines)" + f" - Starts at line: {method_cov['start_line']}" + ) + else: + logger_main.info("All reported methods have >= 90% coverage or no methods reported with lines.") + else: + logger_main.error("Failed to get coverage information from the run.") - runner = JavaTestRunner(project_root=project_root, build_tool="maven") # or "gradle" - results = runner.run_test(example_test_file) + except ValueError as e: + logger_main.error(f"Configuration error in example: {e}") + except FileNotFoundError as e: + logger_main.error(f"File not found error in example: {e}") + except Exception as e: + logger_main.error(f"An unexpected error occurred in example usage: {e}", exc_info=True) - print("\n--- Test Results ---") - print(f"Status: {results['status']}") - print(f"Message: {results['message']}") - if results['stdout']: - print("\n--- STDOUT ---") - print(results['stdout']) - if results['stderr']: - print("\n--- STDERR ---") - print(results['stderr'])
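To make the hand-off between the two new components concrete, the snippet below illustrates the data contract between `JavaTestRunner.parse_jacoco_report()` and `TestCaseGenerator.generate_test_case(focused_methods=...)`. The dict keys mirror those emitted by `parse_jacoco_report()`; the class, method, and coverage numbers are hypothetical sample values, not output from a real run.

```python
# Hypothetical example of one entry produced by parse_jacoco_report() and later
# passed to generate_test_case(focused_methods=...). Values are illustrative only.
sample_focused_methods = [
    {
        "class_name": "com.example.service.BeneficiaryService",  # hypothetical class
        "method_name": "generateBeneficiaryIDs",                 # hypothetical method
        "method_signature": "(I)Ljava/util/List;",                # JVM descriptor as stored by JaCoCo
        "start_line": 42,
        "line_coverage": 0.35,   # 7 of 20 lines covered
        "covered_lines": 7,
        "missed_lines": 13,
        "total_lines": 20,
    },
]

# generate_test_case() keeps only entries whose class_name ends with the target
# class's simple name, then appends a "FOCUSED COVERAGE IMPROVEMENT" block to the
# LLM prompt, rendering each entry roughly as:
#   - Method: `generateBeneficiaryIDs` (Signature: `(I)Ljava/util/List;`
#     Current Coverage: 35.0%, Target: >90%)
```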