From 80b362e31eface6cd9823cbb7d9b4637e7bfbe5b Mon Sep 17 00:00:00 2001
From: Mehul Patel <11514627+nomadicmehul@users.noreply.github.com>
Date: Sat, 14 Feb 2026 11:53:33 +0100
Subject: [PATCH 1/2] fix: improve Dockerfiles for all services

- Add curl to Go and Python Dockerfiles for healthcheck support
- Use the PORT env var (default 8080) in the Rust healthcheck; point the
  Go and Python healthchecks at port 8080
- Replace wget with curl in the Go healthcheck
- Align EXPOSE directives with docker-compose configuration (port 8080)

This ensures healthchecks work correctly in containerized environments
and maintains consistency across all three language implementations.
---
 go-genai/Dockerfile   | 7 +++++--
 py-genai/Dockerfile   | 12 ++++++++----
 rust-genai/Dockerfile | 4 ++--
 3 files changed, 15 insertions(+), 8 deletions(-)

diff --git a/go-genai/Dockerfile b/go-genai/Dockerfile
index 707180e4..29e16005 100644
--- a/go-genai/Dockerfile
+++ b/go-genai/Dockerfile
@@ -23,6 +23,9 @@ FROM alpine:3.18
 
 WORKDIR /app
 
+# Install curl for healthcheck
+RUN apk add --no-cache curl
+
 # Create non-root user
 RUN adduser -D -g '' nomadicmehul
 
@@ -39,12 +42,12 @@ RUN chown -R nomadicmehul:nomadicmehul /app
 # Switch to non-root user
 USER nomadicmehul
 
-# Expose port 8080
+# Expose port
 EXPOSE 8080
 
 # Health check
 HEALTHCHECK --interval=30s --timeout=10s --start-period=5s --retries=3 \
-    CMD wget -qO- http://localhost:8080/health || exit 1
+    CMD curl -f http://localhost:8080/health || exit 1
 
 # Run the application
 CMD ["./main"]
diff --git a/py-genai/Dockerfile b/py-genai/Dockerfile
index 9b8e2196..cc621007 100644
--- a/py-genai/Dockerfile
+++ b/py-genai/Dockerfile
@@ -10,6 +10,9 @@ FROM python:3.11-slim
 
 WORKDIR /app
 
+# Install curl for healthcheck
+RUN apt-get update && apt-get install -y curl && rm -rf /var/lib/apt/lists/*
+
 # Create non-root user
 RUN adduser --disabled-password --gecos "" nomadicmehul
 
@@ -28,16 +31,17 @@ COPY static/ static/
 # Switch to non-root user
 USER nomadicmehul
 
-# Expose port 8081 (matching docker-compose.yml)
-EXPOSE 8081
+# Expose port 8080
+EXPOSE 8080
 
 # Set environment variables
 ENV PYTHONDONTWRITEBYTECODE=1 \
-    PYTHONUNBUFFERED=1
+    PYTHONUNBUFFERED=1 \
+    PORT=8080
 
 # Health check
 HEALTHCHECK --interval=30s --timeout=10s --start-period=5s --retries=3 \
-    CMD curl -f http://localhost:8081/health || exit 1
+    CMD curl -f http://localhost:8080/health || exit 1
 
 # Run the application
 CMD ["python", "app.py"]
diff --git a/rust-genai/Dockerfile b/rust-genai/Dockerfile
index b4ede888..35036a66 100644
--- a/rust-genai/Dockerfile
+++ b/rust-genai/Dockerfile
@@ -20,8 +20,8 @@ RUN useradd -m nomadicmehul
 COPY --from=builder /usr/src/rust-genai/rust-genai/target/release/rust-genai .
 COPY static/ ./static/
 COPY templates/ ./templates/
-EXPOSE 8083
+EXPOSE 8080
 USER nomadicmehul
 HEALTHCHECK --interval=30s --timeout=10s --start-period=5s --retries=3 \
-    CMD curl -f http://localhost:8083/health || exit 1
+    CMD curl -f http://localhost:${PORT:-8080}/health || exit 1
 CMD ["./rust-genai"]

From 3e01e240eaba9aab1647fcc828e50229716557a5 Mon Sep 17 00:00:00 2001
From: Mehul Patel <11514627+nomadicmehul@users.noreply.github.com>
Date: Sat, 14 Feb 2026 11:53:44 +0100
Subject: [PATCH 2/2] docs: document Docker Desktop AI integration

- Add documentation for LLAMA_URL and LLAMA_MODEL environment variables
- Clarify Docker Desktop AI model integration as recommended approach
- Document legacy LLM_BASE_URL and LLM_MODEL_NAME as fallback options
- Explain port configuration differences between local and Docker usage
- Update all service READMEs (Go, Python, Rust) for consistency

This helps users understand how to use the apps with Docker Desktop's
integrated AI models feature while maintaining backward compatibility.
---
 go-genai/README.md         | 11 +++++++++--
 py-genai/README.md         | 15 +++++++++++----
 rust-genai/INSTRUCTIONS.md | 11 +++++++----
 rust-genai/README.md       | 16 ++++++++++++----
 4 files changed, 39 insertions(+), 14 deletions(-)

diff --git a/go-genai/README.md b/go-genai/README.md
index 3cbeb65e..f9d2500b 100644
--- a/go-genai/README.md
+++ b/go-genai/README.md
@@ -17,11 +17,18 @@ A Go-powered GenAI app you can run locally using your favorite LLM — just foll
 
 ## Environment Variables
 
+### Docker Desktop AI Integration (Recommended)
+When using Docker Desktop with AI models:
+- `LLAMA_URL`: Automatically injected by Docker Desktop (AI model endpoint)
+- `LLAMA_MODEL`: Automatically injected by Docker Desktop (model name)
 - `PORT`: The port to run the server on (default: 8080)
-- `LLM_BASE_URL`: The base URL of the LLM API (required)
-- `LLM_MODEL_NAME`: The model name to use for API requests (required)
 - `LOG_LEVEL`: The logging level (default: INFO)
 
+### Legacy Configuration
+For custom LLM endpoints:
+- `LLM_BASE_URL`: The base URL of the LLM API (fallback if LLAMA_URL not set)
+- `LLM_MODEL_NAME`: The model name to use (fallback if LLAMA_MODEL not set)
+
 ## API Endpoints
 
 - `GET /`: Main chat interface
diff --git a/py-genai/README.md b/py-genai/README.md
index c99b14b7..40634e02 100644
--- a/py-genai/README.md
+++ b/py-genai/README.md
@@ -6,11 +6,18 @@ A Python-powered GenAI app you can run locally using your favorite LLM — just
 
 The application uses the following environment variables:
 
-- `LLM_BASE_URL`: The base URL of the LLM API
-- `LLM_MODEL_NAME`: The model name to use
-- `PORT`: The port to run the application on (default: 8081)
-- `DEBUG`: Set to "true" to enable debug mode (default: "false")
+### Docker Desktop AI Integration (Recommended)
+When using Docker Desktop with AI models:
+- `LLAMA_URL`: Automatically injected by Docker Desktop (AI model endpoint)
+- `LLAMA_MODEL`: Automatically injected by Docker Desktop (model name)
+- `PORT`: The port to run the application on (default: 8081 for local, 8080 in Docker)
 - `LOG_LEVEL`: Set the logging level (default: "INFO")
+- `DEBUG`: Set to "true" to enable debug mode (default: "false")
+
+### Legacy Configuration
+For custom LLM endpoints:
+- `LLM_BASE_URL`: The base URL of the LLM API (fallback if LLAMA_URL not set)
+- `LLM_MODEL_NAME`: The model name to use (fallback if LLAMA_MODEL not set)
 
 ## API Endpoints
 
diff --git a/rust-genai/INSTRUCTIONS.md b/rust-genai/INSTRUCTIONS.md
index 4095c192..1972f46e 100644
--- a/rust-genai/INSTRUCTIONS.md
+++ b/rust-genai/INSTRUCTIONS.md
@@ -21,8 +21,9 @@ cargo run
 ```bash
 docker build -t rust-genai .
 docker run -p 8083:8083 \
-  -e LLM_BASE_URL=http://your-llm-api \
-  -e LLM_MODEL_NAME=your-model \
+  -e PORT=8083 \
+  -e LLAMA_URL=http://your-llm-api \
+  -e LLAMA_MODEL=your-model \
   rust-genai
 ```
 
@@ -38,8 +39,10 @@ docker run -p 8083:8083 \
 ## 4. Configuration
 - Edit `.env` or set environment variables:
   - `PORT` (default: 8083)
-  - `LLM_BASE_URL` (required)
-  - `LLM_MODEL_NAME` (required)
+  - `LLAMA_URL` (recommended, injected by Docker Desktop AI)
+  - `LLAMA_MODEL` (recommended, injected by Docker Desktop AI)
+  - `LLM_BASE_URL` (legacy fallback)
+  - `LLM_MODEL_NAME` (legacy fallback)
   - `LOG_LEVEL` (default: info)
 
 ## 5. Notes
diff --git a/rust-genai/README.md b/rust-genai/README.md
index 0fefe42f..843a2277 100644
--- a/rust-genai/README.md
+++ b/rust-genai/README.md
@@ -3,10 +3,18 @@ This is a Rust implementation of the Hello-GenAI application.
 
 ## Environment Variables
-- `PORT`: The port to run the server on (default: 8083)
-- `LLM_BASE_URL`: The base URL of the LLM API (required)
-- `LLM_MODEL_NAME`: The model name to use for API requests (required)
-- `LOG_LEVEL`: The logging level (default: INFO)
+
+### Docker Desktop AI Integration (Recommended)
+When using Docker Desktop with AI models:
+- `LLAMA_URL`: Automatically injected by Docker Desktop (AI model endpoint)
+- `LLAMA_MODEL`: Automatically injected by Docker Desktop (model name)
+- `PORT`: The port to run the server on (default: 8083 for local, 8080 in Docker)
+- `LOG_LEVEL`: The logging level (default: info)
+
+### Legacy Configuration
+For custom LLM endpoints:
+- `LLM_BASE_URL`: The base URL of the LLM API (fallback if LLAMA_URL not set)
+- `LLM_MODEL_NAME`: The model name to use (fallback if LLAMA_MODEL not set)
 
 ## API Endpoints
 - `GET /`: Main chat interface