diff --git a/astro.config.mjs b/astro.config.mjs
index e6219b0..d5b491c 100644
--- a/astro.config.mjs
+++ b/astro.config.mjs
@@ -127,6 +127,13 @@ export default defineConfig({
}
]
},
+ {
+ label: 'Demos',
+ translations: {
+ 'zh-Hant': '演示範例'
+ },
+ autogenerate: { directory: 'demos' }
+ },
...openAPISidebarGroups
],
customCss: ['./src/styles/global.css'],
diff --git a/src/content/docs/demos/01-coder.mdx b/src/content/docs/demos/01-coder.mdx
new file mode 100644
index 0000000..704fff0
--- /dev/null
+++ b/src/content/docs/demos/01-coder.mdx
@@ -0,0 +1,70 @@
+---
+title: Coder Demo
+description: Deploy Coder for cloud-based VS Code IDE environments with support for both Go and Python backends.
+---
+
+import { Steps, Aside, LinkCard } from '@astrojs/starlight/components';
+
+This guide demonstrates how to deploy Coder to provide cloud-based VS Code IDE environments for development.
+
+## Deploy Coder
+
+<Steps>
+
+1. Navigate to the Applications Store:
+
+ Open your browser and go to:
+ ```
+ /scope//applications/store
+ ```
+
+2. Search and install Coder:
+
+ - Search for `code-server` (available versions: `code-server-go` or `code-server-python`)
+ - Choose your preferred version (Go for better performance, Python for flexibility)
+ - Click on the Coder card
+ - Click the **Install** button and follow the installation wizard
+ - Review and confirm the Helm Chart configuration
+
+3. Verify the deployment:
+
+ Go to the Workloads page:
+ ```
+ /scope//applications/workloads
+ ```
+
+   - Use the **Namespace** filter to find the deployment
+   - Search for `coder` or `code-server` to locate the deployed Coder workload
+   - Verify that the workload status shows as running (a `kubectl` check is sketched after this list)
+
+4. Access Coder VS Code IDE:
+
+ - In the Workloads list, locate the Coder workload
+ - Click on the **NodePort** link to open VS Code in your browser
+
+</Steps>
+
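+If you prefer the command line, a quick `kubectl` check can confirm the same thing. This is a sketch, not part of the installation wizard: the namespace is a placeholder for whichever one you installed into, and the exact resource names depend on the chart release name you chose.
+
+```bash
+# Pods and services created by the code-server chart (placeholder namespace)
+kubectl get pods -n <namespace> | grep code-server
+kubectl get svc -n <namespace> | grep code-server   # the NodePort shown here is the one used for browser access
+```
+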
+## Initial Setup
+
+Once Coder is deployed and running:
+
+1. Access the VS Code IDE through the NodePort link in your browser
+2. You'll see the VS Code interface with a file explorer and terminal
+3. Start creating or editing files directly in the browser-based editor
+4. Use the integrated terminal for running commands and development tasks
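+
+As a quick sanity check in the integrated terminal, you can confirm the language toolchain. Which command applies depends on the chart variant you installed (`code-server-go` or `code-server-python`), and the exact tools bundled in the image are not guaranteed by this guide:
+
+```bash
+# Run inside the VS Code integrated terminal
+go version          # code-server-go variant
+python3 --version   # code-server-python variant
+```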
+
+## Next Steps
+
+Explore other demo applications to enhance your infrastructure:
+
+
+
+
\ No newline at end of file
diff --git a/src/content/docs/demos/02-jupyterhub.mdx b/src/content/docs/demos/02-jupyterhub.mdx
new file mode 100644
index 0000000..c2cc9b0
--- /dev/null
+++ b/src/content/docs/demos/02-jupyterhub.mdx
@@ -0,0 +1,73 @@
+---
+title: JupyterHub Demo
+description: Deploy JupyterHub for multi-user Jupyter notebook environments.
+---
+
+import { Steps, Aside, LinkCard } from '@astrojs/starlight/components';
+
+This guide demonstrates how to deploy JupyterHub to provide collaborative Jupyter notebook environments for multiple users.
+
+## Deploy JupyterHub
+
+<Steps>
+
+1. Navigate to the Applications Store:
+
+ Open your browser and go to:
+ ```
+ /scope//applications/store
+ ```
+
+2. Search and install JupyterHub:
+
+ - Search for `jupyterhub`
+ - Click on the JupyterHub card
+ - Click the **Install** button and follow the installation wizard
+ - Review and confirm the Helm Chart configuration
+
+3. Verify the deployment:
+
+ Go to the Workloads page:
+ ```
+ /scope//applications/workloads
+ ```
+
+ - Use the **Namespace** filter to find the deployment
+ - Search for `jupyterhub` to locate the deployed JupyterHub workload
+ - Verify that the workload status shows as running
+
+4. Access JupyterHub:
+
+   - In the same Workloads list, locate the workload named **proxy**
+   - Click on the **NodePort** link to open JupyterHub in your browser (a command-line way to find the port is sketched after this list)
+
+</Steps>
+
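+If you have `kubectl` access, you can confirm the same thing from the command line. This is a sketch: the namespace is a placeholder, and `proxy-public` is the service name used by the standard JupyterHub Helm chart, which may differ in your installation.
+
+```bash
+# JupyterHub pods (hub, proxy, user servers) in the placeholder namespace
+kubectl get pods -n <namespace>
+
+# The NodePort exposed by the proxy service (service name may vary)
+kubectl get svc -n <namespace> proxy-public
+```
+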
+## Initial Setup
+
+Once JupyterHub is deployed and running:
+
+1. Access the JupyterHub login page
+2. Create an account or log in with your credentials
+3. Start a new notebook server
+4. Begin using Jupyter notebooks for data analysis and development
+
+## Next Steps
+
+Explore other demo applications to enhance your infrastructure:
+
+
+
+
\ No newline at end of file
diff --git a/src/content/docs/demos/03-postgres.mdx b/src/content/docs/demos/03-postgres.mdx
new file mode 100644
index 0000000..039d597
--- /dev/null
+++ b/src/content/docs/demos/03-postgres.mdx
@@ -0,0 +1,274 @@
+---
+title: PostgreSQL Demo
+description: Deploy PostgreSQL database using Helm charts from the registry.
+---
+
+import { Steps, Aside, Tabs, TabItem } from '@astrojs/starlight/components';
+
+This guide demonstrates how to deploy PostgreSQL to your applications using a Helm chart from Artifact Hub.
+
+## Prerequisites
+
+Ensure you have the following tools installed:
+
+- **Helm**: [Installation Guide](https://helm.sh/docs/intro/install)
+- **Registry URL**: Obtain this from the **Commands** button on the Repositories page or the Applications Services page
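+
+You can confirm Helm is available before continuing:
+
+```bash
+helm version
+```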
+
+## Deploy PostgreSQL
+
+<Steps>
+
+1. Download the PostgreSQL Helm chart from Artifact Hub:
+
+ ```bash
+ helm pull oci://registry-1.docker.io/bitnamicharts/postgresql --version 18.2.0
+ ```
+
+ This command downloads `postgresql-18.2.0.tgz` to your local directory.
+
+2. Upload the chart to your private registry:
+
+   ```bash
+   helm push postgresql-18.2.0.tgz oci://<REGISTRY_URL>/postgres --plain-http
+   ```
+
+   Replace `<REGISTRY_URL>` with your actual registry URL (e.g., `192.168.196.42:5736`).
+
+   **Example:**
+   ```bash
+   helm push postgresql-18.2.0.tgz oci://192.168.196.42:5736/postgres --plain-http
+   ```
+
+3. After uploading, navigate to the `Applications Store` to deploy the Helm chart.
+
+   **To allow external connections**, configure the service as NodePort in the deployment settings:
+   ```yaml
+   service:
+     type: NodePort
+     port: 5432
+     nodePort: 30432 # NodePort range: 30000-32767
+   ```
+   After deployment, you can connect externally using `<NODE_IP>:<NODE_PORT>` (a `psql` example is sketched after this list).
+
+4. Retrieve the PostgreSQL password from `Applications Secrets`:
+   - Navigate to the **Applications Secrets** page
+   - Adjust the **namespace filter** in the top-right corner to select your namespace
+   - Click on the postgres-related secret entry
+   - Copy the password and decode it from base64:
+     ```bash
+     echo "<ENCODED_PASSWORD>" | base64 --decode
+     ```
+     Or use this online tool: [base64decode.org](https://www.base64decode.org/)
+   - Use the decoded password for your PostgreSQL connections (a `kubectl` alternative is sketched after this list)
+
+</Steps>
+
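+If you have `kubectl` and a local `psql` client, the password lookup and the external connection can also be done from the command line. This is a sketch under assumptions: the secret and key names follow the usual Bitnami chart conventions (`<release-name>-postgresql` / `postgres-password`) and may differ in your deployment, and the node IP and NodePort are the values you configured above.
+
+```bash
+# Read and decode the PostgreSQL password (placeholder release name and namespace)
+kubectl get secret <release-name>-postgresql -n <namespace> \
+  -o jsonpath='{.data.postgres-password}' | base64 --decode; echo
+
+# Connect from outside the cluster through the NodePort configured earlier
+psql -h <NODE_IP> -p <NODE_PORT> -U postgres -d postgres
+```
+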
+## Test with Python
+
+You can verify your PostgreSQL deployment by using Python to perform read and write operations.
+
+
+
+### Connection Information
+
+Before running the test scripts, you'll need:
+- **Host**: PostgreSQL service endpoint
+- **Port**: Database port (default: 5432)
+- **Database**: Database name (default: postgres)
+- **User**: Username (default: postgres)
+- **Password**: Database password
+
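+The examples below use the `psycopg2` driver; if it is not already installed, the binary wheel is usually the easiest way to get it:
+
+```bash
+pip install psycopg2-binary
+```
+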
+<Tabs>
+<TabItem label="Test Connection">
+
+```python
+import psycopg2
+
+try:
+    connection = psycopg2.connect(
+        host="<POSTGRES_HOST>",   # node IP (NodePort) or in-cluster service name
+        port=5432,
+        database="postgres",
+        user="postgres",
+        password="<PASSWORD>"     # decoded password from Applications Secrets
+    )
+ cursor = connection.cursor()
+ cursor.execute("SELECT version();")
+ version = cursor.fetchone()
+ print("PostgreSQL version:", version)
+ cursor.close()
+ connection.close()
+ print("✓ Connection successful!")
+except Exception as e:
+ print("✗ Connection failed:", str(e))
+```
+
+</TabItem>
+<TabItem label="Write Data">
+
+```python
+import psycopg2
+
+connection = psycopg2.connect(
+    host="<POSTGRES_HOST>",
+    port=5432,
+    database="postgres",
+    user="postgres",
+    password="<PASSWORD>"
+)
+cursor = connection.cursor()
+
+# Create a test table
+cursor.execute("""
+ CREATE TABLE IF NOT EXISTS demo_users (
+ id SERIAL PRIMARY KEY,
+ name VARCHAR(100),
+ email VARCHAR(100),
+ created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
+ )
+""")
+
+# Insert sample data
+cursor.execute("""
+ INSERT INTO demo_users (name, email)
+ VALUES (%s, %s)
+""", ("John Doe", "john@example.com"))
+
+cursor.execute("""
+ INSERT INTO demo_users (name, email)
+ VALUES (%s, %s)
+""", ("Jane Smith", "jane@example.com"))
+
+connection.commit()
+print("✓ Data inserted successfully!")
+
+cursor.close()
+connection.close()
+```
+
+</TabItem>
+<TabItem label="Read Data">
+
+```python
+import psycopg2
+
+connection = psycopg2.connect(
+    host="<POSTGRES_HOST>",
+    port=5432,
+    database="postgres",
+    user="postgres",
+    password="<PASSWORD>"
+)
+cursor = connection.cursor()
+
+# Read all data from the table
+cursor.execute("SELECT id, name, email, created_at FROM demo_users")
+rows = cursor.fetchall()
+
+print("Records in demo_users table:")
+for row in rows:
+ print(f"ID: {row[0]}, Name: {row[1]}, Email: {row[2]}, Created: {row[3]}")
+
+cursor.close()
+connection.close()
+```
+
+</TabItem>
+<TabItem label="Complete Example">
+
+```python
+import psycopg2
+from datetime import datetime
+
+class PostgreSQLDemo:
+ def __init__(self, host, port, database, user, password):
+ self.connection = psycopg2.connect(
+ host=host,
+ port=port,
+ database=database,
+ user=user,
+ password=password
+ )
+ self.cursor = self.connection.cursor()
+
+ def test_connection(self):
+ try:
+ self.cursor.execute("SELECT version();")
+ version = self.cursor.fetchone()
+ print("✓ PostgreSQL version:", version[0])
+ return True
+ except Exception as e:
+ print("✗ Connection test failed:", str(e))
+ return False
+
+ def write_data(self, name, email):
+ try:
+ self.cursor.execute("""
+ CREATE TABLE IF NOT EXISTS demo_users (
+ id SERIAL PRIMARY KEY,
+ name VARCHAR(100),
+ email VARCHAR(100),
+ created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
+ )
+ """)
+ self.cursor.execute("""
+ INSERT INTO demo_users (name, email) VALUES (%s, %s)
+ """, (name, email))
+ self.connection.commit()
+ print(f"✓ Data written: {name} ({email})")
+ except Exception as e:
+ print("✗ Write failed:", str(e))
+ self.connection.rollback()
+
+ def read_data(self):
+ try:
+ self.cursor.execute("SELECT id, name, email, created_at FROM demo_users ORDER BY id")
+ rows = self.cursor.fetchall()
+ print(f"✓ Retrieved {len(rows)} records:")
+ for row in rows:
+ print(f" ID: {row[0]}, Name: {row[1]}, Email: {row[2]}")
+ return rows
+ except Exception as e:
+ print("✗ Read failed:", str(e))
+ return None
+
+ def close(self):
+ self.cursor.close()
+ self.connection.close()
+
+# Usage
+if __name__ == "__main__":
+    db = PostgreSQLDemo(
+        host="<POSTGRES_HOST>",
+        port=5432,
+        database="postgres",
+        user="postgres",
+        password="<PASSWORD>"
+    )
+
+ db.test_connection()
+ db.write_data("Alice Johnson", "alice@example.com")
+ db.write_data("Bob Wilson", "bob@example.com")
+ db.read_data()
+ db.close()
+```
+
+</TabItem>
+</Tabs>
+
\ No newline at end of file
diff --git a/src/content/docs/demos/04-llm-model.mdx b/src/content/docs/demos/04-llm-model.mdx
new file mode 100644
index 0000000..91a4109
--- /dev/null
+++ b/src/content/docs/demos/04-llm-model.mdx
@@ -0,0 +1,303 @@
+---
+title: LLM Model Demo
+description: Deploy and test LLM models integrated with OpenAI API.
+---
+
+import { Steps, Aside, Tabs, TabItem } from '@astrojs/starlight/components';
+
+This guide demonstrates how to deploy a Large Language Model (LLM) in your OtterScale cluster and test it using Python with OpenAI API integration.
+
+## Prerequisites
+
+Ensure you have the following:
+
+- **Python 3.8+**: For running the test scripts
+- **A deployed LLM model**: The service URL and model name from the deployment you create below (no external OpenAI account is needed for these examples)
+- **Python Libraries**: `requests` and `openai`
+
+```bash
+pip install requests openai
+```
+
+## Deploy LLM Model
+
+<Steps>
+
+1. Navigate to the **Models** page in your OtterScale cluster.
+
+2. Click the **Create** button to create a new model.
+
+3. Select a model from your model artifacts:
+ - Search for available models using the cloud icon in the search box
+ - Or click the archive icon to browse model artifacts
+ - Select your desired LLM (e.g., `meta-llama/Llama-2-7b-chat`)
+
+4. Configure the model deployment:
+ - **Name**: Choose a descriptive name (e.g., `llm-demo`)
+ - **Namespace**: Select your target namespace
+ - **Prefill Configuration**: Set vGPU memory %, replica count, and tensor configuration (if needed)
+ - **Decode Configuration**: Set decoding parameters similarly
+ - **Description**: Add any relevant notes about the deployment
+
+5. Review the configuration and click **Create** to deploy the model.
+
+6. Monitor the deployment status on the Models page. The status will change from `Pending` → `Running` → `Ready`.
+
+7. Once the status shows **Ready**, click the **Test** button to verify the model API is working (an equivalent `curl` request is sketched after this list).
+
+
+</Steps>
+
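+If you want to hit the endpoint directly, the request below mirrors the one used by the Python test scripts later in this guide. The service URL and model name are placeholders for the values from your deployment, and the request shape assumes the same `/v1/chat` endpoint and `OtterScale-Model-Name` header those scripts use.
+
+```bash
+curl -X POST "<SERVICE_URL>/v1/chat" \
+  -H "Content-Type: application/json" \
+  -H "OtterScale-Model-Name: <MODEL_NAME>" \
+  -d '{"model": "RedHatAI/Llama-3.2-1B-Instruct-FP8", "prompt": "Hello"}'
+```
+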
+## Test with Python
+
+Once your LLM model is deployed and ready, you can test it using Python with OpenAI API integration.
+
+
+
+### Connection Information
+
+Before running the test scripts, you'll need:
+- **Service URL**: The HTTP endpoint of the deployed model service (e.g., `http://localhost:8000`)
+- **Model Name**: The name of the model deployment you created (e.g., `llm-demo`)
+- **Model ID**: The underlying model identifier (e.g., `RedHatAI/Llama-3.2-1B-Instruct-FP8`)
+
+<Tabs>
+<TabItem label="Simple Question">
+
+```python
+import requests
+import json
+
+# Configuration
+SERVICE_URL = "<SERVICE_URL>"  # e.g., http://localhost:8000
+MODEL_NAME = "<MODEL_NAME>"    # e.g., llm-demo
+MODEL_ID = "RedHatAI/Llama-3.2-1B-Instruct-FP8"
+
+def ask_question(question):
+ """Send a simple question to the LLM and get a response."""
+ headers = {
+ "OtterScale-Model-Name": MODEL_NAME,
+ "Content-Type": "application/json"
+ }
+
+ payload = {
+ "model": MODEL_ID,
+ "prompt": question
+ }
+
+ try:
+ response = requests.post(
+ f"{SERVICE_URL}/v1/chat",
+ headers=headers,
+ json=payload
+ )
+ response.raise_for_status()
+ result = response.json()
+ return result.get("response", result)
+ except Exception as e:
+ return f"✗ Error: {str(e)}"
+
+# Test
+question = "Are you alive? Please respond if you can process this message."
+answer = ask_question(question)
+print(f"Q: {question}")
+print(f"A: {answer}")
+```
+
+</TabItem>
+<TabItem label="Conversation">
+
+```python
+import requests
+import json
+
+# Configuration
+SERVICE_URL = "<SERVICE_URL>"  # e.g., http://localhost:8000
+MODEL_NAME = "<MODEL_NAME>"    # e.g., llm-demo
+MODEL_ID = "RedHatAI/Llama-3.2-1B-Instruct-FP8"
+
+class LLMChat:
+ def __init__(self, service_url, model_name, model_id):
+ self.service_url = service_url
+ self.model_name = model_name
+ self.model_id = model_id
+ self.conversation = []
+
+ def add_message(self, role, content):
+ """Add a message to the conversation history."""
+ self.conversation.append({"role": role, "content": content})
+
+ def send_message(self, user_message):
+ """Send a user message and get a response."""
+ self.add_message("user", user_message)
+
+ headers = {
+ "OtterScale-Model-Name": self.model_name,
+ "Content-Type": "application/json"
+ }
+
+ # Build context from conversation history
+ context = "\n".join([f"{msg['role']}: {msg['content']}" for msg in self.conversation])
+
+ payload = {
+ "model": self.model_id,
+ "prompt": context
+ }
+
+ try:
+ response = requests.post(
+ f"{self.service_url}/v1/chat",
+ headers=headers,
+ json=payload
+ )
+ response.raise_for_status()
+ result = response.json()
+ assistant_response = result.get("response", str(result))
+ self.add_message("assistant", assistant_response)
+ return assistant_response
+ except Exception as e:
+ return f"✗ Error: {str(e)}"
+
+ def clear_history(self):
+ """Clear conversation history."""
+ self.conversation = []
+
+# Test
+chat = LLMChat(service_url=SERVICE_URL, model_name=MODEL_NAME, model_id=MODEL_ID)
+
+# First message
+response1 = chat.send_message("What are the three main colors of the French flag?")
+print(f"Q: What are the three main colors of the French flag?")
+print(f"A: {response1}\n")
+
+# Follow-up message (maintains context)
+response2 = chat.send_message("Which one represents liberty?")
+print(f"Q: Which one represents liberty?")
+print(f"A: {response2}\n")
+```
+
+</TabItem>
+<TabItem label="Complete Example">
+
+```python
+import requests
+import json
+
+# Configuration
+SERVICE_URL = "<SERVICE_URL>"  # e.g., http://localhost:8000
+MODEL_NAME = "<MODEL_NAME>"    # e.g., llm-demo
+MODEL_ID = "RedHatAI/Llama-3.2-1B-Instruct-FP8"
+
+class LLMDemo:
+ def __init__(self, service_url, model_name, model_id):
+ self.service_url = service_url
+ self.model_name = model_name
+ self.model_id = model_id
+ self.conversation = []
+
+ def test_connection(self):
+ """Test if the service is accessible."""
+ try:
+ headers = {
+ "OtterScale-Model-Name": self.model_name,
+ "Content-Type": "application/json"
+ }
+ payload = {
+ "model": self.model_id,
+ "prompt": "Hello"
+ }
+ response = requests.post(
+ f"{self.service_url}/v1/chat",
+ headers=headers,
+ json=payload,
+ timeout=10
+ )
+ response.raise_for_status()
+ print("✓ Connection successful!")
+ return True
+ except Exception as e:
+ print(f"✗ Connection failed: {str(e)}")
+ return False
+
+ def send_message(self, user_message):
+ """Send a message and get a response."""
+ self.conversation.append({"role": "user", "content": user_message})
+
+ headers = {
+ "OtterScale-Model-Name": self.model_name,
+ "Content-Type": "application/json"
+ }
+
+ # Build context from conversation history
+ context = "\n".join([f"{msg['role']}: {msg['content']}" for msg in self.conversation])
+
+ payload = {
+ "model": self.model_id,
+ "prompt": context
+ }
+
+ try:
+ response = requests.post(
+ f"{self.service_url}/v1/chat",
+ headers=headers,
+ json=payload
+ )
+ response.raise_for_status()
+ result = response.json()
+ assistant_response = result.get("response", str(result))
+ self.conversation.append({"role": "assistant", "content": assistant_response})
+ return assistant_response
+ except Exception as e:
+ return f"✗ Error: {str(e)}"
+
+ def clear_history(self):
+ """Clear conversation history."""
+ self.conversation = []
+
+# Usage
+if __name__ == "__main__":
+ demo = LLMDemo(service_url=SERVICE_URL, model_name=MODEL_NAME, model_id=MODEL_ID)
+
+ # Test connection
+ demo.test_connection()
+
+ # Start conversation
+ response1 = demo.send_message("Tell me about artificial intelligence in 2 sentences.")
+ print(f"Q: Tell me about artificial intelligence in 2 sentences.")
+ print(f"A: {response1}\n")
+
+ # Follow-up questions
+ response2 = demo.send_message("What are the main applications?")
+ print(f"Q: What are the main applications?")
+ print(f"A: {response2}\n")
+
+ response3 = demo.send_message("How does machine learning fit into this?")
+ print(f"Q: How does machine learning fit into this?")
+ print(f"A: {response3}")
+```
+
+</TabItem>
+</Tabs>
+
\ No newline at end of file