@@ -1,13 +1,16 @@
 services:
   # The main entry point for the user
   launcher:
-    build: .
-    image: ghcr.io/amperecomputingai/ai-playground:0.2
+    build:
+      context: .
+      dockerfile: Dockerfile
+    image: ghcr.io/amperecomputingai/ai-playground:0.3-rc1
     container_name: demo_launcher
     ports:
       - "7860:7860" # Expose the Gradio UI on port 7860
+    networks:
+      - public
     volumes:
-      # Mount the Docker socket to allow this container to control others
       - /var/run/docker.sock:/var/run/docker.sock
 
   yolo_demo_service:
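The launcher keeps the bind-mounted Docker socket (only the explanatory comment is dropped; the mount stays), which is how one container can start and stop its siblings. A minimal sketch of that pattern with the docker-py SDK; `switch_demo` is a hypothetical helper, not the launcher's actual code, and only the container names come from this file:

```python
# Hypothetical helper: drive sibling demo containers over the
# bind-mounted /var/run/docker.sock. Requires the `docker` package (docker-py).
import docker
from docker.errors import NotFound

client = docker.from_env()  # picks up the mounted Unix socket

def switch_demo(start: str, stop: list[str]) -> None:
    """Stop the listed demos, then start the requested one."""
    for name in stop:
        try:
            client.containers.get(name).stop()
        except NotFound:
            pass  # that demo was never created
    client.containers.get(start).start()

# Container names as defined in this compose file:
switch_demo("yolo_demo_service", stop=["whisper_demo_service"])
```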
@@ -26,10 +29,10 @@ services:
       VIDEO_SRC: ${VIDEO_SRC:-''}
       WEBCAM0_SRC: ${WEBCAM0_SRC:-0}
       WEBCAM1_SRC: ${WEBCAM1_SRC:-2}
-    # network_mode: host
     ports:
-      # - "${HOST_PORT}:${GRADIO_SERVER_PORT:-7861}"
       - "7862:7862"
+    networks:
+      - public
 
   whisper_demo_service:
     image: ghcr.io/amperecomputingai/ampere-ai-ref-apps:whisper-0.3.10rc1
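With the commented-out host networking and `${HOST_PORT}` mapping replaced by fixed ports on the `public` bridge, the demo UIs land on predictable host ports. A quick stdlib smoke test; the ports come from this file, but the assumption that each service answers HTTP on `/` is mine:

```python
# Probe the published demo ports (7862 = YOLO, 7863 = Whisper).
import urllib.request

for name, port in [("yolo_demo_service", 7862), ("whisper_demo_service", 7863)]:
    try:
        with urllib.request.urlopen(f"http://localhost:{port}/", timeout=5) as resp:
            print(f"{name}: HTTP {resp.status}")
    except OSError as exc:  # URLError subclasses OSError
        print(f"{name}: unreachable ({exc})")
```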
@@ -46,18 +49,21 @@ services:
       NTHREADS: ${NTHREADS:-64}
       AIO_NUM_THREADS: ${AIO_NUM_THREADS:-64}
     ports:
-      # - "${HOST_PORT}:${GRADIO_SERVER_PORT:-5001}"
       - "7863:7863"
+    networks:
+      - public
 
   ollama_demo_service:
     image: ghcr.io/amperecomputingai/ollama-ampere:1.0.0-ol9
     container_name: ollama_demo_service
     volumes:
       - ollama:/root/.ollama
+    ports:
+      - "11434:11434"
     networks:
       - public
     environment:
-      - "OLLAMA_HOST=0.0.0.0:11434"
+      - "OLLAMA_HOST=http://ollama_demo_service:11434"
     tty: true
     restart: unless-stopped
 
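Publishing 11434 makes this Ollama's REST API reachable from the host. Note that `ollama_for_agent_service` below publishes the same host port, so the two cannot both be up at once unless one mapping changes. A minimal call against Ollama's standard `/api/generate` endpoint; the model name is an assumption, since this service's pulled models are not shown in the diff:

```python
# Query the Ollama REST API published on host port 11434.
import json
import urllib.request

payload = json.dumps({
    "model": "llama3.2:1b",   # assumed; any model in the `ollama` volume works
    "prompt": "Say hello in one sentence.",
    "stream": False,          # ask for a single JSON object, not a stream
}).encode()

req = urllib.request.Request(
    "http://localhost:11434/api/generate",
    data=payload,
    headers={"Content-Type": "application/json"},
)
with urllib.request.urlopen(req, timeout=120) as resp:
    print(json.loads(resp.read())["response"])
```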
@@ -83,10 +89,83 @@ services:
       - 'WEBUI_AUTH=False'
     restart: unless-stopped
 
-volumes:
-  ollama: {}
-  open-webui: {}
+  ollama_for_agent_service:
+    image: ghcr.io/amperecomputingai/ollama-ampere:1.0.0-ol9
+    container_name: ollama_for_agent_service
+    restart: unless-stopped
+    ports:
+      - "11434:11434"
+    networks:
+      - public
+    volumes:
+      - n8n_ollama_data:/root/.ollama
+    environment:
+      - "OLLAMA_HOST=http://ollama_for_agent_service:11434"
+    tty: true
+    entrypoint: "bash -c \"ollama serve & sleep 5 && ollama pull llama3.2:1b && wait\""
+
+  searxng:
+    container_name: searxng
+    image: docker.io/searxng/searxng:2025.9.23-a57b29b00
+    user: "977:977"
+    restart: unless-stopped
+    ports:
+      - "8081:8080"
+    volumes:
+      - ./searxng:/etc/searxng:rw
+      - n8n_searxng:/var/cache/searxng:rw
+      - /var/run/docker.sock:/var/run/docker.sock
+    networks:
+      - public
+    environment:
+      - SEARXNG_BASE_URL=http://searxng:8080
+      - SEARXNG_DEBUG=1
+      - SEARXNG_HOSTNAME=searxng
+    logging:
+      driver: "json-file"
+      options:
+        max-size: "1m"
+        max-file: "1"
+
+  agentic_ai_demo_service:
+    # image: ghcr.io/amperecomputingai/ampere-ai-agents:0.1.3
+    image: ghcr.io/amperecomputingai/ampere-ai-agents:0.1.1
+    build:
+      context: .
+      dockerfile: Dockerfile
+    container_name: agentic_ai_demo_service
+    depends_on:
+      - ollama_for_agent_service
+      - searxng
+    restart: always
+    ports:
+      - "7864:5678"
+    networks:
+      - public
+    volumes:
+      - n8n_data:/home/node/.n8n
+      - /var/run/docker.sock:/var/run/docker.sock
+    environment:
+      - NODE_ENV=development
+      - N8N_BASIC_AUTH_ACTIVE=false # Disables authentication
+      - N8N_HOST=localhost
+      - N8N_PORT=5678
+      - N8N_PROTOCOL=http
+      - N8N_USER_MANAGEMENT_DISABLED=true
+      - N8N_READ_ONLY=true
+      - N8N_INITIAL_SETUP_COMPLETED=true
+      - N8N_LOG_LEVEL=debug
+      - N8N_COMMUNITY_PACKAGES_ENABLED=true
+      - N8N_UNVERIFIED_COMMUNITY_PACKAGES_ENABLED=true
+      - N8N_COMMUNITY_PACKAGES_ALLOW_TOOL_USAGE=true
 
 networks:
   public:
     driver: bridge
+
+volumes:
+  ollama: {}
+  open-webui: {}
+  n8n_data: {}
+  n8n_ollama_data: {}
+  n8n_searxng: {}
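The n8n-based agent reaches SearXNG at `http://searxng:8080` inside the `public` network; from the host it is published on 8081. A sketch of SearXNG's standard search API; note that `format=json` only works if `json` is enabled under `search.formats` in `./searxng/settings.yml`, which this diff does not show, so that is an assumption:

```python
# Query SearXNG's JSON search API from the host (published as 8081:8080).
import json
import urllib.parse
import urllib.request

params = urllib.parse.urlencode({"q": "Ampere Altra", "format": "json"})
url = f"http://localhost:8081/search?{params}"
with urllib.request.urlopen(url, timeout=30) as resp:
    results = json.loads(resp.read())

for hit in results.get("results", [])[:5]:
    print(hit["title"], "->", hit["url"])
```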
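The agent's editor UI is published as `7864:5678`, and since the container runs with `N8N_READ_ONLY=true` and user management disabled, a liveness check is about all that is useful from outside. n8n serves one at `/healthz`; the wrapper below is my sketch:

```python
# Liveness check for the agentic demo: n8n's /healthz endpoint,
# published on host port 7864 (container port 5678).
import json
import urllib.request

with urllib.request.urlopen("http://localhost:7864/healthz", timeout=10) as resp:
    print(json.loads(resp.read()))  # n8n reports {"status": "ok"} when healthy
```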