# .env.example — Vector LLM Bot configuration template
# (forked from JSKitty/Vector-LLM; 32 lines, 23 loc, 960 bytes)
# Vector LLM Bot - Environment Configuration

# === LLM Configuration ===
# Base URL for an OpenAI-compatible API (supports OpenAI, llama.cpp, Groq, etc.)
LLM_BASE_URL=https://api.openai.com/v1
# API Key (optional for local LLMs like llama.cpp)
LLM_API_KEY=sk-...
# Model name
LLM_MODEL=gpt-4o-mini
# Sampling temperature (0.0 = deterministic, 1.0 = creative)
LLM_TEMPERATURE=0.2

# === Bot Memory ===
# Maximum number of user/assistant message pairs to retain per chat
HISTORY_LIMIT=16
# Directory for storing per-chat memory files
DATA_DIR=data

# === System Prompt ===
# Default system prompt for all chats (can be overridden per chat if needed)
SYSTEM_PROMPT=You are a helpful assistant. Keep responses concise and factual.

# === Vector SDK ===
# Optional Vector secret key (nsec bech32 or hex). If unset, ephemeral keys are used.
# VECTOR_SECRET_KEY=nsec...
# Whether to send typing indicators (kind 30078) when processing messages
TYPING_INDICATOR=true