"""
Performance Benchmark Script.
Measures embedding and RAG query performance for the AI Knowledge Assistant.
"""
import asyncio
import time
from statistics import mean, median, stdev

import httpx


class PerformanceBenchmark:
    """Benchmark tool for measuring system performance."""

    def __init__(self, base_url: str = "http://localhost:8000/api/v1"):
        """Initialize benchmark with API base URL."""
        self.base_url = base_url
        self.token = None

    async def login(self, username: str, password: str) -> None:
        """Login and get authentication token."""
        async with httpx.AsyncClient() as client:
            response = await client.post(
                f"{self.base_url}/auth/login",
                data={"username": username, "password": password},
            )
            response.raise_for_status()
            data = response.json()
            self.token = data["access_token"]
            print(f"✓ Logged in as {username}")

    async def benchmark_query(
        self, question: str, iterations: int = 10
    ) -> dict[str, float]:
        """Benchmark RAG query performance."""
        if not self.token:
            raise ValueError("Must login first")

        times = []
        confidences = []
        source_counts = []

        print(f"\n🔄 Running {iterations} query iterations...")
        async with httpx.AsyncClient(timeout=60.0) as client:
            for i in range(iterations):
                start = time.perf_counter()
                response = await client.post(
                    f"{self.base_url}/query/ask",
                    headers={"Authorization": f"Bearer {self.token}"},
                    json={"question": question},
                )
                response.raise_for_status()
                elapsed = time.perf_counter() - start

                times.append(elapsed)
                data = response.json()
                confidences.append(data.get("confidence", 0))
                source_counts.append(len(data.get("sources", [])))
                print(f" Iteration {i+1}/{iterations}: {elapsed:.3f}s")

        return {
            "mean_time": mean(times),
            "median_time": median(times),
            "min_time": min(times),
            "max_time": max(times),
            "stdev_time": stdev(times) if len(times) > 1 else 0,
            "mean_confidence": mean(confidences),
            "mean_sources": mean(source_counts),
        }

    async def benchmark_document_upload(
        self, file_path: str, iterations: int = 5
    ) -> dict[str, float]:
        """Benchmark document upload and embedding performance."""
        if not self.token:
            raise ValueError("Must login first")

        times = []

        print(f"\n🔄 Running {iterations} upload iterations...")
        async with httpx.AsyncClient(timeout=120.0) as client:
            for i in range(iterations):
                with open(file_path, "rb") as f:
                    start = time.perf_counter()
                    response = await client.post(
                        f"{self.base_url}/query/upload",
                        headers={"Authorization": f"Bearer {self.token}"},
                        files={"file": f},
                        data={"access_level": "public"},
                    )
                    response.raise_for_status()
                    elapsed = time.perf_counter() - start

                times.append(elapsed)

                # Delete the uploaded document for next iteration
                data = response.json()
                doc_id = data.get("document_id")
                if doc_id:
                    await client.delete(
                        f"{self.base_url}/query/documents/{doc_id}",
                        headers={"Authorization": f"Bearer {self.token}"},
                    )

                print(f" Iteration {i+1}/{iterations}: {elapsed:.3f}s")

        return {
            "mean_time": mean(times),
            "median_time": median(times),
            "min_time": min(times),
            "max_time": max(times),
            "stdev_time": stdev(times) if len(times) > 1 else 0,
        }

    def print_results(self, title: str, results: dict[str, float]) -> None:
        """Print benchmark results in a formatted table."""
        print(f"\n{'='*60}")
        print(f" {title}")
        print(f"{'='*60}")
        for key, value in results.items():
            label = key.replace("_", " ").title()
            if "time" in key:
                print(f" {label:.<40} {value:.3f}s")
            elif "confidence" in key:
                print(f" {label:.<40} {value:.2%}")
            else:
                print(f" {label:.<40} {value:.2f}")
        print(f"{'='*60}\n")


async def main():
    """Run performance benchmarks."""
    print("🚀 AI Knowledge Assistant Performance Benchmark\n")

    benchmark = PerformanceBenchmark()

    # Login
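    # NOTE: "sabry"/"sabry" look like placeholder development credentials;
    # substitute an account that exists in your deployment before running.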
    await benchmark.login("sabry", "sabry")

    # Benchmark RAG queries
    query_results = await benchmark.benchmark_query(
        question="What is this document about?",
        iterations=10,
    )
    benchmark.print_results("RAG Query Performance", query_results)

    # Optionally benchmark document upload
    # upload_results = await benchmark.benchmark_document_upload(
    #     file_path="path/to/test.pdf",
    #     iterations=5,
    # )
    # benchmark.print_results("Document Upload Performance", upload_results)

    print("✅ Benchmark complete!")


if __name__ == "__main__":
    asyncio.run(main())