Skip to content

Commit 9bcad6b

Browse files
authored
Merge pull request #96 from trubrics/remove_default_latency
remove default latency
2 parents d58461d + 07dabc8 commit 9bcad6b

File tree

5 files changed

+24
-6
lines changed

5 files changed

+24
-6
lines changed

CHANGELOG.md

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -10,6 +10,8 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
1010
## [1.8.2] - 2025-02-10
1111
### Fixed
1212
- Added missing flushing limit values, and setup automated flush if limit is reached
13+
### Changed
14+
- Change LLM tracking latency to seconds and remove default latency from event properties
1315

1416

1517
## [1.8.1] - 2025-02-07

README.md

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -40,7 +40,7 @@ client.track_llm(
4040
assistant_id="gpt-4",
4141
generation="The capital of France is Paris.",
4242
properties={"model": "gpt-4"},
43-
latency=150 # milliseconds
43+
latency=1.5 # seconds
4444
)
4545
```
4646

examples/sdk_test.py

Lines changed: 16 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,16 @@
1+
import os
2+
3+
from trubrics import Trubrics
4+
5+
trubrics = Trubrics(api_key=os.environ["TRUBRICS_API_KEY"])
6+
trubrics.track(event="Sign up", user_id="user_id")
7+
8+
trubrics.track_llm(
9+
user_id="user_id",
10+
prompt="What is Trubrics?",
11+
assistant_id="gpt4o",
12+
generation="Trubrics is a product analytics platform for AI applications.",
13+
latency=2,
14+
)
15+
16+
trubrics.close()

trubrics/main.py

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -116,7 +116,7 @@ def track_llm(
116116
generation: str,
117117
properties: dict | None = None,
118118
timestamp: datetime | None = None,
119-
latency: int = 1,
119+
latency: float | None = None,
120120
):
121121
"""
122122
Track an LLM prompt and generation.
@@ -127,10 +127,10 @@ def track_llm(
127127
generation (str): The generated response from the LLM.
128128
properties (dict | None): Additional properties to track.
129129
timestamp (datetime | None): The timestamp of the generation event. If None, the current time in UTC is used.
130-
latency (int): The latency in milliseconds between the prompt and the generation. Defaults to 1.
130+
latency (float | None): The latency in seconds between the prompt and the generation. Defaults to None; when None, a 1-second latency is assumed when deriving the prompt timestamp.
131131
"""
132132
generation_timestamp = timestamp or datetime.now(timezone.utc)
133-
prompt_timestamp = generation_timestamp - timedelta(milliseconds=latency)
133+
prompt_timestamp = generation_timestamp - timedelta(seconds=latency or 1)
134134

135135
self.track(
136136
event="Prompt",
@@ -146,7 +146,7 @@ def track_llm(
146146
"$text": generation,
147147
"$assistant_id": assistant_id,
148148
"$prompt": prompt,
149-
"latency(ms)": latency,
149+
"latency(s)": latency,
150150
**(properties or {}),
151151
},
152152
timestamp=generation_timestamp,

uv.lock

Lines changed: 1 addition & 1 deletion
Some generated files are not rendered by default. Learn more about customizing how changed files appear on GitHub.

0 commit comments

Comments
 (0)