5 files changed: +24 −6 lines changed

@@ -10,6 +10,8 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
 ## [1.8.2] - 2025-02-10
 ### Fixed
 - Added missing flushing limit values, and setup automated flush if limit is reached
+### Changed
+- Change LLM tracking latency to seconds and remove default latency from event properties


 ## [1.8.1] - 2025-02-07
@@ -40,7 +40,7 @@ client.track_llm(
     assistant_id="gpt-4",
     generation="The capital of France is Paris.",
     properties={"model": "gpt-4"},
-    latency=150  # milliseconds
+    latency=1.5  # seconds
 )
 ```

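Since `latency` now takes seconds rather than milliseconds, existing callers must convert their values when upgrading. A minimal migration sketch, assuming a caller that previously measured latency in milliseconds (`latency_ms` is a hypothetical variable; the other arguments mirror the docs example above):

```python
# Hypothetical pre-existing measurement, in milliseconds.
latency_ms = 150

client.track_llm(
    user_id="user_id",
    prompt="What is the capital of France?",
    assistant_id="gpt-4",
    generation="The capital of France is Paris.",
    properties={"model": "gpt-4"},
    latency=latency_ms / 1000,  # convert to seconds: 0.15
)
```

The new example script added by this PR (below) already passes latency in seconds.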
@@ -0,0 +1,16 @@
+import os
+
+from trubrics import Trubrics
+
+trubrics = Trubrics(api_key=os.environ["TRUBRICS_API_KEY"])
+trubrics.track(event="Sign up", user_id="user_id")
+
+trubrics.track_llm(
+    user_id="user_id",
+    prompt="What is Trubrics?",
+    assistant_id="gpt4o",
+    generation="Trubrics is a product analytics platform for AI applications.",
+    latency=2,
+)
+
+trubrics.close()
@@ -116,7 +116,7 @@ def track_llm(
         generation: str,
         properties: dict | None = None,
         timestamp: datetime | None = None,
-        latency: int = 1,
+        latency: float | None = None,
     ):
         """
         Track an LLM prompt and generation.
@@ -127,10 +127,10 @@ def track_llm(
             generation (str): The generated response from the LLM.
             properties (dict | None): Additional properties to track.
             timestamp (datetime | None): The timestamp of the generation event. If None, the current time in UTC is used.
-            latency (int): The latency in milliseconds between the prompt and the generation. Defaults to 1.
+            latency (float | None): The latency in seconds between the prompt and the generation. If None, 1 second is assumed when deriving the prompt timestamp.
         """
         generation_timestamp = timestamp or datetime.now(timezone.utc)
-        prompt_timestamp = generation_timestamp - timedelta(milliseconds=latency)
+        prompt_timestamp = generation_timestamp - timedelta(seconds=latency or 1)

         self.track(
             event="Prompt",
@@ -146,7 +146,7 @@ def track_llm(
                 "$text": generation,
                 "$assistant_id": assistant_id,
                 "$prompt": prompt,
-                "latency(ms)": latency,
+                "latency(s)": latency,
                 **(properties or {}),
             },
             timestamp=generation_timestamp,
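
With the new signature, the prompt timestamp falls back to one second before the generation when no latency is supplied, while the `latency(s)` event property is left as `None` so consumers can distinguish "not measured" from a real value. A standalone sketch of the changed derivation logic (a mirror of the two updated lines for illustration, not the library itself):

```python
from datetime import datetime, timedelta, timezone


def derive_prompt_timestamp(latency: float | None, timestamp: datetime | None = None) -> datetime:
    # Mirrors the updated track_llm logic: `latency or 1` falls back
    # to 1 second when latency is None (and also when it is 0, since
    # both are falsy).
    generation_timestamp = timestamp or datetime.now(timezone.utc)
    return generation_timestamp - timedelta(seconds=latency or 1)


gen = datetime(2025, 2, 10, 12, 0, 0, tzinfo=timezone.utc)
assert derive_prompt_timestamp(2.5, gen) == gen - timedelta(seconds=2.5)
assert derive_prompt_timestamp(None, gen) == gen - timedelta(seconds=1)
```

Note that `latency or 1` also treats an explicit `0.0` as unset; `1 if latency is None else latency` would preserve a measured zero, if that distinction ever matters.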