diff --git a/packages/ai-semantic-conventions/src/SemanticAttributes.ts b/packages/ai-semantic-conventions/src/SemanticAttributes.ts index b73b961f..ebeaf522 100644 --- a/packages/ai-semantic-conventions/src/SemanticAttributes.ts +++ b/packages/ai-semantic-conventions/src/SemanticAttributes.ts @@ -15,22 +15,68 @@ */ export const SpanAttributes = { + // OpenTelemetry GenAI Semantic Conventions (Current) + // Required attributes + GEN_AI_OPERATION_NAME: "gen_ai.operation.name", + GEN_AI_PROVIDER_NAME: "gen_ai.provider.name", + + // Request attributes + GEN_AI_REQUEST_MODEL: "gen_ai.request.model", + GEN_AI_REQUEST_TEMPERATURE: "gen_ai.request.temperature", + GEN_AI_REQUEST_TOP_P: "gen_ai.request.top_p", + GEN_AI_REQUEST_TOP_K: "gen_ai.request.top_k", + GEN_AI_REQUEST_MAX_TOKENS: "gen_ai.request.max_tokens", + GEN_AI_REQUEST_FREQUENCY_PENALTY: "gen_ai.request.frequency_penalty", + GEN_AI_REQUEST_PRESENCE_PENALTY: "gen_ai.request.presence_penalty", + GEN_AI_REQUEST_STOP_SEQUENCES: "gen_ai.request.stop_sequences", + + // Response attributes + GEN_AI_RESPONSE_ID: "gen_ai.response.id", + GEN_AI_RESPONSE_MODEL: "gen_ai.response.model", + GEN_AI_RESPONSE_FINISH_REASONS: "gen_ai.response.finish_reasons", + + // Token usage (Current OTel naming) + GEN_AI_USAGE_INPUT_TOKENS: "gen_ai.usage.input_tokens", + GEN_AI_USAGE_OUTPUT_TOKENS: "gen_ai.usage.output_tokens", + + // Messages + GEN_AI_INPUT_MESSAGES: "gen_ai.input.messages", + GEN_AI_OUTPUT_MESSAGES: "gen_ai.output.messages", + GEN_AI_SYSTEM_INSTRUCTIONS: "gen_ai.system_instructions", + + // Tool definitions + GEN_AI_TOOL_DEFINITIONS: "gen_ai.tool.definitions", + + // Agent attributes + GEN_AI_AGENT_NAME: "gen_ai.agent.name", + + // Deprecated attributes (kept for backward compatibility) + /** @deprecated Use GEN_AI_PROVIDER_NAME instead */ LLM_SYSTEM: "gen_ai.system", + /** @deprecated Use GEN_AI_REQUEST_MODEL instead */ LLM_REQUEST_MODEL: "gen_ai.request.model", + /** @deprecated Use GEN_AI_REQUEST_MAX_TOKENS instead */ 
LLM_REQUEST_MAX_TOKENS: "gen_ai.request.max_tokens", + /** @deprecated Use GEN_AI_REQUEST_TEMPERATURE instead */ LLM_REQUEST_TEMPERATURE: "gen_ai.request.temperature", + /** @deprecated Use GEN_AI_REQUEST_TOP_P instead */ LLM_REQUEST_TOP_P: "gen_ai.request.top_p", + /** @deprecated Use GEN_AI_INPUT_MESSAGES and events instead */ LLM_PROMPTS: "gen_ai.prompt", + /** @deprecated Use GEN_AI_OUTPUT_MESSAGES and events instead */ LLM_COMPLETIONS: "gen_ai.completion", + /** @deprecated Use GEN_AI_INPUT_MESSAGES instead */ LLM_INPUT_MESSAGES: "gen_ai.input.messages", + /** @deprecated Use GEN_AI_OUTPUT_MESSAGES instead */ LLM_OUTPUT_MESSAGES: "gen_ai.output.messages", + /** @deprecated Use GEN_AI_RESPONSE_MODEL instead */ LLM_RESPONSE_MODEL: "gen_ai.response.model", + /** @deprecated Use GEN_AI_USAGE_INPUT_TOKENS instead */ LLM_USAGE_PROMPT_TOKENS: "gen_ai.usage.prompt_tokens", + /** @deprecated Use GEN_AI_USAGE_OUTPUT_TOKENS instead */ LLM_USAGE_COMPLETION_TOKENS: "gen_ai.usage.completion_tokens", - GEN_AI_AGENT_NAME: "gen_ai.agent.name", - - // LLM + // LLM (Non-standard attributes) LLM_REQUEST_TYPE: "llm.request.type", LLM_USAGE_TOTAL_TOKENS: "llm.usage.total_tokens", LLM_TOP_K: "llm.top_k", diff --git a/packages/traceloop-sdk/recordings/AI-SDK-OTel-GenAI-Semantic-Conventions_247892713/Backward-compatibility_2327270860/should-maintain-all-deprecated-attributes-alongside-new-ones_410416891/recording.har b/packages/traceloop-sdk/recordings/AI-SDK-OTel-GenAI-Semantic-Conventions_247892713/Backward-compatibility_2327270860/should-maintain-all-deprecated-attributes-alongside-new-ones_410416891/recording.har new file mode 100644 index 00000000..4e1e05d4 --- /dev/null +++ b/packages/traceloop-sdk/recordings/AI-SDK-OTel-GenAI-Semantic-Conventions_247892713/Backward-compatibility_2327270860/should-maintain-all-deprecated-attributes-alongside-new-ones_410416891/recording.har @@ -0,0 +1,172 @@ +{ + "log": { + "_recordingName": "AI SDK OTel GenAI Semantic Conventions/Backward 
compatibility/should maintain all deprecated attributes alongside new ones", + "creator": { + "comment": "persister:fs", + "name": "Polly.JS", + "version": "6.0.6" + }, + "entries": [ + { + "_id": "df289a7f0d80329eff9ec2fc92f65890", + "_order": 0, + "cache": {}, + "request": { + "bodySize": 146, + "cookies": [], + "headers": [ + { + "name": "content-type", + "value": "application/json" + } + ], + "headersSize": 165, + "httpVersion": "HTTP/1.1", + "method": "POST", + "postData": { + "mimeType": "application/json", + "params": [], + "text": "{\"model\":\"gpt-3.5-turbo\",\"input\":[{\"role\":\"system\",\"content\":\"You are helpful\"},{\"role\":\"user\",\"content\":[{\"type\":\"input_text\",\"text\":\"Hello\"}]}]}" + }, + "queryString": [], + "url": "https://api.openai.com/v1/responses" + }, + "response": { + "bodySize": 1473, + "content": { + "mimeType": "application/json", + "size": 1473, + "text": "{\n \"id\": \"resp_0faf8f16567bd63e00691dfb1e71a08195aded450e62456fe5\",\n \"object\": \"response\",\n \"created_at\": 1763572510,\n \"status\": \"completed\",\n \"background\": false,\n \"billing\": {\n \"payer\": \"developer\"\n },\n \"error\": null,\n \"incomplete_details\": null,\n \"instructions\": null,\n \"max_output_tokens\": null,\n \"max_tool_calls\": null,\n \"model\": \"gpt-3.5-turbo-0125\",\n \"output\": [\n {\n \"id\": \"msg_0faf8f16567bd63e00691dfb1eabc88195845a609342cf410b\",\n \"type\": \"message\",\n \"status\": \"completed\",\n \"content\": [\n {\n \"type\": \"output_text\",\n \"annotations\": [],\n \"logprobs\": [],\n \"text\": \"Hello! 
How can I assist you today?\"\n }\n ],\n \"role\": \"assistant\"\n }\n ],\n \"parallel_tool_calls\": true,\n \"previous_response_id\": null,\n \"prompt_cache_key\": null,\n \"prompt_cache_retention\": null,\n \"reasoning\": {\n \"effort\": null,\n \"summary\": null\n },\n \"safety_identifier\": null,\n \"service_tier\": \"default\",\n \"store\": true,\n \"temperature\": 1.0,\n \"text\": {\n \"format\": {\n \"type\": \"text\"\n },\n \"verbosity\": \"medium\"\n },\n \"tool_choice\": \"auto\",\n \"tools\": [],\n \"top_logprobs\": 0,\n \"top_p\": 1.0,\n \"truncation\": \"disabled\",\n \"usage\": {\n \"input_tokens\": 15,\n \"input_tokens_details\": {\n \"cached_tokens\": 0\n },\n \"output_tokens\": 10,\n \"output_tokens_details\": {\n \"reasoning_tokens\": 0\n },\n \"total_tokens\": 25\n },\n \"user\": null,\n \"metadata\": {}\n}" + }, + "cookies": [ + { + "domain": ".api.openai.com", + "httpOnly": true, + "name": "_cfuvid", + "path": "/", + "sameSite": "None", + "secure": true, + "value": "yjFQ1AZqezCnRWiCWX31Xdy0VPiTyaz1iSbwhB24PsU-1763572510963-0.0.1.1-604800000" + } + ], + "headers": [ + { + "name": "alt-svc", + "value": "h3=\":443\"; ma=86400" + }, + { + "name": "cf-cache-status", + "value": "DYNAMIC" + }, + { + "name": "cf-ray", + "value": "9a11591dab317d98-TLV" + }, + { + "name": "connection", + "value": "keep-alive" + }, + { + "name": "content-encoding", + "value": "br" + }, + { + "name": "content-type", + "value": "application/json" + }, + { + "name": "date", + "value": "Wed, 19 Nov 2025 17:15:10 GMT" + }, + { + "name": "openai-organization", + "value": "traceloop" + }, + { + "name": "openai-processing-ms", + "value": "441" + }, + { + "name": "openai-project", + "value": "proj_tzz1TbPPOXaf6j9tEkVUBIAa" + }, + { + "name": "openai-version", + "value": "2020-10-01" + }, + { + "name": "server", + "value": "cloudflare" + }, + { + "name": "set-cookie", + "value": "_cfuvid=yjFQ1AZqezCnRWiCWX31Xdy0VPiTyaz1iSbwhB24PsU-1763572510963-0.0.1.1-604800000; path=/; 
domain=.api.openai.com; HttpOnly; Secure; SameSite=None" + }, + { + "name": "strict-transport-security", + "value": "max-age=31536000; includeSubDomains; preload" + }, + { + "name": "transfer-encoding", + "value": "chunked" + }, + { + "name": "x-content-type-options", + "value": "nosniff" + }, + { + "name": "x-envoy-upstream-service-time", + "value": "445" + }, + { + "name": "x-ratelimit-limit-requests", + "value": "10000" + }, + { + "name": "x-ratelimit-limit-tokens", + "value": "50000000" + }, + { + "name": "x-ratelimit-remaining-requests", + "value": "9999" + }, + { + "name": "x-ratelimit-remaining-tokens", + "value": "49999980" + }, + { + "name": "x-ratelimit-reset-requests", + "value": "6ms" + }, + { + "name": "x-ratelimit-reset-tokens", + "value": "0s" + }, + { + "name": "x-request-id", + "value": "req_2c9e9d910e1a4a148bab90e480e0b2d3" + } + ], + "headersSize": 953, + "httpVersion": "HTTP/1.1", + "redirectURL": "", + "status": 200, + "statusText": "OK" + }, + "startedDateTime": "2025-11-19T17:15:10.283Z", + "time": 634, + "timings": { + "blocked": -1, + "connect": -1, + "dns": -1, + "receive": 0, + "send": 0, + "ssl": -1, + "wait": 634 + } + } + ], + "pages": [], + "version": "1.2" + } +} diff --git a/packages/traceloop-sdk/recordings/AI-SDK-OTel-GenAI-Semantic-Conventions_247892713/Span-naming_1988960947/should-follow-OTel-pattern-operation-model_2170500561/recording.har b/packages/traceloop-sdk/recordings/AI-SDK-OTel-GenAI-Semantic-Conventions_247892713/Span-naming_1988960947/should-follow-OTel-pattern-operation-model_2170500561/recording.har new file mode 100644 index 00000000..7b8fef04 --- /dev/null +++ b/packages/traceloop-sdk/recordings/AI-SDK-OTel-GenAI-Semantic-Conventions_247892713/Span-naming_1988960947/should-follow-OTel-pattern-operation-model_2170500561/recording.har @@ -0,0 +1,172 @@ +{ + "log": { + "_recordingName": "AI SDK OTel GenAI Semantic Conventions/Span naming/should follow OTel pattern: {operation} {model}", + "creator": { + "comment": 
"persister:fs", + "name": "Polly.JS", + "version": "6.0.6" + }, + "entries": [ + { + "_id": "f072dee5cb26fe537e680e2b7e4b48ea", + "_order": 0, + "cache": {}, + "request": { + "bodySize": 101, + "cookies": [], + "headers": [ + { + "name": "content-type", + "value": "application/json" + } + ], + "headersSize": 165, + "httpVersion": "HTTP/1.1", + "method": "POST", + "postData": { + "mimeType": "application/json", + "params": [], + "text": "{\"model\":\"gpt-3.5-turbo\",\"input\":[{\"role\":\"user\",\"content\":[{\"type\":\"input_text\",\"text\":\"Say hi\"}]}]}" + }, + "queryString": [], + "url": "https://api.openai.com/v1/responses" + }, + "response": { + "bodySize": 1462, + "content": { + "mimeType": "application/json", + "size": 1462, + "text": "{\n \"id\": \"resp_0d98810da5876b6700691dfb1f184081969958dfdc21c33a5b\",\n \"object\": \"response\",\n \"created_at\": 1763572511,\n \"status\": \"completed\",\n \"background\": false,\n \"billing\": {\n \"payer\": \"developer\"\n },\n \"error\": null,\n \"incomplete_details\": null,\n \"instructions\": null,\n \"max_output_tokens\": null,\n \"max_tool_calls\": null,\n \"model\": \"gpt-3.5-turbo-0125\",\n \"output\": [\n {\n \"id\": \"msg_0d98810da5876b6700691dfb1f4f048196973677426246aecb\",\n \"type\": \"message\",\n \"status\": \"completed\",\n \"content\": [\n {\n \"type\": \"output_text\",\n \"annotations\": [],\n \"logprobs\": [],\n \"text\": \"Hello! 
How are you today?\"\n }\n ],\n \"role\": \"assistant\"\n }\n ],\n \"parallel_tool_calls\": true,\n \"previous_response_id\": null,\n \"prompt_cache_key\": null,\n \"prompt_cache_retention\": null,\n \"reasoning\": {\n \"effort\": null,\n \"summary\": null\n },\n \"safety_identifier\": null,\n \"service_tier\": \"default\",\n \"store\": true,\n \"temperature\": 1.0,\n \"text\": {\n \"format\": {\n \"type\": \"text\"\n },\n \"verbosity\": \"medium\"\n },\n \"tool_choice\": \"auto\",\n \"tools\": [],\n \"top_logprobs\": 0,\n \"top_p\": 1.0,\n \"truncation\": \"disabled\",\n \"usage\": {\n \"input_tokens\": 9,\n \"input_tokens_details\": {\n \"cached_tokens\": 0\n },\n \"output_tokens\": 8,\n \"output_tokens_details\": {\n \"reasoning_tokens\": 0\n },\n \"total_tokens\": 17\n },\n \"user\": null,\n \"metadata\": {}\n}" + }, + "cookies": [ + { + "domain": ".api.openai.com", + "httpOnly": true, + "name": "_cfuvid", + "path": "/", + "sameSite": "None", + "secure": true, + "value": "nzJ_mAvG_5nHQ2d7DErCiKiIKfXfCrPElzv9Vu3KG5s-1763572511578-0.0.1.1-604800000" + } + ], + "headers": [ + { + "name": "alt-svc", + "value": "h3=\":443\"; ma=86400" + }, + { + "name": "cf-cache-status", + "value": "DYNAMIC" + }, + { + "name": "cf-ray", + "value": "9a115921af047d98-TLV" + }, + { + "name": "connection", + "value": "keep-alive" + }, + { + "name": "content-encoding", + "value": "br" + }, + { + "name": "content-type", + "value": "application/json" + }, + { + "name": "date", + "value": "Wed, 19 Nov 2025 17:15:11 GMT" + }, + { + "name": "openai-organization", + "value": "traceloop" + }, + { + "name": "openai-processing-ms", + "value": "410" + }, + { + "name": "openai-project", + "value": "proj_tzz1TbPPOXaf6j9tEkVUBIAa" + }, + { + "name": "openai-version", + "value": "2020-10-01" + }, + { + "name": "server", + "value": "cloudflare" + }, + { + "name": "set-cookie", + "value": "_cfuvid=nzJ_mAvG_5nHQ2d7DErCiKiIKfXfCrPElzv9Vu3KG5s-1763572511578-0.0.1.1-604800000; path=/; 
domain=.api.openai.com; HttpOnly; Secure; SameSite=None" + }, + { + "name": "strict-transport-security", + "value": "max-age=31536000; includeSubDomains; preload" + }, + { + "name": "transfer-encoding", + "value": "chunked" + }, + { + "name": "x-content-type-options", + "value": "nosniff" + }, + { + "name": "x-envoy-upstream-service-time", + "value": "413" + }, + { + "name": "x-ratelimit-limit-requests", + "value": "10000" + }, + { + "name": "x-ratelimit-limit-tokens", + "value": "50000000" + }, + { + "name": "x-ratelimit-remaining-requests", + "value": "9999" + }, + { + "name": "x-ratelimit-remaining-tokens", + "value": "49999985" + }, + { + "name": "x-ratelimit-reset-requests", + "value": "6ms" + }, + { + "name": "x-ratelimit-reset-tokens", + "value": "0s" + }, + { + "name": "x-request-id", + "value": "req_fc1b5ea9e7d4420989eaebe91eb1b78c" + } + ], + "headersSize": 953, + "httpVersion": "HTTP/1.1", + "redirectURL": "", + "status": 200, + "statusText": "OK" + }, + "startedDateTime": "2025-11-19T17:15:10.931Z", + "time": 606, + "timings": { + "blocked": -1, + "connect": -1, + "dns": -1, + "receive": 0, + "send": 0, + "ssl": -1, + "wait": 606 + } + } + ], + "pages": [], + "version": "1.2" + } +} diff --git a/packages/traceloop-sdk/recordings/AI-SDK-OTel-GenAI-Semantic-Conventions_247892713/gen_ai-operation-name-attribute_34593844/should-set-operation-name-to-chat-for-generateText_962058512/recording.har b/packages/traceloop-sdk/recordings/AI-SDK-OTel-GenAI-Semantic-Conventions_247892713/gen_ai-operation-name-attribute_34593844/should-set-operation-name-to-chat-for-generateText_962058512/recording.har new file mode 100644 index 00000000..a882d9a6 --- /dev/null +++ b/packages/traceloop-sdk/recordings/AI-SDK-OTel-GenAI-Semantic-Conventions_247892713/gen_ai-operation-name-attribute_34593844/should-set-operation-name-to-chat-for-generateText_962058512/recording.har @@ -0,0 +1,172 @@ +{ + "log": { + "_recordingName": "AI SDK OTel GenAI Semantic 
Conventions/gen_ai.operation.name attribute/should set operation.name to 'chat' for generateText", + "creator": { + "comment": "persister:fs", + "name": "Polly.JS", + "version": "6.0.6" + }, + "entries": [ + { + "_id": "8b8f75885c4eb529fa0a00b633ff871a", + "_order": 0, + "cache": {}, + "request": { + "bodySize": 104, + "cookies": [], + "headers": [ + { + "name": "content-type", + "value": "application/json" + } + ], + "headersSize": 165, + "httpVersion": "HTTP/1.1", + "method": "POST", + "postData": { + "mimeType": "application/json", + "params": [], + "text": "{\"model\":\"gpt-3.5-turbo\",\"input\":[{\"role\":\"user\",\"content\":[{\"type\":\"input_text\",\"text\":\"Say hello\"}]}]}" + }, + "queryString": [], + "url": "https://api.openai.com/v1/responses" + }, + "response": { + "bodySize": 1472, + "content": { + "mimeType": "application/json", + "size": 1472, + "text": "{\n \"id\": \"resp_0d44f8cf517884e800691dfb173ff08190a49d7f4b4dc232b4\",\n \"object\": \"response\",\n \"created_at\": 1763572503,\n \"status\": \"completed\",\n \"background\": false,\n \"billing\": {\n \"payer\": \"developer\"\n },\n \"error\": null,\n \"incomplete_details\": null,\n \"instructions\": null,\n \"max_output_tokens\": null,\n \"max_tool_calls\": null,\n \"model\": \"gpt-3.5-turbo-0125\",\n \"output\": [\n {\n \"id\": \"msg_0d44f8cf517884e800691dfb1836f88190a86e82130ed0d0c7\",\n \"type\": \"message\",\n \"status\": \"completed\",\n \"content\": [\n {\n \"type\": \"output_text\",\n \"annotations\": [],\n \"logprobs\": [],\n \"text\": \"Hello! 
How can I assist you today?\"\n }\n ],\n \"role\": \"assistant\"\n }\n ],\n \"parallel_tool_calls\": true,\n \"previous_response_id\": null,\n \"prompt_cache_key\": null,\n \"prompt_cache_retention\": null,\n \"reasoning\": {\n \"effort\": null,\n \"summary\": null\n },\n \"safety_identifier\": null,\n \"service_tier\": \"default\",\n \"store\": true,\n \"temperature\": 1.0,\n \"text\": {\n \"format\": {\n \"type\": \"text\"\n },\n \"verbosity\": \"medium\"\n },\n \"tool_choice\": \"auto\",\n \"tools\": [],\n \"top_logprobs\": 0,\n \"top_p\": 1.0,\n \"truncation\": \"disabled\",\n \"usage\": {\n \"input_tokens\": 9,\n \"input_tokens_details\": {\n \"cached_tokens\": 0\n },\n \"output_tokens\": 10,\n \"output_tokens_details\": {\n \"reasoning_tokens\": 0\n },\n \"total_tokens\": 19\n },\n \"user\": null,\n \"metadata\": {}\n}" + }, + "cookies": [ + { + "domain": ".api.openai.com", + "httpOnly": true, + "name": "_cfuvid", + "path": "/", + "sameSite": "None", + "secure": true, + "value": "GnsDcPRc2JXA6WYXqOP6Cib7jdag23KcQGZJENLIGFk-1763572504802-0.0.1.1-604800000" + } + ], + "headers": [ + { + "name": "alt-svc", + "value": "h3=\":443\"; ma=86400" + }, + { + "name": "cf-cache-status", + "value": "DYNAMIC" + }, + { + "name": "cf-ray", + "value": "9a1158eb0a267d98-TLV" + }, + { + "name": "connection", + "value": "keep-alive" + }, + { + "name": "content-encoding", + "value": "br" + }, + { + "name": "content-type", + "value": "application/json" + }, + { + "name": "date", + "value": "Wed, 19 Nov 2025 17:15:04 GMT" + }, + { + "name": "openai-organization", + "value": "traceloop" + }, + { + "name": "openai-processing-ms", + "value": "1592" + }, + { + "name": "openai-project", + "value": "proj_tzz1TbPPOXaf6j9tEkVUBIAa" + }, + { + "name": "openai-version", + "value": "2020-10-01" + }, + { + "name": "server", + "value": "cloudflare" + }, + { + "name": "set-cookie", + "value": "_cfuvid=GnsDcPRc2JXA6WYXqOP6Cib7jdag23KcQGZJENLIGFk-1763572504802-0.0.1.1-604800000; path=/; 
domain=.api.openai.com; HttpOnly; Secure; SameSite=None" + }, + { + "name": "strict-transport-security", + "value": "max-age=31536000; includeSubDomains; preload" + }, + { + "name": "transfer-encoding", + "value": "chunked" + }, + { + "name": "x-content-type-options", + "value": "nosniff" + }, + { + "name": "x-envoy-upstream-service-time", + "value": "2373" + }, + { + "name": "x-ratelimit-limit-requests", + "value": "10000" + }, + { + "name": "x-ratelimit-limit-tokens", + "value": "50000000" + }, + { + "name": "x-ratelimit-remaining-requests", + "value": "9999" + }, + { + "name": "x-ratelimit-remaining-tokens", + "value": "49999985" + }, + { + "name": "x-ratelimit-reset-requests", + "value": "6ms" + }, + { + "name": "x-ratelimit-reset-tokens", + "value": "0s" + }, + { + "name": "x-request-id", + "value": "req_ff558741f6e149a09db4e918b7b280aa" + } + ], + "headersSize": 955, + "httpVersion": "HTTP/1.1", + "redirectURL": "", + "status": 200, + "statusText": "OK" + }, + "startedDateTime": "2025-11-19T17:15:02.150Z", + "time": 2615, + "timings": { + "blocked": -1, + "connect": -1, + "dns": -1, + "receive": 0, + "send": 0, + "ssl": -1, + "wait": 2615 + } + } + ], + "pages": [], + "version": "1.2" + } +} diff --git a/packages/traceloop-sdk/recordings/AI-SDK-OTel-GenAI-Semantic-Conventions_247892713/gen_ai-provider-name-attribute_149611164/should-set-provider-name-to-anthropic-for-Anthropic_274976646/recording.har b/packages/traceloop-sdk/recordings/AI-SDK-OTel-GenAI-Semantic-Conventions_247892713/gen_ai-provider-name-attribute_149611164/should-set-provider-name-to-anthropic-for-Anthropic_274976646/recording.har new file mode 100644 index 00000000..753e1bc3 --- /dev/null +++ b/packages/traceloop-sdk/recordings/AI-SDK-OTel-GenAI-Semantic-Conventions_247892713/gen_ai-provider-name-attribute_149611164/should-set-provider-name-to-anthropic-for-Anthropic_274976646/recording.har @@ -0,0 +1,170 @@ +{ + "log": { + "_recordingName": "AI SDK OTel GenAI Semantic 
Conventions/gen_ai.provider.name attribute/should set provider.name to 'anthropic' for Anthropic", + "creator": { + "comment": "persister:fs", + "name": "Polly.JS", + "version": "6.0.6" + }, + "entries": [ + { + "_id": "d3a53b09e7d2ed4f040068017cb1347e", + "_order": 0, + "cache": {}, + "request": { + "bodySize": 125, + "cookies": [], + "headers": [ + { + "name": "content-type", + "value": "application/json" + } + ], + "headersSize": 239, + "httpVersion": "HTTP/1.1", + "method": "POST", + "postData": { + "mimeType": "application/json", + "params": [], + "text": "{\"model\":\"claude-3-haiku-20240307\",\"max_tokens\":4096,\"messages\":[{\"role\":\"user\",\"content\":[{\"type\":\"text\",\"text\":\"Hello\"}]}]}" + }, + "queryString": [], + "url": "https://api.anthropic.com/v1/messages" + }, + "response": { + "bodySize": 436, + "content": { + "mimeType": "application/json", + "size": 436, + "text": "{\"model\":\"claude-3-haiku-20240307\",\"id\":\"msg_01RRXVLq5fWEF2uvo2Xt93GA\",\"type\":\"message\",\"role\":\"assistant\",\"content\":[{\"type\":\"text\",\"text\":\"Hello! 
How can I assist you today?\"}],\"stop_reason\":\"end_turn\",\"stop_sequence\":null,\"usage\":{\"input_tokens\":8,\"cache_creation_input_tokens\":0,\"cache_read_input_tokens\":0,\"cache_creation\":{\"ephemeral_5m_input_tokens\":0,\"ephemeral_1h_input_tokens\":0},\"output_tokens\":12,\"service_tier\":\"standard\"}}" + }, + "cookies": [], + "headers": [ + { + "name": "anthropic-organization-id", + "value": "617d109c-a187-4902-889d-689223d134aa" + }, + { + "name": "anthropic-ratelimit-input-tokens-limit", + "value": "400000" + }, + { + "name": "anthropic-ratelimit-input-tokens-remaining", + "value": "400000" + }, + { + "name": "anthropic-ratelimit-input-tokens-reset", + "value": "2025-11-19T17:15:07Z" + }, + { + "name": "anthropic-ratelimit-output-tokens-limit", + "value": "80000" + }, + { + "name": "anthropic-ratelimit-output-tokens-remaining", + "value": "80000" + }, + { + "name": "anthropic-ratelimit-output-tokens-reset", + "value": "2025-11-19T17:15:07Z" + }, + { + "name": "anthropic-ratelimit-requests-limit", + "value": "4000" + }, + { + "name": "anthropic-ratelimit-requests-remaining", + "value": "3999" + }, + { + "name": "anthropic-ratelimit-requests-reset", + "value": "2025-11-19T17:15:07Z" + }, + { + "name": "anthropic-ratelimit-tokens-limit", + "value": "480000" + }, + { + "name": "anthropic-ratelimit-tokens-remaining", + "value": "480000" + }, + { + "name": "anthropic-ratelimit-tokens-reset", + "value": "2025-11-19T17:15:07Z" + }, + { + "name": "cf-cache-status", + "value": "DYNAMIC" + }, + { + "name": "cf-ray", + "value": "9a115907fab2d31d-TLV" + }, + { + "name": "connection", + "value": "keep-alive" + }, + { + "name": "content-encoding", + "value": "gzip" + }, + { + "name": "content-type", + "value": "application/json" + }, + { + "name": "date", + "value": "Wed, 19 Nov 2025 17:15:08 GMT" + }, + { + "name": "request-id", + "value": "req_011CVHdCwqFtFbhVbj71hyMf" + }, + { + "name": "retry-after", + "value": "58" + }, + { + "name": "server", + "value": 
"cloudflare" + }, + { + "name": "strict-transport-security", + "value": "max-age=31536000; includeSubDomains; preload" + }, + { + "name": "transfer-encoding", + "value": "chunked" + }, + { + "name": "x-envoy-upstream-service-time", + "value": "988" + }, + { + "name": "x-robots-tag", + "value": "none" + } + ], + "headersSize": 1091, + "httpVersion": "HTTP/1.1", + "redirectURL": "", + "status": 200, + "statusText": "OK" + }, + "startedDateTime": "2025-11-19T17:15:06.802Z", + "time": 1165, + "timings": { + "blocked": -1, + "connect": -1, + "dns": -1, + "receive": 0, + "send": 0, + "ssl": -1, + "wait": 1165 + } + } + ], + "pages": [], + "version": "1.2" + } +} diff --git a/packages/traceloop-sdk/recordings/AI-SDK-OTel-GenAI-Semantic-Conventions_247892713/gen_ai-provider-name-attribute_149611164/should-set-provider-name-to-openai-for-OpenAI_936627494/recording.har b/packages/traceloop-sdk/recordings/AI-SDK-OTel-GenAI-Semantic-Conventions_247892713/gen_ai-provider-name-attribute_149611164/should-set-provider-name-to-openai-for-OpenAI_936627494/recording.har new file mode 100644 index 00000000..fd9bfedc --- /dev/null +++ b/packages/traceloop-sdk/recordings/AI-SDK-OTel-GenAI-Semantic-Conventions_247892713/gen_ai-provider-name-attribute_149611164/should-set-provider-name-to-openai-for-OpenAI_936627494/recording.har @@ -0,0 +1,172 @@ +{ + "log": { + "_recordingName": "AI SDK OTel GenAI Semantic Conventions/gen_ai.provider.name attribute/should set provider.name to 'openai' for OpenAI", + "creator": { + "comment": "persister:fs", + "name": "Polly.JS", + "version": "6.0.6" + }, + "entries": [ + { + "_id": "faf4c8a42b70f0baf6ae6ea7dfe34197", + "_order": 0, + "cache": {}, + "request": { + "bodySize": 100, + "cookies": [], + "headers": [ + { + "name": "content-type", + "value": "application/json" + } + ], + "headersSize": 165, + "httpVersion": "HTTP/1.1", + "method": "POST", + "postData": { + "mimeType": "application/json", + "params": [], + "text": 
"{\"model\":\"gpt-3.5-turbo\",\"input\":[{\"role\":\"user\",\"content\":[{\"type\":\"input_text\",\"text\":\"Hello\"}]}]}" + }, + "queryString": [], + "url": "https://api.openai.com/v1/responses" + }, + "response": { + "bodySize": 1472, + "content": { + "mimeType": "application/json", + "size": 1472, + "text": "{\n \"id\": \"resp_0a865902eb4801b500691dfb1928d48193b576bd45912572f9\",\n \"object\": \"response\",\n \"created_at\": 1763572505,\n \"status\": \"completed\",\n \"background\": false,\n \"billing\": {\n \"payer\": \"developer\"\n },\n \"error\": null,\n \"incomplete_details\": null,\n \"instructions\": null,\n \"max_output_tokens\": null,\n \"max_tool_calls\": null,\n \"model\": \"gpt-3.5-turbo-0125\",\n \"output\": [\n {\n \"id\": \"msg_0a865902eb4801b500691dfb1a81288193bb1b05a201f0fb0f\",\n \"type\": \"message\",\n \"status\": \"completed\",\n \"content\": [\n {\n \"type\": \"output_text\",\n \"annotations\": [],\n \"logprobs\": [],\n \"text\": \"Hello! How can I assist you today?\"\n }\n ],\n \"role\": \"assistant\"\n }\n ],\n \"parallel_tool_calls\": true,\n \"previous_response_id\": null,\n \"prompt_cache_key\": null,\n \"prompt_cache_retention\": null,\n \"reasoning\": {\n \"effort\": null,\n \"summary\": null\n },\n \"safety_identifier\": null,\n \"service_tier\": \"default\",\n \"store\": true,\n \"temperature\": 1.0,\n \"text\": {\n \"format\": {\n \"type\": \"text\"\n },\n \"verbosity\": \"medium\"\n },\n \"tool_choice\": \"auto\",\n \"tools\": [],\n \"top_logprobs\": 0,\n \"top_p\": 1.0,\n \"truncation\": \"disabled\",\n \"usage\": {\n \"input_tokens\": 8,\n \"input_tokens_details\": {\n \"cached_tokens\": 0\n },\n \"output_tokens\": 10,\n \"output_tokens_details\": {\n \"reasoning_tokens\": 0\n },\n \"total_tokens\": 18\n },\n \"user\": null,\n \"metadata\": {}\n}" + }, + "cookies": [ + { + "domain": ".api.openai.com", + "httpOnly": true, + "name": "_cfuvid", + "path": "/", + "sameSite": "None", + "secure": true, + "value": 
"V65jkPCPcfb3sUvdDgPFcx2CeLo5tyZ51QUdJBmw.ys-1763572506822-0.0.1.1-604800000" + } + ], + "headers": [ + { + "name": "alt-svc", + "value": "h3=\":443\"; ma=86400" + }, + { + "name": "cf-cache-status", + "value": "DYNAMIC" + }, + { + "name": "cf-ray", + "value": "9a1158fb3a0c7d98-TLV" + }, + { + "name": "connection", + "value": "keep-alive" + }, + { + "name": "content-encoding", + "value": "br" + }, + { + "name": "content-type", + "value": "application/json" + }, + { + "name": "date", + "value": "Wed, 19 Nov 2025 17:15:06 GMT" + }, + { + "name": "openai-organization", + "value": "traceloop" + }, + { + "name": "openai-processing-ms", + "value": "1668" + }, + { + "name": "openai-project", + "value": "proj_tzz1TbPPOXaf6j9tEkVUBIAa" + }, + { + "name": "openai-version", + "value": "2020-10-01" + }, + { + "name": "server", + "value": "cloudflare" + }, + { + "name": "set-cookie", + "value": "_cfuvid=V65jkPCPcfb3sUvdDgPFcx2CeLo5tyZ51QUdJBmw.ys-1763572506822-0.0.1.1-604800000; path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None" + }, + { + "name": "strict-transport-security", + "value": "max-age=31536000; includeSubDomains; preload" + }, + { + "name": "transfer-encoding", + "value": "chunked" + }, + { + "name": "x-content-type-options", + "value": "nosniff" + }, + { + "name": "x-envoy-upstream-service-time", + "value": "1813" + }, + { + "name": "x-ratelimit-limit-requests", + "value": "10000" + }, + { + "name": "x-ratelimit-limit-tokens", + "value": "50000000" + }, + { + "name": "x-ratelimit-remaining-requests", + "value": "9999" + }, + { + "name": "x-ratelimit-remaining-tokens", + "value": "49999986" + }, + { + "name": "x-ratelimit-reset-requests", + "value": "6ms" + }, + { + "name": "x-ratelimit-reset-tokens", + "value": "0s" + }, + { + "name": "x-request-id", + "value": "req_e8510ecfab554f41ae13d056b54d43c0" + } + ], + "headersSize": 955, + "httpVersion": "HTTP/1.1", + "redirectURL": "", + "status": 200, + "statusText": "OK" + }, + "startedDateTime": 
"2025-11-19T17:15:04.779Z", + "time": 2007, + "timings": { + "blocked": -1, + "connect": -1, + "dns": -1, + "receive": 0, + "send": 0, + "ssl": -1, + "wait": 2007 + } + } + ], + "pages": [], + "version": "1.2" + } +} diff --git a/packages/traceloop-sdk/recordings/AI-SDK-OTel-GenAI-Semantic-Conventions_247892713/gen_ai-system_instructions-attribute_2872379897/should-separate-system-instructions-from-input-messages_554757073/recording.har b/packages/traceloop-sdk/recordings/AI-SDK-OTel-GenAI-Semantic-Conventions_247892713/gen_ai-system_instructions-attribute_2872379897/should-separate-system-instructions-from-input-messages_554757073/recording.har new file mode 100644 index 00000000..6ac17e25 --- /dev/null +++ b/packages/traceloop-sdk/recordings/AI-SDK-OTel-GenAI-Semantic-Conventions_247892713/gen_ai-system_instructions-attribute_2872379897/should-separate-system-instructions-from-input-messages_554757073/recording.har @@ -0,0 +1,172 @@ +{ + "log": { + "_recordingName": "AI SDK OTel GenAI Semantic Conventions/gen_ai.system_instructions attribute/should separate system instructions from input messages", + "creator": { + "comment": "persister:fs", + "name": "Polly.JS", + "version": "6.0.6" + }, + "entries": [ + { + "_id": "69da78c8ba89249246c7a2acf10efa65", + "_order": 0, + "cache": {}, + "request": { + "bodySize": 196, + "cookies": [], + "headers": [ + { + "name": "content-type", + "value": "application/json" + } + ], + "headersSize": 165, + "httpVersion": "HTTP/1.1", + "method": "POST", + "postData": { + "mimeType": "application/json", + "params": [], + "text": "{\"model\":\"gpt-3.5-turbo\",\"input\":[{\"role\":\"system\",\"content\":\"You are a helpful assistant specialized in weather.\"},{\"role\":\"user\",\"content\":[{\"type\":\"input_text\",\"text\":\"What's the weather?\"}]}]}" + }, + "queryString": [], + "url": "https://api.openai.com/v1/responses" + }, + "response": { + "bodySize": 1591, + "content": { + "mimeType": "application/json", + "size": 1591, + 
"text": "{\n \"id\": \"resp_0606cb6e2acd126f00691dfb1c253c8197b0ac8c591d1a910c\",\n \"object\": \"response\",\n \"created_at\": 1763572508,\n \"status\": \"completed\",\n \"background\": false,\n \"billing\": {\n \"payer\": \"developer\"\n },\n \"error\": null,\n \"incomplete_details\": null,\n \"instructions\": null,\n \"max_output_tokens\": null,\n \"max_tool_calls\": null,\n \"model\": \"gpt-3.5-turbo-0125\",\n \"output\": [\n {\n \"id\": \"msg_0606cb6e2acd126f00691dfb1c67d88197b97278a3193666db\",\n \"type\": \"message\",\n \"status\": \"completed\",\n \"content\": [\n {\n \"type\": \"output_text\",\n \"annotations\": [],\n \"logprobs\": [],\n \"text\": \"I'm sorry, but I don't have real-time weather information. I recommend checking a weather website or app for up-to-date weather conditions in your area.\"\n }\n ],\n \"role\": \"assistant\"\n }\n ],\n \"parallel_tool_calls\": true,\n \"previous_response_id\": null,\n \"prompt_cache_key\": null,\n \"prompt_cache_retention\": null,\n \"reasoning\": {\n \"effort\": null,\n \"summary\": null\n },\n \"safety_identifier\": null,\n \"service_tier\": \"default\",\n \"store\": true,\n \"temperature\": 1.0,\n \"text\": {\n \"format\": {\n \"type\": \"text\"\n },\n \"verbosity\": \"medium\"\n },\n \"tool_choice\": \"auto\",\n \"tools\": [],\n \"top_logprobs\": 0,\n \"top_p\": 1.0,\n \"truncation\": \"disabled\",\n \"usage\": {\n \"input_tokens\": 25,\n \"input_tokens_details\": {\n \"cached_tokens\": 0\n },\n \"output_tokens\": 33,\n \"output_tokens_details\": {\n \"reasoning_tokens\": 0\n },\n \"total_tokens\": 58\n },\n \"user\": null,\n \"metadata\": {}\n}" + }, + "cookies": [ + { + "domain": ".api.openai.com", + "httpOnly": true, + "name": "_cfuvid", + "path": "/", + "sameSite": "None", + "secure": true, + "value": "Wo1qSxz1s4QuR9tSiE70HQPIxYkJ6dBA4Ho66lwt9kc-1763572509107-0.0.1.1-604800000" + } + ], + "headers": [ + { + "name": "alt-svc", + "value": "h3=\":443\"; ma=86400" + }, + { + "name": "cf-cache-status", + 
"value": "DYNAMIC" + }, + { + "name": "cf-ray", + "value": "9a11590f3d9e7d98-TLV" + }, + { + "name": "connection", + "value": "keep-alive" + }, + { + "name": "content-encoding", + "value": "br" + }, + { + "name": "content-type", + "value": "application/json" + }, + { + "name": "date", + "value": "Wed, 19 Nov 2025 17:15:09 GMT" + }, + { + "name": "openai-organization", + "value": "traceloop" + }, + { + "name": "openai-processing-ms", + "value": "887" + }, + { + "name": "openai-project", + "value": "proj_tzz1TbPPOXaf6j9tEkVUBIAa" + }, + { + "name": "openai-version", + "value": "2020-10-01" + }, + { + "name": "server", + "value": "cloudflare" + }, + { + "name": "set-cookie", + "value": "_cfuvid=Wo1qSxz1s4QuR9tSiE70HQPIxYkJ6dBA4Ho66lwt9kc-1763572509107-0.0.1.1-604800000; path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None" + }, + { + "name": "strict-transport-security", + "value": "max-age=31536000; includeSubDomains; preload" + }, + { + "name": "transfer-encoding", + "value": "chunked" + }, + { + "name": "x-content-type-options", + "value": "nosniff" + }, + { + "name": "x-envoy-upstream-service-time", + "value": "891" + }, + { + "name": "x-ratelimit-limit-requests", + "value": "10000" + }, + { + "name": "x-ratelimit-limit-tokens", + "value": "50000000" + }, + { + "name": "x-ratelimit-remaining-requests", + "value": "9999" + }, + { + "name": "x-ratelimit-remaining-tokens", + "value": "49999970" + }, + { + "name": "x-ratelimit-reset-requests", + "value": "6ms" + }, + { + "name": "x-ratelimit-reset-tokens", + "value": "0s" + }, + { + "name": "x-request-id", + "value": "req_4137c4af806c4bd4a31fe6fa0f4b7ab1" + } + ], + "headersSize": 953, + "httpVersion": "HTTP/1.1", + "redirectURL": "", + "status": 200, + "statusText": "OK" + }, + "startedDateTime": "2025-11-19T17:15:07.981Z", + "time": 1079, + "timings": { + "blocked": -1, + "connect": -1, + "dns": -1, + "receive": 0, + "send": 0, + "ssl": -1, + "wait": 1079 + } + } + ], + "pages": [], + "version": "1.2" + } 
+} diff --git a/packages/traceloop-sdk/recordings/AI-SDK-OTel-GenAI-Semantic-Conventions_247892713/gen_ai-tool-definitions-attribute_2710797144/should-create-structured-tool-definitions-for-tools_2917601873/recording.har b/packages/traceloop-sdk/recordings/AI-SDK-OTel-GenAI-Semantic-Conventions_247892713/gen_ai-tool-definitions-attribute_2710797144/should-create-structured-tool-definitions-for-tools_2917601873/recording.har new file mode 100644 index 00000000..cb56dcb3 --- /dev/null +++ b/packages/traceloop-sdk/recordings/AI-SDK-OTel-GenAI-Semantic-Conventions_247892713/gen_ai-tool-definitions-attribute_2710797144/should-create-structured-tool-definitions-for-tools_2917601873/recording.har @@ -0,0 +1,144 @@ +{ + "log": { + "_recordingName": "AI SDK OTel GenAI Semantic Conventions/gen_ai.tool.definitions attribute/should create structured tool.definitions for tools", + "creator": { + "comment": "persister:fs", + "name": "Polly.JS", + "version": "6.0.6" + }, + "entries": [ + { + "_id": "e75d3cd9af9b4e407d6f9a306806c3e5", + "_order": 0, + "cache": {}, + "request": { + "bodySize": 320, + "cookies": [], + "headers": [ + { + "name": "content-type", + "value": "application/json" + } + ], + "headersSize": 165, + "httpVersion": "HTTP/1.1", + "method": "POST", + "postData": { + "mimeType": "application/json", + "params": [], + "text": "{\"model\":\"gpt-3.5-turbo\",\"input\":[{\"role\":\"user\",\"content\":[{\"type\":\"input_text\",\"text\":\"Say you'll check weather\"}]}],\"tools\":[{\"type\":\"function\",\"name\":\"getWeather\",\"description\":\"Get the current weather for a location\",\"parameters\":{\"properties\":{},\"additionalProperties\":false},\"strict\":false}],\"tool_choice\":\"auto\"}" + }, + "queryString": [], + "url": "https://api.openai.com/v1/responses" + }, + "response": { + "bodySize": 269, + "content": { + "mimeType": "application/json", + "size": 269, + "text": "{\n \"error\": {\n \"message\": \"Invalid schema for function 'getWeather': schema must be a 
JSON Schema of 'type: \\\"object\\\"', got 'type: \\\"None\\\"'.\",\n \"type\": \"invalid_request_error\",\n \"param\": \"tools[0].parameters\",\n \"code\": \"invalid_function_parameters\"\n }\n}" + }, + "cookies": [ + { + "domain": ".api.openai.com", + "httpOnly": true, + "name": "_cfuvid", + "path": "/", + "sameSite": "None", + "secure": true, + "value": "0n71w9IJuh123QqUOKxdzu4_rYT3Ono.OtFXdx4MMfA-1763572442729-0.0.1.1-604800000" + } + ], + "headers": [ + { + "name": "alt-svc", + "value": "h3=\":443\"; ma=86400" + }, + { + "name": "cf-cache-status", + "value": "DYNAMIC" + }, + { + "name": "cf-ray", + "value": "9a115775eac59d70-TLV" + }, + { + "name": "connection", + "value": "keep-alive" + }, + { + "name": "content-length", + "value": "269" + }, + { + "name": "content-type", + "value": "application/json" + }, + { + "name": "date", + "value": "Wed, 19 Nov 2025 17:14:02 GMT" + }, + { + "name": "openai-organization", + "value": "traceloop" + }, + { + "name": "openai-processing-ms", + "value": "15" + }, + { + "name": "openai-project", + "value": "proj_tzz1TbPPOXaf6j9tEkVUBIAa" + }, + { + "name": "openai-version", + "value": "2020-10-01" + }, + { + "name": "server", + "value": "cloudflare" + }, + { + "name": "set-cookie", + "value": "_cfuvid=0n71w9IJuh123QqUOKxdzu4_rYT3Ono.OtFXdx4MMfA-1763572442729-0.0.1.1-604800000; path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None" + }, + { + "name": "strict-transport-security", + "value": "max-age=31536000; includeSubDomains; preload" + }, + { + "name": "x-content-type-options", + "value": "nosniff" + }, + { + "name": "x-envoy-upstream-service-time", + "value": "18" + }, + { + "name": "x-request-id", + "value": "req_c0ae39d5aee64f8fa5adfdb80b019370" + } + ], + "headersSize": 710, + "httpVersion": "HTTP/1.1", + "redirectURL": "", + "status": 400, + "statusText": "Bad Request" + }, + "startedDateTime": "2025-11-19T17:14:02.483Z", + "time": 195, + "timings": { + "blocked": -1, + "connect": -1, + "dns": -1, + "receive": 
0, + "send": 0, + "ssl": -1, + "wait": 195 + } + } + ], + "pages": [], + "version": "1.2" + } +} diff --git a/packages/traceloop-sdk/recordings/AI-SDK-OTel-GenAI-Semantic-Conventions_247892713/gen_ai-usage-tokens-attributes_1495437318/should-set-both-new-and-deprecated-token-attributes_3016620579/recording.har b/packages/traceloop-sdk/recordings/AI-SDK-OTel-GenAI-Semantic-Conventions_247892713/gen_ai-usage-tokens-attributes_1495437318/should-set-both-new-and-deprecated-token-attributes_3016620579/recording.har new file mode 100644 index 00000000..0f46f7e7 --- /dev/null +++ b/packages/traceloop-sdk/recordings/AI-SDK-OTel-GenAI-Semantic-Conventions_247892713/gen_ai-usage-tokens-attributes_1495437318/should-set-both-new-and-deprecated-token-attributes_3016620579/recording.har @@ -0,0 +1,172 @@ +{ + "log": { + "_recordingName": "AI SDK OTel GenAI Semantic Conventions/gen_ai.usage tokens attributes/should set both new and deprecated token attributes", + "creator": { + "comment": "persister:fs", + "name": "Polly.JS", + "version": "6.0.6" + }, + "entries": [ + { + "_id": "774e9837684f7be8aa69eadaf2181428", + "_order": 0, + "cache": {}, + "request": { + "bodySize": 105, + "cookies": [], + "headers": [ + { + "name": "content-type", + "value": "application/json" + } + ], + "headersSize": 165, + "httpVersion": "HTTP/1.1", + "method": "POST", + "postData": { + "mimeType": "application/json", + "params": [], + "text": "{\"model\":\"gpt-3.5-turbo\",\"input\":[{\"role\":\"user\",\"content\":[{\"type\":\"input_text\",\"text\":\"Count to 5\"}]}]}" + }, + "queryString": [], + "url": "https://api.openai.com/v1/responses" + }, + "response": { + "bodySize": 1452, + "content": { + "mimeType": "application/json", + "size": 1452, + "text": "{\n \"id\": \"resp_0e829279d33d00d500691dfb1d779881979c6ed8757292a9b1\",\n \"object\": \"response\",\n \"created_at\": 1763572509,\n \"status\": \"completed\",\n \"background\": false,\n \"billing\": {\n \"payer\": \"developer\"\n },\n \"error\": 
null,\n \"incomplete_details\": null,\n \"instructions\": null,\n \"max_output_tokens\": null,\n \"max_tool_calls\": null,\n \"model\": \"gpt-3.5-turbo-0125\",\n \"output\": [\n {\n \"id\": \"msg_0e829279d33d00d500691dfb1dfed881979fbb7b1ce04685ac\",\n \"type\": \"message\",\n \"status\": \"completed\",\n \"content\": [\n {\n \"type\": \"output_text\",\n \"annotations\": [],\n \"logprobs\": [],\n \"text\": \"1, 2, 3, 4, 5\"\n }\n ],\n \"role\": \"assistant\"\n }\n ],\n \"parallel_tool_calls\": true,\n \"previous_response_id\": null,\n \"prompt_cache_key\": null,\n \"prompt_cache_retention\": null,\n \"reasoning\": {\n \"effort\": null,\n \"summary\": null\n },\n \"safety_identifier\": null,\n \"service_tier\": \"default\",\n \"store\": true,\n \"temperature\": 1.0,\n \"text\": {\n \"format\": {\n \"type\": \"text\"\n },\n \"verbosity\": \"medium\"\n },\n \"tool_choice\": \"auto\",\n \"tools\": [],\n \"top_logprobs\": 0,\n \"top_p\": 1.0,\n \"truncation\": \"disabled\",\n \"usage\": {\n \"input_tokens\": 11,\n \"input_tokens_details\": {\n \"cached_tokens\": 0\n },\n \"output_tokens\": 14,\n \"output_tokens_details\": {\n \"reasoning_tokens\": 0\n },\n \"total_tokens\": 25\n },\n \"user\": null,\n \"metadata\": {}\n}" + }, + "cookies": [ + { + "domain": ".api.openai.com", + "httpOnly": true, + "name": "_cfuvid", + "path": "/", + "sameSite": "None", + "secure": true, + "value": "2ATU3yXqmYpT79ZVIVzsy2bfUWddYMe5J8LTdoXdEJI-1763572510323-0.0.1.1-604800000" + } + ], + "headers": [ + { + "name": "alt-svc", + "value": "h3=\":443\"; ma=86400" + }, + { + "name": "cf-cache-status", + "value": "DYNAMIC" + }, + { + "name": "cf-ray", + "value": "9a1159160c447d98-TLV" + }, + { + "name": "connection", + "value": "keep-alive" + }, + { + "name": "content-encoding", + "value": "br" + }, + { + "name": "content-type", + "value": "application/json" + }, + { + "name": "date", + "value": "Wed, 19 Nov 2025 17:15:10 GMT" + }, + { + "name": "openai-organization", + "value": "traceloop" + }, 
+ { + "name": "openai-processing-ms", + "value": "786" + }, + { + "name": "openai-project", + "value": "proj_tzz1TbPPOXaf6j9tEkVUBIAa" + }, + { + "name": "openai-version", + "value": "2020-10-01" + }, + { + "name": "server", + "value": "cloudflare" + }, + { + "name": "set-cookie", + "value": "_cfuvid=2ATU3yXqmYpT79ZVIVzsy2bfUWddYMe5J8LTdoXdEJI-1763572510323-0.0.1.1-604800000; path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None" + }, + { + "name": "strict-transport-security", + "value": "max-age=31536000; includeSubDomains; preload" + }, + { + "name": "transfer-encoding", + "value": "chunked" + }, + { + "name": "x-content-type-options", + "value": "nosniff" + }, + { + "name": "x-envoy-upstream-service-time", + "value": "791" + }, + { + "name": "x-ratelimit-limit-requests", + "value": "10000" + }, + { + "name": "x-ratelimit-limit-tokens", + "value": "50000000" + }, + { + "name": "x-ratelimit-remaining-requests", + "value": "9999" + }, + { + "name": "x-ratelimit-remaining-tokens", + "value": "49999984" + }, + { + "name": "x-ratelimit-reset-requests", + "value": "6ms" + }, + { + "name": "x-ratelimit-reset-tokens", + "value": "0s" + }, + { + "name": "x-request-id", + "value": "req_30c6d640e246495c99f510af6991bdce" + } + ], + "headersSize": 953, + "httpVersion": "HTTP/1.1", + "redirectURL": "", + "status": 200, + "statusText": "OK" + }, + "startedDateTime": "2025-11-19T17:15:09.069Z", + "time": 1206, + "timings": { + "blocked": -1, + "connect": -1, + "dns": -1, + "receive": 0, + "send": 0, + "ssl": -1, + "wait": 1206 + } + } + ], + "pages": [], + "version": "1.2" + } +} diff --git a/packages/traceloop-sdk/src/lib/tracing/ai-sdk-transformations.ts b/packages/traceloop-sdk/src/lib/tracing/ai-sdk-transformations.ts index f6f07645..9af63e21 100644 --- a/packages/traceloop-sdk/src/lib/tracing/ai-sdk-transformations.ts +++ b/packages/traceloop-sdk/src/lib/tracing/ai-sdk-transformations.ts @@ -22,16 +22,41 @@ const AI_PROMPT = "ai.prompt"; const 
AI_USAGE_PROMPT_TOKENS = "ai.usage.promptTokens"; const AI_USAGE_COMPLETION_TOKENS = "ai.usage.completionTokens"; const AI_MODEL_PROVIDER = "ai.model.provider"; +const AI_MODEL_ID = "ai.model.id"; const AI_PROMPT_TOOLS = "ai.prompt.tools"; const AI_TELEMETRY_METADATA_PREFIX = "ai.telemetry.metadata."; +const AI_TELEMETRY_FUNCTION_ID = "ai.telemetry.functionId"; +const AI_RESPONSE_PROVIDER_METADATA = "ai.response.providerMetadata"; +const AI_RESPONSE_ID = "ai.response.id"; +const AI_RESPONSE_MODEL = "ai.response.model"; +const AI_RESPONSE_FINISH_REASON = "ai.response.finishReason"; +const GEN_AI_SYSTEM = "gen_ai.system"; const TYPE_TEXT = "text"; const TYPE_TOOL_CALL = "tool_call"; const ROLE_ASSISTANT = "assistant"; const ROLE_USER = "user"; +const ROLE_SYSTEM = "system"; + +// OTel GenAI provider name mapping +// Maps AI SDK provider prefixes to OpenTelemetry standard provider names +// See: https://opentelemetry.io/docs/specs/semconv/attributes-registry/gen-ai/ +const OTEL_PROVIDER_MAPPING: Record = { + openai: "openai", + "azure-openai": "azure.ai.openai", + anthropic: "anthropic", + cohere: "cohere", + mistral: "mistral_ai", + groq: "groq", + deepseek: "deepseek", + perplexity: "perplexity", + "amazon-bedrock": "aws.bedrock", + bedrock: "aws.bedrock", + google: "gcp.vertex_ai", + vertex: "gcp.vertex_ai", +}; -// Vendor mapping from AI SDK provider prefixes to standardized LLM_SYSTEM values -// Uses prefixes to match AI SDK patterns like "openai.chat", "anthropic.messages", etc. 
-const VENDOR_MAPPING: Record = { +// Legacy vendor mapping for backward compatibility (deprecated attribute names) +const LEGACY_VENDOR_MAPPING: Record = { openai: "OpenAI", "azure-openai": "Azure", anthropic: "Anthropic", @@ -52,6 +77,91 @@ const VENDOR_MAPPING: Record = { openrouter: "OpenRouter", }; +/** + * Adds gen_ai.operation.name attribute based on AI SDK span name + */ +const addOperationName = ( + spanName: string, + attributes: Record, +): void => { + // Map AI SDK span names to OTel operation names + const operationMapping: Record = { + [AI_GENERATE_TEXT_DO_GENERATE]: "chat", + [AI_GENERATE_OBJECT_DO_GENERATE]: "generate_content", + [AI_STREAM_TEXT_DO_STREAM]: "chat", + "ai.embed.doEmbed": "embeddings", + "ai.embedMany.doEmbed": "embeddings", + }; + + const operation = operationMapping[spanName] || "chat"; + attributes[SpanAttributes.GEN_AI_OPERATION_NAME] = operation; +}; + +/** + * Transforms ai.model.id to gen_ai.request.model + */ +const transformModelId = (attributes: Record): void => { + if (AI_MODEL_ID in attributes) { + // Set as gen_ai.request.model if not already set by AI SDK + if (!attributes[SpanAttributes.GEN_AI_REQUEST_MODEL]) { + attributes[SpanAttributes.GEN_AI_REQUEST_MODEL] = + attributes[AI_MODEL_ID]; + } + delete attributes[AI_MODEL_ID]; + } +}; + +/** + * Transforms ai.telemetry.functionId to traceloop.entity.name + */ +const transformFunctionId = (attributes: Record): void => { + if (AI_TELEMETRY_FUNCTION_ID in attributes) { + // Map to traceloop entity name for consistency with other instrumentations + attributes[SpanAttributes.TRACELOOP_ENTITY_NAME] = + attributes[AI_TELEMETRY_FUNCTION_ID]; + delete attributes[AI_TELEMETRY_FUNCTION_ID]; + } +}; + +/** + * Transforms ai.response.providerMetadata to a custom gen_ai attribute + */ +const transformProviderMetadata = (attributes: Record): void => { + if (AI_RESPONSE_PROVIDER_METADATA in attributes) { + // Store as provider.metadata under gen_ai namespace + 
attributes["gen_ai.provider.metadata"] = + attributes[AI_RESPONSE_PROVIDER_METADATA]; + delete attributes[AI_RESPONSE_PROVIDER_METADATA]; + } +}; + +/** + * Transforms AI SDK response metadata attributes to OTel format + */ +const transformResponseMetadata = (attributes: Record): void => { + // Transform response ID + if (AI_RESPONSE_ID in attributes) { + attributes[SpanAttributes.GEN_AI_RESPONSE_ID] = + attributes[AI_RESPONSE_ID]; + delete attributes[AI_RESPONSE_ID]; + } + + // Transform response model + if (AI_RESPONSE_MODEL in attributes) { + attributes[SpanAttributes.GEN_AI_RESPONSE_MODEL] = + attributes[AI_RESPONSE_MODEL]; + delete attributes[AI_RESPONSE_MODEL]; + } + + // Transform finish reason to finish reasons array + if (AI_RESPONSE_FINISH_REASON in attributes) { + const finishReason = attributes[AI_RESPONSE_FINISH_REASON]; + // OTel expects an array of finish reasons + attributes[SpanAttributes.GEN_AI_RESPONSE_FINISH_REASONS] = [finishReason]; + delete attributes[AI_RESPONSE_FINISH_REASON]; + } +}; + const transformResponseText = (attributes: Record): void => { if (AI_RESPONSE_TEXT in attributes) { attributes[`${SpanAttributes.LLM_COMPLETIONS}.0.content`] = @@ -67,6 +177,13 @@ const transformResponseText = (attributes: Record): void => { }, ], }; + + // Set new OTel attribute + attributes[SpanAttributes.GEN_AI_OUTPUT_MESSAGES] = JSON.stringify([ + outputMessage, + ]); + + // Set deprecated attribute for backward compatibility attributes[SpanAttributes.LLM_OUTPUT_MESSAGES] = JSON.stringify([ outputMessage, ]); @@ -90,6 +207,13 @@ const transformResponseObject = (attributes: Record): void => { }, ], }; + + // Set new OTel attribute + attributes[SpanAttributes.GEN_AI_OUTPUT_MESSAGES] = JSON.stringify([ + outputMessage, + ]); + + // Set deprecated attribute for backward compatibility attributes[SpanAttributes.LLM_OUTPUT_MESSAGES] = JSON.stringify([ outputMessage, ]); @@ -132,6 +256,13 @@ const transformResponseToolCalls = (attributes: Record): void => { 
role: ROLE_ASSISTANT, parts: toolCallParts, }; + + // Set new OTel attribute + attributes[SpanAttributes.GEN_AI_OUTPUT_MESSAGES] = JSON.stringify([ + outputMessage, + ]); + + // Set deprecated attribute for backward compatibility attributes[SpanAttributes.LLM_OUTPUT_MESSAGES] = JSON.stringify([ outputMessage, ]); @@ -202,6 +333,9 @@ const transformTools = (attributes: Record): void => { try { const tools = attributes[AI_PROMPT_TOOLS]; if (Array.isArray(tools)) { + // Create OTel-compliant tool definitions structure + const toolDefinitions: any[] = []; + tools.forEach((toolItem: any, index: number) => { let tool = toolItem; if (typeof toolItem === "string") { @@ -213,6 +347,22 @@ const transformTools = (attributes: Record): void => { } if (tool && typeof tool === "object") { + // Add to structured tool definitions for OTel + const toolDef: any = { + type: tool.type || "function", + }; + + if (tool.type === "function" || !tool.type) { + toolDef.function = { + name: tool.name, + description: tool.description, + parameters: tool.parameters, + }; + } + + toolDefinitions.push(toolDef); + + // Also keep flat format for backward compatibility if (tool.name) { attributes[ `${SpanAttributes.LLM_REQUEST_FUNCTIONS}.${index}.name` @@ -235,6 +385,12 @@ const transformTools = (attributes: Record): void => { } } }); + + // Set OTel-compliant tool definitions attribute + if (toolDefinitions.length > 0) { + attributes[SpanAttributes.GEN_AI_TOOL_DEFINITIONS] = + JSON.stringify(toolDefinitions); + } } delete attributes[AI_PROMPT_TOOLS]; } catch { @@ -257,6 +413,7 @@ const transformPrompts = (attributes: Record): void => { const messages = JSON.parse(jsonString); const inputMessages: any[] = []; + const systemInstructions: any[] = []; messages.forEach((msg: { role: string; content: any }, index: number) => { const processedContent = processMessageContent(msg.content); @@ -264,22 +421,42 @@ const transformPrompts = (attributes: Record): void => { attributes[contentKey] = 
processedContent; attributes[`${SpanAttributes.LLM_PROMPTS}.${index}.role`] = msg.role; - // Add to OpenTelemetry standard gen_ai.input.messages format - inputMessages.push({ - role: msg.role, - parts: [ - { - type: TYPE_TEXT, - content: processedContent, - }, - ], - }); + const messagePart = { + type: TYPE_TEXT, + content: processedContent, + }; + + // Separate system messages into system instructions per OTel spec + if (msg.role === ROLE_SYSTEM) { + systemInstructions.push({ + role: ROLE_SYSTEM, + parts: [messagePart], + }); + } else { + // Non-system messages go to input messages + inputMessages.push({ + role: msg.role, + parts: [messagePart], + }); + } }); + // Set system instructions separately (OTel spec) + if (systemInstructions.length > 0) { + attributes[SpanAttributes.GEN_AI_SYSTEM_INSTRUCTIONS] = + JSON.stringify(systemInstructions); + } + // Set the OpenTelemetry standard input messages attribute - if (inputMessages.length > 0) { + // Note: For backward compatibility, we include all messages here + // but OTel spec recommends separating system messages + const allMessages = [...systemInstructions, ...inputMessages]; + if (allMessages.length > 0) { + attributes[SpanAttributes.GEN_AI_INPUT_MESSAGES] = + JSON.stringify(allMessages); + // Also set deprecated attribute for backward compatibility attributes[SpanAttributes.LLM_INPUT_MESSAGES] = - JSON.stringify(inputMessages); + JSON.stringify(allMessages); } delete attributes[AI_PROMPT_MESSAGES]; @@ -305,6 +482,13 @@ const transformPrompts = (attributes: Record): void => { }, ], }; + + // Set new OTel attribute + attributes[SpanAttributes.GEN_AI_INPUT_MESSAGES] = JSON.stringify([ + inputMessage, + ]); + + // Set deprecated attribute for backward compatibility attributes[SpanAttributes.LLM_INPUT_MESSAGES] = JSON.stringify([ inputMessage, ]); @@ -319,16 +503,28 @@ const transformPrompts = (attributes: Record): void => { const transformPromptTokens = (attributes: Record): void => { if (AI_USAGE_PROMPT_TOKENS in 
attributes) { - attributes[`${SpanAttributes.LLM_USAGE_PROMPT_TOKENS}`] = - attributes[AI_USAGE_PROMPT_TOKENS]; + const value = attributes[AI_USAGE_PROMPT_TOKENS]; + + // Set new OTel-compliant attribute + attributes[SpanAttributes.GEN_AI_USAGE_INPUT_TOKENS] = value; + + // Set deprecated attribute for backward compatibility + attributes[SpanAttributes.LLM_USAGE_PROMPT_TOKENS] = value; + delete attributes[AI_USAGE_PROMPT_TOKENS]; } }; const transformCompletionTokens = (attributes: Record): void => { if (AI_USAGE_COMPLETION_TOKENS in attributes) { - attributes[`${SpanAttributes.LLM_USAGE_COMPLETION_TOKENS}`] = - attributes[AI_USAGE_COMPLETION_TOKENS]; + const value = attributes[AI_USAGE_COMPLETION_TOKENS]; + + // Set new OTel-compliant attribute + attributes[SpanAttributes.GEN_AI_USAGE_OUTPUT_TOKENS] = value; + + // Set deprecated attribute for backward compatibility + attributes[SpanAttributes.LLM_USAGE_COMPLETION_TOKENS] = value; + delete attributes[AI_USAGE_COMPLETION_TOKENS]; } }; @@ -345,22 +541,46 @@ const calculateTotalTokens = (attributes: Record): void => { }; const transformVendor = (attributes: Record): void => { - if (AI_MODEL_PROVIDER in attributes) { - const vendor = attributes[AI_MODEL_PROVIDER]; - - // Find matching vendor prefix in mapping - let mappedVendor = null; - if (typeof vendor === "string" && vendor.length > 0) { - for (const prefix of Object.keys(VENDOR_MAPPING)) { - if (vendor.startsWith(prefix)) { - mappedVendor = VENDOR_MAPPING[prefix]; - break; - } + let providerValue: string | null = null; + + // Check if AI SDK already set gen_ai.system (deprecated attribute) + // AI SDK emits this in "Call LLM Span Information" + if (GEN_AI_SYSTEM in attributes) { + providerValue = attributes[GEN_AI_SYSTEM] as string; + delete attributes[GEN_AI_SYSTEM]; // Remove deprecated attribute + } else if (AI_MODEL_PROVIDER in attributes) { + // Otherwise get from ai.model.provider + providerValue = attributes[AI_MODEL_PROVIDER] as string; + delete 
attributes[AI_MODEL_PROVIDER]; + } + + if (typeof providerValue === "string") { + // Handle empty string case + if (providerValue.length === 0) { + attributes[SpanAttributes.GEN_AI_PROVIDER_NAME] = ""; + attributes[SpanAttributes.LLM_SYSTEM] = ""; + return; + } + + // Find matching provider prefix for OTel standard name + let otelProvider: string | null = null; + let legacyProvider: string | null = null; + + for (const prefix of Object.keys(OTEL_PROVIDER_MAPPING)) { + if (providerValue.toLowerCase().startsWith(prefix)) { + otelProvider = OTEL_PROVIDER_MAPPING[prefix]; + legacyProvider = LEGACY_VENDOR_MAPPING[prefix]; + break; } } - attributes[SpanAttributes.LLM_SYSTEM] = mappedVendor || vendor; - delete attributes[AI_MODEL_PROVIDER]; + // Set new OTel-compliant provider name + attributes[SpanAttributes.GEN_AI_PROVIDER_NAME] = + otelProvider || providerValue; + + // Set deprecated attribute for backward compatibility + attributes[SpanAttributes.LLM_SYSTEM] = + legacyProvider || providerValue; } }; @@ -398,16 +618,37 @@ const transformTelemetryMetadata = (attributes: Record): void => { // not during transformation. Use `withTelemetryMetadataContext` function for context propagation. 
}; -export const transformLLMSpans = (attributes: Record): void => { +export const transformLLMSpans = ( + attributes: Record, + spanName?: string, +): void => { + // Add operation name first (required OTel attribute) + if (spanName) { + addOperationName(spanName, attributes); + } + + // Transform AI SDK-specific attributes + transformModelId(attributes); + transformFunctionId(attributes); + transformProviderMetadata(attributes); + transformResponseMetadata(attributes); + + // Transform request/response content transformResponseText(attributes); transformResponseObject(attributes); transformResponseToolCalls(attributes); transformPrompts(attributes); transformTools(attributes); + + // Transform usage metrics transformPromptTokens(attributes); transformCompletionTokens(attributes); calculateTotalTokens(attributes); + + // Transform vendor/provider (must be after tokens for backward compat) transformVendor(attributes); + + // Transform metadata transformTelemetryMetadata(attributes); }; @@ -432,9 +673,23 @@ const shouldHandleSpan = (span: ReadableSpan): boolean => { export const transformAiSdkSpanNames = (span: Span): void => { if (span.name === TOOL_SPAN_NAME) { span.updateName(`${span.attributes["ai.toolCall.name"] as string}.tool`); + return; } + if (span.name in HANDLED_SPAN_NAMES) { - span.updateName(HANDLED_SPAN_NAMES[span.name]); + const newBaseName = HANDLED_SPAN_NAMES[span.name]; + + // Try to append model name for OTel compliance: "{operation} {model}" + const model = + span.attributes[AI_MODEL_ID] || + span.attributes[SpanAttributes.GEN_AI_REQUEST_MODEL]; + + if (model && typeof model === "string") { + // Append model to create OTel-compliant name + span.updateName(`${newBaseName} ${model}`); + } else { + span.updateName(newBaseName); + } } }; @@ -442,6 +697,7 @@ export const transformAiSdkSpanAttributes = (span: ReadableSpan): void => { if (!shouldHandleSpan(span)) { return; } - transformLLMSpans(span.attributes); + // Pass span name to transformations so 
operation name can be set + transformLLMSpans(span.attributes, span.name); transformToolCalls(span); }; diff --git a/packages/traceloop-sdk/test/ai-sdk-integration.test.ts b/packages/traceloop-sdk/test/ai-sdk-integration.test.ts index 4e0d3c04..835207b2 100644 --- a/packages/traceloop-sdk/test/ai-sdk-integration.test.ts +++ b/packages/traceloop-sdk/test/ai-sdk-integration.test.ts @@ -105,22 +105,37 @@ describe("Test AI SDK Integration with Recording", function () { const spans = memoryExporter.getFinishedSpans(); const generateTextSpan = spans.find( - (span) => span.name === "text.generate", + (span) => span.name.startsWith("text.generate"), ); assert.ok(result); assert.ok(result.text); assert.ok(generateTextSpan); - // Verify span name (should be transformed from ai.generateText.doGenerate to text.generate) - assert.strictEqual(generateTextSpan.name, "text.generate"); + // Verify span name (should be transformed and include model name) + assert.ok(generateTextSpan.name.startsWith("text.generate")); - // Verify vendor - assert.strictEqual(generateTextSpan.attributes["gen_ai.system"], "OpenAI"); + // Verify operation name (new OTel attribute) + assert.strictEqual( + generateTextSpan.attributes[SpanAttributes.GEN_AI_OPERATION_NAME], + "chat", + ); + + // Verify provider (new OTel attribute) + assert.strictEqual( + generateTextSpan.attributes[SpanAttributes.GEN_AI_PROVIDER_NAME], + "openai", + ); + + // Verify backward compatibility - deprecated attribute should still be set + assert.strictEqual( + generateTextSpan.attributes[SpanAttributes.LLM_SYSTEM], + "OpenAI", + ); // Verify model information assert.strictEqual( - generateTextSpan.attributes["gen_ai.request.model"], + generateTextSpan.attributes[SpanAttributes.GEN_AI_REQUEST_MODEL], "gpt-3.5-turbo", ); @@ -141,9 +156,21 @@ describe("Test AI SDK Integration with Recording", function () { result.text, ); - // Verify token usage - assert.ok(generateTextSpan.attributes["gen_ai.usage.prompt_tokens"]); - 
assert.ok(generateTextSpan.attributes["gen_ai.usage.completion_tokens"]); + // Verify token usage (new OTel attributes) + assert.ok( + generateTextSpan.attributes[SpanAttributes.GEN_AI_USAGE_INPUT_TOKENS], + ); + assert.ok( + generateTextSpan.attributes[SpanAttributes.GEN_AI_USAGE_OUTPUT_TOKENS], + ); + + // Verify backward compatibility - deprecated attributes should still be set + assert.ok( + generateTextSpan.attributes[SpanAttributes.LLM_USAGE_PROMPT_TOKENS], + ); + assert.ok( + generateTextSpan.attributes[SpanAttributes.LLM_USAGE_COMPLETION_TOKENS], + ); assert.ok(generateTextSpan.attributes["llm.usage.total_tokens"]); }); @@ -172,7 +199,7 @@ describe("Test AI SDK Integration with Recording", function () { // Find the Google span specifically (should have workflow name test_google_workflow) const generateTextSpan = spans.find( (span) => - span.name === "text.generate" && + span.name.startsWith("text.generate") && span.attributes["traceloop.workflow.name"] === "test_google_workflow", ); @@ -180,15 +207,30 @@ describe("Test AI SDK Integration with Recording", function () { assert.ok(result.text); assert.ok(generateTextSpan, "Could not find Google generateText span"); - // Verify span name (should be transformed from ai.generateText.doGenerate to text.generate) - assert.strictEqual(generateTextSpan.name, "text.generate"); + // Verify span name (should be transformed and include model name) + assert.ok(generateTextSpan.name.startsWith("text.generate")); - // Verify vendor - assert.strictEqual(generateTextSpan.attributes["gen_ai.system"], "Google"); + // Verify operation name (new OTel attribute) + assert.strictEqual( + generateTextSpan.attributes[SpanAttributes.GEN_AI_OPERATION_NAME], + "chat", + ); + + // Verify provider (new OTel attribute - should be gcp.vertex_ai) + assert.strictEqual( + generateTextSpan.attributes[SpanAttributes.GEN_AI_PROVIDER_NAME], + "gcp.vertex_ai", + ); + + // Verify backward compatibility - deprecated attribute should still be set + 
assert.strictEqual( + generateTextSpan.attributes[SpanAttributes.LLM_SYSTEM], + "Google", + ); // Verify model information assert.strictEqual( - generateTextSpan.attributes["gen_ai.request.model"], + generateTextSpan.attributes[SpanAttributes.GEN_AI_REQUEST_MODEL], "gemini-1.5-flash", ); @@ -209,9 +251,21 @@ describe("Test AI SDK Integration with Recording", function () { result.text, ); - // Verify token usage - assert.ok(generateTextSpan.attributes["gen_ai.usage.prompt_tokens"]); - assert.ok(generateTextSpan.attributes["gen_ai.usage.completion_tokens"]); + // Verify token usage (new OTel attributes) + assert.ok( + generateTextSpan.attributes[SpanAttributes.GEN_AI_USAGE_INPUT_TOKENS], + ); + assert.ok( + generateTextSpan.attributes[SpanAttributes.GEN_AI_USAGE_OUTPUT_TOKENS], + ); + + // Verify backward compatibility - deprecated attributes should still be set + assert.ok( + generateTextSpan.attributes[SpanAttributes.LLM_USAGE_PROMPT_TOKENS], + ); + assert.ok( + generateTextSpan.attributes[SpanAttributes.LLM_USAGE_COMPLETION_TOKENS], + ); assert.ok(generateTextSpan.attributes["llm.usage.total_tokens"]); }); @@ -233,7 +287,7 @@ describe("Test AI SDK Integration with Recording", function () { assert.ok(result.text); const spans = memoryExporter.getFinishedSpans(); - const aiSdkSpan = spans.find((span) => span.name === "text.generate"); + const aiSdkSpan = spans.find((span) => span.name.startsWith("text.generate")); assert.ok(aiSdkSpan); diff --git a/packages/traceloop-sdk/test/ai-sdk-otel-attributes.test.ts b/packages/traceloop-sdk/test/ai-sdk-otel-attributes.test.ts new file mode 100644 index 00000000..91f34e58 --- /dev/null +++ b/packages/traceloop-sdk/test/ai-sdk-otel-attributes.test.ts @@ -0,0 +1,417 @@ +/* + * Comprehensive tests for OTel GenAI Semantic Conventions compliance + * Tests all new gen_ai.* attributes added for OTel compliance + */ + +import * as assert from "assert"; +import { openai as vercel_openai } from "@ai-sdk/openai"; +import { anthropic as 
vercel_anthropic } from "@ai-sdk/anthropic"; +import { generateText } from "ai"; +import { SpanAttributes } from "@traceloop/ai-semantic-conventions"; + +import * as traceloop from "../src"; + +import { Polly, setupMocha as setupPolly } from "@pollyjs/core"; +import NodeHttpAdapter from "@pollyjs/adapter-node-http"; +import FetchAdapter from "@pollyjs/adapter-fetch"; +import FSPersister from "@pollyjs/persister-fs"; +import { initializeSharedTraceloop, getSharedExporter } from "./test-setup"; + +const memoryExporter = getSharedExporter(); + +Polly.register(NodeHttpAdapter); +Polly.register(FetchAdapter); +Polly.register(FSPersister); + +describe("AI SDK OTel GenAI Semantic Conventions", function () { + // Increase timeout for all tests in this suite + this.timeout(10000); + + setupPolly({ + adapters: ["node-http", "fetch"], + persister: "fs", + recordIfMissing: process.env.RECORD_MODE === "NEW", + recordFailedRequests: true, + mode: process.env.RECORD_MODE === "NEW" ? "record" : "replay", + matchRequestsBy: { + headers: false, + url: { + protocol: true, + hostname: true, + pathname: true, + query: false, + }, + }, + logging: true, + }); + + before(async function () { + if (process.env.RECORD_MODE !== "NEW") { + // Set dummy API keys for replay mode + process.env.OPENAI_API_KEY = "test"; + process.env.ANTHROPIC_API_KEY = "test"; + process.env.GOOGLE_GENERATIVE_AI_API_KEY = "test"; + } + + initializeSharedTraceloop(); + }); + + beforeEach(function () { + const { server } = this.polly as Polly; + server.any().on("beforePersist", (_req, recording) => { + recording.request.headers = recording.request.headers.filter( + ({ name }: { name: string }) => + !["authorization", "x-api-key", "anthropic-version"].includes( + name.toLowerCase(), + ), + ); + }); + }); + + afterEach(async () => { + await traceloop.forceFlush(); + memoryExporter.reset(); + }); + + describe("gen_ai.operation.name attribute", () => { + it("should set operation.name to 'chat' for generateText", async () 
=> { + await traceloop.withWorkflow({ name: "test_operation_name" }, async () => { + await generateText({ + messages: [{ role: "user", content: "Say hello" }], + model: vercel_openai("gpt-3.5-turbo"), + experimental_telemetry: { isEnabled: true }, + }); + }); + + await traceloop.forceFlush(); + const spans = memoryExporter.getFinishedSpans(); + const aiSpan = spans.find((s) => s.name.startsWith("text.generate")); + + assert.ok(aiSpan, "AI span not found"); + assert.strictEqual( + aiSpan.attributes[SpanAttributes.GEN_AI_OPERATION_NAME], + "chat", + "Operation name should be 'chat'", + ); + }); + }); + + describe("gen_ai.provider.name attribute", () => { + it("should set provider.name to 'openai' for OpenAI", async () => { + await traceloop.withWorkflow({ name: "test_openai_provider" }, async () => { + await generateText({ + messages: [{ role: "user", content: "Hello" }], + model: vercel_openai("gpt-3.5-turbo"), + experimental_telemetry: { isEnabled: true }, + }); + }); + + await traceloop.forceFlush(); + const spans = memoryExporter.getFinishedSpans(); + const aiSpan = spans.find((s) => s.name.startsWith("text.generate")); + + assert.ok(aiSpan); + assert.strictEqual( + aiSpan.attributes[SpanAttributes.GEN_AI_PROVIDER_NAME], + "openai", + "Provider name should be 'openai' (OTel standard)", + ); + }); + + it("should set provider.name to 'anthropic' for Anthropic", async () => { + await traceloop.withWorkflow( + { name: "test_anthropic_provider" }, + async () => { + await generateText({ + messages: [{ role: "user", content: "Hello" }], + model: vercel_anthropic("claude-3-haiku-20240307"), + experimental_telemetry: { isEnabled: true }, + }); + }, + ); + + await traceloop.forceFlush(); + const spans = memoryExporter.getFinishedSpans(); + const aiSpan = spans.find((s) => s.name.startsWith("text.generate")); + + assert.ok(aiSpan); + assert.strictEqual( + aiSpan.attributes[SpanAttributes.GEN_AI_PROVIDER_NAME], + "anthropic", + "Provider name should be 'anthropic' (OTel 
standard)", + ); + }); + }); + + describe("gen_ai.tool.definitions attribute", () => { + it("should create structured tool.definitions via transformation", () => { + // Test transformation directly rather than full API call + // since tool schema validation is complex + const { transformLLMSpans } = require("../src/lib/tracing/ai-sdk-transformations"); + + const attributes: Record<string, any> = { + "ai.prompt.tools": [ + { + type: "function", + name: "getWeather", + description: "Get the current weather for a location", + parameters: { + type: "object", + properties: { + location: { type: "string", description: "The city and state" }, + unit: { + type: "string", + enum: ["celsius", "fahrenheit"], + default: "celsius", + }, + }, + required: ["location"], + }, + }, + ], + }; + + transformLLMSpans(attributes); + + // Check for gen_ai.tool.definitions (new OTel attribute) + const toolDefs = attributes[SpanAttributes.GEN_AI_TOOL_DEFINITIONS]; + assert.ok(toolDefs, "tool.definitions should be set"); + + const parsed = JSON.parse(toolDefs); + assert.ok(Array.isArray(parsed), "tool.definitions should be an array"); + assert.strictEqual(parsed.length, 1, "Should have 1 tool"); + assert.strictEqual(parsed[0].type, "function", "Tool type should be 'function'"); + assert.strictEqual( + parsed[0].function.name, + "getWeather", + "Tool name should be 'getWeather'", + ); + assert.ok( + parsed[0].function.description, + "Tool should have description", + ); + assert.ok(parsed[0].function.parameters, "Tool should have parameters"); + + // Also verify backward compatibility - flat format should still exist + assert.strictEqual( + attributes[`${SpanAttributes.LLM_REQUEST_FUNCTIONS}.0.name`], + "getWeather", + ); + }); + }); + + describe("gen_ai.system_instructions attribute", () => { + it("should separate system instructions from input messages", async () => { + await traceloop.withWorkflow( + { name: "test_system_instructions" }, + async () => { + await generateText({ + messages: [ + { + role:
"system", + content: "You are a helpful assistant specialized in weather.", + }, + { role: "user", content: "What's the weather?" }, + ], + model: vercel_openai("gpt-3.5-turbo"), + experimental_telemetry: { isEnabled: true }, + }); + }, + ); + + await traceloop.forceFlush(); + const spans = memoryExporter.getFinishedSpans(); + const aiSpan = spans.find((s) => s.name.startsWith("text.generate")); + + assert.ok(aiSpan); + + // Check for gen_ai.system_instructions (new OTel attribute) + const systemInstructions = + aiSpan.attributes[SpanAttributes.GEN_AI_SYSTEM_INSTRUCTIONS]; + assert.ok( + systemInstructions, + "system_instructions should be set", + ); + + const parsed = JSON.parse(systemInstructions as string); + assert.ok(Array.isArray(parsed), "system_instructions should be an array"); + assert.strictEqual(parsed.length, 1, "Should have 1 system message"); + assert.strictEqual(parsed[0].role, "system"); + assert.ok( + parsed[0].parts[0].content.includes("helpful assistant"), + "Should contain system message content", + ); + + // Check that input messages still include both (for backward compat) + const inputMessages = + aiSpan.attributes[SpanAttributes.GEN_AI_INPUT_MESSAGES]; + assert.ok(inputMessages); + const inputParsed = JSON.parse(inputMessages as string); + assert.strictEqual(inputParsed.length, 2, "Input messages should include both"); + }); + }); + + describe("gen_ai.usage tokens attributes", () => { + it("should set both new and deprecated token attributes", async () => { + await traceloop.withWorkflow({ name: "test_token_attributes" }, async () => { + await generateText({ + messages: [{ role: "user", content: "Count to 5" }], + model: vercel_openai("gpt-3.5-turbo"), + experimental_telemetry: { isEnabled: true }, + }); + }); + + await traceloop.forceFlush(); + const spans = memoryExporter.getFinishedSpans(); + const aiSpan = spans.find((s) => s.name.startsWith("text.generate")); + + assert.ok(aiSpan); + + // Check new OTel attributes + assert.ok( + 
aiSpan.attributes[SpanAttributes.GEN_AI_USAGE_INPUT_TOKENS], + "gen_ai.usage.input_tokens should be set", + ); + assert.ok( + aiSpan.attributes[SpanAttributes.GEN_AI_USAGE_OUTPUT_TOKENS], + "gen_ai.usage.output_tokens should be set", + ); + + // Check deprecated attributes still exist (backward compatibility) + assert.ok( + aiSpan.attributes[SpanAttributes.LLM_USAGE_PROMPT_TOKENS], + "gen_ai.usage.prompt_tokens should still be set", + ); + assert.ok( + aiSpan.attributes[SpanAttributes.LLM_USAGE_COMPLETION_TOKENS], + "gen_ai.usage.completion_tokens should still be set", + ); + + // Verify values match + assert.strictEqual( + aiSpan.attributes[SpanAttributes.GEN_AI_USAGE_INPUT_TOKENS], + aiSpan.attributes[SpanAttributes.LLM_USAGE_PROMPT_TOKENS], + "Input tokens should match prompt tokens", + ); + assert.strictEqual( + aiSpan.attributes[SpanAttributes.GEN_AI_USAGE_OUTPUT_TOKENS], + aiSpan.attributes[SpanAttributes.LLM_USAGE_COMPLETION_TOKENS], + "Output tokens should match completion tokens", + ); + }); + }); + + describe("Backward compatibility", () => { + it("should maintain all deprecated attributes alongside new ones", async () => { + await traceloop.withWorkflow( + { name: "test_backward_compatibility" }, + async () => { + await generateText({ + messages: [ + { role: "system", content: "You are helpful" }, + { role: "user", content: "Hello" }, + ], + model: vercel_openai("gpt-3.5-turbo"), + experimental_telemetry: { isEnabled: true }, + }); + }, + ); + + await traceloop.forceFlush(); + const spans = memoryExporter.getFinishedSpans(); + const aiSpan = spans.find((s) => s.name.startsWith("text.generate")); + + assert.ok(aiSpan); + + // New attributes should exist + assert.ok( + aiSpan.attributes[SpanAttributes.GEN_AI_OPERATION_NAME], + "New: operation.name", + ); + assert.ok( + aiSpan.attributes[SpanAttributes.GEN_AI_PROVIDER_NAME], + "New: provider.name", + ); + assert.ok( + aiSpan.attributes[SpanAttributes.GEN_AI_USAGE_INPUT_TOKENS], + "New: usage.input_tokens", 
+ ); + assert.ok( + aiSpan.attributes[SpanAttributes.GEN_AI_USAGE_OUTPUT_TOKENS], + "New: usage.output_tokens", + ); + assert.ok( + aiSpan.attributes[SpanAttributes.GEN_AI_INPUT_MESSAGES], + "New: input.messages", + ); + assert.ok( + aiSpan.attributes[SpanAttributes.GEN_AI_OUTPUT_MESSAGES], + "New: output.messages", + ); + + // Deprecated attributes should still exist + assert.ok( + aiSpan.attributes[SpanAttributes.LLM_SYSTEM], + "Deprecated: gen_ai.system", + ); + assert.ok( + aiSpan.attributes[SpanAttributes.LLM_USAGE_PROMPT_TOKENS], + "Deprecated: usage.prompt_tokens", + ); + assert.ok( + aiSpan.attributes[SpanAttributes.LLM_USAGE_COMPLETION_TOKENS], + "Deprecated: usage.completion_tokens", + ); + assert.ok( + aiSpan.attributes[SpanAttributes.LLM_INPUT_MESSAGES], + "Deprecated: LLM_INPUT_MESSAGES", + ); + assert.ok( + aiSpan.attributes[SpanAttributes.LLM_OUTPUT_MESSAGES], + "Deprecated: LLM_OUTPUT_MESSAGES", + ); + + // Flat format prompts/completions should still exist + assert.ok( + aiSpan.attributes["gen_ai.prompt.0.role"], + "Flat format: prompt.0.role", + ); + assert.ok( + aiSpan.attributes["gen_ai.completion.0.role"], + "Flat format: completion.0.role", + ); + }); + }); + + describe("Span naming", () => { + it("should follow OTel pattern: {operation} {model}", async function () { + this.timeout(10000); // Increase timeout for API call + + // Clear any previous spans + memoryExporter.reset(); + + await traceloop.withWorkflow({ name: "test_span_naming" }, async () => { + await generateText({ + messages: [{ role: "user", content: "Say hi" }], + model: vercel_openai("gpt-3.5-turbo"), + experimental_telemetry: { isEnabled: true }, + }); + }); + + await traceloop.forceFlush(); + const spans = memoryExporter.getFinishedSpans(); + const aiSpan = spans.find((s) => s.name.startsWith("text.generate")); + + assert.ok(aiSpan, "AI span should exist"); + // Should be like "text.generate gpt-3.5-turbo" + assert.ok( + aiSpan.name.includes("text.generate"), + "Span name 
should include operation", + ); + assert.ok( + aiSpan.name.includes("gpt-3.5-turbo") || aiSpan.name === "text.generate", + "Span name should include model name when available", + ); + }); + }); +}); diff --git a/packages/traceloop-sdk/test/ai-sdk-transformations.test.ts b/packages/traceloop-sdk/test/ai-sdk-transformations.test.ts index 07ef9b37..b12c9cda 100644 --- a/packages/traceloop-sdk/test/ai-sdk-transformations.test.ts +++ b/packages/traceloop-sdk/test/ai-sdk-transformations.test.ts @@ -1005,6 +1005,11 @@ describe("AI SDK Transformations", () => { transformLLMSpans(attributes); + // Should set both new and deprecated attributes to empty string + assert.strictEqual( + attributes[SpanAttributes.GEN_AI_PROVIDER_NAME], + "", + ); assert.strictEqual(attributes[SpanAttributes.LLM_SYSTEM], ""); assert.strictEqual(attributes["ai.model.provider"], undefined); });