diff --git a/packages/ai-semantic-conventions/package.json b/packages/ai-semantic-conventions/package.json index 52b15fc9..00a76224 100644 --- a/packages/ai-semantic-conventions/package.json +++ b/packages/ai-semantic-conventions/package.json @@ -34,7 +34,8 @@ "access": "public" }, "dependencies": { - "@opentelemetry/api": "^1.9.0" + "@opentelemetry/api": "^1.9.0", + "@opentelemetry/semantic-conventions": "^1.37.0" }, "homepage": "https://github.com/traceloop/openllmetry-js/tree/main/packages/ai-semantic-conventions", "gitHead": "ef1e70d6037f7b5c061056ef2be16e3f55f02ed5" diff --git a/packages/instrumentation-anthropic/package.json b/packages/instrumentation-anthropic/package.json index 2e6e8449..9dccaba6 100644 --- a/packages/instrumentation-anthropic/package.json +++ b/packages/instrumentation-anthropic/package.json @@ -41,7 +41,7 @@ "@opentelemetry/api": "^1.9.0", "@opentelemetry/core": "^2.0.1", "@opentelemetry/instrumentation": "^0.203.0", - "@opentelemetry/semantic-conventions": "^1.36.0", + "@opentelemetry/semantic-conventions": "^1.37.0", "@traceloop/ai-semantic-conventions": "workspace:*", "tslib": "^2.8.1" }, diff --git a/packages/instrumentation-bedrock/package.json b/packages/instrumentation-bedrock/package.json index 7e2a8d75..88387689 100644 --- a/packages/instrumentation-bedrock/package.json +++ b/packages/instrumentation-bedrock/package.json @@ -41,7 +41,7 @@ "@opentelemetry/api": "^1.9.0", "@opentelemetry/core": "^2.0.1", "@opentelemetry/instrumentation": "^0.203.0", - "@opentelemetry/semantic-conventions": "^1.36.0", + "@opentelemetry/semantic-conventions": "^1.37.0", "@traceloop/ai-semantic-conventions": "workspace:*", "tslib": "^2.8.1" }, diff --git a/packages/instrumentation-chromadb/package.json b/packages/instrumentation-chromadb/package.json index 5e34466f..7c65f93c 100644 --- a/packages/instrumentation-chromadb/package.json +++ b/packages/instrumentation-chromadb/package.json @@ -41,7 +41,7 @@ "@opentelemetry/api": "^1.9.0", 
"@opentelemetry/core": "^2.0.1", "@opentelemetry/instrumentation": "^0.203.0", - "@opentelemetry/semantic-conventions": "^1.36.0", + "@opentelemetry/semantic-conventions": "^1.37.0", "@traceloop/ai-semantic-conventions": "workspace:*", "tslib": "^2.8.1" }, diff --git a/packages/instrumentation-cohere/package.json b/packages/instrumentation-cohere/package.json index 70c91f2d..e1909236 100644 --- a/packages/instrumentation-cohere/package.json +++ b/packages/instrumentation-cohere/package.json @@ -41,7 +41,7 @@ "@opentelemetry/api": "^1.9.0", "@opentelemetry/core": "^2.0.1", "@opentelemetry/instrumentation": "^0.203.0", - "@opentelemetry/semantic-conventions": "^1.36.0", + "@opentelemetry/semantic-conventions": "^1.37.0", "@traceloop/ai-semantic-conventions": "workspace:*", "tslib": "^2.8.1" }, diff --git a/packages/instrumentation-langchain/package.json b/packages/instrumentation-langchain/package.json index ce00d119..ddc26357 100644 --- a/packages/instrumentation-langchain/package.json +++ b/packages/instrumentation-langchain/package.json @@ -42,7 +42,7 @@ "@opentelemetry/api": "^1.9.0", "@opentelemetry/core": "^2.0.1", "@opentelemetry/instrumentation": "^0.203.0", - "@opentelemetry/semantic-conventions": "^1.36.0", + "@opentelemetry/semantic-conventions": "^1.37.0", "@traceloop/ai-semantic-conventions": "workspace:*", "tslib": "^2.8.1" }, diff --git a/packages/instrumentation-llamaindex/package.json b/packages/instrumentation-llamaindex/package.json index 398f0134..048df7d1 100644 --- a/packages/instrumentation-llamaindex/package.json +++ b/packages/instrumentation-llamaindex/package.json @@ -40,7 +40,7 @@ "@opentelemetry/api": "^1.9.0", "@opentelemetry/core": "^2.0.1", "@opentelemetry/instrumentation": "^0.203.0", - "@opentelemetry/semantic-conventions": "^1.36.0", + "@opentelemetry/semantic-conventions": "^1.37.0", "@traceloop/ai-semantic-conventions": "workspace:*", "lodash": "^4.17.21", "tslib": "^2.8.1" diff --git a/packages/instrumentation-openai/package.json 
b/packages/instrumentation-openai/package.json index 7c7f4f15..9e61395a 100644 --- a/packages/instrumentation-openai/package.json +++ b/packages/instrumentation-openai/package.json @@ -40,7 +40,7 @@ "@opentelemetry/api": "^1.9.0", "@opentelemetry/core": "^2.0.1", "@opentelemetry/instrumentation": "^0.203.0", - "@opentelemetry/semantic-conventions": "^1.36.0", + "@opentelemetry/semantic-conventions": "^1.37.0", "@traceloop/ai-semantic-conventions": "workspace:*", "js-tiktoken": "^1.0.20", "tslib": "^2.8.1" diff --git a/packages/instrumentation-openai/test/instrumentation.test.ts b/packages/instrumentation-openai/test/instrumentation.test.ts index f83359f9..144f4ec6 100644 --- a/packages/instrumentation-openai/test/instrumentation.test.ts +++ b/packages/instrumentation-openai/test/instrumentation.test.ts @@ -24,6 +24,50 @@ import { InMemorySpanExporter, SimpleSpanProcessor, } from "@opentelemetry/sdk-trace-node"; +import { + ATTR_GEN_AI_INPUT_MESSAGES, + ATTR_GEN_AI_OUTPUT_MESSAGES, +} from "@opentelemetry/semantic-conventions/incubating"; + +// Minimal transformation function to test ATTR_GEN_AI_INPUT_MESSAGES and ATTR_GEN_AI_OUTPUT_MESSAGES +const transformToStandardFormat = (attributes: any) => { + // Transform prompts to ATTR_GEN_AI_INPUT_MESSAGES + const inputMessages = []; + let i = 0; + while (attributes[`${SpanAttributes.LLM_PROMPTS}.${i}.role`]) { + const role = attributes[`${SpanAttributes.LLM_PROMPTS}.${i}.role`]; + const content = attributes[`${SpanAttributes.LLM_PROMPTS}.${i}.content`]; + if (role && content) { + inputMessages.push({ + role, + parts: [{ type: "text", content }], + }); + } + i++; + } + if (inputMessages.length > 0) { + attributes[ATTR_GEN_AI_INPUT_MESSAGES] = JSON.stringify(inputMessages); + } + + // Transform completions to ATTR_GEN_AI_OUTPUT_MESSAGES + const outputMessages = []; + let j = 0; + while (attributes[`${SpanAttributes.LLM_COMPLETIONS}.${j}.role`]) { + const role = 
attributes[`${SpanAttributes.LLM_COMPLETIONS}.${j}.role`]; + const content = + attributes[`${SpanAttributes.LLM_COMPLETIONS}.${j}.content`]; + if (role && content) { + outputMessages.push({ + role, + parts: [{ type: "text", content }], + }); + } + j++; + } + if (outputMessages.length > 0) { + attributes[ATTR_GEN_AI_OUTPUT_MESSAGES] = JSON.stringify(outputMessages); + } +}; import type * as OpenAIModule from "openai"; import { toFile } from "openai"; @@ -878,4 +922,54 @@ describe("Test OpenAI instrumentation", async function () { 4160, ); }); + + it("should set ATTR_GEN_AI_INPUT_MESSAGES and ATTR_GEN_AI_OUTPUT_MESSAGES attributes for chat completions", async () => { + const result = await openai.chat.completions.create({ + messages: [ + { role: "user", content: "Tell me a joke about OpenTelemetry" }, + ], + model: "gpt-3.5-turbo", + }); + + const spans = memoryExporter.getFinishedSpans(); + const completionSpan = spans.find((span) => span.name === "openai.chat"); + + assert.ok(result); + assert.ok(completionSpan); + + // Apply transformations to create ATTR_GEN_AI_INPUT_MESSAGES and ATTR_GEN_AI_OUTPUT_MESSAGES + transformToStandardFormat(completionSpan.attributes); + + // Verify ATTR_GEN_AI_INPUT_MESSAGES attribute exists and is valid JSON + assert.ok(completionSpan.attributes[ATTR_GEN_AI_INPUT_MESSAGES]); + const inputMessages = JSON.parse( + completionSpan.attributes[ATTR_GEN_AI_INPUT_MESSAGES] as string, + ); + assert.ok(Array.isArray(inputMessages)); + assert.strictEqual(inputMessages.length, 1); + + // Check user message structure + assert.strictEqual(inputMessages[0].role, "user"); + assert.ok(Array.isArray(inputMessages[0].parts)); + assert.strictEqual(inputMessages[0].parts[0].type, "text"); + assert.strictEqual( + inputMessages[0].parts[0].content, + "Tell me a joke about OpenTelemetry", + ); + + // Verify ATTR_GEN_AI_OUTPUT_MESSAGES attribute exists and is valid JSON + assert.ok(completionSpan.attributes[ATTR_GEN_AI_OUTPUT_MESSAGES]); + const 
outputMessages = JSON.parse( + completionSpan.attributes[ATTR_GEN_AI_OUTPUT_MESSAGES] as string, + ); + assert.ok(Array.isArray(outputMessages)); + assert.strictEqual(outputMessages.length, 1); + + // Check assistant response structure + assert.strictEqual(outputMessages[0].role, "assistant"); + assert.ok(Array.isArray(outputMessages[0].parts)); + assert.strictEqual(outputMessages[0].parts[0].type, "text"); + assert.ok(outputMessages[0].parts[0].content); + assert.ok(typeof outputMessages[0].parts[0].content === "string"); + }); }); diff --git a/packages/instrumentation-openai/test/recordings/Test-OpenAI-instrumentation_1770406427/should-set-ATTR_GEN_AI_INPUT_MESSAGES-and-ATTR_GEN_AI_OUTPUT_MESSAGES-attributes-for-chat_1049053971/recording.har b/packages/instrumentation-openai/test/recordings/Test-OpenAI-instrumentation_1770406427/should-set-ATTR_GEN_AI_INPUT_MESSAGES-and-ATTR_GEN_AI_OUTPUT_MESSAGES-attributes-for-chat_1049053971/recording.har new file mode 100644 index 00000000..b581cdca --- /dev/null +++ b/packages/instrumentation-openai/test/recordings/Test-OpenAI-instrumentation_1770406427/should-set-ATTR_GEN_AI_INPUT_MESSAGES-and-ATTR_GEN_AI_OUTPUT_MESSAGES-attributes-for-chat_1049053971/recording.har @@ -0,0 +1,253 @@ +{ + "log": { + "_recordingName": "Test OpenAI instrumentation/should set ATTR_GEN_AI_INPUT_MESSAGES and ATTR_GEN_AI_OUTPUT_MESSAGES attributes for chat completions", + "creator": { + "comment": "persister:fs", + "name": "Polly.JS", + "version": "6.0.6" + }, + "entries": [ + { + "_id": "55d89d2026cb52c5f2e9f463f5bfc5c1", + "_order": 0, + "cache": {}, + "request": { + "bodySize": 101, + "cookies": [], + "headers": [ + { + "_fromType": "array", + "name": "accept", + "value": "application/json" + }, + { + "_fromType": "array", + "name": "content-type", + "value": "application/json" + }, + { + "_fromType": "array", + "name": "user-agent", + "value": "OpenAI/JS 5.12.2" + }, + { + "_fromType": "array", + "name": "x-stainless-arch", + "value": "arm64" 
+ }, + { + "_fromType": "array", + "name": "x-stainless-lang", + "value": "js" + }, + { + "_fromType": "array", + "name": "x-stainless-os", + "value": "MacOS" + }, + { + "_fromType": "array", + "name": "x-stainless-package-version", + "value": "5.12.2" + }, + { + "_fromType": "array", + "name": "x-stainless-retry-count", + "value": "0" + }, + { + "_fromType": "array", + "name": "x-stainless-runtime", + "value": "node" + }, + { + "_fromType": "array", + "name": "x-stainless-runtime-version", + "value": "v20.10.0" + }, + { + "_fromType": "array", + "name": "content-length", + "value": "101" + }, + { + "_fromType": "array", + "name": "accept-encoding", + "value": "gzip,deflate" + }, + { + "name": "host", + "value": "api.openai.com" + } + ], + "headersSize": 503, + "httpVersion": "HTTP/1.1", + "method": "POST", + "postData": { + "mimeType": "application/json", + "params": [], + "text": "{\"messages\":[{\"role\":\"user\",\"content\":\"Tell me a joke about OpenTelemetry\"}],\"model\":\"gpt-3.5-turbo\"}" + }, + "queryString": [], + "url": "https://api.openai.com/v1/chat/completions" + }, + "response": { + "bodySize": 638, + "content": { + "encoding": "base64", + "mimeType": "application/json", + "size": 638, + "text": "[\"H4sIAAAAAAAAAwAAAP//\",\"jFJNb9swDL37V3A6J0WT1kuQS7F1hx1WDBharOhaGIrE2FpkUZPookGR/z5I+bCzdcAuOvDxUXzv8bUAEEaLBQjVSFatt+Pry9u7h5sP3+jy/maOv768n5c0u3Nzf/35070YJQYtf6LiA+tMUestsiG3g1VAyZimTmZlOZlPZ5MyAy1ptIlWex5fnJVj7sKSxueTablnNmQURrGAHwUAwGt+045O44tYwPnoUGkxRlmjWBybAEQgmypCxmgiS8di1IOKHKPLa39vNqCNBm4Qvnp0t2ixRQ4b0PiMljwGqAmWgdZ4BY/u0X1EJbuIibGBNXoGDhvjamACDlJlxATAF48uYnw3/DngqosyKXedtQNAOkcsk3NZ89Me2R5VWqp9oGX8gypWxpnYVAFlJJcURSYvMrotAJ6ym92JQcIHaj1XTGvM3+1CycYc8uvB6d5pwcTS9vWLA+lkWqWRpbFxkIZQUjWoe2Yfney0oQFQDDT/vcxbs3e6jav/Z3wPKIWeUVc+oDbqVHDfFjBd97/ajh7nhUXE8GwUVmwwpBw0rmRnd3cn4iYyttXKuBqDDyYfX8qx2Ba/AQAA//8=\",\"AwDdhyBqewMAAA==\"]" + }, + "cookies": [ + { + "domain": ".api.openai.com", + "expires": "2025-08-14T15:15:16.000Z", + "httpOnly": true, + 
"name": "__cf_bm", + "path": "/", + "sameSite": "None", + "secure": true, + "value": "cx2GfhENAhZ7.BZ_THTDKDP6iUAOd_j608ETi1oaSTQ-1755182716-1.0.1.1-htqisA8ahupYucMxitr6HT.0bDvz_LUvI6LAiVJvzGVO_ybz_t9zaFBoNDlBYYwffwSfX8989wHANes2K38pR4N7nNR5h81EREnhK0td5gY" + }, + { + "domain": ".api.openai.com", + "httpOnly": true, + "name": "_cfuvid", + "path": "/", + "sameSite": "None", + "secure": true, + "value": "jufw1SR0w67jCpX9lTPFPU6JC1zxAmwwpfT0Zt2ZvHM-1755182716423-0.0.1.1-604800000" + } + ], + "headers": [ + { + "name": "date", + "value": "Thu, 14 Aug 2025 14:45:16 GMT" + }, + { + "name": "content-type", + "value": "application/json" + }, + { + "name": "transfer-encoding", + "value": "chunked" + }, + { + "name": "connection", + "value": "keep-alive" + }, + { + "name": "access-control-expose-headers", + "value": "X-Request-ID" + }, + { + "name": "openai-organization", + "value": "traceloop" + }, + { + "name": "openai-processing-ms", + "value": "380" + }, + { + "name": "openai-project", + "value": "proj_tzz1TbPPOXaf6j9tEkVUBIAa" + }, + { + "name": "openai-version", + "value": "2020-10-01" + }, + { + "name": "x-envoy-upstream-service-time", + "value": "478" + }, + { + "name": "x-ratelimit-limit-requests", + "value": "10000" + }, + { + "name": "x-ratelimit-limit-tokens", + "value": "50000000" + }, + { + "name": "x-ratelimit-remaining-requests", + "value": "9999" + }, + { + "name": "x-ratelimit-remaining-tokens", + "value": "49999989" + }, + { + "name": "x-ratelimit-reset-requests", + "value": "6ms" + }, + { + "name": "x-ratelimit-reset-tokens", + "value": "0s" + }, + { + "name": "x-request-id", + "value": "req_39d442d322c44338bcc32d87ce959a1e" + }, + { + "name": "cf-cache-status", + "value": "DYNAMIC" + }, + { + "_fromType": "array", + "name": "set-cookie", + "value": "__cf_bm=cx2GfhENAhZ7.BZ_THTDKDP6iUAOd_j608ETi1oaSTQ-1755182716-1.0.1.1-htqisA8ahupYucMxitr6HT.0bDvz_LUvI6LAiVJvzGVO_ybz_t9zaFBoNDlBYYwffwSfX8989wHANes2K38pR4N7nNR5h81EREnhK0td5gY; path=/; expires=Thu, 
14-Aug-25 15:15:16 GMT; domain=.api.openai.com; HttpOnly; Secure; SameSite=None" + }, + { + "_fromType": "array", + "name": "set-cookie", + "value": "_cfuvid=jufw1SR0w67jCpX9lTPFPU6JC1zxAmwwpfT0Zt2ZvHM-1755182716423-0.0.1.1-604800000; path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None" + }, + { + "name": "strict-transport-security", + "value": "max-age=31536000; includeSubDomains; preload" + }, + { + "name": "x-content-type-options", + "value": "nosniff" + }, + { + "name": "server", + "value": "cloudflare" + }, + { + "name": "cf-ray", + "value": "96f13c241a31c22f-TLV" + }, + { + "name": "content-encoding", + "value": "gzip" + }, + { + "name": "alt-svc", + "value": "h3=\":443\"; ma=86400" + } + ], + "headersSize": 1294, + "httpVersion": "HTTP/1.1", + "redirectURL": "", + "status": 200, + "statusText": "OK" + }, + "startedDateTime": "2025-08-14T14:45:15.355Z", + "time": 953, + "timings": { + "blocked": -1, + "connect": -1, + "dns": -1, + "receive": 0, + "send": 0, + "ssl": -1, + "wait": 953 + } + } + ], + "pages": [], + "version": "1.2" + } +} diff --git a/packages/instrumentation-openai/test/recordings/Test-OpenAI-instrumentation_1770406427/should-set-LLM_INPUT_MESSAGES-and-LLM_OUTPUT_MESSAGES-attributes-for-chat-completions_99541399/recording.har b/packages/instrumentation-openai/test/recordings/Test-OpenAI-instrumentation_1770406427/should-set-LLM_INPUT_MESSAGES-and-LLM_OUTPUT_MESSAGES-attributes-for-chat-completions_99541399/recording.har new file mode 100644 index 00000000..532a849d --- /dev/null +++ b/packages/instrumentation-openai/test/recordings/Test-OpenAI-instrumentation_1770406427/should-set-LLM_INPUT_MESSAGES-and-LLM_OUTPUT_MESSAGES-attributes-for-chat-completions_99541399/recording.har @@ -0,0 +1,253 @@ +{ + "log": { + "_recordingName": "Test OpenAI instrumentation/should set SemanticAttributes.GEN_AI_INPUT_MESSAGES and SemanticAttributes.GEN_AI_OUTPUT_MESSAGES attributes for chat completions", + "creator": { + "comment": "persister:fs", + 
"name": "Polly.JS", + "version": "6.0.6" + }, + "entries": [ + { + "_id": "55d89d2026cb52c5f2e9f463f5bfc5c1", + "_order": 0, + "cache": {}, + "request": { + "bodySize": 101, + "cookies": [], + "headers": [ + { + "_fromType": "array", + "name": "accept", + "value": "application/json" + }, + { + "_fromType": "array", + "name": "content-type", + "value": "application/json" + }, + { + "_fromType": "array", + "name": "user-agent", + "value": "OpenAI/JS 5.12.2" + }, + { + "_fromType": "array", + "name": "x-stainless-arch", + "value": "arm64" + }, + { + "_fromType": "array", + "name": "x-stainless-lang", + "value": "js" + }, + { + "_fromType": "array", + "name": "x-stainless-os", + "value": "MacOS" + }, + { + "_fromType": "array", + "name": "x-stainless-package-version", + "value": "5.12.2" + }, + { + "_fromType": "array", + "name": "x-stainless-retry-count", + "value": "0" + }, + { + "_fromType": "array", + "name": "x-stainless-runtime", + "value": "node" + }, + { + "_fromType": "array", + "name": "x-stainless-runtime-version", + "value": "v20.10.0" + }, + { + "_fromType": "array", + "name": "content-length", + "value": "101" + }, + { + "_fromType": "array", + "name": "accept-encoding", + "value": "gzip,deflate" + }, + { + "name": "host", + "value": "api.openai.com" + } + ], + "headersSize": 503, + "httpVersion": "HTTP/1.1", + "method": "POST", + "postData": { + "mimeType": "application/json", + "params": [], + "text": "{\"messages\":[{\"role\":\"user\",\"content\":\"Tell me a joke about OpenTelemetry\"}],\"model\":\"gpt-3.5-turbo\"}" + }, + "queryString": [], + "url": "https://api.openai.com/v1/chat/completions" + }, + "response": { + "bodySize": 638, + "content": { + "encoding": "base64", + "mimeType": "application/json", + "size": 638, + "text": 
"[\"H4sIAAAAAAAAAwAAAP//\",\"jFJNb9swDL37V3A6J0WT1kuQS7F1hx1WDBharOhaGIrE2FpkUZPookGR/z5I+bCzdcAuOvDxUXzv8bUAEEaLBQjVSFatt+Pry9u7h5sP3+jy/maOv768n5c0u3Nzf/35070YJQYtf6LiA+tMUestsiG3g1VAyZimTmZlOZlPZ5MyAy1ptIlWex5fnJVj7sKSxueTablnNmQURrGAHwUAwGt+045O44tYwPnoUGkxRlmjWBybAEQgmypCxmgiS8di1IOKHKPLa39vNqCNBm4Qvnp0t2ixRQ4b0PiMljwGqAmWgdZ4BY/u0X1EJbuIibGBNXoGDhvjamACDlJlxATAF48uYnw3/DngqosyKXedtQNAOkcsk3NZ89Me2R5VWqp9oGX8gypWxpnYVAFlJJcURSYvMrotAJ6ym92JQcIHaj1XTGvM3+1CycYc8uvB6d5pwcTS9vWLA+lkWqWRpbFxkIZQUjWoe2Yfney0oQFQDDT/vcxbs3e6jav/Z3wPKIWeUVc+oDbqVHDfFjBd97/ajh7nhUXE8GwUVmwwpBw0rmRnd3cn4iYyttXKuBqDDyYfX8qx2Ba/AQAA//8=\",\"AwDdhyBqewMAAA==\"]" + }, + "cookies": [ + { + "domain": ".api.openai.com", + "expires": "2025-08-14T15:15:16.000Z", + "httpOnly": true, + "name": "__cf_bm", + "path": "/", + "sameSite": "None", + "secure": true, + "value": "cx2GfhENAhZ7.BZ_THTDKDP6iUAOd_j608ETi1oaSTQ-1755182716-1.0.1.1-htqisA8ahupYucMxitr6HT.0bDvz_LUvI6LAiVJvzGVO_ybz_t9zaFBoNDlBYYwffwSfX8989wHANes2K38pR4N7nNR5h81EREnhK0td5gY" + }, + { + "domain": ".api.openai.com", + "httpOnly": true, + "name": "_cfuvid", + "path": "/", + "sameSite": "None", + "secure": true, + "value": "jufw1SR0w67jCpX9lTPFPU6JC1zxAmwwpfT0Zt2ZvHM-1755182716423-0.0.1.1-604800000" + } + ], + "headers": [ + { + "name": "date", + "value": "Thu, 14 Aug 2025 14:45:16 GMT" + }, + { + "name": "content-type", + "value": "application/json" + }, + { + "name": "transfer-encoding", + "value": "chunked" + }, + { + "name": "connection", + "value": "keep-alive" + }, + { + "name": "access-control-expose-headers", + "value": "X-Request-ID" + }, + { + "name": "openai-organization", + "value": "traceloop" + }, + { + "name": "openai-processing-ms", + "value": "380" + }, + { + "name": "openai-project", + "value": "proj_tzz1TbPPOXaf6j9tEkVUBIAa" + }, + { + "name": "openai-version", + "value": "2020-10-01" + }, + { + "name": "x-envoy-upstream-service-time", + "value": "478" + }, + { + "name": "x-ratelimit-limit-requests", + "value": 
"10000" + }, + { + "name": "x-ratelimit-limit-tokens", + "value": "50000000" + }, + { + "name": "x-ratelimit-remaining-requests", + "value": "9999" + }, + { + "name": "x-ratelimit-remaining-tokens", + "value": "49999989" + }, + { + "name": "x-ratelimit-reset-requests", + "value": "6ms" + }, + { + "name": "x-ratelimit-reset-tokens", + "value": "0s" + }, + { + "name": "x-request-id", + "value": "req_39d442d322c44338bcc32d87ce959a1e" + }, + { + "name": "cf-cache-status", + "value": "DYNAMIC" + }, + { + "_fromType": "array", + "name": "set-cookie", + "value": "__cf_bm=cx2GfhENAhZ7.BZ_THTDKDP6iUAOd_j608ETi1oaSTQ-1755182716-1.0.1.1-htqisA8ahupYucMxitr6HT.0bDvz_LUvI6LAiVJvzGVO_ybz_t9zaFBoNDlBYYwffwSfX8989wHANes2K38pR4N7nNR5h81EREnhK0td5gY; path=/; expires=Thu, 14-Aug-25 15:15:16 GMT; domain=.api.openai.com; HttpOnly; Secure; SameSite=None" + }, + { + "_fromType": "array", + "name": "set-cookie", + "value": "_cfuvid=jufw1SR0w67jCpX9lTPFPU6JC1zxAmwwpfT0Zt2ZvHM-1755182716423-0.0.1.1-604800000; path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None" + }, + { + "name": "strict-transport-security", + "value": "max-age=31536000; includeSubDomains; preload" + }, + { + "name": "x-content-type-options", + "value": "nosniff" + }, + { + "name": "server", + "value": "cloudflare" + }, + { + "name": "cf-ray", + "value": "96f13c241a31c22f-TLV" + }, + { + "name": "content-encoding", + "value": "gzip" + }, + { + "name": "alt-svc", + "value": "h3=\":443\"; ma=86400" + } + ], + "headersSize": 1294, + "httpVersion": "HTTP/1.1", + "redirectURL": "", + "status": 200, + "statusText": "OK" + }, + "startedDateTime": "2025-08-14T14:45:15.355Z", + "time": 953, + "timings": { + "blocked": -1, + "connect": -1, + "dns": -1, + "receive": 0, + "send": 0, + "ssl": -1, + "wait": 953 + } + } + ], + "pages": [], + "version": "1.2" + } +} diff --git a/packages/instrumentation-pinecone/package.json b/packages/instrumentation-pinecone/package.json index c4d1f159..9a3febc8 100644 --- 
a/packages/instrumentation-pinecone/package.json +++ b/packages/instrumentation-pinecone/package.json @@ -41,7 +41,7 @@ "@opentelemetry/api": "^1.9.0", "@opentelemetry/core": "^2.0.1", "@opentelemetry/instrumentation": "^0.203.0", - "@opentelemetry/semantic-conventions": "^1.36.0", + "@opentelemetry/semantic-conventions": "^1.37.0", "@traceloop/ai-semantic-conventions": "workspace:*", "tslib": "^2.8.1" }, diff --git a/packages/instrumentation-together/package.json b/packages/instrumentation-together/package.json index c10b91fc..a290ec0e 100644 --- a/packages/instrumentation-together/package.json +++ b/packages/instrumentation-together/package.json @@ -40,7 +40,7 @@ "@opentelemetry/api": "^1.9.0", "@opentelemetry/core": "^2.0.1", "@opentelemetry/instrumentation": "^0.203.0", - "@opentelemetry/semantic-conventions": "^1.36.0", + "@opentelemetry/semantic-conventions": "^1.37.0", "@traceloop/ai-semantic-conventions": "workspace:*", "js-tiktoken": "^1.0.20", "tslib": "^2.8.1" diff --git a/packages/instrumentation-vertexai/package.json b/packages/instrumentation-vertexai/package.json index 1c563a4a..2b0f23ef 100644 --- a/packages/instrumentation-vertexai/package.json +++ b/packages/instrumentation-vertexai/package.json @@ -40,7 +40,7 @@ "@opentelemetry/api": "^1.9.0", "@opentelemetry/core": "^2.0.1", "@opentelemetry/instrumentation": "^0.203.0", - "@opentelemetry/semantic-conventions": "^1.36.0", + "@opentelemetry/semantic-conventions": "^1.37.0", "@traceloop/ai-semantic-conventions": "workspace:*", "google-gax": "^4.0.0", "tslib": "^2.8.1" diff --git a/packages/traceloop-sdk/package.json b/packages/traceloop-sdk/package.json index 85568c10..ddad4310 100644 --- a/packages/traceloop-sdk/package.json +++ b/packages/traceloop-sdk/package.json @@ -63,7 +63,7 @@ "@opentelemetry/sdk-node": "^0.203.0", "@opentelemetry/sdk-trace-base": "^2.0.1", "@opentelemetry/sdk-trace-node": "^2.0.1", - "@opentelemetry/semantic-conventions": "^1.36.0", + "@opentelemetry/semantic-conventions": 
"^1.37.0", "@traceloop/ai-semantic-conventions": "workspace:*", "@traceloop/instrumentation-anthropic": "workspace:*", "@traceloop/instrumentation-bedrock": "workspace:*", diff --git a/packages/traceloop-sdk/src/lib/tracing/ai-sdk-transformations.ts b/packages/traceloop-sdk/src/lib/tracing/ai-sdk-transformations.ts index 7929a3fa..933f40bb 100644 --- a/packages/traceloop-sdk/src/lib/tracing/ai-sdk-transformations.ts +++ b/packages/traceloop-sdk/src/lib/tracing/ai-sdk-transformations.ts @@ -1,5 +1,9 @@ import { ReadableSpan } from "@opentelemetry/sdk-trace-node"; import { SpanAttributes } from "@traceloop/ai-semantic-conventions"; +import { + ATTR_GEN_AI_INPUT_MESSAGES, + ATTR_GEN_AI_OUTPUT_MESSAGES, +} from "@opentelemetry/semantic-conventions/incubating"; const AI_GENERATE_TEXT_DO_GENERATE = "ai.generateText.doGenerate"; const AI_GENERATE_OBJECT_DO_GENERATE = "ai.generateObject.doGenerate"; @@ -19,6 +23,10 @@ const AI_USAGE_PROMPT_TOKENS = "ai.usage.promptTokens"; const AI_USAGE_COMPLETION_TOKENS = "ai.usage.completionTokens"; const AI_MODEL_PROVIDER = "ai.model.provider"; const AI_PROMPT_TOOLS = "ai.prompt.tools"; +const TYPE_TEXT = "text"; +const TYPE_TOOL_CALL = "tool_call"; +const ROLE_ASSISTANT = "assistant"; +const ROLE_USER = "user"; // Vendor mapping from AI SDK provider prefixes to standardized LLM_SYSTEM values // Uses prefixes to match AI SDK patterns like "openai.chat", "anthropic.messages", etc. 
@@ -55,7 +63,19 @@ const transformResponseText = (attributes: Record): void => { if (AI_RESPONSE_TEXT in attributes) { attributes[`${SpanAttributes.LLM_COMPLETIONS}.0.content`] = attributes[AI_RESPONSE_TEXT]; - attributes[`${SpanAttributes.LLM_COMPLETIONS}.0.role`] = "assistant"; + attributes[`${SpanAttributes.LLM_COMPLETIONS}.0.role`] = ROLE_ASSISTANT; + + const outputMessage = { + role: ROLE_ASSISTANT, + parts: [ + { + type: TYPE_TEXT, + content: attributes[AI_RESPONSE_TEXT], + }, + ], + }; + attributes[ATTR_GEN_AI_OUTPUT_MESSAGES] = JSON.stringify([outputMessage]); + delete attributes[AI_RESPONSE_TEXT]; } }; @@ -64,7 +84,19 @@ const transformResponseObject = (attributes: Record): void => { if (AI_RESPONSE_OBJECT in attributes) { attributes[`${SpanAttributes.LLM_COMPLETIONS}.0.content`] = attributes[AI_RESPONSE_OBJECT]; - attributes[`${SpanAttributes.LLM_COMPLETIONS}.0.role`] = "assistant"; + attributes[`${SpanAttributes.LLM_COMPLETIONS}.0.role`] = ROLE_ASSISTANT; + + const outputMessage = { + role: ROLE_ASSISTANT, + parts: [ + { + type: TYPE_TEXT, + content: attributes[AI_RESPONSE_OBJECT], + }, + ], + }; + attributes[ATTR_GEN_AI_OUTPUT_MESSAGES] = JSON.stringify([outputMessage]); + delete attributes[AI_RESPONSE_OBJECT]; } }; @@ -76,8 +108,9 @@ const transformResponseToolCalls = (attributes: Record): void => { attributes[AI_RESPONSE_TOOL_CALLS] as string, ); - attributes[`${SpanAttributes.LLM_COMPLETIONS}.0.role`] = "assistant"; + attributes[`${SpanAttributes.LLM_COMPLETIONS}.0.role`] = ROLE_ASSISTANT; + const toolCallParts: any[] = []; toolCalls.forEach((toolCall: any, index: number) => { if (toolCall.toolCallType === "function") { attributes[ @@ -86,9 +119,27 @@ const transformResponseToolCalls = (attributes: Record): void => { attributes[ `${SpanAttributes.LLM_COMPLETIONS}.0.tool_calls.${index}.arguments` ] = toolCall.args; + + toolCallParts.push({ + type: TYPE_TOOL_CALL, + tool_call: { + name: toolCall.toolName, + arguments: toolCall.args, + }, + }); } }); + 
if (toolCallParts.length > 0) { + const outputMessage = { + role: ROLE_ASSISTANT, + parts: toolCallParts, + }; + attributes[ATTR_GEN_AI_OUTPUT_MESSAGES] = JSON.stringify([ + outputMessage, + ]); + } + delete attributes[AI_RESPONSE_TOOL_CALLS]; } catch { // Ignore parsing errors @@ -100,7 +151,10 @@ const processMessageContent = (content: any): string => { if (Array.isArray(content)) { const textItems = content.filter( (item: any) => - item && typeof item === "object" && item.type === "text" && item.text, + item && + typeof item === "object" && + item.type === TYPE_TEXT && + item.text, ); if (textItems.length > 0) { @@ -112,7 +166,7 @@ const processMessageContent = (content: any): string => { } if (content && typeof content === "object") { - if (content.type === "text" && content.text) { + if (content.type === TYPE_TEXT && content.text) { return content.text; } return JSON.stringify(content); @@ -126,7 +180,7 @@ const processMessageContent = (content: any): string => { (item: any) => item && typeof item === "object" && - item.type === "text" && + item.type === TYPE_TEXT && item.text, ); @@ -205,12 +259,31 @@ const transformPrompts = (attributes: Record): void => { } const messages = JSON.parse(jsonString); + const inputMessages: any[] = []; + messages.forEach((msg: { role: string; content: any }, index: number) => { const processedContent = processMessageContent(msg.content); const contentKey = `${SpanAttributes.LLM_PROMPTS}.${index}.content`; attributes[contentKey] = processedContent; attributes[`${SpanAttributes.LLM_PROMPTS}.${index}.role`] = msg.role; + + // Add to OpenTelemetry standard gen_ai.input.messages format + inputMessages.push({ + role: msg.role, + parts: [ + { + type: TYPE_TEXT, + content: processedContent, + }, + ], + }); }); + + // Set the OpenTelemetry standard input messages attribute + if (inputMessages.length > 0) { + attributes[ATTR_GEN_AI_INPUT_MESSAGES] = JSON.stringify(inputMessages); + } + delete attributes[AI_PROMPT_MESSAGES]; } catch { // 
Ignore parsing errors @@ -223,7 +296,19 @@ const transformPrompts = (attributes: Record): void => { if (promptData.prompt && typeof promptData.prompt === "string") { attributes[`${SpanAttributes.LLM_PROMPTS}.0.content`] = promptData.prompt; - attributes[`${SpanAttributes.LLM_PROMPTS}.0.role`] = "user"; + attributes[`${SpanAttributes.LLM_PROMPTS}.0.role`] = ROLE_USER; + + const inputMessage = { + role: ROLE_USER, + parts: [ + { + type: TYPE_TEXT, + content: promptData.prompt, + }, + ], + }; + attributes[ATTR_GEN_AI_INPUT_MESSAGES] = JSON.stringify([inputMessage]); + delete attributes[AI_PROMPT]; } } catch { diff --git a/packages/traceloop-sdk/test/ai-sdk-integration.test.ts b/packages/traceloop-sdk/test/ai-sdk-integration.test.ts deleted file mode 100644 index e50c007b..00000000 --- a/packages/traceloop-sdk/test/ai-sdk-integration.test.ts +++ /dev/null @@ -1,219 +0,0 @@ -/* - * Copyright Traceloop - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -import * as assert from "assert"; - -import { openai as vercel_openai } from "@ai-sdk/openai"; -import { google as vercel_google } from "@ai-sdk/google"; -import { generateText } from "ai"; - -import * as traceloop from "../src"; - -import { Polly, setupMocha as setupPolly } from "@pollyjs/core"; -import NodeHttpAdapter from "@pollyjs/adapter-node-http"; -import FetchAdapter from "@pollyjs/adapter-fetch"; -import FSPersister from "@pollyjs/persister-fs"; -import { initializeSharedTraceloop, getSharedExporter } from "./test-setup"; - -const memoryExporter = getSharedExporter(); - -Polly.register(NodeHttpAdapter); -Polly.register(FetchAdapter); -Polly.register(FSPersister); - -describe("Test AI SDK Integration with Recording", function () { - setupPolly({ - adapters: ["node-http", "fetch"], - persister: "fs", - recordIfMissing: process.env.RECORD_MODE === "NEW", - recordFailedRequests: true, - mode: process.env.RECORD_MODE === "NEW" ? "record" : "replay", - matchRequestsBy: { - headers: false, - url: { - protocol: true, - hostname: true, - pathname: true, - query: false, - }, - }, - logging: true, - }); - - before(async function () { - if (process.env.RECORD_MODE !== "NEW") { - // Set dummy API keys for replay mode - process.env.OPENAI_API_KEY = "test"; - process.env.GOOGLE_GENERATIVE_AI_API_KEY = "test"; - process.env.AWS_ACCESS_KEY_ID = "test"; - process.env.AWS_SECRET_ACCESS_KEY = "test"; - process.env.AWS_REGION = "us-east-1"; - } - - // Use shared initialization to avoid conflicts with other test suites - initializeSharedTraceloop(); - }); - - beforeEach(function () { - const { server } = this.polly as Polly; - server.any().on("beforePersist", (_req, recording) => { - recording.request.headers = recording.request.headers.filter( - ({ name }: { name: string }) => - !["authorization", "x-api-key", "x-goog-api-key"].includes( - name.toLowerCase(), - ), - ); - }); - }); - - afterEach(async () => { - await traceloop.forceFlush(); - memoryExporter.reset(); - 
}); - - it("should capture OpenAI provider spans correctly with recording", async () => { - const result = await traceloop.withWorkflow( - { name: "test_openai_workflow" }, - async () => { - return await generateText({ - messages: [ - { role: "user", content: "What is 2+2? Give a brief answer." }, - ], - model: vercel_openai("gpt-3.5-turbo"), - experimental_telemetry: { isEnabled: true }, - }); - }, - ); - - // Force flush to ensure all spans are exported - await traceloop.forceFlush(); - - const spans = memoryExporter.getFinishedSpans(); - - const generateTextSpan = spans.find( - (span) => - span.name === "ai.generateText.generate" || - span.name === "ai.generateText.doGenerate", - ); - - assert.ok(result); - assert.ok(result.text); - assert.ok(generateTextSpan); - - // Verify span name - assert.strictEqual(generateTextSpan.name, "ai.generateText.generate"); - - // Verify vendor - assert.strictEqual(generateTextSpan.attributes["gen_ai.system"], "OpenAI"); - - // Verify model information - assert.strictEqual( - generateTextSpan.attributes["gen_ai.request.model"], - "gpt-3.5-turbo", - ); - - // Verify prompt - assert.strictEqual( - generateTextSpan.attributes["gen_ai.prompt.0.role"], - "user", - ); - assert.ok(generateTextSpan.attributes["gen_ai.prompt.0.content"]); - - // Verify response - assert.strictEqual( - generateTextSpan.attributes["gen_ai.completion.0.role"], - "assistant", - ); - assert.strictEqual( - generateTextSpan.attributes["gen_ai.completion.0.content"], - result.text, - ); - - // Verify token usage - assert.ok(generateTextSpan.attributes["gen_ai.usage.prompt_tokens"]); - assert.ok(generateTextSpan.attributes["gen_ai.usage.completion_tokens"]); - assert.ok(generateTextSpan.attributes["llm.usage.total_tokens"]); - }); - - it("should capture Google Gemini provider spans correctly with recording", async () => { - // Clear any leftover spans from previous tests - memoryExporter.reset(); - - const result = await traceloop.withWorkflow( - { name: 
"test_google_workflow" }, - async () => { - return await generateText({ - messages: [ - { role: "user", content: "What is 2+2? Give a brief answer." }, - ], - model: vercel_google("gemini-1.5-flash"), - experimental_telemetry: { isEnabled: true }, - }); - }, - ); - - // Force flush to ensure all spans are exported - await traceloop.forceFlush(); - - const spans = memoryExporter.getFinishedSpans(); - - // Find the Google span specifically (should have workflow name test_google_workflow) - const generateTextSpan = spans.find( - (span) => - (span.name === "ai.generateText.generate" || - span.name === "ai.generateText.doGenerate") && - span.attributes["traceloop.workflow.name"] === "test_google_workflow", - ); - - assert.ok(result); - assert.ok(result.text); - assert.ok(generateTextSpan, "Could not find Google generateText span"); - - // Verify span name - assert.strictEqual(generateTextSpan.name, "ai.generateText.generate"); - - // Verify vendor - assert.strictEqual(generateTextSpan.attributes["gen_ai.system"], "Google"); - - // Verify model information - assert.strictEqual( - generateTextSpan.attributes["gen_ai.request.model"], - "gemini-1.5-flash", - ); - - // Verify prompt - assert.strictEqual( - generateTextSpan.attributes["gen_ai.prompt.0.role"], - "user", - ); - assert.ok(generateTextSpan.attributes["gen_ai.prompt.0.content"]); - - // Verify response - assert.strictEqual( - generateTextSpan.attributes["gen_ai.completion.0.role"], - "assistant", - ); - assert.strictEqual( - generateTextSpan.attributes["gen_ai.completion.0.content"], - result.text, - ); - - // Verify token usage - assert.ok(generateTextSpan.attributes["gen_ai.usage.prompt_tokens"]); - assert.ok(generateTextSpan.attributes["gen_ai.usage.completion_tokens"]); - assert.ok(generateTextSpan.attributes["llm.usage.total_tokens"]); - }); -}); diff --git a/packages/traceloop-sdk/test/ai-sdk-transformations.test.ts b/packages/traceloop-sdk/test/ai-sdk-transformations.test.ts deleted file mode 100644 index 
f50b1cbf..00000000 --- a/packages/traceloop-sdk/test/ai-sdk-transformations.test.ts +++ /dev/null @@ -1,1305 +0,0 @@ -import * as assert from "assert"; -import { ReadableSpan } from "@opentelemetry/sdk-trace-node"; -import { SpanAttributes } from "@traceloop/ai-semantic-conventions"; -import { - transformAiSdkAttributes, - transformAiSdkSpan, -} from "../src/lib/tracing/ai-sdk-transformations"; - -// Helper function to create a mock ReadableSpan -const createMockSpan = ( - name: string, - attributes: Record = {}, -): ReadableSpan => { - return { - name, - attributes, - } as ReadableSpan; -}; - -describe("AI SDK Transformations", () => { - describe("transformAiSdkAttributes - response text", () => { - it("should transform ai.response.text to completion attributes", () => { - const attributes = { - "ai.response.text": "Hello, how can I help you?", - someOtherAttr: "value", - }; - - transformAiSdkAttributes(attributes); - - assert.strictEqual( - attributes[`${SpanAttributes.LLM_COMPLETIONS}.0.content`], - "Hello, how can I help you?", - ); - assert.strictEqual( - attributes[`${SpanAttributes.LLM_COMPLETIONS}.0.role`], - "assistant", - ); - assert.strictEqual(attributes["ai.response.text"], undefined); - assert.strictEqual(attributes.someOtherAttr, "value"); - }); - - it("should not modify attributes when ai.response.text is not present", () => { - const attributes = { - someOtherAttr: "value", - }; - const originalAttributes = { ...attributes }; - - transformAiSdkAttributes(attributes); - - assert.deepStrictEqual(attributes, originalAttributes); - }); - - it("should handle empty response text", () => { - const attributes = { - "ai.response.text": "", - }; - - transformAiSdkAttributes(attributes); - - assert.strictEqual( - attributes[`${SpanAttributes.LLM_COMPLETIONS}.0.content`], - "", - ); - assert.strictEqual( - attributes[`${SpanAttributes.LLM_COMPLETIONS}.0.role`], - "assistant", - ); - assert.strictEqual(attributes["ai.response.text"], undefined); - }); - }); - - 
describe("transformAiSdkAttributes - response object", () => { - it("should transform ai.response.object to completion attributes", () => { - const attributes = { - "ai.response.object": '{"filteredText":"Hello","changesApplied":false}', - someOtherAttr: "value", - }; - - transformAiSdkAttributes(attributes); - - assert.strictEqual( - attributes[`${SpanAttributes.LLM_COMPLETIONS}.0.content`], - '{"filteredText":"Hello","changesApplied":false}', - ); - assert.strictEqual( - attributes[`${SpanAttributes.LLM_COMPLETIONS}.0.role`], - "assistant", - ); - assert.strictEqual(attributes["ai.response.object"], undefined); - assert.strictEqual(attributes.someOtherAttr, "value"); - }); - - it("should not modify attributes when ai.response.object is not present", () => { - const attributes = { - someOtherAttr: "value", - }; - const originalAttributes = { ...attributes }; - - transformAiSdkAttributes(attributes); - - assert.deepStrictEqual(attributes, originalAttributes); - }); - }); - - describe("transformAiSdkAttributes - response tool calls", () => { - it("should transform ai.response.toolCalls to completion attributes", () => { - const toolCallsData = [ - { - toolCallType: "function", - toolCallId: "call_gULeWLlk7y32MKz6Fb5eaF3K", - toolName: "getWeather", - args: '{"location": "San Francisco"}', - }, - { - toolCallType: "function", - toolCallId: "call_arNHlNj2FTOngnyieQfTe1bv", - toolName: "searchRestaurants", - args: '{"city": "San Francisco"}', - }, - ]; - - const attributes = { - "ai.response.toolCalls": JSON.stringify(toolCallsData), - someOtherAttr: "value", - }; - - transformAiSdkAttributes(attributes); - - // Check that role is set - assert.strictEqual( - attributes[`${SpanAttributes.LLM_COMPLETIONS}.0.role`], - "assistant", - ); - - // Check first tool call - assert.strictEqual( - attributes[`${SpanAttributes.LLM_COMPLETIONS}.0.tool_calls.0.name`], - "getWeather", - ); - assert.strictEqual( - attributes[ - 
`${SpanAttributes.LLM_COMPLETIONS}.0.tool_calls.0.arguments` - ], - '{"location": "San Francisco"}', - ); - - // Check second tool call - assert.strictEqual( - attributes[`${SpanAttributes.LLM_COMPLETIONS}.0.tool_calls.1.name`], - "searchRestaurants", - ); - assert.strictEqual( - attributes[ - `${SpanAttributes.LLM_COMPLETIONS}.0.tool_calls.1.arguments` - ], - '{"city": "San Francisco"}', - ); - - // Check original attribute is removed - assert.strictEqual(attributes["ai.response.toolCalls"], undefined); - assert.strictEqual(attributes.someOtherAttr, "value"); - }); - - it("should not modify attributes when ai.response.toolCalls is not present", () => { - const attributes = { - someOtherAttr: "value", - }; - const originalAttributes = { ...attributes }; - - transformAiSdkAttributes(attributes); - - assert.deepStrictEqual(attributes, originalAttributes); - }); - - it("should handle invalid JSON gracefully", () => { - const attributes = { - "ai.response.toolCalls": "invalid json {", - someOtherAttr: "value", - }; - - transformAiSdkAttributes(attributes); - - // Should not modify attributes when JSON parsing fails - assert.strictEqual(attributes["ai.response.toolCalls"], "invalid json {"); - assert.strictEqual(attributes.someOtherAttr, "value"); - }); - }); - - describe("transformAiSdkAttributes - prompt messages", () => { - it("should transform ai.prompt.messages to prompt attributes", () => { - const messages = [ - { role: "system", content: "You are a helpful assistant" }, - { role: "user", content: "Hello" }, - ]; - const attributes = { - "ai.prompt.messages": JSON.stringify(messages), - }; - - transformAiSdkAttributes(attributes); - - assert.strictEqual( - attributes[`${SpanAttributes.LLM_PROMPTS}.0.content`], - "You are a helpful assistant", - ); - assert.strictEqual( - attributes[`${SpanAttributes.LLM_PROMPTS}.0.role`], - "system", - ); - assert.strictEqual( - attributes[`${SpanAttributes.LLM_PROMPTS}.1.content`], - "Hello", - ); - assert.strictEqual( - 
attributes[`${SpanAttributes.LLM_PROMPTS}.1.role`], - "user", - ); - assert.strictEqual(attributes["ai.prompt.messages"], undefined); - }); - - it("should handle messages with object content", () => { - const messages = [ - { - role: "user", - content: { type: "text", text: "What's in this image?" }, - }, - ]; - const attributes = { - "ai.prompt.messages": JSON.stringify(messages), - }; - - transformAiSdkAttributes(attributes); - - assert.strictEqual( - attributes[`${SpanAttributes.LLM_PROMPTS}.0.content`], - "What's in this image?", - ); - assert.strictEqual( - attributes[`${SpanAttributes.LLM_PROMPTS}.0.role`], - "user", - ); - }); - - it("should extract text from content array", () => { - const messages = [ - { - role: "user", - content: [ - { type: "text", text: "Help me plan a trip to San Francisco." }, - { - type: "text", - text: "I'd like to know about the weather and restaurants.", - }, - ], - }, - ]; - const attributes = { - "ai.prompt.messages": JSON.stringify(messages), - }; - - transformAiSdkAttributes(attributes); - - assert.strictEqual( - attributes[`${SpanAttributes.LLM_PROMPTS}.0.content`], - "Help me plan a trip to San Francisco. I'd like to know about the weather and restaurants.", - ); - assert.strictEqual( - attributes[`${SpanAttributes.LLM_PROMPTS}.0.role`], - "user", - ); - }); - - it("should filter out non-text content types", () => { - const messages = [ - { - role: "user", - content: [ - { type: "text", text: "What's in this image?" }, - { type: "image", url: "data:image/jpeg;base64,..." }, - { type: "text", text: "Please describe it." }, - ], - }, - ]; - const attributes = { - "ai.prompt.messages": JSON.stringify(messages), - }; - - transformAiSdkAttributes(attributes); - - assert.strictEqual( - attributes[`${SpanAttributes.LLM_PROMPTS}.0.content`], - "What's in this image? 
Please describe it.", - ); - assert.strictEqual( - attributes[`${SpanAttributes.LLM_PROMPTS}.0.role`], - "user", - ); - }); - - it("should extract text from JSON string content", () => { - const messages = [ - { - role: "user", - content: - '[{"type":"text","text":"Help me plan a trip to San Francisco."},{"type":"text","text":"What should I know about the weather?"}]', - }, - ]; - const attributes = { - "ai.prompt.messages": JSON.stringify(messages), - }; - - transformAiSdkAttributes(attributes); - - assert.strictEqual( - attributes[`${SpanAttributes.LLM_PROMPTS}.0.content`], - "Help me plan a trip to San Francisco. What should I know about the weather?", - ); - assert.strictEqual( - attributes[`${SpanAttributes.LLM_PROMPTS}.0.role`], - "user", - ); - }); - - it("should preserve complex content like tool calls", () => { - const messages = [ - { - role: "assistant", - content: - '[{"type":"tool-call","id":"call_123","name":"getWeather","args":{"location":"Paris"}}]', - }, - ]; - const attributes = { - "ai.prompt.messages": JSON.stringify(messages), - }; - - transformAiSdkAttributes(attributes); - - // Should preserve the original JSON since it's not simple text - assert.strictEqual( - attributes[`${SpanAttributes.LLM_PROMPTS}.0.content`], - '[{"type":"tool-call","id":"call_123","name":"getWeather","args":{"location":"Paris"}}]', - ); - assert.strictEqual( - attributes[`${SpanAttributes.LLM_PROMPTS}.0.role`], - "assistant", - ); - }); - - it("should preserve mixed content arrays", () => { - const messages = [ - { - role: "user", - content: - '[{"type":"text","text":"What\'s the weather?"},{"type":"image","url":"data:image/jpeg;base64,..."}]', - }, - ]; - const attributes = { - "ai.prompt.messages": JSON.stringify(messages), - }; - - transformAiSdkAttributes(attributes); - - // Should preserve the original JSON since it has mixed content - assert.strictEqual( - attributes[`${SpanAttributes.LLM_PROMPTS}.0.content`], - '[{"type":"text","text":"What\'s the 
weather?"},{"type":"image","url":"data:image/jpeg;base64,..."}]', - ); - assert.strictEqual( - attributes[`${SpanAttributes.LLM_PROMPTS}.0.role`], - "user", - ); - }); - - it("should handle invalid JSON gracefully", () => { - const attributes = { - "ai.prompt.messages": "invalid json {", - someOtherAttr: "value", - }; - - transformAiSdkAttributes(attributes); - - // Should not modify attributes when JSON parsing fails - assert.strictEqual(attributes["ai.prompt.messages"], "invalid json {"); - assert.strictEqual(attributes.someOtherAttr, "value"); - }); - - it("should not modify attributes when ai.prompt.messages is not present", () => { - const attributes = { - someOtherAttr: "value", - }; - const originalAttributes = { ...attributes }; - - transformAiSdkAttributes(attributes); - - assert.deepStrictEqual(attributes, originalAttributes); - }); - - it("should handle empty messages array", () => { - const attributes = { - "ai.prompt.messages": JSON.stringify([]), - }; - - transformAiSdkAttributes(attributes); - - assert.strictEqual(attributes["ai.prompt.messages"], undefined); - }); - - it("should unescape JSON escape sequences in simple string content", () => { - const attributes = { - "ai.prompt.messages": - '[{"role":"user","content":[{"type":"text","text":"Help me plan a trip to San Francisco. I\'d like to know:\\n1. What\'s the weather like there?\\n2. Find some good restaurants to try\\n3. If I\'m traveling from New York, how far is it?\\n\\nPlease use the available tools to get current information and provide a comprehensive travel guide."}]}]', - }; - - transformAiSdkAttributes(attributes); - - const result = attributes[`${SpanAttributes.LLM_PROMPTS}.0.content`]; - - // The escape sequences should be properly unescaped - assert.strictEqual( - result, - "Help me plan a trip to San Francisco. I'd like to know:\n1. What's the weather like there?\n2. Find some good restaurants to try\n3. 
If I'm traveling from New York, how far is it?\n\nPlease use the available tools to get current information and provide a comprehensive travel guide.", - ); - assert.strictEqual( - attributes[`${SpanAttributes.LLM_PROMPTS}.0.role`], - "user", - ); - }); - }); - - describe("transformAiSdkAttributes - single prompt", () => { - it("should transform ai.prompt to prompt attributes", () => { - const promptData = { - prompt: - "Help me plan a trip to San Francisco. I\\'d like to know:\\n1. What\\'s the weather like there?\\n2. Find some restaurants\\n\\nPlease help!", - }; - const attributes = { - "ai.prompt": JSON.stringify(promptData), - someOtherAttr: "value", - }; - - transformAiSdkAttributes(attributes); - - assert.strictEqual( - attributes[`${SpanAttributes.LLM_PROMPTS}.0.content`], - "Help me plan a trip to San Francisco. I\\'d like to know:\\n1. What\\'s the weather like there?\\n2. Find some restaurants\\n\\nPlease help!", - ); - assert.strictEqual( - attributes[`${SpanAttributes.LLM_PROMPTS}.0.role`], - "user", - ); - assert.strictEqual(attributes["ai.prompt"], undefined); - assert.strictEqual(attributes.someOtherAttr, "value"); - }); - - it("should not modify attributes when ai.prompt is not present", () => { - const attributes = { - someOtherAttr: "value", - }; - const originalAttributes = { ...attributes }; - - transformAiSdkAttributes(attributes); - - assert.deepStrictEqual(attributes, originalAttributes); - }); - - it("should handle invalid JSON gracefully", () => { - const attributes = { - "ai.prompt": "invalid json {", - someOtherAttr: "value", - }; - - transformAiSdkAttributes(attributes); - - // Should not modify attributes when JSON parsing fails - assert.strictEqual(attributes["ai.prompt"], "invalid json {"); - assert.strictEqual(attributes.someOtherAttr, "value"); - }); - }); - - describe("transformAiSdkAttributes - tools", () => { - it("should transform ai.prompt.tools to LLM request functions attributes", () => { - const attributes = { - 
"ai.prompt.tools": [ - { - name: "getWeather", - description: "Get the current weather for a specified location", - parameters: { - type: "object", - properties: { - location: { - type: "string", - description: "The location to get weather for", - }, - }, - required: ["location"], - }, - }, - { - name: "calculateDistance", - description: "Calculate distance between two cities", - parameters: { - type: "object", - properties: { - fromCity: { type: "string" }, - toCity: { type: "string" }, - }, - }, - }, - ], - someOtherAttr: "value", - }; - - transformAiSdkAttributes(attributes); - - assert.strictEqual( - attributes[`${SpanAttributes.LLM_REQUEST_FUNCTIONS}.0.name`], - "getWeather", - ); - assert.strictEqual( - attributes[`${SpanAttributes.LLM_REQUEST_FUNCTIONS}.0.description`], - "Get the current weather for a specified location", - ); - assert.strictEqual( - attributes[`${SpanAttributes.LLM_REQUEST_FUNCTIONS}.0.parameters`], - JSON.stringify({ - type: "object", - properties: { - location: { - type: "string", - description: "The location to get weather for", - }, - }, - required: ["location"], - }), - ); - assert.strictEqual( - attributes[`${SpanAttributes.LLM_REQUEST_FUNCTIONS}.1.name`], - "calculateDistance", - ); - assert.strictEqual( - attributes[`${SpanAttributes.LLM_REQUEST_FUNCTIONS}.1.description`], - "Calculate distance between two cities", - ); - assert.strictEqual( - attributes[`${SpanAttributes.LLM_REQUEST_FUNCTIONS}.1.parameters`], - JSON.stringify({ - type: "object", - properties: { - fromCity: { type: "string" }, - toCity: { type: "string" }, - }, - }), - ); - - // Original attribute should be removed - assert.strictEqual(attributes["ai.prompt.tools"], undefined); - - // Other attributes should remain unchanged - assert.strictEqual(attributes.someOtherAttr, "value"); - }); - - it("should handle tools with missing properties gracefully", () => { - const attributes = { - "ai.prompt.tools": [ - { - name: "toolWithOnlyName", - // missing description and 
parameters - }, - { - description: "Tool with only description", - // missing name and parameters - }, - { - name: "toolWithStringParams", - description: "Tool with pre-stringified parameters", - parameters: '{"type": "object"}', - }, - ], - }; - - transformAiSdkAttributes(attributes); - - // Tool 0: only has name - assert.strictEqual( - attributes[`${SpanAttributes.LLM_REQUEST_FUNCTIONS}.0.name`], - "toolWithOnlyName", - ); - assert.strictEqual( - attributes[`${SpanAttributes.LLM_REQUEST_FUNCTIONS}.0.description`], - undefined, - ); - assert.strictEqual( - attributes[`${SpanAttributes.LLM_REQUEST_FUNCTIONS}.0.parameters`], - undefined, - ); - - // Tool 1: only has description - assert.strictEqual( - attributes[`${SpanAttributes.LLM_REQUEST_FUNCTIONS}.1.name`], - undefined, - ); - assert.strictEqual( - attributes[`${SpanAttributes.LLM_REQUEST_FUNCTIONS}.1.description`], - "Tool with only description", - ); - - // Tool 2: has string parameters (should be used as-is) - assert.strictEqual( - attributes[`${SpanAttributes.LLM_REQUEST_FUNCTIONS}.2.name`], - "toolWithStringParams", - ); - assert.strictEqual( - attributes[`${SpanAttributes.LLM_REQUEST_FUNCTIONS}.2.parameters`], - '{"type": "object"}', - ); - - assert.strictEqual(attributes["ai.prompt.tools"], undefined); - }); - - it("should handle empty tools array", () => { - const attributes = { - "ai.prompt.tools": [], - someOtherAttr: "value", - }; - - transformAiSdkAttributes(attributes); - - // Should not create any function attributes - assert.strictEqual( - attributes[`${SpanAttributes.LLM_REQUEST_FUNCTIONS}.0.name`], - undefined, - ); - - // Original attribute should be removed - assert.strictEqual(attributes["ai.prompt.tools"], undefined); - assert.strictEqual(attributes.someOtherAttr, "value"); - }); - - it("should handle invalid tools data gracefully", () => { - const attributes = { - "ai.prompt.tools": "not an array", - someOtherAttr: "value", - }; - - transformAiSdkAttributes(attributes); - - // Should not 
create any function attributes - assert.strictEqual( - attributes[`${SpanAttributes.LLM_REQUEST_FUNCTIONS}.0.name`], - undefined, - ); - - // Original attribute should be removed - assert.strictEqual(attributes["ai.prompt.tools"], undefined); - assert.strictEqual(attributes.someOtherAttr, "value"); - }); - - it("should not modify attributes when ai.prompt.tools is not present", () => { - const attributes = { - someOtherAttr: "value", - }; - - transformAiSdkAttributes(attributes); - - assert.strictEqual(attributes.someOtherAttr, "value"); - assert.strictEqual( - attributes[`${SpanAttributes.LLM_REQUEST_FUNCTIONS}.0.name`], - undefined, - ); - }); - - it("should handle tools with null/undefined values", () => { - const attributes = { - "ai.prompt.tools": [null, undefined, {}, { name: "validTool" }], - }; - - transformAiSdkAttributes(attributes); - - // Only the valid tool should create attributes - assert.strictEqual( - attributes[`${SpanAttributes.LLM_REQUEST_FUNCTIONS}.3.name`], - "validTool", - ); - - // First three should not create attributes since they're invalid - assert.strictEqual( - attributes[`${SpanAttributes.LLM_REQUEST_FUNCTIONS}.0.name`], - undefined, - ); - assert.strictEqual( - attributes[`${SpanAttributes.LLM_REQUEST_FUNCTIONS}.1.name`], - undefined, - ); - assert.strictEqual( - attributes[`${SpanAttributes.LLM_REQUEST_FUNCTIONS}.2.name`], - undefined, - ); - }); - - it("should handle AI SDK string format tools", () => { - // This is how AI SDK actually stores tools - as JSON strings in array - const attributes = { - "ai.prompt.tools": [ - '{"type":"function","name":"getWeather","description":"Get weather","parameters":{"type":"object","properties":{"location":{"type":"string"}}}}', - '{"type":"function","name":"searchRestaurants","description":"Find restaurants","parameters":{"type":"object","properties":{"city":{"type":"string"}}}}', - ], - }; - - transformAiSdkAttributes(attributes); - - // Should parse and transform the first tool - 
assert.strictEqual( - attributes[`${SpanAttributes.LLM_REQUEST_FUNCTIONS}.0.name`], - "getWeather", - ); - assert.strictEqual( - attributes[`${SpanAttributes.LLM_REQUEST_FUNCTIONS}.0.description`], - "Get weather", - ); - assert.strictEqual( - attributes[`${SpanAttributes.LLM_REQUEST_FUNCTIONS}.0.parameters`], - JSON.stringify({ - type: "object", - properties: { location: { type: "string" } }, - }), - ); - - // Should parse and transform the second tool - assert.strictEqual( - attributes[`${SpanAttributes.LLM_REQUEST_FUNCTIONS}.1.name`], - "searchRestaurants", - ); - assert.strictEqual( - attributes[`${SpanAttributes.LLM_REQUEST_FUNCTIONS}.1.description`], - "Find restaurants", - ); - - assert.strictEqual(attributes["ai.prompt.tools"], undefined); - }); - - it("should handle mixed format tools (strings and objects)", () => { - const attributes = { - "ai.prompt.tools": [ - '{"type":"function","name":"stringTool","description":"Tool from string"}', - { name: "objectTool", description: "Tool from object" }, - ], - }; - - transformAiSdkAttributes(attributes); - - assert.strictEqual( - attributes[`${SpanAttributes.LLM_REQUEST_FUNCTIONS}.0.name`], - "stringTool", - ); - assert.strictEqual( - attributes[`${SpanAttributes.LLM_REQUEST_FUNCTIONS}.0.description`], - "Tool from string", - ); - assert.strictEqual( - attributes[`${SpanAttributes.LLM_REQUEST_FUNCTIONS}.1.name`], - "objectTool", - ); - assert.strictEqual( - attributes[`${SpanAttributes.LLM_REQUEST_FUNCTIONS}.1.description`], - "Tool from object", - ); - }); - }); - - describe("transformAiSdkAttributes - prompt tokens", () => { - it("should transform ai.usage.promptTokens to LLM usage attribute", () => { - const attributes = { - "ai.usage.promptTokens": 50, - someOtherAttr: "value", - }; - - transformAiSdkAttributes(attributes); - - assert.strictEqual( - attributes[SpanAttributes.LLM_USAGE_PROMPT_TOKENS], - 50, - ); - assert.strictEqual(attributes["ai.usage.promptTokens"], undefined); - 
assert.strictEqual(attributes.someOtherAttr, "value"); - }); - - it("should not modify attributes when ai.usage.promptTokens is not present", () => { - const attributes = { - someOtherAttr: "value", - }; - const originalAttributes = { ...attributes }; - - transformAiSdkAttributes(attributes); - - assert.deepStrictEqual(attributes, originalAttributes); - }); - - it("should handle zero prompt tokens", () => { - const attributes = { - "ai.usage.promptTokens": 0, - }; - - transformAiSdkAttributes(attributes); - - assert.strictEqual(attributes[SpanAttributes.LLM_USAGE_PROMPT_TOKENS], 0); - }); - }); - - describe("transformAiSdkAttributes - completion tokens", () => { - it("should transform ai.usage.completionTokens to LLM usage attribute", () => { - const attributes = { - "ai.usage.completionTokens": 25, - someOtherAttr: "value", - }; - - transformAiSdkAttributes(attributes); - - assert.strictEqual( - attributes[SpanAttributes.LLM_USAGE_COMPLETION_TOKENS], - 25, - ); - assert.strictEqual(attributes["ai.usage.completionTokens"], undefined); - assert.strictEqual(attributes.someOtherAttr, "value"); - }); - - it("should not modify attributes when ai.usage.completionTokens is not present", () => { - const attributes = { - someOtherAttr: "value", - }; - const originalAttributes = { ...attributes }; - - transformAiSdkAttributes(attributes); - - assert.deepStrictEqual(attributes, originalAttributes); - }); - - it("should handle zero completion tokens", () => { - const attributes = { - "ai.usage.completionTokens": 0, - }; - - transformAiSdkAttributes(attributes); - - assert.strictEqual( - attributes[SpanAttributes.LLM_USAGE_COMPLETION_TOKENS], - 0, - ); - }); - }); - - describe("transformAiSdkAttributes - total tokens calculation", () => { - it("should calculate total tokens from prompt and completion tokens", () => { - const attributes = { - [SpanAttributes.LLM_USAGE_PROMPT_TOKENS]: 50, - [SpanAttributes.LLM_USAGE_COMPLETION_TOKENS]: 25, - }; - - 
transformAiSdkAttributes(attributes); - - assert.strictEqual(attributes[SpanAttributes.LLM_USAGE_TOTAL_TOKENS], 75); - }); - - it("should handle string token values", () => { - const attributes = { - [SpanAttributes.LLM_USAGE_PROMPT_TOKENS]: "50", - [SpanAttributes.LLM_USAGE_COMPLETION_TOKENS]: "25", - }; - - transformAiSdkAttributes(attributes); - - assert.strictEqual(attributes[SpanAttributes.LLM_USAGE_TOTAL_TOKENS], 75); - }); - - it("should not calculate total when prompt tokens are missing", () => { - const attributes = { - [SpanAttributes.LLM_USAGE_COMPLETION_TOKENS]: 25, - }; - - transformAiSdkAttributes(attributes); - - assert.strictEqual( - attributes[SpanAttributes.LLM_USAGE_TOTAL_TOKENS], - undefined, - ); - }); - - it("should not calculate total when completion tokens are missing", () => { - const attributes = { - [SpanAttributes.LLM_USAGE_PROMPT_TOKENS]: 50, - }; - - transformAiSdkAttributes(attributes); - - assert.strictEqual( - attributes[SpanAttributes.LLM_USAGE_TOTAL_TOKENS], - undefined, - ); - }); - - it("should not calculate total when both tokens are missing", () => { - const attributes = {}; - - transformAiSdkAttributes(attributes); - - assert.strictEqual( - attributes[SpanAttributes.LLM_USAGE_TOTAL_TOKENS], - undefined, - ); - }); - }); - - describe("transformAiSdkAttributes - vendor", () => { - it("should transform openai.chat provider to OpenAI system", () => { - const attributes = { - "ai.model.provider": "openai.chat", - someOtherAttr: "value", - }; - - transformAiSdkAttributes(attributes); - - assert.strictEqual(attributes[SpanAttributes.LLM_SYSTEM], "OpenAI"); - assert.strictEqual(attributes["ai.model.provider"], undefined); - assert.strictEqual(attributes.someOtherAttr, "value"); - }); - - it("should transform any openai provider to OpenAI system", () => { - const openaiProviders = [ - "openai.completions", - "openai.embeddings", - "openai", - ]; - - openaiProviders.forEach((provider) => { - const attributes = { - "ai.model.provider": 
provider, - }; - - transformAiSdkAttributes(attributes); - - assert.strictEqual(attributes[SpanAttributes.LLM_SYSTEM], "OpenAI"); - assert.strictEqual(attributes["ai.model.provider"], undefined); - }); - }); - - it("should transform azure openai provider to Azure system", () => { - const openaiProviders = ["azure-openai"]; - - openaiProviders.forEach((provider) => { - const attributes = { - "ai.model.provider": provider, - }; - - transformAiSdkAttributes(attributes); - - assert.strictEqual(attributes[SpanAttributes.LLM_SYSTEM], "Azure"); - assert.strictEqual(attributes["ai.model.provider"], undefined); - }); - }); - - it("should transform other providers to their value", () => { - const attributes = { - "ai.model.provider": "anthropic", - }; - - transformAiSdkAttributes(attributes); - - assert.strictEqual(attributes[SpanAttributes.LLM_SYSTEM], "Anthropic"); - assert.strictEqual(attributes["ai.model.provider"], undefined); - }); - - it("should not modify attributes when ai.model.provider is not present", () => { - const attributes = { - someOtherAttr: "value", - }; - const originalAttributes = { ...attributes }; - - transformAiSdkAttributes(attributes); - - assert.deepStrictEqual(attributes, originalAttributes); - }); - - it("should handle empty provider value", () => { - const attributes = { - "ai.model.provider": "", - }; - - transformAiSdkAttributes(attributes); - - assert.strictEqual(attributes[SpanAttributes.LLM_SYSTEM], ""); - assert.strictEqual(attributes["ai.model.provider"], undefined); - }); - }); - - describe("transformAiSdkAttributes", () => { - it("should apply all attribute transformations", () => { - const attributes = { - "ai.response.text": "Hello!", - "ai.prompt.messages": JSON.stringify([{ role: "user", content: "Hi" }]), - "ai.usage.promptTokens": 10, - "ai.usage.completionTokens": 5, - "ai.model.provider": "openai.chat", - someOtherAttr: "value", - }; - - transformAiSdkAttributes(attributes); - - // Check response text transformation - 
assert.strictEqual( - attributes[`${SpanAttributes.LLM_COMPLETIONS}.0.content`], - "Hello!", - ); - assert.strictEqual( - attributes[`${SpanAttributes.LLM_COMPLETIONS}.0.role`], - "assistant", - ); - - // Check prompt messages transformation - assert.strictEqual( - attributes[`${SpanAttributes.LLM_PROMPTS}.0.content`], - "Hi", - ); - assert.strictEqual( - attributes[`${SpanAttributes.LLM_PROMPTS}.0.role`], - "user", - ); - - // Check token transformations - assert.strictEqual( - attributes[SpanAttributes.LLM_USAGE_PROMPT_TOKENS], - 10, - ); - assert.strictEqual( - attributes[SpanAttributes.LLM_USAGE_COMPLETION_TOKENS], - 5, - ); - assert.strictEqual(attributes[SpanAttributes.LLM_USAGE_TOTAL_TOKENS], 15); - - // Check vendor transformation - assert.strictEqual(attributes[SpanAttributes.LLM_SYSTEM], "OpenAI"); - - // Check original AI SDK attributes are removed - assert.strictEqual(attributes["ai.response.text"], undefined); - assert.strictEqual(attributes["ai.prompt.messages"], undefined); - assert.strictEqual(attributes["ai.usage.promptTokens"], undefined); - assert.strictEqual(attributes["ai.usage.completionTokens"], undefined); - assert.strictEqual(attributes["ai.model.provider"], undefined); - - // Check other attributes are preserved - assert.strictEqual(attributes.someOtherAttr, "value"); - }); - - it("should handle partial attribute sets", () => { - const attributes = { - "ai.response.text": "Hello!", - someOtherAttr: "value", - }; - - transformAiSdkAttributes(attributes); - - assert.strictEqual( - attributes[`${SpanAttributes.LLM_COMPLETIONS}.0.content`], - "Hello!", - ); - assert.strictEqual(attributes.someOtherAttr, "value"); - }); - - it("should apply all attribute transformations for generateObject", () => { - const attributes = { - "ai.response.object": '{"result":"Hello!"}', - "ai.prompt.messages": JSON.stringify([{ role: "user", content: "Hi" }]), - "ai.usage.promptTokens": 10, - "ai.usage.completionTokens": 5, - "ai.model.provider": 
"azure-openai.chat", - someOtherAttr: "value", - }; - - transformAiSdkAttributes(attributes); - - // Check response object transformation - assert.strictEqual( - attributes[`${SpanAttributes.LLM_COMPLETIONS}.0.content`], - '{"result":"Hello!"}', - ); - assert.strictEqual( - attributes[`${SpanAttributes.LLM_COMPLETIONS}.0.role`], - "assistant", - ); - - // Check prompt messages transformation - assert.strictEqual( - attributes[`${SpanAttributes.LLM_PROMPTS}.0.content`], - "Hi", - ); - assert.strictEqual( - attributes[`${SpanAttributes.LLM_PROMPTS}.0.role`], - "user", - ); - - // Check token transformations - assert.strictEqual( - attributes[SpanAttributes.LLM_USAGE_PROMPT_TOKENS], - 10, - ); - assert.strictEqual( - attributes[SpanAttributes.LLM_USAGE_COMPLETION_TOKENS], - 5, - ); - assert.strictEqual(attributes[SpanAttributes.LLM_USAGE_TOTAL_TOKENS], 15); - - // Check vendor transformation - assert.strictEqual(attributes[SpanAttributes.LLM_SYSTEM], "Azure"); - - // Check original AI SDK attributes are removed - assert.strictEqual(attributes["ai.response.object"], undefined); - assert.strictEqual(attributes["ai.prompt.messages"], undefined); - assert.strictEqual(attributes["ai.usage.promptTokens"], undefined); - assert.strictEqual(attributes["ai.usage.completionTokens"], undefined); - assert.strictEqual(attributes["ai.model.provider"], undefined); - - // Check other attributes are preserved - assert.strictEqual(attributes.someOtherAttr, "value"); - }); - - it("should transform tools along with other attributes", () => { - const attributes = { - "ai.response.text": "I'll help you with that!", - "ai.prompt.messages": JSON.stringify([ - { role: "user", content: "Get weather" }, - ]), - "ai.prompt.tools": [ - { - name: "getWeather", - description: "Get weather for a location", - parameters: { - type: "object", - properties: { location: { type: "string" } }, - }, - }, - ], - "ai.usage.promptTokens": 15, - "ai.usage.completionTokens": 8, - someOtherAttr: "value", - }; - - 
transformAiSdkAttributes(attributes); - - // Check tools transformation - assert.strictEqual( - attributes[`${SpanAttributes.LLM_REQUEST_FUNCTIONS}.0.name`], - "getWeather", - ); - assert.strictEqual( - attributes[`${SpanAttributes.LLM_REQUEST_FUNCTIONS}.0.description`], - "Get weather for a location", - ); - assert.strictEqual( - attributes[`${SpanAttributes.LLM_REQUEST_FUNCTIONS}.0.parameters`], - JSON.stringify({ - type: "object", - properties: { location: { type: "string" } }, - }), - ); - - // Check other transformations still work - assert.strictEqual( - attributes[`${SpanAttributes.LLM_COMPLETIONS}.0.content`], - "I'll help you with that!", - ); - assert.strictEqual( - attributes[`${SpanAttributes.LLM_PROMPTS}.0.content`], - "Get weather", - ); - assert.strictEqual(attributes[SpanAttributes.LLM_USAGE_TOTAL_TOKENS], 23); - - // Check original attributes are removed - assert.strictEqual(attributes["ai.prompt.tools"], undefined); - assert.strictEqual(attributes["ai.response.text"], undefined); - - // Check other attributes are preserved - assert.strictEqual(attributes.someOtherAttr, "value"); - }); - }); - - describe("transformAiSdkSpan", () => { - it("should transform both span name and attributes", () => { - const span = createMockSpan("ai.generateText.doGenerate", { - "ai.response.text": "Hello!", - "ai.usage.promptTokens": 10, - "ai.usage.completionTokens": 5, - }); - - transformAiSdkSpan(span); - - // Check span name transformation - assert.strictEqual(span.name, "ai.generateText.generate"); - - // Check attribute transformations - assert.strictEqual( - span.attributes[`${SpanAttributes.LLM_COMPLETIONS}.0.content`], - "Hello!", - ); - assert.strictEqual( - span.attributes[SpanAttributes.LLM_USAGE_PROMPT_TOKENS], - 10, - ); - assert.strictEqual( - span.attributes[SpanAttributes.LLM_USAGE_COMPLETION_TOKENS], - 5, - ); - assert.strictEqual( - span.attributes[SpanAttributes.LLM_USAGE_TOTAL_TOKENS], - 15, - ); - }); - - it("should transform generateObject span 
name and attributes", () => { - const span = createMockSpan("ai.generateObject.doGenerate", { - "ai.prompt.format": "prompt", - "llm.usage.output_tokens": "39", - "traceloop.workflow.name": "generate_person_profile", - "llm.request.model": "gpt-4o", - "ai.settings.maxRetries": "2", - "ai.usage.promptTokens": "108", - "operation.name": "ai.generateObject.doGenerate", - "llm.response.id": "chatcmpl-C82mjzq1hNM753oc4VkStnjEzzLpk", - "ai.response.providerMetadata": - '{"openai":{"reasoningTokens":0,"acceptedPredictionTokens":0,"rejectedPredictionTokens":0,"cachedPromptTokens":0}}', - "ai.operationId": "ai.generateObject.doGenerate", - "ai.response.id": "chatcmpl-C82mjzq1hNM753oc4VkStnjEzzLpk", - "ai.usage.completionTokens": "39", - "ai.response.model": "gpt-4o-2024-08-06", - "ai.response.object": - '{"name":"Alex Dupont","age":30,"occupation":"Software Engineer","skills":["AI","Machine Learning","Programming","Multilingual"],"location":{"city":"Paris","country":"France"}}', - "ai.prompt.messages": - '[{"role":"user","content":[{"type":"text","text":"Based on this description, generate a detailed person profile: A talented software engineer from Paris who loves working with AI and machine learning, speaks multiple languages, and enjoys traveling."}]}]', - "ai.settings.mode": "tool", - "llm.vendor": "openai.chat", - "ai.response.timestamp": "2025-08-24T11:02:45.000Z", - "llm.response.model": "gpt-4o-2024-08-06", - "ai.model.id": "gpt-4o", - "ai.response.finishReason": "stop", - "ai.model.provider": "openai.chat", - "llm.usage.input_tokens": "108", - }); - - transformAiSdkSpan(span); - - // Check span name transformation - assert.strictEqual(span.name, "ai.generateObject.generate"); - - // Check attribute transformations - assert.strictEqual( - span.attributes[`${SpanAttributes.LLM_COMPLETIONS}.0.content`], - '{"name":"Alex Dupont","age":30,"occupation":"Software Engineer","skills":["AI","Machine 
Learning","Programming","Multilingual"],"location":{"city":"Paris","country":"France"}}', - ); - assert.strictEqual( - span.attributes[`${SpanAttributes.LLM_COMPLETIONS}.0.role`], - "assistant", - ); - assert.strictEqual( - span.attributes[`${SpanAttributes.LLM_PROMPTS}.0.content`], - "Based on this description, generate a detailed person profile: A talented software engineer from Paris who loves working with AI and machine learning, speaks multiple languages, and enjoys traveling.", - ); - assert.strictEqual( - span.attributes[`${SpanAttributes.LLM_PROMPTS}.0.role`], - "user", - ); - assert.strictEqual( - span.attributes[SpanAttributes.LLM_USAGE_PROMPT_TOKENS], - "108", - ); - assert.strictEqual( - span.attributes[SpanAttributes.LLM_USAGE_COMPLETION_TOKENS], - "39", - ); - assert.strictEqual( - span.attributes[SpanAttributes.LLM_USAGE_TOTAL_TOKENS], - 147, - ); - assert.strictEqual(span.attributes[SpanAttributes.LLM_SYSTEM], "OpenAI"); - - // Check that original AI SDK attributes are removed - assert.strictEqual(span.attributes["ai.response.object"], undefined); - assert.strictEqual(span.attributes["ai.prompt.messages"], undefined); - assert.strictEqual(span.attributes["ai.usage.promptTokens"], undefined); - assert.strictEqual( - span.attributes["ai.usage.completionTokens"], - undefined, - ); - assert.strictEqual(span.attributes["ai.model.provider"], undefined); - }); - - it("should handle spans with no transformations needed", () => { - const span = createMockSpan("some.other.span", { - someAttr: "value", - }); - const originalName = span.name; - const originalAttributes = { ...span.attributes }; - - transformAiSdkSpan(span); - - assert.strictEqual(span.name, originalName); - assert.deepStrictEqual(span.attributes, originalAttributes); - }); - }); -}); diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml index b6dacfb9..0e1fe5e5 100644 --- a/pnpm-lock.yaml +++ b/pnpm-lock.yaml @@ -93,6 +93,9 @@ importers: '@opentelemetry/api': specifier: ^1.9.0 version: 1.9.0 + 
'@opentelemetry/semantic-conventions': + specifier: ^1.37.0 + version: 1.37.0 packages/instrumentation-anthropic: dependencies: @@ -106,8 +109,8 @@ importers: specifier: ^0.203.0 version: 0.203.0(@opentelemetry/api@1.9.0)(supports-color@10.0.0) '@opentelemetry/semantic-conventions': - specifier: ^1.36.0 - version: 1.36.0 + specifier: ^1.37.0 + version: 1.37.0 '@traceloop/ai-semantic-conventions': specifier: workspace:* version: link:../ai-semantic-conventions @@ -155,8 +158,8 @@ importers: specifier: ^0.203.0 version: 0.203.0(@opentelemetry/api@1.9.0)(supports-color@10.0.0) '@opentelemetry/semantic-conventions': - specifier: ^1.36.0 - version: 1.36.0 + specifier: ^1.37.0 + version: 1.37.0 '@traceloop/ai-semantic-conventions': specifier: workspace:* version: link:../ai-semantic-conventions @@ -204,8 +207,8 @@ importers: specifier: ^0.203.0 version: 0.203.0(@opentelemetry/api@1.9.0)(supports-color@10.0.0) '@opentelemetry/semantic-conventions': - specifier: ^1.36.0 - version: 1.36.0 + specifier: ^1.37.0 + version: 1.37.0 '@traceloop/ai-semantic-conventions': specifier: workspace:* version: link:../ai-semantic-conventions @@ -250,8 +253,8 @@ importers: specifier: ^0.203.0 version: 0.203.0(@opentelemetry/api@1.9.0)(supports-color@10.0.0) '@opentelemetry/semantic-conventions': - specifier: ^1.36.0 - version: 1.36.0 + specifier: ^1.37.0 + version: 1.37.0 '@traceloop/ai-semantic-conventions': specifier: workspace:* version: link:../ai-semantic-conventions @@ -299,8 +302,8 @@ importers: specifier: ^0.203.0 version: 0.203.0(@opentelemetry/api@1.9.0)(supports-color@10.0.0) '@opentelemetry/semantic-conventions': - specifier: ^1.36.0 - version: 1.36.0 + specifier: ^1.37.0 + version: 1.37.0 '@traceloop/ai-semantic-conventions': specifier: workspace:* version: link:../ai-semantic-conventions @@ -363,8 +366,8 @@ importers: specifier: ^0.203.0 version: 0.203.0(@opentelemetry/api@1.9.0)(supports-color@10.0.0) '@opentelemetry/semantic-conventions': - specifier: ^1.36.0 - version: 
1.36.0 + specifier: ^1.37.0 + version: 1.37.0 '@traceloop/ai-semantic-conventions': specifier: workspace:* version: link:../ai-semantic-conventions @@ -418,8 +421,8 @@ importers: specifier: ^0.203.0 version: 0.203.0(@opentelemetry/api@1.9.0)(supports-color@10.0.0) '@opentelemetry/semantic-conventions': - specifier: ^1.36.0 - version: 1.36.0 + specifier: ^1.37.0 + version: 1.37.0 '@traceloop/ai-semantic-conventions': specifier: workspace:* version: link:../ai-semantic-conventions @@ -482,8 +485,8 @@ importers: specifier: ^0.203.0 version: 0.203.0(@opentelemetry/api@1.9.0)(supports-color@10.0.0) '@opentelemetry/semantic-conventions': - specifier: ^1.36.0 - version: 1.36.0 + specifier: ^1.37.0 + version: 1.37.0 '@traceloop/ai-semantic-conventions': specifier: workspace:* version: link:../ai-semantic-conventions @@ -580,8 +583,8 @@ importers: specifier: ^0.203.0 version: 0.203.0(@opentelemetry/api@1.9.0)(supports-color@10.0.0) '@opentelemetry/semantic-conventions': - specifier: ^1.36.0 - version: 1.36.0 + specifier: ^1.37.0 + version: 1.37.0 '@traceloop/ai-semantic-conventions': specifier: workspace:* version: link:../ai-semantic-conventions @@ -632,8 +635,8 @@ importers: specifier: ^0.203.0 version: 0.203.0(@opentelemetry/api@1.9.0)(supports-color@10.0.0) '@opentelemetry/semantic-conventions': - specifier: ^1.36.0 - version: 1.36.0 + specifier: ^1.37.0 + version: 1.37.0 '@traceloop/ai-semantic-conventions': specifier: workspace:* version: link:../ai-semantic-conventions @@ -783,8 +786,8 @@ importers: specifier: ^2.0.1 version: 2.0.1(@opentelemetry/api@1.9.0) '@opentelemetry/semantic-conventions': - specifier: ^1.36.0 - version: 1.36.0 + specifier: ^1.37.0 + version: 1.37.0 '@traceloop/ai-semantic-conventions': specifier: workspace:* version: link:../ai-semantic-conventions @@ -3311,8 +3314,8 @@ packages: peerDependencies: '@opentelemetry/api': '>=1.0.0 <1.10.0' - '@opentelemetry/semantic-conventions@1.36.0': - resolution: {integrity: 
sha512-TtxJSRD8Ohxp6bKkhrm27JRHAxPczQA7idtcTOMYI+wQRRrfgqxHv1cFbCApcSnNjtXkmzFozn6jQtFrOmbjPQ==} + '@opentelemetry/semantic-conventions@1.37.0': + resolution: {integrity: sha512-JD6DerIKdJGmRp4jQyX5FlrQjA4tjOw1cvfsPAZXfOOEErMUHjPcPSICS+6WnM0nB0efSFARh0KAZss+bvExOA==} engines: {node: '>=14'} '@phenomnomnominal/tsquery@5.0.1': @@ -11104,7 +11107,7 @@ snapshots: '@opentelemetry/core@2.0.1(@opentelemetry/api@1.9.0)': dependencies: '@opentelemetry/api': 1.9.0 - '@opentelemetry/semantic-conventions': 1.36.0 + '@opentelemetry/semantic-conventions': 1.37.0 '@opentelemetry/exporter-logs-otlp-grpc@0.203.0(@opentelemetry/api@1.9.0)': dependencies: @@ -11209,7 +11212,7 @@ snapshots: '@opentelemetry/core': 2.0.1(@opentelemetry/api@1.9.0) '@opentelemetry/resources': 2.0.1(@opentelemetry/api@1.9.0) '@opentelemetry/sdk-trace-base': 2.0.1(@opentelemetry/api@1.9.0) - '@opentelemetry/semantic-conventions': 1.36.0 + '@opentelemetry/semantic-conventions': 1.37.0 '@opentelemetry/instrumentation@0.203.0(@opentelemetry/api@1.9.0)(supports-color@10.0.0)': dependencies: @@ -11259,7 +11262,7 @@ snapshots: dependencies: '@opentelemetry/api': 1.9.0 '@opentelemetry/core': 2.0.1(@opentelemetry/api@1.9.0) - '@opentelemetry/semantic-conventions': 1.36.0 + '@opentelemetry/semantic-conventions': 1.37.0 '@opentelemetry/sdk-logs@0.203.0(@opentelemetry/api@1.9.0)': dependencies: @@ -11298,7 +11301,7 @@ snapshots: '@opentelemetry/sdk-metrics': 2.0.1(@opentelemetry/api@1.9.0) '@opentelemetry/sdk-trace-base': 2.0.1(@opentelemetry/api@1.9.0) '@opentelemetry/sdk-trace-node': 2.0.1(@opentelemetry/api@1.9.0) - '@opentelemetry/semantic-conventions': 1.36.0 + '@opentelemetry/semantic-conventions': 1.37.0 transitivePeerDependencies: - supports-color @@ -11307,7 +11310,7 @@ snapshots: '@opentelemetry/api': 1.9.0 '@opentelemetry/core': 2.0.1(@opentelemetry/api@1.9.0) '@opentelemetry/resources': 2.0.1(@opentelemetry/api@1.9.0) - '@opentelemetry/semantic-conventions': 1.36.0 + '@opentelemetry/semantic-conventions': 
1.37.0 '@opentelemetry/sdk-trace-node@2.0.1(@opentelemetry/api@1.9.0)': dependencies: @@ -11316,7 +11319,7 @@ snapshots: '@opentelemetry/core': 2.0.1(@opentelemetry/api@1.9.0) '@opentelemetry/sdk-trace-base': 2.0.1(@opentelemetry/api@1.9.0) - '@opentelemetry/semantic-conventions@1.36.0': {} + '@opentelemetry/semantic-conventions@1.37.0': {} '@phenomnomnominal/tsquery@5.0.1(typescript@5.8.3)': dependencies: