From 64d0381906c18d58a0cc198b401ec2019b280ba8 Mon Sep 17 00:00:00 2001
From: "jiangqi.rrt"
Date: Mon, 24 Nov 2025 18:01:01 +0800
Subject: [PATCH 1/3] support langchain v1

---
 cozeloop/integration/langchain/trace_callback.py | 5 +++--
 cozeloop/integration/langchain/util.py           | 3 +--
 2 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/cozeloop/integration/langchain/trace_callback.py b/cozeloop/integration/langchain/trace_callback.py
index 02ab5e5..6404fdb 100644
--- a/cozeloop/integration/langchain/trace_callback.py
+++ b/cozeloop/integration/langchain/trace_callback.py
@@ -9,8 +9,9 @@
 import pydantic
 from pydantic import Field, BaseModel
 
-from langchain.callbacks.base import BaseCallbackHandler
-from langchain.schema import AgentFinish, AgentAction, LLMResult
+from langchain_core.callbacks.base import BaseCallbackHandler
+from langchain_core.outputs import LLMResult
+from langchain_core.agents import AgentFinish, AgentAction
 from langchain_core.prompt_values import PromptValue, ChatPromptValue
 from langchain_core.messages import BaseMessage, AIMessageChunk
 from langchain_core.prompts import AIMessagePromptTemplate, HumanMessagePromptTemplate, SystemMessagePromptTemplate
diff --git a/cozeloop/integration/langchain/util.py b/cozeloop/integration/langchain/util.py
index 2d7b432..b688aef 100644
--- a/cozeloop/integration/langchain/util.py
+++ b/cozeloop/integration/langchain/util.py
@@ -3,8 +3,7 @@
 import tiktoken
 from typing import List, Dict, Union, Any, Optional
 
-from langchain.schema import LLMResult
-from langchain_core.outputs import Generation, ChatGeneration
+from langchain_core.outputs import LLMResult, Generation, ChatGeneration
 
 
 def calc_token_usage(inputs: Union[List[Dict], LLMResult], model: str = 'gpt-3.5-turbo-0613'):

From ef19382542fdc094abb28819ea9ca2ce2a71361 Mon Sep 17 00:00:00 2001
From: "jiangqi.rrt"
Date: Tue, 25 Nov 2025 10:23:44 +0800
Subject: [PATCH 2/3] fix

---
 CHANGLOG.md                                      |   5 +
 .../integration/langchain/trace_callback.py      | 147 ++++++++++++++----
 .../langchain/trace_model/llm_model.py           |  84 ++++++++--
 cozeloop/internal/version.py                     |   2 +-
 examples/lcel/lcel.py                            |  15 +-
 pyproject.toml                                   |   2 +-
 6 files changed, 208 insertions(+), 47 deletions(-)

diff --git a/CHANGLOG.md b/CHANGLOG.md
index 68f5c05..07c7cc8 100644
--- a/CHANGLOG.md
+++ b/CHANGLOG.md
@@ -1,3 +1,8 @@
+## [0.1.20] - 2025-11-10
+### Added
+- langchain callback supports LangChain v1
+- langchain callback supports setting custom tags and span names
+
 ## [0.1.19] - 2025-11-10
 ### Fixed
 - fix baggage escape problem
diff --git a/cozeloop/integration/langchain/trace_callback.py b/cozeloop/integration/langchain/trace_callback.py
index 6404fdb..caa1cf5 100644
--- a/cozeloop/integration/langchain/trace_callback.py
+++ b/cozeloop/integration/langchain/trace_callback.py
@@ -5,15 +5,15 @@
 import json
 import time
 import traceback
-from typing import List, Dict, Union, Any, Optional
+from typing import List, Dict, Union, Any, Optional, Callable, Protocol
 import pydantic
 from pydantic import Field, BaseModel
 
 from langchain_core.callbacks.base import BaseCallbackHandler
-from langchain_core.outputs import LLMResult
+from langchain_core.outputs import LLMResult, ChatGeneration
 from langchain_core.agents import AgentFinish, AgentAction
 from langchain_core.prompt_values import PromptValue, ChatPromptValue
-from langchain_core.messages import BaseMessage, AIMessageChunk
+from langchain_core.messages import BaseMessage, AIMessageChunk, AIMessage
 from langchain_core.prompts import AIMessagePromptTemplate, HumanMessagePromptTemplate, SystemMessagePromptTemplate
 from langchain_core.outputs import ChatGenerationChunk, GenerationChunk
 
@@ -29,9 +29,17 @@ class LoopTracer:
 
     @classmethod
-    def get_callback_handler(cls, client: Client = None):
+    def get_callback_handler(
+            cls,
+            client: Client = None,
+            modify_name_fn: Optional[Callable[[str], str]] = None,
+            add_tags_fn: Optional[Callable[[str], Dict[str, Any]]] = None,
+            tags: Dict[str, Any] = None,
+    ):
         """
         Do not hold it for a long time, get a new callback_handler for each request.
+        modify_name_fn: maps a node name to the span name (with langgraph, the node name is the one passed to add_node(node_name, node_func)).
+        add_tags_fn: maps a node name to a dict of extra tags to set on the span (node name as above).
         """
 
         global _trace_callback_client
         if client:
@@ -39,14 +47,22 @@ def get_callback_handler(cls, client: Client = None):
         else:
             _trace_callback_client = get_default_client()
 
-        return LoopTraceCallbackHandler()
+        return LoopTraceCallbackHandler(modify_name_fn, add_tags_fn, tags)
 
 
 class LoopTraceCallbackHandler(BaseCallbackHandler):
-    def __init__(self):
+    def __init__(
+            self,
+            name_fn: Optional[Callable[[str], str]] = None,
+            tags_fn: Optional[Callable[[str], Dict[str, Any]]] = None,
+            tags: Dict[str, Any] = None,
+    ):
         super().__init__()
         self._space_id = _trace_callback_client.workspace_id
         self.run_map: Dict[str, Run] = {}
+        self.name_fn = name_fn
+        self.tags_fn = tags_fn
+        self._tags = tags if tags else {}
 
     def on_llm_start(self, serialized: Dict[str, Any], prompts: List[str], **kwargs: Any) -> Any:
         span_tags = {}
@@ -98,32 +114,26 @@ def on_llm_end(self, response: LLMResult, **kwargs: Any) -> Any:
         try:
             # set output span_tag
             flow_span.set_tags({'output': ModelTraceOutput(response.generations).to_json()})
+            # set model tags
+            tags = self._get_model_tags(response, **kwargs)
+            if tags:
+                self._set_span_tags(flow_span, tags, need_convert_tag_value=False)
         except Exception as e:
             flow_span.set_error(e)
-        # calculate token usage,and set span_tag
-        if response.llm_output is not None and 'token_usage' in response.llm_output and response.llm_output['token_usage']:
-            self._set_span_tags(flow_span, response.llm_output['token_usage'], need_convert_tag_value=False)
-        else:
-            try:
-                run_info = self.run_map[str(kwargs['run_id'])]
-                if run_info is not None and run_info.model_meta is not None:
-                    model_name = run_info.model_meta.model_name
-                    input_messages = run_info.model_meta.message
-                    flow_span.set_input_tokens(calc_token_usage(input_messages, model_name))
-                    flow_span.set_output_tokens(calc_token_usage(response, model_name))
-            except Exception as e:
-                flow_span.set_error(e)
         # finish flow_span
-        flow_span.finish()
+        self._end_flow_span(flow_span)
 
     def on_chain_start(self, serialized: Dict[str, Any], inputs: Dict[str, Any], **kwargs: Any) -> Any:
         flow_span = None
         try:
             if kwargs.get('run_type', '') == 'prompt' or kwargs.get('name', '') == 'ChatPromptTemplate':
-                flow_span = self._new_flow_span(kwargs['name'], kwargs['name'], **kwargs)
+                flow_span = self._new_flow_span(kwargs['name'], 'prompt', **kwargs)
                 self._on_prompt_start(flow_span, serialized, inputs, **kwargs)
             else:
-                flow_span = self._new_flow_span(kwargs['name'], kwargs['name'], **kwargs)
+                span_type = 'chain'
+                if kwargs['name'] == 'LangGraph':  # a LangGraph run gets the agent span_type so trajectory evaluation can aggregate it into one agent
+                    span_type = 'agent'
+                flow_span = self._new_flow_span(kwargs['name'], span_type, **kwargs)
                 flow_span.set_tags({'input': _convert_2_json(inputs)})
         except Exception as e:
             if flow_span is not None:
@@ -142,7 +152,7 @@ def on_chain_end(self, outputs: Union[Dict[str, Any], Any], **kwargs: Any) -> An
             flow_span.set_tags({'output': _convert_2_json(outputs)})
         except Exception as e:
             flow_span.set_error(e)
-        flow_span.finish()
+        self._end_flow_span(flow_span)
 
     def on_chain_error(self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any) -> Any:
         flow_span = self._get_flow_span(**kwargs)
@@ -151,7 +161,7 @@ def on_chain_error(self, error: Union[Exception, KeyboardInterrupt], **kwargs: A
             flow_span = self._new_flow_span(span_name, 'chain_error', **kwargs)
         flow_span.set_error(error)
         flow_span.set_tags({'error_trace': traceback.format_exc()})
-        flow_span.finish()
+        self._end_flow_span(flow_span)
 
     def on_tool_start(
             self, serialized: Dict[str, Any], input_str: str, **kwargs: Any
@@ -167,7 +177,7 @@ def on_tool_end(self, output: str, **kwargs: Any) -> Any:
             flow_span.set_tags({'output': _convert_2_json(output)})
         except Exception as e:
             flow_span.set_error(e)
-        flow_span.finish()
+        self._end_flow_span(flow_span)
 
     def on_tool_error(
             self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any
@@ -178,7 +188,7 @@ def on_tool_error(
             flow_span = self._new_flow_span(span_name, 'tool_error', **kwargs)
         flow_span.set_error(error)
         flow_span.set_tags({'error_trace': traceback.format_exc()})
-        flow_span.finish()
+        self._end_flow_span(flow_span)
 
     def on_text(self, text: str, **kwargs: Any) -> Any:
         """Run on arbitrary text."""
@@ -189,6 +199,67 @@ def on_agent_action(self, action: AgentAction, **kwargs: Any) -> Any:
     def on_agent_finish(self, finish: AgentFinish, **kwargs: Any) -> Any:
         return
 
+    def _end_flow_span(self, span: Span):
+        span.set_tags(self._tags)
+        span.finish()
+
+    def _get_model_tags(self, response: LLMResult, **kwargs: Any) -> Dict[str, Any]:
+        return self._get_model_token_tags(response, **kwargs)
+
+    def _get_model_token_tags(self, response: LLMResult, **kwargs: Any) -> Dict[str, Any]:
+        result = {}
+        is_get_from_langchain = False
+        if response.llm_output is not None and 'token_usage' in response.llm_output and response.llm_output[
+            'token_usage']:
+            is_get_from_langchain = True
+            result['input_tokens'] = response.llm_output.get('token_usage', {}).get('prompt_tokens', 0)
+            result['output_tokens'] = response.llm_output.get('token_usage', {}).get('completion_tokens', 0)
+            result['tokens'] = result['input_tokens'] + result['output_tokens']
+            reasoning_tokens = response.llm_output.get('token_usage', {}).get('completion_tokens_details', {}).get(
+                'reasoning_tokens', 0)
+            if reasoning_tokens:
+                result['reasoning_tokens'] = reasoning_tokens
+            input_cached_tokens = response.llm_output.get('token_usage', {}).get('prompt_tokens_details', {}).get(
+                'cached_tokens', 0)
+            if input_cached_tokens:
+                result['input_cached_tokens'] = input_cached_tokens
+        elif response.generations is not None and len(response.generations) > 0 and response.generations[0] is not None:
+            for i, generation in enumerate(response.generations[0]):
+                if isinstance(generation, ChatGeneration) and isinstance(generation.message, (AIMessageChunk, AIMessage)) and generation.message.usage_metadata:
+                    is_get_from_langchain = True
+                    result['input_tokens'] = generation.message.usage_metadata.get('input_tokens', 0)
+                    result['output_tokens'] = generation.message.usage_metadata.get('output_tokens', 0)
+                    result['tokens'] = result['input_tokens'] + result['output_tokens']
+                    if generation.message.usage_metadata.get('output_token_details', {}):
+                        reasoning_tokens = generation.message.usage_metadata.get('output_token_details', {}).get('reasoning', 0)
+                        if reasoning_tokens:
+                            result['reasoning_tokens'] = reasoning_tokens
+                    if generation.message.usage_metadata.get('input_token_details', {}):
+                        input_read_cached_tokens = generation.message.usage_metadata.get('input_token_details', {}).get('cache_read', 0)
+                        if input_read_cached_tokens:
+                            result['input_cached_tokens'] = input_read_cached_tokens
+                        input_creation_cached_tokens = generation.message.usage_metadata.get('input_token_details', {}).get('cache_creation', 0)
+                        if input_creation_cached_tokens:
+                            result['input_creation_cached_tokens'] = input_creation_cached_tokens
+        if is_get_from_langchain:
+            return result
+        else:
+            try:
+                run_info = self.run_map[str(kwargs['run_id'])]
+                if run_info is not None and run_info.model_meta is not None:
+                    model_name = run_info.model_meta.model_name
+                    input_messages = run_info.model_meta.message
+                    token_usage = {
+                        'input_tokens': calc_token_usage(input_messages, model_name),
+                        'output_tokens': calc_token_usage(response, model_name),
+                        'tokens': 0
+                    }
+                    token_usage['tokens'] = token_usage['input_tokens'] + token_usage['output_tokens']
+                    return token_usage
+            except Exception as e:
+                span_tags = {'error_info': repr(e), 'error_trace': traceback.format_exc()}
+                return span_tags
+
     def _on_prompt_start(self, flow_span, serialized: Dict[str, Any], inputs: (Dict[str, Any], str), **kwargs: Any) -> None:
         # get inputs
         params: List[Argument] = []
@@ -234,18 +305,38 @@ def _on_prompt_start(self, flow_span, serialized: Dict[str, Any], inputs: (Dict[
             flow_span.set_tags({'prompt_version': kwargs['metadata']['lc_hub_commit_hash']})
             flow_span.set_tags({'prompt_provider': 'langsmith'})
 
-    def _new_flow_span(self, span_name: str, span_type: str, **kwargs: Any) -> Span:
+    def _new_flow_span(self, node_name: str, span_type: str, **kwargs: Any) -> Span:
         span_type = _span_type_mapping(span_type)
+        span_name = node_name
         # set parent span
         parent_span: Span = None
         if 'parent_run_id' in kwargs and kwargs['parent_run_id'] is not None and str(kwargs['parent_run_id']) in self.run_map:
             parent_span = self.run_map[str(kwargs['parent_run_id'])].span
+        # modify name
+        error_tag = {}
+        try:
+            if self.name_fn:
+                name = self.name_fn(node_name)
+                if name:
+                    span_name = name
+        except Exception as e:
+            error_tag = {'error_info': f'name_fn error {repr(e)}', 'error_trace': traceback.format_exc()}
         # new span
        flow_span = _trace_callback_client.start_span(span_name, span_type, child_of=parent_span)
         run_id = str(kwargs['run_id'])
         self.run_map[run_id] = Run(run_id, flow_span, span_type)
         # set default tags
         flow_span.set_runtime(RuntimeInfo())
+        # set extra tags
+        try:
+            if self.tags_fn:
+                tags = self.tags_fn(node_name)
+                if isinstance(tags, dict):
+                    flow_span.set_tags(tags)
+        except Exception as e:
+            error_tag = {'error_info': f'tags_fn error {repr(e)}', 'error_trace': traceback.format_exc()}
+        if error_tag:
+            flow_span.set_tags(error_tag)
         return flow_span
 
     def _get_flow_span(self, **kwargs: Any) -> Span:
@@ -417,7 +508,7 @@ def _convert_inputs(inputs: Any) -> Any:
         for each in inputs:
            format_inputs.append(_convert_inputs(each))
         return format_inputs
-    if isinstance(inputs, AIMessageChunk):
+    if isinstance(inputs, (AIMessageChunk, AIMessage)):
         """
         Must be before BaseMessage.
         """
""" diff --git a/cozeloop/integration/langchain/trace_model/llm_model.py b/cozeloop/integration/langchain/trace_model/llm_model.py index 172ce61..5e18cf0 100644 --- a/cozeloop/integration/langchain/trace_model/llm_model.py +++ b/cozeloop/integration/langchain/trace_model/llm_model.py @@ -2,12 +2,15 @@ # SPDX-License-Identifier: MIT import json +import logging import time from typing import List, Optional, Union, Dict, Any from pydantic.dataclasses import dataclass from langchain_core.messages import BaseMessage, ToolMessage, AIMessageChunk, AIMessage from langchain_core.outputs import Generation, ChatGeneration +logger = logging.getLogger(__name__) + @dataclass class ToolFunction: @@ -48,6 +51,8 @@ class Message: content: Optional[Union[str, List[Union[dict, Parts]], dict]] = None parts: Optional[List[Parts]] = None tool_calls: List[ToolCall] = None + metadata: Optional[dict] = None + reasoning_content: Optional[str] = None def __post_init__(self): if self.role is not None and (self.role == 'AIMessageChunk' or self.role == 'ai'): @@ -126,16 +131,19 @@ def __init__(self, messages: List[Union[BaseMessage, List[BaseMessage]]], invoca tool_call_id_name_map = {} for message in process_messages: if isinstance(message, (AIMessageChunk, AIMessage)): - for tool_call in message.additional_kwargs.get('tool_calls', []): - if tool_call.get('id', ''): - tool_call_id_name_map[tool_call.get('id', '')] = tool_call.get('function', {}).get('name', '') + if message.additional_kwargs: + for tool_call in message.additional_kwargs.get('tool_calls', []): + if tool_call and tool_call.get('id', ''): + tool_call_id_name_map[tool_call.get('id', '')] = tool_call.get('function', {}).get('name', '') for tool_call in message.tool_calls: - if tool_call.get('id', ''): + if tool_call and tool_call.get('id', ''): tool_call_id_name_map[tool_call.get('id', '')] = tool_call.get('name', '') for message in process_messages: if isinstance(message, (AIMessageChunk, AIMessage)): - tool_calls = convert_tool_calls_by_additional_kwargs(message.additional_kwargs.get('tool_calls', [])) + tool_calls = [] + if message.additional_kwargs: + tool_calls = convert_tool_calls_by_additional_kwargs(message.additional_kwargs.get('tool_calls', [])) if len(tool_calls) == 0: tool_calls = convert_tool_calls_by_raw(message.tool_calls) self._messages.append(Message(role=message.type, content=message.content, tool_calls=tool_calls)) @@ -143,7 +151,7 @@ def __init__(self, messages: List[Union[BaseMessage, List[BaseMessage]]], invoca name = '' if tool_call_id_name_map.get(message.tool_call_id, None) is not None: name = tool_call_id_name_map[message.tool_call_id] - if message.additional_kwargs.get('name', ''): + if message.additional_kwargs is not None and message.additional_kwargs.get('name', ''): name = message.additional_kwargs.get('name', '') tool_call = ToolCall(id=message.tool_call_id, type=message.type, function=ToolFunction(name=name)) self._messages.append(Message(role=message.type, content=message.content, tool_calls=[tool_call])) @@ -151,15 +159,22 @@ def __init__(self, messages: List[Union[BaseMessage, List[BaseMessage]]], invoca self._messages.append(Message(role=message.type, content=message.content)) def to_json(self): + if self._invocation_params is None: + return '{}' tools: List[Tool] = [] for tool in self._invocation_params.get('tools', []): + if tool.get('function', {}) is None: + continue function = ToolFunction(name=tool.get('function', {}).get('name', ''), description=tool.get('function', {}).get('description', ''), 
                                     parameters=tool.get('function', {}).get('parameters', {}))
             tools.append(Tool(type=tool.get('type', ''), function=function))
         if len(tools) == 0 and 'functions' in self._invocation_params:
-            for bind_function in self._invocation_params['functions']:
-                function = ToolFunction(name=bind_function.get('function', {}).get('name', ''),
+            for bind_function in self._invocation_params.get('functions', []):
+                name = ''
+                if bind_function.get('function', {}):
+                    name = bind_function.get('function', {}).get('name', '')
+                function = ToolFunction(name=name,
                                         description=bind_function.get('description', ''),
                                         parameters=bind_function.get('parameters', {}))
                 tools.append(Tool(type=bind_function.get('type', ''), function=function))
@@ -180,12 +195,7 @@ def to_json(self):
         for i, generation in enumerate(self.generations):
             choice: Choice = None
             if isinstance(generation, ChatGeneration):
-                tool_calls = convert_tool_calls_by_additional_kwargs(generation.message.additional_kwargs.get('tool_calls', []))
-                if len(tool_calls) == 0 and 'function_call' in generation.message.additional_kwargs:
-                    function_call = generation.message.additional_kwargs.get('function_call', {})
-                    function = ToolFunction(name=function_call.get('name', ''), arguments=json.loads(function_call.get('arguments', {})))
-                    tool_calls.append(ToolCall(function=function, type='function_call(deprecated)'))
-                message = Message(role=generation.message.type, content=generation.message.content, tool_calls=tool_calls)
+                message = convert_output_message(generation.message)
                 choice = Choice(index=i, message=message, finish_reason=generation.generation_info.get('finish_reason', ''))
             elif isinstance(generation, Generation):
                 choice = Choice(index=i, message=Message(content=generation.text))
@@ -200,6 +210,8 @@ def to_json(self):
 def convert_tool_calls_by_raw(tool_calls: list) -> List[ToolCall]:
     format_tool_calls: List[ToolCall] = []
     for tool_call in tool_calls:
+        if tool_call is None:
+            continue
         function = ToolFunction(name=tool_call.get('name', ''), arguments=tool_call.get('args', {}))
         format_tool_calls.append(ToolCall(id=tool_call.get('id', ''), type=tool_call.get('type', ''), function=function))
     return format_tool_calls
@@ -208,6 +220,46 @@ def convert_tool_calls_by_raw(tool_calls: list) -> List[ToolCall]:
 def convert_tool_calls_by_additional_kwargs(tool_calls: list) -> List[ToolCall]:
     format_tool_calls: List[ToolCall] = []
     for tool_call in tool_calls:
-        function = ToolFunction(name=tool_call.get('function', {}).get('name', ''), arguments=json.loads(tool_call.get('function', {}).get('arguments', '{}')))
+        if tool_call is None or tool_call.get('function', {}) is None:
+            continue
+        raw_args = tool_call.get('function', {}).get('arguments', '{}')
+        final_args = None
+        try:
+            final_args = json.loads(raw_args)
+        except Exception as e:
+            final_args = raw_args
+            logger.error(f"convert_tool_calls_by_additional_kwargs failed, error: {e}, tool_call.function.arguments: {raw_args}")
+        function = ToolFunction(name=tool_call.get('function', {}).get('name', ''), arguments=final_args)
         format_tool_calls.append(ToolCall(id=tool_call.get('id', ''), type=tool_call.get('type', ''), function=function))
-    return format_tool_calls
\ No newline at end of file
+    return format_tool_calls
+
+
+def convert_output_message(message: BaseMessage) -> Message:
+    if message is None:
+        return None
+    tool_calls = convert_tool_calls_by_additional_kwargs(message.additional_kwargs.get('tool_calls', []))
+    if len(tool_calls) == 0 and isinstance(message, (AIMessage, AIMessageChunk)):
+        tool_calls = convert_tool_calls_by_raw(message.tool_calls)
+    if len(tool_calls) == 0 and 'function_call' in message.additional_kwargs:
+        function_call = message.additional_kwargs.get('function_call', {})
+        try:
+            arg = json.loads(function_call.get('arguments', {}))
+        except Exception as e:
+            logging.error(f"ModelTraceOutput.to_json arguments loads failed, exception: {e}")
+            arg = {}
+        function = ToolFunction(name=function_call.get('name', ''), arguments=arg)
+        tool_calls.append(ToolCall(function=function, type='function_call(deprecated)'))
+    metadata = {}
+    if message.response_metadata is not None:
+        if message.response_metadata.get('id', ''):
+            response_id = message.response_metadata.get('id', '')
+            metadata['id'] = response_id
+    message = Message(
+        role=message.type,
+        content=message.content,
+        tool_calls=tool_calls,
+        metadata=metadata,
+        reasoning_content=message.additional_kwargs.get('reasoning_content', ''),
+    )
+
+    return message
\ No newline at end of file
diff --git a/cozeloop/internal/version.py b/cozeloop/internal/version.py
index 0a924f8..10d35b1 100644
--- a/cozeloop/internal/version.py
+++ b/cozeloop/internal/version.py
@@ -1,4 +1,4 @@
 # Copyright (c) 2025 Bytedance Ltd. and/or its affiliates
 # SPDX-License-Identifier: MIT
 
-VERSION = 'v0.1.18'
\ No newline at end of file
+VERSION = 'v0.1.20'
\ No newline at end of file
diff --git a/examples/lcel/lcel.py b/examples/lcel/lcel.py
index 16a82f4..79dd322 100644
--- a/examples/lcel/lcel.py
+++ b/examples/lcel/lcel.py
@@ -12,6 +12,19 @@
 logger = logging.getLogger(__name__)
 
 
+def name_config_fn(node_name: str) -> str | None:
+    if node_name == "RunnableSequence":  # match on the original node name; with langgraph, this is the name passed to add_node(node_name, node_func)
+        return "RunnableSequence_modify"
+
+
+def tags_config_fn(node_name: str) -> dict | None:
+    if node_name == "RunnableSequence":  # match on the original node name
+        return {
+            "key1": "val1",
+            "key2": 2,
+        }
+
+
 def do_lcel_demo():
     # Configure the parameters for the large model. The keys in os.environ are standard keys for Langchain and must be
     # followed. This is just a demo, and the connectivity of the large model needs to be ensured by the user.
@@ -24,7 +37,7 @@ def do_lcel_demo():
     # os.environ['COZELOOP_WORKSPACE_ID'] = 'your workspace id'
 
     client = new_client()
-    trace_callback_handler = LoopTracer.get_callback_handler(client)
+    trace_callback_handler = LoopTracer.get_callback_handler(client, modify_name_fn=name_config_fn, add_tags_fn=tags_config_fn)
 
     # init llm model
     llm_model = ChatOpenAI(model="doubao-1-5-vision-pro-32k-250115", base_url="https://ark.cn-beijing.volces.com/api/v3")
diff --git a/pyproject.toml b/pyproject.toml
index ad1a87d..ff50f52 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "cozeloop"
-version = "0.1.19"
+version = "0.1.20"
 description = "coze loop sdk"
 authors = ["JiangQi715 "]
 license = "MIT"

From 93e2bbef18bd8803141b19bbe0f75d9771f83fb5 Mon Sep 17 00:00:00 2001
From: "jiangqi.rrt"
Date: Mon, 8 Dec 2025 12:01:21 +0800
Subject: [PATCH 3/3] update

---
 cozeloop/integration/langchain/trace_callback.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/cozeloop/integration/langchain/trace_callback.py b/cozeloop/integration/langchain/trace_callback.py
index caa1cf5..f5d4f72 100644
--- a/cozeloop/integration/langchain/trace_callback.py
+++ b/cozeloop/integration/langchain/trace_callback.py
@@ -200,7 +200,6 @@ def on_agent_finish(self, finish: AgentFinish, **kwargs: Any) -> Any:
         return
 
     def _end_flow_span(self, span: Span):
-        span.set_tags(self._tags)
         span.finish()
 
     def _get_model_tags(self, response: LLMResult, **kwargs: Any) -> Dict[str, Any]:
@@ -328,8 +327,9 @@ def _new_flow_span(self, node_name: str, span_type: str, **kwargs: Any) -> Span:
         # set default tags
         flow_span.set_runtime(RuntimeInfo())
         # set extra tags
+        flow_span.set_tags(self._tags)  # global tags
         try:
-            if self.tags_fn:
+            if self.tags_fn:  # add tags fn
                 tags = self.tags_fn(node_name)
                 if isinstance(tags, dict):
                     flow_span.set_tags(tags)