From f100c1dd93d304ea01fcbe3284d6c83a5bbfcc42 Mon Sep 17 00:00:00 2001
From: harvey_xiang
Date: Wed, 10 Dec 2025 10:25:44 +0800
Subject: [PATCH 1/6] feat: timer false

---
 src/memos/utils.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/memos/utils.py b/src/memos/utils.py
index a29eaf99..cb50aab8 100644
--- a/src/memos/utils.py
+++ b/src/memos/utils.py
@@ -80,7 +80,7 @@ def wrapper(*args, **kwargs):
     return decorator(func)
 
 
-def timed(func=None, *, log=True, log_prefix=""):
+def timed(func=None, *, log=False, log_prefix=""):
     def decorator(fn):
         def wrapper(*args, **kwargs):
             start = time.perf_counter()

From 308cd6b7fa8a12761a4b9780844401cae9cac2da Mon Sep 17 00:00:00 2001
From: harvey_xiang
Date: Wed, 10 Dec 2025 10:30:38 +0800
Subject: [PATCH 2/6] feat: timer false

---
 src/memos/log.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/memos/log.py b/src/memos/log.py
index c0bb5bf3..511b5b26 100644
--- a/src/memos/log.py
+++ b/src/memos/log.py
@@ -200,7 +200,7 @@ def close(self):
             "class": "concurrent_log_handler.ConcurrentTimedRotatingFileHandler",
             "when": "midnight",
             "interval": 1,
-            "backupCount": 3,
+            "backupCount": 1,
             "filename": _setup_logfile(),
             "formatter": "standard",
             "filters": ["context_filter"],

From e51376311c3698eea725d90a8b7dfbc96eae68c6 Mon Sep 17 00:00:00 2001
From: harvey_xiang
Date: Thu, 11 Dec 2025 17:44:08 +0800
Subject: [PATCH 3/6] feat: add model log

---
 src/memos/llms/openai.py | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/src/memos/llms/openai.py b/src/memos/llms/openai.py
index 35a9c711..ae833f05 100644
--- a/src/memos/llms/openai.py
+++ b/src/memos/llms/openai.py
@@ -31,6 +31,9 @@ def __init__(self, config: OpenAILLMConfig):
     @timed_with_status(log_prefix="OpenAI LLM", log_args=["model_name_or_path"])
     def generate(self, messages: MessageList, **kwargs) -> str:
         """Generate a response from OpenAI LLM, optionally overriding generation params."""
+        logger.info(
+            f"LLM Model: {self.config.model_name_or_path} {kwargs.get('model_name_or_path')}"
+        )
         response = self.client.chat.completions.create(
             model=kwargs.get("model_name_or_path", self.config.model_name_or_path),
             messages=messages,

From 1f645a446cc23255bf65313dcf1263d063b727e3 Mon Sep 17 00:00:00 2001
From: harvey_xiang
Date: Thu, 11 Dec 2025 18:55:06 +0800
Subject: [PATCH 4/6] feat: add model_name

---
 src/memos/llms/openai.py | 17 ++++++++++++-----
 src/memos/utils.py       | 22 ++++++++++++++++++----
 2 files changed, 30 insertions(+), 9 deletions(-)

diff --git a/src/memos/llms/openai.py b/src/memos/llms/openai.py
index ae833f05..1d180eeb 100644
--- a/src/memos/llms/openai.py
+++ b/src/memos/llms/openai.py
@@ -28,12 +28,14 @@ def __init__(self, config: OpenAILLMConfig):
         )
         logger.info("OpenAI LLM instance initialized")
 
-    @timed_with_status(log_prefix="OpenAI LLM", log_args=["model_name_or_path"])
+    @timed_with_status(
+        log_prefix="OpenAI LLM",
+        log_extra_args=lambda self, messages, **kwargs: {
+            "model_name_or_path": kwargs.get("model_name_or_path", self.config.model_name_or_path)
+        },
+    )
     def generate(self, messages: MessageList, **kwargs) -> str:
         """Generate a response from OpenAI LLM, optionally overriding generation params."""
-        logger.info(
-            f"LLM Model: {self.config.model_name_or_path} {kwargs.get('model_name_or_path')}"
-        )
         response = self.client.chat.completions.create(
             model=kwargs.get("model_name_or_path", self.config.model_name_or_path),
             messages=messages,
@@ -58,7 +60,12 @@ def generate(self, messages: MessageList, **kwargs) -> str:
             return reasoning_content + response_content
         return response_content
 
-    @timed_with_status(log_prefix="OpenAI LLM", log_args=["model_name_or_path"])
+    @timed_with_status(
+        log_prefix="OpenAI LLM",
+        log_extra_args=lambda self, messages, **kwargs: {
+            "model_name_or_path": self.config.model_name_or_path
+        },
+    )
     def generate_stream(self, messages: MessageList, **kwargs) -> Generator[str, None, None]:
         """Stream response from OpenAI LLM with optional reasoning support."""
         if kwargs.get("tools"):
diff --git a/src/memos/utils.py b/src/memos/utils.py
index cb50aab8..7767747d 100644
--- a/src/memos/utils.py
+++ b/src/memos/utils.py
@@ -19,8 +19,10 @@ def timed_with_status(
     Parameters:
     - log: enable timing logs (default True)
    - log_prefix: prefix; falls back to function name
-    - log_args: names to include in logs (str or list/tuple of str).
-    - log_extra_args: extra arguments to include in logs (dict).
+    - log_args: names to include in logs (str or list/tuple of str); values are taken from kwargs by name.
+    - log_extra_args:
+        - a dict: fixed contextual fields that are always attached to logs;
+        - or a callable with signature `fn(*args, **kwargs) -> dict`, used to generate contextual fields dynamically at runtime.
     """
 
     if isinstance(log_args, str):
@@ -51,12 +53,24 @@ def wrapper(*args, **kwargs):
             elapsed_ms = (time.perf_counter() - start) * 1000.0
 
             ctx_parts = []
+            # 1) Collect parameters from kwargs by name
             for key in effective_log_args:
                 val = kwargs.get(key)
                 ctx_parts.append(f"{key}={val}")
 
-            if log_extra_args:
-                ctx_parts.extend(f"{key}={val}" for key, val in log_extra_args.items())
+            # 2) Support log_extra_args as dict or callable, so we can dynamically
+            #    extract values from self or other runtime context
+            extra_items = {}
+            try:
+                if callable(log_extra_args):
+                    extra_items = log_extra_args(*args, **kwargs) or {}
+                elif isinstance(log_extra_args, dict):
+                    extra_items = log_extra_args
+            except Exception as e:
+                logger.warning(f"[TIMER_WITH_STATUS] log_extra_args callback error: {e!r}")
+
+            if extra_items:
+                ctx_parts.extend(f"{key}={val}" for key, val in extra_items.items())
 
             ctx_str = f" [{', '.join(ctx_parts)}]" if ctx_parts else ""
 

From 350971faef7c6f1d0237bbd544c1271704a7e840 Mon Sep 17 00:00:00 2001
From: harvey_xiang
Date: Thu, 11 Dec 2025 19:49:22 +0800
Subject: [PATCH 5/6] feat: add model_name

---
 src/memos/log.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/memos/log.py b/src/memos/log.py
index 511b5b26..c0bb5bf3 100644
--- a/src/memos/log.py
+++ b/src/memos/log.py
@@ -200,7 +200,7 @@ def close(self):
             "class": "concurrent_log_handler.ConcurrentTimedRotatingFileHandler",
             "when": "midnight",
             "interval": 1,
-            "backupCount": 1,
+            "backupCount": 3,
             "filename": _setup_logfile(),
             "formatter": "standard",
             "filters": ["context_filter"],

From 5dbb31854deb01cce1af6976c036bfe59338122c Mon Sep 17 00:00:00 2001
From: harvey_xiang
Date: Thu, 11 Dec 2025 23:04:18 +0800
Subject: [PATCH 6/6] feat: add model_name

---
 src/memos/utils.py | 11 ++---------
 1 file changed, 2 insertions(+), 9 deletions(-)

diff --git a/src/memos/utils.py b/src/memos/utils.py
index 4ed099c5..d787b7ae 100644
--- a/src/memos/utils.py
+++ b/src/memos/utils.py
@@ -87,15 +87,8 @@ def wrapper(*args, **kwargs):
                 f"[TIMER_WITH_STATUS] {log_prefix or fn.__name__} "
                 f"took {elapsed_ms:.0f} ms{status_info}, args: {ctx_str}"
             )
-            threshold_ms = DEFAULT_TIME_BAR * 1000.0
-            if log_extra_args and "time_threshold" in log_extra_args:
-                try:
-                    threshold_ms = float(log_extra_args["time_threshold"]) * 1000.0
-                except Exception:
-                    threshold_ms = DEFAULT_TIME_BAR * 1000.0
-
-            if elapsed_ms >= threshold_ms:
-                logger.info(msg)
+
+            logger.info(msg)
 
         return wrapper
 
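
Note on usage (illustration only, not part of the patches): after PATCH 4/6, `timed_with_status` accepts `log_extra_args` either as a fixed dict or as a callable that receives the wrapped call's positional and keyword arguments and returns extra fields to log; this is how the OpenAI `generate` decorator resolves the effective model name at call time. The sketch below is a minimal, self-contained approximation of that calling convention. The decorator body is simplified (no status tracking, no time threshold), and `FakeLLM`, its `default_model` attribute, and the log format are made-up stand-ins rather than code from `src/memos/utils.py` or `src/memos/llms/openai.py`.

import functools
import logging
import time

logger = logging.getLogger(__name__)


def timed_with_status(func=None, *, log=True, log_prefix="", log_args=(), log_extra_args=None):
    """Simplified sketch of the decorator shape used in the patches (not the memos implementation)."""
    if isinstance(log_args, str):
        log_args = [log_args]

    def decorator(fn):
        @functools.wraps(fn)
        def wrapper(*args, **kwargs):
            start = time.perf_counter()
            result = fn(*args, **kwargs)
            elapsed_ms = (time.perf_counter() - start) * 1000.0

            # Plain names in log_args are looked up in kwargs.
            ctx_parts = [f"{key}={kwargs.get(key)}" for key in log_args]

            # log_extra_args may be a fixed dict, or a callable that sees the
            # wrapped call's (*args, **kwargs) and returns extra fields to log.
            extra_items = {}
            try:
                if callable(log_extra_args):
                    extra_items = log_extra_args(*args, **kwargs) or {}
                elif isinstance(log_extra_args, dict):
                    extra_items = log_extra_args
            except Exception as e:
                logger.warning(f"log_extra_args callback error: {e!r}")
            ctx_parts.extend(f"{k}={v}" for k, v in extra_items.items())

            if log:
                ctx = f" [{', '.join(ctx_parts)}]" if ctx_parts else ""
                logger.info(f"{log_prefix or fn.__name__} took {elapsed_ms:.0f} ms{ctx}")
            return result

        return wrapper

    return decorator if func is None else decorator(func)


class FakeLLM:
    """Hypothetical stand-in for OpenAILLM, only to show the callable log_extra_args pattern."""

    def __init__(self, default_model):
        self.default_model = default_model

    @timed_with_status(
        log_prefix="Fake LLM",
        # Callable form: resolve the effective model from self and kwargs at call time.
        log_extra_args=lambda self, messages, **kwargs: {
            "model_name_or_path": kwargs.get("model_name_or_path", self.default_model)
        },
    )
    def generate(self, messages, **kwargs):
        return f"echo: {messages[-1]['content']}"


if __name__ == "__main__":
    logging.basicConfig(level=logging.INFO)
    llm = FakeLLM("gpt-4o-mini")
    llm.generate([{"role": "user", "content": "hi"}], model_name_or_path="gpt-4o")
    # Expected log line, roughly: "Fake LLM took 0 ms [model_name_or_path=gpt-4o]"

Compared with the inline logger.info added in PATCH 3/6 and removed again in PATCH 4/6, the callable hook keeps the model-name lookup inside the decorator, so the timing and the model context land in a single log record.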