diff --git a/ddtrace/llmobs/_llmobs.py b/ddtrace/llmobs/_llmobs.py
index 8336ee40991..e1de8a074d8 100644
--- a/ddtrace/llmobs/_llmobs.py
+++ b/ddtrace/llmobs/_llmobs.py
@@ -4,6 +4,7 @@
 import inspect
 import json
 import os
+import sys
 import time
 from typing import Any
 from typing import Callable
@@ -645,6 +646,8 @@ def enable(
             log.debug("%s already enabled", cls.__name__)
             return
 
+        cls._warn_if_litellm_was_imported()
+
         if os.getenv("DD_LLMOBS_ENABLED") and not asbool(os.getenv("DD_LLMOBS_ENABLED")):
             log.debug("LLMObs.enable() called when DD_LLMOBS_ENABLED is set to false or 0, not starting LLMObs service")
             return
@@ -767,6 +770,20 @@ def enable(
             config._llmobs_ml_app,
         )
 
+    @staticmethod
+    def _warn_if_litellm_was_imported() -> None:
+        if "litellm" in sys.modules:
+            import litellm
+
+            if not getattr(litellm, "_datadog_patch", False):
+                log.warning(
+                    "LLMObs.enable() called after litellm was imported but before it was patched. "
+                    "This may cause tracing issues if you are importing patched methods like 'litellm.completion' "
+                    "directly. To ensure proper tracing, either run your application with ddtrace-run, "
+                    "call ddtrace.patch_all() before importing litellm, or "
+                    "enable LLMObs before importing other modules."
+                )
+
     def _on_asyncio_create_task(self, task_data: Dict[str, Any]) -> None:
         """Propagates llmobs active trace context across asyncio tasks."""
         task_data["llmobs_ctx"] = self._current_trace_context()
diff --git a/tests/contrib/litellm/conftest.py b/tests/contrib/litellm/conftest.py
index 62ea0c234fd..be0754b82e7 100644
--- a/tests/contrib/litellm/conftest.py
+++ b/tests/contrib/litellm/conftest.py
@@ -1,4 +1,3 @@
-from litellm import Router
 import pytest
 
 from ddtrace._trace.pin import Pin
@@ -81,4 +80,6 @@ def request_vcr_include_localhost():
 
 @pytest.fixture
 def router():
+    from litellm import Router
+
     yield Router(model_list=model_list)
diff --git a/tests/contrib/litellm/test_litellm_llmobs.py b/tests/contrib/litellm/test_litellm_llmobs.py
index 5c039c56d55..619797dd983 100644
--- a/tests/contrib/litellm/test_litellm_llmobs.py
+++ b/tests/contrib/litellm/test_litellm_llmobs.py
@@ -488,3 +488,37 @@ def test_completion_openai_enabled(
 
     assert len(llmobs_events) == 1
     assert llmobs_events[0]["name"] == "OpenAI.createChatCompletion" if not stream else "litellm.request"
+
+
+def test_enable_llmobs_after_litellm_was_imported(run_python_code_in_subprocess):
+    """
+    Test that LLMObs.enable() logs a warning if litellm is imported before LLMObs.enable() is called.
+    """
+    _, err, _, _ = run_python_code_in_subprocess(
+        """
+import litellm
+from ddtrace.llmobs import LLMObs
+LLMObs.enable(ml_app="", integrations_enabled=False)
+assert LLMObs.enabled
+LLMObs.disable()
+"""
+    )
+
+    assert ("LLMObs.enable() called after litellm was imported but before it was patched") in err.decode()
+
+
+def test_import_litellm_after_llmobs_was_enabled(run_python_code_in_subprocess):
+    """
+    Test that LLMObs.enable() does not log a warning if litellm is imported after LLMObs.enable() is called.
+    """
+    _, err, _, _ = run_python_code_in_subprocess(
+        """
+from ddtrace.llmobs import LLMObs
+LLMObs.enable(ml_app="", integrations_enabled=False)
+assert LLMObs.enabled
+import litellm
+LLMObs.disable()
+"""
+    )
+
+    assert ("LLMObs.enable() called after litellm was imported but before it was patched") not in err.decode()
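
For context, a minimal usage sketch of the import ordering this warning encodes (illustrative only, not part of the patch; "my-app" is a placeholder ml_app value):

    # Recommended ordering per the new warning: enable LLMObs before importing
    # litellm, or run under ddtrace-run / call ddtrace.patch_all() first.
    from ddtrace.llmobs import LLMObs

    LLMObs.enable(ml_app="my-app")  # placeholder ml_app

    import litellm  # imported after enable(): no warning is logged (second test above)

    # By contrast, `import litellm` (or `from litellm import completion`) before
    # LLMObs.enable() logs the new warning, since a directly bound, unpatched
    # `completion` reference may not be traced (first test above).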