Skip to content
17 changes: 17 additions & 0 deletions ddtrace/llmobs/_llmobs.py
Original file line number Diff line number Diff line change
Expand Up @@ -4,6 +4,7 @@
import inspect
import json
import os
import sys
import time
from typing import Any
from typing import Callable
Expand Down Expand Up @@ -645,6 +646,8 @@ def enable(
log.debug("%s already enabled", cls.__name__)
return

cls._warn_if_litellm_was_imported()

if os.getenv("DD_LLMOBS_ENABLED") and not asbool(os.getenv("DD_LLMOBS_ENABLED")):
log.debug("LLMObs.enable() called when DD_LLMOBS_ENABLED is set to false or 0, not starting LLMObs service")
return
Expand Down Expand Up @@ -767,6 +770,20 @@ def enable(
config._llmobs_ml_app,
)

@staticmethod
def _warn_if_litellm_was_imported() -> None:
    """Warn when litellm was imported before LLMObs patching took effect.

    If ``litellm`` is already present in ``sys.modules`` but does not carry the
    ``_datadog_patch`` marker (presumably set by the ddtrace litellm
    integration once patching ran -- confirm against the integration code),
    callers may already hold references to unpatched callables such as
    ``litellm.completion``, so a warning is emitted.
    """
    # Guard clause: nothing to do if litellm was never imported.
    if "litellm" not in sys.modules:
        return
    litellm_module = sys.modules["litellm"]
    # Guard clause: already patched, no warning needed.
    if getattr(litellm_module, "_datadog_patch", False):
        return
    log.warning(
        "LLMObs.enable() called after litellm was imported but before it was patched. "
        "This may cause tracing issues if you are importing patched methods like 'litellm.completion' "
        "directly. To ensure proper tracing, either run your application with ddtrace-run, "
        "call ddtrace.patch_all() before importing litellm, or "
        "enable LLMObs before importing other modules."
    )

def _on_asyncio_create_task(self, task_data: Dict[str, Any]) -> None:
"""Propagates llmobs active trace context across asyncio tasks."""
task_data["llmobs_ctx"] = self._current_trace_context()
Expand Down
3 changes: 2 additions & 1 deletion tests/contrib/litellm/conftest.py
Original file line number Diff line number Diff line change
@@ -1,4 +1,3 @@
from litellm import Router
import pytest

from ddtrace._trace.pin import Pin
Expand Down Expand Up @@ -81,4 +80,6 @@ def request_vcr_include_localhost():

@pytest.fixture
def router():
    # NOTE(review): litellm is imported lazily inside the fixture rather than at
    # module top-level -- presumably so collecting this conftest does not pull in
    # litellm before ddtrace patching/LLMObs setup runs in the tests that check
    # import-order behavior; confirm against the llmobs import-order tests.
    from litellm import Router

    yield Router(model_list=model_list)
34 changes: 34 additions & 0 deletions tests/contrib/litellm/test_litellm_llmobs.py
Original file line number Diff line number Diff line change
Expand Up @@ -488,3 +488,37 @@ def test_completion_openai_enabled(

assert len(llmobs_events) == 1
assert llmobs_events[0]["name"] == "OpenAI.createChatCompletion" if not stream else "litellm.request"


def test_enable_llmobs_after_litellm_was_imported(run_python_code_in_subprocess):
    """
    Test that LLMObs.enable() logs a warning if litellm is imported before LLMObs.enable() is called.
    """
    # Import litellm first, then enable LLMObs, in an isolated interpreter.
    program = """
import litellm
from ddtrace.llmobs import LLMObs
LLMObs.enable(ml_app="<ml-app-name>", integrations_enabled=False)
assert LLMObs.enabled
LLMObs.disable()
"""
    _, err, _, _ = run_python_code_in_subprocess(program)

    # The import-order warning must have been written to stderr.
    expected_warning = "LLMObs.enable() called after litellm was imported but before it was patched"
    assert expected_warning in err.decode()


def test_import_litellm_after_llmobs_was_enabled(run_python_code_in_subprocess):
    """
    Test that LLMObs.enable() does not log a warning if litellm is imported after LLMObs.enable() is called.
    """
    _, err, _, _ = run_python_code_in_subprocess(
        """
from ddtrace.llmobs import LLMObs
LLMObs.enable(ml_app="<ml-app-name>", integrations_enabled=False)
assert LLMObs.enabled
import litellm
LLMObs.disable()
"""
    )

    # Correct ordering (enable before import) must not produce the warning.
    assert ("LLMObs.enable() called after litellm was imported but before it was patched") not in err.decode()
Loading