@@ -9,6 +9,8 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0

- Record prompt and completion events regardless of span sampling decision.
([#3226](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3226))
- Filter out attributes with the value of NOT_GIVEN for chat.completions.create.
([#3760](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3760))
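
  For context (an illustrative sketch, not part of the diff): the OpenAI Python SDK uses the `NOT_GIVEN` sentinel as the default for optional request parameters, and callers or higher-level wrappers can pass it through explicitly, as the new test later in this diff does with `top_p=NOT_GIVEN`. A minimal sketch of the filtering idea over a plain dict of request kwargs:

  ```python
  from openai import NOT_GIVEN

  # Request kwargs as an instrumentation wrapper might receive them; top_p was
  # passed as the SDK's NOT_GIVEN sentinel and n was explicitly None.
  kwargs = {"model": "gpt-4o-mini", "temperature": 0.2, "top_p": NOT_GIVEN, "n": None}

  # Keep only values that were actually provided.
  attributes = {k: v for k, v in kwargs.items() if v is not None and v != NOT_GIVEN}

  assert "top_p" not in attributes and "n" not in attributes
  ```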

## Version 2.1b0 (2025-01-18)

@@ -30,4 +32,4 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
([#2925](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/2925))

- Initial OpenAI instrumentation
([#2759](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/2759))
([#2759](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/2759))
@@ -225,8 +225,10 @@ def get_llm_request_attributes(
            service_tier if service_tier != "auto" else None
        )

    # filter out None values
    return {k: v for k, v in attributes.items() if v is not None}
    # filter out None values and NOT_GIVEN values
Reviewer comment (Contributor): Instead of explicitly defining this logic here, would it make more sense to create a function for this, similar to `non_numerical_value_is_set`?

    return {
        k: v for k, v in attributes.items() if v is not None and v != NOT_GIVEN
    }
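
A hypothetical helper along the lines the reviewer suggests; the name `value_is_set`, its docstring, and its placement are illustrative assumptions, not code from this PR:

```python
from openai import NOT_GIVEN


def value_is_set(value):
    """Return True if a request parameter was actually provided.

    Filters out both None and the OpenAI SDK's NOT_GIVEN sentinel.
    """
    return value is not None and value != NOT_GIVEN


# The comprehension above would then read:
# return {k: v for k, v in attributes.items() if value_is_set(v)}
```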


def handle_span_exception(span, error):
@@ -13,10 +13,11 @@
# limitations under the License.
# pylint: disable=too-many-locals

import logging
from typing import Optional

import pytest
from openai import APIConnectionError, NotFoundError, OpenAI
from openai import NOT_GIVEN, APIConnectionError, NotFoundError, OpenAI
from openai.resources.chat.completions import ChatCompletion

from opentelemetry.sdk.trace import ReadableSpan
@@ -43,7 +44,9 @@ def test_chat_completion_with_content(
    messages_value = [{"role": "user", "content": "Say this is a test"}]

    response = openai_client.chat.completions.create(
        messages=messages_value, model=llm_model_value, stream=False
        messages=messages_value,
        model=llm_model_value,
        stream=False,
    )

    spans = span_exporter.get_finished_spans()
@@ -68,6 +71,30 @@ assert_message_in_logs(logs[1], "gen_ai.choice", choice_event, spans[0])
    assert_message_in_logs(logs[1], "gen_ai.choice", choice_event, spans[0])


@pytest.mark.vcr()
def test_chat_completion_handles_not_given(
    span_exporter, log_exporter, openai_client, instrument_no_content, caplog
):
    caplog.set_level(logging.WARNING)
    llm_model_value = "gpt-4o-mini"
    messages_value = [{"role": "user", "content": "Say this is a test"}]

    response = openai_client.chat.completions.create(
        messages=messages_value,
        model=llm_model_value,
        stream=False,
        top_p=NOT_GIVEN,
    )

    spans = span_exporter.get_finished_spans()
    assert_completion_attributes(spans[0], llm_model_value, response)

    logs = log_exporter.get_finished_logs()
    assert len(logs) == 2

    assert_no_invalid_type_warning(caplog)


@pytest.mark.vcr()
def test_chat_completion_no_content(
    span_exporter, log_exporter, openai_client, instrument_no_content
@@ -947,3 +974,7 @@ def get_current_weather_tool_definition():
            },
        },
    }


def assert_no_invalid_type_warning(caplog):
    # The OpenTelemetry SDK logs an "Invalid type ..." warning for attribute
    # values with unsupported types (such as NOT_GIVEN), so none should appear.
    assert "Invalid type" not in caplog.text