Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
Expand Up @@ -7,6 +7,9 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0

## Unreleased

- Fix service tier attribute names: use `GEN_AI_OPENAI_REQUEST_SERVICE_TIER` for request
attributes and `GEN_AI_OPENAI_RESPONSE_SERVICE_TIER` for response attributes.
([#3920](https://github.com/open-telemetry/opentelemetry-python-contrib/issues/3920))
- Added support for OpenAI embeddings instrumentation
([#3461](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3461))
- Record prompt and completion events regardless of span sampling decision.
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -370,7 +370,7 @@ def _set_response_attributes(
if getattr(result, "service_tier", None):
set_span_attribute(
span,
GenAIAttributes.GEN_AI_OPENAI_REQUEST_SERVICE_TIER,
GenAIAttributes.GEN_AI_OPENAI_RESPONSE_SERVICE_TIER,
result.service_tier,
)

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -230,8 +230,13 @@ def get_llm_request_attributes(
GenAIAttributes.GEN_AI_OPENAI_REQUEST_RESPONSE_FORMAT
] = response_format

# service_tier can be passed directly or in extra_body (in SDK 1.26.0 it's via extra_body)
service_tier = kwargs.get("service_tier")
attributes[GenAIAttributes.GEN_AI_OPENAI_RESPONSE_SERVICE_TIER] = (
if service_tier is None:
extra_body = kwargs.get("extra_body")
if isinstance(extra_body, Mapping):
service_tier = extra_body.get("service_tier")
attributes[GenAIAttributes.GEN_AI_OPENAI_REQUEST_SERVICE_TIER] = (
service_tier if service_tier != "auto" else None
)

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -183,6 +183,8 @@ async def test_async_chat_completion_extra_params(
response.model,
response.usage.prompt_tokens,
response.usage.completion_tokens,
request_service_tier="default",
response_service_tier=getattr(response, "service_tier", None),
)
assert (
spans[0].attributes[GenAIAttributes.GEN_AI_OPENAI_REQUEST_SEED] == 42
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -221,6 +221,8 @@ def test_chat_completion_extra_params(
response.model,
response.usage.prompt_tokens,
response.usage.completion_tokens,
request_service_tier="default",
response_service_tier=getattr(response, "service_tier", None),
)
assert (
spans[0].attributes[GenAIAttributes.GEN_AI_OPENAI_REQUEST_SEED] == 42
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -25,6 +25,14 @@
)


def _assert_optional_attribute(span, attribute_name, expected_value):
    """Assert an optional span attribute.

    When *expected_value* is None the attribute must be absent from
    ``span.attributes``; otherwise it must be present and equal to
    *expected_value*.
    """
    if expected_value is None:
        assert attribute_name not in span.attributes
    else:
        assert span.attributes[attribute_name] == expected_value


def assert_all_attributes(
span: ReadableSpan,
request_model: str,
Expand All @@ -35,6 +43,8 @@ def assert_all_attributes(
operation_name: str = "chat",
server_address: str = "api.openai.com",
server_port: int = 443,
request_service_tier: Optional[str] = None,
response_service_tier: Optional[str] = None,
):
assert span.name == f"{operation_name} {request_model}"
assert (
Expand All @@ -49,44 +59,35 @@ def assert_all_attributes(
request_model == span.attributes[GenAIAttributes.GEN_AI_REQUEST_MODEL]
)

if response_model:
assert (
response_model
== span.attributes[GenAIAttributes.GEN_AI_RESPONSE_MODEL]
)
else:
assert GenAIAttributes.GEN_AI_RESPONSE_MODEL not in span.attributes

if response_id:
assert (
response_id == span.attributes[GenAIAttributes.GEN_AI_RESPONSE_ID]
)
else:
assert GenAIAttributes.GEN_AI_RESPONSE_ID not in span.attributes

if input_tokens:
assert (
input_tokens
== span.attributes[GenAIAttributes.GEN_AI_USAGE_INPUT_TOKENS]
)
else:
assert GenAIAttributes.GEN_AI_USAGE_INPUT_TOKENS not in span.attributes

if output_tokens:
assert (
output_tokens
== span.attributes[GenAIAttributes.GEN_AI_USAGE_OUTPUT_TOKENS]
)
else:
assert (
GenAIAttributes.GEN_AI_USAGE_OUTPUT_TOKENS not in span.attributes
)
_assert_optional_attribute(
span, GenAIAttributes.GEN_AI_RESPONSE_MODEL, response_model
)
_assert_optional_attribute(
span, GenAIAttributes.GEN_AI_RESPONSE_ID, response_id
)
_assert_optional_attribute(
span, GenAIAttributes.GEN_AI_USAGE_INPUT_TOKENS, input_tokens
)
_assert_optional_attribute(
span, GenAIAttributes.GEN_AI_USAGE_OUTPUT_TOKENS, output_tokens
)

assert server_address == span.attributes[ServerAttributes.SERVER_ADDRESS]

if server_port != 443 and server_port > 0:
assert server_port == span.attributes[ServerAttributes.SERVER_PORT]

_assert_optional_attribute(
span,
GenAIAttributes.GEN_AI_OPENAI_REQUEST_SERVICE_TIER,
request_service_tier,
)
_assert_optional_attribute(
span,
GenAIAttributes.GEN_AI_OPENAI_RESPONSE_SERVICE_TIER,
response_service_tier,
)


def assert_log_parent(log, span):
"""Assert that the log record has the correct parent span context"""
Expand Down