Skip to content

Commit df9d3ab

Browse files
authored
Propagate token usage when generating images with Gemini (#17987)
1 parent edfb414 commit df9d3ab

File tree

2 files changed

+47
-2
lines changed

2 files changed

+47
-2
lines changed

litellm/llms/vertex_ai/image_generation/vertex_gemini_transformation.py

Lines changed: 25 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -13,7 +13,7 @@
1313
AllMessageValues,
1414
OpenAIImageGenerationOptionalParams,
1515
)
16-
from litellm.types.utils import ImageObject, ImageResponse
16+
from litellm.types.utils import ImageObject, ImageResponse, ImageUsage, ImageUsageInputTokensDetails
1717

1818
if TYPE_CHECKING:
1919
from litellm.litellm_core_utils.litellm_logging import Logging as _LiteLLMLoggingObj
@@ -234,6 +234,27 @@ def transform_image_generation_request(
234234

235235
return request_body
236236

def _transform_image_usage(self, usage: dict) -> ImageUsage:
    """Convert a Gemini ``usageMetadata`` dict into an ``ImageUsage``.

    Walks the ``promptTokensDetails`` entries, summing per-modality token
    counts (``TEXT`` and ``IMAGE``), and copies the top-level prompt,
    candidate, and total counters. Every missing field defaults to 0.

    Args:
        usage: The raw ``usageMetadata`` mapping from a Gemini
            image-generation response.

    Returns:
        An ``ImageUsage`` with ``input_tokens_details`` split by modality.
    """
    text_tokens = 0
    image_tokens = 0
    for entry in usage.get("promptTokensDetails", []):
        # Defensively skip malformed entries or ones with no modality set.
        if not isinstance(entry, dict):
            continue
        modality = entry.get("modality")
        if not modality:
            continue
        count = entry.get("tokenCount", 0)
        if modality == "TEXT":
            text_tokens += count
        elif modality == "IMAGE":
            image_tokens += count

    return ImageUsage(
        input_tokens=usage.get("promptTokenCount", 0),
        input_tokens_details=ImageUsageInputTokensDetails(
            image_tokens=image_tokens,
            text_tokens=text_tokens,
        ),
        output_tokens=usage.get("candidatesTokenCount", 0),
        total_tokens=usage.get("totalTokenCount", 0),
    )
237258
def transform_image_generation_response(
238259
self,
239260
model: str,
@@ -276,6 +297,9 @@ def transform_image_generation_response(
276297
b64_json=inline_data["data"],
277298
url=None,
278299
))
300+
301+
if usage_metadata := response_data.get("usageMetadata", None):
302+
model_response.usage = self._transform_image_usage(usage_metadata)
279303

280304
return model_response
281305

tests/test_litellm/llms/vertex_ai/image_generation/test_vertex_ai_image_generation_transformation.py

Lines changed: 22 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -141,7 +141,22 @@ def test_transform_image_generation_response(self):
141141
]
142142
}
143143
}
144-
]
144+
],
145+
"usageMetadata": {
146+
"promptTokenCount": 93,
147+
"promptTokensDetails": [
148+
{
149+
"modality": "TEXT",
150+
"tokenCount": 54,
151+
},
152+
{
153+
"modality": "IMAGE",
154+
"tokenCount": 39,
155+
}
156+
],
157+
"candidatesTokenCount": 17,
158+
"totalTokenCount": 110,
159+
}
145160
}
146161
mock_response.headers = {}
147162

@@ -162,6 +177,12 @@ def test_transform_image_generation_response(self):
162177
assert len(result.data) == 1
163178
assert result.data[0].b64_json == "base64_encoded_image_data"
164179
assert result.data[0].url is None
180+
assert result.usage.input_tokens == 93
181+
assert result.usage.input_tokens_details.text_tokens == 54
182+
assert result.usage.input_tokens_details.image_tokens == 39
183+
assert result.usage.output_tokens == 17
184+
assert result.usage.total_tokens == 110
185+
165186

166187
def test_transform_image_generation_response_multiple_images(self):
167188
"""Test response transformation with multiple images"""

0 commit comments

Comments
 (0)