Skip to content

Commit dee2442

Browse files
Decorate models that don't use infer
1 parent bd0ef2d commit dee2442

File tree

4 files changed

+16
-1
lines changed

4 files changed

+16
-1
lines changed

inference/models/clip/clip_model.py

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -24,12 +24,12 @@
2424
CLIP_MODEL_ID,
2525
ONNXRUNTIME_EXECUTION_PROVIDERS,
2626
REQUIRED_ONNX_PROVIDERS,
27-
TENSORRT_CACHE_PATH,
2827
)
2928
from inference.core.exceptions import OnnxProviderNotAvailable
3029
from inference.core.models.roboflow import OnnxRoboflowCoreModel
3130
from inference.core.models.types import PreprocessReturnMetadata
3231
from inference.core.models.utils.batching import create_batches
32+
from inference.usage_tracking.collector import usage_collector
3333
from inference.core.utils.image_utils import load_image_rgb
3434
from inference.core.utils.onnx import get_onnxruntime_execution_providers
3535
from inference.core.utils.postprocess import cosine_similarity
@@ -304,6 +304,7 @@ def get_infer_bucket_file_list(self) -> List[str]:
304304
"""
305305
return ["textual.onnx", "visual.onnx"]
306306

307+
@usage_collector("model")
307308
def infer_from_request(
308309
self, request: ClipInferenceRequest
309310
) -> ClipEmbeddingResponse:

inference/models/depth_estimation/depthestimation.py

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -21,6 +21,7 @@
2121
WorkflowImageData,
2222
)
2323
from inference.models.transformers import TransformerModel
24+
from inference.usage_tracking.collector import usage_collector
2425

2526

2627
class DepthEstimator(TransformerModel):
@@ -50,6 +51,7 @@ def __init__(self, *args, **kwargs):
5051
"Running DepthPro on CPU. This may be very slow. Consider using GPU or MPS if available."
5152
)
5253

54+
@usage_collector("model")
5355
def predict(self, image_in: Image.Image, prompt="", history=None, **kwargs):
5456
try:
5557
# Process input image

inference/models/sam3/segment_anything3.py

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -65,6 +65,7 @@
6565
)
6666
from inference.core.utils.image_utils import load_image_rgb
6767
from inference.core.utils.postprocess import masks2multipoly
68+
from inference.usage_tracking.collector import usage_collector
6869

6970

7071
def _to_numpy_masks(masks_any) -> np.ndarray:
@@ -372,6 +373,7 @@ def infer_from_request(self, request: Sam3InferenceRequest):
372373
else:
373374
raise ValueError(f"Invalid request type {type(request)}")
374375

376+
@usage_collector("model")
375377
def segment_image(
376378
self,
377379
image: Optional[InferenceRequestImage],

inference/usage_tracking/decorator_helpers.py

Lines changed: 10 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -109,6 +109,10 @@ def get_request_resource_id_from_kwargs(func_kwargs: Dict[str, Any]) -> Optional
109109
return str(dataset_id)
110110
if hasattr(inference_request, "model_id"):
111111
return str(inference_request.model_id)
112+
if "request" in func_kwargs:
113+
request = func_kwargs["request"]
114+
if hasattr(request, "model_id"):
115+
return str(request.model_id)
112116
if "dataset_id" in func_kwargs and "version_id" in func_kwargs:
113117
dataset_id = func_kwargs["dataset_id"]
114118
version_id = func_kwargs["version_id"]
@@ -121,6 +125,12 @@ def get_request_resource_id_from_kwargs(func_kwargs: Dict[str, Any]) -> Optional
121125
workflow_request = func_kwargs["workflow_request"]
122126
if hasattr(workflow_request, "workflow_id"):
123127
return str(workflow_request.workflow_id)
128+
if "self" in func_kwargs:
129+
_self = func_kwargs["self"]
130+
if hasattr(_self, "model_id"):
131+
return str(_self.model_id)
132+
if hasattr(_self, "endpoint"):
133+
return str(_self.endpoint)
124134
return None
125135

126136

0 commit comments

Comments (0)