import json
import os
import re
import shutil
import uuid
from contextlib import asynccontextmanager
from datetime import datetime
from pathlib import Path
from typing import Optional, Dict, Any

import httpx
from fastapi import FastAPI, HTTPException, Request, UploadFile, File
from fastapi.middleware.cors import CORSMiddleware
from fastapi.responses import JSONResponse
4750# Global flag to track if tokens are ready
4851tokens_ready = False
4952
53+ # Workspace paths for file management
54+ WORKSPACE_PATH = Path ("/app/workspace" )
55+ INPUTS_PATH = WORKSPACE_PATH / "input" # Note: singular "input" to match your JSON
56+ OUTPUTS_PATH = WORKSPACE_PATH / "output" # Note: singular "output" to match your JSON
57+
5058# Enhanced endpoint to token type mapping using regex patterns
5159ENDPOINT_TOKEN_MAPPING = {
5260 # Management API endpoints
@@ -196,7 +204,6 @@ async def wait_for_torchserve_tokens():
196204 print ("🚀 Continuing startup - some TorchServe features may be unavailable" )
197205 return False
198206
199- # Startup/Shutdown Events
200207@asynccontextmanager
201208async def lifespan (app : FastAPI ):
202209 # Startup
@@ -208,6 +215,15 @@ async def lifespan(app: FastAPI):
208215 print (f"🎯 TorchServe Management URL: { TORCHSERVE_MANAGEMENT_URL } " )
209216 print (f"📂 Token file path: { torchserve_tokens .tokens_file_path } " )
210217 print ("🔓 Open access - no authentication required" )
218+
219+ # Create workspace directories
220+ WORKSPACE_PATH .mkdir (exist_ok = True )
221+ INPUTS_PATH .mkdir (exist_ok = True )
222+ OUTPUTS_PATH .mkdir (exist_ok = True )
223+
224+ print (f"📁 Workspace: { WORKSPACE_PATH } " )
225+ print (f"📥 Input folder: { INPUTS_PATH } " )
226+ print (f"📤 Output folder: { OUTPUTS_PATH } " )
211227 print ("=" * 60 )
212228
213229 # Wait for TorchServe tokens
@@ -374,43 +390,91 @@ async def model_info_alias(model_name: str):
374390 return await torchserve_model_info (model_name )
375391
@app.post("/predict/breast-density")
async def predict_breast_density(file: UploadFile = File(...)):
    """
    Upload an image file and run a breast density prediction.

    Process:
    1. Save the uploaded file to /app/workspace/input
    2. Generate the JSON payload for TorchServe
    3. Forward the request to TorchServe
    4. Return the TorchServe response to the caller

    Raises:
        HTTPException 400: upload is not an image, or has no usable filename.
        HTTPException 503: TorchServe could not be reached.
    """
    try:
        # Unique id so this request can be correlated in logs and results
        unique_id = str(uuid.uuid4())

        # Validate file type (content type is client-supplied; best-effort check)
        if not file.content_type or not file.content_type.startswith('image/'):
            raise HTTPException(status_code=400, detail="File must be an image")

        # filename is Optional on UploadFile; reject uploads without one
        if not file.filename:
            raise HTTPException(status_code=400, detail="Uploaded file must have a filename")

        # Keep only the basename of the client-supplied filename to prevent
        # path traversal (e.g. "../../etc/passwd" escaping the workspace)
        safe_filename = Path(file.filename).name
        if not safe_filename:
            raise HTTPException(status_code=400, detail="Invalid filename")

        # Create workspace directories if they don't exist
        # (parents=True so a missing /app/workspace ancestor is not fatal)
        WORKSPACE_PATH.mkdir(parents=True, exist_ok=True)
        INPUTS_PATH.mkdir(parents=True, exist_ok=True)
        OUTPUTS_PATH.mkdir(parents=True, exist_ok=True)

        # Save uploaded file to /app/workspace/input under the sanitized name
        input_file_path = INPUTS_PATH / safe_filename

        print(f"💾 Saving uploaded file: {safe_filename} to {input_file_path}")

        with open(input_file_path, "wb") as buffer:
            shutil.copyfileobj(file.file, buffer)

        print(f"✅ File saved successfully: {input_file_path}")

        # KServe-style payload; root_path is the TorchServe container's view of
        # the shared workspace, so only folder names and filenames are passed
        torchserve_payload = {
            "id": unique_id,
            "inputs": [{
                "name": "data",
                "datatype": "json",
                "shape": [1],
                "data": [{
                    "bucket": "not_needed",
                    "root_path": "/home/model-server/workspace",
                    "input_folder": "input",
                    "output_folder": "output",
                    "input_files": [safe_filename]
                }]
            }]
        }

        print(f"🔮 Making prediction request with UUID: {unique_id}")
        print(f"📋 Payload: {json.dumps(torchserve_payload, indent=2)}")

        # Forward request to TorchServe with retry logic
        response = await request_with_token_retry(
            f"{TORCHSERVE_URL}/predictions/breast-density",
            method="POST",
            endpoint_path="/predictions/breast-density",
            json=torchserve_payload,
            headers={"Content-Type": "application/json"}
        )

        print(f"📤 TorchServe response status: {response.status_code}")

        # Parse and return TorchServe response
        if response.headers.get("content-type", "").startswith("application/json"):
            content = response.json()
            print(f"📄 TorchServe JSON response: {json.dumps(content, indent=2)}")
        else:
            content = {"result": response.text}
            print(f"📄 TorchServe text response: {response.text}")

        return JSONResponse(content=content, status_code=response.status_code)

    except HTTPException:
        raise
    except Exception as e:
        print(f"❌ Prediction failed: {str(e)}")
        raise HTTPException(status_code=503, detail=f"TorchServe unavailable: {str(e)}")
409473
410- @app .post ("/predictions/{model_name} " )
411- async def predict_generic ( model_name : str , request : Request ):
@app.post("/predict/breast-density-json")
async def predict_breast_density_json(request: Request):
    """
    Alternative endpoint for JSON-based prediction (for backward compatibility).

    Forwards the caller's raw body to the TorchServe breast-density model
    unchanged, using the inference-token retry helper.

    Raises:
        HTTPException 503: TorchServe could not be reached.
    """
    try:
        # Get request body
        body = await request.body()

        # Get content type
        content_type = request.headers.get("content-type")

        print(f"📋 Received JSON prediction request")
        # errors='replace' so a malformed (non-UTF-8) body can't crash the
        # log line and surface as a bogus 503
        print(f"📄 Payload: {body.decode('utf-8', errors='replace')}")

        # Forward request to TorchServe with retry logic
        response = await request_with_token_retry(
            f"{TORCHSERVE_URL}/predictions/breast-density",
            method="POST",
            endpoint_path="/predictions/breast-density",
            content=body,
            headers={"Content-Type": content_type} if content_type else {}
        )

        print(f"📤 TorchServe response status: {response.status_code}")

        # Return response
        if response.headers.get("content-type", "").startswith("application/json"):
            content = response.json()
            print(f"📄 TorchServe JSON response: {json.dumps(content, indent=2)}")
        else:
            content = {"result": response.text}
            print(f"📄 TorchServe text response: {response.text}")

        return JSONResponse(content=content, status_code=response.status_code)

    except HTTPException:
        raise
    except Exception as e:
        print(f"❌ JSON prediction failed: {str(e)}")
        raise HTTPException(status_code=503, detail=f"TorchServe unavailable: {str(e)}")
443515
@app.get("/workspace/files")
async def list_workspace_files():
    """
    List files in the workspace input and output directories.

    Returns:
        dict with the workspace paths, sorted filename lists, and counts.

    Raises:
        HTTPException 500: the directories could not be read.
    """
    try:
        # sorted() so the response is deterministic (iterdir order is
        # filesystem-dependent)
        input_files = sorted(f.name for f in INPUTS_PATH.iterdir() if f.is_file()) if INPUTS_PATH.exists() else []
        output_files = sorted(f.name for f in OUTPUTS_PATH.iterdir() if f.is_file()) if OUTPUTS_PATH.exists() else []

        return {
            "workspace": str(WORKSPACE_PATH),
            "input_folder": str(INPUTS_PATH),
            "output_folder": str(OUTPUTS_PATH),
            "input_files": input_files,
            "output_files": output_files,
            "total_input_files": len(input_files),
            "total_output_files": len(output_files)
        }
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"Failed to list workspace files: {str(e)}")
536+
@app.get("/workspace/status")
async def workspace_status():
    """
    Report workspace directory existence and access permissions.

    Raises:
        HTTPException 500: the status could not be gathered.
    """
    try:
        # Check existence once; permission probes only run on existing dirs
        ws_exists = WORKSPACE_PATH.exists()
        in_exists = INPUTS_PATH.exists()
        out_exists = OUTPUTS_PATH.exists()

        permissions = {
            "workspace_writable": ws_exists and os.access(WORKSPACE_PATH, os.W_OK),
            "input_writable": in_exists and os.access(INPUTS_PATH, os.W_OK),
            "output_readable": out_exists and os.access(OUTPUTS_PATH, os.R_OK),
        }

        return {
            "workspace_path": str(WORKSPACE_PATH),
            "workspace_exists": ws_exists,
            "input_path": str(INPUTS_PATH),
            "input_exists": in_exists,
            "output_path": str(OUTPUTS_PATH),
            "output_exists": out_exists,
            "permissions": permissions,
        }
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"Failed to get workspace status: {str(e)}")
558+
444559@app .get ("/torchserve/metrics" )
445560async def torchserve_metrics ():
446561 """
@@ -540,29 +655,45 @@ async def root():
540655 return {
541656 "service" : "MONAI Gateway Service" ,
542657 "version" : "1.0.0" ,
543- "description" : "Open gateway for MONAI Breast Density Classification" ,
658+ "description" : "Open gateway for MONAI Breast Density Classification with file upload support " ,
544659 "access_mode" : "open" ,
545660 "authentication" : "disabled" ,
546661 "endpoints" : {
662+ "prediction" : {
663+ "upload_and_predict" : "POST /predict/breast-density (multipart/form-data with file)" ,
664+ "json_predict" : "POST /predict/breast-density-json (application/json)" ,
665+ "description" : "Upload image file or send JSON payload for breast density analysis"
666+ },
667+ "workspace" : {
668+ "list_files" : "GET /workspace/files" ,
669+ "status" : "GET /workspace/status" ,
670+ "description" : "Manage and monitor workspace files"
671+ },
547672 "torchserve" : {
548- "ping" : "/ping or /torchserve/ping" ,
549- "models" : "/models or /torchserve/models" ,
550- "model_info" : "/models/{model_name} or /torchserve/models/{model_name}" ,
551- "predict_breast_density" : "/predict/breast-density" ,
552- "predict_generic" : "/predictions/{model_name}" ,
553- "metrics" : "/metrics or /torchserve/metrics" ,
554- "token_info" : "/torchserve/token-info"
673+ "ping" : "GET /ping or /torchserve/ping" ,
674+ "models" : "GET /models or /torchserve/models" ,
675+ "model_info" : "GET /models/{model_name} or /torchserve/models/{model_name}" ,
676+ "metrics" : "GET /metrics or /torchserve/metrics" ,
677+ "token_info" : "GET /torchserve/token-info"
555678 },
556679 "system" : {
557- "health" : "/health" ,
558- "docs" : "/docs" ,
559- "root" : "/"
680+ "health" : "GET /health" ,
681+ "docs" : "GET /docs" ,
682+ "root" : "GET /"
560683 }
561684 },
685+ "usage_example" : {
686+ "curl_upload" :
"curl -X POST -F \" [email protected] \" http://localhost:8090/predict/breast-density" ,
687+ "curl_json" : "curl -X POST -H \" Content-Type: application/json\" -d '{\" id\" :\" test\" ,\" inputs\" :[...]}' http://localhost:8090/predict/breast-density-json"
688+ },
689+ "workspace" : {
690+ "input_path" : str (INPUTS_PATH ),
691+ "output_path" : str (OUTPUTS_PATH ),
692+ "note" : "Uploaded files are saved to input folder, results are written to output folder"
693+ },
562694 "tokens_ready" : tokens_ready ,
563695 "endpoint_mappings" : ENDPOINT_TOKEN_MAPPING ,
564- "torchserve_url" : TORCHSERVE_URL ,
565- "note" : "All endpoints are accessible without authentication"
696+ "torchserve_url" : TORCHSERVE_URL
566697 }
567698
568699# Error Handlers
0 commit comments