@@ -278,7 +278,7 @@ struct bio_dev_health {
 	void		*bdh_intel_smart_buf;	/* Intel SMART attributes */
 	uint64_t	 bdh_stat_age;
 	unsigned int	 bdh_inflights;
-	unsigned int	 bdh_stopping:1;
+	unsigned int	 bdh_stopping:1, bdh_io_stalled:1;
 	uint16_t	 bdh_vendor_id;	/* PCI vendor ID */
 
 	/**
@@ -365,10 +365,21 @@ struct bio_blobstore {
 			 bb_faulty_done:1;	/* Faulty reaction is done */
 };
 
+struct bio_io_lug {
+	/* Link to bio_xs_blobstore::bxb_pending_ios */
+	d_list_t	bil_link;
+	/* When the I/O is submitted */
+	uint64_t	bil_submit_ts;
+	/* Reference count */
+	uint32_t	bil_ref;
+};
+
 /* Per-xstream blobstore */
 struct bio_xs_blobstore {
 	/* In-flight blob read/write */
 	unsigned int		 bxb_blob_rw;
+	/* Pending I/Os */
+	d_list_t		 bxb_pending_ios;
 	/* spdk io channel */
 	struct spdk_io_channel	*bxb_io_channel;
 	/* per bio blobstore */
@@ -381,13 +392,60 @@ struct bio_xs_blobstore {
 /* Per-xstream NVMe context */
 struct bio_xs_context {
 	int			 bxc_tgt_id;
+	uint64_t		 bxc_io_monitor_ts;
 	struct spdk_thread	*bxc_thread;
 	struct bio_xs_blobstore	*bxc_xs_blobstores[SMD_DEV_TYPE_MAX];
 	struct bio_dma_buffer	*bxc_dma_buf;
 	unsigned int		 bxc_self_polling:1;	/* for standalone VOS */
 	unsigned int		 bxc_skip_draining:1;
 };
 
+static inline void
+bio_io_lug_init(struct bio_io_lug *io_lug)
+{
+	D_INIT_LIST_HEAD(&io_lug->bil_link);
+	io_lug->bil_submit_ts = 0;
+	io_lug->bil_ref = 0;
+}
+
+static inline void
+bio_io_lug_fini(struct bio_io_lug *io_lug)
+{
+	D_ASSERT(io_lug->bil_ref == 0);
+	D_ASSERT(d_list_empty(&io_lug->bil_link));
+}
+
+static inline void
+bio_io_lug_dequeue(struct bio_xs_blobstore *bxb, struct bio_io_lug *io_lug)
+{
+	D_ASSERT(bxb->bxb_blob_rw > 0);
+	bxb->bxb_blob_rw--;
+
+	D_ASSERT(!d_list_empty(&io_lug->bil_link));
+	D_ASSERT(io_lug->bil_submit_ts != 0);
+	D_ASSERT(io_lug->bil_ref > 0);
+	io_lug->bil_ref--;
+	if (io_lug->bil_ref == 0)
+		d_list_del_init(&io_lug->bil_link);
+}
+
+static inline void
+bio_io_lug_enqueue(struct bio_xs_context *xs_ctxt, struct bio_xs_blobstore *bxb,
+		   struct bio_io_lug *io_lug)
+{
+	bxb->bxb_blob_rw++;
+	if (io_lug->bil_ref == 0) {
+		if (xs_ctxt->bxc_io_monitor_ts)
+			io_lug->bil_submit_ts = xs_ctxt->bxc_io_monitor_ts;
+		else
+			io_lug->bil_submit_ts = d_timeus_secdiff(0);
+
+		D_ASSERT(d_list_empty(&io_lug->bil_link));
+		d_list_add_tail(&io_lug->bil_link, &bxb->bxb_pending_ios);
+	}
+	io_lug->bil_ref++;
+}
+
 /* Per VOS instance I/O context */
 struct bio_io_context {
 	d_list_t		 bic_link;	/* link to bxb_io_ctxts */
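
The inline helpers above maintain a per-xstream list of pending blob I/Os: the first enqueue for a tracker records a submission timestamp and links it onto bxb_pending_ios, and the reference count lets several chained SPDK operations on the same descriptor share one list entry, which is unlinked only when the last of them dequeues. A minimal usage sketch follows; all example_* names are illustrative, and only the bio_io_lug_* helpers and the bio_xs_* structures come from this patch.

/*
 * Hypothetical sketch: bracket a blob read/write with the tracker
 * helpers. The actual SPDK submission call is elided.
 */
struct example_io {
	struct bio_xs_context	*ei_xs_ctxt;
	struct bio_xs_blobstore	*ei_bxb;
	struct bio_io_lug	 ei_lug;
};

static void
example_io_submit(struct example_io *eio)
{
	bio_io_lug_init(&eio->ei_lug);
	/* Record bil_submit_ts and link onto bxb_pending_ios */
	bio_io_lug_enqueue(eio->ei_xs_ctxt, eio->ei_bxb, &eio->ei_lug);
	/* ... issue the blob I/O here ... */
}

static void
example_io_complete(struct example_io *eio)
{
	/* Drop the reference; unlink from bxb_pending_ios when it reaches 0 */
	bio_io_lug_dequeue(eio->ei_bxb, &eio->ei_lug);
	bio_io_lug_fini(&eio->ei_lug);
}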
@@ -437,6 +495,7 @@ struct bio_rsrvd_dma {
 
 /* I/O descriptor */
 struct bio_desc {
+	struct bio_io_lug	 bd_io_lug;
 	struct umem_instance	*bd_umem;
 	struct bio_io_context	*bd_ctxt;
 	/* DMA buffers reserved by this io descriptor */
@@ -546,6 +605,7 @@ extern unsigned int bio_chk_cnt_max;
 extern unsigned int	bio_numa_node;
 extern unsigned int	bio_spdk_max_unmap_cnt;
 extern unsigned int	bio_max_async_sz;
+extern unsigned int	bio_io_timeout;
 
 int xs_poll_completion(struct bio_xs_context *ctxt, unsigned int *inflights,
 		       uint64_t timeout);
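
bio_io_timeout is only declared in this header; its definition, default value, and units are set elsewhere in the patch. A rough sketch of how such a tunable is commonly initialized, assuming an environment override; the DAOS_NVME_IO_TIMEOUT name and the microsecond default below are illustrative assumptions, not taken from this change.

#include <stdlib.h>

/* Assumed default of 60 s, expressed in microseconds to match the
 * d_timeus_secdiff() timestamps used by bio_io_lug_enqueue(); both the
 * value and the environment variable name are hypothetical.
 */
unsigned int	bio_io_timeout = 60 * 1000 * 1000;

static void
example_io_timeout_init(void)
{
	char	*env = getenv("DAOS_NVME_IO_TIMEOUT");

	if (env != NULL)
		bio_io_timeout = (unsigned int)strtoul(env, NULL, 0);
}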
@@ -583,6 +643,8 @@ int iod_add_region(struct bio_desc *biod, struct bio_dma_chunk *chk,
 		   uint64_t end, uint8_t media);
 int dma_buffer_grow(struct bio_dma_buffer *buf, unsigned int cnt);
 void iod_dma_wait(struct bio_desc *biod);
+void
+bio_io_monitor(struct bio_xs_context *xs_ctxt, uint64_t now);
 
 static inline struct bio_dma_buffer *
 iod_dma_buf(struct bio_desc *biod)
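
Only the bio_io_monitor() declaration belongs to this header; its body lives in the .c changes. Presumably it walks each blobstore's bxb_pending_ios list and compares the age of the oldest entry against bio_io_timeout so that a stalled device can be flagged (e.g. via the new bdh_io_stalled bit). The helper below is an illustrative sketch of that check, not the actual implementation, and it assumes now, bil_submit_ts, and bio_io_timeout share the same time unit.

#include <stdbool.h>

/* Hypothetical per-blobstore stall check; not the real bio_io_monitor() body */
static bool
example_bxb_io_stalled(struct bio_xs_blobstore *bxb, uint64_t now)
{
	struct bio_io_lug	*lug;

	if (d_list_empty(&bxb->bxb_pending_ios))
		return false;

	/* bio_io_lug_enqueue() adds at the tail, so the oldest entry is the head */
	lug = d_list_entry(bxb->bxb_pending_ios.next, struct bio_io_lug,
			   bil_link);

	return now > lug->bil_submit_ts &&
	       now - lug->bil_submit_ts >= bio_io_timeout;
}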