@@ -203,8 +203,20 @@ struct apple_nvme {
203203
204204 int irq ;
205205 spinlock_t lock ;
206+
207+ /*
208+ * Delayed cache flush handling state
209+ */
210+ struct nvme_ns * flush_ns ;
211+ unsigned long flush_interval ;
212+ unsigned long last_flush ;
213+ struct delayed_work flush_dwork ;
206214};
207215
216+ unsigned int flush_interval = 1000 ;
217+ module_param (flush_interval , uint , 0644 );
218+ MODULE_PARM_DESC (flush_interval , "Grace period in msecs between flushes" );
219+
/*
 * Layout contracts with the hardware: the NVMe submission queue entry is
 * 64 bytes and the Apple NVMMU TCB is 128 bytes; the queue/TCB memory is
 * carved up assuming exactly these sizes.
 */
static_assert(sizeof(struct nvme_command) == 64);
static_assert(sizeof(struct apple_nvmmu_tcb) == 128);
210222
@@ -762,6 +774,26 @@ static int apple_nvme_remove_sq(struct apple_nvme *anv)
762774 return nvme_submit_sync_cmd (anv -> ctrl .admin_q , & c , NULL , 0 );
763775}
764776
777+ static bool apple_nvme_delayed_flush (struct apple_nvme * anv , struct nvme_ns * ns ,
778+ struct request * req )
779+ {
780+ if (!anv -> flush_interval || req_op (req ) != REQ_OP_FLUSH )
781+ return false;
782+ if (delayed_work_pending (& anv -> flush_dwork ))
783+ return true;
784+ if (time_before (jiffies , anv -> last_flush + anv -> flush_interval )) {
785+ kblockd_mod_delayed_work_on (WORK_CPU_UNBOUND , & anv -> flush_dwork ,
786+ anv -> flush_interval );
787+ if (WARN_ON_ONCE (anv -> flush_ns && anv -> flush_ns != ns ))
788+ goto out ;
789+ anv -> flush_ns = ns ;
790+ return true;
791+ }
792+ out :
793+ anv -> last_flush = jiffies ;
794+ return false;
795+ }
796+
765797static blk_status_t apple_nvme_queue_rq (struct blk_mq_hw_ctx * hctx ,
766798 const struct blk_mq_queue_data * bd )
767799{
@@ -798,6 +830,11 @@ static blk_status_t apple_nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
798830
799831 nvme_start_request (req );
800832
833+ if (apple_nvme_delayed_flush (anv , ns , req )) {
834+ blk_mq_complete_request (req );
835+ return BLK_STS_OK ;
836+ }
837+
801838 if (anv -> hw -> has_lsq_nvmmu )
802839 apple_nvme_submit_cmd_t8103 (q , cmnd );
803840 else
@@ -1452,6 +1489,28 @@ static void devm_apple_nvme_mempool_destroy(void *data)
14521489 mempool_destroy (data );
14531490}
14541491
1492+ static void apple_nvme_flush_work (struct work_struct * work )
1493+ {
1494+ struct nvme_command c = { };
1495+ struct apple_nvme * anv ;
1496+ struct nvme_ns * ns ;
1497+ int err ;
1498+
1499+ anv = container_of (work , struct apple_nvme , flush_dwork .work );
1500+ ns = anv -> flush_ns ;
1501+ if (WARN_ON_ONCE (!ns ))
1502+ return ;
1503+
1504+ c .common .opcode = nvme_cmd_flush ;
1505+ c .common .nsid = cpu_to_le32 (anv -> flush_ns -> head -> ns_id );
1506+ err = nvme_submit_sync_cmd (ns -> queue , & c , NULL , 0 );
1507+ if (err ) {
1508+ dev_err (anv -> dev , "Deferred flush failed: %d\n" , err );
1509+ } else {
1510+ anv -> last_flush = jiffies ;
1511+ }
1512+ }
1513+
14551514static struct apple_nvme * apple_nvme_alloc (struct platform_device * pdev )
14561515{
14571516 struct device * dev = & pdev -> dev ;
@@ -1620,6 +1679,14 @@ static int apple_nvme_probe(struct platform_device *pdev)
16201679 goto out_uninit_ctrl ;
16211680 }
16221681
1682+ if (flush_interval ) {
1683+ anv -> flush_interval = msecs_to_jiffies (flush_interval );
1684+ anv -> flush_ns = NULL ;
1685+ anv -> last_flush = jiffies - anv -> flush_interval ;
1686+ }
1687+
1688+ INIT_DELAYED_WORK (& anv -> flush_dwork , apple_nvme_flush_work );
1689+
16231690 nvme_reset_ctrl (& anv -> ctrl );
16241691 async_schedule (apple_nvme_async_probe , anv );
16251692
@@ -1657,6 +1724,7 @@ static void apple_nvme_shutdown(struct platform_device *pdev)
16571724{
16581725 struct apple_nvme * anv = platform_get_drvdata (pdev );
16591726
1727+ flush_delayed_work (& anv -> flush_dwork );
16601728 apple_nvme_disable (anv , true);
16611729 if (apple_rtkit_is_running (anv -> rtk )) {
16621730 apple_rtkit_shutdown (anv -> rtk );
0 commit comments