@@ -195,8 +195,20 @@ struct apple_nvme {
195195
196196 int irq ;
197197 spinlock_t lock ;
198+
199+ /*
200+ * Delayed cache flush handling state
201+ */
202+ struct nvme_ns * flush_ns ;
203+ unsigned long flush_interval ;
204+ unsigned long last_flush ;
205+ struct delayed_work flush_dwork ;
198206};
199207
208+ unsigned int flush_interval = 1000 ;
209+ module_param (flush_interval , uint , 0644 );
210+ MODULE_PARM_DESC (flush_interval , "Grace period in msecs between flushes" );
211+
/*
 * These sizes are part of the hardware/firmware interface (commands and
 * NVMMU TCB entries are laid out in DMA-visible memory); catch any
 * accidental struct layout change at compile time.
 */
static_assert(sizeof(struct nvme_command) == 64);
static_assert(sizeof(struct apple_nvmmu_tcb) == 128);
202214
@@ -729,6 +741,26 @@ static int apple_nvme_remove_sq(struct apple_nvme *anv)
729741 return nvme_submit_sync_cmd (anv -> ctrl .admin_q , & c , NULL , 0 );
730742}
731743
/*
 * Decide whether a flush request may be completed without touching the
 * device, relying on a single deferred flush issued later from
 * apple_nvme_flush_work().
 *
 * Returns true when the caller should complete @req immediately (the
 * deferred flush will cover it), false when the flush must be submitted
 * to the hardware now.
 */
static bool apple_nvme_delayed_flush(struct apple_nvme *anv, struct nvme_ns *ns,
				     struct request *req)
{
	/* Coalescing disabled, or not a flush: submit normally. */
	if (!anv->flush_interval || req_op(req) != REQ_OP_FLUSH)
		return false;
	/* A deferred flush is already queued; it will cover this one too. */
	if (delayed_work_pending(&anv->flush_dwork))
		return true;
	/* Inside the grace period since the last real flush: defer it. */
	if (time_before(jiffies, anv->last_flush + anv->flush_interval)) {
		kblockd_mod_delayed_work_on(WORK_CPU_UNBOUND, &anv->flush_dwork,
					    anv->flush_interval);
		/*
		 * Only one namespace is tracked by the deferred-flush state.
		 * A flush for a different namespace is unexpected
		 * (NOTE(review): presumably this controller exposes a single
		 * namespace — confirm); warn and fall through so it is
		 * issued synchronously instead of being silently dropped.
		 */
		if (WARN_ON_ONCE(anv->flush_ns && anv->flush_ns != ns))
			goto out;
		anv->flush_ns = ns;
		return true;
	}
out:
	/* Flush goes to hardware now; restart the grace period from here. */
	anv->last_flush = jiffies;
	return false;
}
763+
732764static blk_status_t apple_nvme_queue_rq (struct blk_mq_hw_ctx * hctx ,
733765 const struct blk_mq_queue_data * bd )
734766{
@@ -764,6 +796,12 @@ static blk_status_t apple_nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
764796 }
765797
766798 nvme_start_request (req );
799+
800+ if (apple_nvme_delayed_flush (anv , ns , req )) {
801+ blk_mq_complete_request (req );
802+ return BLK_STS_OK ;
803+ }
804+
767805 apple_nvme_submit_cmd (q , cmnd );
768806 return BLK_STS_OK ;
769807
@@ -1388,6 +1426,28 @@ static void devm_apple_nvme_mempool_destroy(void *data)
13881426 mempool_destroy (data );
13891427}
13901428
1429+ static void apple_nvme_flush_work (struct work_struct * work )
1430+ {
1431+ struct nvme_command c = { };
1432+ struct apple_nvme * anv ;
1433+ struct nvme_ns * ns ;
1434+ int err ;
1435+
1436+ anv = container_of (work , struct apple_nvme , flush_dwork .work );
1437+ ns = anv -> flush_ns ;
1438+ if (WARN_ON_ONCE (!ns ))
1439+ return ;
1440+
1441+ c .common .opcode = nvme_cmd_flush ;
1442+ c .common .nsid = cpu_to_le32 (anv -> flush_ns -> head -> ns_id );
1443+ err = nvme_submit_sync_cmd (ns -> queue , & c , NULL , 0 );
1444+ if (err ) {
1445+ dev_err (anv -> dev , "Deferred flush failed: %d\n" , err );
1446+ } else {
1447+ anv -> last_flush = jiffies ;
1448+ }
1449+ }
1450+
13911451static struct apple_nvme * apple_nvme_alloc (struct platform_device * pdev )
13921452{
13931453 struct device * dev = & pdev -> dev ;
@@ -1542,6 +1602,14 @@ static int apple_nvme_probe(struct platform_device *pdev)
15421602 goto out_uninit_ctrl ;
15431603 }
15441604
1605+ if (flush_interval ) {
1606+ anv -> flush_interval = msecs_to_jiffies (flush_interval );
1607+ anv -> flush_ns = NULL ;
1608+ anv -> last_flush = jiffies - anv -> flush_interval ;
1609+ }
1610+
1611+ INIT_DELAYED_WORK (& anv -> flush_dwork , apple_nvme_flush_work );
1612+
15451613 nvme_reset_ctrl (& anv -> ctrl );
15461614 async_schedule (apple_nvme_async_probe , anv );
15471615
@@ -1575,6 +1643,7 @@ static void apple_nvme_shutdown(struct platform_device *pdev)
15751643{
15761644 struct apple_nvme * anv = platform_get_drvdata (pdev );
15771645
1646+ flush_delayed_work (& anv -> flush_dwork );
15781647 apple_nvme_disable (anv , true);
15791648 if (apple_rtkit_is_running (anv -> rtk ))
15801649 apple_rtkit_shutdown (anv -> rtk );
0 commit comments