@@ -195,8 +195,20 @@ struct apple_nvme {
195195
196196 int irq ;
197197 spinlock_t lock ;
198+
199+ /*
200+ * Delayed cache flush handling state
201+ */
202+ struct nvme_ns * flush_ns ;
203+ unsigned long flush_interval ;
204+ unsigned long last_flush ;
205+ struct delayed_work flush_dwork ;
198206};
199207
208+ unsigned int flush_interval = 1000 ;
209+ module_param (flush_interval , uint , 0644 );
210+ MODULE_PARM_DESC (flush_interval , "Grace period in msecs between flushes" );
211+
200212static_assert (sizeof (struct nvme_command ) == 64 );
201213static_assert (sizeof (struct apple_nvmmu_tcb ) == 128 );
202214
@@ -730,6 +742,26 @@ static int apple_nvme_remove_sq(struct apple_nvme *anv)
730742 return nvme_submit_sync_cmd (anv -> ctrl .admin_q , & c , NULL , 0 );
731743}
732744
/*
 * Decide whether a flush request can be absorbed into the deferred-flush
 * machinery instead of being sent to the hardware now.
 *
 * Returns true when the flush has been coalesced — the caller must then
 * complete @req itself without submitting it — and false when the caller
 * should submit the flush immediately.
 *
 * Note the deliberate ordering below: the delayed work is (re)armed
 * *before* the namespace sanity check, so on the WARN path the already
 * scheduled work stays pending while this request falls through to a
 * synchronous flush.
 */
static bool apple_nvme_delayed_flush(struct apple_nvme *anv, struct nvme_ns *ns,
				     struct request *req)
{
	/* Coalescing disabled, or not a flush at all: submit normally. */
	if (!anv->flush_interval || req_op(req) != REQ_OP_FLUSH)
		return false;
	/* A deferred flush is already queued; this one piggybacks on it. */
	if (delayed_work_pending(&anv->flush_dwork))
		return true;
	/*
	 * Still inside the grace period since the last completed flush
	 * (both last_flush and flush_interval are in jiffies): defer.
	 */
	if (time_before(jiffies, anv->last_flush + anv->flush_interval)) {
		kblockd_mod_delayed_work_on(WORK_CPU_UNBOUND, &anv->flush_dwork,
					    anv->flush_interval);
		/*
		 * Only one namespace may own the pending flush.  A second
		 * namespace showing up is unexpected (NOTE(review): this
		 * assumes a single-namespace device — confirm for ANS2);
		 * flush it synchronously instead.
		 */
		if (WARN_ON_ONCE(anv->flush_ns && anv->flush_ns != ns))
			goto out;
		anv->flush_ns = ns;
		return true;
	}
out:
	/* Submitting immediately; restart the grace period from now. */
	anv->last_flush = jiffies;
	return false;
}
764+
733765static blk_status_t apple_nvme_queue_rq (struct blk_mq_hw_ctx * hctx ,
734766 const struct blk_mq_queue_data * bd )
735767{
@@ -765,6 +797,12 @@ static blk_status_t apple_nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
765797 }
766798
767799 nvme_start_request (req );
800+
801+ if (apple_nvme_delayed_flush (anv , ns , req )) {
802+ blk_mq_complete_request (req );
803+ return BLK_STS_OK ;
804+ }
805+
768806 apple_nvme_submit_cmd (q , cmnd );
769807 return BLK_STS_OK ;
770808
@@ -1399,6 +1437,28 @@ static void devm_apple_nvme_mempool_destroy(void *data)
13991437 mempool_destroy (data );
14001438}
14011439
1440+ static void apple_nvme_flush_work (struct work_struct * work )
1441+ {
1442+ struct nvme_command c = { };
1443+ struct apple_nvme * anv ;
1444+ struct nvme_ns * ns ;
1445+ int err ;
1446+
1447+ anv = container_of (work , struct apple_nvme , flush_dwork .work );
1448+ ns = anv -> flush_ns ;
1449+ if (WARN_ON_ONCE (!ns ))
1450+ return ;
1451+
1452+ c .common .opcode = nvme_cmd_flush ;
1453+ c .common .nsid = cpu_to_le32 (anv -> flush_ns -> head -> ns_id );
1454+ err = nvme_submit_sync_cmd (ns -> queue , & c , NULL , 0 );
1455+ if (err ) {
1456+ dev_err (anv -> dev , "Deferred flush failed: %d\n" , err );
1457+ } else {
1458+ anv -> last_flush = jiffies ;
1459+ }
1460+ }
1461+
14021462static struct apple_nvme * apple_nvme_alloc (struct platform_device * pdev )
14031463{
14041464 struct device * dev = & pdev -> dev ;
@@ -1554,6 +1614,14 @@ static int apple_nvme_probe(struct platform_device *pdev)
15541614 goto out_uninit_ctrl ;
15551615 }
15561616
1617+ if (flush_interval ) {
1618+ anv -> flush_interval = msecs_to_jiffies (flush_interval );
1619+ anv -> flush_ns = NULL ;
1620+ anv -> last_flush = jiffies - anv -> flush_interval ;
1621+ }
1622+
1623+ INIT_DELAYED_WORK (& anv -> flush_dwork , apple_nvme_flush_work );
1624+
15571625 nvme_reset_ctrl (& anv -> ctrl );
15581626 async_schedule (apple_nvme_async_probe , anv );
15591627
@@ -1591,6 +1659,7 @@ static void apple_nvme_shutdown(struct platform_device *pdev)
15911659{
15921660 struct apple_nvme * anv = platform_get_drvdata (pdev );
15931661
1662+ flush_delayed_work (& anv -> flush_dwork );
15941663 apple_nvme_disable (anv , true);
15951664 if (apple_rtkit_is_running (anv -> rtk )) {
15961665 apple_rtkit_shutdown (anv -> rtk );
0 commit comments