@@ -203,8 +203,20 @@ struct apple_nvme {
203203
204204 int irq ;
205205 spinlock_t lock ;
206+
207+ /*
208+ * Delayed cache flush handling state
209+ */
210+ struct nvme_ns * flush_ns ;
211+ unsigned long flush_interval ;
212+ unsigned long last_flush ;
213+ struct delayed_work flush_dwork ;
206214};
207215
/*
 * Module-wide grace period (in milliseconds) between cache flushes.
 * While a flush arrived less than flush_interval ago, REQ_OP_FLUSH
 * requests are completed immediately and coalesced into one deferred
 * flush (see apple_nvme_delayed_flush). 0 disables the deferral.
 * Runtime-writable (0644) via sysfs.
 */
unsigned int flush_interval = 1000;
module_param(flush_interval, uint, 0644);
MODULE_PARM_DESC(flush_interval, "Grace period in msecs between flushes");
208220static_assert (sizeof (struct nvme_command ) == 64 );
209221static_assert (sizeof (struct apple_nvmmu_tcb ) == 128 );
210222
@@ -762,6 +774,26 @@ static int apple_nvme_remove_sq(struct apple_nvme *anv)
762774 return nvme_submit_sync_cmd (anv -> ctrl .admin_q , & c , NULL , 0 );
763775}
764776
777+ static bool apple_nvme_delayed_flush (struct apple_nvme * anv , struct nvme_ns * ns ,
778+ struct request * req )
779+ {
780+ if (!anv -> flush_interval || req_op (req ) != REQ_OP_FLUSH )
781+ return false;
782+ if (delayed_work_pending (& anv -> flush_dwork ))
783+ return true;
784+ if (time_before (jiffies , anv -> last_flush + anv -> flush_interval )) {
785+ kblockd_mod_delayed_work_on (WORK_CPU_UNBOUND , & anv -> flush_dwork ,
786+ anv -> flush_interval );
787+ if (WARN_ON_ONCE (anv -> flush_ns && anv -> flush_ns != ns ))
788+ goto out ;
789+ anv -> flush_ns = ns ;
790+ return true;
791+ }
792+ out :
793+ anv -> last_flush = jiffies ;
794+ return false;
795+ }
796+
765797static blk_status_t apple_nvme_queue_rq (struct blk_mq_hw_ctx * hctx ,
766798 const struct blk_mq_queue_data * bd )
767799{
@@ -798,6 +830,11 @@ static blk_status_t apple_nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
798830
799831 nvme_start_request (req );
800832
833+ if (apple_nvme_delayed_flush (anv , ns , req )) {
834+ blk_mq_complete_request (req );
835+ return BLK_STS_OK ;
836+ }
837+
801838 if (anv -> hw -> has_lsq_nvmmu )
802839 apple_nvme_submit_cmd_t8103 (q , cmnd );
803840 else
@@ -1453,6 +1490,28 @@ static void devm_apple_nvme_mempool_destroy(void *data)
14531490 mempool_destroy (data );
14541491}
14551492
1493+ static void apple_nvme_flush_work (struct work_struct * work )
1494+ {
1495+ struct nvme_command c = { };
1496+ struct apple_nvme * anv ;
1497+ struct nvme_ns * ns ;
1498+ int err ;
1499+
1500+ anv = container_of (work , struct apple_nvme , flush_dwork .work );
1501+ ns = anv -> flush_ns ;
1502+ if (WARN_ON_ONCE (!ns ))
1503+ return ;
1504+
1505+ c .common .opcode = nvme_cmd_flush ;
1506+ c .common .nsid = cpu_to_le32 (anv -> flush_ns -> head -> ns_id );
1507+ err = nvme_submit_sync_cmd (ns -> queue , & c , NULL , 0 );
1508+ if (err ) {
1509+ dev_err (anv -> dev , "Deferred flush failed: %d\n" , err );
1510+ } else {
1511+ anv -> last_flush = jiffies ;
1512+ }
1513+ }
1514+
14561515static struct apple_nvme * apple_nvme_alloc (struct platform_device * pdev )
14571516{
14581517 struct device * dev = & pdev -> dev ;
@@ -1621,6 +1680,14 @@ static int apple_nvme_probe(struct platform_device *pdev)
16211680 goto out_uninit_ctrl ;
16221681 }
16231682
1683+ if (flush_interval ) {
1684+ anv -> flush_interval = msecs_to_jiffies (flush_interval );
1685+ anv -> flush_ns = NULL ;
1686+ anv -> last_flush = jiffies - anv -> flush_interval ;
1687+ }
1688+
1689+ INIT_DELAYED_WORK (& anv -> flush_dwork , apple_nvme_flush_work );
1690+
16241691 nvme_reset_ctrl (& anv -> ctrl );
16251692 async_schedule (apple_nvme_async_probe , anv );
16261693
@@ -1658,6 +1725,7 @@ static void apple_nvme_shutdown(struct platform_device *pdev)
16581725{
16591726 struct apple_nvme * anv = platform_get_drvdata (pdev );
16601727
1728+ flush_delayed_work (& anv -> flush_dwork );
16611729 apple_nvme_disable (anv , true);
16621730 if (apple_rtkit_is_running (anv -> rtk )) {
16631731 apple_rtkit_shutdown (anv -> rtk );
0 commit comments