@@ -8894,6 +8894,56 @@ static void amdgpu_dm_update_cursor(struct drm_plane *plane,
 	}
 }
 
+static void amdgpu_dm_enable_self_refresh(struct amdgpu_crtc *acrtc_attach,
+					  const struct dm_crtc_state *acrtc_state,
+					  const u64 current_ts)
+{
+	struct psr_settings *psr = &acrtc_state->stream->link->psr_settings;
+	struct replay_settings *pr = &acrtc_state->stream->link->replay_settings;
+	struct amdgpu_dm_connector *aconn =
+		(struct amdgpu_dm_connector *)acrtc_state->stream->dm_stream_context;
+
+	if (acrtc_state->update_type > UPDATE_TYPE_FAST) {
+		if (pr->config.replay_supported && !pr->replay_feature_enabled)
+			amdgpu_dm_link_setup_replay(acrtc_state->stream->link, aconn);
+		else if (psr->psr_version != DC_PSR_VERSION_UNSUPPORTED &&
+			 !psr->psr_feature_enabled)
+			if (!aconn->disallow_edp_enter_psr)
+				amdgpu_dm_link_setup_psr(acrtc_state->stream);
+	}
+
+	/* Decrement skip count when SR is enabled and we're doing fast updates. */
+	if (acrtc_state->update_type == UPDATE_TYPE_FAST &&
+	    (psr->psr_feature_enabled || pr->config.replay_supported)) {
+		if (aconn->sr_skip_count > 0)
+			aconn->sr_skip_count--;
+
+		/* Allow SR when skip count is 0. */
+		acrtc_attach->dm_irq_params.allow_sr_entry = !aconn->sr_skip_count;
+
+		/*
+		 * If sink supports PSR SU/Panel Replay, there is no need to rely on
+		 * a vblank event disable request to enable PSR/RP. PSR SU/RP
+		 * can be enabled immediately once OS demonstrates an
+		 * adequate number of fast atomic commits to notify KMD
+		 * of update events. See `vblank_control_worker()`.
+		 */
+		if (acrtc_attach->dm_irq_params.allow_sr_entry &&
+#ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
+		    !amdgpu_dm_crc_window_is_activated(acrtc_state->base.crtc) &&
+#endif
+		    (current_ts - psr->psr_dirty_rects_change_timestamp_ns) > 500000000) {
+			if (pr->replay_feature_enabled && !pr->replay_allow_active)
+				amdgpu_dm_replay_enable(acrtc_state->stream, true);
+			if (psr->psr_version >= DC_PSR_VERSION_SU_1 &&
+			    !psr->psr_allow_active && !aconn->disallow_edp_enter_psr)
+				amdgpu_dm_psr_enable(acrtc_state->stream);
+		}
+	} else {
+		acrtc_attach->dm_irq_params.allow_sr_entry = false;
+	}
+}
+
 static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
 				    struct drm_device *dev,
 				    struct amdgpu_display_manager *dm,
@@ -9222,9 +9272,12 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
 		bundle->stream_update.abm_level = &acrtc_state->abm_level;
 
 	mutex_lock(&dm->dc_lock);
-	if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
-			acrtc_state->stream->link->psr_settings.psr_allow_active)
-		amdgpu_dm_psr_disable(acrtc_state->stream);
+	if (acrtc_state->update_type > UPDATE_TYPE_FAST) {
+		if (acrtc_state->stream->link->replay_settings.replay_allow_active)
+			amdgpu_dm_replay_disable(acrtc_state->stream);
+		if (acrtc_state->stream->link->psr_settings.psr_allow_active)
+			amdgpu_dm_psr_disable(acrtc_state->stream);
+	}
 	mutex_unlock(&dm->dc_lock);
 
 	/*
@@ -9265,57 +9318,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
 			dm_update_pflip_irq_state(drm_to_adev(dev),
 						  acrtc_attach);
 
-		if (acrtc_state->update_type > UPDATE_TYPE_FAST) {
-			if (acrtc_state->stream->link->replay_settings.config.replay_supported &&
-					!acrtc_state->stream->link->replay_settings.replay_feature_enabled) {
-				struct amdgpu_dm_connector *aconn =
-					(struct amdgpu_dm_connector *)acrtc_state->stream->dm_stream_context;
-				amdgpu_dm_link_setup_replay(acrtc_state->stream->link, aconn);
-			} else if (acrtc_state->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED &&
-					!acrtc_state->stream->link->psr_settings.psr_feature_enabled) {
-
-				struct amdgpu_dm_connector *aconn = (struct amdgpu_dm_connector *)
-					acrtc_state->stream->dm_stream_context;
-
-				if (!aconn->disallow_edp_enter_psr)
-					amdgpu_dm_link_setup_psr(acrtc_state->stream);
-			}
-		}
-
-		/* Decrement skip count when SR is enabled and we're doing fast updates. */
-		if (acrtc_state->update_type == UPDATE_TYPE_FAST &&
-		    acrtc_state->stream->link->psr_settings.psr_feature_enabled) {
-			struct amdgpu_dm_connector *aconn =
-				(struct amdgpu_dm_connector *)acrtc_state->stream->dm_stream_context;
-
-			if (aconn->sr_skip_count > 0)
-				aconn->sr_skip_count--;
-
-			/* Allow SR when skip count is 0. */
-			acrtc_attach->dm_irq_params.allow_sr_entry = !aconn->sr_skip_count;
-
-			/*
-			 * If sink supports PSR SU/Panel Replay, there is no need to rely on
-			 * a vblank event disable request to enable PSR/RP. PSR SU/RP
-			 * can be enabled immediately once OS demonstrates an
-			 * adequate number of fast atomic commits to notify KMD
-			 * of update events. See `vblank_control_worker()`.
-			 */
-			if (acrtc_state->stream->link->psr_settings.psr_version >= DC_PSR_VERSION_SU_1 &&
-			    acrtc_attach->dm_irq_params.allow_sr_entry &&
-#ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
-			    !amdgpu_dm_crc_window_is_activated(acrtc_state->base.crtc) &&
-#endif
-			    !acrtc_state->stream->link->psr_settings.psr_allow_active &&
-			    !aconn->disallow_edp_enter_psr &&
-			    (timestamp_ns -
-			    acrtc_state->stream->link->psr_settings.psr_dirty_rects_change_timestamp_ns) >
-			    500000000)
-				amdgpu_dm_psr_enable(acrtc_state->stream);
-		} else {
-			acrtc_attach->dm_irq_params.allow_sr_entry = false;
-		}
-
+		amdgpu_dm_enable_self_refresh(acrtc_attach, acrtc_state, timestamp_ns);
 		mutex_unlock(&dm->dc_lock);
 	}
 
0 commit comments