Skip to content

Commit c1d12c1

Browse files
lumagrobclark
authored and committed
drm/msm/mdp5: provide dynamic bandwidth management
Instead of using static bandwidth setup, manage bandwidth dynamically, depending on the amount of allocated planes, their format and resolution. Co-developed-with: James Willcox <jwillcox@squareup.com> Signed-off-by: Dmitry Baryshkov <dmitry.baryshkov@linaro.org> Reported-by: kernel test robot <lkp@intel.com> Link: https://lore.kernel.org/r/20210525131316.3117809-8-dmitry.baryshkov@linaro.org Signed-off-by: Rob Clark <robdclark@chromium.org>
1 parent 3103177 commit c1d12c1

4 files changed

Lines changed: 181 additions & 36 deletions

File tree

drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c

Lines changed: 44 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -43,6 +43,9 @@ struct mdp5_crtc {
4343
/* for unref'ing cursor bo's after scanout completes: */
4444
struct drm_flip_work unref_cursor_work;
4545

46+
/* for lowering down the bandwidth after previous frame is complete */
47+
struct drm_flip_work lower_bw_work;
48+
4649
struct mdp_irq vblank;
4750
struct mdp_irq err;
4851
struct mdp_irq pp_done;
@@ -171,12 +174,28 @@ static void unref_cursor_worker(struct drm_flip_work *work, void *val)
171174
drm_gem_object_put(val);
172175
}
173176

177+
static void lower_bw_worker(struct drm_flip_work *work, void *val)
178+
{
179+
struct mdp5_crtc *mdp5_crtc =
180+
container_of(work, struct mdp5_crtc, lower_bw_work);
181+
struct drm_crtc *crtc = &mdp5_crtc->base;
182+
struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
183+
struct mdp5_kms *mdp5_kms = get_kms(&mdp5_crtc->base);
184+
185+
if (mdp5_cstate->old_crtc_bw > mdp5_cstate->new_crtc_bw) {
186+
DBG("DOWN BW to %lld\n", mdp5_cstate->new_crtc_bw);
187+
mdp5_kms_set_bandwidth(mdp5_kms);
188+
mdp5_cstate->old_crtc_bw = mdp5_cstate->new_crtc_bw;
189+
}
190+
}
191+
174192
static void mdp5_crtc_destroy(struct drm_crtc *crtc)
175193
{
176194
struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
177195

178196
drm_crtc_cleanup(crtc);
179197
drm_flip_work_cleanup(&mdp5_crtc->unref_cursor_work);
198+
drm_flip_work_cleanup(&mdp5_crtc->lower_bw_work);
180199

181200
kfree(mdp5_crtc);
182201
}
@@ -691,6 +710,7 @@ static int mdp5_crtc_atomic_check(struct drm_crtc *crtc,
691710
struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
692711
crtc);
693712
struct mdp5_kms *mdp5_kms = get_kms(crtc);
713+
struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc_state);
694714
struct drm_plane *plane;
695715
struct drm_device *dev = crtc->dev;
696716
struct plane_state pstates[STAGE_MAX + 1];
@@ -701,6 +721,7 @@ static int mdp5_crtc_atomic_check(struct drm_crtc *crtc,
701721
bool need_right_mixer = false;
702722
int cnt = 0, i;
703723
int ret;
724+
u64 crtc_bw = 0;
704725
enum mdp_mixer_stage_id start;
705726

706727
DBG("%s: check", crtc->name);
@@ -718,6 +739,9 @@ static int mdp5_crtc_atomic_check(struct drm_crtc *crtc,
718739
*/
719740
if (pstates[cnt].state->r_hwpipe)
720741
need_right_mixer = true;
742+
743+
crtc_bw += pstates[cnt].state->plane_bw;
744+
721745
cnt++;
722746

723747
if (plane->type == DRM_PLANE_TYPE_CURSOR)
@@ -730,6 +754,10 @@ static int mdp5_crtc_atomic_check(struct drm_crtc *crtc,
730754

731755
hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg);
732756

757+
if (hw_cfg->perf.ab_inefficiency)
758+
crtc_bw = mult_frac(crtc_bw, hw_cfg->perf.ab_inefficiency, 100);
759+
mdp5_cstate->new_crtc_bw = crtc_bw;
760+
733761
/*
734762
* we need a right hwmixer if the mode's width is greater than a single
735763
* LM's max width
@@ -785,6 +813,7 @@ static void mdp5_crtc_atomic_flush(struct drm_crtc *crtc,
785813
{
786814
struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
787815
struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
816+
struct mdp5_kms *mdp5_kms = get_kms(crtc);
788817
struct drm_device *dev = crtc->dev;
789818
unsigned long flags;
790819

@@ -808,6 +837,12 @@ static void mdp5_crtc_atomic_flush(struct drm_crtc *crtc,
808837

809838
blend_setup(crtc);
810839

840+
if (mdp5_cstate->old_crtc_bw < mdp5_cstate->new_crtc_bw) {
841+
DBG("UP BW to %lld\n", mdp5_cstate->new_crtc_bw);
842+
mdp5_kms_set_bandwidth(mdp5_kms);
843+
mdp5_cstate->old_crtc_bw = mdp5_cstate->new_crtc_bw;
844+
}
845+
811846
/* PP_DONE irq is only used by command mode for now.
812847
* It is better to request pending before FLUSH and START trigger
813848
* to make sure no pp_done irq missed.
@@ -1155,13 +1190,19 @@ static void mdp5_crtc_vblank_irq(struct mdp_irq *irq, uint32_t irqstatus)
11551190
{
11561191
struct mdp5_crtc *mdp5_crtc = container_of(irq, struct mdp5_crtc, vblank);
11571192
struct drm_crtc *crtc = &mdp5_crtc->base;
1193+
struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
11581194
struct msm_drm_private *priv = crtc->dev->dev_private;
11591195
unsigned pending;
11601196

11611197
mdp_irq_unregister(&get_kms(crtc)->base, &mdp5_crtc->vblank);
11621198

11631199
pending = atomic_xchg(&mdp5_crtc->pending, 0);
11641200

1201+
if (mdp5_cstate->old_crtc_bw > mdp5_cstate->new_crtc_bw) {
1202+
drm_flip_work_queue(&mdp5_crtc->lower_bw_work, NULL);
1203+
drm_flip_work_commit(&mdp5_crtc->lower_bw_work, priv->wq);
1204+
}
1205+
11651206
if (pending & PENDING_FLIP) {
11661207
complete_flip(crtc, NULL);
11671208
}
@@ -1318,6 +1359,9 @@ struct drm_crtc *mdp5_crtc_init(struct drm_device *dev,
13181359
drm_flip_work_init(&mdp5_crtc->unref_cursor_work,
13191360
"unref cursor", unref_cursor_worker);
13201361

1362+
drm_flip_work_init(&mdp5_crtc->lower_bw_work,
1363+
"lower bw", lower_bw_worker);
1364+
13211365
drm_crtc_helper_add(crtc, &mdp5_crtc_helper_funcs);
13221366

13231367
return crtc;

drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c

Lines changed: 83 additions & 36 deletions
Original file line numberDiff line numberDiff line change
@@ -19,6 +19,8 @@
1919
#include "msm_mmu.h"
2020
#include "mdp5_kms.h"
2121

22+
#define MDP5_DEFAULT_BW MBps_to_icc(6400)
23+
2224
static int mdp5_hw_init(struct msm_kms *kms)
2325
{
2426
struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
@@ -296,6 +298,28 @@ static const struct mdp_kms_funcs kms_funcs = {
296298
.set_irqmask = mdp5_set_irqmask,
297299
};
298300

301+
void mdp5_kms_set_bandwidth(struct mdp5_kms *mdp5_kms)
302+
{
303+
int i;
304+
u32 full_bw = 0;
305+
struct drm_crtc *tmp_crtc;
306+
307+
if (!mdp5_kms->num_paths)
308+
return;
309+
310+
drm_for_each_crtc(tmp_crtc, mdp5_kms->dev) {
311+
if (!tmp_crtc->enabled)
312+
continue;
313+
314+
full_bw += Bps_to_icc(to_mdp5_crtc_state(tmp_crtc->state)->new_crtc_bw / mdp5_kms->num_paths);
315+
}
316+
317+
DBG("SET BW to %d\n", full_bw);
318+
319+
for (i = 0; i < mdp5_kms->num_paths; i++)
320+
icc_set_bw(mdp5_kms->paths[i], full_bw, full_bw);
321+
}
322+
299323
static int mdp5_disable(struct mdp5_kms *mdp5_kms)
300324
{
301325
DBG("");
@@ -313,6 +337,14 @@ static int mdp5_disable(struct mdp5_kms *mdp5_kms)
313337
if (mdp5_kms->lut_clk)
314338
clk_disable_unprepare(mdp5_kms->lut_clk);
315339

340+
if (!mdp5_kms->enable_count) {
341+
int i;
342+
343+
for (i = 0; i < mdp5_kms->num_paths; i++)
344+
icc_set_bw(mdp5_kms->paths[i], 0, 0);
345+
icc_set_bw(mdp5_kms->path_rot, 0, 0);
346+
}
347+
316348
return 0;
317349
}
318350

@@ -322,6 +354,14 @@ static int mdp5_enable(struct mdp5_kms *mdp5_kms)
322354

323355
mdp5_kms->enable_count++;
324356

357+
if (mdp5_kms->enable_count == 1) {
358+
int i;
359+
360+
for (i = 0; i < mdp5_kms->num_paths; i++)
361+
icc_set_bw(mdp5_kms->paths[i], 0, MDP5_DEFAULT_BW);
362+
icc_set_bw(mdp5_kms->path_rot, 0, MDP5_DEFAULT_BW);
363+
}
364+
325365
clk_prepare_enable(mdp5_kms->ahb_clk);
326366
clk_prepare_enable(mdp5_kms->axi_clk);
327367
clk_prepare_enable(mdp5_kms->core_clk);
@@ -828,13 +868,48 @@ static int interface_init(struct mdp5_kms *mdp5_kms)
828868
return 0;
829869
}
830870

871+
static int mdp5_setup_interconnect(struct mdp5_kms *mdp5_kms)
872+
{
873+
struct icc_path *path0 = of_icc_get(&mdp5_kms->pdev->dev, "mdp0-mem");
874+
struct icc_path *path1 = of_icc_get(&mdp5_kms->pdev->dev, "mdp1-mem");
875+
struct icc_path *path_rot = of_icc_get(&mdp5_kms->pdev->dev, "rotator-mem");
876+
877+
if (IS_ERR(path0))
878+
return PTR_ERR(path0);
879+
880+
if (!path0) {
881+
/* no interconnect support is not necessarily a fatal
882+
* condition, the platform may simply not have an
883+
* interconnect driver yet. But warn about it in case
884+
* bootloader didn't setup bus clocks high enough for
885+
* scanout.
886+
*/
887+
dev_warn(&mdp5_kms->pdev->dev, "No interconnect support may cause display underflows!\n");
888+
return 0;
889+
}
890+
891+
mdp5_kms->paths[0] = path0;
892+
mdp5_kms->num_paths = 1;
893+
894+
if (!IS_ERR_OR_NULL(path1)) {
895+
mdp5_kms->paths[1] = path1;
896+
mdp5_kms->num_paths++;
897+
}
898+
899+
if (!IS_ERR_OR_NULL(path_rot))
900+
mdp5_kms->path_rot = path_rot;
901+
902+
return 0;
903+
}
904+
831905
static int mdp5_init(struct platform_device *pdev, struct drm_device *dev)
832906
{
833907
struct msm_drm_private *priv = dev->dev_private;
834908
struct mdp5_kms *mdp5_kms;
835909
struct mdp5_cfg *config;
836910
u32 major, minor;
837911
int ret;
912+
int i;
838913

839914
mdp5_kms = devm_kzalloc(&pdev->dev, sizeof(*mdp5_kms), GFP_KERNEL);
840915
if (!mdp5_kms) {
@@ -859,6 +934,14 @@ static int mdp5_init(struct platform_device *pdev, struct drm_device *dev)
859934
goto fail;
860935
}
861936

937+
ret = mdp5_setup_interconnect(mdp5_kms);
938+
if (ret)
939+
goto fail;
940+
941+
for (i = 0; i < mdp5_kms->num_paths; i++)
942+
icc_set_bw(mdp5_kms->paths[i], 0, MDP5_DEFAULT_BW);
943+
icc_set_bw(mdp5_kms->path_rot, 0, MDP5_DEFAULT_BW);
944+
862945
/* mandatory clocks: */
863946
ret = get_clk(pdev, &mdp5_kms->axi_clk, "bus", true);
864947
if (ret)
@@ -968,46 +1051,10 @@ static const struct component_ops mdp5_ops = {
9681051
.unbind = mdp5_unbind,
9691052
};
9701053

971-
static int mdp5_setup_interconnect(struct platform_device *pdev)
972-
{
973-
struct icc_path *path0 = of_icc_get(&pdev->dev, "mdp0-mem");
974-
struct icc_path *path1 = of_icc_get(&pdev->dev, "mdp1-mem");
975-
struct icc_path *path_rot = of_icc_get(&pdev->dev, "rotator-mem");
976-
977-
if (IS_ERR(path0))
978-
return PTR_ERR(path0);
979-
980-
if (!path0) {
981-
/* no interconnect support is not necessarily a fatal
982-
* condition, the platform may simply not have an
983-
* interconnect driver yet. But warn about it in case
984-
* bootloader didn't setup bus clocks high enough for
985-
* scanout.
986-
*/
987-
dev_warn(&pdev->dev, "No interconnect support may cause display underflows!\n");
988-
return 0;
989-
}
990-
991-
icc_set_bw(path0, 0, MBps_to_icc(6400));
992-
993-
if (!IS_ERR_OR_NULL(path1))
994-
icc_set_bw(path1, 0, MBps_to_icc(6400));
995-
if (!IS_ERR_OR_NULL(path_rot))
996-
icc_set_bw(path_rot, 0, MBps_to_icc(6400));
997-
998-
return 0;
999-
}
1000-
10011054
static int mdp5_dev_probe(struct platform_device *pdev)
10021055
{
1003-
int ret;
1004-
10051056
DBG("");
10061057

1007-
ret = mdp5_setup_interconnect(pdev);
1008-
if (ret)
1009-
return ret;
1010-
10111058
return component_add(&pdev->dev, &mdp5_ops);
10121059
}
10131060

drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.h

Lines changed: 12 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -17,6 +17,7 @@
1717
#include "mdp5_ctl.h"
1818
#include "mdp5_smp.h"
1919

20+
struct icc_path;
2021
struct mdp5_kms {
2122
struct mdp_kms base;
2223

@@ -68,6 +69,10 @@ struct mdp5_kms {
6869
struct mdp_irq error_handler;
6970

7071
int enable_count;
72+
73+
int num_paths;
74+
struct icc_path *paths[2];
75+
struct icc_path *path_rot;
7176
};
7277
#define to_mdp5_kms(x) container_of(x, struct mdp5_kms, base)
7378

@@ -100,6 +105,8 @@ struct mdp5_plane_state {
100105

101106
/* assigned by crtc blender */
102107
enum mdp_mixer_stage_id stage;
108+
109+
u64 plane_bw;
103110
};
104111
#define to_mdp5_plane_state(x) \
105112
container_of(x, struct mdp5_plane_state, base)
@@ -130,6 +137,9 @@ struct mdp5_crtc_state {
130137
* writing CTL[n].START until encoder->enable()
131138
*/
132139
bool defer_start;
140+
141+
u64 new_crtc_bw;
142+
u64 old_crtc_bw;
133143
};
134144
#define to_mdp5_crtc_state(x) \
135145
container_of(x, struct mdp5_crtc_state, base)
@@ -292,6 +302,8 @@ void mdp5_encoder_set_intf_mode(struct drm_encoder *encoder, bool cmd_mode);
292302
int mdp5_encoder_get_linecount(struct drm_encoder *encoder);
293303
u32 mdp5_encoder_get_framecount(struct drm_encoder *encoder);
294304

305+
void mdp5_kms_set_bandwidth(struct mdp5_kms *mdp5_kms);
306+
295307
#ifdef CONFIG_DRM_MSM_DSI
296308
void mdp5_cmd_encoder_mode_set(struct drm_encoder *encoder,
297309
struct drm_display_mode *mode,

0 commit comments

Comments (0)