Skip to content

Commit c53a1ae

Browse files
lumagrobclark
authored and committed
drm/msm/dpu: move resource allocation to the _probe function
To let the probe function bail early if any of the resources is unavailable, move resource allocattion from kms_init directly to the probe callback. While we are at it, replace irq_of_parse_and_map() with platform_get_irq(). This also drops devm_iounmap() calls. It is too early to have them _dpu_kms_hw_destroy() (or it will break if for some reason DPU device is rebound into the composite device) and it doesn't make sense to have them in dpu_dev_remove (as everything will be torn down by the devres anyway after the device is unbound from the driver). Reviewed-by: Rob Clark <robdclark@gmail.com> Signed-off-by: Dmitry Baryshkov <dmitry.baryshkov@linaro.org> Patchwork: https://patchwork.freedesktop.org/patch/561629/ Signed-off-by: Rob Clark <robdclark@chromium.org>
1 parent a2ab5d5 commit c53a1ae

1 file changed

Lines changed: 61 additions & 72 deletions

File tree

drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c

Lines changed: 61 additions & 72 deletions
Original file line numberDiff line numberDiff line change
@@ -389,8 +389,7 @@ static int dpu_kms_parse_data_bus_icc_path(struct dpu_kms *dpu_kms)
389389
{
390390
struct icc_path *path0;
391391
struct icc_path *path1;
392-
struct drm_device *dev = dpu_kms->dev;
393-
struct device *dpu_dev = dev->dev;
392+
struct device *dpu_dev = &dpu_kms->pdev->dev;
394393

395394
path0 = msm_icc_get(dpu_dev, "mdp0-mem");
396395
path1 = msm_icc_get(dpu_dev, "mdp1-mem");
@@ -829,21 +828,9 @@ static void _dpu_kms_hw_destroy(struct dpu_kms *dpu_kms)
829828

830829
dpu_kms->catalog = NULL;
831830

832-
if (dpu_kms->vbif[VBIF_NRT])
833-
devm_iounmap(&dpu_kms->pdev->dev, dpu_kms->vbif[VBIF_NRT]);
834-
dpu_kms->vbif[VBIF_NRT] = NULL;
835-
836-
if (dpu_kms->vbif[VBIF_RT])
837-
devm_iounmap(&dpu_kms->pdev->dev, dpu_kms->vbif[VBIF_RT]);
838-
dpu_kms->vbif[VBIF_RT] = NULL;
839-
840831
if (dpu_kms->hw_mdp)
841832
dpu_hw_mdp_destroy(dpu_kms->hw_mdp);
842833
dpu_kms->hw_mdp = NULL;
843-
844-
if (dpu_kms->mmio)
845-
devm_iounmap(&dpu_kms->pdev->dev, dpu_kms->mmio);
846-
dpu_kms->mmio = NULL;
847834
}
848835

849836
static void dpu_kms_destroy(struct msm_kms *kms)
@@ -1079,30 +1066,6 @@ static int dpu_kms_hw_init(struct msm_kms *kms)
10791066

10801067
atomic_set(&dpu_kms->bandwidth_ref, 0);
10811068

1082-
dpu_kms->mmio = msm_ioremap(dpu_kms->pdev, "mdp");
1083-
if (IS_ERR(dpu_kms->mmio)) {
1084-
rc = PTR_ERR(dpu_kms->mmio);
1085-
DPU_ERROR("mdp register memory map failed: %d\n", rc);
1086-
dpu_kms->mmio = NULL;
1087-
goto error;
1088-
}
1089-
DRM_DEBUG("mapped dpu address space @%pK\n", dpu_kms->mmio);
1090-
1091-
dpu_kms->vbif[VBIF_RT] = msm_ioremap(dpu_kms->pdev, "vbif");
1092-
if (IS_ERR(dpu_kms->vbif[VBIF_RT])) {
1093-
rc = PTR_ERR(dpu_kms->vbif[VBIF_RT]);
1094-
DPU_ERROR("vbif register memory map failed: %d\n", rc);
1095-
dpu_kms->vbif[VBIF_RT] = NULL;
1096-
goto error;
1097-
}
1098-
dpu_kms->vbif[VBIF_NRT] = msm_ioremap_quiet(dpu_kms->pdev, "vbif_nrt");
1099-
if (IS_ERR(dpu_kms->vbif[VBIF_NRT])) {
1100-
dpu_kms->vbif[VBIF_NRT] = NULL;
1101-
DPU_DEBUG("VBIF NRT is not defined");
1102-
}
1103-
1104-
dpu_kms_parse_data_bus_icc_path(dpu_kms);
1105-
11061069
rc = pm_runtime_resume_and_get(&dpu_kms->pdev->dev);
11071070
if (rc < 0)
11081071
goto error;
@@ -1241,33 +1204,11 @@ static int dpu_kms_init(struct drm_device *ddev)
12411204
struct msm_drm_private *priv = ddev->dev_private;
12421205
struct device *dev = ddev->dev;
12431206
struct platform_device *pdev = to_platform_device(dev);
1244-
struct dpu_kms *dpu_kms;
1245-
int irq;
1207+
struct dpu_kms *dpu_kms = to_dpu_kms(priv->kms);
12461208
struct dev_pm_opp *opp;
12471209
int ret = 0;
12481210
unsigned long max_freq = ULONG_MAX;
12491211

1250-
dpu_kms = devm_kzalloc(&pdev->dev, sizeof(*dpu_kms), GFP_KERNEL);
1251-
if (!dpu_kms)
1252-
return -ENOMEM;
1253-
1254-
ret = devm_pm_opp_set_clkname(dev, "core");
1255-
if (ret)
1256-
return ret;
1257-
/* OPP table is optional */
1258-
ret = devm_pm_opp_of_add_table(dev);
1259-
if (ret && ret != -ENODEV) {
1260-
dev_err(dev, "invalid OPP table in device tree\n");
1261-
return ret;
1262-
}
1263-
1264-
ret = devm_clk_bulk_get_all(&pdev->dev, &dpu_kms->clocks);
1265-
if (ret < 0) {
1266-
DPU_ERROR("failed to parse clocks, ret=%d\n", ret);
1267-
return ret;
1268-
}
1269-
dpu_kms->num_clocks = ret;
1270-
12711212
opp = dev_pm_opp_find_freq_floor(dev, &max_freq);
12721213
if (!IS_ERR(opp))
12731214
dev_pm_opp_put(opp);
@@ -1280,26 +1221,74 @@ static int dpu_kms_init(struct drm_device *ddev)
12801221
return ret;
12811222
}
12821223
dpu_kms->dev = ddev;
1283-
dpu_kms->pdev = pdev;
12841224

12851225
pm_runtime_enable(&pdev->dev);
12861226
dpu_kms->rpm_enabled = true;
12871227

1288-
priv->kms = &dpu_kms->base;
1289-
1290-
irq = irq_of_parse_and_map(dpu_kms->pdev->dev.of_node, 0);
1291-
if (!irq) {
1292-
DPU_ERROR("failed to get irq\n");
1293-
return -EINVAL;
1294-
}
1295-
dpu_kms->base.irq = irq;
1296-
12971228
return 0;
12981229
}
12991230

13001231
static int dpu_dev_probe(struct platform_device *pdev)
13011232
{
1302-
return msm_drv_probe(&pdev->dev, dpu_kms_init, NULL);
1233+
struct device *dev = &pdev->dev;
1234+
struct dpu_kms *dpu_kms;
1235+
int irq;
1236+
int ret = 0;
1237+
1238+
dpu_kms = devm_kzalloc(dev, sizeof(*dpu_kms), GFP_KERNEL);
1239+
if (!dpu_kms)
1240+
return -ENOMEM;
1241+
1242+
dpu_kms->pdev = pdev;
1243+
1244+
ret = devm_pm_opp_set_clkname(dev, "core");
1245+
if (ret)
1246+
return ret;
1247+
/* OPP table is optional */
1248+
ret = devm_pm_opp_of_add_table(dev);
1249+
if (ret && ret != -ENODEV)
1250+
return dev_err_probe(dev, ret, "invalid OPP table in device tree\n");
1251+
1252+
ret = devm_clk_bulk_get_all(&pdev->dev, &dpu_kms->clocks);
1253+
if (ret < 0)
1254+
return dev_err_probe(dev, ret, "failed to parse clocks\n");
1255+
1256+
dpu_kms->num_clocks = ret;
1257+
1258+
irq = platform_get_irq(pdev, 0);
1259+
if (irq < 0)
1260+
return dev_err_probe(dev, irq, "failed to get irq\n");
1261+
1262+
dpu_kms->base.irq = irq;
1263+
1264+
dpu_kms->mmio = msm_ioremap(pdev, "mdp");
1265+
if (IS_ERR(dpu_kms->mmio)) {
1266+
ret = PTR_ERR(dpu_kms->mmio);
1267+
DPU_ERROR("mdp register memory map failed: %d\n", ret);
1268+
dpu_kms->mmio = NULL;
1269+
return ret;
1270+
}
1271+
DRM_DEBUG("mapped dpu address space @%pK\n", dpu_kms->mmio);
1272+
1273+
dpu_kms->vbif[VBIF_RT] = msm_ioremap(pdev, "vbif");
1274+
if (IS_ERR(dpu_kms->vbif[VBIF_RT])) {
1275+
ret = PTR_ERR(dpu_kms->vbif[VBIF_RT]);
1276+
DPU_ERROR("vbif register memory map failed: %d\n", ret);
1277+
dpu_kms->vbif[VBIF_RT] = NULL;
1278+
return ret;
1279+
}
1280+
1281+
dpu_kms->vbif[VBIF_NRT] = msm_ioremap_quiet(pdev, "vbif_nrt");
1282+
if (IS_ERR(dpu_kms->vbif[VBIF_NRT])) {
1283+
dpu_kms->vbif[VBIF_NRT] = NULL;
1284+
DPU_DEBUG("VBIF NRT is not defined");
1285+
}
1286+
1287+
ret = dpu_kms_parse_data_bus_icc_path(dpu_kms);
1288+
if (ret)
1289+
return ret;
1290+
1291+
return msm_drv_probe(&pdev->dev, dpu_kms_init, &dpu_kms->base);
13031292
}
13041293

13051294
static int dpu_dev_remove(struct platform_device *pdev)

0 commit comments

Comments
 (0)