Skip to content

Commit 887cf6a

Browse files
YongWu-HF authored and joergroedel committed
iommu/mediatek: Only adjust code about register base
No functional change. Use "base" instead of data->base. This avoids touching too many lines in the next patches.

Signed-off-by: Yong Wu <yong.wu@mediatek.com>
Reviewed-by: AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com>
Reviewed-by: Matthias Brugger <matthias.bgg@gmail.com>
Link: https://lore.kernel.org/r/20220503071427.2285-25-yong.wu@mediatek.com
Signed-off-by: Joerg Roedel <jroedel@suse.de>
1 parent ef68a19 commit 887cf6a

1 file changed

Lines changed: 27 additions & 24 deletions

File tree

drivers/iommu/mtk_iommu.c

Lines changed: 27 additions & 24 deletions
Original file line numberDiff line numberDiff line change
@@ -227,12 +227,12 @@ static struct mtk_iommu_domain *to_mtk_domain(struct iommu_domain *dom)
227227

228228
static void mtk_iommu_tlb_flush_all(struct mtk_iommu_data *data)
229229
{
230+
void __iomem *base = data->base;
230231
unsigned long flags;
231232

232233
spin_lock_irqsave(&data->tlb_lock, flags);
233-
writel_relaxed(F_INVLD_EN1 | F_INVLD_EN0,
234-
data->base + data->plat_data->inv_sel_reg);
235-
writel_relaxed(F_ALL_INVLD, data->base + REG_MMU_INVALIDATE);
234+
writel_relaxed(F_INVLD_EN1 | F_INVLD_EN0, base + data->plat_data->inv_sel_reg);
235+
writel_relaxed(F_ALL_INVLD, base + REG_MMU_INVALIDATE);
236236
wmb(); /* Make sure the tlb flush all done */
237237
spin_unlock_irqrestore(&data->tlb_lock, flags);
238238
}
@@ -243,6 +243,7 @@ static void mtk_iommu_tlb_flush_range_sync(unsigned long iova, size_t size,
243243
struct list_head *head = data->hw_list;
244244
bool check_pm_status;
245245
unsigned long flags;
246+
void __iomem *base;
246247
int ret;
247248
u32 tmp;
248249

@@ -269,23 +270,23 @@ static void mtk_iommu_tlb_flush_range_sync(unsigned long iova, size_t size,
269270
continue;
270271
}
271272

273+
base = data->base;
274+
272275
spin_lock_irqsave(&data->tlb_lock, flags);
273276
writel_relaxed(F_INVLD_EN1 | F_INVLD_EN0,
274-
data->base + data->plat_data->inv_sel_reg);
277+
base + data->plat_data->inv_sel_reg);
275278

276-
writel_relaxed(MTK_IOMMU_TLB_ADDR(iova),
277-
data->base + REG_MMU_INVLD_START_A);
279+
writel_relaxed(MTK_IOMMU_TLB_ADDR(iova), base + REG_MMU_INVLD_START_A);
278280
writel_relaxed(MTK_IOMMU_TLB_ADDR(iova + size - 1),
279-
data->base + REG_MMU_INVLD_END_A);
280-
writel_relaxed(F_MMU_INV_RANGE,
281-
data->base + REG_MMU_INVALIDATE);
281+
base + REG_MMU_INVLD_END_A);
282+
writel_relaxed(F_MMU_INV_RANGE, base + REG_MMU_INVALIDATE);
282283

283284
/* tlb sync */
284-
ret = readl_poll_timeout_atomic(data->base + REG_MMU_CPE_DONE,
285+
ret = readl_poll_timeout_atomic(base + REG_MMU_CPE_DONE,
285286
tmp, tmp != 0, 10, 1000);
286287

287288
/* Clear the CPE status */
288-
writel_relaxed(0, data->base + REG_MMU_CPE_DONE);
289+
writel_relaxed(0, base + REG_MMU_CPE_DONE);
289290
spin_unlock_irqrestore(&data->tlb_lock, flags);
290291

291292
if (ret) {
@@ -305,36 +306,38 @@ static irqreturn_t mtk_iommu_isr(int irq, void *dev_id)
305306
struct mtk_iommu_domain *dom = data->m4u_dom;
306307
unsigned int fault_larb = MTK_INVALID_LARBID, fault_port = 0, sub_comm = 0;
307308
u32 int_state, regval, va34_32, pa34_32;
309+
const struct mtk_iommu_plat_data *plat_data = data->plat_data;
310+
void __iomem *base = data->base;
308311
u64 fault_iova, fault_pa;
309312
bool layer, write;
310313

311314
/* Read error info from registers */
312-
int_state = readl_relaxed(data->base + REG_MMU_FAULT_ST1);
315+
int_state = readl_relaxed(base + REG_MMU_FAULT_ST1);
313316
if (int_state & F_REG_MMU0_FAULT_MASK) {
314-
regval = readl_relaxed(data->base + REG_MMU0_INT_ID);
315-
fault_iova = readl_relaxed(data->base + REG_MMU0_FAULT_VA);
316-
fault_pa = readl_relaxed(data->base + REG_MMU0_INVLD_PA);
317+
regval = readl_relaxed(base + REG_MMU0_INT_ID);
318+
fault_iova = readl_relaxed(base + REG_MMU0_FAULT_VA);
319+
fault_pa = readl_relaxed(base + REG_MMU0_INVLD_PA);
317320
} else {
318-
regval = readl_relaxed(data->base + REG_MMU1_INT_ID);
319-
fault_iova = readl_relaxed(data->base + REG_MMU1_FAULT_VA);
320-
fault_pa = readl_relaxed(data->base + REG_MMU1_INVLD_PA);
321+
regval = readl_relaxed(base + REG_MMU1_INT_ID);
322+
fault_iova = readl_relaxed(base + REG_MMU1_FAULT_VA);
323+
fault_pa = readl_relaxed(base + REG_MMU1_INVLD_PA);
321324
}
322325
layer = fault_iova & F_MMU_FAULT_VA_LAYER_BIT;
323326
write = fault_iova & F_MMU_FAULT_VA_WRITE_BIT;
324-
if (MTK_IOMMU_HAS_FLAG(data->plat_data, IOVA_34_EN)) {
327+
if (MTK_IOMMU_HAS_FLAG(plat_data, IOVA_34_EN)) {
325328
va34_32 = FIELD_GET(F_MMU_INVAL_VA_34_32_MASK, fault_iova);
326329
fault_iova = fault_iova & F_MMU_INVAL_VA_31_12_MASK;
327330
fault_iova |= (u64)va34_32 << 32;
328331
}
329332
pa34_32 = FIELD_GET(F_MMU_INVAL_PA_34_32_MASK, fault_iova);
330333
fault_pa |= (u64)pa34_32 << 32;
331334

332-
if (MTK_IOMMU_IS_TYPE(data->plat_data, MTK_IOMMU_TYPE_MM)) {
335+
if (MTK_IOMMU_IS_TYPE(plat_data, MTK_IOMMU_TYPE_MM)) {
333336
fault_port = F_MMU_INT_ID_PORT_ID(regval);
334-
if (MTK_IOMMU_HAS_FLAG(data->plat_data, HAS_SUB_COMM_2BITS)) {
337+
if (MTK_IOMMU_HAS_FLAG(plat_data, HAS_SUB_COMM_2BITS)) {
335338
fault_larb = F_MMU_INT_ID_COMM_ID(regval);
336339
sub_comm = F_MMU_INT_ID_SUB_COMM_ID(regval);
337-
} else if (MTK_IOMMU_HAS_FLAG(data->plat_data, HAS_SUB_COMM_3BITS)) {
340+
} else if (MTK_IOMMU_HAS_FLAG(plat_data, HAS_SUB_COMM_3BITS)) {
338341
fault_larb = F_MMU_INT_ID_COMM_ID_EXT(regval);
339342
sub_comm = F_MMU_INT_ID_SUB_COMM_ID_EXT(regval);
340343
} else {
@@ -353,9 +356,9 @@ static irqreturn_t mtk_iommu_isr(int irq, void *dev_id)
353356
}
354357

355358
/* Interrupt clear */
356-
regval = readl_relaxed(data->base + REG_MMU_INT_CONTROL0);
359+
regval = readl_relaxed(base + REG_MMU_INT_CONTROL0);
357360
regval |= F_INT_CLR_BIT;
358-
writel_relaxed(regval, data->base + REG_MMU_INT_CONTROL0);
361+
writel_relaxed(regval, base + REG_MMU_INT_CONTROL0);
359362

360363
mtk_iommu_tlb_flush_all(data);
361364

0 commit comments

Comments
 (0)