
Commit 18d2199

avpatel authored and Marc Zyngier committed
RISC-V: Use IPIs for remote TLB flush when possible
If we have a specialized interrupt controller (such as the AIA IMSIC) that allows supervisor mode to inject IPIs directly, without any assistance from M-mode or HS-mode, then we can do remote TLB flushes directly from supervisor mode instead of going through SBI RFENCE calls. This patch extends the remote TLB flush functions to use supervisor mode IPIs whenever direct supervisor mode IPIs are supported by the interrupt controller.

Signed-off-by: Anup Patel <apatel@ventanamicro.com>
Reviewed-by: Atish Patra <atishp@rivosinc.com>
Acked-by: Palmer Dabbelt <palmer@rivosinc.com>
Signed-off-by: Marc Zyngier <maz@kernel.org>
Link: https://lore.kernel.org/r/20230328035223.1480939-6-apatel@ventanamicro.com
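In short, the change puts a runtime dispatch in front of every remote fence: when the interrupt controller lets S-mode send IPIs without trapping to firmware, the flush runs the local flush handler on each target hart via on_each_cpu_mask(); otherwise the existing SBI RFENCE path is kept. A distilled sketch of that dispatch follows (all identifiers are taken from the diff below; remote_sfence is a hypothetical name for illustration, not a function this patch adds):

	/* Sketch only: condensed from the __flush_tlb_range() change below. */
	static void remote_sfence(struct cpumask *cmask, unsigned long start,
				  unsigned long size, unsigned long stride)
	{
		if (riscv_use_ipi_for_rfence()) {
			/* Direct S-mode IPIs available: cross-call the local flush. */
			struct flush_tlb_range_data ftd = {
				.start = start, .size = size, .stride = stride,
			};

			on_each_cpu_mask(cmask, __ipi_flush_tlb_range, &ftd, 1);
		} else {
			/* No direct IPIs: fall back to an SBI ecall into firmware. */
			sbi_remote_sfence_vma(cmask, start, size);
		}
	}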
1 parent fb0f3d2 commit 18d2199

1 file changed: arch/riscv/mm/tlbflush.c

Lines changed: 78 additions & 15 deletions
@@ -23,14 +23,62 @@ static inline void local_flush_tlb_page_asid(unsigned long addr,
 		: "memory");
 }
 
+static inline void local_flush_tlb_range(unsigned long start,
+		unsigned long size, unsigned long stride)
+{
+	if (size <= stride)
+		local_flush_tlb_page(start);
+	else
+		local_flush_tlb_all();
+}
+
+static inline void local_flush_tlb_range_asid(unsigned long start,
+		unsigned long size, unsigned long stride, unsigned long asid)
+{
+	if (size <= stride)
+		local_flush_tlb_page_asid(start, asid);
+	else
+		local_flush_tlb_all_asid(asid);
+}
+
+static void __ipi_flush_tlb_all(void *info)
+{
+	local_flush_tlb_all();
+}
+
 void flush_tlb_all(void)
 {
-	sbi_remote_sfence_vma(NULL, 0, -1);
+	if (riscv_use_ipi_for_rfence())
+		on_each_cpu(__ipi_flush_tlb_all, NULL, 1);
+	else
+		sbi_remote_sfence_vma(NULL, 0, -1);
+}
+
+struct flush_tlb_range_data {
+	unsigned long asid;
+	unsigned long start;
+	unsigned long size;
+	unsigned long stride;
+};
+
+static void __ipi_flush_tlb_range_asid(void *info)
+{
+	struct flush_tlb_range_data *d = info;
+
+	local_flush_tlb_range_asid(d->start, d->size, d->stride, d->asid);
+}
+
+static void __ipi_flush_tlb_range(void *info)
+{
+	struct flush_tlb_range_data *d = info;
+
+	local_flush_tlb_range(d->start, d->size, d->stride);
 }
 
-static void __sbi_tlb_flush_range(struct mm_struct *mm, unsigned long start,
-				  unsigned long size, unsigned long stride)
+static void __flush_tlb_range(struct mm_struct *mm, unsigned long start,
+			      unsigned long size, unsigned long stride)
 {
+	struct flush_tlb_range_data ftd;
 	struct cpumask *cmask = mm_cpumask(mm);
 	unsigned int cpuid;
 	bool broadcast;
@@ -45,19 +93,34 @@ static void __sbi_tlb_flush_range(struct mm_struct *mm, unsigned long start,
 		unsigned long asid = atomic_long_read(&mm->context.id) & asid_mask;
 
 		if (broadcast) {
-			sbi_remote_sfence_vma_asid(cmask, start, size, asid);
-		} else if (size <= stride) {
-			local_flush_tlb_page_asid(start, asid);
+			if (riscv_use_ipi_for_rfence()) {
+				ftd.asid = asid;
+				ftd.start = start;
+				ftd.size = size;
+				ftd.stride = stride;
+				on_each_cpu_mask(cmask,
+						 __ipi_flush_tlb_range_asid,
+						 &ftd, 1);
+			} else
+				sbi_remote_sfence_vma_asid(cmask,
+							   start, size, asid);
 		} else {
-			local_flush_tlb_all_asid(asid);
+			local_flush_tlb_range_asid(start, size, stride, asid);
 		}
 	} else {
 		if (broadcast) {
-			sbi_remote_sfence_vma(cmask, start, size);
-		} else if (size <= stride) {
-			local_flush_tlb_page(start);
+			if (riscv_use_ipi_for_rfence()) {
+				ftd.asid = 0;
+				ftd.start = start;
+				ftd.size = size;
+				ftd.stride = stride;
+				on_each_cpu_mask(cmask,
+						 __ipi_flush_tlb_range,
+						 &ftd, 1);
+			} else
+				sbi_remote_sfence_vma(cmask, start, size);
 		} else {
-			local_flush_tlb_all();
+			local_flush_tlb_range(start, size, stride);
 		}
 	}
 
@@ -66,23 +129,23 @@ static void __sbi_tlb_flush_range(struct mm_struct *mm, unsigned long start,
 
 void flush_tlb_mm(struct mm_struct *mm)
 {
-	__sbi_tlb_flush_range(mm, 0, -1, PAGE_SIZE);
+	__flush_tlb_range(mm, 0, -1, PAGE_SIZE);
 }
 
 void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
 {
-	__sbi_tlb_flush_range(vma->vm_mm, addr, PAGE_SIZE, PAGE_SIZE);
+	__flush_tlb_range(vma->vm_mm, addr, PAGE_SIZE, PAGE_SIZE);
 }
 
 void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
 		     unsigned long end)
 {
-	__sbi_tlb_flush_range(vma->vm_mm, start, end - start, PAGE_SIZE);
+	__flush_tlb_range(vma->vm_mm, start, end - start, PAGE_SIZE);
 }
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 void flush_pmd_tlb_range(struct vm_area_struct *vma, unsigned long start,
 			 unsigned long end)
 {
-	__sbi_tlb_flush_range(vma->vm_mm, start, end - start, PMD_SIZE);
+	__flush_tlb_range(vma->vm_mm, start, end - start, PMD_SIZE);
 }
 #endif
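A note on the flush_tlb_range_data struct introduced above: SMP cross-call helpers such as on_each_cpu_mask() hand the callback a single void *info, so multi-argument requests have to be marshalled through a struct, and passing wait = 1 keeps the caller's stack-allocated struct alive until every hart has run the handler. A minimal sketch of the same pattern (demo_args, demo_work, and demo_call are hypothetical names, not part of this patch):

	/* Hypothetical illustration of the single-void-pointer cross-call pattern. */
	struct demo_args {
		unsigned long start;
		unsigned long size;
	};

	static void demo_work(void *info)
	{
		struct demo_args *args = info;	/* unpack on the remote CPU */

		/* ... act on args->start / args->size locally ... */
	}

	/* Caller side: the struct lives on the stack, which is safe only
	 * because the final (wait) argument is 1, so on_each_cpu_mask()
	 * does not return until demo_work() has finished on every CPU in
	 * the mask. */
	static void demo_call(struct cpumask *cmask)
	{
		struct demo_args d = { .start = 0, .size = -1UL };

		on_each_cpu_mask(cmask, demo_work, &d, 1);
	}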
