
Commit f035b44

RISC-V: KVM: Introduce struct kvm_gstage_mapping
Introduce struct kvm_gstage_mapping, which represents a g-stage mapping at a particular g-stage page table level. Also, update kvm_riscv_gstage_map() to return the g-stage mapping upon success.

Signed-off-by: Anup Patel <apatel@ventanamicro.com>
Reviewed-by: Atish Patra <atishp@rivosinc.com>
Tested-by: Atish Patra <atishp@rivosinc.com>
Reviewed-by: Nutty Liu <liujingqi@lanxincomputing.com>
Link: https://lore.kernel.org/r/20250618113532.471448-10-apatel@ventanamicro.com
Signed-off-by: Anup Patel <anup@brainfault.org>
1 parent 4ecbd3e commit f035b44

3 files changed

Lines changed: 43 additions & 27 deletions


arch/riscv/include/asm/kvm_mmu.h

Lines changed: 8 additions & 1 deletion
@@ -8,14 +8,21 @@
 
 #include <linux/kvm_types.h>
 
+struct kvm_gstage_mapping {
+	gpa_t addr;
+	pte_t pte;
+	u32 level;
+};
+
 int kvm_riscv_gstage_ioremap(struct kvm *kvm, gpa_t gpa,
 			     phys_addr_t hpa, unsigned long size,
 			     bool writable, bool in_atomic);
 void kvm_riscv_gstage_iounmap(struct kvm *kvm, gpa_t gpa,
 			      unsigned long size);
 int kvm_riscv_gstage_map(struct kvm_vcpu *vcpu,
 			 struct kvm_memory_slot *memslot,
-			 gpa_t gpa, unsigned long hva, bool is_write);
+			 gpa_t gpa, unsigned long hva, bool is_write,
+			 struct kvm_gstage_mapping *out_map);
 int kvm_riscv_gstage_alloc_pgd(struct kvm *kvm);
 void kvm_riscv_gstage_free_pgd(struct kvm *kvm);
 void kvm_riscv_gstage_update_hgatp(struct kvm_vcpu *vcpu);
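
For orientation, here is a minimal caller-side sketch (not part of this patch) of how the updated prototype is meant to be used. The wrapper function name and the surrounding fault-path context (vcpu, memslot, gpa, hva, is_write already resolved) are assumptions for illustration only:

/*
 * Illustrative only: assumes the usual fault-path context, as in
 * gstage_page_fault(), where vcpu, memslot, gpa, hva and is_write
 * have already been resolved.
 */
static int example_map_and_inspect(struct kvm_vcpu *vcpu,
				   struct kvm_memory_slot *memslot,
				   gpa_t gpa, unsigned long hva, bool is_write)
{
	struct kvm_gstage_mapping map;
	int ret;

	/* kvm_riscv_gstage_map() zeroes 'map' first, then fills it on success. */
	ret = kvm_riscv_gstage_map(vcpu, memslot, gpa, hva, is_write, &map);
	if (ret < 0)
		return ret;

	/*
	 * On success the caller can see exactly what was installed:
	 *   map.addr  - guest physical address of the mapping
	 *   map.pte   - the leaf PTE that was written
	 *   map.level - the page table level (0 = base page size)
	 */
	return ret;
}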

arch/riscv/kvm/mmu.c

Lines changed: 33 additions & 25 deletions
@@ -135,18 +135,18 @@ static void gstage_remote_tlb_flush(struct kvm *kvm, u32 level, gpa_t addr)
 	kvm_riscv_hfence_gvma_vmid_gpa(kvm, -1UL, 0, addr, BIT(order), order);
 }
 
-static int gstage_set_pte(struct kvm *kvm, u32 level,
-			  struct kvm_mmu_memory_cache *pcache,
-			  gpa_t addr, const pte_t *new_pte)
+static int gstage_set_pte(struct kvm *kvm,
+			  struct kvm_mmu_memory_cache *pcache,
+			  const struct kvm_gstage_mapping *map)
 {
 	u32 current_level = gstage_pgd_levels - 1;
 	pte_t *next_ptep = (pte_t *)kvm->arch.pgd;
-	pte_t *ptep = &next_ptep[gstage_pte_index(addr, current_level)];
+	pte_t *ptep = &next_ptep[gstage_pte_index(map->addr, current_level)];
 
-	if (current_level < level)
+	if (current_level < map->level)
 		return -EINVAL;
 
-	while (current_level != level) {
+	while (current_level != map->level) {
 		if (gstage_pte_leaf(ptep))
 			return -EEXIST;
 
@@ -165,13 +165,13 @@ static int gstage_set_pte(struct kvm *kvm, u32 level,
 		}
 
 		current_level--;
-		ptep = &next_ptep[gstage_pte_index(addr, current_level)];
+		ptep = &next_ptep[gstage_pte_index(map->addr, current_level)];
 	}
 
-	if (pte_val(*ptep) != pte_val(*new_pte)) {
-		set_pte(ptep, *new_pte);
+	if (pte_val(*ptep) != pte_val(map->pte)) {
+		set_pte(ptep, map->pte);
 		if (gstage_pte_leaf(ptep))
-			gstage_remote_tlb_flush(kvm, current_level, addr);
+			gstage_remote_tlb_flush(kvm, current_level, map->addr);
 	}
 
 	return 0;
@@ -181,14 +181,16 @@ static int gstage_map_page(struct kvm *kvm,
 			   struct kvm_mmu_memory_cache *pcache,
 			   gpa_t gpa, phys_addr_t hpa,
 			   unsigned long page_size,
-			   bool page_rdonly, bool page_exec)
+			   bool page_rdonly, bool page_exec,
+			   struct kvm_gstage_mapping *out_map)
 {
-	int ret;
-	u32 level = 0;
-	pte_t new_pte;
 	pgprot_t prot;
+	int ret;
 
-	ret = gstage_page_size_to_level(page_size, &level);
+	out_map->addr = gpa;
+	out_map->level = 0;
+
+	ret = gstage_page_size_to_level(page_size, &out_map->level);
 	if (ret)
 		return ret;
 
@@ -216,10 +218,10 @@ static int gstage_map_page(struct kvm *kvm,
 		else
 			prot = PAGE_WRITE;
 	}
-	new_pte = pfn_pte(PFN_DOWN(hpa), prot);
-	new_pte = pte_mkdirty(new_pte);
+	out_map->pte = pfn_pte(PFN_DOWN(hpa), prot);
+	out_map->pte = pte_mkdirty(out_map->pte);
 
-	return gstage_set_pte(kvm, level, pcache, gpa, &new_pte);
+	return gstage_set_pte(kvm, pcache, out_map);
 }
 
 enum gstage_op {
@@ -352,30 +354,32 @@ int kvm_riscv_gstage_ioremap(struct kvm *kvm, gpa_t gpa,
 			     phys_addr_t hpa, unsigned long size,
 			     bool writable, bool in_atomic)
 {
-	pte_t pte;
 	int ret = 0;
 	unsigned long pfn;
 	phys_addr_t addr, end;
 	struct kvm_mmu_memory_cache pcache = {
 		.gfp_custom = (in_atomic) ? GFP_ATOMIC | __GFP_ACCOUNT : 0,
 		.gfp_zero = __GFP_ZERO,
 	};
+	struct kvm_gstage_mapping map;
 
 	end = (gpa + size + PAGE_SIZE - 1) & PAGE_MASK;
 	pfn = __phys_to_pfn(hpa);
 
 	for (addr = gpa; addr < end; addr += PAGE_SIZE) {
-		pte = pfn_pte(pfn, PAGE_KERNEL_IO);
+		map.addr = addr;
+		map.pte = pfn_pte(pfn, PAGE_KERNEL_IO);
+		map.level = 0;
 
 		if (!writable)
-			pte = pte_wrprotect(pte);
+			map.pte = pte_wrprotect(map.pte);
 
 		ret = kvm_mmu_topup_memory_cache(&pcache, gstage_pgd_levels);
 		if (ret)
 			goto out;
 
 		spin_lock(&kvm->mmu_lock);
-		ret = gstage_set_pte(kvm, 0, &pcache, addr, &pte);
+		ret = gstage_set_pte(kvm, &pcache, &map);
 		spin_unlock(&kvm->mmu_lock);
 		if (ret)
 			goto out;
@@ -593,7 +597,8 @@ bool kvm_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
 
 int kvm_riscv_gstage_map(struct kvm_vcpu *vcpu,
 			 struct kvm_memory_slot *memslot,
-			 gpa_t gpa, unsigned long hva, bool is_write)
+			 gpa_t gpa, unsigned long hva, bool is_write,
+			 struct kvm_gstage_mapping *out_map)
 {
 	int ret;
 	kvm_pfn_t hfn;
@@ -608,6 +613,9 @@ int kvm_riscv_gstage_map(struct kvm_vcpu *vcpu,
 	unsigned long vma_pagesize, mmu_seq;
 	struct page *page;
 
+	/* Setup initial state of output mapping */
+	memset(out_map, 0, sizeof(*out_map));
+
 	/* We need minimum second+third level pages */
 	ret = kvm_mmu_topup_memory_cache(pcache, gstage_pgd_levels);
 	if (ret) {
@@ -677,10 +685,10 @@ int kvm_riscv_gstage_map(struct kvm_vcpu *vcpu,
 	if (writable) {
 		mark_page_dirty(kvm, gfn);
 		ret = gstage_map_page(kvm, pcache, gpa, hfn << PAGE_SHIFT,
-				      vma_pagesize, false, true);
+				      vma_pagesize, false, true, out_map);
 	} else {
 		ret = gstage_map_page(kvm, pcache, gpa, hfn << PAGE_SHIFT,
-				      vma_pagesize, true, true);
+				      vma_pagesize, true, true, out_map);
 	}
 
 	if (ret)
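
The ioremap hunk above shows the new calling convention for gstage_set_pte(): the caller now describes the entire mapping in a struct kvm_gstage_mapping and passes it in, instead of handing over level, address and PTE as separate arguments. A condensed sketch of that pattern follows, assuming the same local variables (kvm, pcache, gpa, hpa, ret) as in kvm_riscv_gstage_ioremap():

	struct kvm_gstage_mapping map;

	map.addr  = gpa;				/* guest physical address to map */
	map.pte   = pfn_pte(PFN_DOWN(hpa), PAGE_KERNEL_IO);
	map.level = 0;					/* level 0 = base page size */

	spin_lock(&kvm->mmu_lock);
	/* gstage_set_pte() walks down to map.level and installs map.pte at map.addr */
	ret = gstage_set_pte(kvm, &pcache, &map);
	spin_unlock(&kvm->mmu_lock);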

arch/riscv/kvm/vcpu_exit.c

Lines changed: 2 additions & 1 deletion
@@ -15,6 +15,7 @@
 static int gstage_page_fault(struct kvm_vcpu *vcpu, struct kvm_run *run,
 			     struct kvm_cpu_trap *trap)
 {
+	struct kvm_gstage_mapping host_map;
 	struct kvm_memory_slot *memslot;
 	unsigned long hva, fault_addr;
 	bool writable;
@@ -43,7 +44,7 @@ static int gstage_page_fault(struct kvm_vcpu *vcpu, struct kvm_run *run,
 	}
 
 	ret = kvm_riscv_gstage_map(vcpu, memslot, fault_addr, hva,
-				   (trap->scause == EXC_STORE_GUEST_PAGE_FAULT) ? true : false);
+				   (trap->scause == EXC_STORE_GUEST_PAGE_FAULT) ? true : false, &host_map);
 	if (ret < 0)
 		return ret;
 
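
This patch only fills host_map in the fault path and does not consume it yet. A hypothetical follow-up consumer (illustration only, not from the patch) could inspect the returned mapping right after the call; on RISC-V each g-stage level adds 9 index bits, so level 0, 1 and 2 correspond to 4 KiB, 2 MiB and 1 GiB mappings:

	/* Hypothetical use of host_map, not part of this patch. */
	if (ret >= 0 && host_map.level > 0)
		pr_debug("g-stage fault mapped gpa 0x%llx with a %lu-byte mapping\n",
			 host_map.addr, PAGE_SIZE << (9 * host_map.level));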
