Skip to content

Commit c9d5737

Browse files
committed
RISC-V: KVM: Add G-stage ioremap() and iounmap() functions
The in-kernel AIA IMSIC support requires on-demand mapping / unmapping of Guest IMSIC address to Host IMSIC guest files. To help achieve this, we add kvm_riscv_stage2_ioremap() and kvm_riscv_stage2_iounmap() functions. These new functions for updating G-stage page table mappings will be called in atomic context so we have a special "in_atomic" parameter for this purpose.

Signed-off-by: Anup Patel <apatel@ventanamicro.com>
Reviewed-by: Atish Patra <atishp@rivosinc.com>
Signed-off-by: Anup Patel <anup@brainfault.org>
1 parent 4ab0e47 commit c9d5737

2 files changed

Lines changed: 19 additions & 4 deletions

File tree

arch/riscv/include/asm/kvm_host.h

Lines changed: 5 additions & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -284,6 +284,11 @@ void kvm_riscv_hfence_vvma_gva(struct kvm *kvm,
284284
void kvm_riscv_hfence_vvma_all(struct kvm *kvm,
285285
unsigned long hbase, unsigned long hmask);
286286

287+
int kvm_riscv_gstage_ioremap(struct kvm *kvm, gpa_t gpa,
288+
phys_addr_t hpa, unsigned long size,
289+
bool writable, bool in_atomic);
290+
void kvm_riscv_gstage_iounmap(struct kvm *kvm, gpa_t gpa,
291+
unsigned long size);
287292
int kvm_riscv_gstage_map(struct kvm_vcpu *vcpu,
288293
struct kvm_memory_slot *memslot,
289294
gpa_t gpa, unsigned long hva, bool is_write);

arch/riscv/kvm/mmu.c

Lines changed: 14 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -343,8 +343,9 @@ static void gstage_wp_memory_region(struct kvm *kvm, int slot)
343343
kvm_flush_remote_tlbs(kvm);
344344
}
345345

346-
static int gstage_ioremap(struct kvm *kvm, gpa_t gpa, phys_addr_t hpa,
347-
unsigned long size, bool writable)
346+
int kvm_riscv_gstage_ioremap(struct kvm *kvm, gpa_t gpa,
347+
phys_addr_t hpa, unsigned long size,
348+
bool writable, bool in_atomic)
348349
{
349350
pte_t pte;
350351
int ret = 0;
@@ -353,6 +354,7 @@ static int gstage_ioremap(struct kvm *kvm, gpa_t gpa, phys_addr_t hpa,
353354
struct kvm_mmu_memory_cache pcache;
354355

355356
memset(&pcache, 0, sizeof(pcache));
357+
pcache.gfp_custom = (in_atomic) ? GFP_ATOMIC | __GFP_ACCOUNT : 0;
356358
pcache.gfp_zero = __GFP_ZERO;
357359

358360
end = (gpa + size + PAGE_SIZE - 1) & PAGE_MASK;
@@ -382,6 +384,13 @@ static int gstage_ioremap(struct kvm *kvm, gpa_t gpa, phys_addr_t hpa,
382384
return ret;
383385
}
384386

387+
void kvm_riscv_gstage_iounmap(struct kvm *kvm, gpa_t gpa, unsigned long size)
388+
{
389+
spin_lock(&kvm->mmu_lock);
390+
gstage_unmap_range(kvm, gpa, size, false);
391+
spin_unlock(&kvm->mmu_lock);
392+
}
393+
385394
void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
386395
struct kvm_memory_slot *slot,
387396
gfn_t gfn_offset,
@@ -517,8 +526,9 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
517526
goto out;
518527
}
519528

520-
ret = gstage_ioremap(kvm, gpa, pa,
521-
vm_end - vm_start, writable);
529+
ret = kvm_riscv_gstage_ioremap(kvm, gpa, pa,
530+
vm_end - vm_start,
531+
writable, false);
522532
if (ret)
523533
break;
524534
}

0 commit comments

Comments (0)