Skip to content

Commit 65164fd

Browse files
committed
Merge tag 'kvm-riscv-6.17-2' of https://github.com/kvm-riscv/linux into HEAD
KVM/riscv changes for 6.17 - Enabled ring-based dirty memory tracking - Improved perf kvm stat to report interrupt events - Delegate illegal instruction trap to VS-mode - MMU related improvements for KVM RISC-V for upcoming nested virtualization
2 parents 038d61f + 07a289a commit 65164fd

29 files changed

Lines changed: 1000 additions & 681 deletions

Documentation/virt/kvm/api.rst

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -8387,7 +8387,7 @@ core crystal clock frequency, if a non-zero CPUID 0x15 is exposed to the guest.
83878387
7.36 KVM_CAP_DIRTY_LOG_RING/KVM_CAP_DIRTY_LOG_RING_ACQ_REL
83888388
----------------------------------------------------------
83898389

8390-
:Architectures: x86, arm64
8390+
:Architectures: x86, arm64, riscv
83918391
:Type: vm
83928392
:Parameters: args[0] - size of the dirty log ring
83938393

arch/riscv/include/asm/kvm_aia.h

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -150,7 +150,7 @@ int kvm_riscv_vcpu_aia_rmw_ireg(struct kvm_vcpu *vcpu, unsigned int csr_num,
150150

151151
int kvm_riscv_vcpu_aia_update(struct kvm_vcpu *vcpu);
152152
void kvm_riscv_vcpu_aia_reset(struct kvm_vcpu *vcpu);
153-
int kvm_riscv_vcpu_aia_init(struct kvm_vcpu *vcpu);
153+
void kvm_riscv_vcpu_aia_init(struct kvm_vcpu *vcpu);
154154
void kvm_riscv_vcpu_aia_deinit(struct kvm_vcpu *vcpu);
155155

156156
int kvm_riscv_aia_inject_msi_by_id(struct kvm *kvm, u32 hart_index,
Lines changed: 72 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,72 @@
1+
/* SPDX-License-Identifier: GPL-2.0-only */
2+
/*
3+
* Copyright (C) 2019 Western Digital Corporation or its affiliates.
4+
* Copyright (c) 2025 Ventana Micro Systems Inc.
5+
*/
6+
7+
#ifndef __RISCV_KVM_GSTAGE_H_
8+
#define __RISCV_KVM_GSTAGE_H_
9+
10+
#include <linux/kvm_types.h>
11+
12+
struct kvm_gstage {
13+
struct kvm *kvm;
14+
unsigned long flags;
15+
#define KVM_GSTAGE_FLAGS_LOCAL BIT(0)
16+
unsigned long vmid;
17+
pgd_t *pgd;
18+
};
19+
20+
struct kvm_gstage_mapping {
21+
gpa_t addr;
22+
pte_t pte;
23+
u32 level;
24+
};
25+
26+
#ifdef CONFIG_64BIT
27+
#define kvm_riscv_gstage_index_bits 9
28+
#else
29+
#define kvm_riscv_gstage_index_bits 10
30+
#endif
31+
32+
extern unsigned long kvm_riscv_gstage_mode;
33+
extern unsigned long kvm_riscv_gstage_pgd_levels;
34+
35+
#define kvm_riscv_gstage_pgd_xbits 2
36+
#define kvm_riscv_gstage_pgd_size (1UL << (HGATP_PAGE_SHIFT + kvm_riscv_gstage_pgd_xbits))
37+
#define kvm_riscv_gstage_gpa_bits (HGATP_PAGE_SHIFT + \
38+
(kvm_riscv_gstage_pgd_levels * \
39+
kvm_riscv_gstage_index_bits) + \
40+
kvm_riscv_gstage_pgd_xbits)
41+
#define kvm_riscv_gstage_gpa_size ((gpa_t)(1ULL << kvm_riscv_gstage_gpa_bits))
42+
43+
bool kvm_riscv_gstage_get_leaf(struct kvm_gstage *gstage, gpa_t addr,
44+
pte_t **ptepp, u32 *ptep_level);
45+
46+
int kvm_riscv_gstage_set_pte(struct kvm_gstage *gstage,
47+
struct kvm_mmu_memory_cache *pcache,
48+
const struct kvm_gstage_mapping *map);
49+
50+
int kvm_riscv_gstage_map_page(struct kvm_gstage *gstage,
51+
struct kvm_mmu_memory_cache *pcache,
52+
gpa_t gpa, phys_addr_t hpa, unsigned long page_size,
53+
bool page_rdonly, bool page_exec,
54+
struct kvm_gstage_mapping *out_map);
55+
56+
enum kvm_riscv_gstage_op {
57+
GSTAGE_OP_NOP = 0, /* Nothing */
58+
GSTAGE_OP_CLEAR, /* Clear/Unmap */
59+
GSTAGE_OP_WP, /* Write-protect */
60+
};
61+
62+
void kvm_riscv_gstage_op_pte(struct kvm_gstage *gstage, gpa_t addr,
63+
pte_t *ptep, u32 ptep_level, enum kvm_riscv_gstage_op op);
64+
65+
void kvm_riscv_gstage_unmap_range(struct kvm_gstage *gstage,
66+
gpa_t start, gpa_t size, bool may_block);
67+
68+
void kvm_riscv_gstage_wp_range(struct kvm_gstage *gstage, gpa_t start, gpa_t end);
69+
70+
void kvm_riscv_gstage_mode_detect(void);
71+
72+
#endif

arch/riscv/include/asm/kvm_host.h

Lines changed: 5 additions & 100 deletions
Original file line numberDiff line numberDiff line change
@@ -16,6 +16,8 @@
1616
#include <asm/hwcap.h>
1717
#include <asm/kvm_aia.h>
1818
#include <asm/ptrace.h>
19+
#include <asm/kvm_tlb.h>
20+
#include <asm/kvm_vmid.h>
1921
#include <asm/kvm_vcpu_fp.h>
2022
#include <asm/kvm_vcpu_insn.h>
2123
#include <asm/kvm_vcpu_sbi.h>
@@ -36,14 +38,16 @@
3638
#define KVM_REQ_UPDATE_HGATP KVM_ARCH_REQ(2)
3739
#define KVM_REQ_FENCE_I \
3840
KVM_ARCH_REQ_FLAGS(3, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
39-
#define KVM_REQ_HFENCE_GVMA_VMID_ALL KVM_REQ_TLB_FLUSH
4041
#define KVM_REQ_HFENCE_VVMA_ALL \
4142
KVM_ARCH_REQ_FLAGS(4, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
4243
#define KVM_REQ_HFENCE \
4344
KVM_ARCH_REQ_FLAGS(5, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
4445
#define KVM_REQ_STEAL_UPDATE KVM_ARCH_REQ(6)
4546

47+
#define __KVM_HAVE_ARCH_FLUSH_REMOTE_TLBS_RANGE
48+
4649
#define KVM_HEDELEG_DEFAULT (BIT(EXC_INST_MISALIGNED) | \
50+
BIT(EXC_INST_ILLEGAL) | \
4751
BIT(EXC_BREAKPOINT) | \
4852
BIT(EXC_SYSCALL) | \
4953
BIT(EXC_INST_PAGE_FAULT) | \
@@ -54,24 +58,6 @@
5458
BIT(IRQ_VS_TIMER) | \
5559
BIT(IRQ_VS_EXT))
5660

57-
enum kvm_riscv_hfence_type {
58-
KVM_RISCV_HFENCE_UNKNOWN = 0,
59-
KVM_RISCV_HFENCE_GVMA_VMID_GPA,
60-
KVM_RISCV_HFENCE_VVMA_ASID_GVA,
61-
KVM_RISCV_HFENCE_VVMA_ASID_ALL,
62-
KVM_RISCV_HFENCE_VVMA_GVA,
63-
};
64-
65-
struct kvm_riscv_hfence {
66-
enum kvm_riscv_hfence_type type;
67-
unsigned long asid;
68-
unsigned long order;
69-
gpa_t addr;
70-
gpa_t size;
71-
};
72-
73-
#define KVM_RISCV_VCPU_MAX_HFENCE 64
74-
7561
struct kvm_vm_stat {
7662
struct kvm_vm_stat_generic generic;
7763
};
@@ -97,15 +83,6 @@ struct kvm_vcpu_stat {
9783
struct kvm_arch_memory_slot {
9884
};
9985

100-
struct kvm_vmid {
101-
/*
102-
* Writes to vmid_version and vmid happen with vmid_lock held
103-
* whereas reads happen without any lock held.
104-
*/
105-
unsigned long vmid_version;
106-
unsigned long vmid;
107-
};
108-
10986
struct kvm_arch {
11087
/* G-stage vmid */
11188
struct kvm_vmid vmid;
@@ -309,77 +286,6 @@ static inline bool kvm_arch_pmi_in_guest(struct kvm_vcpu *vcpu)
309286
static inline void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu) {}
310287
static inline void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu) {}
311288

312-
#define KVM_RISCV_GSTAGE_TLB_MIN_ORDER 12
313-
314-
void kvm_riscv_local_hfence_gvma_vmid_gpa(unsigned long vmid,
315-
gpa_t gpa, gpa_t gpsz,
316-
unsigned long order);
317-
void kvm_riscv_local_hfence_gvma_vmid_all(unsigned long vmid);
318-
void kvm_riscv_local_hfence_gvma_gpa(gpa_t gpa, gpa_t gpsz,
319-
unsigned long order);
320-
void kvm_riscv_local_hfence_gvma_all(void);
321-
void kvm_riscv_local_hfence_vvma_asid_gva(unsigned long vmid,
322-
unsigned long asid,
323-
unsigned long gva,
324-
unsigned long gvsz,
325-
unsigned long order);
326-
void kvm_riscv_local_hfence_vvma_asid_all(unsigned long vmid,
327-
unsigned long asid);
328-
void kvm_riscv_local_hfence_vvma_gva(unsigned long vmid,
329-
unsigned long gva, unsigned long gvsz,
330-
unsigned long order);
331-
void kvm_riscv_local_hfence_vvma_all(unsigned long vmid);
332-
333-
void kvm_riscv_local_tlb_sanitize(struct kvm_vcpu *vcpu);
334-
335-
void kvm_riscv_fence_i_process(struct kvm_vcpu *vcpu);
336-
void kvm_riscv_hfence_gvma_vmid_all_process(struct kvm_vcpu *vcpu);
337-
void kvm_riscv_hfence_vvma_all_process(struct kvm_vcpu *vcpu);
338-
void kvm_riscv_hfence_process(struct kvm_vcpu *vcpu);
339-
340-
void kvm_riscv_fence_i(struct kvm *kvm,
341-
unsigned long hbase, unsigned long hmask);
342-
void kvm_riscv_hfence_gvma_vmid_gpa(struct kvm *kvm,
343-
unsigned long hbase, unsigned long hmask,
344-
gpa_t gpa, gpa_t gpsz,
345-
unsigned long order);
346-
void kvm_riscv_hfence_gvma_vmid_all(struct kvm *kvm,
347-
unsigned long hbase, unsigned long hmask);
348-
void kvm_riscv_hfence_vvma_asid_gva(struct kvm *kvm,
349-
unsigned long hbase, unsigned long hmask,
350-
unsigned long gva, unsigned long gvsz,
351-
unsigned long order, unsigned long asid);
352-
void kvm_riscv_hfence_vvma_asid_all(struct kvm *kvm,
353-
unsigned long hbase, unsigned long hmask,
354-
unsigned long asid);
355-
void kvm_riscv_hfence_vvma_gva(struct kvm *kvm,
356-
unsigned long hbase, unsigned long hmask,
357-
unsigned long gva, unsigned long gvsz,
358-
unsigned long order);
359-
void kvm_riscv_hfence_vvma_all(struct kvm *kvm,
360-
unsigned long hbase, unsigned long hmask);
361-
362-
int kvm_riscv_gstage_ioremap(struct kvm *kvm, gpa_t gpa,
363-
phys_addr_t hpa, unsigned long size,
364-
bool writable, bool in_atomic);
365-
void kvm_riscv_gstage_iounmap(struct kvm *kvm, gpa_t gpa,
366-
unsigned long size);
367-
int kvm_riscv_gstage_map(struct kvm_vcpu *vcpu,
368-
struct kvm_memory_slot *memslot,
369-
gpa_t gpa, unsigned long hva, bool is_write);
370-
int kvm_riscv_gstage_alloc_pgd(struct kvm *kvm);
371-
void kvm_riscv_gstage_free_pgd(struct kvm *kvm);
372-
void kvm_riscv_gstage_update_hgatp(struct kvm_vcpu *vcpu);
373-
void __init kvm_riscv_gstage_mode_detect(void);
374-
unsigned long __init kvm_riscv_gstage_mode(void);
375-
int kvm_riscv_gstage_gpa_bits(void);
376-
377-
void __init kvm_riscv_gstage_vmid_detect(void);
378-
unsigned long kvm_riscv_gstage_vmid_bits(void);
379-
int kvm_riscv_gstage_vmid_init(struct kvm *kvm);
380-
bool kvm_riscv_gstage_vmid_ver_changed(struct kvm_vmid *vmid);
381-
void kvm_riscv_gstage_vmid_update(struct kvm_vcpu *vcpu);
382-
383289
int kvm_riscv_setup_default_irq_routing(struct kvm *kvm, u32 lines);
384290

385291
void __kvm_riscv_unpriv_trap(void);
@@ -415,7 +321,6 @@ void __kvm_riscv_vcpu_power_on(struct kvm_vcpu *vcpu);
415321
void kvm_riscv_vcpu_power_on(struct kvm_vcpu *vcpu);
416322
bool kvm_riscv_vcpu_stopped(struct kvm_vcpu *vcpu);
417323

418-
void kvm_riscv_vcpu_sbi_sta_reset(struct kvm_vcpu *vcpu);
419324
void kvm_riscv_vcpu_record_steal_time(struct kvm_vcpu *vcpu);
420325

421326
#endif /* __RISCV_KVM_HOST_H__ */

arch/riscv/include/asm/kvm_mmu.h

Lines changed: 21 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,21 @@
1+
/* SPDX-License-Identifier: GPL-2.0-only */
2+
/*
3+
* Copyright (c) 2025 Ventana Micro Systems Inc.
4+
*/
5+
6+
#ifndef __RISCV_KVM_MMU_H_
7+
#define __RISCV_KVM_MMU_H_
8+
9+
#include <asm/kvm_gstage.h>
10+
11+
int kvm_riscv_mmu_ioremap(struct kvm *kvm, gpa_t gpa, phys_addr_t hpa,
12+
unsigned long size, bool writable, bool in_atomic);
13+
void kvm_riscv_mmu_iounmap(struct kvm *kvm, gpa_t gpa, unsigned long size);
14+
int kvm_riscv_mmu_map(struct kvm_vcpu *vcpu, struct kvm_memory_slot *memslot,
15+
gpa_t gpa, unsigned long hva, bool is_write,
16+
struct kvm_gstage_mapping *out_map);
17+
int kvm_riscv_mmu_alloc_pgd(struct kvm *kvm);
18+
void kvm_riscv_mmu_free_pgd(struct kvm *kvm);
19+
void kvm_riscv_mmu_update_hgatp(struct kvm_vcpu *vcpu);
20+
21+
#endif

arch/riscv/include/asm/kvm_tlb.h

Lines changed: 84 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,84 @@
1+
/* SPDX-License-Identifier: GPL-2.0-only */
2+
/*
3+
* Copyright (c) 2025 Ventana Micro Systems Inc.
4+
*/
5+
6+
#ifndef __RISCV_KVM_TLB_H_
7+
#define __RISCV_KVM_TLB_H_
8+
9+
#include <linux/kvm_types.h>
10+
11+
enum kvm_riscv_hfence_type {
12+
KVM_RISCV_HFENCE_UNKNOWN = 0,
13+
KVM_RISCV_HFENCE_GVMA_VMID_GPA,
14+
KVM_RISCV_HFENCE_GVMA_VMID_ALL,
15+
KVM_RISCV_HFENCE_VVMA_ASID_GVA,
16+
KVM_RISCV_HFENCE_VVMA_ASID_ALL,
17+
KVM_RISCV_HFENCE_VVMA_GVA,
18+
KVM_RISCV_HFENCE_VVMA_ALL
19+
};
20+
21+
struct kvm_riscv_hfence {
22+
enum kvm_riscv_hfence_type type;
23+
unsigned long asid;
24+
unsigned long vmid;
25+
unsigned long order;
26+
gpa_t addr;
27+
gpa_t size;
28+
};
29+
30+
#define KVM_RISCV_VCPU_MAX_HFENCE 64
31+
32+
#define KVM_RISCV_GSTAGE_TLB_MIN_ORDER 12
33+
34+
void kvm_riscv_local_hfence_gvma_vmid_gpa(unsigned long vmid,
35+
gpa_t gpa, gpa_t gpsz,
36+
unsigned long order);
37+
void kvm_riscv_local_hfence_gvma_vmid_all(unsigned long vmid);
38+
void kvm_riscv_local_hfence_gvma_gpa(gpa_t gpa, gpa_t gpsz,
39+
unsigned long order);
40+
void kvm_riscv_local_hfence_gvma_all(void);
41+
void kvm_riscv_local_hfence_vvma_asid_gva(unsigned long vmid,
42+
unsigned long asid,
43+
unsigned long gva,
44+
unsigned long gvsz,
45+
unsigned long order);
46+
void kvm_riscv_local_hfence_vvma_asid_all(unsigned long vmid,
47+
unsigned long asid);
48+
void kvm_riscv_local_hfence_vvma_gva(unsigned long vmid,
49+
unsigned long gva, unsigned long gvsz,
50+
unsigned long order);
51+
void kvm_riscv_local_hfence_vvma_all(unsigned long vmid);
52+
53+
void kvm_riscv_tlb_flush_process(struct kvm_vcpu *vcpu);
54+
55+
void kvm_riscv_fence_i_process(struct kvm_vcpu *vcpu);
56+
void kvm_riscv_hfence_vvma_all_process(struct kvm_vcpu *vcpu);
57+
void kvm_riscv_hfence_process(struct kvm_vcpu *vcpu);
58+
59+
void kvm_riscv_fence_i(struct kvm *kvm,
60+
unsigned long hbase, unsigned long hmask);
61+
void kvm_riscv_hfence_gvma_vmid_gpa(struct kvm *kvm,
62+
unsigned long hbase, unsigned long hmask,
63+
gpa_t gpa, gpa_t gpsz,
64+
unsigned long order, unsigned long vmid);
65+
void kvm_riscv_hfence_gvma_vmid_all(struct kvm *kvm,
66+
unsigned long hbase, unsigned long hmask,
67+
unsigned long vmid);
68+
void kvm_riscv_hfence_vvma_asid_gva(struct kvm *kvm,
69+
unsigned long hbase, unsigned long hmask,
70+
unsigned long gva, unsigned long gvsz,
71+
unsigned long order, unsigned long asid,
72+
unsigned long vmid);
73+
void kvm_riscv_hfence_vvma_asid_all(struct kvm *kvm,
74+
unsigned long hbase, unsigned long hmask,
75+
unsigned long asid, unsigned long vmid);
76+
void kvm_riscv_hfence_vvma_gva(struct kvm *kvm,
77+
unsigned long hbase, unsigned long hmask,
78+
unsigned long gva, unsigned long gvsz,
79+
unsigned long order, unsigned long vmid);
80+
void kvm_riscv_hfence_vvma_all(struct kvm *kvm,
81+
unsigned long hbase, unsigned long hmask,
82+
unsigned long vmid);
83+
84+
#endif

arch/riscv/include/asm/kvm_vcpu_sbi.h

Lines changed: 12 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -49,6 +49,16 @@ struct kvm_vcpu_sbi_extension {
4949

5050
/* Extension specific probe function */
5151
unsigned long (*probe)(struct kvm_vcpu *vcpu);
52+
53+
/*
54+
* Init/deinit function called once during VCPU init/destroy. These
55+
* might be use if the SBI extensions need to allocate or do specific
56+
* init time only configuration.
57+
*/
58+
int (*init)(struct kvm_vcpu *vcpu);
59+
void (*deinit)(struct kvm_vcpu *vcpu);
60+
61+
void (*reset)(struct kvm_vcpu *vcpu);
5262
};
5363

5464
void kvm_riscv_vcpu_sbi_forward(struct kvm_vcpu *vcpu, struct kvm_run *run);
@@ -72,6 +82,8 @@ const struct kvm_vcpu_sbi_extension *kvm_vcpu_sbi_find_ext(
7282
bool riscv_vcpu_supports_sbi_ext(struct kvm_vcpu *vcpu, int idx);
7383
int kvm_riscv_vcpu_sbi_ecall(struct kvm_vcpu *vcpu, struct kvm_run *run);
7484
void kvm_riscv_vcpu_sbi_init(struct kvm_vcpu *vcpu);
85+
void kvm_riscv_vcpu_sbi_deinit(struct kvm_vcpu *vcpu);
86+
void kvm_riscv_vcpu_sbi_reset(struct kvm_vcpu *vcpu);
7587

7688
int kvm_riscv_vcpu_get_reg_sbi_sta(struct kvm_vcpu *vcpu, unsigned long reg_num,
7789
unsigned long *reg_val);

0 commit comments

Comments (0)