Skip to content

Commit 1f6d0ee

Browse files
committed
RISC-V: KVM: Pass VMID as parameter to kvm_riscv_hfence_xyz() APIs
Currently, all kvm_riscv_hfence_xyz() APIs assume VMID to be the host VMID of the Guest/VM, which restricts use of these APIs to host TLB maintenance only. Let's allow passing VMID as a parameter to all kvm_riscv_hfence_xyz() APIs so that they can be re-used for nested virtualization related TLB maintenance. Signed-off-by: Anup Patel <apatel@ventanamicro.com> Tested-by: Atish Patra <atishp@rivosinc.com> Reviewed-by: Nutty Liu <liujingqi@lanxincomputing.com> Link: https://lore.kernel.org/r/20250618113532.471448-13-apatel@ventanamicro.com Signed-off-by: Anup Patel <anup@brainfault.org>
1 parent dd82e35 commit 1f6d0ee

5 files changed

Lines changed: 73 additions & 50 deletions

File tree

arch/riscv/include/asm/kvm_tlb.h

Lines changed: 11 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -11,9 +11,11 @@
1111
enum kvm_riscv_hfence_type {
1212
KVM_RISCV_HFENCE_UNKNOWN = 0,
1313
KVM_RISCV_HFENCE_GVMA_VMID_GPA,
14+
KVM_RISCV_HFENCE_GVMA_VMID_ALL,
1415
KVM_RISCV_HFENCE_VVMA_ASID_GVA,
1516
KVM_RISCV_HFENCE_VVMA_ASID_ALL,
1617
KVM_RISCV_HFENCE_VVMA_GVA,
18+
KVM_RISCV_HFENCE_VVMA_ALL
1719
};
1820

1921
struct kvm_riscv_hfence {
@@ -59,21 +61,24 @@ void kvm_riscv_fence_i(struct kvm *kvm,
5961
void kvm_riscv_hfence_gvma_vmid_gpa(struct kvm *kvm,
6062
unsigned long hbase, unsigned long hmask,
6163
gpa_t gpa, gpa_t gpsz,
62-
unsigned long order);
64+
unsigned long order, unsigned long vmid);
6365
void kvm_riscv_hfence_gvma_vmid_all(struct kvm *kvm,
64-
unsigned long hbase, unsigned long hmask);
66+
unsigned long hbase, unsigned long hmask,
67+
unsigned long vmid);
6568
void kvm_riscv_hfence_vvma_asid_gva(struct kvm *kvm,
6669
unsigned long hbase, unsigned long hmask,
6770
unsigned long gva, unsigned long gvsz,
68-
unsigned long order, unsigned long asid);
71+
unsigned long order, unsigned long asid,
72+
unsigned long vmid);
6973
void kvm_riscv_hfence_vvma_asid_all(struct kvm *kvm,
7074
unsigned long hbase, unsigned long hmask,
71-
unsigned long asid);
75+
unsigned long asid, unsigned long vmid);
7276
void kvm_riscv_hfence_vvma_gva(struct kvm *kvm,
7377
unsigned long hbase, unsigned long hmask,
7478
unsigned long gva, unsigned long gvsz,
75-
unsigned long order);
79+
unsigned long order, unsigned long vmid);
7680
void kvm_riscv_hfence_vvma_all(struct kvm *kvm,
77-
unsigned long hbase, unsigned long hmask);
81+
unsigned long hbase, unsigned long hmask,
82+
unsigned long vmid);
7883

7984
#endif

arch/riscv/kvm/gstage.c

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -117,7 +117,8 @@ static void gstage_tlb_flush(struct kvm_gstage *gstage, u32 level, gpa_t addr)
117117
if (gstage->flags & KVM_GSTAGE_FLAGS_LOCAL)
118118
kvm_riscv_local_hfence_gvma_vmid_gpa(gstage->vmid, addr, BIT(order), order);
119119
else
120-
kvm_riscv_hfence_gvma_vmid_gpa(gstage->kvm, -1UL, 0, addr, BIT(order), order);
120+
kvm_riscv_hfence_gvma_vmid_gpa(gstage->kvm, -1UL, 0, addr, BIT(order), order,
121+
gstage->vmid);
121122
}
122123

123124
int kvm_riscv_gstage_set_pte(struct kvm_gstage *gstage,

arch/riscv/kvm/tlb.c

Lines changed: 40 additions & 21 deletions
Original file line numberDiff line numberDiff line change
@@ -251,6 +251,12 @@ void kvm_riscv_hfence_process(struct kvm_vcpu *vcpu)
251251
kvm_riscv_local_hfence_gvma_vmid_gpa(d.vmid, d.addr,
252252
d.size, d.order);
253253
break;
254+
case KVM_RISCV_HFENCE_GVMA_VMID_ALL:
255+
if (kvm_riscv_nacl_available())
256+
nacl_hfence_gvma_vmid_all(nacl_shmem(), d.vmid);
257+
else
258+
kvm_riscv_local_hfence_gvma_vmid_all(d.vmid);
259+
break;
254260
case KVM_RISCV_HFENCE_VVMA_ASID_GVA:
255261
kvm_riscv_vcpu_pmu_incr_fw(vcpu, SBI_PMU_FW_HFENCE_VVMA_ASID_RCVD);
256262
if (kvm_riscv_nacl_available())
@@ -276,6 +282,13 @@ void kvm_riscv_hfence_process(struct kvm_vcpu *vcpu)
276282
kvm_riscv_local_hfence_vvma_gva(d.vmid, d.addr,
277283
d.size, d.order);
278284
break;
285+
case KVM_RISCV_HFENCE_VVMA_ALL:
286+
kvm_riscv_vcpu_pmu_incr_fw(vcpu, SBI_PMU_FW_HFENCE_VVMA_RCVD);
287+
if (kvm_riscv_nacl_available())
288+
nacl_hfence_vvma_all(nacl_shmem(), d.vmid);
289+
else
290+
kvm_riscv_local_hfence_vvma_all(d.vmid);
291+
break;
279292
default:
280293
break;
281294
}
@@ -328,14 +341,13 @@ void kvm_riscv_fence_i(struct kvm *kvm,
328341
void kvm_riscv_hfence_gvma_vmid_gpa(struct kvm *kvm,
329342
unsigned long hbase, unsigned long hmask,
330343
gpa_t gpa, gpa_t gpsz,
331-
unsigned long order)
344+
unsigned long order, unsigned long vmid)
332345
{
333-
struct kvm_vmid *v = &kvm->arch.vmid;
334346
struct kvm_riscv_hfence data;
335347

336348
data.type = KVM_RISCV_HFENCE_GVMA_VMID_GPA;
337349
data.asid = 0;
338-
data.vmid = READ_ONCE(v->vmid);
350+
data.vmid = vmid;
339351
data.addr = gpa;
340352
data.size = gpsz;
341353
data.order = order;
@@ -344,23 +356,28 @@ void kvm_riscv_hfence_gvma_vmid_gpa(struct kvm *kvm,
344356
}
345357

346358
void kvm_riscv_hfence_gvma_vmid_all(struct kvm *kvm,
347-
unsigned long hbase, unsigned long hmask)
359+
unsigned long hbase, unsigned long hmask,
360+
unsigned long vmid)
348361
{
349-
make_xfence_request(kvm, hbase, hmask, KVM_REQ_TLB_FLUSH,
350-
KVM_REQ_TLB_FLUSH, NULL);
362+
struct kvm_riscv_hfence data = {0};
363+
364+
data.type = KVM_RISCV_HFENCE_GVMA_VMID_ALL;
365+
data.vmid = vmid;
366+
make_xfence_request(kvm, hbase, hmask, KVM_REQ_HFENCE,
367+
KVM_REQ_TLB_FLUSH, &data);
351368
}
352369

353370
void kvm_riscv_hfence_vvma_asid_gva(struct kvm *kvm,
354371
unsigned long hbase, unsigned long hmask,
355372
unsigned long gva, unsigned long gvsz,
356-
unsigned long order, unsigned long asid)
373+
unsigned long order, unsigned long asid,
374+
unsigned long vmid)
357375
{
358-
struct kvm_vmid *v = &kvm->arch.vmid;
359376
struct kvm_riscv_hfence data;
360377

361378
data.type = KVM_RISCV_HFENCE_VVMA_ASID_GVA;
362379
data.asid = asid;
363-
data.vmid = READ_ONCE(v->vmid);
380+
data.vmid = vmid;
364381
data.addr = gva;
365382
data.size = gvsz;
366383
data.order = order;
@@ -370,30 +387,27 @@ void kvm_riscv_hfence_vvma_asid_gva(struct kvm *kvm,
370387

371388
void kvm_riscv_hfence_vvma_asid_all(struct kvm *kvm,
372389
unsigned long hbase, unsigned long hmask,
373-
unsigned long asid)
390+
unsigned long asid, unsigned long vmid)
374391
{
375-
struct kvm_vmid *v = &kvm->arch.vmid;
376-
struct kvm_riscv_hfence data;
392+
struct kvm_riscv_hfence data = {0};
377393

378394
data.type = KVM_RISCV_HFENCE_VVMA_ASID_ALL;
379395
data.asid = asid;
380-
data.vmid = READ_ONCE(v->vmid);
381-
data.addr = data.size = data.order = 0;
396+
data.vmid = vmid;
382397
make_xfence_request(kvm, hbase, hmask, KVM_REQ_HFENCE,
383398
KVM_REQ_HFENCE_VVMA_ALL, &data);
384399
}
385400

386401
void kvm_riscv_hfence_vvma_gva(struct kvm *kvm,
387402
unsigned long hbase, unsigned long hmask,
388403
unsigned long gva, unsigned long gvsz,
389-
unsigned long order)
404+
unsigned long order, unsigned long vmid)
390405
{
391-
struct kvm_vmid *v = &kvm->arch.vmid;
392406
struct kvm_riscv_hfence data;
393407

394408
data.type = KVM_RISCV_HFENCE_VVMA_GVA;
395409
data.asid = 0;
396-
data.vmid = READ_ONCE(v->vmid);
410+
data.vmid = vmid;
397411
data.addr = gva;
398412
data.size = gvsz;
399413
data.order = order;
@@ -402,16 +416,21 @@ void kvm_riscv_hfence_vvma_gva(struct kvm *kvm,
402416
}
403417

404418
void kvm_riscv_hfence_vvma_all(struct kvm *kvm,
405-
unsigned long hbase, unsigned long hmask)
419+
unsigned long hbase, unsigned long hmask,
420+
unsigned long vmid)
406421
{
407-
make_xfence_request(kvm, hbase, hmask, KVM_REQ_HFENCE_VVMA_ALL,
408-
KVM_REQ_HFENCE_VVMA_ALL, NULL);
422+
struct kvm_riscv_hfence data = {0};
423+
424+
data.type = KVM_RISCV_HFENCE_VVMA_ALL;
425+
data.vmid = vmid;
426+
make_xfence_request(kvm, hbase, hmask, KVM_REQ_HFENCE,
427+
KVM_REQ_HFENCE_VVMA_ALL, &data);
409428
}
410429

411430
int kvm_arch_flush_remote_tlbs_range(struct kvm *kvm, gfn_t gfn, u64 nr_pages)
412431
{
413432
kvm_riscv_hfence_gvma_vmid_gpa(kvm, -1UL, 0,
414433
gfn << PAGE_SHIFT, nr_pages << PAGE_SHIFT,
415-
PAGE_SHIFT);
434+
PAGE_SHIFT, READ_ONCE(kvm->arch.vmid.vmid));
416435
return 0;
417436
}

arch/riscv/kvm/vcpu_sbi_replace.c

Lines changed: 9 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -96,29 +96,30 @@ static int kvm_sbi_ext_rfence_handler(struct kvm_vcpu *vcpu, struct kvm_run *run
9696
unsigned long hmask = cp->a0;
9797
unsigned long hbase = cp->a1;
9898
unsigned long funcid = cp->a6;
99+
unsigned long vmid;
99100

100101
switch (funcid) {
101102
case SBI_EXT_RFENCE_REMOTE_FENCE_I:
102103
kvm_riscv_fence_i(vcpu->kvm, hbase, hmask);
103104
kvm_riscv_vcpu_pmu_incr_fw(vcpu, SBI_PMU_FW_FENCE_I_SENT);
104105
break;
105106
case SBI_EXT_RFENCE_REMOTE_SFENCE_VMA:
107+
vmid = READ_ONCE(vcpu->kvm->arch.vmid.vmid);
106108
if ((cp->a2 == 0 && cp->a3 == 0) || cp->a3 == -1UL)
107-
kvm_riscv_hfence_vvma_all(vcpu->kvm, hbase, hmask);
109+
kvm_riscv_hfence_vvma_all(vcpu->kvm, hbase, hmask, vmid);
108110
else
109111
kvm_riscv_hfence_vvma_gva(vcpu->kvm, hbase, hmask,
110-
cp->a2, cp->a3, PAGE_SHIFT);
112+
cp->a2, cp->a3, PAGE_SHIFT, vmid);
111113
kvm_riscv_vcpu_pmu_incr_fw(vcpu, SBI_PMU_FW_HFENCE_VVMA_SENT);
112114
break;
113115
case SBI_EXT_RFENCE_REMOTE_SFENCE_VMA_ASID:
116+
vmid = READ_ONCE(vcpu->kvm->arch.vmid.vmid);
114117
if ((cp->a2 == 0 && cp->a3 == 0) || cp->a3 == -1UL)
115-
kvm_riscv_hfence_vvma_asid_all(vcpu->kvm,
116-
hbase, hmask, cp->a4);
118+
kvm_riscv_hfence_vvma_asid_all(vcpu->kvm, hbase, hmask,
119+
cp->a4, vmid);
117120
else
118-
kvm_riscv_hfence_vvma_asid_gva(vcpu->kvm,
119-
hbase, hmask,
120-
cp->a2, cp->a3,
121-
PAGE_SHIFT, cp->a4);
121+
kvm_riscv_hfence_vvma_asid_gva(vcpu->kvm, hbase, hmask, cp->a2,
122+
cp->a3, PAGE_SHIFT, cp->a4, vmid);
122123
kvm_riscv_vcpu_pmu_incr_fw(vcpu, SBI_PMU_FW_HFENCE_VVMA_ASID_SENT);
123124
break;
124125
case SBI_EXT_RFENCE_REMOTE_HFENCE_GVMA:

arch/riscv/kvm/vcpu_sbi_v01.c

Lines changed: 11 additions & 14 deletions
Original file line numberDiff line numberDiff line change
@@ -23,6 +23,7 @@ static int kvm_sbi_ext_v01_handler(struct kvm_vcpu *vcpu, struct kvm_run *run,
2323
struct kvm *kvm = vcpu->kvm;
2424
struct kvm_cpu_context *cp = &vcpu->arch.guest_context;
2525
struct kvm_cpu_trap *utrap = retdata->utrap;
26+
unsigned long vmid;
2627

2728
switch (cp->a7) {
2829
case SBI_EXT_0_1_CONSOLE_GETCHAR:
@@ -78,25 +79,21 @@ static int kvm_sbi_ext_v01_handler(struct kvm_vcpu *vcpu, struct kvm_run *run,
7879
if (cp->a7 == SBI_EXT_0_1_REMOTE_FENCE_I)
7980
kvm_riscv_fence_i(vcpu->kvm, 0, hmask);
8081
else if (cp->a7 == SBI_EXT_0_1_REMOTE_SFENCE_VMA) {
82+
vmid = READ_ONCE(vcpu->kvm->arch.vmid.vmid);
8183
if (cp->a1 == 0 && cp->a2 == 0)
82-
kvm_riscv_hfence_vvma_all(vcpu->kvm,
83-
0, hmask);
84+
kvm_riscv_hfence_vvma_all(vcpu->kvm, 0, hmask, vmid);
8485
else
85-
kvm_riscv_hfence_vvma_gva(vcpu->kvm,
86-
0, hmask,
87-
cp->a1, cp->a2,
88-
PAGE_SHIFT);
86+
kvm_riscv_hfence_vvma_gva(vcpu->kvm, 0, hmask, cp->a1,
87+
cp->a2, PAGE_SHIFT, vmid);
8988
} else {
89+
vmid = READ_ONCE(vcpu->kvm->arch.vmid.vmid);
9090
if (cp->a1 == 0 && cp->a2 == 0)
91-
kvm_riscv_hfence_vvma_asid_all(vcpu->kvm,
92-
0, hmask,
93-
cp->a3);
91+
kvm_riscv_hfence_vvma_asid_all(vcpu->kvm, 0, hmask,
92+
cp->a3, vmid);
9493
else
95-
kvm_riscv_hfence_vvma_asid_gva(vcpu->kvm,
96-
0, hmask,
97-
cp->a1, cp->a2,
98-
PAGE_SHIFT,
99-
cp->a3);
94+
kvm_riscv_hfence_vvma_asid_gva(vcpu->kvm, 0, hmask,
95+
cp->a1, cp->a2, PAGE_SHIFT,
96+
cp->a3, vmid);
10097
}
10198
break;
10299
default:

0 commit comments

Comments (0)