Skip to content

Commit 4c933f3

Browse files
committed
RISC-V: KVM: Add vmid field to struct kvm_riscv_hfence
Currently, the struct kvm_riscv_hfence does not have a vmid field, and the various hfence processing functions always pick the vmid assigned to the guest/VM. This prevents us from performing an hfence operation on an arbitrary vmid, so add a vmid field to struct kvm_riscv_hfence and use it wherever applicable. Signed-off-by: Anup Patel <apatel@ventanamicro.com> Reviewed-by: Atish Patra <atishp@rivosinc.com> Tested-by: Atish Patra <atishp@rivosinc.com> Reviewed-by: Nutty Liu <liujingqi@lanxincomputing.com> Link: https://lore.kernel.org/r/20250618113532.471448-11-apatel@ventanamicro.com Signed-off-by: Anup Patel <anup@brainfault.org>
1 parent f035b44 commit 4c933f3

2 files changed

Lines changed: 17 additions & 14 deletions

File tree

arch/riscv/include/asm/kvm_tlb.h

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -19,6 +19,7 @@ enum kvm_riscv_hfence_type {
1919
struct kvm_riscv_hfence {
2020
enum kvm_riscv_hfence_type type;
2121
unsigned long asid;
22+
unsigned long vmid;
2223
unsigned long order;
2324
gpa_t addr;
2425
gpa_t size;

arch/riscv/kvm/tlb.c

Lines changed: 16 additions & 14 deletions
Original file line numberDiff line numberDiff line change
@@ -237,49 +237,43 @@ static bool vcpu_hfence_enqueue(struct kvm_vcpu *vcpu,
237237

238238
void kvm_riscv_hfence_process(struct kvm_vcpu *vcpu)
239239
{
240-
unsigned long vmid;
241240
struct kvm_riscv_hfence d = { 0 };
242-
struct kvm_vmid *v = &vcpu->kvm->arch.vmid;
243241

244242
while (vcpu_hfence_dequeue(vcpu, &d)) {
245243
switch (d.type) {
246244
case KVM_RISCV_HFENCE_UNKNOWN:
247245
break;
248246
case KVM_RISCV_HFENCE_GVMA_VMID_GPA:
249-
vmid = READ_ONCE(v->vmid);
250247
if (kvm_riscv_nacl_available())
251-
nacl_hfence_gvma_vmid(nacl_shmem(), vmid,
248+
nacl_hfence_gvma_vmid(nacl_shmem(), d.vmid,
252249
d.addr, d.size, d.order);
253250
else
254-
kvm_riscv_local_hfence_gvma_vmid_gpa(vmid, d.addr,
251+
kvm_riscv_local_hfence_gvma_vmid_gpa(d.vmid, d.addr,
255252
d.size, d.order);
256253
break;
257254
case KVM_RISCV_HFENCE_VVMA_ASID_GVA:
258255
kvm_riscv_vcpu_pmu_incr_fw(vcpu, SBI_PMU_FW_HFENCE_VVMA_ASID_RCVD);
259-
vmid = READ_ONCE(v->vmid);
260256
if (kvm_riscv_nacl_available())
261-
nacl_hfence_vvma_asid(nacl_shmem(), vmid, d.asid,
257+
nacl_hfence_vvma_asid(nacl_shmem(), d.vmid, d.asid,
262258
d.addr, d.size, d.order);
263259
else
264-
kvm_riscv_local_hfence_vvma_asid_gva(vmid, d.asid, d.addr,
260+
kvm_riscv_local_hfence_vvma_asid_gva(d.vmid, d.asid, d.addr,
265261
d.size, d.order);
266262
break;
267263
case KVM_RISCV_HFENCE_VVMA_ASID_ALL:
268264
kvm_riscv_vcpu_pmu_incr_fw(vcpu, SBI_PMU_FW_HFENCE_VVMA_ASID_RCVD);
269-
vmid = READ_ONCE(v->vmid);
270265
if (kvm_riscv_nacl_available())
271-
nacl_hfence_vvma_asid_all(nacl_shmem(), vmid, d.asid);
266+
nacl_hfence_vvma_asid_all(nacl_shmem(), d.vmid, d.asid);
272267
else
273-
kvm_riscv_local_hfence_vvma_asid_all(vmid, d.asid);
268+
kvm_riscv_local_hfence_vvma_asid_all(d.vmid, d.asid);
274269
break;
275270
case KVM_RISCV_HFENCE_VVMA_GVA:
276271
kvm_riscv_vcpu_pmu_incr_fw(vcpu, SBI_PMU_FW_HFENCE_VVMA_RCVD);
277-
vmid = READ_ONCE(v->vmid);
278272
if (kvm_riscv_nacl_available())
279-
nacl_hfence_vvma(nacl_shmem(), vmid,
273+
nacl_hfence_vvma(nacl_shmem(), d.vmid,
280274
d.addr, d.size, d.order);
281275
else
282-
kvm_riscv_local_hfence_vvma_gva(vmid, d.addr,
276+
kvm_riscv_local_hfence_vvma_gva(d.vmid, d.addr,
283277
d.size, d.order);
284278
break;
285279
default:
@@ -336,10 +330,12 @@ void kvm_riscv_hfence_gvma_vmid_gpa(struct kvm *kvm,
336330
gpa_t gpa, gpa_t gpsz,
337331
unsigned long order)
338332
{
333+
struct kvm_vmid *v = &kvm->arch.vmid;
339334
struct kvm_riscv_hfence data;
340335

341336
data.type = KVM_RISCV_HFENCE_GVMA_VMID_GPA;
342337
data.asid = 0;
338+
data.vmid = READ_ONCE(v->vmid);
343339
data.addr = gpa;
344340
data.size = gpsz;
345341
data.order = order;
@@ -359,10 +355,12 @@ void kvm_riscv_hfence_vvma_asid_gva(struct kvm *kvm,
359355
unsigned long gva, unsigned long gvsz,
360356
unsigned long order, unsigned long asid)
361357
{
358+
struct kvm_vmid *v = &kvm->arch.vmid;
362359
struct kvm_riscv_hfence data;
363360

364361
data.type = KVM_RISCV_HFENCE_VVMA_ASID_GVA;
365362
data.asid = asid;
363+
data.vmid = READ_ONCE(v->vmid);
366364
data.addr = gva;
367365
data.size = gvsz;
368366
data.order = order;
@@ -374,10 +372,12 @@ void kvm_riscv_hfence_vvma_asid_all(struct kvm *kvm,
374372
unsigned long hbase, unsigned long hmask,
375373
unsigned long asid)
376374
{
375+
struct kvm_vmid *v = &kvm->arch.vmid;
377376
struct kvm_riscv_hfence data;
378377

379378
data.type = KVM_RISCV_HFENCE_VVMA_ASID_ALL;
380379
data.asid = asid;
380+
data.vmid = READ_ONCE(v->vmid);
381381
data.addr = data.size = data.order = 0;
382382
make_xfence_request(kvm, hbase, hmask, KVM_REQ_HFENCE,
383383
KVM_REQ_HFENCE_VVMA_ALL, &data);
@@ -388,10 +388,12 @@ void kvm_riscv_hfence_vvma_gva(struct kvm *kvm,
388388
unsigned long gva, unsigned long gvsz,
389389
unsigned long order)
390390
{
391+
struct kvm_vmid *v = &kvm->arch.vmid;
391392
struct kvm_riscv_hfence data;
392393

393394
data.type = KVM_RISCV_HFENCE_VVMA_GVA;
394395
data.asid = 0;
396+
data.vmid = READ_ONCE(v->vmid);
395397
data.addr = gva;
396398
data.size = gvsz;
397399
data.order = order;

0 commit comments

Comments
 (0)