Skip to content

Commit 795a0bb

Browse files
author
Marc Zyngier
committed
KVM: arm64: Add helper for last ditch idreg adjustments
We already have to perform a set of last-chance adjustments for NV purposes. We will soon have to do the same for the GIC, so introduce a helper for that exact purpose.

Reviewed-by: Oliver Upton <oliver.upton@linux.dev>
Link: https://lore.kernel.org/r/20240827152517.3909653-5-maz@kernel.org
Signed-off-by: Marc Zyngier <maz@kernel.org>
1 parent 8d917e0 commit 795a0bb

4 files changed

Lines changed: 37 additions & 17 deletions

File tree

arch/arm64/kvm/arm.c

Lines changed: 7 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -46,6 +46,8 @@
4646
#include <kvm/arm_pmu.h>
4747
#include <kvm/arm_psci.h>
4848

49+
#include "sys_regs.h"
50+
4951
static enum kvm_mode kvm_mode = KVM_MODE_DEFAULT;
5052

5153
enum kvm_wfx_trap_policy {
@@ -821,15 +823,13 @@ int kvm_arch_vcpu_run_pid_change(struct kvm_vcpu *vcpu)
821823
return ret;
822824
}
823825

824-
if (vcpu_has_nv(vcpu)) {
825-
ret = kvm_init_nv_sysregs(vcpu->kvm);
826-
if (ret)
827-
return ret;
828-
}
826+
ret = kvm_finalize_sys_regs(vcpu);
827+
if (ret)
828+
return ret;
829829

830830
/*
831-
* This needs to happen after NV has imposed its own restrictions on
832-
* the feature set
831+
* This needs to happen after any restriction has been applied
832+
* to the feature set.
833833
*/
834834
kvm_calculate_traps(vcpu);
835835

arch/arm64/kvm/nested.c

Lines changed: 5 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -954,19 +954,16 @@ static void set_sysreg_masks(struct kvm *kvm, int sr, u64 res0, u64 res1)
954954
int kvm_init_nv_sysregs(struct kvm *kvm)
955955
{
956956
u64 res0, res1;
957-
int ret = 0;
958957

959-
mutex_lock(&kvm->arch.config_lock);
958+
lockdep_assert_held(&kvm->arch.config_lock);
960959

961960
if (kvm->arch.sysreg_masks)
962-
goto out;
961+
return 0;
963962

964963
kvm->arch.sysreg_masks = kzalloc(sizeof(*(kvm->arch.sysreg_masks)),
965964
GFP_KERNEL_ACCOUNT);
966-
if (!kvm->arch.sysreg_masks) {
967-
ret = -ENOMEM;
968-
goto out;
969-
}
965+
if (!kvm->arch.sysreg_masks)
966+
return -ENOMEM;
970967

971968
limit_nv_id_regs(kvm);
972969

@@ -1195,8 +1192,6 @@ int kvm_init_nv_sysregs(struct kvm *kvm)
11951192
if (!kvm_has_feat(kvm, ID_AA64PFR0_EL1, AMU, V1P1))
11961193
res0 |= ~(res0 | res1);
11971194
set_sysreg_masks(kvm, HAFGRTR_EL2, res0, res1);
1198-
out:
1199-
mutex_unlock(&kvm->arch.config_lock);
12001195

1201-
return ret;
1196+
return 0;
12021197
}

arch/arm64/kvm/sys_regs.c

Lines changed: 23 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -4620,6 +4620,29 @@ void kvm_calculate_traps(struct kvm_vcpu *vcpu)
46204620
mutex_unlock(&kvm->arch.config_lock);
46214621
}
46224622

4623+
/*
4624+
* Perform last adjustments to the ID registers that are implied by the
4625+
* configuration outside of the ID regs themselves, as well as any
4626+
* initialisation that directly depend on these ID registers (such as
4627+
* RES0/RES1 behaviours). This is not the place to configure traps though.
4628+
*
4629+
* Because this can be called once per CPU, changes must be idempotent.
4630+
*/
4631+
int kvm_finalize_sys_regs(struct kvm_vcpu *vcpu)
4632+
{
4633+
struct kvm *kvm = vcpu->kvm;
4634+
4635+
guard(mutex)(&kvm->arch.config_lock);
4636+
4637+
if (vcpu_has_nv(vcpu)) {
4638+
int ret = kvm_init_nv_sysregs(kvm);
4639+
if (ret)
4640+
return ret;
4641+
}
4642+
4643+
return 0;
4644+
}
4645+
46234646
int __init kvm_sys_reg_table_init(void)
46244647
{
46254648
bool valid = true;

arch/arm64/kvm/sys_regs.h

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -235,6 +235,8 @@ int kvm_sys_reg_set_user(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg,
235235

236236
bool triage_sysreg_trap(struct kvm_vcpu *vcpu, int *sr_index);
237237

238+
int kvm_finalize_sys_regs(struct kvm_vcpu *vcpu);
239+
238240
#define AA32(_x) .aarch32_map = AA32_##_x
239241
#define Op0(_x) .Op0 = _x
240242
#define Op1(_x) .Op1 = _x

0 commit comments

Comments (0)