Skip to content

Commit b6ed4fa

Browse files
Fuad Tabba authored and Marc Zyngier committed
KVM: arm64: Introduce and use predicates that check for protected VMs
In order to determine whether or not a VM or vcpu are protected, introduce helpers to query this state. While at it, use the vcpu helper to check the vcpu's protected state instead of the kvm one. Co-authored-by: Marc Zyngier <maz@kernel.org> Signed-off-by: Fuad Tabba <tabba@google.com> Acked-by: Oliver Upton <oliver.upton@linux.dev> Link: https://lore.kernel.org/r/20240423150538.2103045-19-tabba@google.com Signed-off-by: Marc Zyngier <maz@kernel.org>
1 parent d81a91a commit b6ed4fa

3 files changed

Lines changed: 11 additions & 8 deletions

File tree

arch/arm64/include/asm/kvm_host.h

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -211,6 +211,7 @@ typedef unsigned int pkvm_handle_t;
211211
struct kvm_protected_vm {
212212
pkvm_handle_t handle;
213213
struct kvm_hyp_memcache teardown_mc;
214+
bool enabled;
214215
};
215216

216217
struct kvm_mpidr_data {
@@ -1295,10 +1296,9 @@ struct kvm *kvm_arch_alloc_vm(void);
12951296

12961297
#define __KVM_HAVE_ARCH_FLUSH_REMOTE_TLBS_RANGE
12971298

1298-
static inline bool kvm_vm_is_protected(struct kvm *kvm)
1299-
{
1300-
return false;
1301-
}
1299+
#define kvm_vm_is_protected(kvm) (is_protected_kvm_enabled() && (kvm)->arch.pkvm.enabled)
1300+
1301+
#define vcpu_is_protected(vcpu) kvm_vm_is_protected((vcpu)->kvm)
13021302

13031303
int kvm_arm_vcpu_finalize(struct kvm_vcpu *vcpu, int feature);
13041304
bool kvm_arm_vcpu_is_finalized(struct kvm_vcpu *vcpu);

arch/arm64/kvm/hyp/include/nvhe/pkvm.h

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -53,6 +53,11 @@ pkvm_hyp_vcpu_to_hyp_vm(struct pkvm_hyp_vcpu *hyp_vcpu)
5353
return container_of(hyp_vcpu->vcpu.kvm, struct pkvm_hyp_vm, kvm);
5454
}
5555

56+
static inline bool pkvm_hyp_vcpu_is_protected(struct pkvm_hyp_vcpu *hyp_vcpu)
57+
{
58+
return vcpu_is_protected(&hyp_vcpu->vcpu);
59+
}
60+
5661
void pkvm_hyp_vm_table_init(void *tbl);
5762
void pkvm_host_fpsimd_state_init(void);
5863

arch/arm64/kvm/hyp/nvhe/switch.c

Lines changed: 2 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -209,7 +209,7 @@ static const exit_handler_fn pvm_exit_handlers[] = {
209209

210210
static const exit_handler_fn *kvm_get_exit_handler_array(struct kvm_vcpu *vcpu)
211211
{
212-
if (unlikely(kvm_vm_is_protected(kern_hyp_va(vcpu->kvm))))
212+
if (unlikely(vcpu_is_protected(vcpu)))
213213
return pvm_exit_handlers;
214214

215215
return hyp_exit_handlers;
@@ -228,9 +228,7 @@ static const exit_handler_fn *kvm_get_exit_handler_array(struct kvm_vcpu *vcpu)
228228
*/
229229
static void early_exit_filter(struct kvm_vcpu *vcpu, u64 *exit_code)
230230
{
231-
struct kvm *kvm = kern_hyp_va(vcpu->kvm);
232-
233-
if (kvm_vm_is_protected(kvm) && vcpu_mode_is_32bit(vcpu)) {
231+
if (unlikely(vcpu_is_protected(vcpu) && vcpu_mode_is_32bit(vcpu))) {
234232
/*
235233
* As we have caught the guest red-handed, decide that it isn't
236234
* fit for purpose anymore by making the vcpu invalid. The VMM

0 commit comments

Comments
 (0)