Skip to content

Commit 517987e

Browse files
committed
KVM: x86: add fields to struct kvm_arch for CoCo features
Some VM types have characteristics in common; in fact, the only use of VM types right now is kvm_arch_has_private_mem and it assumes that _all_ nonzero VM types have private memory. We will soon introduce a VM type for SEV and SEV-ES VMs, and at that point we will have two special characteristics of confidential VMs that depend on the VM type: not just if memory is private, but also whether guest state is protected. For the latter we have kvm->arch.guest_state_protected, which is only set on a fully initialized VM. For VM types with protected guest state, we can actually fix a problem in the SEV-ES implementation, where ioctls to set registers do not cause an error even if the VM has been initialized and the guest state encrypted. Make sure that when using VM types that will become an error. Signed-off-by: Paolo Bonzini <pbonzini@redhat.com> Message-Id: <20240209183743.22030-7-pbonzini@redhat.com> Signed-off-by: Paolo Bonzini <pbonzini@redhat.com> Reviewed-by: Isaku Yamahata <isaku.yamahata@intel.com> Message-ID: <20240404121327.3107131-8-pbonzini@redhat.com> Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
1 parent 605bbdc commit 517987e

2 files changed

Lines changed: 79 additions & 21 deletions

File tree

arch/x86/include/asm/kvm_host.h

Lines changed: 5 additions & 2 deletions
Original file line number | Diff line number | Diff line change
@@ -1279,12 +1279,14 @@ enum kvm_apicv_inhibit {
 };
 
 struct kvm_arch {
-	unsigned long vm_type;
 	unsigned long n_used_mmu_pages;
 	unsigned long n_requested_mmu_pages;
 	unsigned long n_max_mmu_pages;
 	unsigned int indirect_shadow_pages;
 	u8 mmu_valid_gen;
+	u8 vm_type;
+	bool has_private_mem;
+	bool has_protected_state;
 	struct hlist_head mmu_page_hash[KVM_NUM_MMU_PAGES];
 	struct list_head active_mmu_pages;
 	struct list_head zapped_obsolete_pages;
@@ -2153,8 +2155,9 @@ void kvm_mmu_new_pgd(struct kvm_vcpu *vcpu, gpa_t new_pgd);
 void kvm_configure_mmu(bool enable_tdp, int tdp_forced_root_level,
 		       int tdp_max_root_level, int tdp_huge_page_level);
 
+
 #ifdef CONFIG_KVM_PRIVATE_MEM
-#define kvm_arch_has_private_mem(kvm) ((kvm)->arch.vm_type != KVM_X86_DEFAULT_VM)
+#define kvm_arch_has_private_mem(kvm) ((kvm)->arch.has_private_mem)
 #else
 #define kvm_arch_has_private_mem(kvm) false
 #endif

arch/x86/kvm/x86.c

Lines changed: 74 additions & 19 deletions
Original file line number | Diff line number | Diff line change
@@ -5555,11 +5555,15 @@ static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu,
 	return 0;
 }
 
-static void kvm_vcpu_ioctl_x86_get_debugregs(struct kvm_vcpu *vcpu,
-					     struct kvm_debugregs *dbgregs)
+static int kvm_vcpu_ioctl_x86_get_debugregs(struct kvm_vcpu *vcpu,
+					    struct kvm_debugregs *dbgregs)
 {
 	unsigned int i;
 
+	if (vcpu->kvm->arch.has_protected_state &&
+	    vcpu->arch.guest_state_protected)
+		return -EINVAL;
+
 	memset(dbgregs, 0, sizeof(*dbgregs));
 
 	BUILD_BUG_ON(ARRAY_SIZE(vcpu->arch.db) != ARRAY_SIZE(dbgregs->db));
@@ -5568,13 +5572,18 @@ static void kvm_vcpu_ioctl_x86_get_debugregs(struct kvm_vcpu *vcpu,
 
 	dbgregs->dr6 = vcpu->arch.dr6;
 	dbgregs->dr7 = vcpu->arch.dr7;
+	return 0;
 }
 
 static int kvm_vcpu_ioctl_x86_set_debugregs(struct kvm_vcpu *vcpu,
 					    struct kvm_debugregs *dbgregs)
 {
 	unsigned int i;
 
+	if (vcpu->kvm->arch.has_protected_state &&
+	    vcpu->arch.guest_state_protected)
+		return -EINVAL;
+
 	if (dbgregs->flags)
 		return -EINVAL;
 
@@ -5595,8 +5604,8 @@ static int kvm_vcpu_ioctl_x86_set_debugregs(struct kvm_vcpu *vcpu,
 }
 
 
-static void kvm_vcpu_ioctl_x86_get_xsave2(struct kvm_vcpu *vcpu,
-					  u8 *state, unsigned int size)
+static int kvm_vcpu_ioctl_x86_get_xsave2(struct kvm_vcpu *vcpu,
+					 u8 *state, unsigned int size)
 {
 	/*
 	 * Only copy state for features that are enabled for the guest. The
@@ -5614,50 +5623,60 @@ static int kvm_vcpu_ioctl_x86_get_xsave2(struct kvm_vcpu *vcpu,
 		XFEATURE_MASK_FPSSE;
 
 	if (fpstate_is_confidential(&vcpu->arch.guest_fpu))
-		return;
+		return vcpu->kvm->arch.has_protected_state ? -EINVAL : 0;
 
 	fpu_copy_guest_fpstate_to_uabi(&vcpu->arch.guest_fpu, state, size,
 				       supported_xcr0, vcpu->arch.pkru);
+	return 0;
 }
 
-static void kvm_vcpu_ioctl_x86_get_xsave(struct kvm_vcpu *vcpu,
-					 struct kvm_xsave *guest_xsave)
+static int kvm_vcpu_ioctl_x86_get_xsave(struct kvm_vcpu *vcpu,
+					struct kvm_xsave *guest_xsave)
 {
-	kvm_vcpu_ioctl_x86_get_xsave2(vcpu, (void *)guest_xsave->region,
-				      sizeof(guest_xsave->region));
+	return kvm_vcpu_ioctl_x86_get_xsave2(vcpu, (void *)guest_xsave->region,
+					     sizeof(guest_xsave->region));
 }
 
 static int kvm_vcpu_ioctl_x86_set_xsave(struct kvm_vcpu *vcpu,
 					struct kvm_xsave *guest_xsave)
 {
 	if (fpstate_is_confidential(&vcpu->arch.guest_fpu))
-		return 0;
+		return vcpu->kvm->arch.has_protected_state ? -EINVAL : 0;
 
 	return fpu_copy_uabi_to_guest_fpstate(&vcpu->arch.guest_fpu,
 					      guest_xsave->region,
 					      kvm_caps.supported_xcr0,
 					      &vcpu->arch.pkru);
 }
 
-static void kvm_vcpu_ioctl_x86_get_xcrs(struct kvm_vcpu *vcpu,
-					struct kvm_xcrs *guest_xcrs)
+static int kvm_vcpu_ioctl_x86_get_xcrs(struct kvm_vcpu *vcpu,
+				       struct kvm_xcrs *guest_xcrs)
 {
+	if (vcpu->kvm->arch.has_protected_state &&
+	    vcpu->arch.guest_state_protected)
+		return -EINVAL;
+
 	if (!boot_cpu_has(X86_FEATURE_XSAVE)) {
 		guest_xcrs->nr_xcrs = 0;
-		return;
+		return 0;
 	}
 
 	guest_xcrs->nr_xcrs = 1;
 	guest_xcrs->flags = 0;
 	guest_xcrs->xcrs[0].xcr = XCR_XFEATURE_ENABLED_MASK;
 	guest_xcrs->xcrs[0].value = vcpu->arch.xcr0;
+	return 0;
 }
 
 static int kvm_vcpu_ioctl_x86_set_xcrs(struct kvm_vcpu *vcpu,
 				       struct kvm_xcrs *guest_xcrs)
 {
 	int i, r = 0;
 
+	if (vcpu->kvm->arch.has_protected_state &&
+	    vcpu->arch.guest_state_protected)
+		return -EINVAL;
+
 	if (!boot_cpu_has(X86_FEATURE_XSAVE))
 		return -EINVAL;
 
@@ -6040,7 +6059,9 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
 	case KVM_GET_DEBUGREGS: {
 		struct kvm_debugregs dbgregs;
 
-		kvm_vcpu_ioctl_x86_get_debugregs(vcpu, &dbgregs);
+		r = kvm_vcpu_ioctl_x86_get_debugregs(vcpu, &dbgregs);
+		if (r < 0)
+			break;
 
 		r = -EFAULT;
 		if (copy_to_user(argp, &dbgregs,
@@ -6070,7 +6091,9 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
 		if (!u.xsave)
 			break;
 
-		kvm_vcpu_ioctl_x86_get_xsave(vcpu, u.xsave);
+		r = kvm_vcpu_ioctl_x86_get_xsave(vcpu, u.xsave);
+		if (r < 0)
+			break;
 
 		r = -EFAULT;
 		if (copy_to_user(argp, u.xsave, sizeof(struct kvm_xsave)))
@@ -6099,7 +6122,9 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
 		if (!u.xsave)
 			break;
 
-		kvm_vcpu_ioctl_x86_get_xsave2(vcpu, u.buffer, size);
+		r = kvm_vcpu_ioctl_x86_get_xsave2(vcpu, u.buffer, size);
+		if (r < 0)
+			break;
 
 		r = -EFAULT;
 		if (copy_to_user(argp, u.xsave, size))
@@ -6115,7 +6140,9 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
 		if (!u.xcrs)
 			break;
 
-		kvm_vcpu_ioctl_x86_get_xcrs(vcpu, u.xcrs);
+		r = kvm_vcpu_ioctl_x86_get_xcrs(vcpu, u.xcrs);
+		if (r < 0)
+			break;
 
 		r = -EFAULT;
 		if (copy_to_user(argp, u.xcrs,
@@ -6259,6 +6286,11 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
 	}
 #endif
 	case KVM_GET_SREGS2: {
+		r = -EINVAL;
+		if (vcpu->kvm->arch.has_protected_state &&
+		    vcpu->arch.guest_state_protected)
+			goto out;
+
 		u.sregs2 = kzalloc(sizeof(struct kvm_sregs2), GFP_KERNEL);
 		r = -ENOMEM;
 		if (!u.sregs2)
@@ -6271,6 +6303,11 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
 		break;
 	}
 	case KVM_SET_SREGS2: {
+		r = -EINVAL;
+		if (vcpu->kvm->arch.has_protected_state &&
+		    vcpu->arch.guest_state_protected)
+			goto out;
+
 		u.sregs2 = memdup_user(argp, sizeof(struct kvm_sregs2));
 		if (IS_ERR(u.sregs2)) {
 			r = PTR_ERR(u.sregs2);
@@ -11478,6 +11515,10 @@ static void __get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
 
 int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
 {
+	if (vcpu->kvm->arch.has_protected_state &&
+	    vcpu->arch.guest_state_protected)
+		return -EINVAL;
+
 	vcpu_load(vcpu);
 	__get_regs(vcpu, regs);
 	vcpu_put(vcpu);
@@ -11519,6 +11560,10 @@ static void __set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
 
 int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
 {
+	if (vcpu->kvm->arch.has_protected_state &&
+	    vcpu->arch.guest_state_protected)
+		return -EINVAL;
+
 	vcpu_load(vcpu);
 	__set_regs(vcpu, regs);
 	vcpu_put(vcpu);
@@ -11591,6 +11636,10 @@ static void __get_sregs2(struct kvm_vcpu *vcpu, struct kvm_sregs2 *sregs2)
 int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
 				  struct kvm_sregs *sregs)
 {
+	if (vcpu->kvm->arch.has_protected_state &&
+	    vcpu->arch.guest_state_protected)
+		return -EINVAL;
+
 	vcpu_load(vcpu);
 	__get_sregs(vcpu, sregs);
 	vcpu_put(vcpu);
@@ -11858,6 +11907,10 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
 {
 	int ret;
 
+	if (vcpu->kvm->arch.has_protected_state &&
+	    vcpu->arch.guest_state_protected)
+		return -EINVAL;
+
 	vcpu_load(vcpu);
 	ret = __set_sregs(vcpu, sregs);
 	vcpu_put(vcpu);
@@ -11975,7 +12028,7 @@ int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
 	struct fxregs_state *fxsave;
 
 	if (fpstate_is_confidential(&vcpu->arch.guest_fpu))
-		return 0;
+		return vcpu->kvm->arch.has_protected_state ? -EINVAL : 0;
 
 	vcpu_load(vcpu);
 
@@ -11998,7 +12051,7 @@ int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
 	struct fxregs_state *fxsave;
 
 	if (fpstate_is_confidential(&vcpu->arch.guest_fpu))
-		return 0;
+		return vcpu->kvm->arch.has_protected_state ? -EINVAL : 0;
 
 	vcpu_load(vcpu);
 
@@ -12524,6 +12577,8 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
 		return -EINVAL;
 
 	kvm->arch.vm_type = type;
+	kvm->arch.has_private_mem =
+		(type == KVM_X86_SW_PROTECTED_VM);
 
 	ret = kvm_page_track_init(kvm);
 	if (ret)

0 commit comments

Comments (0)