Skip to content

Commit 3d4e882

Browse files
committed
KVM: SEV: Move init of SNP guest state into sev_init_vmcb()
Move the initialization of SNP guest state from svm_vcpu_reset() into sev_init_vmcb() to reduce the number of paths that deal with INIT/RESET for SEV+ vCPUs from 4+ to 1. Plumb in @init_event as necessary.

Opportunistically check for an SNP guest outside of sev_snp_init_protected_guest_state() so that sev_init_vmcb() is consistent with respect to checking for SEV-ES+ and SNP+ guests.

No functional change intended.

Reviewed-by: Nikunj A Dadhania <nikunj@amd.com>
Link: https://lore.kernel.org/r/20250819234833.3080255-6-seanjc@google.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
1 parent 34bd82a commit 3d4e882

3 files changed

Lines changed: 13 additions & 16 deletions

File tree

arch/x86/kvm/svm/sev.c

Lines changed: 9 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -1975,7 +1975,7 @@ static void sev_migrate_from(struct kvm *dst_kvm, struct kvm *src_kvm)
19751975
kvm_for_each_vcpu(i, dst_vcpu, dst_kvm) {
19761976
dst_svm = to_svm(dst_vcpu);
19771977

1978-
sev_init_vmcb(dst_svm);
1978+
sev_init_vmcb(dst_svm, false);
19791979

19801980
if (!dst->es_active)
19811981
continue;
@@ -3887,17 +3887,14 @@ static int snp_begin_psc(struct vcpu_svm *svm, struct psc_buffer *psc)
38873887
/*
38883888
* Invoked as part of svm_vcpu_reset() processing of an init event.
38893889
*/
3890-
void sev_snp_init_protected_guest_state(struct kvm_vcpu *vcpu)
3890+
static void sev_snp_init_protected_guest_state(struct kvm_vcpu *vcpu)
38913891
{
38923892
struct vcpu_svm *svm = to_svm(vcpu);
38933893
struct kvm_memory_slot *slot;
38943894
struct page *page;
38953895
kvm_pfn_t pfn;
38963896
gfn_t gfn;
38973897

3898-
if (!sev_snp_guest(vcpu->kvm))
3899-
return;
3900-
39013898
guard(mutex)(&svm->sev_es.snp_vmsa_mutex);
39023899

39033900
if (!svm->sev_es.snp_ap_waiting_for_reset)
@@ -4546,8 +4543,10 @@ static void sev_es_init_vmcb(struct vcpu_svm *svm)
45464543
svm_clr_intercept(svm, INTERCEPT_XSETBV);
45474544
}
45484545

4549-
void sev_init_vmcb(struct vcpu_svm *svm)
4546+
void sev_init_vmcb(struct vcpu_svm *svm, bool init_event)
45504547
{
4548+
struct kvm_vcpu *vcpu = &svm->vcpu;
4549+
45514550
svm->vmcb->control.nested_ctl |= SVM_NESTED_CTL_SEV_ENABLE;
45524551
clr_exception_intercept(svm, UD_VECTOR);
45534552

@@ -4557,7 +4556,10 @@ void sev_init_vmcb(struct vcpu_svm *svm)
45574556
*/
45584557
clr_exception_intercept(svm, GP_VECTOR);
45594558

4560-
if (sev_es_guest(svm->vcpu.kvm))
4559+
if (init_event && sev_snp_guest(vcpu->kvm))
4560+
sev_snp_init_protected_guest_state(vcpu);
4561+
4562+
if (sev_es_guest(vcpu->kvm))
45614563
sev_es_init_vmcb(svm);
45624564
}
45634565

arch/x86/kvm/svm/svm.c

Lines changed: 3 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -1083,7 +1083,7 @@ static void svm_recalc_intercepts_after_set_cpuid(struct kvm_vcpu *vcpu)
10831083
svm_recalc_msr_intercepts(vcpu);
10841084
}
10851085

1086-
static void init_vmcb(struct kvm_vcpu *vcpu)
1086+
static void init_vmcb(struct kvm_vcpu *vcpu, bool init_event)
10871087
{
10881088
struct vcpu_svm *svm = to_svm(vcpu);
10891089
struct vmcb *vmcb = svm->vmcb01.ptr;
@@ -1221,7 +1221,7 @@ static void init_vmcb(struct kvm_vcpu *vcpu)
12211221
svm_set_intercept(svm, INTERCEPT_BUSLOCK);
12221222

12231223
if (sev_guest(vcpu->kvm))
1224-
sev_init_vmcb(svm);
1224+
sev_init_vmcb(svm, init_event);
12251225

12261226
svm_hv_init_vmcb(vmcb);
12271227

@@ -1256,10 +1256,7 @@ static void svm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
12561256
svm->spec_ctrl = 0;
12571257
svm->virt_spec_ctrl = 0;
12581258

1259-
if (init_event)
1260-
sev_snp_init_protected_guest_state(vcpu);
1261-
1262-
init_vmcb(vcpu);
1259+
init_vmcb(vcpu, init_event);
12631260

12641261
if (!init_event)
12651262
__svm_vcpu_reset(vcpu);

arch/x86/kvm/svm/svm.h

Lines changed: 1 addition & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -826,7 +826,7 @@ void avic_refresh_virtual_apic_mode(struct kvm_vcpu *vcpu);
826826
/* sev.c */
827827

828828
int pre_sev_run(struct vcpu_svm *svm, int cpu);
829-
void sev_init_vmcb(struct vcpu_svm *svm);
829+
void sev_init_vmcb(struct vcpu_svm *svm, bool init_event);
830830
void sev_vcpu_after_set_cpuid(struct vcpu_svm *svm);
831831
int sev_es_string_io(struct vcpu_svm *svm, int size, unsigned int port, int in);
832832
void sev_es_vcpu_reset(struct vcpu_svm *svm);
@@ -864,7 +864,6 @@ int sev_cpu_init(struct svm_cpu_data *sd);
864864
int sev_dev_get_attr(u32 group, u64 attr, u64 *val);
865865
extern unsigned int max_sev_asid;
866866
void sev_handle_rmp_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u64 error_code);
867-
void sev_snp_init_protected_guest_state(struct kvm_vcpu *vcpu);
868867
int sev_gmem_prepare(struct kvm *kvm, kvm_pfn_t pfn, gfn_t gfn, int max_order);
869868
void sev_gmem_invalidate(kvm_pfn_t start, kvm_pfn_t end);
870869
int sev_private_max_mapping_level(struct kvm *kvm, kvm_pfn_t pfn);
@@ -891,7 +890,6 @@ static inline int sev_cpu_init(struct svm_cpu_data *sd) { return 0; }
891890
static inline int sev_dev_get_attr(u32 group, u64 attr, u64 *val) { return -ENXIO; }
892891
#define max_sev_asid 0
893892
static inline void sev_handle_rmp_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u64 error_code) {}
894-
static inline void sev_snp_init_protected_guest_state(struct kvm_vcpu *vcpu) {}
895893
static inline int sev_gmem_prepare(struct kvm *kvm, kvm_pfn_t pfn, gfn_t gfn, int max_order)
896894
{
897895
return 0;

0 commit comments

Comments (0)