Skip to content

Commit 74bd160

Browse files
committed
Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm
Pull kvm "This is a pretty large diffstat for this time of the release. The main culprit is a reorganization of the AMD assembly trampoline, allowing percpu variables to be accessed early. This is needed for the return stack depth tracking retbleed mitigation that will be in 6.2, but it also makes it possible to tighten the IBRS restore on vmexit. The latter change is a long tail of the spectrev2/retbleed patches (the corresponding Intel change was simpler and went in already last June), which is why I am including it right now instead of sharing a topic branch with tip. Being assembly and being rich in comments makes the line count balloon a bit, but I am pretty confident in the change (famous last words) because the reorganization actually makes everything simpler and more understandable than before. It has also had external review and has been tested on the aforementioned 6.2 changes, which explode quite brutally without the fix. Apart from this, things are pretty normal. s390: - PCI fix - PV clock fix x86: - Fix clash between PMU MSRs and other MSRs - Prepare SVM assembly trampoline for 6.2 retbleed mitigation and for... - ... 
tightening IBRS restore on vmexit, moving it before the first RET or indirect branch - Fix log level for VMSA dump - Block all page faults during kvm_zap_gfn_range() Tools: - kvm_stat: fix incorrect detection of debugfs - kvm_stat: update vmexit definitions" * tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm: KVM: x86/mmu: Block all page faults during kvm_zap_gfn_range() KVM: x86/pmu: Limit the maximum number of supported AMD GP counters KVM: x86/pmu: Limit the maximum number of supported Intel GP counters KVM: x86/pmu: Do not speculatively query Intel GP PMCs that don't exist yet KVM: SVM: Only dump VMSA to klog at KERN_DEBUG level tools/kvm_stat: update exit reasons for vmx/svm/aarch64/userspace tools/kvm_stat: fix incorrect detection of debugfs x86, KVM: remove unnecessary argument to x86_virt_spec_ctrl and callers KVM: SVM: move MSR_IA32_SPEC_CTRL save/restore to assembly KVM: SVM: restore host save area from assembly KVM: SVM: move guest vmsave/vmload back to assembly KVM: SVM: do not allocate struct svm_cpu_data dynamically KVM: SVM: remove dead field from struct svm_cpu_data KVM: SVM: remove unused field from struct vcpu_svm KVM: SVM: retrieve VMCB from assembly KVM: SVM: adjust register allocation for __svm_vcpu_run() KVM: SVM: replace regs argument of __svm_vcpu_run() with vcpu_svm KVM: x86: use a separate asm-offsets.c file KVM: s390: pci: Fix allocation size of aift kzdev elements KVM: s390: pv: don't allow userspace to set the clock under PV
2 parents 5be07b3 + 6d3085e commit 74bd160

23 files changed

Lines changed: 435 additions & 207 deletions

File tree

Documentation/virt/kvm/devices/vm.rst

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -215,6 +215,7 @@ KVM_S390_VM_TOD_EXT).
215215
:Parameters: address of a buffer in user space to store the data (u8) to
216216
:Returns: -EFAULT if the given address is not accessible from kernel space;
217217
-EINVAL if setting the TOD clock extension to != 0 is not supported
218+
-EOPNOTSUPP for a PV guest (TOD managed by the ultravisor)
218219

219220
3.2. ATTRIBUTE: KVM_S390_VM_TOD_LOW
220221
-----------------------------------
@@ -224,6 +225,7 @@ the POP (u64).
224225

225226
:Parameters: address of a buffer in user space to store the data (u64) to
226227
:Returns: -EFAULT if the given address is not accessible from kernel space
228+
-EOPNOTSUPP for a PV guest (TOD managed by the ultravisor)
227229

228230
3.3. ATTRIBUTE: KVM_S390_VM_TOD_EXT
229231
-----------------------------------
@@ -237,6 +239,7 @@ it, it is stored as 0 and not allowed to be set to a value != 0.
237239
(kvm_s390_vm_tod_clock) to
238240
:Returns: -EFAULT if the given address is not accessible from kernel space;
239241
-EINVAL if setting the TOD clock extension to != 0 is not supported
242+
-EOPNOTSUPP for a PV guest (TOD managed by the ultravisor)
240243

241244
4. GROUP: KVM_S390_VM_CRYPTO
242245
============================

arch/s390/kvm/kvm-s390.c

Lines changed: 17 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -1207,6 +1207,8 @@ static int kvm_s390_vm_get_migration(struct kvm *kvm,
12071207
return 0;
12081208
}
12091209

1210+
static void __kvm_s390_set_tod_clock(struct kvm *kvm, const struct kvm_s390_vm_tod_clock *gtod);
1211+
12101212
static int kvm_s390_set_tod_ext(struct kvm *kvm, struct kvm_device_attr *attr)
12111213
{
12121214
struct kvm_s390_vm_tod_clock gtod;
@@ -1216,7 +1218,7 @@ static int kvm_s390_set_tod_ext(struct kvm *kvm, struct kvm_device_attr *attr)
12161218

12171219
if (!test_kvm_facility(kvm, 139) && gtod.epoch_idx)
12181220
return -EINVAL;
1219-
kvm_s390_set_tod_clock(kvm, &gtod);
1221+
__kvm_s390_set_tod_clock(kvm, &gtod);
12201222

12211223
VM_EVENT(kvm, 3, "SET: TOD extension: 0x%x, TOD base: 0x%llx",
12221224
gtod.epoch_idx, gtod.tod);
@@ -1247,7 +1249,7 @@ static int kvm_s390_set_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
12471249
sizeof(gtod.tod)))
12481250
return -EFAULT;
12491251

1250-
kvm_s390_set_tod_clock(kvm, &gtod);
1252+
__kvm_s390_set_tod_clock(kvm, &gtod);
12511253
VM_EVENT(kvm, 3, "SET: TOD base: 0x%llx", gtod.tod);
12521254
return 0;
12531255
}
@@ -1259,6 +1261,16 @@ static int kvm_s390_set_tod(struct kvm *kvm, struct kvm_device_attr *attr)
12591261
if (attr->flags)
12601262
return -EINVAL;
12611263

1264+
mutex_lock(&kvm->lock);
1265+
/*
1266+
* For protected guests, the TOD is managed by the ultravisor, so trying
1267+
* to change it will never bring the expected results.
1268+
*/
1269+
if (kvm_s390_pv_is_protected(kvm)) {
1270+
ret = -EOPNOTSUPP;
1271+
goto out_unlock;
1272+
}
1273+
12621274
switch (attr->attr) {
12631275
case KVM_S390_VM_TOD_EXT:
12641276
ret = kvm_s390_set_tod_ext(kvm, attr);
@@ -1273,6 +1285,9 @@ static int kvm_s390_set_tod(struct kvm *kvm, struct kvm_device_attr *attr)
12731285
ret = -ENXIO;
12741286
break;
12751287
}
1288+
1289+
out_unlock:
1290+
mutex_unlock(&kvm->lock);
12761291
return ret;
12771292
}
12781293

@@ -4377,13 +4392,6 @@ static void __kvm_s390_set_tod_clock(struct kvm *kvm, const struct kvm_s390_vm_t
43774392
preempt_enable();
43784393
}
43794394

4380-
void kvm_s390_set_tod_clock(struct kvm *kvm, const struct kvm_s390_vm_tod_clock *gtod)
4381-
{
4382-
mutex_lock(&kvm->lock);
4383-
__kvm_s390_set_tod_clock(kvm, gtod);
4384-
mutex_unlock(&kvm->lock);
4385-
}
4386-
43874395
int kvm_s390_try_set_tod_clock(struct kvm *kvm, const struct kvm_s390_vm_tod_clock *gtod)
43884396
{
43894397
if (!mutex_trylock(&kvm->lock))

arch/s390/kvm/kvm-s390.h

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -363,7 +363,6 @@ int kvm_s390_handle_sigp(struct kvm_vcpu *vcpu);
363363
int kvm_s390_handle_sigp_pei(struct kvm_vcpu *vcpu);
364364

365365
/* implemented in kvm-s390.c */
366-
void kvm_s390_set_tod_clock(struct kvm *kvm, const struct kvm_s390_vm_tod_clock *gtod);
367366
int kvm_s390_try_set_tod_clock(struct kvm *kvm, const struct kvm_s390_vm_tod_clock *gtod);
368367
long kvm_arch_fault_in_page(struct kvm_vcpu *vcpu, gpa_t gpa, int writable);
369368
int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long addr);

arch/s390/kvm/pci.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -126,7 +126,7 @@ int kvm_s390_pci_aen_init(u8 nisc)
126126
return -EPERM;
127127

128128
mutex_lock(&aift->aift_lock);
129-
aift->kzdev = kcalloc(ZPCI_NR_DEVICES, sizeof(struct kvm_zdev),
129+
aift->kzdev = kcalloc(ZPCI_NR_DEVICES, sizeof(struct kvm_zdev *),
130130
GFP_KERNEL);
131131
if (!aift->kzdev) {
132132
rc = -ENOMEM;

arch/x86/include/asm/kvm_host.h

Lines changed: 6 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -501,7 +501,12 @@ struct kvm_pmc {
501501
bool intr;
502502
};
503503

504+
/* More counters may conflict with other existing Architectural MSRs */
505+
#define KVM_INTEL_PMC_MAX_GENERIC 8
506+
#define MSR_ARCH_PERFMON_PERFCTR_MAX (MSR_ARCH_PERFMON_PERFCTR0 + KVM_INTEL_PMC_MAX_GENERIC - 1)
507+
#define MSR_ARCH_PERFMON_EVENTSEL_MAX (MSR_ARCH_PERFMON_EVENTSEL0 + KVM_INTEL_PMC_MAX_GENERIC - 1)
504508
#define KVM_PMC_MAX_FIXED 3
509+
#define KVM_AMD_PMC_MAX_GENERIC 6
505510
struct kvm_pmu {
506511
unsigned nr_arch_gp_counters;
507512
unsigned nr_arch_fixed_counters;
@@ -516,7 +521,7 @@ struct kvm_pmu {
516521
u64 reserved_bits;
517522
u64 raw_event_mask;
518523
u8 version;
519-
struct kvm_pmc gp_counters[INTEL_PMC_MAX_GENERIC];
524+
struct kvm_pmc gp_counters[KVM_INTEL_PMC_MAX_GENERIC];
520525
struct kvm_pmc fixed_counters[KVM_PMC_MAX_FIXED];
521526
struct irq_work irq_work;
522527
DECLARE_BITMAP(reprogram_pmi, X86_PMC_IDX_MAX);

arch/x86/include/asm/spec-ctrl.h

Lines changed: 5 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -13,7 +13,7 @@
1313
* Takes the guest view of SPEC_CTRL MSR as a parameter and also
1414
* the guest's version of VIRT_SPEC_CTRL, if emulated.
1515
*/
16-
extern void x86_virt_spec_ctrl(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl, bool guest);
16+
extern void x86_virt_spec_ctrl(u64 guest_virt_spec_ctrl, bool guest);
1717

1818
/**
1919
* x86_spec_ctrl_set_guest - Set speculation control registers for the guest
@@ -24,9 +24,9 @@ extern void x86_virt_spec_ctrl(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl, bo
2424
* Avoids writing to the MSR if the content/bits are the same
2525
*/
2626
static inline
27-
void x86_spec_ctrl_set_guest(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl)
27+
void x86_spec_ctrl_set_guest(u64 guest_virt_spec_ctrl)
2828
{
29-
x86_virt_spec_ctrl(guest_spec_ctrl, guest_virt_spec_ctrl, true);
29+
x86_virt_spec_ctrl(guest_virt_spec_ctrl, true);
3030
}
3131

3232
/**
@@ -38,9 +38,9 @@ void x86_spec_ctrl_set_guest(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl)
3838
* Avoids writing to the MSR if the content/bits are the same
3939
*/
4040
static inline
41-
void x86_spec_ctrl_restore_host(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl)
41+
void x86_spec_ctrl_restore_host(u64 guest_virt_spec_ctrl)
4242
{
43-
x86_virt_spec_ctrl(guest_spec_ctrl, guest_virt_spec_ctrl, false);
43+
x86_virt_spec_ctrl(guest_virt_spec_ctrl, false);
4444
}
4545

4646
/* AMD specific Speculative Store Bypass MSR data */

arch/x86/kernel/asm-offsets.c

Lines changed: 0 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -19,7 +19,6 @@
1919
#include <asm/suspend.h>
2020
#include <asm/tlbflush.h>
2121
#include <asm/tdx.h>
22-
#include "../kvm/vmx/vmx.h"
2322

2423
#ifdef CONFIG_XEN
2524
#include <xen/interface/xen.h>
@@ -108,9 +107,4 @@ static void __used common(void)
108107
OFFSET(TSS_sp0, tss_struct, x86_tss.sp0);
109108
OFFSET(TSS_sp1, tss_struct, x86_tss.sp1);
110109
OFFSET(TSS_sp2, tss_struct, x86_tss.sp2);
111-
112-
if (IS_ENABLED(CONFIG_KVM_INTEL)) {
113-
BLANK();
114-
OFFSET(VMX_spec_ctrl, vcpu_vmx, spec_ctrl);
115-
}
116110
}

arch/x86/kernel/cpu/bugs.c

Lines changed: 4 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -196,22 +196,15 @@ void __init check_bugs(void)
196196
}
197197

198198
/*
199-
* NOTE: This function is *only* called for SVM. VMX spec_ctrl handling is
200-
* done in vmenter.S.
199+
* NOTE: This function is *only* called for SVM, since Intel uses
200+
* MSR_IA32_SPEC_CTRL for SSBD.
201201
*/
202202
void
203-
x86_virt_spec_ctrl(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl, bool setguest)
203+
x86_virt_spec_ctrl(u64 guest_virt_spec_ctrl, bool setguest)
204204
{
205-
u64 msrval, guestval = guest_spec_ctrl, hostval = spec_ctrl_current();
205+
u64 guestval, hostval;
206206
struct thread_info *ti = current_thread_info();
207207

208-
if (static_cpu_has(X86_FEATURE_MSR_SPEC_CTRL)) {
209-
if (hostval != guestval) {
210-
msrval = setguest ? guestval : hostval;
211-
wrmsrl(MSR_IA32_SPEC_CTRL, msrval);
212-
}
213-
}
214-
215208
/*
216209
* If SSBD is not handled in MSR_SPEC_CTRL on AMD, update
217210
* MSR_AMD64_L2_CFG or MSR_VIRT_SPEC_CTRL if supported.

arch/x86/kvm/.gitignore

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,2 @@
1+
/kvm-asm-offsets.s
2+
/kvm-asm-offsets.h

arch/x86/kvm/Makefile

Lines changed: 12 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -34,3 +34,15 @@ endif
3434
obj-$(CONFIG_KVM) += kvm.o
3535
obj-$(CONFIG_KVM_INTEL) += kvm-intel.o
3636
obj-$(CONFIG_KVM_AMD) += kvm-amd.o
37+
38+
AFLAGS_svm/vmenter.o := -iquote $(obj)
39+
$(obj)/svm/vmenter.o: $(obj)/kvm-asm-offsets.h
40+
41+
AFLAGS_vmx/vmenter.o := -iquote $(obj)
42+
$(obj)/vmx/vmenter.o: $(obj)/kvm-asm-offsets.h
43+
44+
$(obj)/kvm-asm-offsets.h: $(obj)/kvm-asm-offsets.s FORCE
45+
$(call filechk,offsets,__KVM_ASM_OFFSETS_H__)
46+
47+
targets += kvm-asm-offsets.s
48+
clean-files += kvm-asm-offsets.h

0 commit comments

Comments (0)