Skip to content

Commit 0afdfd8

Browse files
committed
Merge tag 'kvm-x86-hyperv-6.8' of https://github.com/kvm-x86/linux into HEAD
KVM x86 Hyper-V changes for 6.8:
- Guard KVM-on-HyperV's range-based TLB flush hooks with an #ifdef on CONFIG_HYPERV, as a minor optimization and to self-document the code.
- Add CONFIG_KVM_HYPERV to allow disabling KVM support for HyperV "emulation" at build time.
2 parents fb872da + 017a99a commit 0afdfd8

35 files changed

Lines changed: 1074 additions & 750 deletions

arch/x86/include/asm/kvm-x86-ops.h

Lines changed: 2 additions & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -55,8 +55,10 @@ KVM_X86_OP(set_rflags)
5555
KVM_X86_OP(get_if_flag)
5656
KVM_X86_OP(flush_tlb_all)
5757
KVM_X86_OP(flush_tlb_current)
58+
#if IS_ENABLED(CONFIG_HYPERV)
5859
KVM_X86_OP_OPTIONAL(flush_remote_tlbs)
5960
KVM_X86_OP_OPTIONAL(flush_remote_tlbs_range)
61+
#endif
6062
KVM_X86_OP(flush_tlb_gva)
6163
KVM_X86_OP(flush_tlb_guest)
6264
KVM_X86_OP(vcpu_pre_run)

arch/x86/include/asm/kvm_host.h

Lines changed: 24 additions & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -937,8 +937,10 @@ struct kvm_vcpu_arch {
937937
/* used for guest single stepping over the given code position */
938938
unsigned long singlestep_rip;
939939

940+
#ifdef CONFIG_KVM_HYPERV
940941
bool hyperv_enabled;
941942
struct kvm_vcpu_hv *hyperv;
943+
#endif
942944
#ifdef CONFIG_KVM_XEN
943945
struct kvm_vcpu_xen xen;
944946
#endif
@@ -1095,6 +1097,7 @@ enum hv_tsc_page_status {
10951097
HV_TSC_PAGE_BROKEN,
10961098
};
10971099

1100+
#ifdef CONFIG_KVM_HYPERV
10981101
/* Hyper-V emulation context */
10991102
struct kvm_hv {
11001103
struct mutex hv_lock;
@@ -1125,9 +1128,9 @@ struct kvm_hv {
11251128
*/
11261129
unsigned int synic_auto_eoi_used;
11271130

1128-
struct hv_partition_assist_pg *hv_pa_pg;
11291131
struct kvm_hv_syndbg hv_syndbg;
11301132
};
1133+
#endif
11311134

11321135
struct msr_bitmap_range {
11331136
u32 flags;
@@ -1136,6 +1139,7 @@ struct msr_bitmap_range {
11361139
unsigned long *bitmap;
11371140
};
11381141

1142+
#ifdef CONFIG_KVM_XEN
11391143
/* Xen emulation context */
11401144
struct kvm_xen {
11411145
struct mutex xen_lock;
@@ -1147,6 +1151,7 @@ struct kvm_xen {
11471151
struct idr evtchn_ports;
11481152
unsigned long poll_mask[BITS_TO_LONGS(KVM_MAX_VCPUS)];
11491153
};
1154+
#endif
11501155

11511156
enum kvm_irqchip_mode {
11521157
KVM_IRQCHIP_NONE,
@@ -1348,8 +1353,13 @@ struct kvm_arch {
13481353
/* reads protected by irq_srcu, writes by irq_lock */
13491354
struct hlist_head mask_notifier_list;
13501355

1356+
#ifdef CONFIG_KVM_HYPERV
13511357
struct kvm_hv hyperv;
1358+
#endif
1359+
1360+
#ifdef CONFIG_KVM_XEN
13521361
struct kvm_xen xen;
1362+
#endif
13531363

13541364
bool backwards_tsc_observed;
13551365
bool boot_vcpu_runs_old_kvmclock;
@@ -1442,6 +1452,7 @@ struct kvm_arch {
14421452
#if IS_ENABLED(CONFIG_HYPERV)
14431453
hpa_t hv_root_tdp;
14441454
spinlock_t hv_root_tdp_lock;
1455+
struct hv_partition_assist_pg *hv_pa_pg;
14451456
#endif
14461457
/*
14471458
* VM-scope maximum vCPU ID. Used to determine the size of structures
@@ -1614,9 +1625,11 @@ struct kvm_x86_ops {
16141625

16151626
void (*flush_tlb_all)(struct kvm_vcpu *vcpu);
16161627
void (*flush_tlb_current)(struct kvm_vcpu *vcpu);
1628+
#if IS_ENABLED(CONFIG_HYPERV)
16171629
int (*flush_remote_tlbs)(struct kvm *kvm);
16181630
int (*flush_remote_tlbs_range)(struct kvm *kvm, gfn_t gfn,
16191631
gfn_t nr_pages);
1632+
#endif
16201633

16211634
/*
16221635
* Flush any TLB entries associated with the given GVA.
@@ -1825,6 +1838,7 @@ static inline struct kvm *kvm_arch_alloc_vm(void)
18251838
#define __KVM_HAVE_ARCH_VM_FREE
18261839
void kvm_arch_free_vm(struct kvm *kvm);
18271840

1841+
#if IS_ENABLED(CONFIG_HYPERV)
18281842
#define __KVM_HAVE_ARCH_FLUSH_REMOTE_TLBS
18291843
static inline int kvm_arch_flush_remote_tlbs(struct kvm *kvm)
18301844
{
@@ -1836,6 +1850,15 @@ static inline int kvm_arch_flush_remote_tlbs(struct kvm *kvm)
18361850
}
18371851

18381852
#define __KVM_HAVE_ARCH_FLUSH_REMOTE_TLBS_RANGE
1853+
static inline int kvm_arch_flush_remote_tlbs_range(struct kvm *kvm, gfn_t gfn,
1854+
u64 nr_pages)
1855+
{
1856+
if (!kvm_x86_ops.flush_remote_tlbs_range)
1857+
return -EOPNOTSUPP;
1858+
1859+
return static_call(kvm_x86_flush_remote_tlbs_range)(kvm, gfn, nr_pages);
1860+
}
1861+
#endif /* CONFIG_HYPERV */
18391862

18401863
#define kvm_arch_pmi_in_guest(vcpu) \
18411864
((vcpu) && (vcpu)->arch.handling_intr_from_guest)

arch/x86/kvm/Kconfig

Lines changed: 14 additions & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -138,6 +138,20 @@ config KVM_SMM
138138

139139
If unsure, say Y.
140140

141+
config KVM_HYPERV
142+
bool "Support for Microsoft Hyper-V emulation"
143+
depends on KVM
144+
default y
145+
help
146+
Provides KVM support for emulating Microsoft Hyper-V. This allows KVM
147+
to expose a subset of the paravirtualized interfaces defined in the
148+
Hyper-V Hypervisor Top-Level Functional Specification (TLFS):
149+
https://docs.microsoft.com/en-us/virtualization/hyper-v-on-windows/reference/tlfs
150+
These interfaces are required for the correct and performant functioning
151+
of Windows and Hyper-V guests on KVM.
152+
153+
If unsure, say "Y".
154+
141155
config KVM_XEN
142156
bool "Support for Xen hypercall interface"
143157
depends on KVM

arch/x86/kvm/Makefile

Lines changed: 9 additions & 7 deletions
Original file line number | Diff line number | Diff line change
@@ -11,25 +11,27 @@ include $(srctree)/virt/kvm/Makefile.kvm
1111

1212
kvm-y += x86.o emulate.o i8259.o irq.o lapic.o \
1313
i8254.o ioapic.o irq_comm.o cpuid.o pmu.o mtrr.o \
14-
hyperv.o debugfs.o mmu/mmu.o mmu/page_track.o \
14+
debugfs.o mmu/mmu.o mmu/page_track.o \
1515
mmu/spte.o
1616

17-
ifdef CONFIG_HYPERV
18-
kvm-y += kvm_onhyperv.o
19-
endif
20-
2117
kvm-$(CONFIG_X86_64) += mmu/tdp_iter.o mmu/tdp_mmu.o
18+
kvm-$(CONFIG_KVM_HYPERV) += hyperv.o
2219
kvm-$(CONFIG_KVM_XEN) += xen.o
2320
kvm-$(CONFIG_KVM_SMM) += smm.o
2421

2522
kvm-intel-y += vmx/vmx.o vmx/vmenter.o vmx/pmu_intel.o vmx/vmcs12.o \
26-
vmx/hyperv.o vmx/nested.o vmx/posted_intr.o
23+
vmx/nested.o vmx/posted_intr.o
24+
2725
kvm-intel-$(CONFIG_X86_SGX_KVM) += vmx/sgx.o
26+
kvm-intel-$(CONFIG_KVM_HYPERV) += vmx/hyperv.o vmx/hyperv_evmcs.o
2827

2928
kvm-amd-y += svm/svm.o svm/vmenter.o svm/pmu.o svm/nested.o svm/avic.o \
30-
svm/sev.o svm/hyperv.o
29+
svm/sev.o
30+
kvm-amd-$(CONFIG_KVM_HYPERV) += svm/hyperv.o
3131

3232
ifdef CONFIG_HYPERV
33+
kvm-y += kvm_onhyperv.o
34+
kvm-intel-y += vmx/vmx_onhyperv.o vmx/hyperv_evmcs.o
3335
kvm-amd-y += svm/svm_onhyperv.o
3436
endif
3537

arch/x86/kvm/cpuid.c

Lines changed: 6 additions & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -314,11 +314,15 @@ EXPORT_SYMBOL_GPL(kvm_update_cpuid_runtime);
314314

315315
static bool kvm_cpuid_has_hyperv(struct kvm_cpuid_entry2 *entries, int nent)
316316
{
317+
#ifdef CONFIG_KVM_HYPERV
317318
struct kvm_cpuid_entry2 *entry;
318319

319320
entry = cpuid_entry2_find(entries, nent, HYPERV_CPUID_INTERFACE,
320321
KVM_CPUID_INDEX_NOT_SIGNIFICANT);
321322
return entry && entry->eax == HYPERV_CPUID_SIGNATURE_EAX;
323+
#else
324+
return false;
325+
#endif
322326
}
323327

324328
static void kvm_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
@@ -433,11 +437,13 @@ static int kvm_set_cpuid(struct kvm_vcpu *vcpu, struct kvm_cpuid_entry2 *e2,
433437
return 0;
434438
}
435439

440+
#ifdef CONFIG_KVM_HYPERV
436441
if (kvm_cpuid_has_hyperv(e2, nent)) {
437442
r = kvm_hv_vcpu_init(vcpu);
438443
if (r)
439444
return r;
440445
}
446+
#endif
441447

442448
r = kvm_check_cpuid(vcpu, e2, nent);
443449
if (r)

arch/x86/kvm/hyperv.h

Lines changed: 84 additions & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -24,6 +24,8 @@
2424
#include <linux/kvm_host.h>
2525
#include "x86.h"
2626

27+
#ifdef CONFIG_KVM_HYPERV
28+
2729
/* "Hv#1" signature */
2830
#define HYPERV_CPUID_SIGNATURE_EAX 0x31237648
2931

@@ -105,6 +107,17 @@ int kvm_hv_synic_set_irq(struct kvm *kvm, u32 vcpu_id, u32 sint);
105107
void kvm_hv_synic_send_eoi(struct kvm_vcpu *vcpu, int vector);
106108
int kvm_hv_activate_synic(struct kvm_vcpu *vcpu, bool dont_zero_synic_pages);
107109

110+
static inline bool kvm_hv_synic_has_vector(struct kvm_vcpu *vcpu, int vector)
111+
{
112+
return to_hv_vcpu(vcpu) && test_bit(vector, to_hv_synic(vcpu)->vec_bitmap);
113+
}
114+
115+
static inline bool kvm_hv_synic_auto_eoi_set(struct kvm_vcpu *vcpu, int vector)
116+
{
117+
return to_hv_vcpu(vcpu) &&
118+
test_bit(vector, to_hv_synic(vcpu)->auto_eoi_bitmap);
119+
}
120+
108121
void kvm_hv_vcpu_uninit(struct kvm_vcpu *vcpu);
109122

110123
bool kvm_hv_assist_page_enabled(struct kvm_vcpu *vcpu);
@@ -236,6 +249,76 @@ static inline int kvm_hv_verify_vp_assist(struct kvm_vcpu *vcpu)
236249
return kvm_hv_get_assist_page(vcpu);
237250
}
238251

252+
static inline void kvm_hv_nested_transtion_tlb_flush(struct kvm_vcpu *vcpu,
253+
bool tdp_enabled)
254+
{
255+
/*
256+
* KVM_REQ_HV_TLB_FLUSH flushes entries from either L1's VP_ID or
257+
* L2's VP_ID upon request from the guest. Make sure we check for
258+
* pending entries in the right FIFO upon L1/L2 transition as these
259+
* requests are put by other vCPUs asynchronously.
260+
*/
261+
if (to_hv_vcpu(vcpu) && tdp_enabled)
262+
kvm_make_request(KVM_REQ_HV_TLB_FLUSH, vcpu);
263+
}
264+
239265
int kvm_hv_vcpu_flush_tlb(struct kvm_vcpu *vcpu);
266+
#else /* CONFIG_KVM_HYPERV */
267+
static inline void kvm_hv_setup_tsc_page(struct kvm *kvm,
268+
struct pvclock_vcpu_time_info *hv_clock) {}
269+
static inline void kvm_hv_request_tsc_page_update(struct kvm *kvm) {}
270+
static inline void kvm_hv_init_vm(struct kvm *kvm) {}
271+
static inline void kvm_hv_destroy_vm(struct kvm *kvm) {}
272+
static inline int kvm_hv_vcpu_init(struct kvm_vcpu *vcpu)
273+
{
274+
return 0;
275+
}
276+
static inline void kvm_hv_vcpu_uninit(struct kvm_vcpu *vcpu) {}
277+
static inline bool kvm_hv_hypercall_enabled(struct kvm_vcpu *vcpu)
278+
{
279+
return false;
280+
}
281+
static inline int kvm_hv_hypercall(struct kvm_vcpu *vcpu)
282+
{
283+
return HV_STATUS_ACCESS_DENIED;
284+
}
285+
static inline void kvm_hv_vcpu_purge_flush_tlb(struct kvm_vcpu *vcpu) {}
286+
static inline void kvm_hv_free_pa_page(struct kvm *kvm) {}
287+
static inline bool kvm_hv_synic_has_vector(struct kvm_vcpu *vcpu, int vector)
288+
{
289+
return false;
290+
}
291+
static inline bool kvm_hv_synic_auto_eoi_set(struct kvm_vcpu *vcpu, int vector)
292+
{
293+
return false;
294+
}
295+
static inline void kvm_hv_synic_send_eoi(struct kvm_vcpu *vcpu, int vector) {}
296+
static inline bool kvm_hv_invtsc_suppressed(struct kvm_vcpu *vcpu)
297+
{
298+
return false;
299+
}
300+
static inline void kvm_hv_set_cpuid(struct kvm_vcpu *vcpu, bool hyperv_enabled) {}
301+
static inline bool kvm_hv_has_stimer_pending(struct kvm_vcpu *vcpu)
302+
{
303+
return false;
304+
}
305+
static inline bool kvm_hv_is_tlb_flush_hcall(struct kvm_vcpu *vcpu)
306+
{
307+
return false;
308+
}
309+
static inline bool guest_hv_cpuid_has_l2_tlb_flush(struct kvm_vcpu *vcpu)
310+
{
311+
return false;
312+
}
313+
static inline int kvm_hv_verify_vp_assist(struct kvm_vcpu *vcpu)
314+
{
315+
return 0;
316+
}
317+
static inline u32 kvm_hv_get_vpindex(struct kvm_vcpu *vcpu)
318+
{
319+
return vcpu->vcpu_idx;
320+
}
321+
static inline void kvm_hv_nested_transtion_tlb_flush(struct kvm_vcpu *vcpu, bool tdp_enabled) {}
322+
#endif /* CONFIG_KVM_HYPERV */
240323

241-
#endif
324+
#endif /* __ARCH_X86_KVM_HYPERV_H__ */

arch/x86/kvm/irq.c

Lines changed: 2 additions & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -118,8 +118,10 @@ static int kvm_cpu_get_extint(struct kvm_vcpu *v)
118118
if (!lapic_in_kernel(v))
119119
return v->arch.interrupt.nr;
120120

121+
#ifdef CONFIG_KVM_XEN
121122
if (kvm_xen_has_interrupt(v))
122123
return v->kvm->arch.xen.upcall_vector;
124+
#endif
123125

124126
if (irqchip_split(v->kvm)) {
125127
int vector = v->arch.pending_external_vector;

arch/x86/kvm/irq_comm.c

Lines changed: 8 additions & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -144,7 +144,7 @@ int kvm_set_msi(struct kvm_kernel_irq_routing_entry *e,
144144
return kvm_irq_delivery_to_apic(kvm, NULL, &irq, NULL);
145145
}
146146

147-
147+
#ifdef CONFIG_KVM_HYPERV
148148
static int kvm_hv_set_sint(struct kvm_kernel_irq_routing_entry *e,
149149
struct kvm *kvm, int irq_source_id, int level,
150150
bool line_status)
@@ -154,6 +154,7 @@ static int kvm_hv_set_sint(struct kvm_kernel_irq_routing_entry *e,
154154

155155
return kvm_hv_synic_set_irq(kvm, e->hv_sint.vcpu, e->hv_sint.sint);
156156
}
157+
#endif
157158

158159
int kvm_arch_set_irq_inatomic(struct kvm_kernel_irq_routing_entry *e,
159160
struct kvm *kvm, int irq_source_id, int level,
@@ -163,9 +164,11 @@ int kvm_arch_set_irq_inatomic(struct kvm_kernel_irq_routing_entry *e,
163164
int r;
164165

165166
switch (e->type) {
167+
#ifdef CONFIG_KVM_HYPERV
166168
case KVM_IRQ_ROUTING_HV_SINT:
167169
return kvm_hv_set_sint(e, kvm, irq_source_id, level,
168170
line_status);
171+
#endif
169172

170173
case KVM_IRQ_ROUTING_MSI:
171174
if (kvm_msi_route_invalid(kvm, e))
@@ -314,11 +317,13 @@ int kvm_set_routing_entry(struct kvm *kvm,
314317
if (kvm_msi_route_invalid(kvm, e))
315318
return -EINVAL;
316319
break;
320+
#ifdef CONFIG_KVM_HYPERV
317321
case KVM_IRQ_ROUTING_HV_SINT:
318322
e->set = kvm_hv_set_sint;
319323
e->hv_sint.vcpu = ue->u.hv_sint.vcpu;
320324
e->hv_sint.sint = ue->u.hv_sint.sint;
321325
break;
326+
#endif
322327
#ifdef CONFIG_KVM_XEN
323328
case KVM_IRQ_ROUTING_XEN_EVTCHN:
324329
return kvm_xen_setup_evtchn(kvm, e, ue);
@@ -438,5 +443,7 @@ void kvm_scan_ioapic_routes(struct kvm_vcpu *vcpu,
438443

439444
void kvm_arch_irq_routing_update(struct kvm *kvm)
440445
{
446+
#ifdef CONFIG_KVM_HYPERV
441447
kvm_hv_irq_routing_update(kvm);
448+
#endif
442449
}

arch/x86/kvm/kvm_onhyperv.h

Lines changed: 20 additions & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -10,6 +10,26 @@
1010
int hv_flush_remote_tlbs_range(struct kvm *kvm, gfn_t gfn, gfn_t nr_pages);
1111
int hv_flush_remote_tlbs(struct kvm *kvm);
1212
void hv_track_root_tdp(struct kvm_vcpu *vcpu, hpa_t root_tdp);
13+
static inline hpa_t hv_get_partition_assist_page(struct kvm_vcpu *vcpu)
14+
{
15+
/*
16+
* Partition assist page is something which Hyper-V running in L0
17+
* requires from KVM running in L1 before direct TLB flush for L2
18+
* guests can be enabled. KVM doesn't currently use the page but to
19+
* comply with TLFS it still needs to be allocated. For now, this
20+
* is a single page shared among all vCPUs.
21+
*/
22+
struct hv_partition_assist_pg **p_hv_pa_pg =
23+
&vcpu->kvm->arch.hv_pa_pg;
24+
25+
if (!*p_hv_pa_pg)
26+
*p_hv_pa_pg = kzalloc(PAGE_SIZE, GFP_KERNEL_ACCOUNT);
27+
28+
if (!*p_hv_pa_pg)
29+
return INVALID_PAGE;
30+
31+
return __pa(*p_hv_pa_pg);
32+
}
1333
#else /* !CONFIG_HYPERV */
1434
static inline int hv_flush_remote_tlbs(struct kvm *kvm)
1535
{

0 commit comments

Comments (0)