@@ -48,6 +48,35 @@ extern bool host_cpu_is_amd;
4848#define X86_CR4_SMAP (1ul << 21)
4949#define X86_CR4_PKE (1ul << 22)
5050
51+ struct xstate_header {
52+ 	u64				xstate_bv;
53+ 	u64				xcomp_bv;
54+ 	u64				reserved[6];
55+ } __attribute__((packed));
56+
57+ struct xstate {
58+ 	u8				i387[512];
59+ 	struct xstate_header		header;
60+ 	u8				extended_state_area[0];
61+ } __attribute__ ((packed, aligned (64)));
62+
63+ #define XFEATURE_MASK_FP BIT_ULL(0)
64+ #define XFEATURE_MASK_SSE BIT_ULL(1)
65+ #define XFEATURE_MASK_YMM BIT_ULL(2)
66+ #define XFEATURE_MASK_BNDREGS BIT_ULL(3)
67+ #define XFEATURE_MASK_BNDCSR BIT_ULL(4)
68+ #define XFEATURE_MASK_OPMASK BIT_ULL(5)
69+ #define XFEATURE_MASK_ZMM_Hi256 BIT_ULL(6)
70+ #define XFEATURE_MASK_Hi16_ZMM BIT_ULL(7)
71+ #define XFEATURE_MASK_XTILE_CFG BIT_ULL(17)
72+ #define XFEATURE_MASK_XTILE_DATA BIT_ULL(18)
73+
74+ #define XFEATURE_MASK_AVX512 (XFEATURE_MASK_OPMASK | \
75+ XFEATURE_MASK_ZMM_Hi256 | \
76+ XFEATURE_MASK_Hi16_ZMM)
77+ #define XFEATURE_MASK_XTILE (XFEATURE_MASK_XTILE_DATA | \
78+ XFEATURE_MASK_XTILE_CFG)
79+
5180/* Note, these are ordered alphabetically to match kvm_cpuid_entry2. Eww. */
5281enum cpuid_output_regs {
5382	KVM_CPUID_EAX,
@@ -131,6 +160,7 @@ struct kvm_x86_cpu_feature {
131160#define X86_FEATURE_XTILEDATA KVM_X86_CPU_FEATURE(0xD, 0, EAX, 18)
132161#define X86_FEATURE_XSAVES KVM_X86_CPU_FEATURE(0xD, 1, EAX, 3)
133162#define X86_FEATURE_XFD KVM_X86_CPU_FEATURE(0xD, 1, EAX, 4)
163+ #define X86_FEATURE_XTILEDATA_XFD KVM_X86_CPU_FEATURE(0xD, 18, ECX, 2)
134164
135165/*
136166 * Extended Leafs, a.k.a. AMD defined
@@ -211,10 +241,14 @@ struct kvm_x86_cpu_property {
211241#define X86_PROPERTY_PMU_NR_GP_COUNTERS KVM_X86_CPU_PROPERTY(0xa, 0, EAX, 8, 15)
212242#define X86_PROPERTY_PMU_EBX_BIT_VECTOR_LENGTH KVM_X86_CPU_PROPERTY(0xa, 0, EAX, 24, 31)
213243
244+ #define X86_PROPERTY_SUPPORTED_XCR0_LO KVM_X86_CPU_PROPERTY(0xd, 0, EAX, 0, 31)
214245#define X86_PROPERTY_XSTATE_MAX_SIZE_XCR0 KVM_X86_CPU_PROPERTY(0xd, 0, EBX, 0, 31)
215246#define X86_PROPERTY_XSTATE_MAX_SIZE KVM_X86_CPU_PROPERTY(0xd, 0, ECX, 0, 31)
247+ #define X86_PROPERTY_SUPPORTED_XCR0_HI KVM_X86_CPU_PROPERTY(0xd, 0, EDX, 0, 31)
248+
216249#define X86_PROPERTY_XSTATE_TILE_SIZE KVM_X86_CPU_PROPERTY(0xd, 18, EAX, 0, 31)
217250#define X86_PROPERTY_XSTATE_TILE_OFFSET KVM_X86_CPU_PROPERTY(0xd, 18, EBX, 0, 31)
251+ #define X86_PROPERTY_AMX_MAX_PALETTE_TABLES KVM_X86_CPU_PROPERTY(0x1d, 0, EAX, 0, 31)
218252#define X86_PROPERTY_AMX_TOTAL_TILE_BYTES KVM_X86_CPU_PROPERTY(0x1d, 1, EAX, 0, 15)
219253#define X86_PROPERTY_AMX_BYTES_PER_TILE KVM_X86_CPU_PROPERTY(0x1d, 1, EAX, 16, 31)
220254#define X86_PROPERTY_AMX_BYTES_PER_ROW KVM_X86_CPU_PROPERTY(0x1d, 1, EBX, 0, 15)
@@ -496,6 +530,24 @@ static inline void set_cr4(uint64_t val)
496530	__asm__ __volatile__("mov %0, %%cr4" : : "r" (val) : "memory");
497531}
498532
533+ static inline u64 xgetbv(u32 index)
534+ {
535+ 	u32 eax, edx;
536+
537+ 	__asm__ __volatile__("xgetbv;"
538+ 			     : "=a" (eax), "=d" (edx)
539+ 			     : "c" (index));
540+ 	return eax | ((u64)edx << 32);
541+ }
542+
543+ static inline void xsetbv(u32 index, u64 value)
544+ {
545+ 	u32 eax = value;
546+ 	u32 edx = value >> 32;
547+
548+ 	__asm__ __volatile__("xsetbv" :: "a" (eax), "d" (edx), "c" (index));
549+ }
550+
499551static inline struct desc_ptr get_gdt(void)
500552{
501553	struct desc_ptr gdt;
@@ -632,6 +684,15 @@ static inline bool this_pmu_has(struct kvm_x86_pmu_feature feature)
632684	       !this_cpu_has(feature.anti_feature);
633685}
634686
687+ static __always_inline uint64_t this_cpu_supported_xcr0(void)
688+ {
689+ 	if (!this_cpu_has_p(X86_PROPERTY_SUPPORTED_XCR0_LO))
690+ 		return 0;
691+
692+ 	return this_cpu_property(X86_PROPERTY_SUPPORTED_XCR0_LO) |
693+ 	       ((uint64_t)this_cpu_property(X86_PROPERTY_SUPPORTED_XCR0_HI) << 32);
694+ }
695+
635696typedef u32 __attribute__((vector_size(16))) sse128_t;
636697#define __sse128_u union { sse128_t vec; u64 as_u64[2]; u32 as_u32[4]; }
637698#define sse128_lo(x) ({ __sse128_u t; t.vec = x; t.as_u64[0]; })
@@ -1086,6 +1147,14 @@ static inline uint8_t wrmsr_safe(uint32_t msr, uint64_t val)
10861147	return kvm_asm_safe("wrmsr", "a" (val & -1u), "d" (val >> 32), "c" (msr));
10871148}
10881149
1150+ static inline uint8_t xsetbv_safe(uint32_t index, uint64_t value)
1151+ {
1152+ 	u32 eax = value;
1153+ 	u32 edx = value >> 32;
1154+
1155+ 	return kvm_asm_safe("xsetbv", "a" (eax), "d" (edx), "c" (index));
1156+ }
1157+
10891158bool kvm_is_tdp_enabled(void);
10901159
10911160uint64_t *__vm_get_page_table_entry(struct kvm_vm *vm, uint64_t vaddr,
@@ -1097,10 +1166,10 @@ uint64_t kvm_hypercall(uint64_t nr, uint64_t a0, uint64_t a1, uint64_t a2,
10971166uint64_t __xen_hypercall(uint64_t nr, uint64_t a0, void *a1);
10981167void xen_hypercall(uint64_t nr, uint64_t a0, void *a1);
10991168
1100- void __vm_xsave_require_permission(int bit, const char *name);
1169+ void __vm_xsave_require_permission(uint64_t xfeature, const char *name);
11011170
1102- #define vm_xsave_require_permission(perm)	\
1103- 	__vm_xsave_require_permission(perm, #perm)
1171+ #define vm_xsave_require_permission(xfeature)	\
1172+ 	__vm_xsave_require_permission(xfeature, #xfeature)
11041173
11051174enum pg_level {
11061175	PG_LEVEL_NONE,
@@ -1137,14 +1206,6 @@ void virt_map_level(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
11371206#define X86_CR0_CD (1UL<<30) /* Cache Disable */
11381207#define X86_CR0_PG (1UL<<31) /* Paging */
11391208
1140- #define XSTATE_XTILE_CFG_BIT 17
1141- #define XSTATE_XTILE_DATA_BIT 18
1142-
1143- #define XSTATE_XTILE_CFG_MASK (1ULL << XSTATE_XTILE_CFG_BIT)
1144- #define XSTATE_XTILE_DATA_MASK (1ULL << XSTATE_XTILE_DATA_BIT)
1145- #define XFEATURE_XTILE_MASK (XSTATE_XTILE_CFG_MASK | \
1146- XSTATE_XTILE_DATA_MASK)
1147-
11481209#define PFERR_PRESENT_BIT 0
11491210#define PFERR_WRITE_BIT 1
11501211#define PFERR_USER_BIT 2
0 commit comments