Skip to content

Commit 93d1c9f

Browse files
robert-hoosean-jc
authored and committed
KVM: x86: Virtualize LAM for supervisor pointer
Add support to allow guests to set the new CR4 control bit for LAM and add implementation to get untagged address for supervisor pointers. LAM modifies the canonicality check applied to 64-bit linear addresses for data accesses, allowing software to use the untranslated address bits for metadata; the metadata bits are masked off before the address is used to access memory. LAM uses CR4.LAM_SUP (bit 28) to configure and enable LAM for supervisor pointers. It also changes VMENTER to allow the bit to be set in VMCS's HOST_CR4 and GUEST_CR4 to support virtualization. Note, CR4.LAM_SUP is allowed to be set even when not in 64-bit mode, but it will not take effect since LAM only applies to 64-bit linear addresses. Move CR4.LAM_SUP out of CR4_RESERVED_BITS; whether it is reserved depends on whether the vCPU supports LAM. Leave it intercepted to prevent the guest from setting the bit if LAM is not exposed to the guest, as well as to avoid a vmread every time KVM fetches its value, with the expectation that the guest won't toggle the bit frequently. Set the CR4.LAM_SUP bit in the emulated IA32_VMX_CR4_FIXED1 MSR for guests to allow guests to enable LAM for supervisor pointers in nested VMX operation. Hardware is not required to do a TLB flush when CR4.LAM_SUP is toggled, so KVM doesn't need to emulate a TLB flush based on it. There's no other feature or vmx_exec_controls connection, and no other code is needed in {kvm,vmx}_set_cr4(). Skip address untagging for instruction fetches (which includes branch targets), operands of INVLPG instructions, and implicit system accesses, all of which are not subject to untagging. Note, get_untagged_addr() isn't invoked for implicit system accesses as there is no reason to do so, but check the flag anyway for documentation purposes. 
Signed-off-by: Robert Hoo <robert.hu@linux.intel.com> Co-developed-by: Binbin Wu <binbin.wu@linux.intel.com> Signed-off-by: Binbin Wu <binbin.wu@linux.intel.com> Reviewed-by: Chao Gao <chao.gao@intel.com> Reviewed-by: Kai Huang <kai.huang@intel.com> Tested-by: Xuelian Guo <xuelian.guo@intel.com> Link: https://lore.kernel.org/r/20230913124227.12574-11-binbin.wu@linux.intel.com Signed-off-by: Sean Christopherson <seanjc@google.com>
1 parent b39bd52 commit 93d1c9f

3 files changed

Lines changed: 42 additions & 2 deletions

File tree

arch/x86/include/asm/kvm_host.h

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -133,7 +133,8 @@
133133
| X86_CR4_PGE | X86_CR4_PCE | X86_CR4_OSFXSR | X86_CR4_PCIDE \
134134
| X86_CR4_OSXSAVE | X86_CR4_SMEP | X86_CR4_FSGSBASE \
135135
| X86_CR4_OSXMMEXCPT | X86_CR4_LA57 | X86_CR4_VMXE \
136-
| X86_CR4_SMAP | X86_CR4_PKE | X86_CR4_UMIP))
136+
| X86_CR4_SMAP | X86_CR4_PKE | X86_CR4_UMIP \
137+
| X86_CR4_LAM_SUP))
137138

138139
#define CR8_RESERVED_BITS (~(unsigned long)X86_CR8_TPR)
139140

arch/x86/kvm/vmx/vmx.c

Lines changed: 38 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -7673,6 +7673,9 @@ static void nested_vmx_cr_fixed1_bits_update(struct kvm_vcpu *vcpu)
76737673
cr4_fixed1_update(X86_CR4_UMIP, ecx, feature_bit(UMIP));
76747674
cr4_fixed1_update(X86_CR4_LA57, ecx, feature_bit(LA57));
76757675

7676+
entry = kvm_find_cpuid_entry_index(vcpu, 0x7, 1);
7677+
cr4_fixed1_update(X86_CR4_LAM_SUP, eax, feature_bit(LAM));
7678+
76767679
#undef cr4_fixed1_update
76777680
}
76787681

@@ -8205,9 +8208,43 @@ static void vmx_vm_destroy(struct kvm *kvm)
82058208
free_pages((unsigned long)kvm_vmx->pid_table, vmx_get_pid_table_order(kvm));
82068209
}
82078210

8211+
/*
8212+
* Note, the SDM states that the linear address is masked *after* the modified
8213+
* canonicality check, whereas KVM masks (untags) the address and then performs
8214+
* a "normal" canonicality check. Functionally, the two methods are identical,
8215+
* and when the masking occurs relative to the canonicality check isn't visible
8216+
* to software, i.e. KVM's behavior doesn't violate the SDM.
8217+
*/
82088218
gva_t vmx_get_untagged_addr(struct kvm_vcpu *vcpu, gva_t gva, unsigned int flags)
82098219
{
8210-
return gva;
8220+
int lam_bit;
8221+
8222+
if (flags & (X86EMUL_F_FETCH | X86EMUL_F_IMPLICIT | X86EMUL_F_INVLPG))
8223+
return gva;
8224+
8225+
if (!is_64_bit_mode(vcpu))
8226+
return gva;
8227+
8228+
/*
8229+
* Bit 63 determines if the address should be treated as user address
8230+
* or a supervisor address.
8231+
*/
8232+
if (!(gva & BIT_ULL(63))) {
8233+
/* KVM doesn't yet virtualize LAM_U{48,57}. */
8234+
return gva;
8235+
} else {
8236+
if (!kvm_is_cr4_bit_set(vcpu, X86_CR4_LAM_SUP))
8237+
return gva;
8238+
8239+
lam_bit = kvm_is_cr4_bit_set(vcpu, X86_CR4_LA57) ? 56 : 47;
8240+
}
8241+
8242+
/*
8243+
* Untag the address by sign-extending the lam_bit, but NOT to bit 63.
8244+
* Bit 63 is retained from the raw virtual address so that untagging
8245+
* doesn't change a user access to a supervisor access, and vice versa.
8246+
*/
8247+
return (sign_extend64(gva, lam_bit) & ~BIT_ULL(63)) | (gva & BIT_ULL(63));
82118248
}
82128249

82138250
static struct kvm_x86_ops vmx_x86_ops __initdata = {

arch/x86/kvm/x86.h

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -530,6 +530,8 @@ bool kvm_msr_allowed(struct kvm_vcpu *vcpu, u32 index, u32 type);
530530
__reserved_bits |= X86_CR4_VMXE; \
531531
if (!__cpu_has(__c, X86_FEATURE_PCID)) \
532532
__reserved_bits |= X86_CR4_PCIDE; \
533+
if (!__cpu_has(__c, X86_FEATURE_LAM)) \
534+
__reserved_bits |= X86_CR4_LAM_SUP; \
533535
__reserved_bits; \
534536
})
535537

0 commit comments

Comments
 (0)