Skip to content

Commit 4f4aa80

Browse files
Lai Jiangshan authored and Paolo Bonzini (bonzini) committed
KVM: X86: Handle implicit supervisor access with SMAP
There are two kinds of implicit supervisor access:
  - implicit supervisor access when CPL = 3
  - implicit supervisor access when CPL < 3

The current permission_fault() handles only the first kind for SMAP. But if the access is implicit when SMAP is on, data may be neither read from nor written to any user-mode address regardless of the current CPL; so the second kind should also be supported.

The first kind can be detected via CPL and access mode: if it is a supervisor access and CPL = 3, it must be an implicit supervisor access. But it is not possible to detect the second kind without extra information, so this patch adds an artificial PFERR_IMPLICIT_ACCESS into @access. This extra information also works for the first kind, so the logic is changed to use it for both cases.

The value of PFERR_IMPLICIT_ACCESS is deliberately chosen to be bit 48, which is in the most significant 16 bits of a u64 and less likely to be forced to change should future hardware assign a meaning to it.

This patch removes the call to ->get_cpl() because the access mode is now determined by @access. Not only does this save a function call, it also removes confusion when the permission is checked for nested TDP: nested TDP shouldn't have SMAP checking, nor should L2's CPL have any bearing on it. The original code works only because the walk is always a user walk for NPT and the SMAP fault bit is never set for EPT in update_permission_bitmask(). Signed-off-by: Lai Jiangshan <jiangshan.ljs@antgroup.com> Message-Id: <20220311070346.45023-5-jiangshanlai@gmail.com> Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
1 parent 8873c14 commit 4f4aa80

4 files changed

Lines changed: 21 additions & 17 deletions

File tree

arch/x86/include/asm/kvm_host.h

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -249,6 +249,7 @@ enum x86_intercept_stage;
249249
#define PFERR_SGX_BIT 15
250250
#define PFERR_GUEST_FINAL_BIT 32
251251
#define PFERR_GUEST_PAGE_BIT 33
252+
#define PFERR_IMPLICIT_ACCESS_BIT 48
252253

253254
#define PFERR_PRESENT_MASK (1U << PFERR_PRESENT_BIT)
254255
#define PFERR_WRITE_MASK (1U << PFERR_WRITE_BIT)
@@ -259,6 +260,7 @@ enum x86_intercept_stage;
259260
#define PFERR_SGX_MASK (1U << PFERR_SGX_BIT)
260261
#define PFERR_GUEST_FINAL_MASK (1ULL << PFERR_GUEST_FINAL_BIT)
261262
#define PFERR_GUEST_PAGE_MASK (1ULL << PFERR_GUEST_PAGE_BIT)
263+
#define PFERR_IMPLICIT_ACCESS (1ULL << PFERR_IMPLICIT_ACCESS_BIT)
262264

263265
#define PFERR_NESTED_GUEST_PAGE (PFERR_GUEST_PAGE_MASK | \
264266
PFERR_WRITE_MASK | \

arch/x86/kvm/mmu.h

Lines changed: 11 additions & 13 deletions
Original file line numberDiff line numberDiff line change
@@ -218,25 +218,23 @@ static inline u8 permission_fault(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
218218
{
219219
/* strip nested paging fault error codes */
220220
unsigned int pfec = access;
221-
int cpl = static_call(kvm_x86_get_cpl)(vcpu);
222221
unsigned long rflags = static_call(kvm_x86_get_rflags)(vcpu);
223222

224223
/*
225-
* If CPL < 3, SMAP prevention are disabled if EFLAGS.AC = 1.
224+
* For explicit supervisor accesses, SMAP is disabled if EFLAGS.AC = 1.
225+
* For implicit supervisor accesses, SMAP cannot be overridden.
226226
*
227-
* If CPL = 3, SMAP applies to all supervisor-mode data accesses
228-
* (these are implicit supervisor accesses) regardless of the value
229-
* of EFLAGS.AC.
227+
* SMAP works on supervisor accesses only, and not_smap can
228+
* be set or not set when user access with neither has any bearing
229+
* on the result.
230230
*
231-
* This computes (cpl < 3) && (rflags & X86_EFLAGS_AC), leaving
232-
* the result in X86_EFLAGS_AC. We then insert it in place of
233-
* the PFERR_RSVD_MASK bit; this bit will always be zero in pfec,
234-
* but it will be one in index if SMAP checks are being overridden.
235-
* It is important to keep this branchless.
231+
* We put the SMAP checking bit in place of the PFERR_RSVD_MASK bit;
232+
* this bit will always be zero in pfec, but it will be one in index
233+
* if SMAP checks are being disabled.
236234
*/
237-
unsigned long not_smap = (cpl - 3) & (rflags & X86_EFLAGS_AC);
238-
int index = (pfec >> 1) +
239-
(not_smap >> (X86_EFLAGS_AC_BIT - PFERR_RSVD_BIT + 1));
235+
u64 implicit_access = access & PFERR_IMPLICIT_ACCESS;
236+
bool not_smap = ((rflags & X86_EFLAGS_AC) | implicit_access) == X86_EFLAGS_AC;
237+
int index = (pfec + (not_smap << PFERR_RSVD_BIT)) >> 1;
240238
bool fault = (mmu->permissions[index] >> pte_access) & 1;
241239
u32 errcode = PFERR_PRESENT_MASK;
242240

arch/x86/kvm/mmu/mmu.c

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -4591,8 +4591,8 @@ static void update_permission_bitmask(struct kvm_mmu *mmu, bool ept)
45914591
* - X86_CR4_SMAP is set in CR4
45924592
* - A user page is accessed
45934593
* - The access is not a fetch
4594-
* - Page fault in kernel mode
4595-
* - if CPL = 3 or X86_EFLAGS_AC is clear
4594+
* - The access is supervisor mode
4595+
* - If implicit supervisor access or X86_EFLAGS_AC is clear
45964596
*
45974597
* Here, we cover the first four conditions.
45984598
* The fifth is computed dynamically in permission_fault();

arch/x86/kvm/x86.c

Lines changed: 6 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -6865,7 +6865,9 @@ static int emulator_read_std(struct x86_emulate_ctxt *ctxt,
68656865
struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
68666866
u64 access = 0;
68676867

6868-
if (!system && static_call(kvm_x86_get_cpl)(vcpu) == 3)
6868+
if (system)
6869+
access |= PFERR_IMPLICIT_ACCESS;
6870+
else if (static_call(kvm_x86_get_cpl)(vcpu) == 3)
68696871
access |= PFERR_USER_MASK;
68706872

68716873
return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, access, exception);
@@ -6917,7 +6919,9 @@ static int emulator_write_std(struct x86_emulate_ctxt *ctxt, gva_t addr, void *v
69176919
struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
69186920
u64 access = PFERR_WRITE_MASK;
69196921

6920-
if (!system && static_call(kvm_x86_get_cpl)(vcpu) == 3)
6922+
if (system)
6923+
access |= PFERR_IMPLICIT_ACCESS;
6924+
else if (static_call(kvm_x86_get_cpl)(vcpu) == 3)
69216925
access |= PFERR_USER_MASK;
69226926

69236927
return kvm_write_guest_virt_helper(addr, val, bytes, vcpu,

0 commit comments

Comments
 (0)