Skip to content

Commit 5b22bbe

Browse files
Lai Jiangshan authored and bonzini committed
KVM: X86: Change the type of access u32 to u64
Change the type of access from u32 to u64 for FNAME(walk_addr) and ->gva_to_gpa(). The kinds of accesses are usually combinations of UWX, and VMX/SVM's nested paging adds a new factor of access: is it an access for a guest page table or for a final guest physical address. And SMAP relies on a factor for supervisor access: explicit or implicit. So @access in FNAME(walk_addr) and ->gva_to_gpa() should include all this information to do the walk. Although @access (u32) has enough bits to encode all the kinds, this patch extends it to u64: o Extra bits will be in the higher 32 bits, so that we can easily obtain the traditional access mode (UWX) by converting it to u32. o Reuse the value for the access kind defined by SVM's nested paging (PFERR_GUEST_FINAL_MASK and PFERR_GUEST_PAGE_MASK) as @error_code in kvm_handle_page_fault(). Signed-off-by: Lai Jiangshan <jiangshan.ljs@antgroup.com> Message-Id: <20220311070346.45023-2-jiangshanlai@gmail.com> Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
1 parent cf1d88b commit 5b22bbe

5 files changed

Lines changed: 23 additions & 21 deletions

File tree

arch/x86/include/asm/kvm_host.h

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -430,7 +430,7 @@ struct kvm_mmu {
430430
void (*inject_page_fault)(struct kvm_vcpu *vcpu,
431431
struct x86_exception *fault);
432432
gpa_t (*gva_to_gpa)(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
433-
gpa_t gva_or_gpa, u32 access,
433+
gpa_t gva_or_gpa, u64 access,
434434
struct x86_exception *exception);
435435
int (*sync_page)(struct kvm_vcpu *vcpu,
436436
struct kvm_mmu_page *sp);

arch/x86/kvm/mmu.h

Lines changed: 5 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -214,8 +214,10 @@ static inline int kvm_mmu_do_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
214214
*/
215215
static inline u8 permission_fault(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
216216
unsigned pte_access, unsigned pte_pkey,
217-
unsigned pfec)
217+
u64 access)
218218
{
219+
/* strip nested paging fault error codes */
220+
unsigned int pfec = access;
219221
int cpl = static_call(kvm_x86_get_cpl)(vcpu);
220222
unsigned long rflags = static_call(kvm_x86_get_rflags)(vcpu);
221223

@@ -317,12 +319,12 @@ static inline void kvm_update_page_stats(struct kvm *kvm, int level, int count)
317319
atomic64_add(count, &kvm->stat.pages[level - 1]);
318320
}
319321

320-
gpa_t translate_nested_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access,
322+
gpa_t translate_nested_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u64 access,
321323
struct x86_exception *exception);
322324

323325
static inline gpa_t kvm_translate_gpa(struct kvm_vcpu *vcpu,
324326
struct kvm_mmu *mmu,
325-
gpa_t gpa, u32 access,
327+
gpa_t gpa, u64 access,
326328
struct x86_exception *exception)
327329
{
328330
if (mmu != &vcpu->arch.nested_mmu)

arch/x86/kvm/mmu/mmu.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -3703,7 +3703,7 @@ void kvm_mmu_sync_prev_roots(struct kvm_vcpu *vcpu)
37033703
}
37043704

37053705
static gpa_t nonpaging_gva_to_gpa(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
3706-
gpa_t vaddr, u32 access,
3706+
gpa_t vaddr, u64 access,
37073707
struct x86_exception *exception)
37083708
{
37093709
if (exception)

arch/x86/kvm/mmu/paging_tmpl.h

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -339,15 +339,15 @@ static inline bool FNAME(is_last_gpte)(struct kvm_mmu *mmu,
339339
*/
340340
static int FNAME(walk_addr_generic)(struct guest_walker *walker,
341341
struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
342-
gpa_t addr, u32 access)
342+
gpa_t addr, u64 access)
343343
{
344344
int ret;
345345
pt_element_t pte;
346346
pt_element_t __user *ptep_user;
347347
gfn_t table_gfn;
348348
u64 pt_access, pte_access;
349349
unsigned index, accessed_dirty, pte_pkey;
350-
unsigned nested_access;
350+
u64 nested_access;
351351
gpa_t pte_gpa;
352352
bool have_ad;
353353
int offset;
@@ -540,7 +540,7 @@ static int FNAME(walk_addr_generic)(struct guest_walker *walker,
540540
}
541541

542542
static int FNAME(walk_addr)(struct guest_walker *walker,
543-
struct kvm_vcpu *vcpu, gpa_t addr, u32 access)
543+
struct kvm_vcpu *vcpu, gpa_t addr, u64 access)
544544
{
545545
return FNAME(walk_addr_generic)(walker, vcpu, vcpu->arch.mmu, addr,
546546
access);
@@ -988,7 +988,7 @@ static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva, hpa_t root_hpa)
988988

989989
/* Note, @addr is a GPA when gva_to_gpa() translates an L2 GPA to an L1 GPA. */
990990
static gpa_t FNAME(gva_to_gpa)(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
991-
gpa_t addr, u32 access,
991+
gpa_t addr, u64 access,
992992
struct x86_exception *exception)
993993
{
994994
struct guest_walker walker;

arch/x86/kvm/x86.c

Lines changed: 12 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -6726,7 +6726,7 @@ void kvm_get_segment(struct kvm_vcpu *vcpu,
67266726
static_call(kvm_x86_get_segment)(vcpu, var, seg);
67276727
}
67286728

6729-
gpa_t translate_nested_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access,
6729+
gpa_t translate_nested_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u64 access,
67306730
struct x86_exception *exception)
67316731
{
67326732
struct kvm_mmu *mmu = vcpu->arch.mmu;
@@ -6746,7 +6746,7 @@ gpa_t kvm_mmu_gva_to_gpa_read(struct kvm_vcpu *vcpu, gva_t gva,
67466746
{
67476747
struct kvm_mmu *mmu = vcpu->arch.walk_mmu;
67486748

6749-
u32 access = (static_call(kvm_x86_get_cpl)(vcpu) == 3) ? PFERR_USER_MASK : 0;
6749+
u64 access = (static_call(kvm_x86_get_cpl)(vcpu) == 3) ? PFERR_USER_MASK : 0;
67506750
return mmu->gva_to_gpa(vcpu, mmu, gva, access, exception);
67516751
}
67526752
EXPORT_SYMBOL_GPL(kvm_mmu_gva_to_gpa_read);
@@ -6756,7 +6756,7 @@ EXPORT_SYMBOL_GPL(kvm_mmu_gva_to_gpa_read);
67566756
{
67576757
struct kvm_mmu *mmu = vcpu->arch.walk_mmu;
67586758

6759-
u32 access = (static_call(kvm_x86_get_cpl)(vcpu) == 3) ? PFERR_USER_MASK : 0;
6759+
u64 access = (static_call(kvm_x86_get_cpl)(vcpu) == 3) ? PFERR_USER_MASK : 0;
67606760
access |= PFERR_FETCH_MASK;
67616761
return mmu->gva_to_gpa(vcpu, mmu, gva, access, exception);
67626762
}
@@ -6766,7 +6766,7 @@ gpa_t kvm_mmu_gva_to_gpa_write(struct kvm_vcpu *vcpu, gva_t gva,
67666766
{
67676767
struct kvm_mmu *mmu = vcpu->arch.walk_mmu;
67686768

6769-
u32 access = (static_call(kvm_x86_get_cpl)(vcpu) == 3) ? PFERR_USER_MASK : 0;
6769+
u64 access = (static_call(kvm_x86_get_cpl)(vcpu) == 3) ? PFERR_USER_MASK : 0;
67706770
access |= PFERR_WRITE_MASK;
67716771
return mmu->gva_to_gpa(vcpu, mmu, gva, access, exception);
67726772
}
@@ -6782,7 +6782,7 @@ gpa_t kvm_mmu_gva_to_gpa_system(struct kvm_vcpu *vcpu, gva_t gva,
67826782
}
67836783

67846784
static int kvm_read_guest_virt_helper(gva_t addr, void *val, unsigned int bytes,
6785-
struct kvm_vcpu *vcpu, u32 access,
6785+
struct kvm_vcpu *vcpu, u64 access,
67866786
struct x86_exception *exception)
67876787
{
67886788
struct kvm_mmu *mmu = vcpu->arch.walk_mmu;
@@ -6819,7 +6819,7 @@ static int kvm_fetch_guest_virt(struct x86_emulate_ctxt *ctxt,
68196819
{
68206820
struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
68216821
struct kvm_mmu *mmu = vcpu->arch.walk_mmu;
6822-
u32 access = (static_call(kvm_x86_get_cpl)(vcpu) == 3) ? PFERR_USER_MASK : 0;
6822+
u64 access = (static_call(kvm_x86_get_cpl)(vcpu) == 3) ? PFERR_USER_MASK : 0;
68236823
unsigned offset;
68246824
int ret;
68256825

@@ -6844,7 +6844,7 @@ int kvm_read_guest_virt(struct kvm_vcpu *vcpu,
68446844
gva_t addr, void *val, unsigned int bytes,
68456845
struct x86_exception *exception)
68466846
{
6847-
u32 access = (static_call(kvm_x86_get_cpl)(vcpu) == 3) ? PFERR_USER_MASK : 0;
6847+
u64 access = (static_call(kvm_x86_get_cpl)(vcpu) == 3) ? PFERR_USER_MASK : 0;
68486848

68496849
/*
68506850
* FIXME: this should call handle_emulation_failure if X86EMUL_IO_NEEDED
@@ -6863,7 +6863,7 @@ static int emulator_read_std(struct x86_emulate_ctxt *ctxt,
68636863
struct x86_exception *exception, bool system)
68646864
{
68656865
struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
6866-
u32 access = 0;
6866+
u64 access = 0;
68676867

68686868
if (!system && static_call(kvm_x86_get_cpl)(vcpu) == 3)
68696869
access |= PFERR_USER_MASK;
@@ -6881,7 +6881,7 @@ static int kvm_read_guest_phys_system(struct x86_emulate_ctxt *ctxt,
68816881
}
68826882

68836883
static int kvm_write_guest_virt_helper(gva_t addr, void *val, unsigned int bytes,
6884-
struct kvm_vcpu *vcpu, u32 access,
6884+
struct kvm_vcpu *vcpu, u64 access,
68856885
struct x86_exception *exception)
68866886
{
68876887
struct kvm_mmu *mmu = vcpu->arch.walk_mmu;
@@ -6915,7 +6915,7 @@ static int emulator_write_std(struct x86_emulate_ctxt *ctxt, gva_t addr, void *v
69156915
bool system)
69166916
{
69176917
struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
6918-
u32 access = PFERR_WRITE_MASK;
6918+
u64 access = PFERR_WRITE_MASK;
69196919

69206920
if (!system && static_call(kvm_x86_get_cpl)(vcpu) == 3)
69216921
access |= PFERR_USER_MASK;
@@ -6984,7 +6984,7 @@ static int vcpu_mmio_gva_to_gpa(struct kvm_vcpu *vcpu, unsigned long gva,
69846984
bool write)
69856985
{
69866986
struct kvm_mmu *mmu = vcpu->arch.walk_mmu;
6987-
u32 access = ((static_call(kvm_x86_get_cpl)(vcpu) == 3) ? PFERR_USER_MASK : 0)
6987+
u64 access = ((static_call(kvm_x86_get_cpl)(vcpu) == 3) ? PFERR_USER_MASK : 0)
69886988
| (write ? PFERR_WRITE_MASK : 0);
69896989

69906990
/*
@@ -12598,7 +12598,7 @@ void kvm_fixup_and_inject_pf_error(struct kvm_vcpu *vcpu, gva_t gva, u16 error_c
1259812598
{
1259912599
struct kvm_mmu *mmu = vcpu->arch.walk_mmu;
1260012600
struct x86_exception fault;
12601-
u32 access = error_code &
12601+
u64 access = error_code &
1260212602
(PFERR_WRITE_MASK | PFERR_FETCH_MASK | PFERR_USER_MASK);
1260312603

1260412604
if (!(error_code & PFERR_PRESENT_MASK) ||

0 commit comments

Comments
 (0)