
Commit 8a009d5

seanjc authored and bonzini committed
KVM: x86/mmu: Make all page fault handlers internal to the MMU
Move kvm_arch_async_page_ready() to mmu.c where it belongs, and move all of
the page fault handling collateral that was in mmu.h purely for the async
#PF handler into mmu_internal.h, where it belongs. This will allow
kvm_mmu_do_page_fault() to act on the RET_PF_* return without having to
expose those enums outside of the MMU.

No functional change intended.

Signed-off-by: Sean Christopherson <seanjc@google.com>
Message-Id: <20220423034752.1161007-8-seanjc@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
1 parent 5276c61 commit 8a009d5

4 files changed: 108 additions & 107 deletions
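For context, the RET_PF_* values the commit message refers to live in mmu_internal.h (see that file's hunks below). A sketch of the enum as it stands in this series, reconstructed from the comment block visible in the diff, so treat it as indicative rather than authoritative:

enum {
	RET_PF_CONTINUE = 0,	/* so far, so good, keep handling the fault */
	RET_PF_RETRY,		/* let the CPU fault again on the address */
	RET_PF_EMULATE,
	RET_PF_INVALID,
	RET_PF_FIXED,
	RET_PF_SPURIOUS,
};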

arch/x86/kvm/mmu.h

Lines changed: 0 additions & 87 deletions
@@ -141,93 +141,6 @@ static inline void kvm_mmu_load_pgd(struct kvm_vcpu *vcpu)
 					  vcpu->arch.mmu->root_role.level);
 }
 
-struct kvm_page_fault {
-	/* arguments to kvm_mmu_do_page_fault.  */
-	const gpa_t addr;
-	const u32 error_code;
-	const bool prefetch;
-
-	/* Derived from error_code.  */
-	const bool exec;
-	const bool write;
-	const bool present;
-	const bool rsvd;
-	const bool user;
-
-	/* Derived from mmu and global state.  */
-	const bool is_tdp;
-	const bool nx_huge_page_workaround_enabled;
-
-	/*
-	 * Whether a >4KB mapping can be created or is forbidden due to NX
-	 * hugepages.
-	 */
-	bool huge_page_disallowed;
-
-	/*
-	 * Maximum page size that can be created for this fault; input to
-	 * FNAME(fetch), __direct_map and kvm_tdp_mmu_map.
-	 */
-	u8 max_level;
-
-	/*
-	 * Page size that can be created based on the max_level and the
-	 * page size used by the host mapping.
-	 */
-	u8 req_level;
-
-	/*
-	 * Page size that will be created based on the req_level and
-	 * huge_page_disallowed.
-	 */
-	u8 goal_level;
-
-	/* Shifted addr, or result of guest page table walk if addr is a gva.  */
-	gfn_t gfn;
-
-	/* The memslot containing gfn. May be NULL. */
-	struct kvm_memory_slot *slot;
-
-	/* Outputs of kvm_faultin_pfn.  */
-	kvm_pfn_t pfn;
-	hva_t hva;
-	bool map_writable;
-};
-
-int kvm_tdp_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault);
-
-extern int nx_huge_pages;
-static inline bool is_nx_huge_page_enabled(void)
-{
-	return READ_ONCE(nx_huge_pages);
-}
-
-static inline int kvm_mmu_do_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
-					u32 err, bool prefetch)
-{
-	struct kvm_page_fault fault = {
-		.addr = cr2_or_gpa,
-		.error_code = err,
-		.exec = err & PFERR_FETCH_MASK,
-		.write = err & PFERR_WRITE_MASK,
-		.present = err & PFERR_PRESENT_MASK,
-		.rsvd = err & PFERR_RSVD_MASK,
-		.user = err & PFERR_USER_MASK,
-		.prefetch = prefetch,
-		.is_tdp = likely(vcpu->arch.mmu->page_fault == kvm_tdp_page_fault),
-		.nx_huge_page_workaround_enabled = is_nx_huge_page_enabled(),
-
-		.max_level = KVM_MAX_HUGEPAGE_LEVEL,
-		.req_level = PG_LEVEL_4K,
-		.goal_level = PG_LEVEL_4K,
-	};
-#ifdef CONFIG_RETPOLINE
-	if (fault.is_tdp)
-		return kvm_tdp_page_fault(vcpu, &fault);
-#endif
-	return vcpu->arch.mmu->page_fault(vcpu, &fault);
-}
-
 /*
  * Check if a given access (described through the I/D, W/R and U/S bits of a
  * page fault error code pfec) causes a permission fault with the given PTE

arch/x86/kvm/mmu/mmu.c

Lines changed: 19 additions & 0 deletions
@@ -3942,6 +3942,25 @@ static bool kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
 				     kvm_vcpu_gfn_to_hva(vcpu, gfn), &arch);
 }
 
+void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu, struct kvm_async_pf *work)
+{
+	int r;
+
+	if ((vcpu->arch.mmu->root_role.direct != work->arch.direct_map) ||
+	      work->wakeup_all)
+		return;
+
+	r = kvm_mmu_reload(vcpu);
+	if (unlikely(r))
+		return;
+
+	if (!vcpu->arch.mmu->root_role.direct &&
+	      work->arch.cr3 != vcpu->arch.mmu->get_guest_pgd(vcpu))
+		return;
+
+	kvm_mmu_do_page_fault(vcpu, work->cr2_or_gpa, 0, true);
+}
+
 static int kvm_faultin_pfn(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
 {
 	struct kvm_memory_slot *slot = fault->slot;
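Note that the relocated function still discards kvm_mmu_do_page_fault()'s return value, exactly as the x86.c original did. A hypothetical illustration, not part of this commit, of the kind of follow-up the move enables now that the RET_PF_* enums are visible at this call site:

	/* Hypothetical: act on the outcome instead of discarding it. */
	r = kvm_mmu_do_page_fault(vcpu, work->cr2_or_gpa, 0, true);
	if (r == RET_PF_RETRY)
		return;	/* the vCPU will simply fault again on the address */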

arch/x86/kvm/mmu/mmu_internal.h

Lines changed: 89 additions & 1 deletion
@@ -140,8 +140,70 @@ void kvm_flush_remote_tlbs_with_address(struct kvm *kvm,
 				u64 start_gfn, u64 pages);
 unsigned int pte_list_count(struct kvm_rmap_head *rmap_head);
 
+extern int nx_huge_pages;
+static inline bool is_nx_huge_page_enabled(void)
+{
+	return READ_ONCE(nx_huge_pages);
+}
+
+struct kvm_page_fault {
+	/* arguments to kvm_mmu_do_page_fault.  */
+	const gpa_t addr;
+	const u32 error_code;
+	const bool prefetch;
+
+	/* Derived from error_code.  */
+	const bool exec;
+	const bool write;
+	const bool present;
+	const bool rsvd;
+	const bool user;
+
+	/* Derived from mmu and global state.  */
+	const bool is_tdp;
+	const bool nx_huge_page_workaround_enabled;
+
+	/*
+	 * Whether a >4KB mapping can be created or is forbidden due to NX
+	 * hugepages.
+	 */
+	bool huge_page_disallowed;
+
+	/*
+	 * Maximum page size that can be created for this fault; input to
+	 * FNAME(fetch), __direct_map and kvm_tdp_mmu_map.
+	 */
+	u8 max_level;
+
+	/*
+	 * Page size that can be created based on the max_level and the
+	 * page size used by the host mapping.
+	 */
+	u8 req_level;
+
+	/*
+	 * Page size that will be created based on the req_level and
+	 * huge_page_disallowed.
+	 */
+	u8 goal_level;
+
+	/* Shifted addr, or result of guest page table walk if addr is a gva.  */
+	gfn_t gfn;
+
+	/* The memslot containing gfn. May be NULL. */
+	struct kvm_memory_slot *slot;
+
+	/* Outputs of kvm_faultin_pfn.  */
+	kvm_pfn_t pfn;
+	hva_t hva;
+	bool map_writable;
+};
+
+int kvm_tdp_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault);
+
 /*
- * Return values of handle_mmio_page_fault, mmu.page_fault, and fast_page_fault().
+ * Return values of handle_mmio_page_fault(), mmu.page_fault(), fast_page_fault(),
+ * and of course kvm_mmu_do_page_fault().
  *
  * RET_PF_CONTINUE: So far, so good, keep handling the page fault.
  * RET_PF_RETRY: let CPU fault again on the address.
@@ -167,6 +229,32 @@ enum {
 	RET_PF_SPURIOUS,
 };
 
+static inline int kvm_mmu_do_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
+					u32 err, bool prefetch)
+{
+	struct kvm_page_fault fault = {
+		.addr = cr2_or_gpa,
+		.error_code = err,
+		.exec = err & PFERR_FETCH_MASK,
+		.write = err & PFERR_WRITE_MASK,
+		.present = err & PFERR_PRESENT_MASK,
+		.rsvd = err & PFERR_RSVD_MASK,
+		.user = err & PFERR_USER_MASK,
+		.prefetch = prefetch,
+		.is_tdp = likely(vcpu->arch.mmu->page_fault == kvm_tdp_page_fault),
+		.nx_huge_page_workaround_enabled = is_nx_huge_page_enabled(),
+
+		.max_level = KVM_MAX_HUGEPAGE_LEVEL,
+		.req_level = PG_LEVEL_4K,
+		.goal_level = PG_LEVEL_4K,
+	};
+#ifdef CONFIG_RETPOLINE
+	if (fault.is_tdp)
+		return kvm_tdp_page_fault(vcpu, &fault);
+#endif
+	return vcpu->arch.mmu->page_fault(vcpu, &fault);
+}
+
 int kvm_mmu_max_mapping_level(struct kvm *kvm,
 			      const struct kvm_memory_slot *slot, gfn_t gfn,
 			      kvm_pfn_t pfn, int max_level);
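The designated initializer in kvm_mmu_do_page_fault() splits the raw #PF error code into per-bit booleans. A minimal, standalone sketch of that decomposition; the bit positions follow the architectural x86 page-fault error code, which KVM's PFERR_*_MASK constants mirror, but the PF_* names here are invented for the example:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* x86 #PF error-code bits: P, W/R, U/S, RSVD, I/D. */
#define PF_PRESENT (1u << 0)
#define PF_WRITE   (1u << 1)
#define PF_USER    (1u << 2)
#define PF_RSVD    (1u << 3)
#define PF_FETCH   (1u << 4)

int main(void)
{
	uint32_t err = PF_PRESENT | PF_WRITE;	/* write fault on a present page */

	/* Same derivation as the .exec/.write/.present/.rsvd/.user initializers. */
	bool exec    = err & PF_FETCH;
	bool write   = err & PF_WRITE;
	bool present = err & PF_PRESENT;
	bool rsvd    = err & PF_RSVD;
	bool user    = err & PF_USER;

	printf("exec=%d write=%d present=%d rsvd=%d user=%d\n",
	       exec, write, present, rsvd, user);
	return 0;
}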

arch/x86/kvm/x86.c

Lines changed: 0 additions & 19 deletions
@@ -12358,25 +12358,6 @@ void kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
 }
 EXPORT_SYMBOL_GPL(kvm_set_rflags);
 
-void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu, struct kvm_async_pf *work)
-{
-	int r;
-
-	if ((vcpu->arch.mmu->root_role.direct != work->arch.direct_map) ||
-	      work->wakeup_all)
-		return;
-
-	r = kvm_mmu_reload(vcpu);
-	if (unlikely(r))
-		return;
-
-	if (!vcpu->arch.mmu->root_role.direct &&
-	      work->arch.cr3 != vcpu->arch.mmu->get_guest_pgd(vcpu))
-		return;
-
-	kvm_mmu_do_page_fault(vcpu, work->cr2_or_gpa, 0, true);
-}
-
 static inline u32 kvm_async_pf_hash_fn(gfn_t gfn)
 {
 	BUILD_BUG_ON(!is_power_of_2(ASYNC_PF_PER_VCPU));
