Skip to content

Commit a7b57e0

Browse files
Fuad Tabba
authored and Paolo Bonzini committed
KVM: arm64: Handle guest_memfd-backed guest page faults
Add arm64 architecture support for handling guest page faults on memory slots backed by guest_memfd. This change introduces a new function, gmem_abort(), which encapsulates the fault handling logic specific to guest_memfd-backed memory.

The kvm_handle_guest_abort() entry point is updated to dispatch to gmem_abort() when a fault occurs on a guest_memfd-backed memory slot (as determined by kvm_slot_has_gmem()).

Until guest_memfd gains support for huge pages, the fault granule for these memory regions is restricted to PAGE_SIZE.

Reviewed-by: Gavin Shan <gshan@redhat.com>
Reviewed-by: James Houghton <jthoughton@google.com>
Reviewed-by: Marc Zyngier <maz@kernel.org>
Signed-off-by: Fuad Tabba <tabba@google.com>
Signed-off-by: Sean Christopherson <seanjc@google.com>
Message-ID: <20250729225455.670324-19-seanjc@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
1 parent 638ea79 commit a7b57e0

1 file changed

Lines changed: 83 additions & 3 deletions

File tree

arch/arm64/kvm/mmu.c

Lines changed: 83 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -1519,6 +1519,82 @@ static void adjust_nested_fault_perms(struct kvm_s2_trans *nested,
15191519
*prot |= kvm_encode_nested_level(nested);
15201520
}
15211521

1522+
#define KVM_PGTABLE_WALK_MEMABORT_FLAGS (KVM_PGTABLE_WALK_HANDLE_FAULT | KVM_PGTABLE_WALK_SHARED)
1523+
1524+
static int gmem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
1525+
struct kvm_s2_trans *nested,
1526+
struct kvm_memory_slot *memslot, bool is_perm)
1527+
{
1528+
bool write_fault, exec_fault, writable;
1529+
enum kvm_pgtable_walk_flags flags = KVM_PGTABLE_WALK_MEMABORT_FLAGS;
1530+
enum kvm_pgtable_prot prot = KVM_PGTABLE_PROT_R;
1531+
struct kvm_pgtable *pgt = vcpu->arch.hw_mmu->pgt;
1532+
unsigned long mmu_seq;
1533+
struct page *page;
1534+
struct kvm *kvm = vcpu->kvm;
1535+
void *memcache;
1536+
kvm_pfn_t pfn;
1537+
gfn_t gfn;
1538+
int ret;
1539+
1540+
ret = prepare_mmu_memcache(vcpu, true, &memcache);
1541+
if (ret)
1542+
return ret;
1543+
1544+
if (nested)
1545+
gfn = kvm_s2_trans_output(nested) >> PAGE_SHIFT;
1546+
else
1547+
gfn = fault_ipa >> PAGE_SHIFT;
1548+
1549+
write_fault = kvm_is_write_fault(vcpu);
1550+
exec_fault = kvm_vcpu_trap_is_exec_fault(vcpu);
1551+
1552+
VM_WARN_ON_ONCE(write_fault && exec_fault);
1553+
1554+
mmu_seq = kvm->mmu_invalidate_seq;
1555+
/* Pairs with the smp_wmb() in kvm_mmu_invalidate_end(). */
1556+
smp_rmb();
1557+
1558+
ret = kvm_gmem_get_pfn(kvm, memslot, gfn, &pfn, &page, NULL);
1559+
if (ret) {
1560+
kvm_prepare_memory_fault_exit(vcpu, fault_ipa, PAGE_SIZE,
1561+
write_fault, exec_fault, false);
1562+
return ret;
1563+
}
1564+
1565+
writable = !(memslot->flags & KVM_MEM_READONLY);
1566+
1567+
if (nested)
1568+
adjust_nested_fault_perms(nested, &prot, &writable);
1569+
1570+
if (writable)
1571+
prot |= KVM_PGTABLE_PROT_W;
1572+
1573+
if (exec_fault ||
1574+
(cpus_have_final_cap(ARM64_HAS_CACHE_DIC) &&
1575+
(!nested || kvm_s2_trans_executable(nested))))
1576+
prot |= KVM_PGTABLE_PROT_X;
1577+
1578+
kvm_fault_lock(kvm);
1579+
if (mmu_invalidate_retry(kvm, mmu_seq)) {
1580+
ret = -EAGAIN;
1581+
goto out_unlock;
1582+
}
1583+
1584+
ret = KVM_PGT_FN(kvm_pgtable_stage2_map)(pgt, fault_ipa, PAGE_SIZE,
1585+
__pfn_to_phys(pfn), prot,
1586+
memcache, flags);
1587+
1588+
out_unlock:
1589+
kvm_release_faultin_page(kvm, page, !!ret, writable);
1590+
kvm_fault_unlock(kvm);
1591+
1592+
if (writable && !ret)
1593+
mark_page_dirty_in_slot(kvm, memslot, gfn);
1594+
1595+
return ret != -EAGAIN ? ret : 0;
1596+
}
1597+
15221598
static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
15231599
struct kvm_s2_trans *nested,
15241600
struct kvm_memory_slot *memslot, unsigned long hva,
@@ -1544,7 +1620,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
15441620
struct kvm_pgtable *pgt;
15451621
struct page *page;
15461622
vm_flags_t vm_flags;
1547-
enum kvm_pgtable_walk_flags flags = KVM_PGTABLE_WALK_HANDLE_FAULT | KVM_PGTABLE_WALK_SHARED;
1623+
enum kvm_pgtable_walk_flags flags = KVM_PGTABLE_WALK_MEMABORT_FLAGS;
15481624

15491625
if (fault_is_perm)
15501626
fault_granule = kvm_vcpu_trap_get_perm_fault_granule(vcpu);
@@ -1989,8 +2065,12 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu)
19892065
VM_WARN_ON_ONCE(kvm_vcpu_trap_is_permission_fault(vcpu) &&
19902066
!write_fault && !kvm_vcpu_trap_is_exec_fault(vcpu));
19912067

1992-
ret = user_mem_abort(vcpu, fault_ipa, nested, memslot, hva,
1993-
esr_fsc_is_permission_fault(esr));
2068+
if (kvm_slot_has_gmem(memslot))
2069+
ret = gmem_abort(vcpu, fault_ipa, nested, memslot,
2070+
esr_fsc_is_permission_fault(esr));
2071+
else
2072+
ret = user_mem_abort(vcpu, fault_ipa, nested, memslot, hva,
2073+
esr_fsc_is_permission_fault(esr));
19942074
if (ret == 0)
19952075
ret = 1;
19962076
out:

0 commit comments

Comments
 (0)