
Commit 11e5ea5

ardbiesheuvel authored and Marc Zyngier committed
KVM: arm64: Use helpers to classify exception types reported via ESR
Currently, we rely on the fact that exceptions can be trivially classified by applying a mask/value pair to the syndrome value reported via the ESR register, but this will no longer be true once we enable support for 5 level paging.

So introduce a couple of helpers that encapsulate this mask/value pair matching, and wire them up in the code.

No functional change intended, the actual handling of translation level -1 will be added in a subsequent patch.

Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Will Deacon <will@kernel.org>
Cc: Marc Zyngier <maz@kernel.org>
Cc: Oliver Upton <oliver.upton@linux.dev>
Cc: Ryan Roberts <ryan.roberts@arm.com>
Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
Acked-by: Mark Rutland <mark.rutland@arm.com>
[maz: folded in changes suggested by Mark]
Signed-off-by: Marc Zyngier <maz@kernel.org>
Link: https://lore.kernel.org/r/20231128140400.3132145-2-ardb@google.com
1 parent 10a0cc3 commit 11e5ea5

5 files changed

Lines changed: 50 additions & 40 deletions
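
A quick standalone illustration of the mask/value classification the commit message describes (editor's sketch, not part of the commit; the constants mirror arch/arm64/include/asm/esr.h but are redefined locally, and the 0x2b encoding for a level -1 translation fault is an assumption based on the FEAT_LPA2 description):

/* Userspace sketch of the pre-LPA2 mask/value FSC classification. */
#include <stdbool.h>
#include <stdio.h>

#define ESR_ELx_FSC_TYPE  0x3cUL  /* class bits of the FSC field (assumed value) */
#define ESR_ELx_FSC_FAULT 0x04UL  /* translation fault, level 0 (assumed value) */

static bool fsc_is_translation_fault(unsigned long esr)
{
	/* Levels 0..3 encode as 0x04..0x07, so one mask/value pair is enough. */
	return (esr & ESR_ELx_FSC_TYPE) == ESR_ELx_FSC_FAULT;
}

int main(void)
{
	printf("FSC 0x07 (level 3): %d\n", fsc_is_translation_fault(0x07)); /* prints 1 */
	/*
	 * 0x2b is (assumed to be) the level -1 translation fault code added by
	 * FEAT_LPA2; it no longer matches the simple mask/value pair above,
	 * which is why a later patch has to extend the helpers introduced here.
	 */
	printf("FSC 0x2b (level -1): %d\n", fsc_is_translation_fault(0x2b)); /* prints 0 */
	return 0;
}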


arch/arm64/include/asm/esr.h

Lines changed: 15 additions & 0 deletions
@@ -392,6 +392,21 @@ static inline bool esr_is_data_abort(unsigned long esr)
 	return ec == ESR_ELx_EC_DABT_LOW || ec == ESR_ELx_EC_DABT_CUR;
 }
 
+static inline bool esr_fsc_is_translation_fault(unsigned long esr)
+{
+	return (esr & ESR_ELx_FSC_TYPE) == ESR_ELx_FSC_FAULT;
+}
+
+static inline bool esr_fsc_is_permission_fault(unsigned long esr)
+{
+	return (esr & ESR_ELx_FSC_TYPE) == ESR_ELx_FSC_PERM;
+}
+
+static inline bool esr_fsc_is_access_flag_fault(unsigned long esr)
+{
+	return (esr & ESR_ELx_FSC_TYPE) == ESR_ELx_FSC_ACCESS;
+}
+
 const char *esr_get_class_string(unsigned long esr);
 #endif /* __ASSEMBLY */

arch/arm64/include/asm/kvm_emulate.h

Lines changed: 16 additions & 20 deletions
@@ -404,24 +404,25 @@ static __always_inline u8 kvm_vcpu_trap_get_fault(const struct kvm_vcpu *vcpu)
 	return kvm_vcpu_get_esr(vcpu) & ESR_ELx_FSC;
 }
 
-static __always_inline u8 kvm_vcpu_trap_get_fault_type(const struct kvm_vcpu *vcpu)
+static inline
+bool kvm_vcpu_trap_is_permission_fault(const struct kvm_vcpu *vcpu)
 {
-	return kvm_vcpu_get_esr(vcpu) & ESR_ELx_FSC_TYPE;
+	return esr_fsc_is_permission_fault(kvm_vcpu_get_esr(vcpu));
 }
 
-static __always_inline s8 kvm_vcpu_trap_get_fault_level(const struct kvm_vcpu *vcpu)
+static inline
+bool kvm_vcpu_trap_is_translation_fault(const struct kvm_vcpu *vcpu)
 {
-	/*
-	 * Note: With the introduction of FEAT_LPA2 an extra level of
-	 * translation (level -1) is added. This level (obviously) doesn't
-	 * follow the previous convention of encoding the 4 levels in the 2 LSBs
-	 * of the FSC so this function breaks if the fault is for level -1.
-	 *
-	 * However, stage2 tables always use concatenated tables for first level
-	 * lookup and therefore it is guaranteed that the level will be between
-	 * 0 and 3, and this function continues to work.
-	 */
-	return kvm_vcpu_get_esr(vcpu) & ESR_ELx_FSC_LEVEL;
+	return esr_fsc_is_translation_fault(kvm_vcpu_get_esr(vcpu));
+}
+
+static inline
+u64 kvm_vcpu_trap_get_perm_fault_granule(const struct kvm_vcpu *vcpu)
+{
+	unsigned long esr = kvm_vcpu_get_esr(vcpu);
+
+	BUG_ON(!esr_fsc_is_permission_fault(esr));
+	return BIT(ARM64_HW_PGTABLE_LEVEL_SHIFT(esr & ESR_ELx_FSC_LEVEL));
 }
 
 static __always_inline bool kvm_vcpu_abt_issea(const struct kvm_vcpu *vcpu)
@@ -464,12 +465,7 @@ static inline bool kvm_is_write_fault(struct kvm_vcpu *vcpu)
 		 * first), then a permission fault to allow the flags
 		 * to be set.
 		 */
-		switch (kvm_vcpu_trap_get_fault_type(vcpu)) {
-		case ESR_ELx_FSC_PERM:
-			return true;
-		default:
-			return false;
-		}
+		return kvm_vcpu_trap_is_permission_fault(vcpu);
 	}
 
 	if (kvm_vcpu_trap_is_iabt(vcpu))
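
One detail worth spelling out from the hunk above: kvm_vcpu_trap_get_perm_fault_granule() converts the faulting level held in the FSC level bits into the size of the block that faulted. A minimal sketch of that arithmetic, assuming a 4K-page configuration and the usual arm64 definition of ARM64_HW_PGTABLE_LEVEL_SHIFT() (both restated locally as assumptions rather than taken from kernel headers):

#include <stdio.h>

#define PAGE_SHIFT 12                                       /* assume 4K pages */
#define ARM64_HW_PGTABLE_LEVEL_SHIFT(n) (((PAGE_SHIFT - 3) * (4 - (n))) + 3)
#define BIT(nr) (1UL << (nr))

int main(void)
{
	/* The ESR_ELx_FSC_LEVEL bits encode levels 0..3; print the granule for each block level. */
	for (int level = 1; level <= 3; level++)
		printf("level %d -> %lu bytes\n",
		       level, BIT(ARM64_HW_PGTABLE_LEVEL_SHIFT(level)));
	return 0;
}

With 4K pages this gives 1GiB, 2MiB and 4KiB for levels 1-3, which is the fault_granule that user_mem_abort() in the mmu.c hunks below compares against vma_pagesize.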

arch/arm64/kvm/hyp/include/hyp/fault.h

Lines changed: 1 addition & 1 deletion
@@ -60,7 +60,7 @@ static inline bool __get_fault_info(u64 esr, struct kvm_vcpu_fault_info *fault)
 	 */
 	if (!(esr & ESR_ELx_S1PTW) &&
 	    (cpus_have_final_cap(ARM64_WORKAROUND_834220) ||
-	     (esr & ESR_ELx_FSC_TYPE) == ESR_ELx_FSC_PERM)) {
+	     esr_fsc_is_permission_fault(esr))) {
 		if (!__translate_far_to_hpfar(far, &hpfar))
 			return false;
 	} else {

arch/arm64/kvm/hyp/include/hyp/switch.h

Lines changed: 1 addition & 1 deletion
@@ -591,7 +591,7 @@ static bool kvm_hyp_handle_dabt_low(struct kvm_vcpu *vcpu, u64 *exit_code)
 	if (static_branch_unlikely(&vgic_v2_cpuif_trap)) {
 		bool valid;
 
-		valid = kvm_vcpu_trap_get_fault_type(vcpu) == ESR_ELx_FSC_FAULT &&
+		valid = kvm_vcpu_trap_is_translation_fault(vcpu) &&
 			kvm_vcpu_dabt_isvalid(vcpu) &&
 			!kvm_vcpu_abt_issea(vcpu) &&
 			!kvm_vcpu_abt_iss1tw(vcpu);

arch/arm64/kvm/mmu.c

Lines changed: 17 additions & 18 deletions
@@ -1376,7 +1376,7 @@ static bool kvm_vma_mte_allowed(struct vm_area_struct *vma)
 
 static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 			  struct kvm_memory_slot *memslot, unsigned long hva,
-			  unsigned long fault_status)
+			  bool fault_is_perm)
 {
 	int ret = 0;
 	bool write_fault, writable, force_pte = false;
@@ -1390,17 +1390,17 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 	gfn_t gfn;
 	kvm_pfn_t pfn;
 	bool logging_active = memslot_is_logging(memslot);
-	s8 fault_level = kvm_vcpu_trap_get_fault_level(vcpu);
 	long vma_pagesize, fault_granule;
 	enum kvm_pgtable_prot prot = KVM_PGTABLE_PROT_R;
 	struct kvm_pgtable *pgt;
 
-	fault_granule = 1UL << ARM64_HW_PGTABLE_LEVEL_SHIFT(fault_level);
+	if (fault_is_perm)
+		fault_granule = kvm_vcpu_trap_get_perm_fault_granule(vcpu);
 	write_fault = kvm_is_write_fault(vcpu);
 	exec_fault = kvm_vcpu_trap_is_exec_fault(vcpu);
 	VM_BUG_ON(write_fault && exec_fault);
 
-	if (fault_status == ESR_ELx_FSC_PERM && !write_fault && !exec_fault) {
+	if (fault_is_perm && !write_fault && !exec_fault) {
 		kvm_err("Unexpected L2 read permission error\n");
 		return -EFAULT;
 	}
@@ -1411,8 +1411,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 	 * only exception to this is when dirty logging is enabled at runtime
 	 * and a write fault needs to collapse a block entry into a table.
 	 */
-	if (fault_status != ESR_ELx_FSC_PERM ||
-	    (logging_active && write_fault)) {
+	if (!fault_is_perm || (logging_active && write_fault)) {
 		ret = kvm_mmu_topup_memory_cache(memcache,
 						 kvm_mmu_cache_min_pages(vcpu->arch.hw_mmu));
 		if (ret)
@@ -1529,8 +1528,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 	 * backed by a THP and thus use block mapping if possible.
 	 */
 	if (vma_pagesize == PAGE_SIZE && !(force_pte || device)) {
-		if (fault_status == ESR_ELx_FSC_PERM &&
-		    fault_granule > PAGE_SIZE)
+		if (fault_is_perm && fault_granule > PAGE_SIZE)
 			vma_pagesize = fault_granule;
 		else
 			vma_pagesize = transparent_hugepage_adjust(kvm, memslot,
@@ -1543,7 +1541,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 		}
 	}
 
-	if (fault_status != ESR_ELx_FSC_PERM && !device && kvm_has_mte(kvm)) {
+	if (!fault_is_perm && !device && kvm_has_mte(kvm)) {
 		/* Check the VMM hasn't introduced a new disallowed VMA */
 		if (mte_allowed) {
 			sanitise_mte_tags(kvm, pfn, vma_pagesize);
@@ -1569,7 +1567,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 	 * permissions only if vma_pagesize equals fault_granule. Otherwise,
 	 * kvm_pgtable_stage2_map() should be called to change block size.
 	 */
-	if (fault_status == ESR_ELx_FSC_PERM && vma_pagesize == fault_granule)
+	if (fault_is_perm && vma_pagesize == fault_granule)
 		ret = kvm_pgtable_stage2_relax_perms(pgt, fault_ipa, prot);
 	else
 		ret = kvm_pgtable_stage2_map(pgt, fault_ipa, vma_pagesize,
@@ -1620,20 +1618,20 @@ static void handle_access_fault(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa)
  */
 int kvm_handle_guest_abort(struct kvm_vcpu *vcpu)
 {
-	unsigned long fault_status;
+	unsigned long esr;
 	phys_addr_t fault_ipa;
 	struct kvm_memory_slot *memslot;
 	unsigned long hva;
 	bool is_iabt, write_fault, writable;
 	gfn_t gfn;
 	int ret, idx;
 
-	fault_status = kvm_vcpu_trap_get_fault_type(vcpu);
+	esr = kvm_vcpu_get_esr(vcpu);
 
 	fault_ipa = kvm_vcpu_get_fault_ipa(vcpu);
 	is_iabt = kvm_vcpu_trap_is_iabt(vcpu);
 
-	if (fault_status == ESR_ELx_FSC_FAULT) {
+	if (esr_fsc_is_translation_fault(esr)) {
 		/* Beyond sanitised PARange (which is the IPA limit) */
 		if (fault_ipa >= BIT_ULL(get_kvm_ipa_limit())) {
 			kvm_inject_size_fault(vcpu);
@@ -1668,9 +1666,9 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu)
 			      kvm_vcpu_get_hfar(vcpu), fault_ipa);
 
 	/* Check the stage-2 fault is trans. fault or write fault */
-	if (fault_status != ESR_ELx_FSC_FAULT &&
-	    fault_status != ESR_ELx_FSC_PERM &&
-	    fault_status != ESR_ELx_FSC_ACCESS) {
+	if (!esr_fsc_is_translation_fault(esr) &&
+	    !esr_fsc_is_permission_fault(esr) &&
+	    !esr_fsc_is_access_flag_fault(esr)) {
 		kvm_err("Unsupported FSC: EC=%#x xFSC=%#lx ESR_EL2=%#lx\n",
 			kvm_vcpu_trap_get_class(vcpu),
 			(unsigned long)kvm_vcpu_trap_get_fault(vcpu),
@@ -1732,13 +1730,14 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu)
 	/* Userspace should not be able to register out-of-bounds IPAs */
 	VM_BUG_ON(fault_ipa >= kvm_phys_size(vcpu->arch.hw_mmu));
 
-	if (fault_status == ESR_ELx_FSC_ACCESS) {
+	if (esr_fsc_is_access_flag_fault(esr)) {
 		handle_access_fault(vcpu, fault_ipa);
 		ret = 1;
 		goto out_unlock;
 	}
 
-	ret = user_mem_abort(vcpu, fault_ipa, memslot, hva, fault_status);
+	ret = user_mem_abort(vcpu, fault_ipa, memslot, hva,
+			     esr_fsc_is_permission_fault(esr));
 	if (ret == 0)
 		ret = 1;
 out:
