Skip to content

Commit 5f53d88

Browse files
committed
Merge tag 'kvmarm-6.8' of git://git.kernel.org/pub/scm/linux/kernel/git/kvmarm/kvmarm into HEAD
KVM/arm64 updates for Linux 6.8 - LPA2 support, adding 52bit IPA/PA capability for 4kB and 16kB base granule sizes. Branch shared with the arm64 tree. - Large Fine-Grained Trap rework, bringing some sanity to the feature, although there is more to come. This comes with a prefix branch shared with the arm64 tree. - Some additional Nested Virtualization groundwork, mostly introducing the NV2 VNCR support and retargeting the NV support to that version of the architecture. - A small set of vgic fixes and associated cleanups.
2 parents 7832880 + 040113f commit 5f53d88

41 files changed

Lines changed: 1423 additions & 466 deletions

Some content is hidden

Large commits have some content hidden by default. Use the search box below for content that may be hidden.

arch/arm64/include/asm/cpufeature.h

Lines changed: 5 additions & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -819,6 +819,11 @@ static inline bool system_supports_tlb_range(void)
819819
return alternative_has_cap_unlikely(ARM64_HAS_TLB_RANGE);
820820
}
821821

822+
static inline bool system_supports_lpa2(void)
823+
{
824+
return cpus_have_final_cap(ARM64_HAS_LPA2);
825+
}
826+
822827
int do_emulate_mrs(struct pt_regs *regs, u32 sys_reg, u32 rt);
823828
bool try_emulate_mrs(struct pt_regs *regs, u32 isn);
824829

arch/arm64/include/asm/esr.h

Lines changed: 15 additions & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -392,6 +392,21 @@ static inline bool esr_is_data_abort(unsigned long esr)
392392
return ec == ESR_ELx_EC_DABT_LOW || ec == ESR_ELx_EC_DABT_CUR;
393393
}
394394

395+
static inline bool esr_fsc_is_translation_fault(unsigned long esr)
396+
{
397+
return (esr & ESR_ELx_FSC_TYPE) == ESR_ELx_FSC_FAULT;
398+
}
399+
400+
static inline bool esr_fsc_is_permission_fault(unsigned long esr)
401+
{
402+
return (esr & ESR_ELx_FSC_TYPE) == ESR_ELx_FSC_PERM;
403+
}
404+
405+
static inline bool esr_fsc_is_access_flag_fault(unsigned long esr)
406+
{
407+
return (esr & ESR_ELx_FSC_TYPE) == ESR_ELx_FSC_ACCESS;
408+
}
409+
395410
const char *esr_get_class_string(unsigned long esr);
396411
#endif /* __ASSEMBLY */
397412

arch/arm64/include/asm/kvm_arm.h

Lines changed: 38 additions & 25 deletions
Original file line number | Diff line number | Diff line change
@@ -108,6 +108,7 @@
108108
#define HCRX_HOST_FLAGS (HCRX_EL2_MSCEn | HCRX_EL2_TCR2En)
109109

110110
/* TCR_EL2 Registers bits */
111+
#define TCR_EL2_DS (1UL << 32)
111112
#define TCR_EL2_RES1 ((1U << 31) | (1 << 23))
112113
#define TCR_EL2_TBI (1 << 20)
113114
#define TCR_EL2_PS_SHIFT 16
@@ -122,6 +123,7 @@
122123
TCR_EL2_ORGN0_MASK | TCR_EL2_IRGN0_MASK | TCR_EL2_T0SZ_MASK)
123124

124125
/* VTCR_EL2 Registers bits */
126+
#define VTCR_EL2_DS TCR_EL2_DS
125127
#define VTCR_EL2_RES1 (1U << 31)
126128
#define VTCR_EL2_HD (1 << 22)
127129
#define VTCR_EL2_HA (1 << 21)
@@ -344,36 +346,47 @@
344346
* Once we get to a point where the two describe the same thing, we'll
345347
* merge the definitions. One day.
346348
*/
347-
#define __HFGRTR_EL2_RES0 (GENMASK(63, 56) | GENMASK(53, 51))
349+
#define __HFGRTR_EL2_RES0 HFGxTR_EL2_RES0
348350
#define __HFGRTR_EL2_MASK GENMASK(49, 0)
349-
#define __HFGRTR_EL2_nMASK (GENMASK(58, 57) | GENMASK(55, 54) | BIT(50))
351+
#define __HFGRTR_EL2_nMASK ~(__HFGRTR_EL2_RES0 | __HFGRTR_EL2_MASK)
350352

351-
#define __HFGWTR_EL2_RES0 (GENMASK(63, 56) | GENMASK(53, 51) | \
352-
BIT(46) | BIT(42) | BIT(40) | BIT(28) | \
353-
GENMASK(26, 25) | BIT(21) | BIT(18) | \
353+
/*
354+
* The HFGWTR bits are a subset of HFGRTR bits. To ensure we don't miss any
355+
* future additions, define __HFGWTR* macros relative to __HFGRTR* ones.
356+
*/
357+
#define __HFGRTR_ONLY_MASK (BIT(46) | BIT(42) | BIT(40) | BIT(28) | \
358+
GENMASK(26, 25) | BIT(21) | BIT(18) | \
354359
GENMASK(15, 14) | GENMASK(10, 9) | BIT(2))
355-
#define __HFGWTR_EL2_MASK GENMASK(49, 0)
356-
#define __HFGWTR_EL2_nMASK (GENMASK(58, 57) | GENMASK(55, 54) | BIT(50))
357-
358-
#define __HFGITR_EL2_RES0 GENMASK(63, 57)
359-
#define __HFGITR_EL2_MASK GENMASK(54, 0)
360-
#define __HFGITR_EL2_nMASK GENMASK(56, 55)
361-
362-
#define __HDFGRTR_EL2_RES0 (BIT(49) | BIT(42) | GENMASK(39, 38) | \
363-
GENMASK(21, 20) | BIT(8))
364-
#define __HDFGRTR_EL2_MASK ~__HDFGRTR_EL2_nMASK
365-
#define __HDFGRTR_EL2_nMASK GENMASK(62, 59)
366-
367-
#define __HDFGWTR_EL2_RES0 (BIT(63) | GENMASK(59, 58) | BIT(51) | BIT(47) | \
368-
BIT(43) | GENMASK(40, 38) | BIT(34) | BIT(30) | \
369-
BIT(22) | BIT(9) | BIT(6))
370-
#define __HDFGWTR_EL2_MASK ~__HDFGWTR_EL2_nMASK
371-
#define __HDFGWTR_EL2_nMASK GENMASK(62, 60)
360+
#define __HFGWTR_EL2_RES0 (__HFGRTR_EL2_RES0 | __HFGRTR_ONLY_MASK)
361+
#define __HFGWTR_EL2_MASK (__HFGRTR_EL2_MASK & ~__HFGRTR_ONLY_MASK)
362+
#define __HFGWTR_EL2_nMASK ~(__HFGWTR_EL2_RES0 | __HFGWTR_EL2_MASK)
363+
364+
#define __HFGITR_EL2_RES0 HFGITR_EL2_RES0
365+
#define __HFGITR_EL2_MASK (BIT(62) | BIT(60) | GENMASK(54, 0))
366+
#define __HFGITR_EL2_nMASK ~(__HFGITR_EL2_RES0 | __HFGITR_EL2_MASK)
367+
368+
#define __HDFGRTR_EL2_RES0 HDFGRTR_EL2_RES0
369+
#define __HDFGRTR_EL2_MASK (BIT(63) | GENMASK(58, 50) | GENMASK(48, 43) | \
370+
GENMASK(41, 40) | GENMASK(37, 22) | \
371+
GENMASK(19, 9) | GENMASK(7, 0))
372+
#define __HDFGRTR_EL2_nMASK ~(__HDFGRTR_EL2_RES0 | __HDFGRTR_EL2_MASK)
373+
374+
#define __HDFGWTR_EL2_RES0 HDFGWTR_EL2_RES0
375+
#define __HDFGWTR_EL2_MASK (GENMASK(57, 52) | GENMASK(50, 48) | \
376+
GENMASK(46, 44) | GENMASK(42, 41) | \
377+
GENMASK(37, 35) | GENMASK(33, 31) | \
378+
GENMASK(29, 23) | GENMASK(21, 10) | \
379+
GENMASK(8, 7) | GENMASK(5, 0))
380+
#define __HDFGWTR_EL2_nMASK ~(__HDFGWTR_EL2_RES0 | __HDFGWTR_EL2_MASK)
381+
382+
#define __HAFGRTR_EL2_RES0 HAFGRTR_EL2_RES0
383+
#define __HAFGRTR_EL2_MASK (GENMASK(49, 17) | GENMASK(4, 0))
384+
#define __HAFGRTR_EL2_nMASK ~(__HAFGRTR_EL2_RES0 | __HAFGRTR_EL2_MASK)
372385

373386
/* Similar definitions for HCRX_EL2 */
374-
#define __HCRX_EL2_RES0 (GENMASK(63, 16) | GENMASK(13, 12))
375-
#define __HCRX_EL2_MASK (0)
376-
#define __HCRX_EL2_nMASK (GENMASK(15, 14) | GENMASK(4, 0))
387+
#define __HCRX_EL2_RES0 HCRX_EL2_RES0
388+
#define __HCRX_EL2_MASK (BIT(6))
389+
#define __HCRX_EL2_nMASK ~(__HCRX_EL2_RES0 | __HCRX_EL2_MASK)
377390

378391
/* Hyp Prefetch Fault Address Register (HPFAR/HDFAR) */
379392
#define HPFAR_MASK (~UL(0xf))

arch/arm64/include/asm/kvm_emulate.h

Lines changed: 18 additions & 16 deletions
Original file line number | Diff line number | Diff line change
@@ -17,6 +17,7 @@
1717
#include <asm/esr.h>
1818
#include <asm/kvm_arm.h>
1919
#include <asm/kvm_hyp.h>
20+
#include <asm/kvm_nested.h>
2021
#include <asm/ptrace.h>
2122
#include <asm/cputype.h>
2223
#include <asm/virt.h>
@@ -54,11 +55,6 @@ void kvm_emulate_nested_eret(struct kvm_vcpu *vcpu);
5455
int kvm_inject_nested_sync(struct kvm_vcpu *vcpu, u64 esr_el2);
5556
int kvm_inject_nested_irq(struct kvm_vcpu *vcpu);
5657

57-
static inline bool vcpu_has_feature(const struct kvm_vcpu *vcpu, int feature)
58-
{
59-
return test_bit(feature, vcpu->kvm->arch.vcpu_features);
60-
}
61-
6258
#if defined(__KVM_VHE_HYPERVISOR__) || defined(__KVM_NVHE_HYPERVISOR__)
6359
static __always_inline bool vcpu_el1_is_32bit(struct kvm_vcpu *vcpu)
6460
{
@@ -248,7 +244,7 @@ static inline bool __is_hyp_ctxt(const struct kvm_cpu_context *ctxt)
248244

249245
static inline bool is_hyp_ctxt(const struct kvm_vcpu *vcpu)
250246
{
251-
return __is_hyp_ctxt(&vcpu->arch.ctxt);
247+
return vcpu_has_nv(vcpu) && __is_hyp_ctxt(&vcpu->arch.ctxt);
252248
}
253249

254250
/*
@@ -404,14 +400,25 @@ static __always_inline u8 kvm_vcpu_trap_get_fault(const struct kvm_vcpu *vcpu)
404400
return kvm_vcpu_get_esr(vcpu) & ESR_ELx_FSC;
405401
}
406402

407-
static __always_inline u8 kvm_vcpu_trap_get_fault_type(const struct kvm_vcpu *vcpu)
403+
static inline
404+
bool kvm_vcpu_trap_is_permission_fault(const struct kvm_vcpu *vcpu)
408405
{
409-
return kvm_vcpu_get_esr(vcpu) & ESR_ELx_FSC_TYPE;
406+
return esr_fsc_is_permission_fault(kvm_vcpu_get_esr(vcpu));
410407
}
411408

412-
static __always_inline u8 kvm_vcpu_trap_get_fault_level(const struct kvm_vcpu *vcpu)
409+
static inline
410+
bool kvm_vcpu_trap_is_translation_fault(const struct kvm_vcpu *vcpu)
413411
{
414-
return kvm_vcpu_get_esr(vcpu) & ESR_ELx_FSC_LEVEL;
412+
return esr_fsc_is_translation_fault(kvm_vcpu_get_esr(vcpu));
413+
}
414+
415+
static inline
416+
u64 kvm_vcpu_trap_get_perm_fault_granule(const struct kvm_vcpu *vcpu)
417+
{
418+
unsigned long esr = kvm_vcpu_get_esr(vcpu);
419+
420+
BUG_ON(!esr_fsc_is_permission_fault(esr));
421+
return BIT(ARM64_HW_PGTABLE_LEVEL_SHIFT(esr & ESR_ELx_FSC_LEVEL));
415422
}
416423

417424
static __always_inline bool kvm_vcpu_abt_issea(const struct kvm_vcpu *vcpu)
@@ -454,12 +461,7 @@ static inline bool kvm_is_write_fault(struct kvm_vcpu *vcpu)
454461
* first), then a permission fault to allow the flags
455462
* to be set.
456463
*/
457-
switch (kvm_vcpu_trap_get_fault_type(vcpu)) {
458-
case ESR_ELx_FSC_PERM:
459-
return true;
460-
default:
461-
return false;
462-
}
464+
return kvm_vcpu_trap_is_permission_fault(vcpu);
463465
}
464466

465467
if (kvm_vcpu_trap_is_iabt(vcpu))

0 commit comments

Comments
 (0)