
Commit 2937aee

Yicong Yang authored and willdeacon committed
KVM: arm64: Handle DABT caused by LS64* instructions on unsupported memory
If FEAT_LS64WB is not supported, the FEAT_LS64* instructions may only access Device/Uncacheable memory; otherwise a data abort for unsupported Exclusive or atomic access (0x35, UAoEF) is generated per the spec. The exception level the abort targets is implementation defined, and it may be routed to EL2 on a VHE VM, per DDI0487L.b Section C3.2.6 "Single-copy atomic 64-byte load/store". If the implementation generates the DABT at the final enabled stage of translation (stage-2), inject the UAoEF back into the guest after checking that the memslot is valid.

Acked-by: Arnd Bergmann <arnd@arndb.de>
Acked-by: Oliver Upton <oupton@kernel.org>
Signed-off-by: Yicong Yang <yangyicong@hisilicon.com>
Signed-off-by: Zhou Wang <wangzhou1@hisilicon.com>
Signed-off-by: Will Deacon <will@kernel.org>
1 parent: 902eeba

4 files changed: 56 additions & 1 deletion
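For context before the per-file diffs: the faulting access the commit message describes can be reproduced from a guest with the ACLE LS64 intrinsics. The following is a minimal, hypothetical sketch, assuming a toolchain that provides __arm_ld64b()/__arm_st64b() (compile with -march=armv8.7-a+ls64) and a guest CPU without FEAT_LS64WB; because buf below is Normal cacheable memory, the 64-byte single-copy atomic access is architecturally permitted to abort with DFSC 0x35 (UAoEF) instead of completing:

#include <arm_acle.h>	/* data512_t, __arm_ld64b(), __arm_st64b() */
#include <stdlib.h>

int main(void)
{
	/* LD64B/ST64B operate on a 64-byte-aligned, 64-byte region. */
	void *buf = aligned_alloc(64, 64);

	/*
	 * Normal (cacheable) memory: without FEAT_LS64WB the architecture
	 * may generate the unsupported-exclusive-or-atomic data abort
	 * (0x35) here, which this commit teaches KVM to reflect back into
	 * the guest instead of reporting an unsupported FSC.
	 */
	data512_t v = __arm_ld64b(buf);

	__arm_st64b(buf, v);	/* the 64-byte store aborts the same way */
	free(buf);
	return 0;
}

On Device memory (e.g. a suitably mapped MMIO region) the same intrinsics are supported and complete normally, which is the distinction the commit message draws.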


arch/arm64/include/asm/esr.h

Lines changed: 8 additions & 0 deletions
@@ -124,6 +124,7 @@
 #define ESR_ELx_FSC_SEA_TTW(n)		(0x14 + (n))
 #define ESR_ELx_FSC_SECC		(0x18)
 #define ESR_ELx_FSC_SECC_TTW(n)		(0x1c + (n))
+#define ESR_ELx_FSC_EXCL_ATOMIC		(0x35)
 #define ESR_ELx_FSC_ADDRSZ		(0x00)
 
 /*
@@ -488,6 +489,13 @@ static inline bool esr_fsc_is_access_flag_fault(unsigned long esr)
 	       (esr == ESR_ELx_FSC_ACCESS_L(0));
 }
 
+static inline bool esr_fsc_is_excl_atomic_fault(unsigned long esr)
+{
+	esr = esr & ESR_ELx_FSC;
+
+	return esr == ESR_ELx_FSC_EXCL_ATOMIC;
+}
+
 static inline bool esr_fsc_is_addr_sz_fault(unsigned long esr)
 {
 	esr &= ESR_ELx_FSC;
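As a quick check on what the new predicate matches, here is a self-contained userspace sketch (hypothetical, not kernel code: ESR_ELx_FSC is restated as the architectural DFSC mask, bits [5:0] of the ESR, and the fault code is the 0x35 defined above):

#include <stdbool.h>
#include <stdio.h>

#define ESR_ELx_FSC		(0x3f)	/* DFSC: ESR_ELx[5:0] */
#define ESR_ELx_FSC_EXCL_ATOMIC	(0x35)	/* unsupported exclusive/atomic */

/* Same shape as the helper added to esr.h above. */
static bool esr_fsc_is_excl_atomic_fault(unsigned long esr)
{
	esr = esr & ESR_ELx_FSC;

	return esr == ESR_ELx_FSC_EXCL_ATOMIC;
}

int main(void)
{
	/* A full ESR_EL2 for a DABT from a lower EL with DFSC 0x35. */
	unsigned long esr = 0x92000035;

	printf("%d\n", esr_fsc_is_excl_atomic_fault(esr));		/* 1 */
	printf("%d\n", esr_fsc_is_excl_atomic_fault(0x92000004));	/* 0: translation fault */
	return 0;
}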

arch/arm64/include/asm/kvm_emulate.h

Lines changed: 1 addition & 0 deletions
@@ -47,6 +47,7 @@ void kvm_skip_instr32(struct kvm_vcpu *vcpu);
 void kvm_inject_undefined(struct kvm_vcpu *vcpu);
 int kvm_inject_serror_esr(struct kvm_vcpu *vcpu, u64 esr);
 int kvm_inject_sea(struct kvm_vcpu *vcpu, bool iabt, u64 addr);
+int kvm_inject_dabt_excl_atomic(struct kvm_vcpu *vcpu, u64 addr);
 void kvm_inject_size_fault(struct kvm_vcpu *vcpu);
 
 static inline int kvm_inject_sea_dabt(struct kvm_vcpu *vcpu, u64 addr)

arch/arm64/kvm/inject_fault.c

Lines changed: 34 additions & 0 deletions
@@ -253,6 +253,40 @@ int kvm_inject_sea(struct kvm_vcpu *vcpu, bool iabt, u64 addr)
 	return 1;
 }
 
+static int kvm_inject_nested_excl_atomic(struct kvm_vcpu *vcpu, u64 addr)
+{
+	u64 esr = FIELD_PREP(ESR_ELx_EC_MASK, ESR_ELx_EC_DABT_LOW) |
+		  FIELD_PREP(ESR_ELx_FSC, ESR_ELx_FSC_EXCL_ATOMIC) |
+		  ESR_ELx_IL;
+
+	vcpu_write_sys_reg(vcpu, addr, FAR_EL2);
+	return kvm_inject_nested_sync(vcpu, esr);
+}
+
+/**
+ * kvm_inject_dabt_excl_atomic - inject a data abort for unsupported exclusive
+ *				 or atomic access
+ * @vcpu: The VCPU to receive the data abort
+ * @addr: The address to report in the DFAR
+ *
+ * It is assumed that this code is called from the VCPU thread and that the
+ * VCPU therefore is not currently executing guest code.
+ */
+int kvm_inject_dabt_excl_atomic(struct kvm_vcpu *vcpu, u64 addr)
+{
+	u64 esr;
+
+	if (is_nested_ctxt(vcpu) && (vcpu_read_sys_reg(vcpu, HCR_EL2) & HCR_VM))
+		return kvm_inject_nested_excl_atomic(vcpu, addr);
+
+	__kvm_inject_sea(vcpu, false, addr);
+	esr = vcpu_read_sys_reg(vcpu, exception_esr_elx(vcpu));
+	esr &= ~ESR_ELx_FSC;
+	esr |= ESR_ELx_FSC_EXCL_ATOMIC;
+	vcpu_write_sys_reg(vcpu, esr, exception_esr_elx(vcpu));
+	return 1;
+}
+
 void kvm_inject_size_fault(struct kvm_vcpu *vcpu)
 {
 	unsigned long addr, esr;
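The syndrome that kvm_inject_nested_excl_atomic() assembles can be reproduced outside the kernel. A hypothetical sketch with GENMASK/FIELD_PREP restated in plain C (assumes 64-bit unsigned long; the masks follow the architectural ESR_ELx layout, restated here purely for illustration):

#include <stdint.h>
#include <stdio.h>

/* Userspace restatements of the kernel helpers, assuming 64-bit longs. */
#define GENMASK(h, l)	(((~0UL) << (l)) & (~0UL >> (63 - (h))))
#define FIELD_PREP(mask, val) \
	(((unsigned long)(val) << __builtin_ctzl(mask)) & (mask))

#define ESR_ELx_EC_MASK		GENMASK(31, 26)
#define ESR_ELx_EC_DABT_LOW	(0x24)		/* DABT from a lower EL */
#define ESR_ELx_IL		(1UL << 25)	/* 32-bit instruction length */
#define ESR_ELx_FSC		GENMASK(5, 0)
#define ESR_ELx_FSC_EXCL_ATOMIC	(0x35)

int main(void)
{
	/* Same expression as kvm_inject_nested_excl_atomic() above. */
	uint64_t esr = FIELD_PREP(ESR_ELx_EC_MASK, ESR_ELx_EC_DABT_LOW) |
		       FIELD_PREP(ESR_ELx_FSC, ESR_ELx_FSC_EXCL_ATOMIC) |
		       ESR_ELx_IL;

	printf("ESR for the vEL2 sync exception: %#lx\n", esr);
	return 0;
}

Running it prints 0x92000035: EC=0x24 (DABT from a lower EL), IL set, DFSC=0x35 — the syndrome the nested guest observes.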

arch/arm64/kvm/mmu.c

Lines changed: 13 additions & 1 deletion
@@ -1845,6 +1845,17 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 		return ret;
 	}
 
+	/*
+	 * Guest performs atomic/exclusive operations on memory with
+	 * unsupported attributes (e.g. ld64b/st64b on normal memory when no
+	 * FEAT_LS64WB) and triggers the exception here. Since the memslot is
+	 * valid, inject the fault back to the guest.
+	 */
+	if (esr_fsc_is_excl_atomic_fault(kvm_vcpu_get_esr(vcpu))) {
+		kvm_inject_dabt_excl_atomic(vcpu, kvm_vcpu_get_hfar(vcpu));
+		return 1;
+	}
+
 	if (nested)
 		adjust_nested_fault_perms(nested, &prot, &writable);
 
@@ -2082,7 +2093,8 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu)
 	/* Check the stage-2 fault is trans. fault or write fault */
 	if (!esr_fsc_is_translation_fault(esr) &&
 	    !esr_fsc_is_permission_fault(esr) &&
-	    !esr_fsc_is_access_flag_fault(esr)) {
+	    !esr_fsc_is_access_flag_fault(esr) &&
+	    !esr_fsc_is_excl_atomic_fault(esr)) {
 		kvm_err("Unsupported FSC: EC=%#x xFSC=%#lx ESR_EL2=%#lx\n",
 			kvm_vcpu_trap_get_class(vcpu),
 			(unsigned long)kvm_vcpu_trap_get_fault(vcpu),
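Reading the two mmu.c hunks together: the FSC filter in kvm_handle_guest_abort() must first admit DFSC 0x35 as a known fault class, and only then can user_mem_abort() reflect it once the memslot has been validated. A toy, userspace-only simulation of that ordering (hypothetical stand-ins; the real functions handle far more cases):

#include <stdbool.h>
#include <stdio.h>

#define ESR_ELx_FSC		(0x3f)
#define ESR_ELx_FSC_EXCL_ATOMIC	(0x35)

static bool esr_fsc_is_excl_atomic_fault(unsigned long esr)
{
	return (esr & ESR_ELx_FSC) == ESR_ELx_FSC_EXCL_ATOMIC;
}

/* Toy stand-in for the hunk added to user_mem_abort(). */
static int toy_user_mem_abort(unsigned long esr)
{
	if (esr_fsc_is_excl_atomic_fault(esr)) {
		printf("memslot valid: injecting UAoEF back to the guest\n");
		return 1;
	}
	printf("normal stage-2 fault handling\n");
	return 1;
}

/* Toy stand-in for the FSC filter in kvm_handle_guest_abort(). */
static int toy_handle_guest_abort(unsigned long esr, bool translation,
				  bool permission, bool access_flag)
{
	if (!translation && !permission && !access_flag &&
	    !esr_fsc_is_excl_atomic_fault(esr)) {
		printf("Unsupported FSC\n");	/* pre-patch fate of 0x35 */
		return -1;
	}
	return toy_user_mem_abort(esr);
}

int main(void)
{
	/* DABT with DFSC 0x35: rejected before this patch, reflected after. */
	return toy_handle_guest_abort(0x92000035, false, false, false) == 1 ? 0 : 1;
}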
