Skip to content

Commit 9f03db6

Browse files
author
Marc Zyngier
committed
Merge branch kvm-arm64/mmu/mte into kvmarm-master/next
KVM/arm64 support for MTE, courtesy of Steven Price. It allows the guest to use memory tagging, and offers a new userspace API to save/restore the tags. * kvm-arm64/mmu/mte: KVM: arm64: Document MTE capability and ioctl KVM: arm64: Add ioctl to fetch/store tags in a guest KVM: arm64: Expose KVM_ARM_CAP_MTE KVM: arm64: Save/restore MTE registers KVM: arm64: Introduce MTE VM feature arm64: mte: Sync tags for pages where PTE is untagged Signed-off-by: Marc Zyngier <maz@kernel.org>
2 parents 2fea6cf + 04c02c2 commit 9f03db6

21 files changed

Lines changed: 423 additions & 17 deletions

File tree

Documentation/virt/kvm/api.rst

Lines changed: 61 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -5034,6 +5034,43 @@ see KVM_XEN_VCPU_SET_ATTR above.
50345034
The KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_ADJUST type may not be used
50355035
with the KVM_XEN_VCPU_GET_ATTR ioctl.
50365036

5037+
4.130 KVM_ARM_MTE_COPY_TAGS
5038+
---------------------------
5039+
5040+
:Capability: KVM_CAP_ARM_MTE
5041+
:Architectures: arm64
5042+
:Type: vm ioctl
5043+
:Parameters: struct kvm_arm_copy_mte_tags
5044+
:Returns: number of bytes copied, < 0 on error (-EINVAL for incorrect
5045+
arguments, -EFAULT if memory cannot be accessed).
5046+
5047+
::
5048+
5049+
struct kvm_arm_copy_mte_tags {
5050+
__u64 guest_ipa;
5051+
__u64 length;
5052+
void __user *addr;
5053+
__u64 flags;
5054+
__u64 reserved[2];
5055+
};
5056+
5057+
Copies Memory Tagging Extension (MTE) tags to/from guest tag memory. The
5058+
``guest_ipa`` and ``length`` fields must be ``PAGE_SIZE`` aligned. The ``addr``
5059+
field must point to a buffer which the tags will be copied to or from.
5060+
5061+
``flags`` specifies the direction of copy, either ``KVM_ARM_TAGS_TO_GUEST`` or
5062+
``KVM_ARM_TAGS_FROM_GUEST``.
5063+
5064+
The size of the buffer to store the tags is ``(length / 16)`` bytes
5065+
(granules in MTE are 16 bytes long). Each byte contains a single tag
5066+
value. This matches the format of ``PTRACE_PEEKMTETAGS`` and
5067+
``PTRACE_POKEMTETAGS``.
5068+
5069+
If an error occurs before any data is copied then a negative error code is
5070+
returned. If some tags have been copied before an error occurs then the number
5071+
of bytes successfully copied is returned. If the call completes successfully
5072+
then ``length`` is returned.
5073+
50375074
5. The kvm_run structure
50385075
========================
50395076

@@ -6362,6 +6399,30 @@ default.
63626399

63636400
See Documentation/x86/sgx/2.Kernel-internals.rst for more details.
63646401

6402+
7.26 KVM_CAP_ARM_MTE
6403+
--------------------
6404+
6405+
:Architectures: arm64
6406+
:Parameters: none
6407+
6408+
This capability indicates that KVM (and the hardware) supports exposing the
6409+
Memory Tagging Extension (MTE) to the guest. It must also be enabled by the
6410+
VMM before creating any VCPUs to allow the guest access. Note that MTE is only
6411+
available to a guest running in AArch64 mode and enabling this capability will
6412+
cause attempts to create AArch32 VCPUs to fail.
6413+
6414+
When enabled, the guest is able to access tags associated with any memory given
6415+
to the guest. KVM will ensure that the tags are maintained during swap or
6416+
hibernation of the host; however the VMM needs to manually save/restore the
6417+
tags as appropriate if the VM is migrated.
6418+
6419+
When this capability is enabled, all memory in memslots must be mapped as
6420+
not-shareable (no MAP_SHARED); attempts to create a memslot with a
6421+
MAP_SHARED mmap will result in an -EINVAL return.
6422+
6423+
When enabled, the VMM may make use of the ``KVM_ARM_MTE_COPY_TAGS`` ioctl to
6424+
perform a bulk copy of tags to/from the guest.
6425+
63656426
8. Other capabilities.
63666427
======================
63676428

arch/arm64/include/asm/kvm_arm.h

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -12,7 +12,8 @@
1212
#include <asm/types.h>
1313

1414
/* Hyp Configuration Register (HCR) bits */
15-
#define HCR_ATA (UL(1) << 56)
15+
#define HCR_ATA_SHIFT 56
16+
#define HCR_ATA (UL(1) << HCR_ATA_SHIFT)
1617
#define HCR_FWB (UL(1) << 46)
1718
#define HCR_API (UL(1) << 41)
1819
#define HCR_APK (UL(1) << 40)

arch/arm64/include/asm/kvm_emulate.h

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -84,6 +84,9 @@ static inline void vcpu_reset_hcr(struct kvm_vcpu *vcpu)
8484
if (cpus_have_const_cap(ARM64_MISMATCHED_CACHE_TYPE) ||
8585
vcpu_el1_is_32bit(vcpu))
8686
vcpu->arch.hcr_el2 |= HCR_TID2;
87+
88+
if (kvm_has_mte(vcpu->kvm))
89+
vcpu->arch.hcr_el2 |= HCR_ATA;
8790
}
8891

8992
static inline unsigned long *vcpu_hcr(struct kvm_vcpu *vcpu)

arch/arm64/include/asm/kvm_host.h

Lines changed: 13 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -133,6 +133,9 @@ struct kvm_arch {
133133

134134
u8 pfr0_csv2;
135135
u8 pfr0_csv3;
136+
137+
/* Memory Tagging Extension enabled for the guest */
138+
bool mte_enabled;
136139
};
137140

138141
struct kvm_vcpu_fault_info {
@@ -207,6 +210,12 @@ enum vcpu_sysreg {
207210
CNTP_CVAL_EL0,
208211
CNTP_CTL_EL0,
209212

213+
/* Memory Tagging Extension registers */
214+
RGSR_EL1, /* Random Allocation Tag Seed Register */
215+
GCR_EL1, /* Tag Control Register */
216+
TFSR_EL1, /* Tag Fault Status Register (EL1) */
217+
TFSRE0_EL1, /* Tag Fault Status Register (EL0) */
218+
210219
/* 32bit specific registers. Keep them at the end of the range */
211220
DACR32_EL2, /* Domain Access Control Register */
212221
IFSR32_EL2, /* Instruction Fault Status Register */
@@ -722,6 +731,9 @@ int kvm_arm_vcpu_arch_get_attr(struct kvm_vcpu *vcpu,
722731
int kvm_arm_vcpu_arch_has_attr(struct kvm_vcpu *vcpu,
723732
struct kvm_device_attr *attr);
724733

734+
long kvm_vm_ioctl_mte_copy_tags(struct kvm *kvm,
735+
struct kvm_arm_copy_mte_tags *copy_tags);
736+
725737
/* Guest/host FPSIMD coordination helpers */
726738
int kvm_arch_vcpu_run_map_fp(struct kvm_vcpu *vcpu);
727739
void kvm_arch_vcpu_load_fp(struct kvm_vcpu *vcpu);
@@ -770,6 +782,7 @@ bool kvm_arm_vcpu_is_finalized(struct kvm_vcpu *vcpu);
770782
#define kvm_arm_vcpu_sve_finalized(vcpu) \
771783
((vcpu)->arch.flags & KVM_ARM64_VCPU_SVE_FINALIZED)
772784

785+
#define kvm_has_mte(kvm) (system_supports_mte() && (kvm)->arch.mte_enabled)
773786
#define kvm_vcpu_has_pmu(vcpu) \
774787
(test_bit(KVM_ARM_VCPU_PMU_V3, (vcpu)->arch.features))
775788

arch/arm64/include/asm/kvm_mte.h

Lines changed: 66 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,66 @@
1+
/* SPDX-License-Identifier: GPL-2.0 */
2+
/*
3+
* Copyright (C) 2020-2021 ARM Ltd.
4+
*/
5+
#ifndef __ASM_KVM_MTE_H
6+
#define __ASM_KVM_MTE_H
7+
8+
#ifdef __ASSEMBLY__
9+
10+
#include <asm/sysreg.h>
11+
12+
#ifdef CONFIG_ARM64_MTE
13+
14+
.macro mte_switch_to_guest g_ctxt, h_ctxt, reg1
15+
alternative_if_not ARM64_MTE
16+
b .L__skip_switch\@
17+
alternative_else_nop_endif
18+
mrs \reg1, hcr_el2
19+
tbz \reg1, #(HCR_ATA_SHIFT), .L__skip_switch\@
20+
21+
mrs_s \reg1, SYS_RGSR_EL1
22+
str \reg1, [\h_ctxt, #CPU_RGSR_EL1]
23+
mrs_s \reg1, SYS_GCR_EL1
24+
str \reg1, [\h_ctxt, #CPU_GCR_EL1]
25+
26+
ldr \reg1, [\g_ctxt, #CPU_RGSR_EL1]
27+
msr_s SYS_RGSR_EL1, \reg1
28+
ldr \reg1, [\g_ctxt, #CPU_GCR_EL1]
29+
msr_s SYS_GCR_EL1, \reg1
30+
31+
.L__skip_switch\@:
32+
.endm
33+
34+
.macro mte_switch_to_hyp g_ctxt, h_ctxt, reg1
35+
alternative_if_not ARM64_MTE
36+
b .L__skip_switch\@
37+
alternative_else_nop_endif
38+
mrs \reg1, hcr_el2
39+
tbz \reg1, #(HCR_ATA_SHIFT), .L__skip_switch\@
40+
41+
mrs_s \reg1, SYS_RGSR_EL1
42+
str \reg1, [\g_ctxt, #CPU_RGSR_EL1]
43+
mrs_s \reg1, SYS_GCR_EL1
44+
str \reg1, [\g_ctxt, #CPU_GCR_EL1]
45+
46+
ldr \reg1, [\h_ctxt, #CPU_RGSR_EL1]
47+
msr_s SYS_RGSR_EL1, \reg1
48+
ldr \reg1, [\h_ctxt, #CPU_GCR_EL1]
49+
msr_s SYS_GCR_EL1, \reg1
50+
51+
isb
52+
53+
.L__skip_switch\@:
54+
.endm
55+
56+
#else /* !CONFIG_ARM64_MTE */
57+
58+
.macro mte_switch_to_guest g_ctxt, h_ctxt, reg1
59+
.endm
60+
61+
.macro mte_switch_to_hyp g_ctxt, h_ctxt, reg1
62+
.endm
63+
64+
#endif /* CONFIG_ARM64_MTE */
65+
#endif /* __ASSEMBLY__ */
66+
#endif /* __ASM_KVM_MTE_H */

arch/arm64/include/asm/mte-def.h

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -7,6 +7,7 @@
77

88
#define MTE_GRANULE_SIZE UL(16)
99
#define MTE_GRANULE_MASK (~(MTE_GRANULE_SIZE - 1))
10+
#define MTE_GRANULES_PER_PAGE (PAGE_SIZE / MTE_GRANULE_SIZE)
1011
#define MTE_TAG_SHIFT 56
1112
#define MTE_TAG_SIZE 4
1213
#define MTE_TAG_MASK GENMASK((MTE_TAG_SHIFT + (MTE_TAG_SIZE - 1)), MTE_TAG_SHIFT)

arch/arm64/include/asm/mte.h

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -37,7 +37,7 @@ void mte_free_tag_storage(char *storage);
3737
/* track which pages have valid allocation tags */
3838
#define PG_mte_tagged PG_arch_2
3939

40-
void mte_sync_tags(pte_t *ptep, pte_t pte);
40+
void mte_sync_tags(pte_t old_pte, pte_t pte);
4141
void mte_copy_page_tags(void *kto, const void *kfrom);
4242
void mte_thread_init_user(void);
4343
void mte_thread_switch(struct task_struct *next);
@@ -53,7 +53,7 @@ int mte_ptrace_copy_tags(struct task_struct *child, long request,
5353
/* unused if !CONFIG_ARM64_MTE, silence the compiler */
5454
#define PG_mte_tagged 0
5555

56-
static inline void mte_sync_tags(pte_t *ptep, pte_t pte)
56+
static inline void mte_sync_tags(pte_t old_pte, pte_t pte)
5757
{
5858
}
5959
static inline void mte_copy_page_tags(void *kto, const void *kfrom)

arch/arm64/include/asm/pgtable.h

Lines changed: 19 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -314,9 +314,25 @@ static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
314314
if (pte_present(pte) && pte_user_exec(pte) && !pte_special(pte))
315315
__sync_icache_dcache(pte);
316316

317-
if (system_supports_mte() &&
318-
pte_present(pte) && pte_tagged(pte) && !pte_special(pte))
319-
mte_sync_tags(ptep, pte);
317+
/*
318+
* If the PTE would provide user space access to the tags associated
319+
* with it then ensure that the MTE tags are synchronised. Although
320+
* pte_access_permitted() returns false for exec only mappings, they
321+
* don't expose tags (instruction fetches don't check tags).
322+
*/
323+
if (system_supports_mte() && pte_access_permitted(pte, false) &&
324+
!pte_special(pte)) {
325+
pte_t old_pte = READ_ONCE(*ptep);
326+
/*
327+
* We only need to synchronise if the new PTE has tags enabled
328+
* or if swapping in (in which case another mapping may have
329+
* set tags in the past even if this PTE isn't tagged).
330+
* (!pte_none() && !pte_present()) is an open coded version of
331+
* is_swap_pte()
332+
*/
333+
if (pte_tagged(pte) || (!pte_none(old_pte) && !pte_present(old_pte)))
334+
mte_sync_tags(old_pte, pte);
335+
}
320336

321337
__check_racy_pte_update(mm, ptep, pte);
322338

arch/arm64/include/asm/sysreg.h

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -651,7 +651,8 @@
651651

652652
#define INIT_SCTLR_EL2_MMU_ON \
653653
(SCTLR_ELx_M | SCTLR_ELx_C | SCTLR_ELx_SA | SCTLR_ELx_I | \
654-
SCTLR_ELx_IESB | SCTLR_ELx_WXN | ENDIAN_SET_EL2 | SCTLR_EL2_RES1)
654+
SCTLR_ELx_IESB | SCTLR_ELx_WXN | ENDIAN_SET_EL2 | \
655+
SCTLR_ELx_ITFSB | SCTLR_EL2_RES1)
655656

656657
#define INIT_SCTLR_EL2_MMU_OFF \
657658
(SCTLR_EL2_RES1 | ENDIAN_SET_EL2)

arch/arm64/include/uapi/asm/kvm.h

Lines changed: 11 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -184,6 +184,17 @@ struct kvm_vcpu_events {
184184
__u32 reserved[12];
185185
};
186186

187+
struct kvm_arm_copy_mte_tags {
188+
__u64 guest_ipa;
189+
__u64 length;
190+
void __user *addr;
191+
__u64 flags;
192+
__u64 reserved[2];
193+
};
194+
195+
#define KVM_ARM_TAGS_TO_GUEST 0
196+
#define KVM_ARM_TAGS_FROM_GUEST 1
197+
187198
/* If you need to interpret the index values, here is the key: */
188199
#define KVM_REG_ARM_COPROC_MASK 0x000000000FFF0000
189200
#define KVM_REG_ARM_COPROC_SHIFT 16

0 commit comments

Comments
 (0)