Skip to content

Commit 84d49bd

Browse files
committed
Merge branch kvm-arm64/stage2-vhe-load into kvmarm/next
* kvm-arm64/stage2-vhe-load:

  Setup stage-2 MMU from vcpu_load() for VHE

  Unlike nVHE, there is no need to switch the stage-2 MMU around on guest
  entry/exit in VHE mode as the host is running at EL2. Despite this KVM
  reloads the stage-2 on every guest entry, which is needless.

  This series moves the setup of the stage-2 MMU context to vcpu_load()
  when running in VHE mode. This is likely to be a win across the board,
  but also allows us to remove an ISB on the guest entry path for systems
  with one of the speculative AT errata.

  KVM: arm64: Move VTCR_EL2 into struct s2_mmu

Signed-off-by: Oliver Upton <oliver.upton@linux.dev>
2 parents c0e1ad6 + fe49fd9 commit 84d49bd

9 files changed

Lines changed: 33 additions & 24 deletions

File tree

arch/arm64/include/asm/kvm_host.h

Lines changed: 10 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -158,6 +158,16 @@ struct kvm_s2_mmu {
158158
phys_addr_t pgd_phys;
159159
struct kvm_pgtable *pgt;
160160

161+
/*
162+
* VTCR value used on the host. For a non-NV guest (or a NV
163+
* guest that runs in a context where its own S2 doesn't
164+
* apply), its T0SZ value reflects that of the IPA size.
165+
*
166+
* For a shadow S2 MMU, T0SZ reflects the PARange exposed to
167+
* the guest.
168+
*/
169+
u64 vtcr;
170+
161171
/* The last vcpu id that ran on each physical CPU */
162172
int __percpu *last_vcpu_ran;
163173

@@ -230,9 +240,6 @@ static inline u16 kvm_mpidr_index(struct kvm_mpidr_data *data, u64 mpidr)
230240
struct kvm_arch {
231241
struct kvm_s2_mmu mmu;
232242

233-
/* VTCR_EL2 value for this VM */
234-
u64 vtcr;
235-
236243
/* Interrupt controller */
237244
struct vgic_dist vgic;
238245

arch/arm64/include/asm/kvm_mmu.h

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -150,9 +150,9 @@ static __always_inline unsigned long __kern_hyp_va(unsigned long v)
150150
*/
151151
#define KVM_PHYS_SHIFT (40)
152152

153-
#define kvm_phys_shift(kvm) VTCR_EL2_IPA(kvm->arch.vtcr)
154-
#define kvm_phys_size(kvm) (_AC(1, ULL) << kvm_phys_shift(kvm))
155-
#define kvm_phys_mask(kvm) (kvm_phys_size(kvm) - _AC(1, ULL))
153+
#define kvm_phys_shift(mmu) VTCR_EL2_IPA((mmu)->vtcr)
154+
#define kvm_phys_size(mmu) (_AC(1, ULL) << kvm_phys_shift(mmu))
155+
#define kvm_phys_mask(mmu) (kvm_phys_size(mmu) - _AC(1, ULL))
156156

157157
#include <asm/kvm_pgtable.h>
158158
#include <asm/stage2_pgtable.h>
@@ -324,7 +324,7 @@ static __always_inline u64 kvm_get_vttbr(struct kvm_s2_mmu *mmu)
324324
static __always_inline void __load_stage2(struct kvm_s2_mmu *mmu,
325325
struct kvm_arch *arch)
326326
{
327-
write_sysreg(arch->vtcr, vtcr_el2);
327+
write_sysreg(mmu->vtcr, vtcr_el2);
328328
write_sysreg(kvm_get_vttbr(mmu), vttbr_el2);
329329

330330
/*

arch/arm64/include/asm/stage2_pgtable.h

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -21,13 +21,13 @@
2121
* (IPA_SHIFT - 4).
2222
*/
2323
#define stage2_pgtable_levels(ipa) ARM64_HW_PGTABLE_LEVELS((ipa) - 4)
24-
#define kvm_stage2_levels(kvm) VTCR_EL2_LVLS(kvm->arch.vtcr)
24+
#define kvm_stage2_levels(mmu) VTCR_EL2_LVLS((mmu)->vtcr)
2525

2626
/*
2727
* kvm_mmu_cache_min_pages() is the number of pages required to install
2828
* a stage-2 translation. We pre-allocate the entry level page table at
2929
* the VM creation.
3030
*/
31-
#define kvm_mmu_cache_min_pages(kvm) (kvm_stage2_levels(kvm) - 1)
31+
#define kvm_mmu_cache_min_pages(mmu) (kvm_stage2_levels(mmu) - 1)
3232

3333
#endif /* __ARM64_S2_PGTABLE_H_ */

arch/arm64/kvm/hyp/nvhe/mem_protect.c

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -129,8 +129,8 @@ static void prepare_host_vtcr(void)
129129
parange = kvm_get_parange(id_aa64mmfr0_el1_sys_val);
130130
phys_shift = id_aa64mmfr0_parange_to_phys_shift(parange);
131131

132-
host_mmu.arch.vtcr = kvm_get_vtcr(id_aa64mmfr0_el1_sys_val,
133-
id_aa64mmfr1_el1_sys_val, phys_shift);
132+
host_mmu.arch.mmu.vtcr = kvm_get_vtcr(id_aa64mmfr0_el1_sys_val,
133+
id_aa64mmfr1_el1_sys_val, phys_shift);
134134
}
135135

136136
static bool host_stage2_force_pte_cb(u64 addr, u64 end, enum kvm_pgtable_prot prot);
@@ -235,7 +235,7 @@ int kvm_guest_prepare_stage2(struct pkvm_hyp_vm *vm, void *pgd)
235235
unsigned long nr_pages;
236236
int ret;
237237

238-
nr_pages = kvm_pgtable_stage2_pgd_size(vm->kvm.arch.vtcr) >> PAGE_SHIFT;
238+
nr_pages = kvm_pgtable_stage2_pgd_size(mmu->vtcr) >> PAGE_SHIFT;
239239
ret = hyp_pool_init(&vm->pool, hyp_virt_to_pfn(pgd), nr_pages, 0);
240240
if (ret)
241241
return ret;
@@ -295,7 +295,7 @@ int __pkvm_prot_finalize(void)
295295
return -EPERM;
296296

297297
params->vttbr = kvm_get_vttbr(mmu);
298-
params->vtcr = host_mmu.arch.vtcr;
298+
params->vtcr = mmu->vtcr;
299299
params->hcr_el2 |= HCR_VM;
300300

301301
/*

arch/arm64/kvm/hyp/nvhe/pkvm.c

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -303,7 +303,7 @@ static void init_pkvm_hyp_vm(struct kvm *host_kvm, struct pkvm_hyp_vm *hyp_vm,
303303
{
304304
hyp_vm->host_kvm = host_kvm;
305305
hyp_vm->kvm.created_vcpus = nr_vcpus;
306-
hyp_vm->kvm.arch.vtcr = host_mmu.arch.vtcr;
306+
hyp_vm->kvm.arch.mmu.vtcr = host_mmu.arch.mmu.vtcr;
307307
}
308308

309309
static int init_pkvm_hyp_vcpu(struct pkvm_hyp_vcpu *hyp_vcpu,
@@ -483,7 +483,7 @@ int __pkvm_init_vm(struct kvm *host_kvm, unsigned long vm_hva,
483483
}
484484

485485
vm_size = pkvm_get_hyp_vm_size(nr_vcpus);
486-
pgd_size = kvm_pgtable_stage2_pgd_size(host_mmu.arch.vtcr);
486+
pgd_size = kvm_pgtable_stage2_pgd_size(host_mmu.arch.mmu.vtcr);
487487

488488
ret = -ENOMEM;
489489

arch/arm64/kvm/hyp/pgtable.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1511,7 +1511,7 @@ int __kvm_pgtable_stage2_init(struct kvm_pgtable *pgt, struct kvm_s2_mmu *mmu,
15111511
kvm_pgtable_force_pte_cb_t force_pte_cb)
15121512
{
15131513
size_t pgd_sz;
1514-
u64 vtcr = mmu->arch->vtcr;
1514+
u64 vtcr = mmu->vtcr;
15151515
u32 ia_bits = VTCR_EL2_IPA(vtcr);
15161516
u32 sl0 = FIELD_GET(VTCR_EL2_SL0_MASK, vtcr);
15171517
u32 start_level = VTCR_EL2_TGRAN_SL0_BASE - sl0;

arch/arm64/kvm/mmu.c

Lines changed: 7 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -847,7 +847,7 @@ int kvm_init_stage2_mmu(struct kvm *kvm, struct kvm_s2_mmu *mmu, unsigned long t
847847

848848
mmfr0 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
849849
mmfr1 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR1_EL1);
850-
kvm->arch.vtcr = kvm_get_vtcr(mmfr0, mmfr1, phys_shift);
850+
mmu->vtcr = kvm_get_vtcr(mmfr0, mmfr1, phys_shift);
851851

852852
if (mmu->pgt != NULL) {
853853
kvm_err("kvm_arch already initialized?\n");
@@ -1022,7 +1022,8 @@ int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
10221022
phys_addr_t addr;
10231023
int ret = 0;
10241024
struct kvm_mmu_memory_cache cache = { .gfp_zero = __GFP_ZERO };
1025-
struct kvm_pgtable *pgt = kvm->arch.mmu.pgt;
1025+
struct kvm_s2_mmu *mmu = &kvm->arch.mmu;
1026+
struct kvm_pgtable *pgt = mmu->pgt;
10261027
enum kvm_pgtable_prot prot = KVM_PGTABLE_PROT_DEVICE |
10271028
KVM_PGTABLE_PROT_R |
10281029
(writable ? KVM_PGTABLE_PROT_W : 0);
@@ -1035,7 +1036,7 @@ int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
10351036

10361037
for (addr = guest_ipa; addr < guest_ipa + size; addr += PAGE_SIZE) {
10371038
ret = kvm_mmu_topup_memory_cache(&cache,
1038-
kvm_mmu_cache_min_pages(kvm));
1039+
kvm_mmu_cache_min_pages(mmu));
10391040
if (ret)
10401041
break;
10411042

@@ -1363,7 +1364,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
13631364
if (fault_status != ESR_ELx_FSC_PERM ||
13641365
(logging_active && write_fault)) {
13651366
ret = kvm_mmu_topup_memory_cache(memcache,
1366-
kvm_mmu_cache_min_pages(kvm));
1367+
kvm_mmu_cache_min_pages(vcpu->arch.hw_mmu));
13671368
if (ret)
13681369
return ret;
13691370
}
@@ -1674,7 +1675,7 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu)
16741675
}
16751676

16761677
/* Userspace should not be able to register out-of-bounds IPAs */
1677-
VM_BUG_ON(fault_ipa >= kvm_phys_size(vcpu->kvm));
1678+
VM_BUG_ON(fault_ipa >= kvm_phys_size(vcpu->arch.hw_mmu));
16781679

16791680
if (fault_status == ESR_ELx_FSC_ACCESS) {
16801681
handle_access_fault(vcpu, fault_ipa);
@@ -1948,7 +1949,7 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
19481949
* Prevent userspace from creating a memory region outside of the IPA
19491950
* space addressable by the KVM guest IPA space.
19501951
*/
1951-
if ((new->base_gfn + new->npages) > (kvm_phys_size(kvm) >> PAGE_SHIFT))
1952+
if ((new->base_gfn + new->npages) > (kvm_phys_size(&kvm->arch.mmu) >> PAGE_SHIFT))
19521953
return -EFAULT;
19531954

19541955
hva = new->userspace_addr;

arch/arm64/kvm/pkvm.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -123,7 +123,7 @@ static int __pkvm_create_hyp_vm(struct kvm *host_kvm)
123123
if (host_kvm->created_vcpus < 1)
124124
return -EINVAL;
125125

126-
pgd_sz = kvm_pgtable_stage2_pgd_size(host_kvm->arch.vtcr);
126+
pgd_sz = kvm_pgtable_stage2_pgd_size(host_kvm->arch.mmu.vtcr);
127127

128128
/*
129129
* The PGD pages will be reclaimed using a hyp_memcache which implies

arch/arm64/kvm/vgic/vgic-kvm-device.c

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -27,7 +27,8 @@ int vgic_check_iorange(struct kvm *kvm, phys_addr_t ioaddr,
2727
if (addr + size < addr)
2828
return -EINVAL;
2929

30-
if (addr & ~kvm_phys_mask(kvm) || addr + size > kvm_phys_size(kvm))
30+
if (addr & ~kvm_phys_mask(&kvm->arch.mmu) ||
31+
(addr + size) > kvm_phys_size(&kvm->arch.mmu))
3132
return -E2BIG;
3233

3334
return 0;

Comments (0)