@@ -189,6 +189,33 @@ struct kvm_s2_mmu {
189189 uint64_t split_page_chunk_size ;
190190
191191 struct kvm_arch * arch ;
192+
193+ /*
194+ * For a shadow stage-2 MMU, the virtual vttbr used by the
195+ * host to parse the guest S2.
196+ * This either contains:
197+ * - the virtual VTTBR programmed by the guest hypervisor with
198+ * CnP cleared
199+ * - The value 1 (VMID=0, BADDR=0, CnP=1) if invalid
200+ *
201+ * We also cache the full VTCR which gets used for TLB invalidation,
202+ * taking the ARM ARM's "Any of the bits in VTCR_EL2 are permitted
203+ * to be cached in a TLB" to the letter.
204+ */
205+ u64 tlb_vttbr ;
206+ u64 tlb_vtcr ;
207+
208+ /*
209+ * true when this represents a nested context where virtual
210+ * HCR_EL2.VM == 1
211+ */
212+ bool nested_stage2_enabled ;
213+
214+ /*
215+ * 0: Nobody is currently using this, check vttbr for validity
216+ * >0: Somebody is actively using this.
217+ */
218+ atomic_t refcnt ;
192219};
193220
194221struct kvm_arch_memory_slot {
@@ -256,6 +283,14 @@ struct kvm_arch {
256283 */
257284 u64 fgu [__NR_FGT_GROUP_IDS__ ];
258285
286+ /*
287+ * Stage 2 paging state for VMs with nested S2 using a virtual
288+ * VMID.
289+ */
290+ struct kvm_s2_mmu * nested_mmus ;
291+ size_t nested_mmus_size ;
292+ int nested_mmus_next ;
293+
259294 /* Interrupt controller */
260295 struct vgic_dist vgic ;
261296
@@ -327,11 +362,11 @@ struct kvm_arch {
327362 * Atomic access to multiple idregs are guarded by kvm_arch.config_lock.
328363 */
329364#define IDREG_IDX (id ) (((sys_reg_CRm(id) - 1) << 3) | sys_reg_Op2(id))
330- #define IDX_IDREG (idx ) sys_reg(3, 0, 0, ((idx) >> 3) + 1, (idx) & Op2_mask)
331- #define IDREG (kvm , id ) ((kvm)->arch.id_regs[IDREG_IDX(id)])
332365#define KVM_ARM_ID_REG_NUM (IDREG_IDX(sys_reg(3, 0, 0, 7, 7)) + 1)
333366 u64 id_regs [KVM_ARM_ID_REG_NUM ];
334367
368+ u64 ctr_el0 ;
369+
335370 /* Masks for VNCR-baked sysregs */
336371 struct kvm_sysreg_masks * sysreg_masks ;
337372
@@ -423,6 +458,7 @@ enum vcpu_sysreg {
423458 MDCR_EL2 , /* Monitor Debug Configuration Register (EL2) */
424459 CPTR_EL2 , /* Architectural Feature Trap Register (EL2) */
425460 HACR_EL2 , /* Hypervisor Auxiliary Control Register */
461+ ZCR_EL2 , /* SVE Control Register (EL2) */
426462 TTBR0_EL2 , /* Translation Table Base Register 0 (EL2) */
427463 TTBR1_EL2 , /* Translation Table Base Register 1 (EL2) */
428464 TCR_EL2 , /* Translation Control Register (EL2) */
@@ -867,6 +903,9 @@ struct kvm_vcpu_arch {
867903
868904#define vcpu_sve_max_vq (vcpu ) sve_vq_from_vl((vcpu)->arch.sve_max_vl)
869905
/*
 * Select which ZCR sysreg index applies in the vcpu's current context:
 * ZCR_EL2 when the vcpu is running in a hyp context (nested virt),
 * ZCR_EL1 otherwise. is_hyp_ctxt() is expected to be the uncommon
 * case, hence the unlikely().
 *
 * NOTE: no space between the macro name and '(' — with a space this
 * would silently become an object-like macro and break every caller.
 */
#define vcpu_sve_zcr_elx(vcpu)						\
	(unlikely(is_hyp_ctxt(vcpu)) ? ZCR_EL2 : ZCR_EL1)
908+
870909#define vcpu_sve_state_size (vcpu ) ({ \
871910 size_t __size_ret; \
872911 unsigned int __vcpu_vq; \
@@ -991,6 +1030,7 @@ static inline bool __vcpu_read_sys_reg_from_cpu(int reg, u64 *val)
9911030 case DACR32_EL2 : * val = read_sysreg_s (SYS_DACR32_EL2 ); break ;
9921031 case IFSR32_EL2 : * val = read_sysreg_s (SYS_IFSR32_EL2 ); break ;
9931032 case DBGVCR32_EL2 : * val = read_sysreg_s (SYS_DBGVCR32_EL2 ); break ;
1033+ case ZCR_EL1 : * val = read_sysreg_s (SYS_ZCR_EL12 ); break ;
9941034 default : return false;
9951035 }
9961036
@@ -1036,6 +1076,7 @@ static inline bool __vcpu_write_sys_reg_to_cpu(u64 val, int reg)
10361076 case DACR32_EL2 : write_sysreg_s (val , SYS_DACR32_EL2 ); break ;
10371077 case IFSR32_EL2 : write_sysreg_s (val , SYS_IFSR32_EL2 ); break ;
10381078 case DBGVCR32_EL2 : write_sysreg_s (val , SYS_DBGVCR32_EL2 ); break ;
1079+ case ZCR_EL1 : write_sysreg_s (val , SYS_ZCR_EL12 ); break ;
10391080 default : return false;
10401081 }
10411082
@@ -1145,7 +1186,7 @@ int __init populate_nv_trap_config(void);
11451186bool lock_all_vcpus (struct kvm * kvm );
11461187void unlock_all_vcpus (struct kvm * kvm );
11471188
1148- void kvm_init_sysreg (struct kvm_vcpu * );
1189+ void kvm_calculate_traps (struct kvm_vcpu * vcpu );
11491190
11501191/* MMIO helpers */
11511192void kvm_mmio_write_buf (void * buf , unsigned int len , unsigned long data );
@@ -1306,6 +1347,7 @@ void kvm_vcpu_load_vhe(struct kvm_vcpu *vcpu);
13061347void kvm_vcpu_put_vhe (struct kvm_vcpu * vcpu );
13071348
13081349int __init kvm_set_ipa_limit (void );
1350+ u32 kvm_get_pa_bits (struct kvm * kvm );
13091351
13101352#define __KVM_HAVE_ARCH_VM_ALLOC
13111353struct kvm * kvm_arch_alloc_vm (void );
@@ -1355,6 +1397,24 @@ static inline void kvm_hyp_reserve(void) { }
13551397void kvm_arm_vcpu_power_off (struct kvm_vcpu * vcpu );
13561398bool kvm_arm_vcpu_stopped (struct kvm_vcpu * vcpu );
13571399
1400+ static inline u64 * __vm_id_reg (struct kvm_arch * ka , u32 reg )
1401+ {
1402+ switch (reg ) {
1403+ case sys_reg (3 , 0 , 0 , 1 , 0 ) ... sys_reg (3 , 0 , 0 , 7 , 7 ):
1404+ return & ka -> id_regs [IDREG_IDX (reg )];
1405+ case SYS_CTR_EL0 :
1406+ return & ka -> ctr_el0 ;
1407+ default :
1408+ WARN_ON_ONCE (1 );
1409+ return NULL ;
1410+ }
1411+ }
1412+
/*
 * Read a VM-scoped ID register through its backing storage.
 * Statement-expression so it can be used as an rvalue; evaluates
 * 'kvm' twice is avoided by reading through __vm_id_reg() once.
 * Will dereference NULL (after a WARN) on an unhandled encoding —
 * callers must pass a register __vm_id_reg() knows about.
 */
#define kvm_read_vm_id_reg(kvm, reg)					\
	({ u64 __val = *__vm_id_reg(&(kvm)->arch, reg); __val; })
1415+
1416+ void kvm_set_vm_id_reg (struct kvm * kvm , u32 reg , u64 val );
1417+
/*
 * Expand a named ID-register field value to its raw unsigned u64
 * representation (no sign extension).
 */
#define __expand_field_sign_unsigned(id, fld, val)			\
	((u64)SYS_FIELD_VALUE(id, fld, val))
13601420
@@ -1371,7 +1431,7 @@ bool kvm_arm_vcpu_stopped(struct kvm_vcpu *vcpu);
13711431
/*
 * Extract an unsigned field from a VM-scoped ID register.
 * Reads the cached register value via kvm_read_vm_id_reg() (which
 * replaced the old IDREG() accessor) and isolates the field with
 * FIELD_GET() using the generated <id>_<fld>_MASK.
 */
#define get_idreg_field_unsigned(kvm, id, fld)				\
	({								\
		u64 __val = kvm_read_vm_id_reg((kvm), SYS_##id);	\
		FIELD_GET(id##_##fld##_MASK, __val);			\
	})
13771437
0 commit comments