@@ -23,20 +23,80 @@ DEFINE_PER_CPU(struct kvm_nvhe_init_params, kvm_init_params);
 
 void __kvm_hyp_host_forward_smc(struct kvm_cpu_context *host_ctxt);
 
+static void __hyp_sve_save_guest(struct kvm_vcpu *vcpu)
+{
+        __vcpu_sys_reg(vcpu, ZCR_EL1) = read_sysreg_el1(SYS_ZCR);
+        /*
+         * On saving/restoring guest sve state, always use the maximum VL for
+         * the guest. The layout of the data when saving the sve state depends
+         * on the VL, so use a consistent (i.e., the maximum) guest VL.
+         */
+        sve_cond_update_zcr_vq(vcpu_sve_max_vq(vcpu) - 1, SYS_ZCR_EL2);
+        __sve_save_state(vcpu_sve_pffr(vcpu), &vcpu->arch.ctxt.fp_regs.fpsr, true);
+        write_sysreg_s(ZCR_ELx_LEN_MASK, SYS_ZCR_EL2);
+}
+
+static void __hyp_sve_restore_host(void)
+{
+        struct cpu_sve_state *sve_state = *host_data_ptr(sve_state);
+
+        /*
+         * On saving/restoring host sve state, always use the maximum VL for
+         * the host. The layout of the data when saving the sve state depends
+         * on the VL, so use a consistent (i.e., the maximum) host VL.
+         *
+         * Setting ZCR_EL2 to ZCR_ELx_LEN_MASK sets the effective length
+         * supported by the system (or limited at EL3).
+         */
+        write_sysreg_s(ZCR_ELx_LEN_MASK, SYS_ZCR_EL2);
+        __sve_restore_state(sve_state->sve_regs + sve_ffr_offset(kvm_host_sve_max_vl),
+                            &sve_state->fpsr,
+                            true);
+        write_sysreg_el1(sve_state->zcr_el1, SYS_ZCR);
+}
+
+static void fpsimd_sve_flush(void)
+{
+        *host_data_ptr(fp_owner) = FP_STATE_HOST_OWNED;
+}
+
+static void fpsimd_sve_sync(struct kvm_vcpu *vcpu)
+{
+        if (!guest_owns_fp_regs())
+                return;
+
+        cpacr_clear_set(0, CPACR_ELx_FPEN | CPACR_ELx_ZEN);
+        isb();
+
+        if (vcpu_has_sve(vcpu))
+                __hyp_sve_save_guest(vcpu);
+        else
+                __fpsimd_save_state(&vcpu->arch.ctxt.fp_regs);
+
+        if (system_supports_sve())
+                __hyp_sve_restore_host();
+        else
+                __fpsimd_restore_state(*host_data_ptr(fpsimd_state));
+
+        *host_data_ptr(fp_owner) = FP_STATE_HOST_OWNED;
+}
+
 static void flush_hyp_vcpu(struct pkvm_hyp_vcpu *hyp_vcpu)
 {
         struct kvm_vcpu *host_vcpu = hyp_vcpu->host_vcpu;
 
+        fpsimd_sve_flush();
+
         hyp_vcpu->vcpu.arch.ctxt = host_vcpu->arch.ctxt;
 
         hyp_vcpu->vcpu.arch.sve_state = kern_hyp_va(host_vcpu->arch.sve_state);
-        hyp_vcpu->vcpu.arch.sve_max_vl = host_vcpu->arch.sve_max_vl;
+        /* Limit guest vector length to the maximum supported by the host. */
+        hyp_vcpu->vcpu.arch.sve_max_vl = min(host_vcpu->arch.sve_max_vl, kvm_host_sve_max_vl);
 
         hyp_vcpu->vcpu.arch.hw_mmu = host_vcpu->arch.hw_mmu;
 
         hyp_vcpu->vcpu.arch.hcr_el2 = host_vcpu->arch.hcr_el2;
         hyp_vcpu->vcpu.arch.mdcr_el2 = host_vcpu->arch.mdcr_el2;
-        hyp_vcpu->vcpu.arch.cptr_el2 = host_vcpu->arch.cptr_el2;
 
         hyp_vcpu->vcpu.arch.iflags = host_vcpu->arch.iflags;
 
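Aside (not part of the patch): the ZCR programming above relies on the architectural encoding in which the SVE vector length (VL, in bytes) is a whole number of 128-bit quadwords (VQ = VL / 16) and ZCR_ELx.LEN holds VQ - 1, clamped by hardware to the implemented maximum, which is why writing ZCR_ELx_LEN_MASK selects the largest supported VL. A minimal standalone C sketch of that relationship follows; the helper names here are illustrative, not kernel API.

#include <assert.h>

/* One SVE quadword (the VQ granule) is 128 bits, i.e. 16 bytes. */
#define VQ_BYTES 16

/* Illustrative helpers, not the kernel's. */
static unsigned int vq_from_vl(unsigned int vl)
{
        return vl / VQ_BYTES;
}

static unsigned int zcr_len_for_vl(unsigned int vl)
{
        return vq_from_vl(vl) - 1;      /* value programmed into ZCR_ELx.LEN */
}

int main(void)
{
        assert(vq_from_vl(64) == 4);            /* 512-bit vectors */
        assert(zcr_len_for_vl(64) == 3);        /* matches sve_cond_update_zcr_vq(vq - 1, ...) */
        assert(zcr_len_for_vl(256) == 15);      /* architectural maximum, 2048 bits */
        return 0;
}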
@@ -54,10 +114,11 @@ static void sync_hyp_vcpu(struct pkvm_hyp_vcpu *hyp_vcpu)
         struct vgic_v3_cpu_if *host_cpu_if = &host_vcpu->arch.vgic_cpu.vgic_v3;
         unsigned int i;
 
+        fpsimd_sve_sync(&hyp_vcpu->vcpu);
+
         host_vcpu->arch.ctxt = hyp_vcpu->vcpu.arch.ctxt;
 
         host_vcpu->arch.hcr_el2 = hyp_vcpu->vcpu.arch.hcr_el2;
-        host_vcpu->arch.cptr_el2 = hyp_vcpu->vcpu.arch.cptr_el2;
 
         host_vcpu->arch.fault = hyp_vcpu->vcpu.arch.fault;
 
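Aside (not part of the patch): as the flush/sync naming suggests, flush_hyp_vcpu() runs before the hyp vCPU is run and sync_hyp_vcpu() on the way back to the host, so the pair added above implements a simple ownership handshake: entry marks the FP/SVE registers host-owned, and exit, if the guest has since taken ownership, saves the guest state and eagerly restores the host's. A hypothetical, self-contained C model of that control flow is sketched below; the enum and function names are illustrative stand-ins, not kernel code.

#include <stdbool.h>
#include <stdio.h>

enum fp_owner { HOST_OWNED, GUEST_OWNED };

/* Models fpsimd_sve_flush(): entering the guest, the registers still
 * hold host state, so mark them host-owned. */
static enum fp_owner flush_fp(void)
{
        return HOST_OWNED;
}

/* Models the shape of fpsimd_sve_sync(): back in the host, if the guest
 * took ownership of the FP/SVE registers, save its state (SVE layout if
 * the guest has SVE) and eagerly restore the host's. */
static enum fp_owner sync_fp(enum fp_owner owner, bool guest_has_sve, bool host_has_sve)
{
        if (owner != GUEST_OWNED)
                return owner;   /* guest never touched FP/SVE: nothing to do */

        printf("save guest %s state\n", guest_has_sve ? "SVE" : "FPSIMD");
        printf("restore host %s state\n", host_has_sve ? "SVE" : "FPSIMD");
        return HOST_OWNED;
}

int main(void)
{
        enum fp_owner owner = flush_fp();

        owner = GUEST_OWNED;    /* pretend the guest used FP/SVE while running */
        owner = sync_fp(owner, true, true);
        return owner == HOST_OWNED ? 0 : 1;
}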