Skip to content

Commit 118e10c

Browse files
zhaotianrui-loongson authored and chenhuacai committed
LoongArch: KVM: Add LASX (256bit SIMD) support
This patch adds LASX (256-bit SIMD) support for LoongArch KVM. A LASX exception is raised to KVM when the guest uses LASX instructions. KVM then enables LASX, restores the vector registers for the guest, and returns to the guest to continue running.

Reviewed-by: Bibo Mao <maobibo@loongson.cn>
Signed-off-by: Tianrui Zhao <zhaotianrui@loongson.cn>
Signed-off-by: Huacai Chen <chenhuacai@loongson.cn>
1 parent db1ecca commit 118e10c

7 files changed

Lines changed: 103 additions & 4 deletions

File tree

arch/loongarch/include/asm/kvm_host.h

Lines changed: 8 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -96,8 +96,9 @@ enum emulation_result {
9696

9797
#define KVM_LARCH_FPU (0x1 << 0)
9898
#define KVM_LARCH_LSX (0x1 << 1)
99-
#define KVM_LARCH_SWCSR_LATEST (0x1 << 2)
100-
#define KVM_LARCH_HWCSR_USABLE (0x1 << 3)
99+
#define KVM_LARCH_LASX (0x1 << 2)
100+
#define KVM_LARCH_SWCSR_LATEST (0x1 << 3)
101+
#define KVM_LARCH_HWCSR_USABLE (0x1 << 4)
101102

102103
struct kvm_vcpu_arch {
103104
/*
@@ -189,6 +190,11 @@ static inline bool kvm_guest_has_lsx(struct kvm_vcpu_arch *arch)
189190
return arch->cpucfg[2] & CPUCFG2_LSX;
190191
}
191192

193+
static inline bool kvm_guest_has_lasx(struct kvm_vcpu_arch *arch)
194+
{
195+
return arch->cpucfg[2] & CPUCFG2_LASX;
196+
}
197+
192198
/* Debug: dump vcpu state */
193199
int kvm_arch_vcpu_dump_regs(struct kvm_vcpu *vcpu);
194200

arch/loongarch/include/asm/kvm_vcpu.h

Lines changed: 10 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -65,6 +65,16 @@ static inline void kvm_save_lsx(struct loongarch_fpu *fpu) { }
6565
static inline void kvm_restore_lsx(struct loongarch_fpu *fpu) { }
6666
#endif
6767

68+
#ifdef CONFIG_CPU_HAS_LASX
int kvm_own_lasx(struct kvm_vcpu *vcpu);
void kvm_save_lasx(struct loongarch_fpu *fpu);
void kvm_restore_lasx(struct loongarch_fpu *fpu);
#else
/*
 * Stubs for CONFIG_CPU_HAS_LASX=n. kvm_own_lasx() must return an error:
 * its result is tested by callers (e.g. the LASXDIS exit handler does
 * "if (kvm_own_lasx(vcpu)) kvm_queue_exception(...)"), and the previous
 * empty body fell off the end of a non-void function, which is undefined
 * behavior once the value is used. Report -EINVAL, consistent with the
 * real kvm_own_lasx() rejecting guests without LASX support.
 */
static inline int kvm_own_lasx(struct kvm_vcpu *vcpu) { return -EINVAL; }
static inline void kvm_save_lasx(struct loongarch_fpu *fpu) { }
static inline void kvm_restore_lasx(struct loongarch_fpu *fpu) { }
#endif
77+
6878
void kvm_init_timer(struct kvm_vcpu *vcpu, unsigned long hz);
6979
void kvm_reset_timer(struct kvm_vcpu *vcpu);
7080
void kvm_save_timer(struct kvm_vcpu *vcpu);

arch/loongarch/kernel/fpu.S

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -385,6 +385,7 @@ SYM_FUNC_START(_restore_lasx_upper)
385385
lasx_restore_all_upper a0 t0 t1
386386
jr ra
387387
SYM_FUNC_END(_restore_lasx_upper)
388+
EXPORT_SYMBOL(_restore_lasx_upper)
388389

389390
SYM_FUNC_START(_init_lasx_upper)
390391
lasx_init_all_upper t1

arch/loongarch/kvm/exit.c

Lines changed: 16 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -670,6 +670,21 @@ static int kvm_handle_lsx_disabled(struct kvm_vcpu *vcpu)
670670
return RESUME_GUEST;
671671
}
672672

673+
/*
674+
* kvm_handle_lasx_disabled() - Guest used LASX while disabled in root.
675+
* @vcpu: Virtual CPU context.
676+
*
677+
* Handle when the guest attempts to use LASX when it is disabled in the root
678+
* context.
679+
*/
680+
static int kvm_handle_lasx_disabled(struct kvm_vcpu *vcpu)
681+
{
682+
if (kvm_own_lasx(vcpu))
683+
kvm_queue_exception(vcpu, EXCCODE_INE, 0);
684+
685+
return RESUME_GUEST;
686+
}
687+
673688
/*
674689
* LoongArch KVM callback handling for unimplemented guest exiting
675690
*/
@@ -699,6 +714,7 @@ static exit_handle_fn kvm_fault_tables[EXCCODE_INT_START] = {
699714
[EXCCODE_TLBM] = kvm_handle_write_fault,
700715
[EXCCODE_FPDIS] = kvm_handle_fpu_disabled,
701716
[EXCCODE_LSXDIS] = kvm_handle_lsx_disabled,
717+
[EXCCODE_LASXDIS] = kvm_handle_lasx_disabled,
702718
[EXCCODE_GSPR] = kvm_handle_gspr,
703719
};
704720

arch/loongarch/kvm/switch.S

Lines changed: 15 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -261,6 +261,21 @@ SYM_FUNC_START(kvm_restore_lsx)
261261
SYM_FUNC_END(kvm_restore_lsx)
262262
#endif
263263

264+
#ifdef CONFIG_CPU_HAS_LASX
265+
/*
 * void kvm_save_lasx(struct loongarch_fpu *fpu)
 * a0 holds the struct loongarch_fpu pointer (first C argument).
 * Save the guest vector context: FP control/status (fpu_save_csr) and
 * condition codes (fpu_save_cc) first, then the LASX register data.
 */
SYM_FUNC_START(kvm_save_lasx)
266+
fpu_save_csr a0 t1
267+
fpu_save_cc a0 t1 t2
268+
lasx_save_data a0 t1
269+
jr ra
270+
SYM_FUNC_END(kvm_save_lasx)
271+
272+
/*
 * void kvm_restore_lasx(struct loongarch_fpu *fpu)
 * a0 holds the struct loongarch_fpu pointer (first C argument).
 * Mirror of kvm_save_lasx: reload the LASX register data first, then the
 * condition codes and FP control/status — the reverse of the save order.
 */
SYM_FUNC_START(kvm_restore_lasx)
273+
lasx_restore_data a0 t1
274+
fpu_restore_cc a0 t1 t2
275+
fpu_restore_csr a0 t1 t2
276+
jr ra
277+
SYM_FUNC_END(kvm_restore_lasx)
278+
#endif
264279
.section ".rodata"
265280
SYM_DATA(kvm_exception_size, .quad kvm_exc_entry_end - kvm_exc_entry)
266281
SYM_DATA(kvm_enter_guest_size, .quad kvm_enter_guest_end - kvm_enter_guest)

arch/loongarch/kvm/trace.h

Lines changed: 3 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -103,6 +103,7 @@ TRACE_EVENT(kvm_exit_gspr,
103103

104104
#define KVM_TRACE_AUX_FPU 1
105105
#define KVM_TRACE_AUX_LSX 2
106+
#define KVM_TRACE_AUX_LASX 3
106107

107108
#define kvm_trace_symbol_aux_op \
108109
{ KVM_TRACE_AUX_SAVE, "save" }, \
@@ -113,7 +114,8 @@ TRACE_EVENT(kvm_exit_gspr,
113114

114115
#define kvm_trace_symbol_aux_state \
115116
{ KVM_TRACE_AUX_FPU, "FPU" }, \
116-
{ KVM_TRACE_AUX_LSX, "LSX" }
117+
{ KVM_TRACE_AUX_LSX, "LSX" }, \
118+
{ KVM_TRACE_AUX_LASX, "LASX" }
117119

118120
TRACE_EVENT(kvm_aux,
119121
TP_PROTO(struct kvm_vcpu *vcpu, unsigned int op,

arch/loongarch/kvm/vcpu.c

Lines changed: 50 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -317,6 +317,13 @@ static int _kvm_get_cpucfg(int id, u64 *v)
317317
*/
318318
if (cpu_has_lsx)
319319
*v |= CPUCFG2_LSX;
320+
/*
321+
* if LASX is supported by CPU, it is also supported by KVM,
322+
* as we implement it.
323+
*/
324+
if (cpu_has_lasx)
325+
*v |= CPUCFG2_LASX;
326+
320327
break;
321328
default:
322329
ret = -EINVAL;
@@ -753,12 +760,54 @@ int kvm_own_lsx(struct kvm_vcpu *vcpu)
753760
}
754761
#endif
755762

763+
#ifdef CONFIG_CPU_HAS_LASX
/*
 * kvm_own_lasx() - Hand the LASX (256-bit SIMD) unit over to the guest.
 * @vcpu:	Virtual CPU whose vector context should become live.
 *
 * Fails with -EINVAL unless the guest has FPU, LSX and LASX capability.
 * Enables FP/LSX/LASX in EUEN, then restores only the parts of the vector
 * context that are not already loaded, and marks all three units in use.
 * Returns 0 on success.
 */
int kvm_own_lasx(struct kvm_vcpu *vcpu)
{
	unsigned int held;

	if (!kvm_guest_has_fpu(&vcpu->arch) || !kvm_guest_has_lsx(&vcpu->arch) ||
	    !kvm_guest_has_lasx(&vcpu->arch))
		return -EINVAL;

	preempt_disable();

	set_csr_euen(CSR_EUEN_FPEN | CSR_EUEN_LSXEN | CSR_EUEN_LASXEN);

	held = vcpu->arch.aux_inuse & (KVM_LARCH_FPU | KVM_LARCH_LSX);
	if (held & KVM_LARCH_LSX) {
		/* Guest LSX state already loaded, only restore upper LASX state */
		_restore_lasx_upper(&vcpu->arch.fpu);
	} else if (held & KVM_LARCH_FPU) {
		/* Guest FP state already loaded, only restore upper LSX & LASX state */
		_restore_lsx_upper(&vcpu->arch.fpu);
		_restore_lasx_upper(&vcpu->arch.fpu);
	} else {
		/* Neither FP nor LSX already active, restore full LASX state */
		kvm_restore_lasx(&vcpu->arch.fpu);
	}

	trace_kvm_aux(vcpu, KVM_TRACE_AUX_RESTORE, KVM_TRACE_AUX_LASX);
	vcpu->arch.aux_inuse |= KVM_LARCH_LASX | KVM_LARCH_LSX | KVM_LARCH_FPU;
	preempt_enable();

	return 0;
}
#endif
797+
756798
/* Save context and disable FPU */
757799
void kvm_lose_fpu(struct kvm_vcpu *vcpu)
758800
{
759801
preempt_disable();
760802

761-
if (vcpu->arch.aux_inuse & KVM_LARCH_LSX) {
803+
if (vcpu->arch.aux_inuse & KVM_LARCH_LASX) {
804+
kvm_save_lasx(&vcpu->arch.fpu);
805+
vcpu->arch.aux_inuse &= ~(KVM_LARCH_LSX | KVM_LARCH_FPU | KVM_LARCH_LASX);
806+
trace_kvm_aux(vcpu, KVM_TRACE_AUX_SAVE, KVM_TRACE_AUX_LASX);
807+
808+
/* Disable LASX & LSX & FPU */
809+
clear_csr_euen(CSR_EUEN_FPEN | CSR_EUEN_LSXEN | CSR_EUEN_LASXEN);
810+
} else if (vcpu->arch.aux_inuse & KVM_LARCH_LSX) {
762811
kvm_save_lsx(&vcpu->arch.fpu);
763812
vcpu->arch.aux_inuse &= ~(KVM_LARCH_LSX | KVM_LARCH_FPU);
764813
trace_kvm_aux(vcpu, KVM_TRACE_AUX_SAVE, KVM_TRACE_AUX_LSX);

0 commit comments

Comments
 (0)