Skip to content

Commit 382c38c

Browse files
bibo-mao authored and chenhuacai committed
LoongArch: KVM: Add FPU/LBT delay load support
FPU/LBT are lazy enabled with KVM hypervisor. After FPU/LBT enabled and loaded, vCPU can be preempted and FPU/LBT will be lost again, there will be unnecessary FPU/LBT exceptions, load and store stuff. Here delay the FPU/LBT load until the guest entry. Signed-off-by: Bibo Mao <maobibo@loongson.cn> Signed-off-by: Huacai Chen <chenhuacai@loongson.cn>
1 parent b1388a9 commit 382c38c

3 files changed

Lines changed: 37 additions & 19 deletions

File tree

arch/loongarch/include/asm/kvm_host.h

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -37,6 +37,7 @@
3737
#define KVM_REQ_TLB_FLUSH_GPA KVM_ARCH_REQ(0)
3838
#define KVM_REQ_STEAL_UPDATE KVM_ARCH_REQ(1)
3939
#define KVM_REQ_PMU KVM_ARCH_REQ(2)
40+
#define KVM_REQ_AUX_LOAD KVM_ARCH_REQ(3)
4041

4142
#define KVM_GUESTDBG_SW_BP_MASK \
4243
(KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP)
@@ -200,6 +201,7 @@ struct kvm_vcpu_arch {
200201

201202
/* Which auxiliary state is loaded (KVM_LARCH_*) */
202203
unsigned int aux_inuse;
204+
unsigned int aux_ldtype;
203205

204206
/* FPU state */
205207
struct loongarch_fpu fpu FPU_ALIGN;

arch/loongarch/kvm/exit.c

Lines changed: 14 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -754,7 +754,8 @@ static int kvm_handle_fpu_disabled(struct kvm_vcpu *vcpu, int ecode)
754754
return RESUME_HOST;
755755
}
756756

757-
kvm_own_fpu(vcpu);
757+
vcpu->arch.aux_ldtype = KVM_LARCH_FPU;
758+
kvm_make_request(KVM_REQ_AUX_LOAD, vcpu);
758759

759760
return RESUME_GUEST;
760761
}
@@ -794,8 +795,10 @@ static int kvm_handle_lsx_disabled(struct kvm_vcpu *vcpu, int ecode)
794795
{
795796
if (!kvm_guest_has_lsx(&vcpu->arch))
796797
kvm_queue_exception(vcpu, EXCCODE_INE, 0);
797-
else
798-
kvm_own_lsx(vcpu);
798+
else {
799+
vcpu->arch.aux_ldtype = KVM_LARCH_LSX;
800+
kvm_make_request(KVM_REQ_AUX_LOAD, vcpu);
801+
}
799802

800803
return RESUME_GUEST;
801804
}
@@ -812,8 +815,10 @@ static int kvm_handle_lasx_disabled(struct kvm_vcpu *vcpu, int ecode)
812815
{
813816
if (!kvm_guest_has_lasx(&vcpu->arch))
814817
kvm_queue_exception(vcpu, EXCCODE_INE, 0);
815-
else
816-
kvm_own_lasx(vcpu);
818+
else {
819+
vcpu->arch.aux_ldtype = KVM_LARCH_LASX;
820+
kvm_make_request(KVM_REQ_AUX_LOAD, vcpu);
821+
}
817822

818823
return RESUME_GUEST;
819824
}
@@ -822,8 +827,10 @@ static int kvm_handle_lbt_disabled(struct kvm_vcpu *vcpu, int ecode)
822827
{
823828
if (!kvm_guest_has_lbt(&vcpu->arch))
824829
kvm_queue_exception(vcpu, EXCCODE_INE, 0);
825-
else
826-
kvm_own_lbt(vcpu);
830+
else {
831+
vcpu->arch.aux_ldtype = KVM_LARCH_LBT;
832+
kvm_make_request(KVM_REQ_AUX_LOAD, vcpu);
833+
}
827834

828835
return RESUME_GUEST;
829836
}

arch/loongarch/kvm/vcpu.c

Lines changed: 21 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -232,6 +232,27 @@ static void kvm_late_check_requests(struct kvm_vcpu *vcpu)
232232
kvm_flush_tlb_gpa(vcpu, vcpu->arch.flush_gpa);
233233
vcpu->arch.flush_gpa = INVALID_GPA;
234234
}
235+
236+
if (kvm_check_request(KVM_REQ_AUX_LOAD, vcpu)) {
237+
switch (vcpu->arch.aux_ldtype) {
238+
case KVM_LARCH_FPU:
239+
kvm_own_fpu(vcpu);
240+
break;
241+
case KVM_LARCH_LSX:
242+
kvm_own_lsx(vcpu);
243+
break;
244+
case KVM_LARCH_LASX:
245+
kvm_own_lasx(vcpu);
246+
break;
247+
case KVM_LARCH_LBT:
248+
kvm_own_lbt(vcpu);
249+
break;
250+
default:
251+
break;
252+
}
253+
254+
vcpu->arch.aux_ldtype = 0;
255+
}
235256
}
236257

237258
/*
@@ -1304,13 +1325,11 @@ int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
13041325
#ifdef CONFIG_CPU_HAS_LBT
13051326
int kvm_own_lbt(struct kvm_vcpu *vcpu)
13061327
{
1307-
preempt_disable();
13081328
if (!(vcpu->arch.aux_inuse & KVM_LARCH_LBT)) {
13091329
set_csr_euen(CSR_EUEN_LBTEN);
13101330
_restore_lbt(&vcpu->arch.lbt);
13111331
vcpu->arch.aux_inuse |= KVM_LARCH_LBT;
13121332
}
1313-
preempt_enable();
13141333

13151334
return 0;
13161335
}
@@ -1353,8 +1372,6 @@ static inline void kvm_check_fcsr_alive(struct kvm_vcpu *vcpu) { }
13531372
/* Enable FPU and restore context */
13541373
void kvm_own_fpu(struct kvm_vcpu *vcpu)
13551374
{
1356-
preempt_disable();
1357-
13581375
/*
13591376
* Enable FPU for guest
13601377
* Set FR and FRE according to guest context
@@ -1365,16 +1382,12 @@ void kvm_own_fpu(struct kvm_vcpu *vcpu)
13651382
kvm_restore_fpu(&vcpu->arch.fpu);
13661383
vcpu->arch.aux_inuse |= KVM_LARCH_FPU;
13671384
trace_kvm_aux(vcpu, KVM_TRACE_AUX_RESTORE, KVM_TRACE_AUX_FPU);
1368-
1369-
preempt_enable();
13701385
}
13711386

13721387
#ifdef CONFIG_CPU_HAS_LSX
13731388
/* Enable LSX and restore context */
13741389
int kvm_own_lsx(struct kvm_vcpu *vcpu)
13751390
{
1376-
preempt_disable();
1377-
13781391
/* Enable LSX for guest */
13791392
kvm_check_fcsr(vcpu, vcpu->arch.fpu.fcsr);
13801393
set_csr_euen(CSR_EUEN_LSXEN | CSR_EUEN_FPEN);
@@ -1396,7 +1409,6 @@ int kvm_own_lsx(struct kvm_vcpu *vcpu)
13961409

13971410
trace_kvm_aux(vcpu, KVM_TRACE_AUX_RESTORE, KVM_TRACE_AUX_LSX);
13981411
vcpu->arch.aux_inuse |= KVM_LARCH_LSX | KVM_LARCH_FPU;
1399-
preempt_enable();
14001412

14011413
return 0;
14021414
}
@@ -1406,8 +1418,6 @@ int kvm_own_lsx(struct kvm_vcpu *vcpu)
14061418
/* Enable LASX and restore context */
14071419
int kvm_own_lasx(struct kvm_vcpu *vcpu)
14081420
{
1409-
preempt_disable();
1410-
14111421
kvm_check_fcsr(vcpu, vcpu->arch.fpu.fcsr);
14121422
set_csr_euen(CSR_EUEN_FPEN | CSR_EUEN_LSXEN | CSR_EUEN_LASXEN);
14131423
switch (vcpu->arch.aux_inuse & (KVM_LARCH_FPU | KVM_LARCH_LSX)) {
@@ -1429,7 +1439,6 @@ int kvm_own_lasx(struct kvm_vcpu *vcpu)
14291439

14301440
trace_kvm_aux(vcpu, KVM_TRACE_AUX_RESTORE, KVM_TRACE_AUX_LASX);
14311441
vcpu->arch.aux_inuse |= KVM_LARCH_LASX | KVM_LARCH_LSX | KVM_LARCH_FPU;
1432-
preempt_enable();
14331442

14341443
return 0;
14351444
}

0 commit comments

Comments (0)