@@ -569,6 +569,209 @@ void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
 	}
 }
 
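+/*
+ * _kvm_vcpu_load() and _kvm_vcpu_put() pair up through two aux_inuse
+ * flags: KVM_LARCH_HWCSR_USABLE means the hardware GCSRs already hold
+ * this vCPU's state, so the restore below can be skipped; and
+ * KVM_LARCH_SWCSR_LATEST means the software copy is already current,
+ * so the save in _kvm_vcpu_put() can be skipped.
+ */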
+static int _kvm_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
+{
+	bool migrated;
+	struct kvm_context *context;
+	struct loongarch_csrs *csr = vcpu->arch.csr;
+
+	/*
+	 * Have we migrated to a different CPU?
+	 * If so, any old guest TLB state may be stale.
+	 */
+	migrated = (vcpu->arch.last_sched_cpu != cpu);
+
+	/*
+	 * Was this the last vCPU to run on this CPU?
+	 * If not, any old guest state from this vCPU will have been clobbered.
+	 */
+	context = per_cpu_ptr(vcpu->kvm->arch.vmcs, cpu);
+	if (migrated || (context->last_vcpu != vcpu))
+		vcpu->arch.aux_inuse &= ~KVM_LARCH_HWCSR_USABLE;
+	context->last_vcpu = vcpu;
+
+	/* Restore timer state regardless */
+	kvm_restore_timer(vcpu);
+
+	/* Control guest page CCA attribute */
+	change_csr_gcfg(CSR_GCFG_MATC_MASK, CSR_GCFG_MATC_ROOT);
+
+	/* Don't bother restoring registers multiple times unless necessary */
+	if (vcpu->arch.aux_inuse & KVM_LARCH_HWCSR_USABLE)
+		return 0;
+
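+	/* Apply the per-VM counter offset so the guest reads a stable timebase */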
+	write_csr_gcntc((ulong)vcpu->kvm->arch.time_offset);
+
+	/* Restore guest CSR registers */
+	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_CRMD);
+	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PRMD);
+	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_EUEN);
+	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_MISC);
+	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_ECFG);
+	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_ERA);
+	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_BADV);
+	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_BADI);
+	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_EENTRY);
+	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBIDX);
+	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBEHI);
+	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBELO0);
+	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBELO1);
+	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_ASID);
+	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PGDL);
+	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PGDH);
+	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PWCTL0);
+	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PWCTL1);
+	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_STLBPGSIZE);
+	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_RVACFG);
+	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_CPUID);
+	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_KS0);
+	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_KS1);
+	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_KS2);
+	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_KS3);
+	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_KS4);
+	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_KS5);
+	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_KS6);
+	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_KS7);
+	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TMID);
+	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_CNTC);
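+	/* TLB refill exception context */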
+	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBRENTRY);
+	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBRBADV);
+	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBRERA);
+	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBRSAVE);
+	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBRELO0);
+	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBRELO1);
+	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBREHI);
+	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBRPRMD);
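+	/* Direct mapping windows */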
+	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_DMWIN0);
+	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_DMWIN1);
+	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_DMWIN2);
+	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_DMWIN3);
+	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_LLBCTL);
+
+	/* Restore Root.GINTC from unused Guest.GINTC register */
+	write_csr_gintc(csr->csrs[LOONGARCH_CSR_GINTC]);
+
+	/*
+	 * Clear the linked-load bit to break interrupted atomics. This
+	 * prevents an SC on the next vCPU from succeeding by matching an LL
+	 * performed by the previous vCPU.
+	 */
+	if (vcpu->kvm->created_vcpus > 1)
+		set_gcsr_llbctl(CSR_LLBCTL_WCLLB);
+
+	vcpu->arch.aux_inuse |= KVM_LARCH_HWCSR_USABLE;
+
+	return 0;
+}
+
+void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
+{
+	unsigned long flags;
+
+	local_irq_save(flags);
+	if (vcpu->arch.last_sched_cpu != cpu) {
+		kvm_debug("[%d->%d]KVM vCPU[%d] switch\n",
+			  vcpu->arch.last_sched_cpu, cpu, vcpu->vcpu_id);
+		/*
+		 * Migrate the timer interrupt to the current CPU so that it
+		 * always interrupts the guest and synchronously triggers a
+		 * guest timer interrupt.
+		 */
+		kvm_migrate_count(vcpu);
+	}
+
+	/* Restore guest state to registers */
+	_kvm_vcpu_load(vcpu, cpu);
+	local_irq_restore(flags);
+}
+
+static int _kvm_vcpu_put(struct kvm_vcpu *vcpu, int cpu)
+{
+	struct loongarch_csrs *csr = vcpu->arch.csr;
+
+	kvm_lose_fpu(vcpu);
+
+	/*
+	 * Update the software CSR copy from hardware only if it is stale.
+	 * Most CSRs are unchanged across a process context switch; the
+	 * exceptions are registers such as the remaining timer tick value
+	 * and the injected interrupt state.
+	 */
+	if (vcpu->arch.aux_inuse & KVM_LARCH_SWCSR_LATEST)
+		goto out;
+
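+	/* Save guest CSR registers into the software copy */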
+	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_CRMD);
+	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PRMD);
+	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_EUEN);
+	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_MISC);
+	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_ECFG);
+	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_ERA);
+	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_BADV);
+	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_BADI);
+	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_EENTRY);
+	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBIDX);
+	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBEHI);
+	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBELO0);
+	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBELO1);
+	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_ASID);
+	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PGDL);
+	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PGDH);
+	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PWCTL0);
+	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PWCTL1);
+	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_STLBPGSIZE);
+	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_RVACFG);
+	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_CPUID);
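+	/* PRCFG1..3 are read-only config CSRs: saved here, never restored */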
+	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PRCFG1);
+	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PRCFG2);
+	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PRCFG3);
+	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_KS0);
+	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_KS1);
+	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_KS2);
+	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_KS3);
+	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_KS4);
+	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_KS5);
+	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_KS6);
+	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_KS7);
+	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TMID);
+	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_CNTC);
+	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_LLBCTL);
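+	/* TLB refill exception context */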
+	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBRENTRY);
+	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBRBADV);
+	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBRERA);
+	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBRSAVE);
+	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBRELO0);
+	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBRELO1);
+	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBREHI);
+	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBRPRMD);
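+	/* Direct mapping windows */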
+	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_DMWIN0);
+	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_DMWIN1);
+	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_DMWIN2);
+	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_DMWIN3);
+
+	vcpu->arch.aux_inuse |= KVM_LARCH_SWCSR_LATEST;
+
+out:
+	kvm_save_timer(vcpu);
+	/* Save Root.GINTC into unused Guest.GINTC register */
+	csr->csrs[LOONGARCH_CSR_GINTC] = read_csr_gintc();
+
+	return 0;
+}
+
+void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
+{
+	int cpu;
+	unsigned long flags;
+
+	local_irq_save(flags);
+	cpu = smp_processor_id();
+	vcpu->arch.last_sched_cpu = cpu;
+
+	/* Save guest state in registers */
+	_kvm_vcpu_put(vcpu, cpu);
+	local_irq_restore(flags);
+}
+
 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
 {
 	int r = -EINTR;