@@ -58,6 +58,8 @@
 
 extern bool itlb_multihit_kvm_mitigation;
 
+static bool nx_hugepage_mitigation_hard_disabled;
+
 int __read_mostly nx_huge_pages = -1;
 static uint __read_mostly nx_huge_pages_recovery_period_ms;
 #ifdef CONFIG_PREEMPT_RT
@@ -67,12 +69,13 @@ static uint __read_mostly nx_huge_pages_recovery_ratio = 0;
 static uint __read_mostly nx_huge_pages_recovery_ratio = 60;
 #endif
 
+static int get_nx_huge_pages(char *buffer, const struct kernel_param *kp);
 static int set_nx_huge_pages(const char *val, const struct kernel_param *kp);
 static int set_nx_huge_pages_recovery_param(const char *val, const struct kernel_param *kp);
 
 static const struct kernel_param_ops nx_huge_pages_ops = {
 	.set = set_nx_huge_pages,
-	.get = param_get_bool,
+	.get = get_nx_huge_pages,
 };
 
 static const struct kernel_param_ops nx_huge_pages_recovery_param_ops = {
@@ -5797,6 +5800,14 @@ static void __kvm_mmu_invalidate_addr(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu
 
 	vcpu_clear_mmio_info(vcpu, addr);
 
+	/*
+	 * Walking and synchronizing SPTEs both assume they are operating in
+	 * the context of the current MMU, and would need to be reworked if
+	 * this is ever used to sync the guest_mmu, e.g. to emulate INVEPT.
+	 */
+	if (WARN_ON_ONCE(mmu != vcpu->arch.mmu))
+		return;
+
 	if (!VALID_PAGE(root_hpa))
 		return;
 
@@ -6844,6 +6855,14 @@ static void mmu_destroy_caches(void)
 	kmem_cache_destroy(mmu_page_header_cache);
 }
 
+static int get_nx_huge_pages(char *buffer, const struct kernel_param *kp)
+{
+	if (nx_hugepage_mitigation_hard_disabled)
+		return sprintf(buffer, "never\n");
+
+	return param_get_bool(buffer, kp);
+}
+
 static bool get_nx_auto_mode(void)
 {
 	/* Return true when CPU has the bug, and mitigations are ON */
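With the mitigation hard-disabled, reading the parameter back (e.g. via /sys/module/kvm/parameters/nx_huge_pages) now reports the string "never" rather than the N that param_get_bool() would produce, so userspace can distinguish a sticky disable from a plain "off".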
@@ -6860,15 +6879,29 @@ static int set_nx_huge_pages(const char *val, const struct kernel_param *kp)
 	bool old_val = nx_huge_pages;
 	bool new_val;
 
+	if (nx_hugepage_mitigation_hard_disabled)
+		return -EPERM;
+
 	/* In "auto" mode deploy workaround only if CPU has the bug. */
-	if (sysfs_streq(val, "off"))
+	if (sysfs_streq(val, "off")) {
 		new_val = 0;
-	else if (sysfs_streq(val, "force"))
+	} else if (sysfs_streq(val, "force")) {
 		new_val = 1;
-	else if (sysfs_streq(val, "auto"))
+	} else if (sysfs_streq(val, "auto")) {
 		new_val = get_nx_auto_mode();
-	else if (kstrtobool(val, &new_val) < 0)
+	} else if (sysfs_streq(val, "never")) {
+		new_val = 0;
+
+		mutex_lock(&kvm_lock);
+		if (!list_empty(&vm_list)) {
+			mutex_unlock(&kvm_lock);
+			return -EBUSY;
+		}
+		nx_hugepage_mitigation_hard_disabled = true;
+		mutex_unlock(&kvm_lock);
+	} else if (kstrtobool(val, &new_val) < 0) {
 		return -EINVAL;
+	}
 
 	__set_nx_huge_pages(new_val);
 
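This hunk is the core of the change: writing "never" is honored only while vm_list is empty, and once accepted the hard-disable is permanent for the lifetime of the module. A minimal userspace sketch of those transition rules, with illustrative names only (the "auto" mode and the plain boolean spellings accepted by kstrtobool() are omitted for brevity):

/* Userspace model of the transition rules above, for illustration only. */
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

static bool hard_disabled;	/* models nx_hugepage_mitigation_hard_disabled */
static bool nx_enabled;		/* models nx_huge_pages */
static int  active_vms;		/* models !list_empty(&vm_list) */

static int set_param(const char *val)
{
	if (hard_disabled)
		return -EPERM;		/* sticky: all later writes fail */

	if (!strcmp(val, "off")) {
		nx_enabled = false;
	} else if (!strcmp(val, "force")) {
		nx_enabled = true;
	} else if (!strcmp(val, "never")) {
		if (active_vms)
			return -EBUSY;	/* refused once a VM exists */
		nx_enabled = false;
		hard_disabled = true;
	} else {
		return -EINVAL;
	}
	return 0;
}

int main(void)
{
	printf("force -> %d\n", set_param("force"));	/* 0 */
	active_vms = 1;
	printf("never -> %d\n", set_param("never"));	/* -EBUSY: VMs exist */
	active_vms = 0;
	printf("never -> %d\n", set_param("never"));	/* 0: now hard-disabled */
	printf("force -> %d\n", set_param("force"));	/* -EPERM: sticky */
	return 0;
}

Compiled and run, this prints 0, -16, 0, -1 on Linux, mirroring the -EBUSY and -EPERM paths in the hunk above.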
@@ -7006,6 +7039,9 @@ static int set_nx_huge_pages_recovery_param(const char *val, const struct kernel_param *kp)
 	uint old_period, new_period;
 	int err;
 
+	if (nx_hugepage_mitigation_hard_disabled)
+		return -EPERM;
+
 	was_recovery_enabled = calc_nx_huge_pages_recovery_period(&old_period);
 
 	err = param_set_uint(val, kp);
@@ -7164,6 +7200,9 @@ int kvm_mmu_post_init_vm(struct kvm *kvm)
 {
 	int err;
 
+	if (nx_hugepage_mitigation_hard_disabled)
+		return 0;
+
 	err = kvm_vm_create_worker_thread(kvm, kvm_nx_huge_page_recovery_worker, 0,
 					  "kvm-nx-lpage-recovery",
 					  &kvm->arch.nx_huge_page_recovery_thread);
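When the mitigation was hard-disabled before any VM came into existence, kvm_mmu_post_init_vm() returns early and the per-VM kvm-nx-lpage-recovery worker thread is never created, so such VMs carry none of the recovery machinery at all.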