88
99#include <linux/acpi.h>
1010#include <linux/bitmap.h>
11+ #include <linux/cpu.h>
1112#include <linux/cpuhotplug.h>
1213#include <linux/ctype.h>
14+ #include <linux/jump_label.h>
1315#include <linux/log2.h>
1416#include <linux/memory.h>
1517#include <linux/module.h>
@@ -44,6 +46,8 @@ struct riscv_isainfo hart_isa[NR_CPUS];
/* Performance information */
/* Per-CPU measured misaligned-access speed (RISCV_HWPROBE_MISALIGNED_*). */
DEFINE_PER_CPU(long, misaligned_access_speed);

/*
 * CPUs whose probe reported RISCV_HWPROBE_MISALIGNED_FAST.  Maintained by
 * check_unaligned_access() and consumed when deciding whether to flip
 * fast_misaligned_access_speed_key.
 */
static cpumask_t fast_misaligned_access;
4751/**
4852 * riscv_isa_extension_base() - Get base extension word
4953 *
@@ -643,6 +647,16 @@ static int check_unaligned_access(void *param)
643647 (speed == RISCV_HWPROBE_MISALIGNED_FAST ) ? "fast" : "slow" );
644648
645649 per_cpu (misaligned_access_speed , cpu ) = speed ;
650+
651+ /*
652+ * Set the value of fast_misaligned_access of a CPU. These operations
653+ * are atomic to avoid race conditions.
654+ */
655+ if (speed == RISCV_HWPROBE_MISALIGNED_FAST )
656+ cpumask_set_cpu (cpu , & fast_misaligned_access );
657+ else
658+ cpumask_clear_cpu (cpu , & fast_misaligned_access );
659+
646660 return 0 ;
647661}
648662
@@ -655,13 +669,69 @@ static void check_unaligned_access_nonboot_cpu(void *param)
655669 check_unaligned_access (pages [cpu ]);
656670}
657671
672+ DEFINE_STATIC_KEY_FALSE (fast_misaligned_access_speed_key );
673+
674+ static void modify_unaligned_access_branches (cpumask_t * mask , int weight )
675+ {
676+ if (cpumask_weight (mask ) == weight )
677+ static_branch_enable_cpuslocked (& fast_misaligned_access_speed_key );
678+ else
679+ static_branch_disable_cpuslocked (& fast_misaligned_access_speed_key );
680+ }
681+
682+ static void set_unaligned_access_static_branches_except_cpu (int cpu )
683+ {
684+ /*
685+ * Same as set_unaligned_access_static_branches, except excludes the
686+ * given CPU from the result. When a CPU is hotplugged into an offline
687+ * state, this function is called before the CPU is set to offline in
688+ * the cpumask, and thus the CPU needs to be explicitly excluded.
689+ */
690+
691+ cpumask_t fast_except_me ;
692+
693+ cpumask_and (& fast_except_me , & fast_misaligned_access , cpu_online_mask );
694+ cpumask_clear_cpu (cpu , & fast_except_me );
695+
696+ modify_unaligned_access_branches (& fast_except_me , num_online_cpus () - 1 );
697+ }
698+
699+ static void set_unaligned_access_static_branches (void )
700+ {
701+ /*
702+ * This will be called after check_unaligned_access_all_cpus so the
703+ * result of unaligned access speed for all CPUs will be available.
704+ *
705+ * To avoid the number of online cpus changing between reading
706+ * cpu_online_mask and calling num_online_cpus, cpus_read_lock must be
707+ * held before calling this function.
708+ */
709+
710+ cpumask_t fast_and_online ;
711+
712+ cpumask_and (& fast_and_online , & fast_misaligned_access , cpu_online_mask );
713+
714+ modify_unaligned_access_branches (& fast_and_online , num_online_cpus ());
715+ }
716+
/*
 * Initcall wrapper: take the CPU hotplug read lock (required by the
 * *_cpuslocked static-branch helpers) and seed the fast-misaligned key.
 */
static int lock_and_set_unaligned_access_static_branch(void)
{
	cpus_read_lock();

	set_unaligned_access_static_branches();

	cpus_read_unlock();

	return 0;
}

arch_initcall_sync(lock_and_set_unaligned_access_static_branch);
727+
658728static int riscv_online_cpu (unsigned int cpu )
659729{
660730 static struct page * buf ;
661731
662732 /* We are already set since the last check */
663733 if (per_cpu (misaligned_access_speed , cpu ) != RISCV_HWPROBE_MISALIGNED_UNKNOWN )
664- return 0 ;
734+ goto exit ;
665735
666736 buf = alloc_pages (GFP_KERNEL , MISALIGNED_BUFFER_ORDER );
667737 if (!buf ) {
@@ -671,6 +741,17 @@ static int riscv_online_cpu(unsigned int cpu)
671741
672742 check_unaligned_access (buf );
673743 __free_pages (buf , MISALIGNED_BUFFER_ORDER );
744+
745+ exit :
746+ set_unaligned_access_static_branches ();
747+
748+ return 0 ;
749+ }
750+
/*
 * CPU hot-unplug callback: recompute the fast-misaligned static key with
 * the departing CPU excluded (it is still set in cpu_online_mask when
 * this runs, hence the *_except_cpu variant).
 */
static int riscv_offline_cpu(unsigned int cpu)
{
	set_unaligned_access_static_branches_except_cpu(cpu);

	return 0;
}
676757
@@ -705,9 +786,12 @@ static int check_unaligned_access_all_cpus(void)
705786 /* Check core 0. */
706787 smp_call_on_cpu (0 , check_unaligned_access , bufs [0 ], true);
707788
708- /* Setup hotplug callback for any new CPUs that come online. */
789+ /*
790+ * Setup hotplug callbacks for any new CPUs that come online or go
791+ * offline.
792+ */
709793 cpuhp_setup_state_nocalls (CPUHP_AP_ONLINE_DYN , "riscv:online" ,
710- riscv_online_cpu , NULL );
794+ riscv_online_cpu , riscv_offline_cpu );
711795
712796out :
713797 unaligned_emulation_finish ();
0 commit comments