@@ -3987,13 +3987,16 @@ static void rcu_barrier_callback(struct rcu_head *rhp)
 }
 
 /*
- * Called with preemption disabled, and from cross-cpu IRQ context.
+ * If needed, entrain an rcu_barrier() callback on rdp->cblist.
  */
-static void rcu_barrier_func(void *cpu_in)
+static void rcu_barrier_entrain(struct rcu_data *rdp)
 {
-	uintptr_t cpu = (uintptr_t)cpu_in;
-	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
+	unsigned long gseq = READ_ONCE(rcu_state.barrier_sequence);
+	unsigned long lseq = READ_ONCE(rdp->barrier_seq_snap);
 
+	lockdep_assert_held(&rdp->barrier_lock);
+	if (rcu_seq_state(lseq) || !rcu_seq_state(gseq) || rcu_seq_ctr(lseq) != rcu_seq_ctr(gseq))
+		return;
 	rcu_barrier_trace(TPS("IRQ"), -1, rcu_state.barrier_sequence);
 	rdp->barrier_head.func = rcu_barrier_callback;
 	debug_rcu_head_queue(&rdp->barrier_head);
@@ -4003,10 +4006,26 @@ static void rcu_barrier_func(void *cpu_in)
 		atomic_inc(&rcu_state.barrier_cpu_count);
 	} else {
 		debug_rcu_head_unqueue(&rdp->barrier_head);
-		rcu_barrier_trace(TPS("IRQNQ"), -1,
-				  rcu_state.barrier_sequence);
+		rcu_barrier_trace(TPS("IRQNQ"), -1, rcu_state.barrier_sequence);
 	}
 	rcu_nocb_unlock(rdp);
+	smp_store_release(&rdp->barrier_seq_snap, gseq);
+}
+
+/*
+ * Called with preemption disabled, and from cross-cpu IRQ context.
+ */
+static void rcu_barrier_handler(void *cpu_in)
+{
+	uintptr_t cpu = (uintptr_t)cpu_in;
+	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
+
+	lockdep_assert_irqs_disabled();
+	WARN_ON_ONCE(cpu != rdp->cpu);
+	WARN_ON_ONCE(cpu != smp_processor_id());
+	raw_spin_lock(&rdp->barrier_lock);
+	rcu_barrier_entrain(rdp);
+	raw_spin_unlock(&rdp->barrier_lock);
 }
 
 /**
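
The entrain/skip decision above leans on RCU's sequence-counter encoding (rcu_seq_ctr() and rcu_seq_state() from kernel/rcu/rcu.h): the low RCU_SEQ_CTR_SHIFT bits of barrier_sequence carry an in-progress state, and the remaining bits count completed barriers. Below is a minimal userspace model of that check, assuming the kernel's two-bit state field; must_entrain() is a name invented for this sketch, not kernel code.

#include <assert.h>
#include <stdio.h>

#define RCU_SEQ_CTR_SHIFT	2
#define RCU_SEQ_STATE_MASK	((1UL << RCU_SEQ_CTR_SHIFT) - 1)

/* Upper bits: how many barriers have completed. */
static unsigned long rcu_seq_ctr(unsigned long s)
{
	return s >> RCU_SEQ_CTR_SHIFT;
}

/* Low bits: nonzero while a barrier is in flight. */
static unsigned long rcu_seq_state(unsigned long s)
{
	return s & RCU_SEQ_STATE_MASK;
}

/* rcu_barrier_entrain()'s skip test, inverted: queue the callback only if a
 * barrier is in flight, this CPU has not yet been entrained for it, and the
 * local snapshot dates from the end of the previous barrier. */
static int must_entrain(unsigned long gseq, unsigned long lseq)
{
	return !(rcu_seq_state(lseq) || !rcu_seq_state(gseq) ||
		 rcu_seq_ctr(lseq) != rcu_seq_ctr(gseq));
}

int main(void)
{
	unsigned long inflight = (5UL << RCU_SEQ_CTR_SHIFT) | 1; /* barrier started */
	unsigned long prev_end = 5UL << RCU_SEQ_CTR_SHIFT;	 /* prior barrier's end value */

	assert(must_entrain(inflight, prev_end));	/* stale snapshot: queue */
	assert(!must_entrain(inflight, inflight));	/* already entrained: skip */
	assert(!must_entrain(prev_end, prev_end));	/* no barrier in flight: skip */
	printf("sequence-snapshot model OK\n");
	return 0;
}

A stale snapshot sharing the in-flight counter is exactly the state left behind by the resynchronization loop at the end of the previous rcu_barrier(), so only the first entrain attempt per CPU per barrier does any work.
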
@@ -4020,6 +4039,8 @@ static void rcu_barrier_func(void *cpu_in)
 void rcu_barrier(void)
 {
 	uintptr_t cpu;
+	unsigned long flags;
+	unsigned long gseq;
 	struct rcu_data *rdp;
 	unsigned long s = rcu_seq_snap(&rcu_state.barrier_sequence);
 
@@ -4038,6 +4059,7 @@ void rcu_barrier(void)
 
 	/* Mark the start of the barrier operation. */
 	rcu_seq_start(&rcu_state.barrier_sequence);
+	gseq = rcu_state.barrier_sequence;
 	rcu_barrier_trace(TPS("Inc1"), -1, rcu_state.barrier_sequence);
 
 	/*
@@ -4058,19 +4080,30 @@ void rcu_barrier(void)
40584080 */
40594081 for_each_possible_cpu (cpu ) {
40604082 rdp = per_cpu_ptr (& rcu_data , cpu );
4083+ retry :
4084+ if (smp_load_acquire (& rdp -> barrier_seq_snap ) == gseq )
4085+ continue ;
4086+ raw_spin_lock_irqsave (& rdp -> barrier_lock , flags );
40614087 if (!rcu_segcblist_n_cbs (& rdp -> cblist )) {
4088+ WRITE_ONCE (rdp -> barrier_seq_snap , gseq );
4089+ raw_spin_unlock_irqrestore (& rdp -> barrier_lock , flags );
40624090 rcu_barrier_trace (TPS ("NQ" ), cpu , rcu_state .barrier_sequence );
40634091 continue ;
40644092 }
4065- if (cpu_online ( cpu )) {
4066- rcu_barrier_trace ( TPS ( "OnlineQ" ), cpu , rcu_state . barrier_sequence );
4067- smp_call_function_single ( cpu , rcu_barrier_func , ( void * ) cpu , 1 );
4068- } else {
4093+ if (! rcu_rdp_cpu_online ( rdp )) {
4094+ rcu_barrier_entrain ( rdp );
4095+ WARN_ON_ONCE ( READ_ONCE ( rdp -> barrier_seq_snap ) != gseq );
4096+ raw_spin_unlock_irqrestore ( & rdp -> barrier_lock , flags );
40694097 rcu_barrier_trace (TPS ("OfflineNoCBQ" ), cpu , rcu_state .barrier_sequence );
4070- local_irq_disable ();
4071- rcu_barrier_func ((void * )cpu );
4072- local_irq_enable ();
4098+ continue ;
40734099 }
4100+ raw_spin_unlock_irqrestore (& rdp -> barrier_lock , flags );
4101+ if (smp_call_function_single (cpu , rcu_barrier_handler , (void * )cpu , 1 )) {
4102+ schedule_timeout_uninterruptible (1 );
4103+ goto retry ;
4104+ }
4105+ WARN_ON_ONCE (READ_ONCE (rdp -> barrier_seq_snap ) != gseq );
4106+ rcu_barrier_trace (TPS ("OnlineQ" ), cpu , rcu_state .barrier_sequence );
40744107 }
40754108 cpus_read_unlock ();
40764109
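
The retry: fast path pairs the smp_load_acquire() of rdp->barrier_seq_snap with the smp_store_release() in rcu_barrier_entrain(): observing the current sequence guarantees the entrained callback is also visible, so the CPU can be skipped without taking barrier_lock. Here is a self-contained C11-atomics sketch of that pairing; the *_model names are inventions for illustration, not kernel API.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct cpu_model {
	atomic_ulong barrier_seq_snap;	/* models rdp->barrier_seq_snap */
	unsigned long queued_for;	/* models the entrained rdp->barrier_head */
};

static const unsigned long gseq_model = 21;	/* (5 << 2) | 1: barrier in flight */

/* Entrain side: publish the callback first, then release-store the snapshot,
 * so anyone who acquire-loads the new snapshot also sees the callback. */
static void entrain_model(struct cpu_model *c)
{
	c->queued_for = gseq_model;
	atomic_store_explicit(&c->barrier_seq_snap, gseq_model,
			      memory_order_release);
}

/* rcu_barrier() side: an acquire load matching the global sequence proves the
 * callback for the current barrier is already queued, so this CPU may be
 * skipped without taking the lock. */
static bool can_skip_model(struct cpu_model *c)
{
	return atomic_load_explicit(&c->barrier_seq_snap,
				    memory_order_acquire) == gseq_model;
}

int main(void)
{
	struct cpu_model c;

	atomic_init(&c.barrier_seq_snap, 20);	/* snapshot of the previous barrier */
	c.queued_for = 0;

	printf("skip before entrain: %d\n", can_skip_model(&c));	/* 0 */
	entrain_model(&c);
	printf("skip after entrain:  %d\n", can_skip_model(&c));	/* 1 */
	return 0;
}

The goto retry handles smp_call_function_single() failing because the target CPU went offline after barrier_lock was dropped: after a one-jiffy sleep the loop re-examines the CPU, which by then either fast-paths out or is entrained through the offline branch.
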
@@ -4087,6 +4120,12 @@ void rcu_barrier(void)
 	/* Mark the end of the barrier operation. */
 	rcu_barrier_trace(TPS("Inc2"), -1, rcu_state.barrier_sequence);
 	rcu_seq_end(&rcu_state.barrier_sequence);
+	gseq = rcu_state.barrier_sequence;
+	for_each_possible_cpu(cpu) {
+		rdp = per_cpu_ptr(&rcu_data, cpu);
+
+		WRITE_ONCE(rdp->barrier_seq_snap, gseq);
+	}
 
 	/* Other rcu_barrier() invocations can now safely proceed. */
 	mutex_unlock(&rcu_state.barrier_mutex);
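
Resynchronizing every snapshot after rcu_seq_end() keeps the counter arithmetic of the model above working across barriers: if the barrier ran with sequence (5 << 2) | 1, it ends at 6 << 2, which is written into every rdp->barrier_seq_snap; the next barrier starts at (6 << 2) | 1, so those snapshots share its counter and qualify for entraining, while a snapshot still equal to an in-flight value marks a CPU already handled.
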
@@ -4134,6 +4173,8 @@ rcu_boot_init_percpu_data(int cpu)
 	INIT_WORK(&rdp->strict_work, strict_work_handler);
 	WARN_ON_ONCE(rdp->dynticks_nesting != 1);
 	WARN_ON_ONCE(rcu_dynticks_in_eqs(rcu_dynticks_snap(rdp)));
+	raw_spin_lock_init(&rdp->barrier_lock);
+	rdp->barrier_seq_snap = rcu_state.barrier_sequence;
 	rdp->rcu_ofl_gp_seq = rcu_state.gp_seq;
 	rdp->rcu_ofl_gp_flags = RCU_GP_CLEANED;
 	rdp->rcu_onl_gp_seq = rcu_state.gp_seq;
@@ -4284,8 +4325,10 @@ void rcu_cpu_starting(unsigned int cpu)
 	local_irq_save(flags);
 	arch_spin_lock(&rcu_state.ofl_lock);
 	rcu_dynticks_eqs_online();
+	raw_spin_lock(&rdp->barrier_lock);
 	raw_spin_lock_rcu_node(rnp);
 	WRITE_ONCE(rnp->qsmaskinitnext, rnp->qsmaskinitnext | mask);
+	raw_spin_unlock(&rdp->barrier_lock);
 	newcpu = !(rnp->expmaskinitnext & mask);
 	rnp->expmaskinitnext |= mask;
 	/* Allow lockless access for expedited grace periods. */
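
Wrapping the qsmaskinitnext update in rdp->barrier_lock serializes the CPU's online transition against rcu_barrier_entrain(): a concurrent rcu_barrier() holding the same lock either still observes the CPU offline via rcu_rdp_cpu_online() and entrains on its behalf, or observes it online and sends the IPI; the incoming CPU cannot slip between the two checks.
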
@@ -4372,7 +4415,9 @@ void rcutree_migrate_callbacks(int cpu)
 	    rcu_segcblist_empty(&rdp->cblist))
 		return;  /* No callbacks to migrate. */
 
-	local_irq_save(flags);
+	raw_spin_lock_irqsave(&rdp->barrier_lock, flags);
+	WARN_ON_ONCE(rcu_rdp_cpu_online(rdp));
+	rcu_barrier_entrain(rdp);
 	my_rdp = this_cpu_ptr(&rcu_data);
 	my_rnp = my_rdp->mynode;
 	rcu_nocb_lock(my_rdp); /* irqs already disabled. */
@@ -4382,10 +4427,10 @@ void rcutree_migrate_callbacks(int cpu)
 	needwake = rcu_advance_cbs(my_rnp, rdp) ||
 		   rcu_advance_cbs(my_rnp, my_rdp);
 	rcu_segcblist_merge(&my_rdp->cblist, &rdp->cblist);
+	raw_spin_unlock(&rdp->barrier_lock); /* irqs remain disabled. */
 	needwake = needwake || rcu_advance_cbs(my_rnp, my_rdp);
 	rcu_segcblist_disable(&rdp->cblist);
-	WARN_ON_ONCE(rcu_segcblist_empty(&my_rdp->cblist) !=
-		     !rcu_segcblist_n_cbs(&my_rdp->cblist));
+	WARN_ON_ONCE(rcu_segcblist_empty(&my_rdp->cblist) != !rcu_segcblist_n_cbs(&my_rdp->cblist));
 	if (rcu_rdp_is_offloaded(my_rdp)) {
 		raw_spin_unlock_rcu_node(my_rnp); /* irqs remain disabled. */
 		__call_rcu_nocb_wake(my_rdp, true, flags);
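
Entraining under barrier_lock before rcu_segcblist_merge() means a concurrent rcu_barrier() cannot lose callbacks in mid-migration: either it acquired barrier_lock first and has already entrained on the outgoing CPU, or the migration path entrains here and the barrier callback moves to the surviving CPU along with the callbacks it must trail.
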