@@ -72,79 +72,79 @@ For example, if it uses call_rcu(), call_srcu() on srcu_struct_1, and
7272call_srcu() on srcu_struct_2, then the following three lines of code
7373will be required when unloading::
7474
75- 1 rcu_barrier();
76- 2 srcu_barrier(&srcu_struct_1);
77- 3 srcu_barrier(&srcu_struct_2);
75+ 1 rcu_barrier();
76+ 2 srcu_barrier(&srcu_struct_1);
77+ 3 srcu_barrier(&srcu_struct_2);
7878
7979If latency is of the essence, workqueues could be used to run these
8080three functions concurrently.
8181
8282An ancient version of the rcutorture module makes use of rcu_barrier()
8383in its exit function as follows::
8484
85- 1 static void
86- 2 rcu_torture_cleanup(void)
87- 3 {
88- 4 int i;
89- 5
90- 6 fullstop = 1;
91- 7 if (shuffler_task != NULL) {
92- 8 VERBOSE_PRINTK_STRING("Stopping rcu_torture_shuffle task");
93- 9 kthread_stop(shuffler_task);
94- 10 }
95- 11 shuffler_task = NULL;
85+ 1 static void
86+ 2 rcu_torture_cleanup(void)
87+ 3 {
88+ 4 int i;
89+ 5
90+ 6 fullstop = 1;
91+ 7 if (shuffler_task != NULL) {
92+ 8 VERBOSE_PRINTK_STRING("Stopping rcu_torture_shuffle task");
93+ 9 kthread_stop(shuffler_task);
94+ 10 }
95+ 11 shuffler_task = NULL;
9696 12
97- 13 if (writer_task != NULL) {
98- 14 VERBOSE_PRINTK_STRING("Stopping rcu_torture_writer task");
99- 15 kthread_stop(writer_task);
100- 16 }
101- 17 writer_task = NULL;
97+ 13 if (writer_task != NULL) {
98+ 14 VERBOSE_PRINTK_STRING("Stopping rcu_torture_writer task");
99+ 15 kthread_stop(writer_task);
100+ 16 }
101+ 17 writer_task = NULL;
102102 18
103- 19 if (reader_tasks != NULL) {
104- 20 for (i = 0; i < nrealreaders; i++) {
105- 21 if (reader_tasks[i] != NULL) {
106- 22 VERBOSE_PRINTK_STRING(
107- 23 "Stopping rcu_torture_reader task");
108- 24 kthread_stop(reader_tasks[i]);
109- 25 }
110- 26 reader_tasks[i] = NULL;
111- 27 }
112- 28 kfree(reader_tasks);
113- 29 reader_tasks = NULL;
114- 30 }
115- 31 rcu_torture_current = NULL;
103+ 19 if (reader_tasks != NULL) {
104+ 20 for (i = 0; i < nrealreaders; i++) {
105+ 21 if (reader_tasks[i] != NULL) {
106+ 22 VERBOSE_PRINTK_STRING(
107+ 23 "Stopping rcu_torture_reader task");
108+ 24 kthread_stop(reader_tasks[i]);
109+ 25 }
110+ 26 reader_tasks[i] = NULL;
111+ 27 }
112+ 28 kfree(reader_tasks);
113+ 29 reader_tasks = NULL;
114+ 30 }
115+ 31 rcu_torture_current = NULL;
116116 32
117- 33 if (fakewriter_tasks != NULL) {
118- 34 for (i = 0; i < nfakewriters; i++) {
119- 35 if (fakewriter_tasks[i] != NULL) {
120- 36 VERBOSE_PRINTK_STRING(
121- 37 "Stopping rcu_torture_fakewriter task");
122- 38 kthread_stop(fakewriter_tasks[i]);
123- 39 }
124- 40 fakewriter_tasks[i] = NULL;
125- 41 }
126- 42 kfree(fakewriter_tasks);
127- 43 fakewriter_tasks = NULL;
128- 44 }
117+ 33 if (fakewriter_tasks != NULL) {
118+ 34 for (i = 0; i < nfakewriters; i++) {
119+ 35 if (fakewriter_tasks[i] != NULL) {
120+ 36 VERBOSE_PRINTK_STRING(
121+ 37 "Stopping rcu_torture_fakewriter task");
122+ 38 kthread_stop(fakewriter_tasks[i]);
123+ 39 }
124+ 40 fakewriter_tasks[i] = NULL;
125+ 41 }
126+ 42 kfree(fakewriter_tasks);
127+ 43 fakewriter_tasks = NULL;
128+ 44 }
129129 45
130- 46 if (stats_task != NULL) {
131- 47 VERBOSE_PRINTK_STRING("Stopping rcu_torture_stats task");
132- 48 kthread_stop(stats_task);
133- 49 }
134- 50 stats_task = NULL;
130+ 46 if (stats_task != NULL) {
131+ 47 VERBOSE_PRINTK_STRING("Stopping rcu_torture_stats task");
132+ 48 kthread_stop(stats_task);
133+ 49 }
134+ 50 stats_task = NULL;
135135 51
136- 52 /* Wait for all RCU callbacks to fire. */
137- 53 rcu_barrier();
136+ 52 /* Wait for all RCU callbacks to fire. */
137+ 53 rcu_barrier();
138138 54
139- 55 rcu_torture_stats_print(); /* -After- the stats thread is stopped! */
139+ 55 rcu_torture_stats_print(); /* -After- the stats thread is stopped! */
140140 56
141- 57 if (cur_ops->cleanup != NULL)
142- 58 cur_ops->cleanup();
143- 59 if (atomic_read(&n_rcu_torture_error))
144- 60 rcu_torture_print_module_parms("End of test: FAILURE");
145- 61 else
146- 62 rcu_torture_print_module_parms("End of test: SUCCESS");
147- 63 }
141+ 57 if (cur_ops->cleanup != NULL)
142+ 58 cur_ops->cleanup();
143+ 59 if (atomic_read(&n_rcu_torture_error))
144+ 60 rcu_torture_print_module_parms("End of test: FAILURE");
145+ 61 else
146+ 62 rcu_torture_print_module_parms("End of test: SUCCESS");
147+ 63 }
148148
149149Line 6 sets a global variable that prevents any RCU callbacks from
150150re-posting themselves. This will not be necessary in most cases, since
@@ -193,16 +193,16 @@ which point, all earlier RCU callbacks are guaranteed to have completed.
193193
194194The original code for rcu_barrier() was roughly as follows::
195195
196- 1 void rcu_barrier(void)
197- 2 {
198- 3 BUG_ON(in_interrupt());
199- 4 /* Take cpucontrol mutex to protect against CPU hotplug */
200- 5 mutex_lock(&rcu_barrier_mutex);
201- 6 init_completion(&rcu_barrier_completion);
202- 7 atomic_set(&rcu_barrier_cpu_count, 1);
203- 8 on_each_cpu(rcu_barrier_func, NULL, 0, 1);
204- 9 if (atomic_dec_and_test(&rcu_barrier_cpu_count))
205- 10 complete(&rcu_barrier_completion);
196+ 1 void rcu_barrier(void)
197+ 2 {
198+ 3 BUG_ON(in_interrupt());
199+ 4 /* Take cpucontrol mutex to protect against CPU hotplug */
200+ 5 mutex_lock(&rcu_barrier_mutex);
201+ 6 init_completion(&rcu_barrier_completion);
202+ 7 atomic_set(&rcu_barrier_cpu_count, 1);
203+ 8 on_each_cpu(rcu_barrier_func, NULL, 0, 1);
204+ 9 if (atomic_dec_and_test(&rcu_barrier_cpu_count))
205+ 10 complete(&rcu_barrier_completion);
206206 11 wait_for_completion(&rcu_barrier_completion);
207207 12 mutex_unlock(&rcu_barrier_mutex);
208208 13 }
@@ -232,16 +232,16 @@ still gives the general idea.
232232The rcu_barrier_func() runs on each CPU, where it invokes call_rcu()
233233to post an RCU callback, as follows::
234234
235- 1 static void rcu_barrier_func(void *notused)
236- 2 {
237- 3 int cpu = smp_processor_id();
238- 4 struct rcu_data *rdp = &per_cpu(rcu_data, cpu);
239- 5 struct rcu_head *head;
240- 6
241- 7 head = &rdp->barrier;
242- 8 atomic_inc(&rcu_barrier_cpu_count);
243- 9 call_rcu(head, rcu_barrier_callback);
244- 10 }
235+ 1 static void rcu_barrier_func(void *notused)
236+ 2 {
237+ 3 int cpu = smp_processor_id();
238+ 4 struct rcu_data *rdp = &per_cpu(rcu_data, cpu);
239+ 5 struct rcu_head *head;
240+ 6
241+ 7 head = &rdp->barrier;
242+ 8 atomic_inc(&rcu_barrier_cpu_count);
243+ 9 call_rcu(head, rcu_barrier_callback);
244+ 10 }
245245
246246Lines 3 and 4 locate RCU's internal per-CPU rcu_data structure,
247247which contains the struct rcu_head that is needed for the later call to
@@ -254,11 +254,11 @@ The rcu_barrier_callback() function simply atomically decrements the
254254rcu_barrier_cpu_count variable and finalizes the completion when it
255255reaches zero, as follows::
256256
257- 1 static void rcu_barrier_callback(struct rcu_head *notused)
258- 2 {
259- 3 if (atomic_dec_and_test(&rcu_barrier_cpu_count))
260- 4 complete(&rcu_barrier_completion);
261- 5 }
257+ 1 static void rcu_barrier_callback(struct rcu_head *notused)
258+ 2 {
259+ 3 if (atomic_dec_and_test(&rcu_barrier_cpu_count))
260+ 4 complete(&rcu_barrier_completion);
261+ 5 }
262262
263263.. _rcubarrier_quiz_3 :
264264
0 commit comments