|
16 | 16 | static bool rcu_rdp_is_offloaded(struct rcu_data *rdp) |
17 | 17 | { |
18 | 18 | /* |
19 | | - * In order to read the offloaded state of an rdp is a safe |
| 19 | + * In order to read the offloaded state of an rdp in a safe |
20 | 20 | * and stable way and prevent its value from being changed |
21 | 21 | * under us, we must either hold the barrier mutex, the cpu |
22 | 22 | * hotplug lock (read or write) or the nocb lock. Local |
@@ -56,7 +56,7 @@ static void __init rcu_bootup_announce_oddness(void) |
56 | 56 | if (IS_ENABLED(CONFIG_PROVE_RCU)) |
57 | 57 | pr_info("\tRCU lockdep checking is enabled.\n"); |
58 | 58 | if (IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD)) |
59 | | - pr_info("\tRCU strict (and thus non-scalable) grace periods enabled.\n"); |
| 59 | + pr_info("\tRCU strict (and thus non-scalable) grace periods are enabled.\n"); |
60 | 60 | if (RCU_NUM_LVLS >= 4) |
61 | 61 | pr_info("\tFour(or more)-level hierarchy is enabled.\n"); |
62 | 62 | if (RCU_FANOUT_LEAF != 16) |
@@ -88,13 +88,13 @@ static void __init rcu_bootup_announce_oddness(void) |
88 | 88 | if (rcu_kick_kthreads) |
89 | 89 | pr_info("\tKick kthreads if too-long grace period.\n"); |
90 | 90 | if (IS_ENABLED(CONFIG_DEBUG_OBJECTS_RCU_HEAD)) |
91 | | - pr_info("\tRCU callback double-/use-after-free debug enabled.\n"); |
| 91 | + pr_info("\tRCU callback double-/use-after-free debug is enabled.\n"); |
92 | 92 | if (gp_preinit_delay) |
93 | 93 | pr_info("\tRCU debug GP pre-init slowdown %d jiffies.\n", gp_preinit_delay); |
94 | 94 | if (gp_init_delay) |
95 | 95 | pr_info("\tRCU debug GP init slowdown %d jiffies.\n", gp_init_delay); |
96 | 96 | if (gp_cleanup_delay) |
97 | | - pr_info("\tRCU debug GP init slowdown %d jiffies.\n", gp_cleanup_delay); |
| 97 | + pr_info("\tRCU debug GP cleanup slowdown %d jiffies.\n", gp_cleanup_delay); |
98 | 98 | if (!use_softirq) |
99 | 99 | pr_info("\tRCU_SOFTIRQ processing moved to rcuc kthreads.\n"); |
100 | 100 | if (IS_ENABLED(CONFIG_RCU_EQS_DEBUG)) |
@@ -1153,7 +1153,6 @@ static void rcu_preempt_boost_start_gp(struct rcu_node *rnp) |
1153 | 1153 | /* |
1154 | 1154 | * Create an RCU-boost kthread for the specified node if one does not |
1155 | 1155 | * already exist. We only create this kthread for preemptible RCU. |
1156 | | - * Returns zero if all is well, a negated errno otherwise. |
1157 | 1156 | */ |
1158 | 1157 | static void rcu_spawn_one_boost_kthread(struct rcu_node *rnp) |
1159 | 1158 | { |
@@ -1455,7 +1454,7 @@ static void rcu_cleanup_after_idle(void) |
1455 | 1454 | * CPU unless the grace period has extended for too long. |
1456 | 1455 | * |
1457 | 1456 | * This code relies on the fact that all NO_HZ_FULL CPUs are also |
1458 | | - * CONFIG_RCU_NOCB_CPU CPUs. |
| 1457 | + * RCU_NOCB_CPU CPUs. |
1459 | 1458 | */ |
1460 | 1459 | static bool rcu_nohz_full_cpu(void) |
1461 | 1460 | { |
|
0 commit comments