Skip to content

Commit ccd0256

Browse files
committed
Merge branch 'rcu.2025.09.23a' into HEAD
RCU miscellaneous updates:

* Document that rcu_barrier() hurries lazy callbacks
* Remove local_irq_save/restore() in rcu_preempt_deferred_qs_handler()
* Move list_for_each_rcu() to where it belongs
* Replace use of system_wq with system_percpu_wq
* WQ_PERCPU added to alloc_workqueue users
* WQ_UNBOUND added to sync_wq workqueue
2 parents 0e9e702 + 82c427b commit ccd0256

6 files changed

Lines changed: 21 additions & 18 deletions

File tree

include/linux/list.h

Lines changed: 0 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -686,16 +686,6 @@ static inline void list_splice_tail_init(struct list_head *list,
686686
#define list_for_each(pos, head) \
687687
for (pos = (head)->next; !list_is_head(pos, (head)); pos = pos->next)
688688

689-
/**
690-
* list_for_each_rcu - Iterate over a list in an RCU-safe fashion
691-
* @pos: the &struct list_head to use as a loop cursor.
692-
* @head: the head for your list.
693-
*/
694-
#define list_for_each_rcu(pos, head) \
695-
for (pos = rcu_dereference((head)->next); \
696-
!list_is_head(pos, (head)); \
697-
pos = rcu_dereference(pos->next))
698-
699689
/**
700690
* list_for_each_continue - continue iteration over a list
701691
* @pos: the &struct list_head to use as a loop cursor.

include/linux/rculist.h

Lines changed: 10 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -42,6 +42,16 @@ static inline void INIT_LIST_HEAD_RCU(struct list_head *list)
4242
*/
4343
#define list_bidir_prev_rcu(list) (*((struct list_head __rcu **)(&(list)->prev)))
4444

45+
/**
46+
* list_for_each_rcu - Iterate over a list in an RCU-safe fashion
47+
* @pos: the &struct list_head to use as a loop cursor.
48+
* @head: the head for your list.
49+
*/
50+
#define list_for_each_rcu(pos, head) \
51+
for (pos = rcu_dereference((head)->next); \
52+
!list_is_head(pos, (head)); \
53+
pos = rcu_dereference(pos->next))
54+
4555
/**
4656
* list_tail_rcu - returns the prev pointer of the head of the list
4757
* @head: the head of the list

kernel/cgroup/dmem.c

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -14,6 +14,7 @@
1414
#include <linux/mutex.h>
1515
#include <linux/page_counter.h>
1616
#include <linux/parser.h>
17+
#include <linux/rculist.h>
1718
#include <linux/slab.h>
1819

1920
struct dmem_cgroup_region {

kernel/rcu/tasks.h

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -553,13 +553,13 @@ static void rcu_tasks_invoke_cbs(struct rcu_tasks *rtp, struct rcu_tasks_percpu
553553
rtpcp_next = rtp->rtpcp_array[index];
554554
if (rtpcp_next->cpu < smp_load_acquire(&rtp->percpu_dequeue_lim)) {
555555
cpuwq = rcu_cpu_beenfullyonline(rtpcp_next->cpu) ? rtpcp_next->cpu : WORK_CPU_UNBOUND;
556-
queue_work_on(cpuwq, system_wq, &rtpcp_next->rtp_work);
556+
queue_work_on(cpuwq, system_percpu_wq, &rtpcp_next->rtp_work);
557557
index++;
558558
if (index < num_possible_cpus()) {
559559
rtpcp_next = rtp->rtpcp_array[index];
560560
if (rtpcp_next->cpu < smp_load_acquire(&rtp->percpu_dequeue_lim)) {
561561
cpuwq = rcu_cpu_beenfullyonline(rtpcp_next->cpu) ? rtpcp_next->cpu : WORK_CPU_UNBOUND;
562-
queue_work_on(cpuwq, system_wq, &rtpcp_next->rtp_work);
562+
queue_work_on(cpuwq, system_percpu_wq, &rtpcp_next->rtp_work);
563563
}
564564
}
565565
}

kernel/rcu/tree.c

Lines changed: 7 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -3800,6 +3800,11 @@ static void rcu_barrier_handler(void *cpu_in)
38003800
* to complete. For example, if there are no RCU callbacks queued anywhere
38013801
* in the system, then rcu_barrier() is within its rights to return
38023802
* immediately, without waiting for anything, much less an RCU grace period.
3803+
* In fact, rcu_barrier() will normally not result in any RCU grace periods
3804+
* beyond those that were already destined to be executed.
3805+
*
3806+
* In kernels built with CONFIG_RCU_LAZY=y, this function also hurries all
3807+
* pending lazy RCU callbacks.
38033808
*/
38043809
void rcu_barrier(void)
38053810
{
@@ -4885,10 +4890,10 @@ void __init rcu_init(void)
48854890
rcutree_online_cpu(cpu);
48864891

48874892
/* Create workqueue for Tree SRCU and for expedited GPs. */
4888-
rcu_gp_wq = alloc_workqueue("rcu_gp", WQ_MEM_RECLAIM, 0);
4893+
rcu_gp_wq = alloc_workqueue("rcu_gp", WQ_MEM_RECLAIM | WQ_PERCPU, 0);
48894894
WARN_ON(!rcu_gp_wq);
48904895

4891-
sync_wq = alloc_workqueue("sync_wq", WQ_MEM_RECLAIM, 0);
4896+
sync_wq = alloc_workqueue("sync_wq", WQ_MEM_RECLAIM | WQ_UNBOUND, 0);
48924897
WARN_ON(!sync_wq);
48934898

48944899
/* Respect if explicitly disabled via a boot parameter. */

kernel/rcu/tree_plugin.h

Lines changed: 1 addition & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -626,11 +626,10 @@ notrace void rcu_preempt_deferred_qs(struct task_struct *t)
626626
*/
627627
static void rcu_preempt_deferred_qs_handler(struct irq_work *iwp)
628628
{
629-
unsigned long flags;
630629
struct rcu_data *rdp;
631630

631+
lockdep_assert_irqs_disabled();
632632
rdp = container_of(iwp, struct rcu_data, defer_qs_iw);
633-
local_irq_save(flags);
634633

635634
/*
636635
* If the IRQ work handler happens to run in the middle of RCU read-side
@@ -647,8 +646,6 @@ static void rcu_preempt_deferred_qs_handler(struct irq_work *iwp)
647646
*/
648647
if (rcu_preempt_depth() > 0)
649648
WRITE_ONCE(rdp->defer_qs_iw_pending, DEFER_QS_IDLE);
650-
651-
local_irq_restore(flags);
652649
}
653650

654651
/*

Comments (0)