Skip to content

Commit c04507a

Browse files
Peter Zijlstra authored and KAGA-KOKO committed
sched: Provide and use set_need_resched_current()
set_tsk_need_resched(current) requires set_preempt_need_resched(current) to work correctly outside of the scheduler. Provide set_need_resched_current(), which wraps this correctly, and replace all the open-coded instances.

Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Link: https://patch.msgid.link/20251116174750.665769842@linutronix.de
1 parent 33cf66d commit c04507a

7 files changed

Lines changed: 21 additions & 26 deletions

File tree

arch/s390/mm/pfault.c

Lines changed: 1 addition & 2 deletions
Original file line number | Diff line number | Diff line change
@@ -199,8 +199,7 @@ static void pfault_interrupt(struct ext_code ext_code,
199199
* return to userspace schedule() to block.
200200
*/
201201
__set_current_state(TASK_UNINTERRUPTIBLE);
202-
set_tsk_need_resched(tsk);
203-
set_preempt_need_resched();
202+
set_need_resched_current();
204203
}
205204
}
206205
out:

include/linux/sched.h

Lines changed: 7 additions & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -2058,6 +2058,13 @@ static inline int test_tsk_need_resched(struct task_struct *tsk)
20582058
return unlikely(test_tsk_thread_flag(tsk,TIF_NEED_RESCHED));
20592059
}
20602060

2061+
static inline void set_need_resched_current(void)
2062+
{
2063+
lockdep_assert_irqs_disabled();
2064+
set_tsk_need_resched(current);
2065+
set_preempt_need_resched();
2066+
}
2067+
20612068
/*
20622069
* cond_resched() and cond_resched_lock(): latency reduction via
20632070
* explicit rescheduling in places that are safe. The return

kernel/rcu/tiny.c

Lines changed: 3 additions & 5 deletions
Original file line number | Diff line number | Diff line change
@@ -70,12 +70,10 @@ void rcu_qs(void)
7070
*/
7171
void rcu_sched_clock_irq(int user)
7272
{
73-
if (user) {
73+
if (user)
7474
rcu_qs();
75-
} else if (rcu_ctrlblk.donetail != rcu_ctrlblk.curtail) {
76-
set_tsk_need_resched(current);
77-
set_preempt_need_resched();
78-
}
75+
else if (rcu_ctrlblk.donetail != rcu_ctrlblk.curtail)
76+
set_need_resched_current();
7977
}
8078

8179
/*

kernel/rcu/tree.c

Lines changed: 5 additions & 9 deletions
Original file line number | Diff line number | Diff line change
@@ -2696,10 +2696,8 @@ void rcu_sched_clock_irq(int user)
26962696
/* The load-acquire pairs with the store-release setting to true. */
26972697
if (smp_load_acquire(this_cpu_ptr(&rcu_data.rcu_urgent_qs))) {
26982698
/* Idle and userspace execution already are quiescent states. */
2699-
if (!rcu_is_cpu_rrupt_from_idle() && !user) {
2700-
set_tsk_need_resched(current);
2701-
set_preempt_need_resched();
2702-
}
2699+
if (!rcu_is_cpu_rrupt_from_idle() && !user)
2700+
set_need_resched_current();
27032701
__this_cpu_write(rcu_data.rcu_urgent_qs, false);
27042702
}
27052703
rcu_flavor_sched_clock_irq(user);
@@ -2824,7 +2822,6 @@ static void strict_work_handler(struct work_struct *work)
28242822
/* Perform RCU core processing work for the current CPU. */
28252823
static __latent_entropy void rcu_core(void)
28262824
{
2827-
unsigned long flags;
28282825
struct rcu_data *rdp = raw_cpu_ptr(&rcu_data);
28292826
struct rcu_node *rnp = rdp->mynode;
28302827

@@ -2837,8 +2834,8 @@ static __latent_entropy void rcu_core(void)
28372834
if (IS_ENABLED(CONFIG_PREEMPT_COUNT) && (!(preempt_count() & PREEMPT_MASK))) {
28382835
rcu_preempt_deferred_qs(current);
28392836
} else if (rcu_preempt_need_deferred_qs(current)) {
2840-
set_tsk_need_resched(current);
2841-
set_preempt_need_resched();
2837+
guard(irqsave)();
2838+
set_need_resched_current();
28422839
}
28432840

28442841
/* Update RCU state based on any recent quiescent states. */
@@ -2847,10 +2844,9 @@ static __latent_entropy void rcu_core(void)
28472844
/* No grace period and unregistered callbacks? */
28482845
if (!rcu_gp_in_progress() &&
28492846
rcu_segcblist_is_enabled(&rdp->cblist) && !rcu_rdp_is_offloaded(rdp)) {
2850-
local_irq_save(flags);
2847+
guard(irqsave)();
28512848
if (!rcu_segcblist_restempty(&rdp->cblist, RCU_NEXT_READY_TAIL))
28522849
rcu_accelerate_cbs_unlocked(rnp, rdp);
2853-
local_irq_restore(flags);
28542850
}
28552851

28562852
rcu_check_gp_start_stall(rnp, rdp, rcu_jiffies_till_stall_check());

kernel/rcu/tree_exp.h

Lines changed: 1 addition & 2 deletions
Original file line number | Diff line number | Diff line change
@@ -729,8 +729,7 @@ static void rcu_exp_need_qs(void)
729729
__this_cpu_write(rcu_data.cpu_no_qs.b.exp, true);
730730
/* Store .exp before .rcu_urgent_qs. */
731731
smp_store_release(this_cpu_ptr(&rcu_data.rcu_urgent_qs), true);
732-
set_tsk_need_resched(current);
733-
set_preempt_need_resched();
732+
set_need_resched_current();
734733
}
735734

736735
#ifdef CONFIG_PREEMPT_RCU

kernel/rcu/tree_plugin.h

Lines changed: 3 additions & 6 deletions
Original file line number | Diff line number | Diff line change
@@ -753,8 +753,7 @@ static void rcu_read_unlock_special(struct task_struct *t)
753753
// Also if no expediting and no possible deboosting,
754754
// slow is OK. Plus nohz_full CPUs eventually get
755755
// tick enabled.
756-
set_tsk_need_resched(current);
757-
set_preempt_need_resched();
756+
set_need_resched_current();
758757
if (IS_ENABLED(CONFIG_IRQ_WORK) && irqs_were_disabled &&
759758
needs_exp && rdp->defer_qs_iw_pending != DEFER_QS_PENDING &&
760759
cpu_online(rdp->cpu)) {
@@ -813,10 +812,8 @@ static void rcu_flavor_sched_clock_irq(int user)
813812
if (rcu_preempt_depth() > 0 ||
814813
(preempt_count() & (PREEMPT_MASK | SOFTIRQ_MASK))) {
815814
/* No QS, force context switch if deferred. */
816-
if (rcu_preempt_need_deferred_qs(t)) {
817-
set_tsk_need_resched(t);
818-
set_preempt_need_resched();
819-
}
815+
if (rcu_preempt_need_deferred_qs(t))
816+
set_need_resched_current();
820817
} else if (rcu_preempt_need_deferred_qs(t)) {
821818
rcu_preempt_deferred_qs(t); /* Report deferred QS. */
822819
return;

kernel/rcu/tree_stall.h

Lines changed: 1 addition & 2 deletions
Original file line number | Diff line number | Diff line change
@@ -763,8 +763,7 @@ static void print_cpu_stall(unsigned long gp_seq, unsigned long gps)
763763
* progress and it could be we're stuck in kernel space without context
764764
* switches for an entirely unreasonable amount of time.
765765
*/
766-
set_tsk_need_resched(current);
767-
set_preempt_need_resched();
766+
set_need_resched_current();
768767
}
769768

770769
static bool csd_lock_suppress_rcu_stall;

0 commit comments

Comments (0)