@@ -113,13 +113,13 @@ struct task_group;
113113 __TASK_TRACED | EXIT_DEAD | EXIT_ZOMBIE | \
114114 TASK_PARKED)
115115
116- #define task_is_running(task)		(READ_ONCE((task)->state) == TASK_RUNNING)
116+ #define task_is_running(task)		(READ_ONCE((task)->__state) == TASK_RUNNING)
117117
118- #define task_is_traced(task)		((task->state & __TASK_TRACED) != 0)
118+ #define task_is_traced(task)		((READ_ONCE(task->__state) & __TASK_TRACED) != 0)
119119
120- #define task_is_stopped(task)		((task->state & __TASK_STOPPED) != 0)
120+ #define task_is_stopped(task)		((READ_ONCE(task->__state) & __TASK_STOPPED) != 0)
121121
122- #define task_is_stopped_or_traced(task)	((task->state & (__TASK_STOPPED | __TASK_TRACED)) != 0)
122+ #define task_is_stopped_or_traced(task)	((READ_ONCE(task->__state) & (__TASK_STOPPED | __TASK_TRACED)) != 0)
123123
124124#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
125125
@@ -134,14 +134,14 @@ struct task_group;
134134 do { \
135135 WARN_ON_ONCE(is_special_task_state(state_value));\
136136 current->task_state_change = _THIS_IP_; \
137- 		current->state = (state_value);			\
137+ 		WRITE_ONCE(current->__state, (state_value));	\
138138 } while (0)
139139
140140#define set_current_state(state_value)				\
141141 do { \
142142 WARN_ON_ONCE(is_special_task_state(state_value));\
143143 current->task_state_change = _THIS_IP_; \
144- 		smp_store_mb(current->state, (state_value));	\
144+ 		smp_store_mb(current->__state, (state_value));	\
145145 } while (0)
146146
147147#define set_special_state(state_value)					\
@@ -150,7 +150,7 @@ struct task_group;
150150 		WARN_ON_ONCE(!is_special_task_state(state_value));	\
151151 		raw_spin_lock_irqsave(&current->pi_lock, flags);	\
152152 		current->task_state_change = _THIS_IP_;			\
153- 		current->state = (state_value);				\
153+ 		WRITE_ONCE(current->__state, (state_value));		\
154154 		raw_spin_unlock_irqrestore(&current->pi_lock, flags);	\
155155 	} while (0)
156156#else
@@ -192,10 +192,10 @@ struct task_group;
192192 * Also see the comments of try_to_wake_up().
193193 */
194194#define __set_current_state(state_value)				\
195- 	current->state = (state_value)
195+ 	WRITE_ONCE(current->__state, (state_value))
196196
197197#define set_current_state(state_value)					\
198- 	smp_store_mb(current->state, (state_value))
198+ 	smp_store_mb(current->__state, (state_value))
199199
200200/*
201201 * set_special_state() should be used for those states when the blocking task
@@ -207,13 +207,13 @@ struct task_group;
207207 do { \
208208 unsigned long flags; /* may shadow */ \
209209 		raw_spin_lock_irqsave(&current->pi_lock, flags);	\
210- 		current->state = (state_value);				\
210+ 		WRITE_ONCE(current->__state, (state_value));		\
211211 		raw_spin_unlock_irqrestore(&current->pi_lock, flags);	\
212212 	} while (0)
213213
214214#endif
215215
216- #define get_current_state()	READ_ONCE(current->state)
216+ #define get_current_state()	READ_ONCE(current->__state)
217217
218218/* Task command name length: */
219219#define TASK_COMM_LEN 16
@@ -666,8 +666,7 @@ struct task_struct {
666666 */
667667 struct thread_info thread_info ;
668668#endif
669- 	/* -1 unrunnable, 0 runnable, >0 stopped: */
670- 	volatile long			state;
669+ 	unsigned int			__state;
671670
672671 /*
673672 * This begins the randomizable portion of task_struct. Only
@@ -1532,7 +1531,7 @@ static inline pid_t task_pgrp_nr(struct task_struct *tsk)
15321531
15331532static inline unsigned int task_state_index(struct task_struct *tsk)
15341533{
1535- 	unsigned int tsk_state = READ_ONCE(tsk->state);
1534+ 	unsigned int tsk_state = READ_ONCE(tsk->__state);
15361535 	unsigned int state = (tsk_state | tsk->exit_state) & TASK_REPORT;
15371536
15381537 	BUILD_BUG_ON_NOT_POWER_OF_2(TASK_REPORT_MAX);
@@ -1840,10 +1839,10 @@ static __always_inline void scheduler_ipi(void)
18401839 */
18411840 	preempt_fold_need_resched();
18421841}
1843- extern unsigned long wait_task_inactive(struct task_struct *, long match_state);
1842+ extern unsigned long wait_task_inactive(struct task_struct *, unsigned int match_state);
18441843#else
18451844static inline void scheduler_ipi(void) { }
1846- static inline unsigned long wait_task_inactive(struct task_struct *p, long match_state)
1845+ static inline unsigned long wait_task_inactive(struct task_struct *p, unsigned int match_state)
18471846{
18481847 	return 1;
18491848}
0 commit comments