@@ -395,15 +395,10 @@ enum uclamp_id {
 	UCLAMP_CNT
 };
 
-#ifdef CONFIG_SMP
 extern struct root_domain def_root_domain;
 extern struct mutex sched_domains_mutex;
 extern void sched_domains_mutex_lock(void);
 extern void sched_domains_mutex_unlock(void);
-#else
-static inline void sched_domains_mutex_lock(void) { }
-static inline void sched_domains_mutex_unlock(void) { }
-#endif
 
 struct sched_param {
 	int sched_priority;
@@ -604,15 +599,13 @@ struct sched_entity {
 	unsigned long			runnable_weight;
 #endif
 
-#ifdef CONFIG_SMP
 	/*
 	 * Per entity load average tracking.
 	 *
 	 * Put into separate cache line so it does not
 	 * collide with read-mostly values above.
	 */
 	struct sched_avg		avg;
-#endif
 };
 
 struct sched_rt_entity {
@@ -842,7 +835,6 @@ struct task_struct {
 	struct alloc_tag		*alloc_tag;
 #endif
 
-#ifdef CONFIG_SMP
 	int				on_cpu;
 	struct __call_single_node	wake_entry;
 	unsigned int			wakee_flips;
@@ -858,7 +850,6 @@ struct task_struct {
 	 */
 	int				recent_used_cpu;
 	int				wake_cpu;
-#endif
 	int				on_rq;
 
 	int				prio;
@@ -917,9 +908,7 @@ struct task_struct {
 	cpumask_t			*user_cpus_ptr;
 	cpumask_t			cpus_mask;
 	void				*migration_pending;
-#ifdef CONFIG_SMP
 	unsigned short			migration_disabled;
-#endif
 	unsigned short			migration_flags;
 
 #ifdef CONFIG_PREEMPT_RCU
@@ -951,10 +940,8 @@ struct task_struct {
 	struct sched_info		sched_info;
 
 	struct list_head		tasks;
-#ifdef CONFIG_SMP
 	struct plist_node		pushable_tasks;
 	struct rb_node			pushable_dl_tasks;
-#endif
 
 	struct mm_struct		*mm;
 	struct mm_struct		*active_mm;
@@ -1778,12 +1765,8 @@ extern struct pid *cad_pid;
 
 static __always_inline bool is_percpu_thread(void)
 {
-#ifdef CONFIG_SMP
 	return (current->flags & PF_NO_SETAFFINITY) &&
 		(current->nr_cpus_allowed == 1);
-#else
-	return true;
-#endif
 }
 
 /* Per-process atomic flags. */
@@ -1848,7 +1831,6 @@ extern int cpuset_cpumask_can_shrink(const struct cpumask *cur, const struct cpumask *trial);
 extern int task_can_attach(struct task_struct *p);
 extern int dl_bw_alloc(int cpu, u64 dl_bw);
 extern void dl_bw_free(int cpu, u64 dl_bw);
-#ifdef CONFIG_SMP
 
 /* do_set_cpus_allowed() - consider using set_cpus_allowed_ptr() instead */
 extern void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask);
@@ -1866,33 +1848,6 @@ extern void release_user_cpus_ptr(struct task_struct *p);
 extern int dl_task_check_affinity(struct task_struct *p, const struct cpumask *mask);
 extern void force_compatible_cpus_allowed_ptr(struct task_struct *p);
 extern void relax_compatible_cpus_allowed_ptr(struct task_struct *p);
-#else
-static inline void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
-{
-}
-static inline int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
-{
-	/* Opencoded cpumask_test_cpu(0, new_mask) to avoid dependency on cpumask.h */
-	if ((*cpumask_bits(new_mask) & 1) == 0)
-		return -EINVAL;
-	return 0;
-}
-static inline int dup_user_cpus_ptr(struct task_struct *dst, struct task_struct *src, int node)
-{
-	if (src->user_cpus_ptr)
-		return -EINVAL;
-	return 0;
-}
-static inline void release_user_cpus_ptr(struct task_struct *p)
-{
-	WARN_ON(p->user_cpus_ptr);
-}
-
-static inline int dl_task_check_affinity(struct task_struct *p, const struct cpumask *mask)
-{
-	return 0;
-}
-#endif
 
 extern int yield_to(struct task_struct *p, bool preempt);
 extern void set_user_nice(struct task_struct *p, long nice);
@@ -1981,11 +1936,7 @@ extern int wake_up_state(struct task_struct *tsk, unsigned int state);
 extern int wake_up_process(struct task_struct *tsk);
 extern void wake_up_new_task(struct task_struct *tsk);
 
-#ifdef CONFIG_SMP
 extern void kick_process(struct task_struct *tsk);
-#else
-static inline void kick_process(struct task_struct *tsk) { }
-#endif
 
 extern void __set_task_comm(struct task_struct *tsk, const char *from, bool exec);
 #define set_task_comm(tsk, from) ({			\
@@ -2012,7 +1963,6 @@ extern void __set_task_comm(struct task_struct *tsk, const char *from, bool exec
 	buf;						\
 })
 
-#ifdef CONFIG_SMP
 static __always_inline void scheduler_ipi(void)
 {
 	/*
@@ -2022,9 +1972,6 @@ static __always_inline void scheduler_ipi(void)
 	 */
 	preempt_fold_need_resched();
 }
-#else
-static inline void scheduler_ipi(void) { }
-#endif
 
 extern unsigned long wait_task_inactive(struct task_struct *, unsigned int match_state);
 
@@ -2230,7 +2177,6 @@ extern long sched_getaffinity(pid_t pid, struct cpumask *mask);
 #define TASK_SIZE_OF(tsk)	TASK_SIZE
 #endif
 
-#ifdef CONFIG_SMP
 static inline bool owner_on_cpu(struct task_struct *owner)
 {
 	/*
@@ -2242,7 +2188,6 @@ static inline bool owner_on_cpu(struct task_struct *owner)
 
 /* Returns effective CPU energy utilization, as seen by the scheduler */
 unsigned long sched_cpu_util(int cpu);
-#endif /* CONFIG_SMP */
 
 #ifdef CONFIG_SCHED_CORE
 extern void sched_core_free(struct task_struct *tsk);
0 commit comments