@@ -169,8 +169,6 @@ static const struct file_operations sched_feat_fops = {
169169 .release = single_release ,
170170};
171171
172- #ifdef CONFIG_SMP
173-
174172static ssize_t sched_scaling_write (struct file * filp , const char __user * ubuf ,
175173 size_t cnt , loff_t * ppos )
176174{
@@ -217,8 +215,6 @@ static const struct file_operations sched_scaling_fops = {
217215 .release = single_release ,
218216};
219217
220- #endif /* CONFIG_SMP */
221-
222218#ifdef CONFIG_PREEMPT_DYNAMIC
223219
224220static ssize_t sched_dynamic_write (struct file * filp , const char __user * ubuf ,
@@ -511,15 +507,13 @@ static __init int sched_init_debug(void)
511507 debugfs_create_u32 ("latency_warn_ms" , 0644 , debugfs_sched , & sysctl_resched_latency_warn_ms );
512508 debugfs_create_u32 ("latency_warn_once" , 0644 , debugfs_sched , & sysctl_resched_latency_warn_once );
513509
514- #ifdef CONFIG_SMP
515510 debugfs_create_file ("tunable_scaling" , 0644 , debugfs_sched , NULL , & sched_scaling_fops );
516511 debugfs_create_u32 ("migration_cost_ns" , 0644 , debugfs_sched , & sysctl_sched_migration_cost );
517512 debugfs_create_u32 ("nr_migrate" , 0644 , debugfs_sched , & sysctl_sched_nr_migrate );
518513
519514 sched_domains_mutex_lock ();
520515 update_sched_domain_debugfs ();
521516 sched_domains_mutex_unlock ();
522- #endif /* CONFIG_SMP */
523517
524518#ifdef CONFIG_NUMA_BALANCING
525519 numa = debugfs_create_dir ("numa_balancing" , debugfs_sched );
@@ -685,11 +679,9 @@ static void print_cfs_group_stats(struct seq_file *m, int cpu, struct task_group
685679 }
686680
687681 P (se -> load .weight );
688- #ifdef CONFIG_SMP
689682 P (se -> avg .load_avg );
690683 P (se -> avg .util_avg );
691684 P (se -> avg .runnable_avg );
692- #endif /* CONFIG_SMP */
693685
694686#undef PN_SCHEDSTAT
695687#undef PN
@@ -849,7 +841,6 @@ void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
849841 SEQ_printf (m , " .%-30s: %d\n" , "h_nr_queued" , cfs_rq -> h_nr_queued );
850842 SEQ_printf (m , " .%-30s: %d\n" , "h_nr_idle" , cfs_rq -> h_nr_idle );
851843 SEQ_printf (m , " .%-30s: %ld\n" , "load" , cfs_rq -> load .weight );
852- #ifdef CONFIG_SMP
853844 SEQ_printf (m , " .%-30s: %lu\n" , "load_avg" ,
854845 cfs_rq -> avg .load_avg );
855846 SEQ_printf (m , " .%-30s: %lu\n" , "runnable_avg" ,
@@ -870,7 +861,6 @@ void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
870861 SEQ_printf (m , " .%-30s: %ld\n" , "tg_load_avg" ,
871862 atomic_long_read (& cfs_rq -> tg -> load_avg ));
872863#endif /* CONFIG_FAIR_GROUP_SCHED */
873- #endif /* CONFIG_SMP */
874864#ifdef CONFIG_CFS_BANDWIDTH
875865 SEQ_printf (m , " .%-30s: %d\n" , "throttled" ,
876866 cfs_rq -> throttled );
@@ -967,12 +957,10 @@ do { \
967957#undef P
968958#undef PN
969959
970- #ifdef CONFIG_SMP
971960#define P64 (n ) SEQ_printf(m, " .%-30s: %Ld\n", #n, rq->n);
972961 P64 (avg_idle );
973962 P64 (max_idle_balance_cost );
974963#undef P64
975- #endif /* CONFIG_SMP */
976964
977965#define P (n ) SEQ_printf(m, " .%-30s: %d\n", #n, schedstat_val(rq->n));
978966 if (schedstat_enabled ()) {
@@ -1242,7 +1230,6 @@ void proc_sched_show_task(struct task_struct *p, struct pid_namespace *ns,
12421230 __PS ("nr_involuntary_switches" , p -> nivcsw );
12431231
12441232 P (se .load .weight );
1245- #ifdef CONFIG_SMP
12461233 P (se .avg .load_sum );
12471234 P (se .avg .runnable_sum );
12481235 P (se .avg .util_sum );
@@ -1251,7 +1238,6 @@ void proc_sched_show_task(struct task_struct *p, struct pid_namespace *ns,
12511238 P (se .avg .util_avg );
12521239 P (se .avg .last_update_time );
12531240 PM (se .avg .util_est , ~UTIL_AVG_UNCHANGED );
1254- #endif /* CONFIG_SMP */
12551241#ifdef CONFIG_UCLAMP_TASK
12561242 __PS ("uclamp.min" , p -> uclamp_req [UCLAMP_MIN ].value );
12571243 __PS ("uclamp.max" , p -> uclamp_req [UCLAMP_MAX ].value );