Commit fa28abe
sched/headers: sched/clock: Mark all functions 'notrace', remove CC_FLAGS_FTRACE build asymmetry
Mark all non-init functions in kernel/sched/clock.c as 'notrace', instead of turning tracing off for the whole file via CC_FLAGS_FTRACE. This allows the file to be treated like any other scheduler file, and it can be #include-ed in compound compilation units as well.

Signed-off-by: Ingo Molnar <mingo@kernel.org>
Reviewed-by: Peter Zijlstra <peterz@infradead.org>
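For context, a minimal sketch of what the per-function annotation does (not part of this commit). The kernel defines notrace in <linux/compiler_types.h> as roughly the attribute shown below; a function carrying it is not instrumented by the function tracer, so individual functions can opt out while the rest of the file keeps being built with CC_FLAGS_FTRACE. example_helper() is a hypothetical illustration:

    /* Roughly how the kernel defines the annotation: */
    #define notrace __attribute__((__no_instrument_function__))

    /* The function tracer skips this function; every other function
     * in the same file remains traceable. */
    notrace static int example_helper(int x)
    {
            return x * 2;
    }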
1 parent: d90a2f1

2 files changed: 21 additions & 24 deletions

kernel/sched/Makefile

Lines changed: 0 additions & 3 deletions
@@ -1,7 +1,4 @@
 # SPDX-License-Identifier: GPL-2.0
-ifdef CONFIG_FUNCTION_TRACER
-CFLAGS_REMOVE_clock.o = $(CC_FLAGS_FTRACE)
-endif
 
 # The compilers are complaining about unused variables inside an if(0) scope
 # block. This is daft, shut them up.
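(For reference: in Kbuild, a CFLAGS_REMOVE_<object>.o assignment strips the listed flags from the compilation of that one object, and CC_FLAGS_FTRACE holds the compiler flags that enable ftrace instrumentation, typically -pg. Removing this block means clock.o is now built with the same flags as every other scheduler object, which is the build asymmetry the commit title refers to.)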

kernel/sched/clock.c

Lines changed: 21 additions & 21 deletions
@@ -61,7 +61,7 @@
  * This is default implementation.
  * Architectures and sub-architectures can override this.
  */
-unsigned long long __weak sched_clock(void)
+notrace unsigned long long __weak sched_clock(void)
 {
 	return (unsigned long long)(jiffies - INITIAL_JIFFIES)
 					* (NSEC_PER_SEC / HZ);
@@ -95,28 +95,28 @@ struct sched_clock_data {
 
 static DEFINE_PER_CPU_SHARED_ALIGNED(struct sched_clock_data, sched_clock_data);
 
-static inline struct sched_clock_data *this_scd(void)
+notrace static inline struct sched_clock_data *this_scd(void)
 {
 	return this_cpu_ptr(&sched_clock_data);
 }
 
-static inline struct sched_clock_data *cpu_sdc(int cpu)
+notrace static inline struct sched_clock_data *cpu_sdc(int cpu)
 {
 	return &per_cpu(sched_clock_data, cpu);
 }
 
-int sched_clock_stable(void)
+notrace int sched_clock_stable(void)
 {
 	return static_branch_likely(&__sched_clock_stable);
 }
 
-static void __scd_stamp(struct sched_clock_data *scd)
+notrace static void __scd_stamp(struct sched_clock_data *scd)
 {
 	scd->tick_gtod = ktime_get_ns();
 	scd->tick_raw = sched_clock();
 }
 
-static void __set_sched_clock_stable(void)
+notrace static void __set_sched_clock_stable(void)
 {
 	struct sched_clock_data *scd;
 
@@ -151,7 +151,7 @@ static void __set_sched_clock_stable(void)
  * The only way to fully avoid random clock jumps is to boot with:
  * "tsc=unstable".
  */
-static void __sched_clock_work(struct work_struct *work)
+notrace static void __sched_clock_work(struct work_struct *work)
 {
 	struct sched_clock_data *scd;
 	int cpu;
@@ -177,7 +177,7 @@ static void __sched_clock_work(struct work_struct *work)
 
 static DECLARE_WORK(sched_clock_work, __sched_clock_work);
 
-static void __clear_sched_clock_stable(void)
+notrace static void __clear_sched_clock_stable(void)
 {
 	if (!sched_clock_stable())
 		return;
@@ -186,7 +186,7 @@ static void __clear_sched_clock_stable(void)
 	schedule_work(&sched_clock_work);
 }
 
-void clear_sched_clock_stable(void)
+notrace void clear_sched_clock_stable(void)
 {
 	__sched_clock_stable_early = 0;
 
@@ -196,7 +196,7 @@ void clear_sched_clock_stable(void)
 		__clear_sched_clock_stable();
 }
 
-static void __sched_clock_gtod_offset(void)
+notrace static void __sched_clock_gtod_offset(void)
 {
 	struct sched_clock_data *scd = this_scd();
 
@@ -246,12 +246,12 @@ late_initcall(sched_clock_init_late);
  * min, max except they take wrapping into account
  */
 
-static inline u64 wrap_min(u64 x, u64 y)
+notrace static inline u64 wrap_min(u64 x, u64 y)
 {
 	return (s64)(x - y) < 0 ? x : y;
 }
 
-static inline u64 wrap_max(u64 x, u64 y)
+notrace static inline u64 wrap_max(u64 x, u64 y)
 {
 	return (s64)(x - y) > 0 ? x : y;
 }
@@ -262,7 +262,7 @@ static inline u64 wrap_max(u64 x, u64 y)
  * - filter out backward motion
  * - use the GTOD tick value to create a window to filter crazy TSC values
  */
-static u64 sched_clock_local(struct sched_clock_data *scd)
+notrace static u64 sched_clock_local(struct sched_clock_data *scd)
 {
 	u64 now, clock, old_clock, min_clock, max_clock, gtod;
 	s64 delta;
@@ -295,7 +295,7 @@ static u64 sched_clock_local(struct sched_clock_data *scd)
 	return clock;
 }
 
-static u64 sched_clock_remote(struct sched_clock_data *scd)
+notrace static u64 sched_clock_remote(struct sched_clock_data *scd)
 {
 	struct sched_clock_data *my_scd = this_scd();
 	u64 this_clock, remote_clock;
@@ -362,7 +362,7 @@ static u64 sched_clock_remote(struct sched_clock_data *scd)
  *
  * See cpu_clock().
  */
-u64 sched_clock_cpu(int cpu)
+notrace u64 sched_clock_cpu(int cpu)
 {
 	struct sched_clock_data *scd;
 	u64 clock;
@@ -386,7 +386,7 @@ u64 sched_clock_cpu(int cpu)
 }
 EXPORT_SYMBOL_GPL(sched_clock_cpu);
 
-void sched_clock_tick(void)
+notrace void sched_clock_tick(void)
 {
 	struct sched_clock_data *scd;
 
@@ -403,7 +403,7 @@ void sched_clock_tick(void)
 	sched_clock_local(scd);
 }
 
-void sched_clock_tick_stable(void)
+notrace void sched_clock_tick_stable(void)
 {
 	if (!sched_clock_stable())
 		return;
@@ -423,7 +423,7 @@ void sched_clock_tick_stable(void)
 /*
  * We are going deep-idle (irqs are disabled):
  */
-void sched_clock_idle_sleep_event(void)
+notrace void sched_clock_idle_sleep_event(void)
 {
 	sched_clock_cpu(smp_processor_id());
 }
@@ -432,7 +432,7 @@ EXPORT_SYMBOL_GPL(sched_clock_idle_sleep_event);
 /*
  * We just idled; resync with ktime.
  */
-void sched_clock_idle_wakeup_event(void)
+notrace void sched_clock_idle_wakeup_event(void)
 {
 	unsigned long flags;
 
@@ -458,7 +458,7 @@ void __init sched_clock_init(void)
 	local_irq_enable();
 }
 
-u64 sched_clock_cpu(int cpu)
+notrace u64 sched_clock_cpu(int cpu)
 {
 	if (!static_branch_likely(&sched_clock_running))
 		return 0;
@@ -476,7 +476,7 @@ u64 sched_clock_cpu(int cpu)
  * On bare metal this function should return the same as local_clock.
  * Architectures and sub-architectures can override this.
  */
-u64 __weak running_clock(void)
+notrace u64 __weak running_clock(void)
 {
 	return local_clock();
 }
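A note on the two __weak functions above: since the generic sched_clock() and running_clock() are weak symbols, an architecture overrides them simply by providing a strong definition. A hedged sketch, where read_cycle_counter_ns() is a hypothetical stand-in for a real hardware counter read:

    /* Hypothetical arch override -- read_cycle_counter_ns() does not
     * exist in the tree; a real implementation reads e.g. the TSC on
     * x86 and converts it to nanoseconds. */
    notrace unsigned long long sched_clock(void)
    {
            return read_cycle_counter_ns();
    }

Keeping such overrides notrace matters because ftrace itself timestamps events with this clock; letting the tracer instrument it would recurse.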
