  * This is default implementation.
  * Architectures and sub-architectures can override this.
  */
-unsigned long long __weak sched_clock(void)
+notrace unsigned long long __weak sched_clock(void)
 {
 	return (unsigned long long)(jiffies - INITIAL_JIFFIES)
 					* (NSEC_PER_SEC / HZ);
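Every hunk in this patch makes the same change: prefixing a function with notrace. These functions sit on the path tracers themselves use to timestamp events (ftrace calls into sched_clock_cpu() and friends), so letting the function tracer instrument them risks recursion; notrace exempts a function from mcount/fentry instrumentation. For reference, it expands to a compiler attribute roughly like this (the exact header and conditional variants differ across kernel versions):

/* include/linux/compiler_types.h (location varies across versions) */
#define notrace __attribute__((__no_instrument_function__))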
@@ -95,28 +95,28 @@ struct sched_clock_data {
 
 static DEFINE_PER_CPU_SHARED_ALIGNED(struct sched_clock_data, sched_clock_data);
 
-static inline struct sched_clock_data *this_scd(void)
+notrace static inline struct sched_clock_data *this_scd(void)
 {
 	return this_cpu_ptr(&sched_clock_data);
 }
 
-static inline struct sched_clock_data *cpu_sdc(int cpu)
+notrace static inline struct sched_clock_data *cpu_sdc(int cpu)
 {
 	return &per_cpu(sched_clock_data, cpu);
 }
 
-int sched_clock_stable(void)
+notrace int sched_clock_stable(void)
 {
 	return static_branch_likely(&__sched_clock_stable);
 }
 
-static void __scd_stamp(struct sched_clock_data *scd)
+notrace static void __scd_stamp(struct sched_clock_data *scd)
 {
 	scd->tick_gtod = ktime_get_ns();
 	scd->tick_raw = sched_clock();
 }
 
-static void __set_sched_clock_stable(void)
+notrace static void __set_sched_clock_stable(void)
 {
 	struct sched_clock_data *scd;
 
@@ -151,7 +151,7 @@ static void __set_sched_clock_stable(void)
  * The only way to fully avoid random clock jumps is to boot with:
  * "tsc=unstable".
  */
-static void __sched_clock_work(struct work_struct *work)
+notrace static void __sched_clock_work(struct work_struct *work)
 {
 	struct sched_clock_data *scd;
 	int cpu;
@@ -177,7 +177,7 @@ static void __sched_clock_work(struct work_struct *work)
 
 static DECLARE_WORK(sched_clock_work, __sched_clock_work);
 
-static void __clear_sched_clock_stable(void)
+notrace static void __clear_sched_clock_stable(void)
 {
 	if (!sched_clock_stable())
 		return;
@@ -186,7 +186,7 @@ static void __clear_sched_clock_stable(void)
 	schedule_work(&sched_clock_work);
 }
 
-void clear_sched_clock_stable(void)
+notrace void clear_sched_clock_stable(void)
 {
 	__sched_clock_stable_early = 0;
 
@@ -196,7 +196,7 @@ void clear_sched_clock_stable(void)
 	__clear_sched_clock_stable();
 }
 
-static void __sched_clock_gtod_offset(void)
+notrace static void __sched_clock_gtod_offset(void)
 {
 	struct sched_clock_data *scd = this_scd();
 
@@ -246,12 +246,12 @@ late_initcall(sched_clock_init_late);
  * min, max except they take wrapping into account
  */
 
-static inline u64 wrap_min(u64 x, u64 y)
+notrace static inline u64 wrap_min(u64 x, u64 y)
 {
 	return (s64)(x - y) < 0 ? x : y;
 }
 
-static inline u64 wrap_max(u64 x, u64 y)
+notrace static inline u64 wrap_max(u64 x, u64 y)
 {
 	return (s64)(x - y) > 0 ? x : y;
 }
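The (s64) cast is what makes these wrapping-aware: they order two u64 counters by the sign of their modular difference rather than by raw magnitude, so a value that has just wrapped past 2^64 still sorts after one that has not. This only holds while the two values stay within 2^63 of each other, which is true for clocks compared at tick granularity. A standalone sketch demonstrating the behavior, with userspace types standing in for u64/s64:

#include <stdint.h>
#include <stdio.h>

/* Same trick as the kernel's wrap_min(): order two u64 counters by the
 * sign of their wrapping difference rather than by raw magnitude. */
static uint64_t wrap_min(uint64_t x, uint64_t y)
{
	return (int64_t)(x - y) < 0 ? x : y;
}

int main(void)
{
	uint64_t just_wrapped = 1;               /* counter went past 2^64 */
	uint64_t about_to_wrap = UINT64_MAX - 1;

	/* A plain min() would pick 1; wrap_min() treats 1 as the later
	 * value and returns UINT64_MAX - 1 as the "minimum". */
	printf("%llu\n", (unsigned long long)wrap_min(just_wrapped, about_to_wrap));
	return 0;
}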
@@ -262,7 +262,7 @@ static inline u64 wrap_max(u64 x, u64 y)
  * - filter out backward motion
  * - use the GTOD tick value to create a window to filter crazy TSC values
  */
-static u64 sched_clock_local(struct sched_clock_data *scd)
+notrace static u64 sched_clock_local(struct sched_clock_data *scd)
 {
 	u64 now, clock, old_clock, min_clock, max_clock, gtod;
 	s64 delta;
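The body elided between these hunks implements the two bullet points in the comment above. A rough paraphrase of the upstream logic around this version (simplified, not the verbatim source):

	/*
	 *	delta     = sched_clock() - scd->tick_raw;          raw advance since tick
	 *	gtod      = scd->tick_gtod + __gtod_offset;
	 *	clock     = gtod + delta;
	 *	min_clock = wrap_max(gtod, scd->clock);             never run backward
	 *	max_clock = wrap_max(scd->clock, gtod + TICK_NSEC); cap a runaway TSC
	 *
	 *	clock = wrap_max(clock, min_clock);
	 *	clock = wrap_min(clock, max_clock);
	 *
	 * The result is published to scd->clock with a cmpxchg64() loop, so
	 * concurrent callers retry rather than let the clock regress.
	 */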
@@ -295,7 +295,7 @@ static u64 sched_clock_local(struct sched_clock_data *scd)
 	return clock;
 }
 
-static u64 sched_clock_remote(struct sched_clock_data *scd)
+notrace static u64 sched_clock_remote(struct sched_clock_data *scd)
 {
 	struct sched_clock_data *my_scd = this_scd();
 	u64 this_clock, remote_clock;
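The elided remote path handles reading another CPU's clock without letting it appear to move backward. In outline (again a paraphrase, not the verbatim body):

	/*
	 *	this_clock   = sched_clock_local(my_scd);
	 *	remote_clock = scd->clock;
	 *
	 * If the remote clock lags this CPU's, cmpxchg64() the larger value
	 * into scd->clock; otherwise pull my_scd->clock forward instead.
	 * Either way the max of the two is returned, so an observer never
	 * sees the remote CPU's clock run backward.
	 */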
@@ -362,7 +362,7 @@ static u64 sched_clock_remote(struct sched_clock_data *scd)
  *
  * See cpu_clock().
  */
-u64 sched_clock_cpu(int cpu)
+notrace u64 sched_clock_cpu(int cpu)
 {
 	struct sched_clock_data *scd;
 	u64 clock;
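Worth noting for the tracing angle: when the clock is stable, this function never touches the per-CPU data at all. The elided body begins with a fast path along these lines (paraphrased from upstream around this version):

	/*
	 *	if (sched_clock_stable())
	 *		return sched_clock() + __sched_clock_offset;
	 *
	 * Only unstable clocks fall through to sched_clock_local() /
	 * sched_clock_remote() on the per-CPU sched_clock_data.
	 */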
@@ -386,7 +386,7 @@ u64 sched_clock_cpu(int cpu)
 }
 EXPORT_SYMBOL_GPL(sched_clock_cpu);
 
-void sched_clock_tick(void)
+notrace void sched_clock_tick(void)
 {
 	struct sched_clock_data *scd;
 
@@ -403,7 +403,7 @@ void sched_clock_tick(void)
 	sched_clock_local(scd);
 }
 
-void sched_clock_tick_stable(void)
+notrace void sched_clock_tick_stable(void)
 {
 	if (!sched_clock_stable())
 		return;
@@ -423,7 +423,7 @@ void sched_clock_tick_stable(void)
 /*
  * We are going deep-idle (irqs are disabled):
  */
-void sched_clock_idle_sleep_event(void)
+notrace void sched_clock_idle_sleep_event(void)
 {
 	sched_clock_cpu(smp_processor_id());
 }
@@ -432,7 +432,7 @@ EXPORT_SYMBOL_GPL(sched_clock_idle_sleep_event);
 /*
  * We just idled; resync with ktime.
  */
-void sched_clock_idle_wakeup_event(void)
+notrace void sched_clock_idle_wakeup_event(void)
 {
 	unsigned long flags;
 
@@ -458,7 +458,7 @@ void __init sched_clock_init(void)
 	local_irq_enable();
 }
 
-u64 sched_clock_cpu(int cpu)
+notrace u64 sched_clock_cpu(int cpu)
 {
 	if (!static_branch_likely(&sched_clock_running))
 		return 0;
@@ -476,7 +476,7 @@ u64 sched_clock_cpu(int cpu)
  * On bare metal this function should return the same as local_clock.
  * Architectures and sub-architectures can override this.
  */
-u64 __weak running_clock(void)
+notrace u64 __weak running_clock(void)
 {
 	return local_clock();
 }
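Because running_clock() (like sched_clock() above) is declared __weak, an architecture overrides it simply by defining a non-weak symbol of the same name; the linker prefers the strong definition. A minimal, hypothetical override for illustration only (arch_guest_clock() is an invented helper, not a real kernel API):

/* Hypothetical arch file: replaces the __weak generic running_clock(). */
notrace u64 running_clock(void)
{
	/* e.g. report only the time the guest actually ran */
	return arch_guest_clock();
}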