Skip to content

Commit 5b26ef6

Browse files
ahunter6 authored and KAGA-KOKO committed
vdso: Consolidate nanoseconds calculation
Consolidate nanoseconds calculation to simplify and reduce code duplication.

Suggested-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Adrian Hunter <adrian.hunter@intel.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Link: https://lore.kernel.org/r/20240325064023.2997-3-adrian.hunter@intel.com
1 parent c8e3a8b commit 5b26ef6

2 files changed

Lines changed: 27 additions & 33 deletions

File tree

arch/x86/include/asm/vdso/gettimeofday.h

Lines changed: 8 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -300,42 +300,41 @@ static inline bool arch_vdso_cycles_ok(u64 cycles)
300300
#define vdso_cycles_ok arch_vdso_cycles_ok
301301

302302
/*
303-
* x86 specific delta calculation.
303+
* x86 specific calculation of nanoseconds for the current cycle count
304304
*
305305
* The regular implementation assumes that clocksource reads are globally
306306
* monotonic. The TSC can be slightly off across sockets which can cause
307307
* the regular delta calculation (@cycles - @last) to return a huge time
308308
* jump.
309309
*
310310
* Therefore it needs to be verified that @cycles are greater than
311-
* @last. If not then use @last, which is the base time of the current
312-
* conversion period.
311+
* @vd->cycles_last. If not then use @vd->cycles_last, which is the base
312+
* time of the current conversion period.
313313
*
314314
* This variant also uses a custom mask because while the clocksource mask of
315315
* all the VDSO capable clocksources on x86 is U64_MAX, the above code uses
316316
* U64_MAX as an exception value, additionally arch_vdso_cycles_ok() above
317317
* declares everything with the MSB/Sign-bit set as invalid. Therefore the
318318
* effective mask is S64_MAX.
319319
*/
320-
static __always_inline
321-
u64 vdso_calc_delta(u64 cycles, u64 last, u64 mask, u32 mult)
320+
static __always_inline u64 vdso_calc_ns(const struct vdso_data *vd, u64 cycles, u64 base)
322321
{
323322
/*
324323
* Due to the MSB/Sign-bit being used as invalid marker (see
325324
* arch_vdso_cycles_valid() above), the effective mask is S64_MAX.
326325
*/
327-
u64 delta = (cycles - last) & S64_MAX;
326+
u64 delta = (cycles - vd->cycle_last) & S64_MAX;
328327

329328
/*
330329
* Due to the above mentioned TSC wobbles, filter out negative motion.
331330
* Per the above masking, the effective sign bit is now bit 62.
332331
*/
333332
if (unlikely(delta & (1ULL << 62)))
334-
return 0;
333+
return base >> vd->shift;
335334

336-
return delta * mult;
335+
return ((delta * vd->mult) + base) >> vd->shift;
337336
}
338-
#define vdso_calc_delta vdso_calc_delta
337+
#define vdso_calc_ns vdso_calc_ns
339338

340339
#endif /* !__ASSEMBLY__ */
341340

lib/vdso/gettimeofday.c

Lines changed: 19 additions & 24 deletions
Original file line numberDiff line numberDiff line change
@@ -5,31 +5,32 @@
55
#include <vdso/datapage.h>
66
#include <vdso/helpers.h>
77

8-
#ifndef vdso_calc_delta
8+
#ifndef vdso_calc_ns
99

1010
#ifdef VDSO_DELTA_NOMASK
11-
# define VDSO_DELTA_MASK(mask) U64_MAX
11+
# define VDSO_DELTA_MASK(vd) U64_MAX
1212
#else
13-
# define VDSO_DELTA_MASK(mask) (mask)
13+
# define VDSO_DELTA_MASK(vd) (vd->mask)
14+
#endif
15+
16+
#ifndef vdso_shift_ns
17+
static __always_inline u64 vdso_shift_ns(u64 ns, u32 shift)
18+
{
19+
return ns >> shift;
20+
}
1421
#endif
1522

1623
/*
1724
* Default implementation which works for all sane clocksources. That
1825
* obviously excludes x86/TSC.
1926
*/
20-
static __always_inline
21-
u64 vdso_calc_delta(u64 cycles, u64 last, u64 mask, u32 mult)
27+
static __always_inline u64 vdso_calc_ns(const struct vdso_data *vd, u64 cycles, u64 base)
2228
{
23-
return ((cycles - last) & VDSO_DELTA_MASK(mask)) * mult;
24-
}
25-
#endif
29+
u64 delta = (cycles - vd->cycle_last) & VDSO_DELTA_MASK(vd);
2630

27-
#ifndef vdso_shift_ns
28-
static __always_inline u64 vdso_shift_ns(u64 ns, u32 shift)
29-
{
30-
return ns >> shift;
31+
return vdso_shift_ns((delta * vd->mult) + base, vd->shift);
3132
}
32-
#endif
33+
#endif /* vdso_calc_ns */
3334

3435
#ifndef __arch_vdso_hres_capable
3536
static inline bool __arch_vdso_hres_capable(void)
@@ -56,10 +57,10 @@ static inline bool vdso_cycles_ok(u64 cycles)
5657
static __always_inline int do_hres_timens(const struct vdso_data *vdns, clockid_t clk,
5758
struct __kernel_timespec *ts)
5859
{
59-
const struct vdso_data *vd;
6060
const struct timens_offset *offs = &vdns->offset[clk];
6161
const struct vdso_timestamp *vdso_ts;
62-
u64 cycles, last, ns;
62+
const struct vdso_data *vd;
63+
u64 cycles, ns;
6364
u32 seq;
6465
s64 sec;
6566

@@ -80,10 +81,7 @@ static __always_inline int do_hres_timens(const struct vdso_data *vdns, clockid_
8081
cycles = __arch_get_hw_counter(vd->clock_mode, vd);
8182
if (unlikely(!vdso_cycles_ok(cycles)))
8283
return -1;
83-
ns = vdso_ts->nsec;
84-
last = vd->cycle_last;
85-
ns += vdso_calc_delta(cycles, last, vd->mask, vd->mult);
86-
ns = vdso_shift_ns(ns, vd->shift);
84+
ns = vdso_calc_ns(vd, cycles, vdso_ts->nsec);
8785
sec = vdso_ts->sec;
8886
} while (unlikely(vdso_read_retry(vd, seq)));
8987

@@ -118,7 +116,7 @@ static __always_inline int do_hres(const struct vdso_data *vd, clockid_t clk,
118116
struct __kernel_timespec *ts)
119117
{
120118
const struct vdso_timestamp *vdso_ts = &vd->basetime[clk];
121-
u64 cycles, last, sec, ns;
119+
u64 cycles, sec, ns;
122120
u32 seq;
123121

124122
/* Allows to compile the high resolution parts out */
@@ -151,10 +149,7 @@ static __always_inline int do_hres(const struct vdso_data *vd, clockid_t clk,
151149
cycles = __arch_get_hw_counter(vd->clock_mode, vd);
152150
if (unlikely(!vdso_cycles_ok(cycles)))
153151
return -1;
154-
ns = vdso_ts->nsec;
155-
last = vd->cycle_last;
156-
ns += vdso_calc_delta(cycles, last, vd->mask, vd->mult);
157-
ns = vdso_shift_ns(ns, vd->shift);
152+
ns = vdso_calc_ns(vd, cycles, vdso_ts->nsec);
158153
sec = vdso_ts->sec;
159154
} while (unlikely(vdso_read_retry(vd, seq)));
160155

0 commit comments

Comments (0)