Skip to content

Commit c35a824

Browse files
arndb authored and ctmarinas committed
arm64: make atomic helpers __always_inline
With UBSAN enabled and building with clang, there are occasionally warnings like:

    WARNING: modpost: vmlinux.o(.text+0xc533ec): Section mismatch in reference
    from the function arch_atomic64_or() to the variable .init.data:numa_nodes_parsed
    The function arch_atomic64_or() references the variable __initdata numa_nodes_parsed.
    This is often because arch_atomic64_or lacks a __initdata annotation
    or the annotation of numa_nodes_parsed is wrong.

for functions that end up not being inlined as intended but operating on __initdata variables. Mark these as __always_inline, along with the corresponding asm-generic wrappers.

Signed-off-by: Arnd Bergmann <arnd@arndb.de>
Acked-by: Will Deacon <will@kernel.org>
Link: https://lore.kernel.org/r/20210108092024.4034860-1-arnd@kernel.org
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
1 parent 71e7018 commit c35a824

2 files changed

Lines changed: 8 additions & 8 deletions

File tree

arch/arm64/include/asm/atomic.h

Lines changed: 5 additions & 5 deletions
Original file line number | Diff line number | Diff line change
@@ -17,7 +17,7 @@
1717
#include <asm/lse.h>
1818

1919
#define ATOMIC_OP(op) \
20-
static inline void arch_##op(int i, atomic_t *v) \
20+
static __always_inline void arch_##op(int i, atomic_t *v) \
2121
{ \
2222
__lse_ll_sc_body(op, i, v); \
2323
}
@@ -32,7 +32,7 @@ ATOMIC_OP(atomic_sub)
3232
#undef ATOMIC_OP
3333

3434
#define ATOMIC_FETCH_OP(name, op) \
35-
static inline int arch_##op##name(int i, atomic_t *v) \
35+
static __always_inline int arch_##op##name(int i, atomic_t *v) \
3636
{ \
3737
return __lse_ll_sc_body(op##name, i, v); \
3838
}
@@ -56,7 +56,7 @@ ATOMIC_FETCH_OPS(atomic_sub_return)
5656
#undef ATOMIC_FETCH_OPS
5757

5858
#define ATOMIC64_OP(op) \
59-
static inline void arch_##op(long i, atomic64_t *v) \
59+
static __always_inline void arch_##op(long i, atomic64_t *v) \
6060
{ \
6161
__lse_ll_sc_body(op, i, v); \
6262
}
@@ -71,7 +71,7 @@ ATOMIC64_OP(atomic64_sub)
7171
#undef ATOMIC64_OP
7272

7373
#define ATOMIC64_FETCH_OP(name, op) \
74-
static inline long arch_##op##name(long i, atomic64_t *v) \
74+
static __always_inline long arch_##op##name(long i, atomic64_t *v) \
7575
{ \
7676
return __lse_ll_sc_body(op##name, i, v); \
7777
}
@@ -94,7 +94,7 @@ ATOMIC64_FETCH_OPS(atomic64_sub_return)
9494
#undef ATOMIC64_FETCH_OP
9595
#undef ATOMIC64_FETCH_OPS
9696

97-
static inline long arch_atomic64_dec_if_positive(atomic64_t *v)
97+
static __always_inline long arch_atomic64_dec_if_positive(atomic64_t *v)
9898
{
9999
return __lse_ll_sc_body(atomic64_dec_if_positive, v);
100100
}

include/asm-generic/bitops/atomic.h

Lines changed: 3 additions & 3 deletions
Original file line number | Diff line number | Diff line change
@@ -11,19 +11,19 @@
1111
* See Documentation/atomic_bitops.txt for details.
1212
*/
1313

14-
static inline void set_bit(unsigned int nr, volatile unsigned long *p)
14+
static __always_inline void set_bit(unsigned int nr, volatile unsigned long *p)
1515
{
1616
p += BIT_WORD(nr);
1717
atomic_long_or(BIT_MASK(nr), (atomic_long_t *)p);
1818
}
1919

20-
static inline void clear_bit(unsigned int nr, volatile unsigned long *p)
20+
static __always_inline void clear_bit(unsigned int nr, volatile unsigned long *p)
2121
{
2222
p += BIT_WORD(nr);
2323
atomic_long_andnot(BIT_MASK(nr), (atomic_long_t *)p);
2424
}
2525

26-
static inline void change_bit(unsigned int nr, volatile unsigned long *p)
26+
static __always_inline void change_bit(unsigned int nr, volatile unsigned long *p)
2727
{
2828
p += BIT_WORD(nr);
2929
atomic_long_xor(BIT_MASK(nr), (atomic_long_t *)p);

0 commit comments

Comments
 (0)