 #define _ASM_X86_CMPXCHG_32_H
 
 /*
- * Note: if you use set64_bit(), __cmpxchg64(), or their variants,
+ * Note: if you use __cmpxchg64() or its variants,
  * you need to test for the feature in boot_cpu_data.
  */
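
"Test for the feature" concretely means checking the CX8 capability bit before calling the raw helpers directly. A minimal sketch of such a guard (the surrounding variables are hypothetical, for illustration only):

	if (boot_cpu_has(X86_FEATURE_CX8))	/* CPU implements cmpxchg8b */
		prev = __cmpxchg64(ptr, old, new);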
 
-#ifdef CONFIG_X86_CMPXCHG64
-#define arch_cmpxchg64(ptr, o, n)					\
-	((__typeof__(*(ptr)))__cmpxchg64((ptr), (unsigned long long)(o), \
-					 (unsigned long long)(n)))
-#define arch_cmpxchg64_local(ptr, o, n)					\
-	((__typeof__(*(ptr)))__cmpxchg64_local((ptr), (unsigned long long)(o), \
-					       (unsigned long long)(n)))
-#define arch_try_cmpxchg64(ptr, po, n)					\
-	__try_cmpxchg64((ptr), (unsigned long long *)(po),		\
-			(unsigned long long)(n))
-#endif
+union __u64_halves {
+	u64 full;
+	struct {
+		u32 low, high;
+	};
+};
+
+#define __arch_cmpxchg64(_ptr, _old, _new, _lock)			\
+({									\
+	union __u64_halves o = { .full = (_old), },			\
+			   n = { .full = (_new), };			\
+									\
+	asm volatile(_lock "cmpxchg8b %[ptr]"				\
+		     : [ptr] "+m" (*(_ptr)),				\
+		       "+a" (o.low), "+d" (o.high)			\
+		     : "b" (n.low), "c" (n.high)			\
+		     : "memory");					\
+									\
+	o.full;								\
+})
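
The union gives the compiler named access to the two 32-bit halves that CMPXCHG8B actually operates on: the expected value travels in EDX:EAX ("+a", "+d"), the replacement in ECX:EBX ("b", "c"), and the instruction writes whatever it found back into EDX:EAX. A single (locked) execution behaves like this non-atomic reference model, where cmpxchg8b_model is a hypothetical name used purely for illustration:

	static u64 cmpxchg8b_model(u64 *ptr, u64 old, u64 new)
	{
		u64 cur = *ptr;		/* loaded into edx:eax */

		if (cur == old)
			*ptr = new;	/* stored from ecx:ebx */
		return cur;		/* previous value, i.e. o.full */
	}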
 
-static inline u64 __cmpxchg64(volatile u64 *ptr, u64 old, u64 new)
+
+static __always_inline u64 __cmpxchg64(volatile u64 *ptr, u64 old, u64 new)
 {
-	u64 prev;
-	asm volatile(LOCK_PREFIX "cmpxchg8b %1"
-		     : "=A" (prev),
-		       "+m" (*ptr)
-		     : "b" ((u32)new),
-		       "c" ((u32)(new >> 32)),
-		       "0" (old)
-		     : "memory");
-	return prev;
+	return __arch_cmpxchg64(ptr, old, new, LOCK_PREFIX);
 }
 
-static inline u64 __cmpxchg64_local(volatile u64 *ptr, u64 old, u64 new)
+static __always_inline u64 __cmpxchg64_local(volatile u64 *ptr, u64 old, u64 new)
 {
-	u64 prev;
-	asm volatile("cmpxchg8b %1"
-		     : "=A" (prev),
-		       "+m" (*ptr)
-		     : "b" ((u32)new),
-		       "c" ((u32)(new >> 32)),
-		       "0" (old)
-		     : "memory");
-	return prev;
+	return __arch_cmpxchg64(ptr, old, new,);
 }
 
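The trailing comma in the _local variant is deliberate: it passes an empty _lock argument, so no LOCK prefix is pasted in front of the instruction. Expanded by hand, the local call comes out roughly as follows (a sketch, not the preprocessor's literal output):

	asm volatile("cmpxchg8b %[ptr]"		/* no LOCK prefix */
		     : [ptr] "+m" (*ptr),
		       "+a" (o.low), "+d" (o.high)
		     : "b" (n.low), "c" (n.high)
		     : "memory");
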
-static inline bool __try_cmpxchg64(volatile u64 *ptr, u64 *pold, u64 new)
+#define __arch_try_cmpxchg64(_ptr, _oldp, _new, _lock)			\
+({									\
+	union __u64_halves o = { .full = *(_oldp), },			\
+			   n = { .full = (_new), };			\
+	bool ret;							\
+									\
+	asm volatile(_lock "cmpxchg8b %[ptr]"				\
+		     CC_SET(e)						\
+		     : CC_OUT(e) (ret),					\
+		       [ptr] "+m" (*(_ptr)),				\
+		       "+a" (o.low), "+d" (o.high)			\
+		     : "b" (n.low), "c" (n.high)			\
+		     : "memory");					\
+									\
+	if (unlikely(!ret))						\
+		*(_oldp) = o.full;					\
+									\
+	likely(ret);							\
+})
+
+static __always_inline bool __try_cmpxchg64(volatile u64 *ptr, u64 *oldp, u64 new)
 {
-	bool success;
-	u64 old = *pold;
-	asm volatile(LOCK_PREFIX "cmpxchg8b %[ptr]"
-		     CC_SET(z)
-		     : CC_OUT(z) (success),
-		       [ptr] "+m" (*ptr),
-		       "+A" (old)
-		     : "b" ((u32)new),
-		       "c" ((u32)(new >> 32))
-		     : "memory");
-
-	if (unlikely(!success))
-		*pold = old;
-	return success;
+	return __arch_try_cmpxchg64(ptr, oldp, new, LOCK_PREFIX);
 }
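
On failure, the macro writes the value it actually observed back through _oldp, so the failed CMPXCHG8B doubles as the reload in the canonical retry loop. A sketch of that idiom, where counter_add64 is a hypothetical caller rather than part of this header:

	static inline void counter_add64(volatile u64 *counter, u64 delta)
	{
		u64 old = *counter;

		/* on failure, @old is refreshed by the cmpxchg itself */
		while (!__try_cmpxchg64(counter, &old, old + delta))
			;
	}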
 
-#ifndef CONFIG_X86_CMPXCHG64
+#ifdef CONFIG_X86_CMPXCHG64
+
+#define arch_cmpxchg64 __cmpxchg64
+
+#define arch_cmpxchg64_local __cmpxchg64_local
+
+#define arch_try_cmpxchg64 __try_cmpxchg64
+
+#else
+
 /*
  * When building a kernel capable of running on the 80386 and 80486, it may
  * be necessary to simulate cmpxchg8b on those CPUs.
  */
 
-#define arch_cmpxchg64(ptr, o, n)					\
-({									\
-	__typeof__(*(ptr)) __ret;					\
-	__typeof__(*(ptr)) __old = (o);					\
-	__typeof__(*(ptr)) __new = (n);					\
-	alternative_io(LOCK_PREFIX_HERE					\
-			"call cmpxchg8b_emu",				\
-			"lock; cmpxchg8b (%%esi)" ,			\
-		       X86_FEATURE_CX8,					\
-		       "=A" (__ret),					\
-		       "S" ((ptr)), "0" (__old),			\
-		       "b" ((unsigned int)__new),			\
-		       "c" ((unsigned int)(__new>>32))			\
-		       : "memory");					\
-	__ret; })
-
-
-#define arch_cmpxchg64_local(ptr, o, n)					\
-({									\
-	__typeof__(*(ptr)) __ret;					\
-	__typeof__(*(ptr)) __old = (o);					\
-	__typeof__(*(ptr)) __new = (n);					\
-	alternative_io("call cmpxchg8b_emu",				\
-			"cmpxchg8b (%%esi)" ,				\
-		       X86_FEATURE_CX8,					\
-		       "=A" (__ret),					\
-		       "S" ((ptr)), "0" (__old),			\
-		       "b" ((unsigned int)__new),			\
-		       "c" ((unsigned int)(__new>>32))			\
-		       : "memory");					\
-	__ret; })
+#define __arch_cmpxchg64_emu(_ptr, _old, _new)				\
+({									\
+	union __u64_halves o = { .full = (_old), },			\
+			   n = { .full = (_new), };			\
+									\
+	asm volatile(ALTERNATIVE(LOCK_PREFIX_HERE			\
+				 "call cmpxchg8b_emu",			\
+				 "lock; cmpxchg8b %[ptr]", X86_FEATURE_CX8) \
+		     : [ptr] "+m" (*(_ptr)),				\
+		       "+a" (o.low), "+d" (o.high)			\
+		     : "b" (n.low), "c" (n.high), "S" (_ptr)		\
+		     : "memory");					\
+									\
+	o.full;								\
+})
+
+static __always_inline u64 arch_cmpxchg64(volatile u64 *ptr, u64 old, u64 new)
+{
+	return __arch_cmpxchg64_emu(ptr, old, new);
+}
+#define arch_cmpxchg64 arch_cmpxchg64
+
+#define __arch_cmpxchg64_emu_local(_ptr, _old, _new)			\
+({									\
+	union __u64_halves o = { .full = (_old), },			\
+			   n = { .full = (_new), };			\
+									\
+	asm volatile(ALTERNATIVE("call cmpxchg8b_emu",			\
+				 "cmpxchg8b %[ptr]", X86_FEATURE_CX8)	\
+		     : [ptr] "+m" (*(_ptr)),				\
+		       "+a" (o.low), "+d" (o.high)			\
+		     : "b" (n.low), "c" (n.high), "S" (_ptr)		\
+		     : "memory");					\
+									\
+	o.full;								\
+})
+
+static __always_inline u64 arch_cmpxchg64_local(volatile u64 *ptr, u64 old, u64 new)
+{
+	return __arch_cmpxchg64_emu_local(ptr, old, new);
+}
+#define arch_cmpxchg64_local arch_cmpxchg64_local
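
Both emulation paths pass the pointer in %esi through the extra "S" constraint: cmpxchg8b_emu (arch/x86/lib/cmpxchg8b_emu.S) takes its operand pointer in %esi, while the patched-in cmpxchg8b alternative uses the %[ptr] memory operand directly, so a single constraint set serves both. Systems lacking CX8 are uniprocessor, so the emulation only has to keep interrupts out of the way; a non-authoritative C model of what the assembly routine does:

	static u64 cmpxchg8b_emu_model(volatile u64 *ptr, u64 old, u64 new)
	{
		u64 cur;
		unsigned long flags;

		local_irq_save(flags);		/* pushfl; cli */
		cur = *ptr;
		if (cur == old)
			*ptr = new;
		local_irq_restore(flags);	/* popfl */
		return cur;			/* previous value in edx:eax */
	}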
 
 #endif
 