
Commit b0b449e

jgross1 authored and bp3tk0v committed
x86/pvlocks: Move paravirt spinlock functions into own header
Instead of having the pv spinlock function definitions in paravirt.h, move
them into the new header paravirt-spinlock.h.

Signed-off-by: Juergen Gross <jgross@suse.com>
Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
Link: https://patch.msgid.link/20260105110520.21356-22-jgross@suse.com
1 parent 392afe8 commit b0b449e

12 files changed

Lines changed: 198 additions & 200 deletions
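The net effect, distilled from the hunks below: lock ops that hypervisor code
used to install through the lock member of the global pv_ops template are now
installed into the standalone pv_ops_lock instance declared by the new header.
A minimal registration sketch, not code from the patch itself; it reuses the
Hyper-V callback names touched in the first hunk:

#include <asm/paravirt-spinlock.h>

static void __init example_register_lock_ops(void)
{
	/* Previously: pv_ops.lock.wait = hv_qlock_wait; etc. */
	pv_ops_lock.wait = hv_qlock_wait;
	pv_ops_lock.kick = hv_qlock_kick;
}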


arch/x86/hyperv/hv_spinlock.c

Lines changed: 5 additions & 5 deletions
@@ -78,11 +78,11 @@ void __init hv_init_spinlocks(void)
 	pr_info("PV spinlocks enabled\n");
 
 	__pv_init_lock_hash();
-	pv_ops.lock.queued_spin_lock_slowpath = __pv_queued_spin_lock_slowpath;
-	pv_ops.lock.queued_spin_unlock = PV_CALLEE_SAVE(__pv_queued_spin_unlock);
-	pv_ops.lock.wait = hv_qlock_wait;
-	pv_ops.lock.kick = hv_qlock_kick;
-	pv_ops.lock.vcpu_is_preempted = PV_CALLEE_SAVE(hv_vcpu_is_preempted);
+	pv_ops_lock.queued_spin_lock_slowpath = __pv_queued_spin_lock_slowpath;
+	pv_ops_lock.queued_spin_unlock = PV_CALLEE_SAVE(__pv_queued_spin_unlock);
+	pv_ops_lock.wait = hv_qlock_wait;
+	pv_ops_lock.kick = hv_qlock_kick;
+	pv_ops_lock.vcpu_is_preempted = PV_CALLEE_SAVE(hv_vcpu_is_preempted);
 }
 
 static __init int hv_parse_nopvspin(char *arg)
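For reference, the callbacks registered above must match the struct
pv_lock_ops signatures defined in the new header: wait() takes a byte pointer
plus the value it currently holds, kick() takes a CPU number. The skeletons
below only illustrate that contract; they are not the real Hyper-V
implementations from hv_spinlock.c:

static void example_qlock_wait(u8 *byte, u8 val)
{
	/* Ask the hypervisor to halt this vCPU until *byte != val or a kick. */
}

static void example_qlock_kick(int cpu)
{
	/* Ask the hypervisor to wake the vCPU waiting on the lock. */
}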

arch/x86/include/asm/paravirt-base.h

Lines changed: 6 additions & 0 deletions
@@ -26,4 +26,10 @@ u64 _paravirt_ident_64(u64);
 #endif
 #define paravirt_nop ((void *)nop_func)
 
+#ifdef CONFIG_PARAVIRT_SPINLOCKS
+void paravirt_set_cap(void);
+#else
+static inline void paravirt_set_cap(void) { }
+#endif
+
 #endif /* _ASM_X86_PARAVIRT_BASE_H */
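The declaration-or-empty-stub pattern added here lets callers invoke
paravirt_set_cap() unconditionally: with CONFIG_PARAVIRT_SPINLOCKS=y it
resolves to the real function, otherwise the empty inline compiles away. A
hypothetical caller, for illustration only (the real call site is not part of
this hunk):

#include <asm/paravirt-base.h>

static void __init example_paravirt_setup(void)
{
	paravirt_set_cap();	/* no #ifdef needed at the call site */
}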
arch/x86/include/asm/paravirt-spinlock.h

Lines changed: 145 additions & 0 deletions

@@ -0,0 +1,145 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#ifndef _ASM_X86_PARAVIRT_SPINLOCK_H
+#define _ASM_X86_PARAVIRT_SPINLOCK_H
+
+#include <asm/paravirt_types.h>
+
+#ifdef CONFIG_SMP
+#include <asm/spinlock_types.h>
+#endif
+
+struct qspinlock;
+
+struct pv_lock_ops {
+	void (*queued_spin_lock_slowpath)(struct qspinlock *lock, u32 val);
+	struct paravirt_callee_save queued_spin_unlock;
+
+	void (*wait)(u8 *ptr, u8 val);
+	void (*kick)(int cpu);
+
+	struct paravirt_callee_save vcpu_is_preempted;
+} __no_randomize_layout;
+
+extern struct pv_lock_ops pv_ops_lock;
+
+#ifdef CONFIG_PARAVIRT_SPINLOCKS
+extern void native_queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);
+extern void __pv_init_lock_hash(void);
+extern void __pv_queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);
+extern void __raw_callee_save___pv_queued_spin_unlock(struct qspinlock *lock);
+extern bool nopvspin;
+
+static __always_inline void pv_queued_spin_lock_slowpath(struct qspinlock *lock,
+							  u32 val)
+{
+	PVOP_VCALL2(pv_ops_lock, queued_spin_lock_slowpath, lock, val);
+}
+
+static __always_inline void pv_queued_spin_unlock(struct qspinlock *lock)
+{
+	PVOP_ALT_VCALLEE1(pv_ops_lock, queued_spin_unlock, lock,
+			  "movb $0, (%%" _ASM_ARG1 ");",
+			  ALT_NOT(X86_FEATURE_PVUNLOCK));
+}
+
+static __always_inline bool pv_vcpu_is_preempted(long cpu)
+{
+	return PVOP_ALT_CALLEE1(bool, pv_ops_lock, vcpu_is_preempted, cpu,
+				"xor %%" _ASM_AX ", %%" _ASM_AX ";",
+				ALT_NOT(X86_FEATURE_VCPUPREEMPT));
+}
+
+#define queued_spin_unlock queued_spin_unlock
+/**
+ * queued_spin_unlock - release a queued spinlock
+ * @lock : Pointer to queued spinlock structure
+ *
+ * A smp_store_release() on the least-significant byte.
+ */
+static inline void native_queued_spin_unlock(struct qspinlock *lock)
+{
+	smp_store_release(&lock->locked, 0);
+}
+
+static inline void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val)
+{
+	pv_queued_spin_lock_slowpath(lock, val);
+}
+
+static inline void queued_spin_unlock(struct qspinlock *lock)
+{
+	kcsan_release();
+	pv_queued_spin_unlock(lock);
+}
+
+#define vcpu_is_preempted vcpu_is_preempted
+static inline bool vcpu_is_preempted(long cpu)
+{
+	return pv_vcpu_is_preempted(cpu);
+}
+
+static __always_inline void pv_wait(u8 *ptr, u8 val)
+{
+	PVOP_VCALL2(pv_ops_lock, wait, ptr, val);
+}
+
+static __always_inline void pv_kick(int cpu)
+{
+	PVOP_VCALL1(pv_ops_lock, kick, cpu);
+}
+
+void __raw_callee_save___native_queued_spin_unlock(struct qspinlock *lock);
+bool __raw_callee_save___native_vcpu_is_preempted(long cpu);
+#endif /* CONFIG_PARAVIRT_SPINLOCKS */
+
+void __init native_pv_lock_init(void);
+__visible void __native_queued_spin_unlock(struct qspinlock *lock);
+bool pv_is_native_spin_unlock(void);
+__visible bool __native_vcpu_is_preempted(long cpu);
+bool pv_is_native_vcpu_is_preempted(void);
+
+/*
+ * virt_spin_lock_key - disables by default the virt_spin_lock() hijack.
+ *
+ * Native (and PV wanting native due to vCPU pinning) should keep this key
+ * disabled. Native does not touch the key.
+ *
+ * When in a guest then native_pv_lock_init() enables the key first and
+ * KVM/XEN might conditionally disable it later in the boot process again.
+ */
+DECLARE_STATIC_KEY_FALSE(virt_spin_lock_key);
+
+/*
+ * Shortcut for the queued_spin_lock_slowpath() function that allows
+ * virt to hijack it.
+ *
+ * Returns:
+ *   true - lock has been negotiated, all done;
+ *   false - queued_spin_lock_slowpath() will do its thing.
+ */
+#define virt_spin_lock virt_spin_lock
+static inline bool virt_spin_lock(struct qspinlock *lock)
+{
+	int val;
+
+	if (!static_branch_likely(&virt_spin_lock_key))
+		return false;
+
+	/*
+	 * On hypervisors without PARAVIRT_SPINLOCKS support we fall
+	 * back to a Test-and-Set spinlock, because fair locks have
+	 * horrible lock 'holder' preemption issues.
+	 */
+__retry:
+	val = atomic_read(&lock->val);
+
+	if (val || !atomic_try_cmpxchg(&lock->val, &val, _Q_LOCKED_VAL)) {
+		cpu_relax();
+		goto __retry;
+	}
+
+	return true;
+}
+
+#endif /* _ASM_X86_PARAVIRT_SPINLOCK_H */
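The pv_wait()/pv_kick() wrappers above are consumed by the PV qspinlock slow
path (kernel/locking/qspinlock_paravirt.h, not part of this commit): a waiting
vCPU asks the hypervisor to halt it until the watched byte changes, and the
releasing side kicks it awake. A deliberately simplified sketch of that
wait/kick pairing; the state byte and function names are stand-ins, not the
real slow-path code:

#include <linux/compiler.h>
#include <asm/paravirt-spinlock.h>

static u8 example_state;	/* stand-in for a waiter's lock-state byte */

static void example_waiter(void)
{
	/* pv_wait() may return spuriously, hence the re-check loop. */
	while (READ_ONCE(example_state) == 0)
		pv_wait(&example_state, 0);
}

static void example_releaser(int waiter_cpu)
{
	WRITE_ONCE(example_state, 1);
	pv_kick(waiter_cpu);	/* wake the vCPU halted in example_waiter() */
}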

arch/x86/include/asm/paravirt.h

Lines changed: 0 additions & 61 deletions
@@ -19,15 +19,6 @@
 #include <linux/cpumask.h>
 #include <asm/frame.h>
 
-__visible void __native_queued_spin_unlock(struct qspinlock *lock);
-bool pv_is_native_spin_unlock(void);
-__visible bool __native_vcpu_is_preempted(long cpu);
-bool pv_is_native_vcpu_is_preempted(void);
-
-#ifdef CONFIG_PARAVIRT_SPINLOCKS
-void __init paravirt_set_cap(void);
-#endif
-
 /* The paravirtualized I/O functions */
 static inline void slow_down_io(void)
 {
@@ -522,46 +513,7 @@ static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
 {
 	pv_ops.mmu.set_fixmap(idx, phys, flags);
 }
-#endif
-
-#if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)
-
-static __always_inline void pv_queued_spin_lock_slowpath(struct qspinlock *lock,
-							  u32 val)
-{
-	PVOP_VCALL2(pv_ops, lock.queued_spin_lock_slowpath, lock, val);
-}
-
-static __always_inline void pv_queued_spin_unlock(struct qspinlock *lock)
-{
-	PVOP_ALT_VCALLEE1(pv_ops, lock.queued_spin_unlock, lock,
-			  "movb $0, (%%" _ASM_ARG1 ");",
-			  ALT_NOT(X86_FEATURE_PVUNLOCK));
-}
-
-static __always_inline void pv_wait(u8 *ptr, u8 val)
-{
-	PVOP_VCALL2(pv_ops, lock.wait, ptr, val);
-}
-
-static __always_inline void pv_kick(int cpu)
-{
-	PVOP_VCALL1(pv_ops, lock.kick, cpu);
-}
-
-static __always_inline bool pv_vcpu_is_preempted(long cpu)
-{
-	return PVOP_ALT_CALLEE1(bool, pv_ops, lock.vcpu_is_preempted, cpu,
-				"xor %%" _ASM_AX ", %%" _ASM_AX ";",
-				ALT_NOT(X86_FEATURE_VCPUPREEMPT));
-}
 
-void __raw_callee_save___native_queued_spin_unlock(struct qspinlock *lock);
-bool __raw_callee_save___native_vcpu_is_preempted(long cpu);
-
-#endif /* SMP && PARAVIRT_SPINLOCKS */
-
-#ifdef CONFIG_PARAVIRT_XXL
 static __always_inline unsigned long arch_local_save_flags(void)
 {
 	return PVOP_ALT_CALLEE0(unsigned long, pv_ops, irq.save_fl, "pushf; pop %%rax;",
@@ -588,8 +540,6 @@ static __always_inline unsigned long arch_local_irq_save(void)
 }
 #endif
 
-void native_pv_lock_init(void) __init;
-
 #else /* __ASSEMBLER__ */
 
 #ifdef CONFIG_X86_64
@@ -613,12 +563,6 @@ void native_pv_lock_init(void) __init;
 #endif /* __ASSEMBLER__ */
 #else /* CONFIG_PARAVIRT */
 # define default_banner x86_init_noop
-
-#ifndef __ASSEMBLER__
-static inline void native_pv_lock_init(void)
-{
-}
-#endif
 #endif /* !CONFIG_PARAVIRT */
 
 #ifndef __ASSEMBLER__
@@ -634,10 +578,5 @@ static inline void paravirt_arch_exit_mmap(struct mm_struct *mm)
 }
 #endif
 
-#ifndef CONFIG_PARAVIRT_SPINLOCKS
-static inline void paravirt_set_cap(void)
-{
-}
-#endif
 #endif /* __ASSEMBLER__ */
 #endif /* _ASM_X86_PARAVIRT_H */

arch/x86/include/asm/paravirt_types.h

Lines changed: 0 additions & 17 deletions
@@ -184,30 +184,13 @@ struct pv_mmu_ops {
 #endif
 } __no_randomize_layout;
 
-#ifdef CONFIG_SMP
-#include <asm/spinlock_types.h>
-#endif
-
-struct qspinlock;
-
-struct pv_lock_ops {
-	void (*queued_spin_lock_slowpath)(struct qspinlock *lock, u32 val);
-	struct paravirt_callee_save queued_spin_unlock;
-
-	void (*wait)(u8 *ptr, u8 val);
-	void (*kick)(int cpu);
-
-	struct paravirt_callee_save vcpu_is_preempted;
-} __no_randomize_layout;
-
 /* This contains all the paravirt structures: we get a convenient
  * number for each function using the offset which we use to indicate
  * what to patch. */
 struct paravirt_patch_template {
 	struct pv_cpu_ops cpu;
 	struct pv_irq_ops irq;
 	struct pv_mmu_ops mmu;
-	struct pv_lock_ops lock;
 } __no_randomize_layout;
 
 extern struct paravirt_patch_template pv_ops;
