  * Peter Zijlstra <peterz@infradead.org>
  */

-#ifndef _GEN_PV_LOCK_SLOWPATH
-
 #include <linux/smp.h>
 #include <linux/bug.h>
 #include <linux/cpumask.h>
  * Include queued spinlock definitions and statistics code
  */
 #include "../locking/qspinlock.h"
-#include "../locking/qspinlock_stat.h"
+#include "../locking/lock_events.h"

 /*
  * The basic principle of a queue-based spinlock can best be understood
  * contexts: task, softirq, hardirq, nmi.
  *
  * Exactly fits one 64-byte cacheline on a 64-bit architecture.
- *
- * PV doubles the storage and uses the second cacheline for PV state.
  */
 static DEFINE_PER_CPU_ALIGNED(struct qnode, rqnodes[_Q_MAX_NODES]);

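To make the cacheline remark above concrete: with the usual MCS node layout, each qnode is 16 bytes on a 64-bit machine, so the four-entry per-CPU array fills exactly one 64-byte line. A layout sketch, assumed from kernel/locking/mcs_spinlock.h and the shared qspinlock header (neither is shown in this diff):

struct mcs_spinlock {
	struct mcs_spinlock *next;	/* 8 bytes: next waiter in the queue */
	int locked;			/* 4 bytes: set when the lock is handed over */
	int count;			/* 4 bytes: per-CPU nesting count */
};

struct qnode {
	struct mcs_spinlock mcs;	/* no PV state here, so 16 bytes total */
};

Four such nodes come to 64 bytes, which is what the DEFINE_PER_CPU_ALIGNED() above relies on.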
-/*
- * Generate the native code for resilient_queued_spin_unlock_slowpath(); provide NOPs
- * for all the PV callbacks.
- */
-
-static __always_inline void __pv_init_node(struct mcs_spinlock *node) { }
-static __always_inline void __pv_wait_node(struct mcs_spinlock *node,
-					   struct mcs_spinlock *prev) { }
-static __always_inline void __pv_kick_node(struct qspinlock *lock,
-					   struct mcs_spinlock *node) { }
-static __always_inline u32 __pv_wait_head_or_lock(struct qspinlock *lock,
-						  struct mcs_spinlock *node)
-						  { return 0; }
-
-#define pv_enabled()		false
-
-#define pv_init_node		__pv_init_node
-#define pv_wait_node		__pv_wait_node
-#define pv_kick_node		__pv_kick_node
-#define pv_wait_head_or_lock	__pv_wait_head_or_lock
-
-#ifdef CONFIG_PARAVIRT_SPINLOCKS
-#define resilient_queued_spin_lock_slowpath	native_resilient_queued_spin_lock_slowpath
-#endif
-
-#endif /* _GEN_PV_LOCK_SLOWPATH */
-
 /**
  * resilient_queued_spin_lock_slowpath - acquire the queued spinlock
  * @lock: Pointer to queued spinlock structure
@@ -136,12 +105,6 @@ void __lockfunc resilient_queued_spin_lock_slowpath(rqspinlock_t *lock, u32 val)

 	BUILD_BUG_ON(CONFIG_NR_CPUS >= (1U << _Q_TAIL_CPU_BITS));

-	if (pv_enabled())
-		goto pv_queue;
-
-	if (virt_spin_lock(lock))
-		return;
-
 	/*
 	 * Wait for in-progress pending->locked hand-overs with a bounded
 	 * number of spins so that we guarantee forward progress.
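For reference, the bounded wait this comment describes usually mirrors the pending-wait in upstream qspinlock: spin only while the lock word reads exactly "pending set, owner clear", and stop after a fixed budget. A minimal sketch, assuming the usual _Q_PENDING_VAL and _Q_PENDING_LOOPS constants from the shared header:

	if (val == _Q_PENDING_VAL) {
		int cnt = _Q_PENDING_LOOPS;	/* small, fixed spin budget */

		/* stop when the word changes or the budget runs out */
		val = atomic_cond_read_relaxed(&lock->val,
					       (VAL != _Q_PENDING_VAL) || !cnt--);
	}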
@@ -212,7 +175,6 @@ void __lockfunc resilient_queued_spin_lock_slowpath(rqspinlock_t *lock, u32 val)
 	 */
 queue:
 	lockevent_inc(lock_slowpath);
-pv_queue:
 	node = this_cpu_ptr(&rqnodes[0].mcs);
 	idx = node->count++;
 	tail = encode_tail(smp_processor_id(), idx);
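encode_tail() lives in the shared ../locking/qspinlock.h; roughly, it packs the CPU number (offset by one so that 0 can mean "no tail") and the per-CPU node index into the tail bits of the lock word, which is also why the BUILD_BUG_ON earlier requires CONFIG_NR_CPUS to fit in _Q_TAIL_CPU_BITS. A sketch modeled on the upstream helper:

static inline __pure u32 encode_tail(int cpu, int idx)
{
	u32 tail;

	tail  = (cpu + 1) << _Q_TAIL_CPU_OFFSET;	/* 0 means "no tail" */
	tail |= idx << _Q_TAIL_IDX_OFFSET;		/* idx is 0..3 */

	return tail;
}

decode_tail() performs the inverse mapping back to the owning CPU's rqnodes entry.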
@@ -251,7 +213,6 @@ void __lockfunc resilient_queued_spin_lock_slowpath(rqspinlock_t *lock, u32 val)

 	node->locked = 0;
 	node->next = NULL;
-	pv_init_node(node);

 	/*
 	 * We touched a (possibly) cold cacheline in the per-cpu queue node;
@@ -288,7 +249,6 @@ void __lockfunc resilient_queued_spin_lock_slowpath(rqspinlock_t *lock, u32 val)
 	/* Link @node into the waitqueue. */
 	WRITE_ONCE(prev->next, node);

-	pv_wait_node(node, prev);
 	arch_mcs_spin_lock_contended(&node->locked);

 	/*
@@ -312,23 +272,9 @@ void __lockfunc resilient_queued_spin_lock_slowpath(rqspinlock_t *lock, u32 val)
 	 * store-release that clears the locked bit and create lock
 	 * sequentiality; this is because the set_locked() function below
 	 * does not imply a full barrier.
-	 *
-	 * The PV pv_wait_head_or_lock function, if active, will acquire
-	 * the lock and return a non-zero value. So we have to skip the
-	 * atomic_cond_read_acquire() call. As the next PV queue head hasn't
-	 * been designated yet, there is no way for the locked value to become
-	 * _Q_SLOW_VAL. So both the set_locked() and the
-	 * atomic_cmpxchg_relaxed() calls will be safe.
-	 *
-	 * If PV isn't active, 0 will be returned instead.
-	 *
 	 */
-	if ((val = pv_wait_head_or_lock(lock, node)))
-		goto locked;
-
 	val = atomic_cond_read_acquire(&lock->val, !(VAL & _Q_LOCKED_PENDING_MASK));

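atomic_cond_read_acquire() is smp_cond_load_acquire() on the atomic's counter: it spins until the condition on VAL holds and then provides acquire ordering. The wait above is therefore roughly equivalent to this open-coded sketch:

	for (;;) {
		val = atomic_read_acquire(&lock->val);
		if (!(val & _Q_LOCKED_PENDING_MASK))
			break;		/* owner gone and no pending bit */
		cpu_relax();
	}

(The real macro keeps the loop reads relaxed and issues the acquire ordering once at the end.)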
-locked:
 	/*
 	 * claim the lock:
 	 *
@@ -341,11 +287,6 @@ void __lockfunc resilient_queued_spin_lock_slowpath(rqspinlock_t *lock, u32 val)
 	 */

 	/*
-	 * In the PV case we might already have _Q_LOCKED_VAL set, because
-	 * of lock stealing; therefore we must also allow:
-	 *
-	 * n,0,1 -> 0,0,1
-	 *
 	 * Note: at this point: (val & _Q_PENDING_MASK) == 0, because of the
 	 * above wait condition, therefore any concurrent setting of
 	 * PENDING will make the uncontended transition fail.
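The transition described here is carried out, in the upstream qspinlock code this file is derived from, by a single try_cmpxchg when we are the last queued waiter, with a plain set_locked() fallback when another waiter is already queued behind us. A sketch of that claim step:

	if ((val & _Q_TAIL_MASK) == tail) {
		/* n,0,0 -> 0,0,1: take the lock and retire the queue */
		if (atomic_try_cmpxchg_relaxed(&lock->val, &val, _Q_LOCKED_VAL))
			goto release;
	}

	/* *,*,0 -> *,*,1: someone queued behind us, only set the locked byte */
	set_locked(lock);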
@@ -369,7 +310,6 @@ void __lockfunc resilient_queued_spin_lock_slowpath(rqspinlock_t *lock, u32 val)
 	next = smp_cond_load_relaxed(&node->next, (VAL));

 	arch_mcs_spin_unlock_contended(&next->locked);
-	pv_kick_node(lock, next);

 release:
 	trace_contention_end(lock, 0);
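The hand-off above is the classic MCS pairing: the waiter spun on its own node->locked in arch_mcs_spin_lock_contended(), and the departing owner releases it with arch_mcs_spin_unlock_contended(). The generic definitions in kernel/locking/mcs_spinlock.h are roughly:

#define arch_mcs_spin_lock_contended(l)					\
do {									\
	smp_cond_load_acquire(l, VAL);	/* wait for our turn */		\
} while (0)

#define arch_mcs_spin_unlock_contended(l)				\
	smp_store_release((l), 1)	/* hand the lock to the waiter spinning on @l */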
@@ -380,32 +320,3 @@ void __lockfunc resilient_queued_spin_lock_slowpath(rqspinlock_t *lock, u32 val)
 	__this_cpu_dec(rqnodes[0].mcs.count);
 }
 EXPORT_SYMBOL_GPL(resilient_queued_spin_lock_slowpath);
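For completeness, this slowpath only runs after a fast-path cmpxchg on the lock word has failed. A hypothetical caller, modeled on queued_spin_lock(); the name res_spin_lock() and the exact fast-path shape are assumptions, not part of this patch:

static __always_inline void res_spin_lock(rqspinlock_t *lock)
{
	int val = 0;

	/* Uncontended: 0,0,0 -> 0,0,1 */
	if (likely(atomic_try_cmpxchg_acquire(&lock->val, &val, _Q_LOCKED_VAL)))
		return;

	/* Contended: hand the observed lock word to the slowpath */
	resilient_queued_spin_lock_slowpath(lock, val);
}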
-
-/*
- * Generate the paravirt code for resilient_queued_spin_unlock_slowpath().
- */
-#if !defined(_GEN_PV_LOCK_SLOWPATH) && defined(CONFIG_PARAVIRT_SPINLOCKS)
-#define _GEN_PV_LOCK_SLOWPATH
-
-#undef pv_enabled
-#define pv_enabled()	true
-
-#undef pv_init_node
-#undef pv_wait_node
-#undef pv_kick_node
-#undef pv_wait_head_or_lock
-
-#undef resilient_queued_spin_lock_slowpath
-#define resilient_queued_spin_lock_slowpath	__pv_resilient_queued_spin_lock_slowpath
-
-#include "../locking/qspinlock_paravirt.h"
-#include "rqspinlock.c"
-
-bool nopvspin;
-static __init int parse_nopvspin(char *arg)
-{
-	nopvspin = true;
-	return 0;
-}
-early_param("nopvspin", parse_nopvspin);
-#endif