@@ -16,7 +16,8 @@ struct qnode {
 	struct qnode	*next;
 	struct qspinlock *lock;
 	int		cpu;
-	int		yield_cpu;
+	u8		sleepy; /* 1 if the previous vCPU was preempted or
+				 * if the previous node was sleepy */
 	u8		locked; /* 1 if lock acquired */
 };
 
@@ -349,7 +350,7 @@ static __always_inline bool yield_head_to_locked_owner(struct qspinlock *lock, u
 	return __yield_to_locked_owner(lock, val, paravirt, mustq);
 }
 
-static __always_inline void propagate_yield_cpu(struct qnode *node, u32 val, int *set_yield_cpu, bool paravirt)
+static __always_inline void propagate_sleepy(struct qnode *node, u32 val, bool *set_sleepy, bool paravirt)
 {
 	struct qnode *next;
 	int owner;
@@ -358,29 +359,24 @@ static __always_inline void propagate_yield_cpu(struct qnode *node, u32 val, int
 		return;
 	if (!pv_yield_propagate_owner)
 		return;
-
-	owner = get_owner_cpu(val);
-	if (*set_yield_cpu == owner)
+	if (*set_sleepy)
 		return;
 
 	next = READ_ONCE(node->next);
 	if (!next)
 		return;
 
+	owner = get_owner_cpu(val);
 	if (vcpu_is_preempted(owner)) {
-		next->yield_cpu = owner;
-		*set_yield_cpu = owner;
-	} else if (*set_yield_cpu != -1) {
-		next->yield_cpu = owner;
-		*set_yield_cpu = owner;
+		next->sleepy = 1;
+		*set_sleepy = true;
 	}
 }
 
 /* Called inside spin_begin() */
 static __always_inline bool yield_to_prev(struct qspinlock *lock, struct qnode *node, int prev_cpu, bool paravirt)
 {
 	u32 yield_count;
-	int yield_cpu;
 	bool preempted = false;
 
 	if (!paravirt)
@@ -389,36 +385,32 @@ static __always_inline bool yield_to_prev(struct qspinlock *lock, struct qnode *
 	if (!pv_yield_propagate_owner)
 		goto yield_prev;
 
-	yield_cpu = READ_ONCE(node->yield_cpu);
-	if (yield_cpu == -1) {
-		/* Propagate back the -1 CPU */
-		if (node->next && node->next->yield_cpu != -1)
-			node->next->yield_cpu = yield_cpu;
+	if (!READ_ONCE(node->sleepy)) {
+		/* Propagate back sleepy==false */
+		if (node->next && node->next->sleepy)
+			node->next->sleepy = 0;
 		goto yield_prev;
-	}
-
-	yield_count = yield_count_of(yield_cpu);
-	if ((yield_count & 1) == 0)
-		goto yield_prev; /* owner vcpu is running */
-
-	if (get_owner_cpu(READ_ONCE(lock->val)) != yield_cpu)
-		goto yield_prev; /* re-sample lock owner */
-
-	spin_end();
-
-	preempted = true;
-	seen_sleepy_node();
-
-	smp_rmb();
+	} else {
+		u32 val = READ_ONCE(lock->val);
+
+		if (val & _Q_LOCKED_VAL) {
+			if (node->next && !node->next->sleepy) {
+				/*
+				 * Propagate sleepy to next waiter. Only if
+				 * owner is preempted, which allows the queue
+				 * to become "non-sleepy" if vCPU preemption
+				 * ceases to occur, even if the lock remains
+				 * highly contended.
+				 */
+				if (vcpu_is_preempted(get_owner_cpu(val)))
+					node->next->sleepy = 1;
+			}
 
-	if (yield_cpu == node->yield_cpu) {
-		if (node->next && node->next->yield_cpu != yield_cpu)
-			node->next->yield_cpu = yield_cpu;
-		yield_to_preempted(yield_cpu, yield_count);
-		spin_begin();
-		return preempted;
+			preempted = yield_to_locked_owner(lock, val, paravirt);
+			if (preempted)
+				return preempted;
+		}
 	}
-	spin_begin();
 
 yield_prev:
 	if (!pv_yield_prev)
@@ -541,7 +533,7 @@ static __always_inline void queued_spin_lock_mcs_queue(struct qspinlock *lock, b
 	bool sleepy = false;
 	bool mustq = false;
 	int idx;
-	int set_yield_cpu = -1;
+	bool set_sleepy = false;
 	int iters = 0;
 
 	BUILD_BUG_ON(CONFIG_NR_CPUS >= (1U << _Q_TAIL_CPU_BITS));
@@ -565,7 +557,7 @@ static __always_inline void queued_spin_lock_mcs_queue(struct qspinlock *lock, b
 	node->next = NULL;
 	node->lock = lock;
 	node->cpu = smp_processor_id();
-	node->yield_cpu = -1;
+	node->sleepy = 0;
 	node->locked = 0;
 
 	tail = encode_tail_cpu(node->cpu);
@@ -599,9 +591,9 @@ static __always_inline void queued_spin_lock_mcs_queue(struct qspinlock *lock, b
 	spec_barrier();
 	spin_end();
 
-	/* Clear out stale propagated yield_cpu */
-	if (paravirt && pv_yield_propagate_owner && node->yield_cpu != -1)
-		node->yield_cpu = -1;
+	/* Clear out stale propagated sleepy */
+	if (paravirt && pv_yield_propagate_owner && node->sleepy)
+		node->sleepy = 0;
 
 	smp_rmb(); /* acquire barrier for the mcs lock */
 
@@ -644,7 +636,7 @@ static __always_inline void queued_spin_lock_mcs_queue(struct qspinlock *lock, b
 		}
 	}
 
-	propagate_yield_cpu(node, val, &set_yield_cpu, paravirt);
+	propagate_sleepy(node, val, &set_sleepy, paravirt);
 	preempted = yield_head_to_locked_owner(lock, val, paravirt);
 	if (!maybe_stealers)
 		continue;
0 commit comments