@@ -256,16 +256,13 @@ static inline bool rwsem_read_trylock(struct rw_semaphore *sem, long *cntp)
 static inline bool rwsem_write_trylock(struct rw_semaphore *sem)
 {
 	long tmp = RWSEM_UNLOCKED_VALUE;
-	bool ret = false;
 
-	preempt_disable();
 	if (atomic_long_try_cmpxchg_acquire(&sem->count, &tmp, RWSEM_WRITER_LOCKED)) {
 		rwsem_set_owner(sem);
-		ret = true;
+		return true;
 	}
 
-	preempt_enable();
-	return ret;
+	return false;
 }
 
 /*
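With `__down_write_common()` further down taking over the `preempt_disable()`/`preempt_enable()` bracket, the trylock above shrinks to a bare acquire-CAS with early returns, which also drops the `ret` local. A minimal C11 sketch of the resulting split, with toy names and no-op preemption macros (nothing here is the kernel's actual code):

```c
#include <stdatomic.h>
#include <stdbool.h>

/* Illustrative no-op stand-ins; the real primitives manage a per-CPU
 * preempt count and rescheduling. */
#define preempt_disable()	do { } while (0)
#define preempt_enable()	do { } while (0)

#define WRITER_LOCKED	1L

/* Fast path only: a single acquire CAS with early exits, no preemption
 * handling of its own. */
static bool write_trylock(atomic_long *cnt)
{
	long unlocked = 0;

	return atomic_compare_exchange_strong_explicit(cnt, &unlocked,
						       WRITER_LOCKED,
						       memory_order_acquire,
						       memory_order_relaxed);
}

/* The caller owns one preempt_disable()/preempt_enable() bracket, so the
 * trylock and any slowpath after it run as a single non-preemptible unit. */
static int down_write_common(atomic_long *cnt)
{
	int ret = 0;

	preempt_disable();
	if (!write_trylock(cnt))
		ret = -1;	/* a real slowpath would wait here */
	preempt_enable();
	return ret;
}

int main(void)
{
	atomic_long sem = 0;

	return down_write_common(&sem);	/* 0: acquired uncontended */
}
```

Keeping the bracket in exactly one caller makes it much harder to unbalance the preempt count when the fast path and the slowpath are composed.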
@@ -624,18 +621,16 @@ static inline bool rwsem_try_write_lock(struct rw_semaphore *sem,
 	 	 */
 		if (first->handoff_set && (waiter != first))
 			return false;
-
-		/*
-		 * First waiter can inherit a previously set handoff
-		 * bit and spin on rwsem if lock acquisition fails.
-		 */
-		if (waiter == first)
-			waiter->handoff_set = true;
 	}
 
 	new = count;
 
 	if (count & RWSEM_LOCK_MASK) {
+		/*
+		 * A waiter (first or not) can set the handoff bit
+		 * if it is an RT task or has waited in the wait
+		 * queue for too long.
+		 */
 		if (has_handoff || (!rt_task(waiter->task) &&
 				    !time_after(jiffies, waiter->timeout)))
 			return false;
@@ -651,11 +646,12 @@ static inline bool rwsem_try_write_lock(struct rw_semaphore *sem,
 	} while (!atomic_long_try_cmpxchg_acquire(&sem->count, &count, new));
 
 	/*
-	 * We have either acquired the lock with handoff bit cleared or
-	 * set the handoff bit.
+	 * We have either acquired the lock with handoff bit cleared or set
+	 * the handoff bit. Only the first waiter can have its handoff_set
+	 * set here to enable optimistic spinning in the slowpath loop.
 	 */
 	if (new & RWSEM_FLAG_HANDOFF) {
-		waiter->handoff_set = true;
+		first->handoff_set = true;
 		lockevent_inc(rwsem_wlock_handoff);
 		return false;
 	}
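The `do`/`while` above is the standard acquire-CAS retry idiom: on failure, `atomic_long_try_cmpxchg_acquire()` refreshes `count` with the value it actually found, so every iteration recomputes `new` from fresh state and the lock-or-handoff decision stays atomic. A compileable C11 analogue of that shape (bit layout and names invented for illustration):

```c
#include <stdatomic.h>
#include <stdbool.h>

/* Invented bit layout, for illustration only. */
#define LOCK_MASK	0x3L
#define WRITER_LOCKED	0x1L
#define FLAG_HANDOFF	0x4L

/* Take the lock when it is free, or set the handoff flag when it is
 * still owned -- one atomic step either way. The weak compare-exchange
 * reloads `count` on failure, so each retry sees fresh state. */
static bool try_write_lock_or_handoff(atomic_long *cnt)
{
	long count = atomic_load_explicit(cnt, memory_order_relaxed);
	long new;

	do {
		if (count & LOCK_MASK)
			new = count | FLAG_HANDOFF;
		else
			new = (count | WRITER_LOCKED) & ~FLAG_HANDOFF;
	} while (!atomic_compare_exchange_weak_explicit(cnt, &count, new,
							memory_order_acquire,
							memory_order_relaxed));

	return !(count & LOCK_MASK);	/* true: lock taken, handoff cleared */
}
```

Note the behavioral fix riding along in this hunk: `handoff_set` is now recorded in `first` rather than `waiter`, matching the rule that only the first waiter may keep spinning in the slowpath.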
@@ -717,7 +713,6 @@ static inline bool rwsem_can_spin_on_owner(struct rw_semaphore *sem)
 		return false;
 	}
 
-	preempt_disable();
 	/*
 	 * Disabling preemption is equivalent to the RCU read-side critical
 	 * section, thus the task_struct structure won't go away.
@@ -729,7 +724,6 @@ static inline bool rwsem_can_spin_on_owner(struct rw_semaphore *sem)
 	if ((flags & RWSEM_NONSPINNABLE) ||
 	    (owner && !(flags & RWSEM_READER_OWNED) && !owner_on_cpu(owner)))
 		ret = false;
-	preempt_enable();
 
 	lockevent_cond_inc(rwsem_opt_fail, !ret);
 	return ret;
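The comment this hunk leads into (typos fixed above) relies on a real guarantee: since the RCU flavor consolidation, a preempt-disabled region counts as an RCU read-side critical section, so the owner's `task_struct` cannot be freed while it is inspected. The function now simply inherits that protection from its preempt-disabled callers instead of opening its own window. A hypothetical C11 sketch of the owner check it guards, using a packed owner word and invented flag bits:

```c
#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

#define OWNER_READER_OWNED	0x1UL	/* invented flag bits */
#define OWNER_NONSPINNABLE	0x2UL
#define OWNER_FLAG_MASK		(OWNER_READER_OWNED | OWNER_NONSPINNABLE)

struct task { _Atomic bool on_cpu; };

/* Decode a packed owner word into pointer and flags, mirroring the shape
 * of rwsem_owner_flags(). The caller must keep the task alive for the
 * duration -- exactly what the surrounding non-preemptible region does. */
static struct task *owner_and_flags(atomic_uintptr_t *word, uintptr_t *flags)
{
	uintptr_t v = atomic_load_explicit(word, memory_order_relaxed);

	*flags = v & OWNER_FLAG_MASK;
	return (struct task *)(v & ~OWNER_FLAG_MASK);
}

static bool can_spin(atomic_uintptr_t *word)
{
	uintptr_t flags;
	struct task *owner = owner_and_flags(word, &flags);

	if (flags & OWNER_NONSPINNABLE)
		return false;
	/* Spinning only pays off while a writer owner is actually running. */
	if (owner && !(flags & OWNER_READER_OWNED) &&
	    !atomic_load_explicit(&owner->on_cpu, memory_order_relaxed))
		return false;
	return true;
}
```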
@@ -829,8 +823,6 @@ static bool rwsem_optimistic_spin(struct rw_semaphore *sem)
 	int loop = 0;
 	u64 rspin_threshold = 0;
 
-	preempt_disable();
-
 	/* sem->wait_lock should not be held when doing optimistic spinning */
 	if (!osq_lock(&sem->osq))
 		goto done;
@@ -938,7 +930,6 @@ static bool rwsem_optimistic_spin(struct rw_semaphore *sem)
 	}
 	osq_unlock(&sem->osq);
 done:
-	preempt_enable();
 	lockevent_cond_inc(rwsem_opt_fail, !taken);
 	return taken;
 }
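The pairs removed from `rwsem_optimistic_spin()` here (and from `rwsem_can_spin_on_owner()` above) are not dropped; they migrate into the `__down_*()` entry points below. Spinning is only useful while the spinner itself stays on its CPU, so the whole spin must remain non-preemptible end to end. For reference, a stripped-down C11 model of spinning on an owner (illustrative only; the real code layers OSQ queueing, `need_resched()` checks, and reader spin-time limits on top):

```c
#include <stdatomic.h>
#include <stdbool.h>

struct task { _Atomic bool on_cpu; };

/* Spin while one particular owner keeps running; give up the moment it
 * is scheduled out or the lock changes hands. */
static bool spin_on_owner(_Atomic(struct task *) *owner_word)
{
	struct task *owner = atomic_load_explicit(owner_word,
						  memory_order_acquire);

	while (owner &&
	       atomic_load_explicit(owner_word, memory_order_relaxed) == owner) {
		if (!atomic_load_explicit(&owner->on_cpu, memory_order_relaxed))
			return false;	/* owner off CPU: stop wasting cycles */
	}
	return true;	/* lock was released or changed hands */
}
```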
@@ -1092,7 +1083,7 @@ rwsem_down_read_slowpath(struct rw_semaphore *sem, long count, unsigned int stat
 			/* Ordered by sem->wait_lock against rwsem_mark_wake(). */
 			break;
 		}
-		schedule();
+		schedule_preempt_disabled();
 		lockevent_inc(rwsem_sleep_reader);
 	}
 
@@ -1179,15 +1170,12 @@ rwsem_down_write_slowpath(struct rw_semaphore *sem, int state)
 		if (waiter.handoff_set) {
 			enum owner_state owner_state;
 
-			preempt_disable();
 			owner_state = rwsem_spin_on_owner(sem);
-			preempt_enable();
-
 			if (owner_state == OWNER_NULL)
 				goto trylock_again;
 		}
 
-		schedule();
+		schedule_preempt_disabled();
 		lockevent_inc(rwsem_sleep_writer);
 		set_current_state(state);
 trylock_again:
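Because both slowpaths now sleep inside their caller's preempt-disabled region, a bare `schedule()` would trip the scheduler's atomicity checks; the calls become `schedule_preempt_disabled()`, which exists for exactly this pattern. As I understand its implementation in kernel/sched/core.c, it is roughly:

```c
/* Roughly the shape of schedule_preempt_disabled() (paraphrased from
 * memory): drop the preempt count without triggering a reschedule,
 * sleep, then come back with preemption disabled again. */
void schedule_preempt_disabled(void)
{
	sched_preempt_enable_no_resched();
	schedule();
	preempt_disable();
}
```

So the waiter re-enables preemption only for the duration of the sleep and wakes up already non-preemptible, keeping the bracket balanced without scattering `preempt_enable()`/`preempt_disable()` pairs around every sleep.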
@@ -1254,14 +1242,20 @@ static struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem)
  */
 static inline int __down_read_common(struct rw_semaphore *sem, int state)
 {
+	int ret = 0;
 	long count;
 
+	preempt_disable();
 	if (!rwsem_read_trylock(sem, &count)) {
-		if (IS_ERR(rwsem_down_read_slowpath(sem, count, state)))
-			return -EINTR;
+		if (IS_ERR(rwsem_down_read_slowpath(sem, count, state))) {
+			ret = -EINTR;
+			goto out;
+		}
 		DEBUG_RWSEMS_WARN_ON(!is_rwsem_reader_owned(sem), sem);
 	}
-	return 0;
+out:
+	preempt_enable();
+	return ret;
 }
 
 static inline void __down_read(struct rw_semaphore *sem)
@@ -1281,32 +1275,39 @@ static inline int __down_read_killable(struct rw_semaphore *sem)
 
 static inline int __down_read_trylock(struct rw_semaphore *sem)
 {
+	int ret = 0;
 	long tmp;
 
 	DEBUG_RWSEMS_WARN_ON(sem->magic != sem, sem);
 
+	preempt_disable();
 	tmp = atomic_long_read(&sem->count);
 	while (!(tmp & RWSEM_READ_FAILED_MASK)) {
 		if (atomic_long_try_cmpxchg_acquire(&sem->count, &tmp,
 						    tmp + RWSEM_READER_BIAS)) {
 			rwsem_set_reader_owned(sem);
-			return 1;
+			ret = 1;
+			break;
 		}
 	}
-	return 0;
+	preempt_enable();
+	return ret;
 }
 
 /*
  * lock for writing
  */
 static inline int __down_write_common(struct rw_semaphore *sem, int state)
 {
+	int ret = 0;
+
+	preempt_disable();
 	if (unlikely(!rwsem_write_trylock(sem))) {
 		if (IS_ERR(rwsem_down_write_slowpath(sem, state)))
-			return -EINTR;
+			ret = -EINTR;
 	}
-
-	return 0;
+	preempt_enable();
+	return ret;
 }
 
 static inline void __down_write(struct rw_semaphore *sem)
@@ -1321,8 +1322,14 @@ static inline int __down_write_killable(struct rw_semaphore *sem)
 
 static inline int __down_write_trylock(struct rw_semaphore *sem)
 {
+	int ret;
+
+	preempt_disable();
 	DEBUG_RWSEMS_WARN_ON(sem->magic != sem, sem);
-	return rwsem_write_trylock(sem);
+	ret = rwsem_write_trylock(sem);
+	preempt_enable();
+
+	return ret;
 }
 
 /*
@@ -1335,6 +1342,7 @@ static inline void __up_read(struct rw_semaphore *sem)
 	DEBUG_RWSEMS_WARN_ON(sem->magic != sem, sem);
 	DEBUG_RWSEMS_WARN_ON(!is_rwsem_reader_owned(sem), sem);
 
+	preempt_disable();
 	rwsem_clear_reader_owned(sem);
 	tmp = atomic_long_add_return_release(-RWSEM_READER_BIAS, &sem->count);
 	DEBUG_RWSEMS_WARN_ON(tmp < 0, sem);
@@ -1343,6 +1351,7 @@ static inline void __up_read(struct rw_semaphore *sem)
 		clear_nonspinnable(sem);
 		rwsem_wake(sem);
 	}
+	preempt_enable();
 }
 
 /*
@@ -1363,9 +1372,9 @@ static inline void __up_write(struct rw_semaphore *sem)
 	preempt_disable();
 	rwsem_clear_owner(sem);
 	tmp = atomic_long_fetch_add_release(-RWSEM_WRITER_LOCKED, &sem->count);
-	preempt_enable();
 	if (unlikely(tmp & RWSEM_FLAG_WAITERS))
 		rwsem_wake(sem);
+	preempt_enable();
 }
 
 /*
@@ -1383,11 +1392,13 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
 	 * write side. As such, rely on RELEASE semantics.
 	 */
 	DEBUG_RWSEMS_WARN_ON(rwsem_owner(sem) != current, sem);
+	preempt_disable();
 	tmp = atomic_long_fetch_add_release(
 		-RWSEM_WRITER_LOCKED+RWSEM_READER_BIAS, &sem->count);
 	rwsem_set_reader_owned(sem);
 	if (tmp & RWSEM_FLAG_WAITERS)
 		rwsem_downgrade_wake(sem);
+	preempt_enable();
 }
 
 #else /* !CONFIG_PREEMPT_RT */
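The downgrade path gets the same bracket, and the pre-existing comment explains the memory-ordering half: the writer bit is traded for one reader bias in a single release RMW, so everything written inside the write-side critical section is published before the reader state becomes visible. A minimal C11 rendering of that step (invented bit values):

```c
#include <stdatomic.h>

/* Invented bit layout, for illustration only. */
#define WRITER_LOCKED	0x1L
#define READER_BIAS	0x100L

/* Swap writer ownership for one reader count in a single RMW with
 * release semantics: the write-side critical section is published
 * before any CPU can observe the reader state. */
static long downgrade(atomic_long *cnt)
{
	return atomic_fetch_add_explicit(cnt, -WRITER_LOCKED + READER_BIAS,
					 memory_order_release);
}
```

The old value returned by the RMW is what gets tested for waiters, exactly as the hunk above checks `RWSEM_FLAG_WAITERS` before calling `rwsem_downgrade_wake()`.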
@@ -1662,6 +1673,12 @@ void down_read_non_owner(struct rw_semaphore *sem)
 {
 	might_sleep();
 	__down_read(sem);
+	/*
+	 * The owner value for a reader-owned lock is mostly for debugging
+	 * purposes only and is not critical to the correct functioning of
+	 * rwsem. So it is perfectly fine to set it in a preempt-enabled
+	 * context here.
+	 */
 	__rwsem_set_reader_owned(sem, NULL);
 }
 EXPORT_SYMBOL(down_read_non_owner);
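The new comment pins down the invariant that makes this call safe outside the preemption bracket: for reader-owned locks, `sem->owner` is advisory, consumed only by debug checks. A small hypothetical sketch of why such a racy, preemptible update is harmless (name and flag bit invented):

```c
#include <stdatomic.h>
#include <stdint.h>

#define READER_OWNED	0x1UL	/* invented flag bit */

/* For readers the owner word is a debugging breadcrumb, not lock state:
 * one relaxed store, racy by design between concurrent readers. Losing
 * the race cannot corrupt the lock, which is why doing this with
 * preemption enabled is fine. */
static void set_reader_owned(atomic_uintptr_t *owner_word, const void *task)
{
	atomic_store_explicit(owner_word,
			      (uintptr_t)task | READER_OWNED,
			      memory_order_relaxed);
}
```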