@@ -254,7 +254,7 @@ enum evict_result {
 
 typedef enum evict_result (*le_predicate)(struct lru_entry *le, void *context);
 
-static struct lru_entry *lru_evict(struct lru *lru, le_predicate pred, void *context)
+static struct lru_entry *lru_evict(struct lru *lru, le_predicate pred, void *context, bool no_sleep)
 {
 	unsigned long tested = 0;
 	struct list_head *h = lru->cursor;
@@ -295,7 +295,8 @@ static struct lru_entry *lru_evict(struct lru *lru, le_predicate pred, void *con
 
 		h = h->next;
 
-		cond_resched();
+		if (!no_sleep)
+			cond_resched();
 	}
 
 	return NULL;
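The new no_sleep parameter exists because lru_evict() runs with a tree lock held, and that lock may now be a rwlock_t taken with bottom halves disabled; cond_resched() may sleep, so it is only legal on the rw_semaphore path. A reduced sketch of the rule, with hypothetical names (demo_cache, demo_walk):

#include <linux/rwsem.h>
#include <linux/sched.h>
#include <linux/spinlock.h>

struct demo_cache {			/* hypothetical stand-in for dm_buffer_cache */
	bool no_sleep;
	rwlock_t rlock;
	struct rw_semaphore rwsem;
};

static void demo_walk(struct demo_cache *dc)
{
	if (dc->no_sleep) {
		read_lock_bh(&dc->rlock);
		/* walk entries; atomic context, never reschedule */
		read_unlock_bh(&dc->rlock);
	} else {
		down_read(&dc->rwsem);
		/* walk entries */
		cond_resched();	/* legal: rwsem holders may sleep */
		up_read(&dc->rwsem);
	}
}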
@@ -382,7 +383,10 @@ struct dm_buffer {
  */
 
 struct buffer_tree {
-	struct rw_semaphore lock;
+	union {
+		struct rw_semaphore lock;
+		rwlock_t spinlock;
+	} u;
 	struct rb_root root;
 } ____cacheline_aligned_in_smp;
 
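Overlaying the two lock types in a union keeps struct buffer_tree the same size whichever flavor a client uses; only one member is ever initialized, and the choice is fixed for the client's lifetime, so nothing can mix down_read() with read_lock_bh() on the same tree. A minimal illustration (names hypothetical):

#include <linux/rwsem.h>
#include <linux/spinlock.h>

union demo_tree_lock {
	struct rw_semaphore lock;	/* sleepable clients */
	rwlock_t spinlock;		/* DM_BUFIO_CLIENT_NO_SLEEP clients */
};
/* sizeof(union demo_tree_lock) is the size of the larger member
 * (the rw_semaphore), not the sum of both. */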
@@ -393,32 +397,47 @@ struct dm_buffer_cache {
 	 * on the locks.
 	 */
 	unsigned int num_locks;
+	bool no_sleep;
 	struct buffer_tree trees[];
 };
 
+static DEFINE_STATIC_KEY_FALSE(no_sleep_enabled);
+
 static inline unsigned int cache_index(sector_t block, unsigned int num_locks)
 {
 	return dm_hash_locks_index(block, num_locks);
 }
 
 static inline void cache_read_lock(struct dm_buffer_cache *bc, sector_t block)
 {
-	down_read(&bc->trees[cache_index(block, bc->num_locks)].lock);
+	if (static_branch_unlikely(&no_sleep_enabled) && bc->no_sleep)
+		read_lock_bh(&bc->trees[cache_index(block, bc->num_locks)].u.spinlock);
+	else
+		down_read(&bc->trees[cache_index(block, bc->num_locks)].u.lock);
 }
 
 static inline void cache_read_unlock(struct dm_buffer_cache *bc, sector_t block)
 {
-	up_read(&bc->trees[cache_index(block, bc->num_locks)].lock);
+	if (static_branch_unlikely(&no_sleep_enabled) && bc->no_sleep)
+		read_unlock_bh(&bc->trees[cache_index(block, bc->num_locks)].u.spinlock);
+	else
+		up_read(&bc->trees[cache_index(block, bc->num_locks)].u.lock);
 }
 
 static inline void cache_write_lock(struct dm_buffer_cache *bc, sector_t block)
 {
-	down_write(&bc->trees[cache_index(block, bc->num_locks)].lock);
+	if (static_branch_unlikely(&no_sleep_enabled) && bc->no_sleep)
+		write_lock_bh(&bc->trees[cache_index(block, bc->num_locks)].u.spinlock);
+	else
+		down_write(&bc->trees[cache_index(block, bc->num_locks)].u.lock);
 }
 
 static inline void cache_write_unlock(struct dm_buffer_cache *bc, sector_t block)
 {
-	up_write(&bc->trees[cache_index(block, bc->num_locks)].lock);
+	if (static_branch_unlikely(&no_sleep_enabled) && bc->no_sleep)
+		write_unlock_bh(&bc->trees[cache_index(block, bc->num_locks)].u.spinlock);
+	else
+		up_write(&bc->trees[cache_index(block, bc->num_locks)].u.lock);
 }
 
 /*
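Every helper gates on static_branch_unlikely(&no_sleep_enabled) before even reading bc->no_sleep, so clients that never pass DM_BUFIO_CLIENT_NO_SLEEP pay only a patched-out jump on the fast path. The key is reference-counted as no-sleep clients come and go; a reduced sketch of that pairing, assuming it is done at client create/destroy as dm-bufio does:

/* at client creation, when (flags & DM_BUFIO_CLIENT_NO_SLEEP) */
static_branch_inc(&no_sleep_enabled);	/* patch the branches in */

/* at destruction of that client */
static_branch_dec(&no_sleep_enabled);	/* last no-sleep client patches them out */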
@@ -442,18 +461,32 @@ static void lh_init(struct lock_history *lh, struct dm_buffer_cache *cache, bool
 
 static void __lh_lock(struct lock_history *lh, unsigned int index)
 {
-	if (lh->write)
-		down_write(&lh->cache->trees[index].lock);
-	else
-		down_read(&lh->cache->trees[index].lock);
+	if (lh->write) {
+		if (static_branch_unlikely(&no_sleep_enabled) && lh->cache->no_sleep)
+			write_lock_bh(&lh->cache->trees[index].u.spinlock);
+		else
+			down_write(&lh->cache->trees[index].u.lock);
+	} else {
+		if (static_branch_unlikely(&no_sleep_enabled) && lh->cache->no_sleep)
+			read_lock_bh(&lh->cache->trees[index].u.spinlock);
+		else
+			down_read(&lh->cache->trees[index].u.lock);
+	}
 }
 
 static void __lh_unlock(struct lock_history *lh, unsigned int index)
 {
-	if (lh->write)
-		up_write(&lh->cache->trees[index].lock);
-	else
-		up_read(&lh->cache->trees[index].lock);
+	if (lh->write) {
+		if (static_branch_unlikely(&no_sleep_enabled) && lh->cache->no_sleep)
+			write_unlock_bh(&lh->cache->trees[index].u.spinlock);
+		else
+			up_write(&lh->cache->trees[index].u.lock);
+	} else {
+		if (static_branch_unlikely(&no_sleep_enabled) && lh->cache->no_sleep)
+			read_unlock_bh(&lh->cache->trees[index].u.spinlock);
+		else
+			up_read(&lh->cache->trees[index].u.lock);
+	}
 }
 
 /*
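The _bh lock flavors matter because a no-sleep client (dm-verity's try_verify_in_tasklet mode is the motivating user) can take these locks from softirq context. Disabling bottom halves while holding the lock in task context closes the window where a softirq on the same CPU could interrupt the holder and spin forever on the lock it holds. A minimal sketch of the safe pattern, with a hypothetical lock:

#include <linux/spinlock.h>

static DEFINE_RWLOCK(demo_lock);	/* hypothetical */

static void demo_task_context_update(void)
{
	/* write_lock_bh() disables BHs first, so a softirq user of
	 * demo_lock cannot preempt us mid-critical-section on this
	 * CPU and deadlock waiting for a lock we still hold. */
	write_lock_bh(&demo_lock);
	/* ... modify shared state ... */
	write_unlock_bh(&demo_lock);
}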
@@ -502,14 +535,18 @@ static struct dm_buffer *list_to_buffer(struct list_head *l)
 	return le_to_buffer(le);
 }
 
-static void cache_init(struct dm_buffer_cache *bc, unsigned int num_locks)
+static void cache_init(struct dm_buffer_cache *bc, unsigned int num_locks, bool no_sleep)
 {
 	unsigned int i;
 
 	bc->num_locks = num_locks;
+	bc->no_sleep = no_sleep;
 
 	for (i = 0; i < bc->num_locks; i++) {
-		init_rwsem(&bc->trees[i].lock);
+		if (no_sleep)
+			rwlock_init(&bc->trees[i].u.spinlock);
+		else
+			init_rwsem(&bc->trees[i].u.lock);
 		bc->trees[i].root = RB_ROOT;
 	}
 
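cache_init() establishes the invariant the lock helpers rely on: exactly one union member is initialized per tree, and bc->no_sleep records which, so every later lock call branches on that same flag rather than on any per-tree state. Restated as a reduced sketch (hypothetical helper name):

static void demo_tree_write_lock(struct buffer_tree *t, bool no_sleep)
{
	if (no_sleep)
		write_lock_bh(&t->u.spinlock);	/* set up by rwlock_init() */
	else
		down_write(&t->u.lock);		/* set up by init_rwsem() */
}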
@@ -648,7 +685,7 @@ static struct dm_buffer *__cache_evict(struct dm_buffer_cache *bc, int list_mode
 	struct lru_entry *le;
 	struct dm_buffer *b;
 
-	le = lru_evict(&bc->lru[list_mode], __evict_pred, &w);
+	le = lru_evict(&bc->lru[list_mode], __evict_pred, &w, bc->no_sleep);
 	if (!le)
 		return NULL;
 
@@ -702,7 +739,7 @@ static void __cache_mark_many(struct dm_buffer_cache *bc, int old_mode, int new_
 	struct evict_wrapper w = {.lh = lh, .pred = pred, .context = context};
 
 	while (true) {
-		le = lru_evict(&bc->lru[old_mode], __evict_pred, &w, bc->no_sleep);
+		le = lru_evict(&bc->lru[old_mode], __evict_pred, &w, bc->no_sleep);
 		if (!le)
 			break;
 
@@ -915,10 +952,11 @@ static void cache_remove_range(struct dm_buffer_cache *bc,
 {
 	unsigned int i;
 
+	BUG_ON(bc->no_sleep);
 	for (i = 0; i < bc->num_locks; i++) {
-		down_write(&bc->trees[i].lock);
+		down_write(&bc->trees[i].u.lock);
 		__remove_range(bc, &bc->trees[i].root, begin, end, pred, release);
-		up_write(&bc->trees[i].lock);
+		up_write(&bc->trees[i].u.lock);
 	}
 }
 
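cache_remove_range() keeps the unconditional down_write()/up_write() pair, so it is only valid for sleepable clients; the new BUG_ON() asserts that no no-sleep client ever reaches it rather than teaching one more path to branch on the lock flavor. A softer variant of the same guard, shown purely as an illustration:

if (WARN_ON_ONCE(bc->no_sleep))
	return;	/* refuse rather than touch the wrong union member */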
@@ -979,8 +1017,6 @@ struct dm_bufio_client {
 	struct dm_buffer_cache cache; /* must be last member */
 };
 
-static DEFINE_STATIC_KEY_FALSE(no_sleep_enabled);
-
 /*----------------------------------------------------------------*/
 
 #define dm_bufio_in_request()	(!!current->bio_list)
@@ -1871,7 +1907,8 @@ static void *new_read(struct dm_bufio_client *c, sector_t block,
 	if (need_submit)
 		submit_io(b, REQ_OP_READ, read_endio);
 
-	wait_on_bit_io(&b->state, B_READING, TASK_UNINTERRUPTIBLE);
+	if (nf != NF_GET)	/* we already tested this condition above */
+		wait_on_bit_io(&b->state, B_READING, TASK_UNINTERRUPTIBLE);
 
 	if (b->read_error) {
 		int error = blk_status_to_errno(b->read_error);
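For NF_GET the wait is redundant: the NF_GET lookup earlier in new_read() already bails out while the buffer is still under read, so B_READING is known to be clear by the time execution reaches this point. That earlier check is roughly of this shape (paraphrased from the surrounding code, not part of this hunk):

if (nf == NF_GET && unlikely(test_bit_acquire(B_READING, &b->state)))
	return NULL;	/* dm_bufio_get() only returns already-valid buffers */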
@@ -2421,7 +2458,7 @@ struct dm_bufio_client *dm_bufio_client_create(struct block_device *bdev, unsign
 		r = -ENOMEM;
 		goto bad_client;
 	}
-	cache_init(&c->cache, num_locks);
+	cache_init(&c->cache, num_locks, (flags & DM_BUFIO_CLIENT_NO_SLEEP) != 0);
 
 	c->bdev = bdev;
 	c->block_size = block_size;
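cache_init() now derives its no_sleep argument straight from the client flags, so opting in is a create-time decision fixed for the client's lifetime. A hypothetical caller, sketched under the assumption of a 4 KiB block size with no aux data or callbacks:

struct dm_bufio_client *c;

c = dm_bufio_client_create(bdev, 4096, 1, 0, NULL, NULL,
			   DM_BUFIO_CLIENT_NO_SLEEP);
if (IS_ERR(c))
	return PTR_ERR(c);
/* every buffer_tree in c->cache now uses rwlock_t with _bh locking */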