@@ -40,10 +40,15 @@ static const char dm_snapshot_merge_target_name[] = "snapshot-merge";
/*
 * Hash a chunk number into the tracked-chunk hash table.  Masks with
 * (size - 1), so DM_TRACKED_CHUNK_HASH_SIZE must be a power of two.
 *
 * NOTE: the scraped text had a space between the macro name and '(',
 * which would have defined an object-like macro; restored the
 * function-like form.
 */
#define DM_TRACKED_CHUNK_HASH(x)	((unsigned long)(x) & \
					 (DM_TRACKED_CHUNK_HASH_SIZE - 1))
4242
43+ struct dm_hlist_head {
44+ struct hlist_head head ;
45+ spinlock_t lock ;
46+ };
47+
4348struct dm_exception_table {
4449 uint32_t hash_mask ;
4550 unsigned int hash_shift ;
46- struct hlist_bl_head * table ;
51+ struct dm_hlist_head * table ;
4752};
4853
4954struct dm_snapshot {
@@ -628,8 +633,8 @@ static uint32_t exception_hash(struct dm_exception_table *et, chunk_t chunk);
628633
629634/* Lock to protect access to the completed and pending exception hash tables. */
630635struct dm_exception_table_lock {
631- struct hlist_bl_head * complete_slot ;
632- struct hlist_bl_head * pending_slot ;
636+ spinlock_t * complete_slot ;
637+ spinlock_t * pending_slot ;
633638};
634639
635640static void dm_exception_table_lock_init (struct dm_snapshot * s , chunk_t chunk ,
@@ -638,20 +643,20 @@ static void dm_exception_table_lock_init(struct dm_snapshot *s, chunk_t chunk,
638643 struct dm_exception_table * complete = & s -> complete ;
639644 struct dm_exception_table * pending = & s -> pending ;
640645
641- lock -> complete_slot = & complete -> table [exception_hash (complete , chunk )];
642- lock -> pending_slot = & pending -> table [exception_hash (pending , chunk )];
646+ lock -> complete_slot = & complete -> table [exception_hash (complete , chunk )]. lock ;
647+ lock -> pending_slot = & pending -> table [exception_hash (pending , chunk )]. lock ;
643648}
644649
645650static void dm_exception_table_lock (struct dm_exception_table_lock * lock )
646651{
647- hlist_bl_lock (lock -> complete_slot );
648- hlist_bl_lock (lock -> pending_slot );
652+ spin_lock_nested (lock -> complete_slot , 1 );
653+ spin_lock_nested (lock -> pending_slot , 2 );
649654}
650655
651656static void dm_exception_table_unlock (struct dm_exception_table_lock * lock )
652657{
653- hlist_bl_unlock (lock -> pending_slot );
654- hlist_bl_unlock (lock -> complete_slot );
658+ spin_unlock (lock -> pending_slot );
659+ spin_unlock (lock -> complete_slot );
655660}
656661
657662static int dm_exception_table_init (struct dm_exception_table * et ,
@@ -661,30 +666,33 @@ static int dm_exception_table_init(struct dm_exception_table *et,
661666
662667 et -> hash_shift = hash_shift ;
663668 et -> hash_mask = size - 1 ;
664- et -> table = kvmalloc_array (size , sizeof (struct hlist_bl_head ),
669+ et -> table = kvmalloc_array (size , sizeof (struct dm_hlist_head ),
665670 GFP_KERNEL );
666671 if (!et -> table )
667672 return - ENOMEM ;
668673
669- for (i = 0 ; i < size ; i ++ )
670- INIT_HLIST_BL_HEAD (et -> table + i );
674+ for (i = 0 ; i < size ; i ++ ) {
675+ INIT_HLIST_HEAD (& et -> table [i ].head );
676+ spin_lock_init (& et -> table [i ].lock );
677+ }
671678
672679 return 0 ;
673680}
674681
675682static void dm_exception_table_exit (struct dm_exception_table * et ,
676683 struct kmem_cache * mem )
677684{
678- struct hlist_bl_head * slot ;
685+ struct dm_hlist_head * slot ;
679686 struct dm_exception * ex ;
680- struct hlist_bl_node * pos , * n ;
687+ struct hlist_node * pos ;
681688 int i , size ;
682689
683690 size = et -> hash_mask + 1 ;
684691 for (i = 0 ; i < size ; i ++ ) {
685692 slot = et -> table + i ;
686693
687- hlist_bl_for_each_entry_safe (ex , pos , n , slot , hash_list ) {
694+ hlist_for_each_entry_safe (ex , pos , & slot -> head , hash_list ) {
695+ hlist_del (& ex -> hash_list );
688696 kmem_cache_free (mem , ex );
689697 cond_resched ();
690698 }
@@ -700,7 +708,7 @@ static uint32_t exception_hash(struct dm_exception_table *et, chunk_t chunk)
700708
701709static void dm_remove_exception (struct dm_exception * e )
702710{
703- hlist_bl_del (& e -> hash_list );
711+ hlist_del (& e -> hash_list );
704712}
705713
706714/*
@@ -710,12 +718,11 @@ static void dm_remove_exception(struct dm_exception *e)
710718static struct dm_exception * dm_lookup_exception (struct dm_exception_table * et ,
711719 chunk_t chunk )
712720{
713- struct hlist_bl_head * slot ;
714- struct hlist_bl_node * pos ;
721+ struct hlist_head * slot ;
715722 struct dm_exception * e ;
716723
717- slot = & et -> table [exception_hash (et , chunk )];
718- hlist_bl_for_each_entry ( e , pos , slot , hash_list )
724+ slot = & et -> table [exception_hash (et , chunk )]. head ;
725+ hlist_for_each_entry ( e , slot , hash_list )
719726 if (chunk >= e -> old_chunk &&
720727 chunk <= e -> old_chunk + dm_consecutive_chunk_count (e ))
721728 return e ;
@@ -762,18 +769,17 @@ static void free_pending_exception(struct dm_snap_pending_exception *pe)
762769static void dm_insert_exception (struct dm_exception_table * eh ,
763770 struct dm_exception * new_e )
764771{
765- struct hlist_bl_head * l ;
766- struct hlist_bl_node * pos ;
772+ struct hlist_head * l ;
767773 struct dm_exception * e = NULL ;
768774
769- l = & eh -> table [exception_hash (eh , new_e -> old_chunk )];
775+ l = & eh -> table [exception_hash (eh , new_e -> old_chunk )]. head ;
770776
771777 /* Add immediately if this table doesn't support consecutive chunks */
772778 if (!eh -> hash_shift )
773779 goto out ;
774780
775781 /* List is ordered by old_chunk */
776- hlist_bl_for_each_entry ( e , pos , l , hash_list ) {
782+ hlist_for_each_entry ( e , l , hash_list ) {
777783 /* Insert after an existing chunk? */
778784 if (new_e -> old_chunk == (e -> old_chunk +
779785 dm_consecutive_chunk_count (e ) + 1 ) &&
@@ -804,13 +810,13 @@ static void dm_insert_exception(struct dm_exception_table *eh,
804810 * Either the table doesn't support consecutive chunks or slot
805811 * l is empty.
806812 */
807- hlist_bl_add_head (& new_e -> hash_list , l );
813+ hlist_add_head (& new_e -> hash_list , l );
808814 } else if (new_e -> old_chunk < e -> old_chunk ) {
809815 /* Add before an existing exception */
810- hlist_bl_add_before (& new_e -> hash_list , & e -> hash_list );
816+ hlist_add_before (& new_e -> hash_list , & e -> hash_list );
811817 } else {
812818 /* Add to l's tail: e is the last exception in this slot */
813- hlist_bl_add_behind (& new_e -> hash_list , & e -> hash_list );
819+ hlist_add_behind (& new_e -> hash_list , & e -> hash_list );
814820 }
815821}
816822
@@ -820,7 +826,6 @@ static void dm_insert_exception(struct dm_exception_table *eh,
820826 */
821827static int dm_add_exception (void * context , chunk_t old , chunk_t new )
822828{
823- struct dm_exception_table_lock lock ;
824829 struct dm_snapshot * s = context ;
825830 struct dm_exception * e ;
826831
@@ -833,17 +838,7 @@ static int dm_add_exception(void *context, chunk_t old, chunk_t new)
833838 /* Consecutive_count is implicitly initialised to zero */
834839 e -> new_chunk = new ;
835840
836- /*
837- * Although there is no need to lock access to the exception tables
838- * here, if we don't then hlist_bl_add_head(), called by
839- * dm_insert_exception(), will complain about accessing the
840- * corresponding list without locking it first.
841- */
842- dm_exception_table_lock_init (s , old , & lock );
843-
844- dm_exception_table_lock (& lock );
845841 dm_insert_exception (& s -> complete , e );
846- dm_exception_table_unlock (& lock );
847842
848843 return 0 ;
849844}
@@ -873,7 +868,7 @@ static int calc_max_buckets(void)
873868 /* use a fixed size of 2MB */
874869 unsigned long mem = 2 * 1024 * 1024 ;
875870
876- mem /= sizeof (struct hlist_bl_head );
871+ mem /= sizeof (struct dm_hlist_head );
877872
878873 return mem ;
879874}
0 commit comments