Skip to content

Commit 8581b19

Browse files
author
Mikulas Patocka
committed
dm-snapshot: fix 'scheduling while atomic' on real-time kernels
There is a reported 'scheduling while atomic' bug when using dm-snapshot on real-time kernels. The reason for the bug is that the hlist_bl code does preempt_disable() when taking the lock, and the kernel then attempts to take other spinlocks while holding the hlist_bl lock. Fix this by converting the hlist_bl spinlock into a regular spinlock. Signed-off-by: Mikulas Patocka <mpatocka@redhat.com> Reported-by: Jiping Ma <jiping.ma2@windriver.com>
1 parent f4412c7 commit 8581b19

2 files changed

Lines changed: 35 additions & 40 deletions

File tree

drivers/md/dm-exception-store.h

Lines changed: 1 addition & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -29,7 +29,7 @@ typedef sector_t chunk_t;
2929
* chunk within the device.
3030
*/
3131
struct dm_exception {
32-
struct hlist_bl_node hash_list;
32+
struct hlist_node hash_list;
3333

3434
chunk_t old_chunk;
3535
chunk_t new_chunk;

drivers/md/dm-snap.c

Lines changed: 34 additions & 39 deletions
Original file line number | Diff line number | Diff line change
@@ -40,10 +40,15 @@ static const char dm_snapshot_merge_target_name[] = "snapshot-merge";
4040
#define DM_TRACKED_CHUNK_HASH(x) ((unsigned long)(x) & \
4141
(DM_TRACKED_CHUNK_HASH_SIZE - 1))
4242

43+
struct dm_hlist_head {
44+
struct hlist_head head;
45+
spinlock_t lock;
46+
};
47+
4348
struct dm_exception_table {
4449
uint32_t hash_mask;
4550
unsigned int hash_shift;
46-
struct hlist_bl_head *table;
51+
struct dm_hlist_head *table;
4752
};
4853

4954
struct dm_snapshot {
@@ -628,8 +633,8 @@ static uint32_t exception_hash(struct dm_exception_table *et, chunk_t chunk);
628633

629634
/* Lock to protect access to the completed and pending exception hash tables. */
630635
struct dm_exception_table_lock {
631-
struct hlist_bl_head *complete_slot;
632-
struct hlist_bl_head *pending_slot;
636+
spinlock_t *complete_slot;
637+
spinlock_t *pending_slot;
633638
};
634639

635640
static void dm_exception_table_lock_init(struct dm_snapshot *s, chunk_t chunk,
@@ -638,20 +643,20 @@ static void dm_exception_table_lock_init(struct dm_snapshot *s, chunk_t chunk,
638643
struct dm_exception_table *complete = &s->complete;
639644
struct dm_exception_table *pending = &s->pending;
640645

641-
lock->complete_slot = &complete->table[exception_hash(complete, chunk)];
642-
lock->pending_slot = &pending->table[exception_hash(pending, chunk)];
646+
lock->complete_slot = &complete->table[exception_hash(complete, chunk)].lock;
647+
lock->pending_slot = &pending->table[exception_hash(pending, chunk)].lock;
643648
}
644649

645650
static void dm_exception_table_lock(struct dm_exception_table_lock *lock)
646651
{
647-
hlist_bl_lock(lock->complete_slot);
648-
hlist_bl_lock(lock->pending_slot);
652+
spin_lock_nested(lock->complete_slot, 1);
653+
spin_lock_nested(lock->pending_slot, 2);
649654
}
650655

651656
static void dm_exception_table_unlock(struct dm_exception_table_lock *lock)
652657
{
653-
hlist_bl_unlock(lock->pending_slot);
654-
hlist_bl_unlock(lock->complete_slot);
658+
spin_unlock(lock->pending_slot);
659+
spin_unlock(lock->complete_slot);
655660
}
656661

657662
static int dm_exception_table_init(struct dm_exception_table *et,
@@ -661,30 +666,33 @@ static int dm_exception_table_init(struct dm_exception_table *et,
661666

662667
et->hash_shift = hash_shift;
663668
et->hash_mask = size - 1;
664-
et->table = kvmalloc_array(size, sizeof(struct hlist_bl_head),
669+
et->table = kvmalloc_array(size, sizeof(struct dm_hlist_head),
665670
GFP_KERNEL);
666671
if (!et->table)
667672
return -ENOMEM;
668673

669-
for (i = 0; i < size; i++)
670-
INIT_HLIST_BL_HEAD(et->table + i);
674+
for (i = 0; i < size; i++) {
675+
INIT_HLIST_HEAD(&et->table[i].head);
676+
spin_lock_init(&et->table[i].lock);
677+
}
671678

672679
return 0;
673680
}
674681

675682
static void dm_exception_table_exit(struct dm_exception_table *et,
676683
struct kmem_cache *mem)
677684
{
678-
struct hlist_bl_head *slot;
685+
struct dm_hlist_head *slot;
679686
struct dm_exception *ex;
680-
struct hlist_bl_node *pos, *n;
687+
struct hlist_node *pos;
681688
int i, size;
682689

683690
size = et->hash_mask + 1;
684691
for (i = 0; i < size; i++) {
685692
slot = et->table + i;
686693

687-
hlist_bl_for_each_entry_safe(ex, pos, n, slot, hash_list) {
694+
hlist_for_each_entry_safe(ex, pos, &slot->head, hash_list) {
695+
hlist_del(&ex->hash_list);
688696
kmem_cache_free(mem, ex);
689697
cond_resched();
690698
}
@@ -700,7 +708,7 @@ static uint32_t exception_hash(struct dm_exception_table *et, chunk_t chunk)
700708

701709
static void dm_remove_exception(struct dm_exception *e)
702710
{
703-
hlist_bl_del(&e->hash_list);
711+
hlist_del(&e->hash_list);
704712
}
705713

706714
/*
@@ -710,12 +718,11 @@ static void dm_remove_exception(struct dm_exception *e)
710718
static struct dm_exception *dm_lookup_exception(struct dm_exception_table *et,
711719
chunk_t chunk)
712720
{
713-
struct hlist_bl_head *slot;
714-
struct hlist_bl_node *pos;
721+
struct hlist_head *slot;
715722
struct dm_exception *e;
716723

717-
slot = &et->table[exception_hash(et, chunk)];
718-
hlist_bl_for_each_entry(e, pos, slot, hash_list)
724+
slot = &et->table[exception_hash(et, chunk)].head;
725+
hlist_for_each_entry(e, slot, hash_list)
719726
if (chunk >= e->old_chunk &&
720727
chunk <= e->old_chunk + dm_consecutive_chunk_count(e))
721728
return e;
@@ -762,18 +769,17 @@ static void free_pending_exception(struct dm_snap_pending_exception *pe)
762769
static void dm_insert_exception(struct dm_exception_table *eh,
763770
struct dm_exception *new_e)
764771
{
765-
struct hlist_bl_head *l;
766-
struct hlist_bl_node *pos;
772+
struct hlist_head *l;
767773
struct dm_exception *e = NULL;
768774

769-
l = &eh->table[exception_hash(eh, new_e->old_chunk)];
775+
l = &eh->table[exception_hash(eh, new_e->old_chunk)].head;
770776

771777
/* Add immediately if this table doesn't support consecutive chunks */
772778
if (!eh->hash_shift)
773779
goto out;
774780

775781
/* List is ordered by old_chunk */
776-
hlist_bl_for_each_entry(e, pos, l, hash_list) {
782+
hlist_for_each_entry(e, l, hash_list) {
777783
/* Insert after an existing chunk? */
778784
if (new_e->old_chunk == (e->old_chunk +
779785
dm_consecutive_chunk_count(e) + 1) &&
@@ -804,13 +810,13 @@ static void dm_insert_exception(struct dm_exception_table *eh,
804810
* Either the table doesn't support consecutive chunks or slot
805811
* l is empty.
806812
*/
807-
hlist_bl_add_head(&new_e->hash_list, l);
813+
hlist_add_head(&new_e->hash_list, l);
808814
} else if (new_e->old_chunk < e->old_chunk) {
809815
/* Add before an existing exception */
810-
hlist_bl_add_before(&new_e->hash_list, &e->hash_list);
816+
hlist_add_before(&new_e->hash_list, &e->hash_list);
811817
} else {
812818
/* Add to l's tail: e is the last exception in this slot */
813-
hlist_bl_add_behind(&new_e->hash_list, &e->hash_list);
819+
hlist_add_behind(&new_e->hash_list, &e->hash_list);
814820
}
815821
}
816822

@@ -820,7 +826,6 @@ static void dm_insert_exception(struct dm_exception_table *eh,
820826
*/
821827
static int dm_add_exception(void *context, chunk_t old, chunk_t new)
822828
{
823-
struct dm_exception_table_lock lock;
824829
struct dm_snapshot *s = context;
825830
struct dm_exception *e;
826831

@@ -833,17 +838,7 @@ static int dm_add_exception(void *context, chunk_t old, chunk_t new)
833838
/* Consecutive_count is implicitly initialised to zero */
834839
e->new_chunk = new;
835840

836-
/*
837-
* Although there is no need to lock access to the exception tables
838-
* here, if we don't then hlist_bl_add_head(), called by
839-
* dm_insert_exception(), will complain about accessing the
840-
* corresponding list without locking it first.
841-
*/
842-
dm_exception_table_lock_init(s, old, &lock);
843-
844-
dm_exception_table_lock(&lock);
845841
dm_insert_exception(&s->complete, e);
846-
dm_exception_table_unlock(&lock);
847842

848843
return 0;
849844
}
@@ -873,7 +868,7 @@ static int calc_max_buckets(void)
873868
/* use a fixed size of 2MB */
874869
unsigned long mem = 2 * 1024 * 1024;
875870

876-
mem /= sizeof(struct hlist_bl_head);
871+
mem /= sizeof(struct dm_hlist_head);
877872

878873
return mem;
879874
}

0 commit comments

Comments
 (0)