Skip to content

Commit e273634

Browse files
committed
dm: factor out dm_io_complete
Factor the bio-completion logic out of dm_io_dec_pending() into a new dm_io_complete() helper. This also optimizes dm_io_dec_pending() slightly by avoiding local variables on its fast path. Signed-off-by: Mike Snitzer <snitzer@redhat.com>
1 parent 69596f5 commit e273634

1 file changed

Lines changed: 77 additions & 72 deletions

File tree

drivers/md/dm.c

Lines changed: 77 additions & 72 deletions
Original file line numberDiff line numberDiff line change
@@ -841,89 +841,94 @@ static int __noflush_suspending(struct mapped_device *md)
841841
return test_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
842842
}
843843

844+
static void dm_io_complete(struct dm_io *io)
845+
{
846+
blk_status_t io_error;
847+
struct mapped_device *md = io->md;
848+
struct bio *bio = io->orig_bio;
849+
850+
if (io->status == BLK_STS_DM_REQUEUE) {
851+
unsigned long flags;
852+
/*
853+
* Target requested pushing back the I/O.
854+
*/
855+
spin_lock_irqsave(&md->deferred_lock, flags);
856+
if (__noflush_suspending(md) &&
857+
!WARN_ON_ONCE(dm_is_zone_write(md, bio))) {
858+
/* NOTE early return due to BLK_STS_DM_REQUEUE below */
859+
bio_list_add_head(&md->deferred, bio);
860+
} else {
861+
/*
862+
* noflush suspend was interrupted or this is
863+
* a write to a zoned target.
864+
*/
865+
io->status = BLK_STS_IOERR;
866+
}
867+
spin_unlock_irqrestore(&md->deferred_lock, flags);
868+
}
869+
870+
io_error = io->status;
871+
if (io->was_accounted)
872+
dm_end_io_acct(io, bio);
873+
else if (!io_error) {
874+
/*
875+
* Must handle target that DM_MAPIO_SUBMITTED only to
876+
* then bio_endio() rather than dm_submit_bio_remap()
877+
*/
878+
__dm_start_io_acct(io, bio);
879+
dm_end_io_acct(io, bio);
880+
}
881+
free_io(io);
882+
smp_wmb();
883+
this_cpu_dec(*md->pending_io);
884+
885+
/* nudge anyone waiting on suspend queue */
886+
if (unlikely(wq_has_sleeper(&md->wait)))
887+
wake_up(&md->wait);
888+
889+
if (io_error == BLK_STS_DM_REQUEUE) {
890+
/*
891+
* Upper layer won't help us poll split bio, io->orig_bio
892+
* may only reflect a subset of the pre-split original,
893+
* so clear REQ_POLLED in case of requeue
894+
*/
895+
bio->bi_opf &= ~REQ_POLLED;
896+
return;
897+
}
898+
899+
if (bio_is_flush_with_data(bio)) {
900+
/*
901+
* Preflush done for flush with data, reissue
902+
* without REQ_PREFLUSH.
903+
*/
904+
bio->bi_opf &= ~REQ_PREFLUSH;
905+
queue_io(md, bio);
906+
} else {
907+
/* done with normal IO or empty flush */
908+
if (io_error)
909+
bio->bi_status = io_error;
910+
bio_endio(bio);
911+
}
912+
}
913+
844914
/*
845915
* Decrements the number of outstanding ios that a bio has been
846916
* cloned into, completing the original io if necc.
847917
*/
848918
void dm_io_dec_pending(struct dm_io *io, blk_status_t error)
849919
{
850-
unsigned long flags;
851-
blk_status_t io_error;
852-
struct bio *bio;
853-
struct mapped_device *md = io->md;
854-
855920
/* Push-back supersedes any I/O errors */
856921
if (unlikely(error)) {
922+
unsigned long flags;
857923
spin_lock_irqsave(&io->endio_lock, flags);
858-
if (!(io->status == BLK_STS_DM_REQUEUE && __noflush_suspending(md)))
924+
if (!(io->status == BLK_STS_DM_REQUEUE &&
925+
__noflush_suspending(io->md)))
859926
io->status = error;
860927
spin_unlock_irqrestore(&io->endio_lock, flags);
861928
}
862929

863-
if (atomic_dec_and_test(&io->io_count)) {
864-
bio = io->orig_bio;
865-
if (io->status == BLK_STS_DM_REQUEUE) {
866-
/*
867-
* Target requested pushing back the I/O.
868-
*/
869-
spin_lock_irqsave(&md->deferred_lock, flags);
870-
if (__noflush_suspending(md) &&
871-
!WARN_ON_ONCE(dm_is_zone_write(md, bio))) {
872-
/* NOTE early return due to BLK_STS_DM_REQUEUE below */
873-
bio_list_add_head(&md->deferred, bio);
874-
} else {
875-
/*
876-
* noflush suspend was interrupted or this is
877-
* a write to a zoned target.
878-
*/
879-
io->status = BLK_STS_IOERR;
880-
}
881-
spin_unlock_irqrestore(&md->deferred_lock, flags);
882-
}
883-
884-
io_error = io->status;
885-
if (io->was_accounted)
886-
dm_end_io_acct(io, bio);
887-
else if (!io_error) {
888-
/*
889-
* Must handle target that DM_MAPIO_SUBMITTED only to
890-
* then bio_endio() rather than dm_submit_bio_remap()
891-
*/
892-
__dm_start_io_acct(io, bio);
893-
dm_end_io_acct(io, bio);
894-
}
895-
free_io(io);
896-
smp_wmb();
897-
this_cpu_dec(*md->pending_io);
898-
899-
/* nudge anyone waiting on suspend queue */
900-
if (unlikely(wq_has_sleeper(&md->wait)))
901-
wake_up(&md->wait);
902-
903-
if (io_error == BLK_STS_DM_REQUEUE) {
904-
/*
905-
* Upper layer won't help us poll split bio, io->orig_bio
906-
* may only reflect a subset of the pre-split original,
907-
* so clear REQ_POLLED in case of requeue
908-
*/
909-
bio->bi_opf &= ~REQ_POLLED;
910-
return;
911-
}
912-
913-
if (bio_is_flush_with_data(bio)) {
914-
/*
915-
* Preflush done for flush with data, reissue
916-
* without REQ_PREFLUSH.
917-
*/
918-
bio->bi_opf &= ~REQ_PREFLUSH;
919-
queue_io(md, bio);
920-
} else {
921-
/* done with normal IO or empty flush */
922-
if (io_error)
923-
bio->bi_status = io_error;
924-
bio_endio(bio);
925-
}
926-
}
930+
if (atomic_dec_and_test(&io->io_count))
931+
dm_io_complete(io);
927932
}
928933

929934
void disable_discard(struct mapped_device *md)
@@ -1562,7 +1567,7 @@ static void dm_split_and_process_bio(struct mapped_device *md,
15621567

15631568
if (bio->bi_opf & REQ_PREFLUSH) {
15641569
error = __send_empty_flush(&ci);
1565-
/* dm_io_dec_pending submits any data associated with flush */
1570+
/* dm_io_complete submits any data associated with flush */
15661571
goto out;
15671572
}
15681573

@@ -1575,7 +1580,7 @@ static void dm_split_and_process_bio(struct mapped_device *md,
15751580
* Remainder must be passed to submit_bio_noacct() so it gets handled
15761581
* *after* bios already submitted have been completely processed.
15771582
* We take a clone of the original to store in ci.io->orig_bio to be
1578-
* used by dm_end_io_acct() and for dm_io_dec_pending() to use for
1583+
* used by dm_end_io_acct() and for dm_io_complete() to use for
15791584
* completion handling.
15801585
*/
15811586
orig_bio = bio_split(bio, bio_sectors(bio) - ci.sector_count,

0 commit comments

Comments
 (0)