@@ -578,7 +578,7 @@ static struct dm_io *alloc_io(struct mapped_device *md, struct bio *bio)
 
         io = container_of(tio, struct dm_io, tio);
         io->magic = DM_IO_MAGIC;
-        io->status = 0;
+        io->status = BLK_STS_OK;
         atomic_set(&io->io_count, 1);
         this_cpu_inc(*md->pending_io);
         io->orig_bio = NULL;
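
BLK_STS_OK is defined as the zero value of blk_status_t, so this hunk does not change the stored value; it only initializes the field with the typed constant that the rest of the status handling uses. A minimal userspace sketch of the same idea, with a hypothetical my_status_t standing in for blk_status_t (the names below are illustrative, not kernel API):

#include <stdio.h>

/* Hypothetical stand-in for blk_status_t and its zero value. */
typedef unsigned char my_status_t;
#define MY_STS_OK ((my_status_t)0)

struct my_io {
        my_status_t status;
};

int main(void)
{
        struct my_io io;

        /* Same value as a bare 0, but the named constant documents the
         * field's type and intent at the initialization site. */
        io.status = MY_STS_OK;
        printf("status = %u\n", (unsigned)io.status);
        return 0;
}
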
@@ -933,20 +933,31 @@ static inline bool dm_tio_is_normal(struct dm_target_io *tio)
  * Decrements the number of outstanding ios that a bio has been
  * cloned into, completing the original io if necc.
  */
-void dm_io_dec_pending(struct dm_io *io, blk_status_t error)
+static inline void __dm_io_dec_pending(struct dm_io *io)
+{
+        if (atomic_dec_and_test(&io->io_count))
+                dm_io_complete(io);
+}
+
+static void dm_io_set_error(struct dm_io *io, blk_status_t error)
 {
+        unsigned long flags;
+
         /* Push-back supersedes any I/O errors */
-        if (unlikely(error)) {
-                unsigned long flags;
-                spin_lock_irqsave(&io->lock, flags);
-                if (!(io->status == BLK_STS_DM_REQUEUE &&
-                      __noflush_suspending(io->md)))
-                        io->status = error;
-                spin_unlock_irqrestore(&io->lock, flags);
+        spin_lock_irqsave(&io->lock, flags);
+        if (!(io->status == BLK_STS_DM_REQUEUE &&
+              __noflush_suspending(io->md))) {
+                io->status = error;
         }
+        spin_unlock_irqrestore(&io->lock, flags);
+}
 
-        if (atomic_dec_and_test(&io->io_count))
-                dm_io_complete(io);
+void dm_io_dec_pending(struct dm_io *io, blk_status_t error)
+{
+        if (unlikely(error))
+                dm_io_set_error(io, error);
+
+        __dm_io_dec_pending(io);
 }
 
 void disable_discard(struct mapped_device *md)
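
The old dm_io_dec_pending() body is split into two helpers: dm_io_set_error(), which records a status under io->lock unless a BLK_STS_DM_REQUEUE push-back during a noflush suspend must be preserved, and __dm_io_dec_pending(), which drops one reference and completes the io when the count reaches zero. A self-contained userspace sketch of that split, using C11 atomics and a pthread mutex in place of the kernel's atomic_t and spinlock (all names are hypothetical stand-ins, not the kernel's):

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

typedef unsigned char my_status_t;
#define MY_STS_OK       ((my_status_t)0)
#define MY_STS_IOERR    ((my_status_t)10)
#define MY_STS_REQUEUE  ((my_status_t)11)   /* stands in for BLK_STS_DM_REQUEUE */

struct my_io {
        atomic_int io_count;
        pthread_mutex_t lock;
        my_status_t status;
        bool noflush_suspending;   /* stands in for __noflush_suspending(md) */
};

static void my_io_complete(struct my_io *io)
{
        printf("io complete, status = %u\n", (unsigned)io->status);
}

/* Record an error, but never let it clobber a requeue push-back
 * while a noflush suspend is in progress. */
static void my_io_set_error(struct my_io *io, my_status_t error)
{
        pthread_mutex_lock(&io->lock);
        if (!(io->status == MY_STS_REQUEUE && io->noflush_suspending))
                io->status = error;
        pthread_mutex_unlock(&io->lock);
}

/* Drop one reference; the last one to drop completes the io. */
static void __my_io_dec_pending(struct my_io *io)
{
        if (atomic_fetch_sub(&io->io_count, 1) == 1)
                my_io_complete(io);
}

static void my_io_dec_pending(struct my_io *io, my_status_t error)
{
        if (error)
                my_io_set_error(io, error);

        __my_io_dec_pending(io);
}

int main(void)
{
        struct my_io io = {
                .lock = PTHREAD_MUTEX_INITIALIZER,
                .status = MY_STS_OK,
                .noflush_suspending = false,
        };

        atomic_init(&io.io_count, 2);           /* two clones share this io */

        my_io_dec_pending(&io, MY_STS_OK);      /* first clone completes cleanly */
        my_io_dec_pending(&io, MY_STS_IOERR);   /* last clone reports an error */
        return 0;
}

Factoring the reference drop into its own helper is what later lets the polling path complete an io without supplying a dummy error argument.
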
@@ -1428,7 +1439,7 @@ static bool is_abnormal_io(struct bio *bio)
 }
 
 static bool __process_abnormal_io(struct clone_info *ci, struct dm_target *ti,
-                                  int *result)
+                                  blk_status_t *status)
 {
         unsigned num_bios = 0;
 
@@ -1452,11 +1463,11 @@ static bool __process_abnormal_io(struct clone_info *ci, struct dm_target *ti,
          * reconfiguration might also have changed that since the
          * check was performed.
          */
-        if (!num_bios)
-                *result = -EOPNOTSUPP;
+        if (unlikely(!num_bios))
+                *status = BLK_STS_NOTSUPP;
         else {
                 __send_changing_extent_only(ci, ti, num_bios);
-                *result = 0;
+                *status = BLK_STS_OK;
         }
         return true;
 }
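
The helper's out-parameter now carries a blk_status_t (BLK_STS_NOTSUPP or BLK_STS_OK) instead of a kernel errno (-EOPNOTSUPP or 0), so the caller can propagate the value without converting it later. A compact sketch of the "handled flag plus typed status out-parameter" contract this relies on, with hypothetical names:

#include <stdbool.h>
#include <stdio.h>

typedef unsigned char my_status_t;
#define MY_STS_OK       ((my_status_t)0)
#define MY_STS_NOTSUPP  ((my_status_t)1)

enum my_op { OP_NORMAL, OP_DISCARD };

/* Returns false for normal I/O (the caller keeps processing it).
 * Returns true when the op was "abnormal" and handled here, with
 * *status reporting the outcome in the caller's own status type. */
static bool process_abnormal(enum my_op op, unsigned num_bios,
                             my_status_t *status)
{
        if (op == OP_NORMAL)
                return false;

        if (num_bios == 0)
                *status = MY_STS_NOTSUPP;   /* target cannot service this op */
        else
                *status = MY_STS_OK;        /* clones were dispatched */
        return true;
}

int main(void)
{
        my_status_t status = MY_STS_OK;

        if (process_abnormal(OP_DISCARD, 0, &status))
                printf("abnormal op handled, status = %u\n", (unsigned)status);
        return 0;
}
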
@@ -1505,19 +1516,16 @@ static void dm_queue_poll_io(struct bio *bio, struct dm_io *io)
 /*
  * Select the correct strategy for processing a non-flush bio.
  */
-static int __split_and_process_bio(struct clone_info *ci)
+static blk_status_t __split_and_process_bio(struct clone_info *ci)
 {
         struct bio *clone;
         struct dm_target *ti;
         unsigned len;
-        int r;
+        blk_status_t error = BLK_STS_IOERR;
 
         ti = dm_table_find_target(ci->map, ci->sector);
-        if (!ti)
-                return -EIO;
-
-        if (__process_abnormal_io(ci, ti, &r))
-                return r;
+        if (unlikely(!ti || __process_abnormal_io(ci, ti, &error)))
+                return error;
 
         /*
          * Only support bio polling for normal IO, and the target io is
@@ -1532,7 +1540,7 @@ static int __split_and_process_bio(struct clone_info *ci)
         ci->sector += len;
         ci->sector_count -= len;
 
-        return 0;
+        return BLK_STS_OK;
 }
 
 static void init_clone_info(struct clone_info *ci, struct mapped_device *md,
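
In the rewritten __split_and_process_bio() above, seeding error with BLK_STS_IOERR lets the two early exits share one return statement: when no target is found, the short-circuited || never calls __process_abnormal_io() and the seeded BLK_STS_IOERR is returned; when the helper does run and handles an abnormal op, it overwrites error with its own status first. A small self-contained sketch of that shape (hypothetical names throughout):

#include <stdbool.h>
#include <stdio.h>

typedef unsigned char my_status_t;
#define MY_STS_OK       ((my_status_t)0)
#define MY_STS_NOTSUPP  ((my_status_t)1)
#define MY_STS_IOERR    ((my_status_t)10)

struct my_target { unsigned num_bios; };

/* Pretend lookup: returns NULL when the sector maps to no target. */
static struct my_target *find_target(bool have_target)
{
        static struct my_target t = { .num_bios = 0 };
        return have_target ? &t : NULL;
}

/* Toy version that always treats the op as abnormal and handled. */
static bool process_abnormal(struct my_target *ti, my_status_t *status)
{
        *status = ti->num_bios ? MY_STS_OK : MY_STS_NOTSUPP;
        return true;
}

static my_status_t split_and_process(bool have_target)
{
        /* Seed with the "no target" outcome... */
        my_status_t error = MY_STS_IOERR;
        struct my_target *ti = find_target(have_target);

        /* ...so both early exits can return the same variable: the
         * short-circuit keeps the seed when ti is NULL, and the helper
         * overwrites it when it actually runs. */
        if (!ti || process_abnormal(ti, &error))
                return error;

        return MY_STS_OK;   /* the normal I/O path would continue here */
}

int main(void)
{
        printf("no target:   %u\n", (unsigned)split_and_process(false));
        printf("with target: %u\n", (unsigned)split_and_process(true));
        return 0;
}
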
@@ -1558,7 +1566,7 @@ static void dm_split_and_process_bio(struct mapped_device *md,
 {
         struct clone_info ci;
         struct bio *orig_bio = NULL;
-        int error = 0;
+        blk_status_t error = BLK_STS_OK;
 
         init_clone_info(&ci, md, map, bio);
 
@@ -1600,7 +1608,7 @@ static void dm_split_and_process_bio(struct mapped_device *md,
          * bio->bi_private, so that dm_poll_bio can poll them all.
          */
         if (error || !ci.submit_as_polled)
-                dm_io_dec_pending(ci.io, errno_to_blk_status(error));
+                dm_io_dec_pending(ci.io, error);
         else
                 dm_queue_poll_io(bio, ci.io);
 }
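
Because __split_and_process_bio() now hands back a blk_status_t directly, the errno_to_blk_status() call at this completion site becomes unnecessary. A hedged userspace sketch of the kind of errno-to-status translation that drops out once the typed status is carried end to end (the mapping below is illustrative, not the kernel's errno_to_blk_status() table):

#include <errno.h>
#include <stdio.h>

typedef unsigned char my_status_t;
#define MY_STS_OK       ((my_status_t)0)
#define MY_STS_NOTSUPP  ((my_status_t)1)
#define MY_STS_IOERR    ((my_status_t)10)

/* Hypothetical errno -> status translation, needed only while some
 * layers speak errno and others speak the typed status. */
static my_status_t errno_to_status(int err)
{
        switch (err) {
        case 0:
                return MY_STS_OK;
        case -EOPNOTSUPP:
                return MY_STS_NOTSUPP;
        default:
                return MY_STS_IOERR;
        }
}

/* Completion stub standing in for dm_io_dec_pending(). */
static void dec_pending(my_status_t status)
{
        printf("completing with status %u\n", (unsigned)status);
}

int main(void)
{
        /* Before: the worker returned an errno, so the caller converted. */
        int err = -EOPNOTSUPP;
        dec_pending(errno_to_status(err));

        /* After: the worker returns the status type directly. */
        my_status_t status = MY_STS_NOTSUPP;
        dec_pending(status);
        return 0;
}
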
@@ -1681,10 +1689,10 @@ static int dm_poll_bio(struct bio *bio, struct io_comp_batch *iob,
                 if (dm_poll_dm_io(io, iob, flags)) {
                         hlist_del_init(&io->node);
                         /*
-                         * clone_endio() has already occurred, so passing
-                         * error as 0 here doesn't override io->status
+                         * clone_endio() has already occurred, so no
+                         * error handling is needed here.
                          */
-                        dm_io_dec_pending(io, 0);
+                        __dm_io_dec_pending(io);
                 }
         }
 
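
In the polling path the clone's endio handler has already stored any error in io->status, so the final reference is dropped with the bare __dm_io_dec_pending() rather than routing a dummy 0 through dm_io_dec_pending(). A small sketch of that "poll, then drop the last reference without touching the recorded status" step, again with hypothetical names:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

typedef unsigned char my_status_t;
#define MY_STS_IOERR ((my_status_t)10)

struct my_io {
        atomic_int io_count;
        my_status_t status;   /* already filled in by the endio path */
        bool done;
};

static void my_io_complete(struct my_io *io)
{
        printf("io complete, status = %u\n", (unsigned)io->status);
}

/* Drop a reference without offering a new error: whatever the endio
 * handler stored in io->status is left untouched. */
static void __my_io_dec_pending(struct my_io *io)
{
        if (atomic_fetch_sub(&io->io_count, 1) == 1)
                my_io_complete(io);
}

static bool poll_io(struct my_io *io)
{
        return io->done;   /* stand-in for dm_poll_dm_io() */
}

int main(void)
{
        struct my_io io = { .status = MY_STS_IOERR, .done = true };

        atomic_init(&io.io_count, 1);
        if (poll_io(&io))
                __my_io_dec_pending(&io);   /* completes with the recorded error */
        return 0;
}
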