/*
 * End writeback on every folio attached to this ioend, then release it.
 *
 * The ioend embeds its (single) bio as ->io_bio, so one folio walk covers
 * all pages under writeback for this ioend; there is no longer a chain of
 * bios to traverse.  The final bio_put() also frees the ioend itself,
 * because the ioend is allocated in front of the bio from
 * iomap_ioend_bioset (see iomap_init()'s offsetof()).
 *
 * Returns the number of folios completed.
 */
static u32
iomap_finish_ioend(struct iomap_ioend *ioend, int error)
{
	struct inode *inode = ioend->io_inode;
	struct bio *bio = &ioend->io_bio;
	struct folio_iter fi;
	u32 folio_count = 0;

	/* walk all folios in bio, ending page IO on them */
	bio_for_each_folio_all(fi, bio) {
		iomap_finish_folio_write(inode, fi.folio, fi.length, error);
		folio_count++;
	}

	/* BIO_QUIET suppresses the (rate-limited) error report */
	if (unlikely(error && !bio_flagged(bio, BIO_QUIET))) {
		printk_ratelimited(KERN_ERR
"%s: writeback error on inode %lu, offset %lld, sector %llu",
			inode->i_sb->s_id, inode->i_ino,
			ioend->io_offset, ioend->io_sector);
	}
	bio_put(bio);	/* frees the ioend */
	return folio_count;
}
15181501
@@ -1553,7 +1536,7 @@ EXPORT_SYMBOL_GPL(iomap_finish_ioends);
15531536static bool
15541537iomap_ioend_can_merge (struct iomap_ioend * ioend , struct iomap_ioend * next )
15551538{
1556- if (ioend -> io_bio -> bi_status != next -> io_bio -> bi_status )
1539+ if (ioend -> io_bio . bi_status != next -> io_bio . bi_status )
15571540 return false;
15581541 if ((ioend -> io_flags & IOMAP_F_SHARED ) ^
15591542 (next -> io_flags & IOMAP_F_SHARED ))
@@ -1618,9 +1601,8 @@ EXPORT_SYMBOL_GPL(iomap_sort_ioends);
16181601
16191602static void iomap_writepage_end_bio (struct bio * bio )
16201603{
1621- struct iomap_ioend * ioend = bio -> bi_private ;
1622-
1623- iomap_finish_ioend (ioend , blk_status_to_errno (bio -> bi_status ));
1604+ iomap_finish_ioend (iomap_ioend_from_bio (bio ),
1605+ blk_status_to_errno (bio -> bi_status ));
16241606}
16251607
16261608/*
@@ -1635,9 +1617,6 @@ static int
16351617iomap_submit_ioend (struct iomap_writepage_ctx * wpc , struct iomap_ioend * ioend ,
16361618 int error )
16371619{
1638- ioend -> io_bio -> bi_private = ioend ;
1639- ioend -> io_bio -> bi_end_io = iomap_writepage_end_bio ;
1640-
16411620 if (wpc -> ops -> prepare_ioend )
16421621 error = wpc -> ops -> prepare_ioend (ioend , error );
16431622 if (error ) {
@@ -1647,12 +1626,12 @@ iomap_submit_ioend(struct iomap_writepage_ctx *wpc, struct iomap_ioend *ioend,
16471626 * as there is only one reference to the ioend at this point in
16481627 * time.
16491628 */
1650- ioend -> io_bio -> bi_status = errno_to_blk_status (error );
1651- bio_endio (ioend -> io_bio );
1629+ ioend -> io_bio . bi_status = errno_to_blk_status (error );
1630+ bio_endio (& ioend -> io_bio );
16521631 return error ;
16531632 }
16541633
1655- submit_bio (ioend -> io_bio );
1634+ submit_bio (& ioend -> io_bio );
16561635 return 0 ;
16571636}
16581637
@@ -1666,44 +1645,22 @@ static struct iomap_ioend *iomap_alloc_ioend(struct iomap_writepage_ctx *wpc,
16661645 REQ_OP_WRITE | wbc_to_write_flags (wbc ),
16671646 GFP_NOFS , & iomap_ioend_bioset );
16681647 bio -> bi_iter .bi_sector = iomap_sector (& wpc -> iomap , pos );
1648+ bio -> bi_end_io = iomap_writepage_end_bio ;
16691649 wbc_init_bio (wbc , bio );
16701650
1671- ioend = container_of (bio , struct iomap_ioend , io_inline_bio );
1651+ ioend = iomap_ioend_from_bio (bio );
16721652 INIT_LIST_HEAD (& ioend -> io_list );
16731653 ioend -> io_type = wpc -> iomap .type ;
16741654 ioend -> io_flags = wpc -> iomap .flags ;
16751655 ioend -> io_inode = inode ;
16761656 ioend -> io_size = 0 ;
16771657 ioend -> io_offset = pos ;
1678- ioend -> io_bio = bio ;
16791658 ioend -> io_sector = bio -> bi_iter .bi_sector ;
16801659
16811660 wpc -> nr_folios = 0 ;
16821661 return ioend ;
16831662}
16841663
1685- /*
1686- * Allocate a new bio, and chain the old bio to the new one.
1687- *
1688- * Note that we have to perform the chaining in this unintuitive order
1689- * so that the bi_private linkage is set up in the right direction for the
1690- * traversal in iomap_finish_ioend().
1691- */
1692- static struct bio *
1693- iomap_chain_bio (struct bio * prev )
1694- {
1695- struct bio * new ;
1696-
1697- new = bio_alloc (prev -> bi_bdev , BIO_MAX_VECS , prev -> bi_opf , GFP_NOFS );
1698- bio_clone_blkg_association (new , prev );
1699- new -> bi_iter .bi_sector = bio_end_sector (prev );
1700-
1701- bio_chain (prev , new );
1702- bio_get (prev ); /* for iomap_finish_ioend */
1703- submit_bio (prev );
1704- return new ;
1705- }
1706-
17071664static bool iomap_can_add_to_ioend (struct iomap_writepage_ctx * wpc , loff_t pos )
17081665{
17091666 if ((wpc -> iomap .flags & IOMAP_F_SHARED ) !=
@@ -1714,7 +1671,7 @@ static bool iomap_can_add_to_ioend(struct iomap_writepage_ctx *wpc, loff_t pos)
17141671 if (pos != wpc -> ioend -> io_offset + wpc -> ioend -> io_size )
17151672 return false;
17161673 if (iomap_sector (& wpc -> iomap , pos ) !=
1717- bio_end_sector (wpc -> ioend -> io_bio ))
1674+ bio_end_sector (& wpc -> ioend -> io_bio ))
17181675 return false;
17191676 /*
17201677 * Limit ioend bio chain lengths to minimise IO completion latency. This
@@ -1739,15 +1696,14 @@ static void iomap_add_to_ioend(struct iomap_writepage_ctx *wpc,
17391696 size_t poff = offset_in_folio (folio , pos );
17401697
17411698 if (!wpc -> ioend || !iomap_can_add_to_ioend (wpc , pos )) {
1699+ new_ioend :
17421700 if (wpc -> ioend )
17431701 list_add (& wpc -> ioend -> io_list , iolist );
17441702 wpc -> ioend = iomap_alloc_ioend (wpc , wbc , inode , pos );
17451703 }
17461704
1747- if (!bio_add_folio (wpc -> ioend -> io_bio , folio , len , poff )) {
1748- wpc -> ioend -> io_bio = iomap_chain_bio (wpc -> ioend -> io_bio );
1749- bio_add_folio_nofail (wpc -> ioend -> io_bio , folio , len , poff );
1750- }
1705+ if (!bio_add_folio (& wpc -> ioend -> io_bio , folio , len , poff ))
1706+ goto new_ioend ;
17511707
17521708 if (ifs )
17531709 atomic_add (len , & ifs -> write_bytes_pending );
@@ -1978,7 +1934,7 @@ EXPORT_SYMBOL_GPL(iomap_writepages);
19781934static int __init iomap_init (void )
19791935{
19801936 return bioset_init (& iomap_ioend_bioset , 4 * (PAGE_SIZE / SECTOR_SIZE ),
1981- offsetof(struct iomap_ioend , io_inline_bio ),
1937+ offsetof(struct iomap_ioend , io_bio ),
19821938 BIOSET_NEED_BVECS );
19831939}
19841940fs_initcall (iomap_init );
0 commit comments