1212#include <linux/buffer_head.h>
1313#include <linux/dax.h>
1414#include <linux/writeback.h>
15- #include <linux/list_sort.h>
1615#include <linux/swap.h>
1716#include <linux/bio.h>
1817#include <linux/sched/signal.h>
1918#include <linux/migrate.h>
19+ #include "internal.h"
2020#include "trace.h"
2121
2222#include "../internal.h"
2323
24- #define IOEND_BATCH_SIZE 4096
25-
2624/*
2725 * Structure allocated for each folio to track per-block uptodate, dirty state
2826 * and I/O completions.
@@ -40,9 +38,6 @@ struct iomap_folio_state {
4038 unsigned long state [];
4139};
4240
/*
 * bio_set used to allocate the bios embedded in buffered-I/O ioends
 * (initialized with offsetof(struct iomap_ioend, io_bio) in
 * iomap_buffered_init()).
 */
struct bio_set iomap_ioend_bioset;
EXPORT_SYMBOL_GPL(iomap_ioend_bioset);
4641static inline bool ifs_is_fully_uptodate (struct folio * folio ,
4742 struct iomap_folio_state * ifs )
4843{
@@ -1539,8 +1534,7 @@ static void iomap_finish_folio_write(struct inode *inode, struct folio *folio,
15391534 * state, release holds on bios, and finally free up memory. Do not use the
15401535 * ioend after this.
15411536 */
1542- static u32
1543- iomap_finish_ioend_buffered (struct iomap_ioend * ioend )
1537+ u32 iomap_finish_ioend_buffered (struct iomap_ioend * ioend )
15441538{
15451539 struct inode * inode = ioend -> io_inode ;
15461540 struct bio * bio = & ioend -> io_bio ;
@@ -1567,123 +1561,6 @@ iomap_finish_ioend_buffered(struct iomap_ioend *ioend)
15671561 return folio_count ;
15681562}
15691563
1570- static u32
1571- iomap_finish_ioend (struct iomap_ioend * ioend , int error )
1572- {
1573- if (ioend -> io_parent ) {
1574- struct bio * bio = & ioend -> io_bio ;
1575-
1576- ioend = ioend -> io_parent ;
1577- bio_put (bio );
1578- }
1579-
1580- if (error )
1581- cmpxchg (& ioend -> io_error , 0 , error );
1582-
1583- if (!atomic_dec_and_test (& ioend -> io_remaining ))
1584- return 0 ;
1585- return iomap_finish_ioend_buffered (ioend );
1586- }
1587-
1588- /*
1589- * Ioend completion routine for merged bios. This can only be called from task
1590- * contexts as merged ioends can be of unbound length. Hence we have to break up
1591- * the writeback completions into manageable chunks to avoid long scheduler
1592- * holdoffs. We aim to keep scheduler holdoffs down below 10ms so that we get
1593- * good batch processing throughput without creating adverse scheduler latency
1594- * conditions.
1595- */
1596- void
1597- iomap_finish_ioends (struct iomap_ioend * ioend , int error )
1598- {
1599- struct list_head tmp ;
1600- u32 completions ;
1601-
1602- might_sleep ();
1603-
1604- list_replace_init (& ioend -> io_list , & tmp );
1605- completions = iomap_finish_ioend (ioend , error );
1606-
1607- while (!list_empty (& tmp )) {
1608- if (completions > IOEND_BATCH_SIZE * 8 ) {
1609- cond_resched ();
1610- completions = 0 ;
1611- }
1612- ioend = list_first_entry (& tmp , struct iomap_ioend , io_list );
1613- list_del_init (& ioend -> io_list );
1614- completions += iomap_finish_ioend (ioend , error );
1615- }
1616- }
1617- EXPORT_SYMBOL_GPL (iomap_finish_ioends );
1618-
1619- /*
1620- * We can merge two adjacent ioends if they have the same set of work to do.
1621- */
1622- static bool
1623- iomap_ioend_can_merge (struct iomap_ioend * ioend , struct iomap_ioend * next )
1624- {
1625- if (ioend -> io_bio .bi_status != next -> io_bio .bi_status )
1626- return false;
1627- if (next -> io_flags & IOMAP_IOEND_BOUNDARY )
1628- return false;
1629- if ((ioend -> io_flags & IOMAP_IOEND_NOMERGE_FLAGS ) !=
1630- (next -> io_flags & IOMAP_IOEND_NOMERGE_FLAGS ))
1631- return false;
1632- if (ioend -> io_offset + ioend -> io_size != next -> io_offset )
1633- return false;
1634- /*
1635- * Do not merge physically discontiguous ioends. The filesystem
1636- * completion functions will have to iterate the physical
1637- * discontiguities even if we merge the ioends at a logical level, so
1638- * we don't gain anything by merging physical discontiguities here.
1639- *
1640- * We cannot use bio->bi_iter.bi_sector here as it is modified during
1641- * submission so does not point to the start sector of the bio at
1642- * completion.
1643- */
1644- if (ioend -> io_sector + (ioend -> io_size >> 9 ) != next -> io_sector )
1645- return false;
1646- return true;
1647- }
1648-
1649- void
1650- iomap_ioend_try_merge (struct iomap_ioend * ioend , struct list_head * more_ioends )
1651- {
1652- struct iomap_ioend * next ;
1653-
1654- INIT_LIST_HEAD (& ioend -> io_list );
1655-
1656- while ((next = list_first_entry_or_null (more_ioends , struct iomap_ioend ,
1657- io_list ))) {
1658- if (!iomap_ioend_can_merge (ioend , next ))
1659- break ;
1660- list_move_tail (& next -> io_list , & ioend -> io_list );
1661- ioend -> io_size += next -> io_size ;
1662- }
1663- }
1664- EXPORT_SYMBOL_GPL (iomap_ioend_try_merge );
1665-
1666- static int
1667- iomap_ioend_compare (void * priv , const struct list_head * a ,
1668- const struct list_head * b )
1669- {
1670- struct iomap_ioend * ia = container_of (a , struct iomap_ioend , io_list );
1671- struct iomap_ioend * ib = container_of (b , struct iomap_ioend , io_list );
1672-
1673- if (ia -> io_offset < ib -> io_offset )
1674- return -1 ;
1675- if (ia -> io_offset > ib -> io_offset )
1676- return 1 ;
1677- return 0 ;
1678- }
1679-
/*
 * Sort a list of ioends into ascending file-offset order (see
 * iomap_ioend_compare()) so that adjacent entries can be merged by
 * iomap_ioend_try_merge().
 */
void
iomap_sort_ioends(struct list_head *ioend_list)
{
	list_sort(NULL, ioend_list, iomap_ioend_compare);
}
EXPORT_SYMBOL_GPL(iomap_sort_ioends);
1686-
16871564static void iomap_writepage_end_bio (struct bio * bio )
16881565{
16891566 struct iomap_ioend * ioend = iomap_ioend_from_bio (bio );
@@ -2081,11 +1958,3 @@ iomap_writepages(struct address_space *mapping, struct writeback_control *wbc,
20811958 return iomap_submit_ioend (wpc , error );
20821959}
20831960EXPORT_SYMBOL_GPL (iomap_writepages );
2084-
2085- static int __init iomap_buffered_init (void )
2086- {
2087- return bioset_init (& iomap_ioend_bioset , 4 * (PAGE_SIZE / SECTOR_SIZE ),
2088- offsetof(struct iomap_ioend , io_bio ),
2089- BIOSET_NEED_BVECS );
2090- }
2091- fs_initcall (iomap_buffered_init );
0 commit comments