@@ -29,11 +29,6 @@ struct kmem_cache *xfs_buf_cache;
 /*
  * Locking orders
  *
- * xfs_buf_ioacct_inc:
- * xfs_buf_ioacct_dec:
- *        b_sema (caller holds)
- *          b_lock
- *
  * xfs_buf_stale:
  *        b_sema (caller holds)
  *          b_lock
@@ -81,51 +76,6 @@ xfs_buf_vmap_len(
         return (bp->b_page_count * PAGE_SIZE);
 }

-/*
- * Bump the I/O in flight count on the buftarg if we haven't yet done so for
- * this buffer. The count is incremented once per buffer (per hold cycle)
- * because the corresponding decrement is deferred to buffer release. Buffers
- * can undergo I/O multiple times in a hold-release cycle and per buffer I/O
- * tracking adds unnecessary overhead. This is used for sychronization purposes
- * with unmount (see xfs_buftarg_drain()), so all we really need is a count of
- * in-flight buffers.
- *
- * Buffers that are never released (e.g., superblock, iclog buffers) must set
- * the XBF_NO_IOACCT flag before I/O submission. Otherwise, the buftarg count
- * never reaches zero and unmount hangs indefinitely.
- */
-static inline void
-xfs_buf_ioacct_inc(
-        struct xfs_buf        *bp)
-{
-        if (bp->b_flags & XBF_NO_IOACCT)
-                return;
-
-        ASSERT(bp->b_flags & XBF_ASYNC);
-        spin_lock(&bp->b_lock);
-        if (!(bp->b_state & XFS_BSTATE_IN_FLIGHT)) {
-                bp->b_state |= XFS_BSTATE_IN_FLIGHT;
-                percpu_counter_inc(&bp->b_target->bt_io_count);
-        }
-        spin_unlock(&bp->b_lock);
-}
-
-/*
- * Clear the in-flight state on a buffer about to be released to the LRU or
- * freed and unaccount from the buftarg.
- */
-static inline void
-__xfs_buf_ioacct_dec(
-        struct xfs_buf        *bp)
-{
-        lockdep_assert_held(&bp->b_lock);
-
-        if (bp->b_state & XFS_BSTATE_IN_FLIGHT) {
-                bp->b_state &= ~XFS_BSTATE_IN_FLIGHT;
-                percpu_counter_dec(&bp->b_target->bt_io_count);
-        }
-}
-
 /*
  * When we mark a buffer stale, we remove the buffer from the LRU and clear the
  * b_lru_ref count so that the buffer is freed immediately when the buffer
@@ -156,8 +106,6 @@ xfs_buf_stale(
          * status now to preserve accounting consistency.
          */
         spin_lock(&bp->b_lock);
-        __xfs_buf_ioacct_dec(bp);
-
         atomic_set(&bp->b_lru_ref, 0);
         if (!(bp->b_state & XFS_BSTATE_DISPOSE) &&
             (list_lru_del_obj(&bp->b_target->bt_lru, &bp->b_lru)))
@@ -946,6 +894,7 @@ xfs_buf_readahead_map(
         bp->b_ops = ops;
         bp->b_flags &= ~(XBF_WRITE | XBF_DONE);
         bp->b_flags |= flags;
+        percpu_counter_inc(&target->bt_readahead_count);
         xfs_buf_submit(bp);
 }

@@ -1002,10 +951,12 @@ xfs_buf_get_uncached(
         struct xfs_buf                *bp;
         DEFINE_SINGLE_BUF_MAP(map, XFS_BUF_DADDR_NULL, numblks);

+        /* there are currently no valid flags for xfs_buf_get_uncached */
+        ASSERT(flags == 0);
+
         *bpp = NULL;

-        /* flags might contain irrelevant bits, pass only what we care about */
-        error = _xfs_buf_alloc(target, &map, 1, flags & XBF_NO_IOACCT, &bp);
+        error = _xfs_buf_alloc(target, &map, 1, flags, &bp);
         if (error)
                 return error;

@@ -1059,7 +1010,6 @@ xfs_buf_rele_uncached(
                 spin_unlock(&bp->b_lock);
                 return;
         }
-        __xfs_buf_ioacct_dec(bp);
         spin_unlock(&bp->b_lock);
         xfs_buf_free(bp);
 }
@@ -1078,19 +1028,11 @@ xfs_buf_rele_cached(
         spin_lock(&bp->b_lock);
         ASSERT(bp->b_hold >= 1);
         if (bp->b_hold > 1) {
-                /*
-                 * Drop the in-flight state if the buffer is already on the LRU
-                 * and it holds the only reference. This is racy because we
-                 * haven't acquired the pag lock, but the use of _XBF_IN_FLIGHT
-                 * ensures the decrement occurs only once per-buf.
-                 */
-                if (--bp->b_hold == 1 && !list_empty(&bp->b_lru))
-                        __xfs_buf_ioacct_dec(bp);
+                bp->b_hold--;
                 goto out_unlock;
         }

         /* we are asked to drop the last reference */
-        __xfs_buf_ioacct_dec(bp);
         if (!(bp->b_flags & XBF_STALE) && atomic_read(&bp->b_lru_ref)) {
                 /*
                  * If the buffer is added to the LRU, keep the reference to the
@@ -1370,6 +1312,8 @@ __xfs_buf_ioend(
                         bp->b_ops->verify_read(bp);
                 if (!bp->b_error)
                         bp->b_flags |= XBF_DONE;
+                if (bp->b_flags & XBF_READ_AHEAD)
+                        percpu_counter_dec(&bp->b_target->bt_readahead_count);
         } else {
                 if (!bp->b_error) {
                         bp->b_flags &= ~XBF_WRITE_FAIL;
@@ -1658,9 +1602,6 @@ xfs_buf_submit(
          */
         bp->b_error = 0;

-        if (bp->b_flags & XBF_ASYNC)
-                xfs_buf_ioacct_inc(bp);
-
         if ((bp->b_flags & XBF_WRITE) && !xfs_buf_verify_write(bp)) {
                 xfs_force_shutdown(bp->b_mount, SHUTDOWN_CORRUPT_INCORE);
                 xfs_buf_ioend(bp);
@@ -1786,9 +1727,8 @@ xfs_buftarg_wait(
         struct xfs_buftarg        *btp)
 {
         /*
-         * First wait on the buftarg I/O count for all in-flight buffers to be
-         * released. This is critical as new buffers do not make the LRU until
-         * they are released.
+         * First wait for all in-flight readahead buffers to be released. This is
+         * critical as new buffers do not make the LRU until they are released.
          *
          * Next, flush the buffer workqueue to ensure all completion processing
          * has finished. Just waiting on buffer locks is not sufficient for
@@ -1797,7 +1737,7 @@ xfs_buftarg_wait(
          * all reference counts have been dropped before we start walking the
          * LRU list.
          */
-        while (percpu_counter_sum(&btp->bt_io_count))
+        while (percpu_counter_sum(&btp->bt_readahead_count))
                 delay(100);
         flush_workqueue(btp->bt_mount->m_buf_workqueue);
 }
@@ -1914,8 +1854,8 @@ xfs_destroy_buftarg(
         struct xfs_buftarg        *btp)
 {
         shrinker_free(btp->bt_shrinker);
-        ASSERT(percpu_counter_sum(&btp->bt_io_count) == 0);
-        percpu_counter_destroy(&btp->bt_io_count);
+        ASSERT(percpu_counter_sum(&btp->bt_readahead_count) == 0);
+        percpu_counter_destroy(&btp->bt_readahead_count);
         list_lru_destroy(&btp->bt_lru);
 }

@@ -1969,7 +1909,7 @@ xfs_init_buftarg(

         if (list_lru_init(&btp->bt_lru))
                 return -ENOMEM;
-        if (percpu_counter_init(&btp->bt_io_count, 0, GFP_KERNEL))
+        if (percpu_counter_init(&btp->bt_readahead_count, 0, GFP_KERNEL))
                 goto out_destroy_lru;

         btp->bt_shrinker =
@@ -1983,7 +1923,7 @@ xfs_init_buftarg(
         return 0;

 out_destroy_io_count:
-        percpu_counter_destroy(&btp->bt_io_count);
+        percpu_counter_destroy(&btp->bt_readahead_count);
 out_destroy_lru:
         list_lru_destroy(&btp->bt_lru);
         return -ENOMEM;
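
For context, the following is a minimal, hypothetical sketch (not part of the patch, and not the real struct xfs_buftarg or fs/xfs/xfs_buf.c code) of the accounting pattern the change settles on: readahead submission bumps a per-CPU counter, I/O completion drops it, and the drain/teardown path waits for the summed count to reach zero. The percpu_counter_* helpers and msleep() are real kernel APIs; demo_target and the demo_* functions are invented names for illustration only (the patch itself uses XFS's delay() wrapper rather than msleep()).

/* Hypothetical stand-ins; the real code lives in fs/xfs/xfs_buf.c. */
#include <linux/percpu_counter.h>
#include <linux/delay.h>
#include <linux/gfp.h>

struct demo_target {
        struct percpu_counter        ra_count;        /* plays the role of bt_readahead_count */
};

static int demo_target_init(struct demo_target *tp)
{
        /* start at zero; allocates the per-CPU counters with GFP_KERNEL */
        return percpu_counter_init(&tp->ra_count, 0, GFP_KERNEL);
}

static void demo_readahead_submit(struct demo_target *tp)
{
        /* one increment per readahead buffer handed to the I/O layer */
        percpu_counter_inc(&tp->ra_count);
}

static void demo_readahead_complete(struct demo_target *tp)
{
        /* completion (or error) path drops the in-flight count */
        percpu_counter_dec(&tp->ra_count);
}

static void demo_target_wait(struct demo_target *tp)
{
        /* percpu_counter_sum() folds all per-CPU deltas into an exact total */
        while (percpu_counter_sum(&tp->ra_count))
                msleep(100);
}

static void demo_target_destroy(struct demo_target *tp)
{
        percpu_counter_destroy(&tp->ra_count);
}

A per-CPU counter keeps the submission and completion hot paths cheap (no shared cacheline bouncing), while the exact but more expensive percpu_counter_sum() is confined to the rare wait path.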