@@ -166,8 +166,7 @@ static void wb_wakeup_delayed(struct bdi_writeback *wb)
 	spin_unlock_irq(&wb->work_lock);
 }
 
-static void finish_writeback_work(struct bdi_writeback *wb,
-				  struct wb_writeback_work *work)
+static void finish_writeback_work(struct wb_writeback_work *work)
 {
 	struct wb_completion *done = work->done;
 
@@ -196,7 +195,7 @@ static void wb_queue_work(struct bdi_writeback *wb,
 		list_add_tail(&work->list, &wb->work_list);
 		mod_delayed_work(bdi_wq, &wb->dwork, 0);
 	} else
-		finish_writeback_work(wb, work);
+		finish_writeback_work(work);
 
 	spin_unlock_irq(&wb->work_lock);
 }
@@ -1561,7 +1560,8 @@ static void inode_sleep_on_writeback(struct inode *inode)
  * thread's back can have unexpected consequences.
  */
 static void requeue_inode(struct inode *inode, struct bdi_writeback *wb,
-			  struct writeback_control *wbc)
+			  struct writeback_control *wbc,
+			  unsigned long dirtied_before)
 {
 	if (inode->i_state & I_FREEING)
 		return;
@@ -1594,7 +1594,8 @@ static void requeue_inode(struct inode *inode, struct bdi_writeback *wb,
 		 * We didn't write back all the pages.  nfs_writepages()
 		 * sometimes bales out without doing anything.
 		 */
-		if (wbc->nr_to_write <= 0) {
+		if (wbc->nr_to_write <= 0 &&
+		    !inode_dirtied_after(inode, dirtied_before)) {
 			/* Slice used up. Queue for next turn. */
 			requeue_io(inode, wb);
 		} else {
@@ -1862,6 +1863,11 @@ static long writeback_sb_inodes(struct super_block *sb,
 	unsigned long start_time = jiffies;
 	long write_chunk;
 	long total_wrote = 0;  /* count both pages and inodes */
+	unsigned long dirtied_before = jiffies;
+
+	if (work->for_kupdate)
+		dirtied_before = jiffies -
+			msecs_to_jiffies(dirty_expire_interval * 10);
 
 	while (!list_empty(&wb->b_io)) {
 		struct inode *inode = wb_inode(wb->b_io.prev);
@@ -1967,7 +1973,7 @@ static long writeback_sb_inodes(struct super_block *sb,
 		spin_lock(&inode->i_lock);
 		if (!(inode->i_state & I_DIRTY_ALL))
 			total_wrote++;
-		requeue_inode(inode, tmp_wb, &wbc);
+		requeue_inode(inode, tmp_wb, &wbc, dirtied_before);
 		inode_sync_complete(inode);
 		spin_unlock(&inode->i_lock);
 
@@ -2069,6 +2075,7 @@ static long wb_writeback(struct bdi_writeback *wb,
 	struct inode *inode;
 	long progress;
 	struct blk_plug plug;
+	bool queued = false;
 
 	blk_start_plug(&plug);
 	for (;;) {
@@ -2098,21 +2105,24 @@ static long wb_writeback(struct bdi_writeback *wb,
 
 		spin_lock(&wb->list_lock);
 
-		/*
-		 * Kupdate and background works are special and we want to
-		 * include all inodes that need writing. Livelock avoidance is
-		 * handled by these works yielding to any other work so we are
-		 * safe.
-		 */
-		if (work->for_kupdate) {
-			dirtied_before = jiffies -
-				msecs_to_jiffies(dirty_expire_interval * 10);
-		} else if (work->for_background)
-			dirtied_before = jiffies;
-
 		trace_writeback_start(wb, work);
-		if (list_empty(&wb->b_io))
+		if (list_empty(&wb->b_io)) {
+			/*
+			 * Kupdate and background works are special and we want
+			 * to include all inodes that need writing. Livelock
+			 * avoidance is handled by these works yielding to any
+			 * other work so we are safe.
+			 */
+			if (work->for_kupdate) {
+				dirtied_before = jiffies -
+					msecs_to_jiffies(dirty_expire_interval *
+							 10);
+			} else if (work->for_background)
+				dirtied_before = jiffies;
+
 			queue_io(wb, work, dirtied_before);
+			queued = true;
+		}
 		if (work->sb)
 			progress = writeback_sb_inodes(work->sb, wb, work);
 		else
@@ -2127,7 +2137,7 @@ static long wb_writeback(struct bdi_writeback *wb,
 		 * mean the overall work is done. So we keep looping as long
 		 * as made some progress on cleaning pages or inodes.
 		 */
-		if (progress) {
+		if (progress || !queued) {
 			spin_unlock(&wb->list_lock);
 			continue;
 		}
@@ -2262,7 +2272,7 @@ static long wb_do_writeback(struct bdi_writeback *wb)
 	while ((work = get_next_work_item(wb)) != NULL) {
 		trace_writeback_exec(wb, work);
 		wrote += wb_writeback(wb, work);
-		finish_writeback_work(wb, work);
+		finish_writeback_work(work);
 	}
 
 	/*
@@ -2322,8 +2332,7 @@ void wb_workfn(struct work_struct *work)
 }
 
 /*
- * Start writeback of `nr_pages' pages on this bdi. If `nr_pages' is zero,
- * write back the whole world.
+ * Start writeback of all dirty pages on this bdi.
  */
 static void __wakeup_flusher_threads_bdi(struct backing_dev_info *bdi,
 					 enum wb_reason reason)
@@ -2726,7 +2735,7 @@ EXPORT_SYMBOL(writeback_inodes_sb_nr);
  */
 void writeback_inodes_sb(struct super_block *sb, enum wb_reason reason)
 {
-	return writeback_inodes_sb_nr(sb, get_nr_dirty_pages(), reason);
+	writeback_inodes_sb_nr(sb, get_nr_dirty_pages(), reason);
 }
 EXPORT_SYMBOL(writeback_inodes_sb);
 
0 commit comments