Skip to content

Commit 543467d

Browse files
laveeshb authored and brauner committed
writeback: fix 100% CPU usage when dirtytime_expire_interval is 0
When vm.dirtytime_expire_seconds is set to 0, wakeup_dirtytime_writeback()
schedules delayed work with a delay of 0, causing immediate execution. The
function then reschedules itself with 0 delay again, creating an infinite
busy loop that causes 100% kworker CPU usage.

Fix by:
- Only scheduling delayed work in wakeup_dirtytime_writeback() when
  dirtytime_expire_interval is non-zero
- Cancelling the delayed work in dirtytime_interval_handler() when the
  interval is set to 0
- Adding a guard in start_dirtytime_writeback() for defensive coding

Tested by booting kernel in QEMU with virtme-ng:
- Before fix: kworker CPU spikes to ~73%
- After fix: CPU remains at normal levels
- Setting interval back to non-zero correctly resumes writeback

Fixes: a2f4870 ("fs: make sure the timestamps for lazytime inodes eventually get written")
Cc: stable@vger.kernel.org
Closes: https://bugzilla.kernel.org/show_bug.cgi?id=220227
Signed-off-by: Laveesh Bansal <laveeshb@laveeshbansal.com>
Link: https://patch.msgid.link/20260106145059.543282-2-laveeshb@laveeshbansal.com
Reviewed-by: Jan Kara <jack@suse.cz>
Signed-off-by: Christian Brauner <brauner@kernel.org>
1 parent c644bce commit 543467d

1 file changed

Lines changed: 10 additions & 4 deletions

File tree

fs/fs-writeback.c

Lines changed: 10 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -2492,7 +2492,8 @@ static void wakeup_dirtytime_writeback(struct work_struct *w)
 				wb_wakeup(wb);
 	}
 	rcu_read_unlock();
-	schedule_delayed_work(&dirtytime_work, dirtytime_expire_interval * HZ);
+	if (dirtytime_expire_interval)
+		schedule_delayed_work(&dirtytime_work, dirtytime_expire_interval * HZ);
 }

24982499
@@ -2501,8 +2502,12 @@ static int dirtytime_interval_handler(const struct ctl_table *table, int write,
 	int ret;
 
 	ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
-	if (ret == 0 && write)
-		mod_delayed_work(system_percpu_wq, &dirtytime_work, 0);
+	if (ret == 0 && write) {
+		if (dirtytime_expire_interval)
+			mod_delayed_work(system_percpu_wq, &dirtytime_work, 0);
+		else
+			cancel_delayed_work_sync(&dirtytime_work);
+	}
 	return ret;
 }

@@ -2519,7 +2524,8 @@ static const struct ctl_table vm_fs_writeback_table[] = {
 
 static int __init start_dirtytime_writeback(void)
 {
-	schedule_delayed_work(&dirtytime_work, dirtytime_expire_interval * HZ);
+	if (dirtytime_expire_interval)
+		schedule_delayed_work(&dirtytime_work, dirtytime_expire_interval * HZ);
 	register_sysctl_init("vm", vm_fs_writeback_table);
 	return 0;
 }

0 commit comments

Comments
 (0)