Skip to content

Commit a78d4a2

Browse files
urezki authored and paulmckrcu committed
kvfree_rcu: Refactor kfree_rcu_monitor()
Currently we have three functions which depend on each other. Two of them are quite tiny and the last one where the most work is done. All of them are related to queuing RCU batches to reclaim objects after a GP. 1. kfree_rcu_monitor(). It consist of few lines. It acquires a spin-lock and calls kfree_rcu_drain_unlock(). 2. kfree_rcu_drain_unlock(). It also consists of few lines of code. It calls queue_kfree_rcu_work() to queue the batch. If this fails, it rearms the monitor work to try again later. 3. queue_kfree_rcu_work(). This provides the bulk of the functionality, attempting to start a new batch to free objects after a GP. Since there are no external users of functions [2] and [3], both can eliminated by moving all logic directly into [1], which both shrinks and simplifies the code. Also replace comments which start with "/*" to "//" format to make it unified across the file. Signed-off-by: Uladzislau Rezki (Sony) <urezki@gmail.com> Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
1 parent d8628f3 commit a78d4a2

1 file changed

Lines changed: 26 additions & 58 deletions

File tree

kernel/rcu/tree.c

Lines changed: 26 additions & 58 deletions
Original file line numberDiff line numberDiff line change
@@ -3379,29 +3379,26 @@ static void kfree_rcu_work(struct work_struct *work)
33793379
}
33803380

33813381
/*
3382-
* Schedule the kfree batch RCU work to run in workqueue context after a GP.
3383-
*
3384-
* This function is invoked by kfree_rcu_monitor() when the KFREE_DRAIN_JIFFIES
3385-
* timeout has been reached.
3382+
* This function is invoked after the KFREE_DRAIN_JIFFIES timeout.
33863383
*/
3387-
static inline bool queue_kfree_rcu_work(struct kfree_rcu_cpu *krcp)
3384+
static void kfree_rcu_monitor(struct work_struct *work)
33883385
{
3389-
struct kfree_rcu_cpu_work *krwp;
3390-
bool repeat = false;
3386+
struct kfree_rcu_cpu *krcp = container_of(work,
3387+
struct kfree_rcu_cpu, monitor_work.work);
3388+
unsigned long flags;
33913389
int i, j;
33923390

3393-
lockdep_assert_held(&krcp->lock);
3391+
raw_spin_lock_irqsave(&krcp->lock, flags);
33943392

3393+
// Attempt to start a new batch.
33953394
for (i = 0; i < KFREE_N_BATCHES; i++) {
3396-
krwp = &(krcp->krw_arr[i]);
3395+
struct kfree_rcu_cpu_work *krwp = &(krcp->krw_arr[i]);
33973396

3398-
/*
3399-
* Try to detach bkvhead or head and attach it over any
3400-
* available corresponding free channel. It can be that
3401-
* a previous RCU batch is in progress, it means that
3402-
* immediately to queue another one is not possible so
3403-
* return false to tell caller to retry.
3404-
*/
3397+
// Try to detach bkvhead or head and attach it over any
3398+
// available corresponding free channel. It can be that
3399+
// a previous RCU batch is in progress, it means that
3400+
// immediately to queue another one is not possible so
3401+
// in that case the monitor work is rearmed.
34053402
if ((krcp->bkvhead[0] && !krwp->bkvhead_free[0]) ||
34063403
(krcp->bkvhead[1] && !krwp->bkvhead_free[1]) ||
34073404
(krcp->head && !krwp->head_free)) {
@@ -3423,57 +3420,28 @@ static inline bool queue_kfree_rcu_work(struct kfree_rcu_cpu *krcp)
34233420

34243421
WRITE_ONCE(krcp->count, 0);
34253422

3426-
/*
3427-
* One work is per one batch, so there are three
3428-
* "free channels", the batch can handle. It can
3429-
* be that the work is in the pending state when
3430-
* channels have been detached following by each
3431-
* other.
3432-
*/
3423+
// One work is per one batch, so there are three
3424+
// "free channels", the batch can handle. It can
3425+
// be that the work is in the pending state when
3426+
// channels have been detached following by each
3427+
// other.
34333428
queue_rcu_work(system_wq, &krwp->rcu_work);
34343429
}
3435-
3436-
// Repeat if any "free" corresponding channel is still busy.
3437-
if (krcp->bkvhead[0] || krcp->bkvhead[1] || krcp->head)
3438-
repeat = true;
34393430
}
34403431

3441-
return !repeat;
3442-
}
3443-
3444-
static inline void kfree_rcu_drain_unlock(struct kfree_rcu_cpu *krcp,
3445-
unsigned long flags)
3446-
{
3447-
// Attempt to start a new batch.
3448-
if (queue_kfree_rcu_work(krcp)) {
3449-
// Success! Our job is done here.
3432+
// If there is nothing to detach, it means that our job is
3433+
// successfully done here. In case of having at least one
3434+
// of the channels that is still busy we should rearm the
3435+
// work to repeat an attempt. Because previous batches are
3436+
// still in progress.
3437+
if (!krcp->bkvhead[0] && !krcp->bkvhead[1] && !krcp->head)
34503438
krcp->monitor_todo = false;
3451-
raw_spin_unlock_irqrestore(&krcp->lock, flags);
3452-
return;
3453-
}
3439+
else
3440+
schedule_delayed_work(&krcp->monitor_work, KFREE_DRAIN_JIFFIES);
34543441

3455-
// Previous RCU batch still in progress, try again later.
3456-
schedule_delayed_work(&krcp->monitor_work, KFREE_DRAIN_JIFFIES);
34573442
raw_spin_unlock_irqrestore(&krcp->lock, flags);
34583443
}
34593444

3460-
/*
3461-
* This function is invoked after the KFREE_DRAIN_JIFFIES timeout.
3462-
* It invokes kfree_rcu_drain_unlock() to attempt to start another batch.
3463-
*/
3464-
static void kfree_rcu_monitor(struct work_struct *work)
3465-
{
3466-
unsigned long flags;
3467-
struct kfree_rcu_cpu *krcp = container_of(work, struct kfree_rcu_cpu,
3468-
monitor_work.work);
3469-
3470-
raw_spin_lock_irqsave(&krcp->lock, flags);
3471-
if (krcp->monitor_todo)
3472-
kfree_rcu_drain_unlock(krcp, flags);
3473-
else
3474-
raw_spin_unlock_irqrestore(&krcp->lock, flags);
3475-
}
3476-
34773445
static enum hrtimer_restart
34783446
schedule_page_work_fn(struct hrtimer *t)
34793447
{

0 commit comments

Comments
 (0)