Skip to content

Commit 8cea569

Browse files
KAGA-KOKO authored and Peter Zijlstra committed
sched/mmcid: Use proper data structures
Having a lot of CID-functionality-specific members in struct task_struct and struct mm_struct is not really making the code easier to read. Encapsulate the CID-specific parts in data structures and keep them separate from the structures they are embedded in. No functional change. Signed-off-by: Thomas Gleixner <tglx@linutronix.de> Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org> Reviewed-by: Mathieu Desnoyers <mathieu.desnoyers@efficios.com> Link: https://patch.msgid.link/20251119172549.131573768@linutronix.de
1 parent 77d7dc8 commit 8cea569

7 files changed

Lines changed: 85 additions & 75 deletions

File tree

include/linux/mm_types.h

Lines changed: 14 additions & 42 deletions
Original file line number | Diff line number | Diff line change
@@ -20,6 +20,7 @@
2020
#include <linux/seqlock.h>
2121
#include <linux/percpu_counter.h>
2222
#include <linux/types.h>
23+
#include <linux/rseq_types.h>
2324
#include <linux/bitmap.h>
2425

2526
#include <asm/mmu.h>
@@ -922,10 +923,6 @@ struct vm_area_struct {
922923
#define vma_policy(vma) NULL
923924
#endif
924925

925-
struct mm_cid {
926-
unsigned int cid;
927-
};
928-
929926
/*
930927
* Opaque type representing current mm_struct flag state. Must be accessed via
931928
* mm_flags_xxx() helper functions.
@@ -987,30 +984,9 @@ struct mm_struct {
987984
*/
988985
atomic_t mm_users;
989986

990-
#ifdef CONFIG_SCHED_MM_CID
991-
/**
992-
* @pcpu_cid: Per-cpu current cid.
993-
*
994-
* Keep track of the currently allocated mm_cid for each cpu.
995-
* The per-cpu mm_cid values are serialized by their respective
996-
* runqueue locks.
997-
*/
998-
struct mm_cid __percpu *pcpu_cid;
999-
/**
1000-
* @nr_cpus_allowed: Number of CPUs allowed for mm.
1001-
*
1002-
* Number of CPUs allowed in the union of all mm's
1003-
* threads allowed CPUs.
1004-
*/
1005-
unsigned int nr_cpus_allowed;
1006-
/**
1007-
* @cpus_allowed_lock: Lock protecting mm cpus_allowed.
1008-
*
1009-
* Provide mutual exclusion for mm cpus_allowed and
1010-
* mm nr_cpus_allowed updates.
1011-
*/
1012-
raw_spinlock_t cpus_allowed_lock;
1013-
#endif
987+
/* MM CID related storage */
988+
struct mm_mm_cid mm_cid;
989+
1014990
#ifdef CONFIG_MMU
1015991
atomic_long_t pgtables_bytes; /* size of all page tables */
1016992
#endif
@@ -1352,9 +1328,6 @@ static inline void vma_iter_init(struct vma_iterator *vmi,
13521328
}
13531329

13541330
#ifdef CONFIG_SCHED_MM_CID
1355-
1356-
#define MM_CID_UNSET (~0U)
1357-
13581331
/*
13591332
* mm_cpus_allowed: Union of all mm's threads allowed CPUs.
13601333
*/
@@ -1383,20 +1356,20 @@ static inline void mm_init_cid(struct mm_struct *mm, struct task_struct *p)
13831356
int i;
13841357

13851358
for_each_possible_cpu(i) {
1386-
struct mm_cid *pcpu_cid = per_cpu_ptr(mm->pcpu_cid, i);
1359+
struct mm_cid_pcpu *pcpu = per_cpu_ptr(mm->mm_cid.pcpu, i);
13871360

1388-
pcpu_cid->cid = MM_CID_UNSET;
1361+
pcpu->cid = MM_CID_UNSET;
13891362
}
1390-
mm->nr_cpus_allowed = p->nr_cpus_allowed;
1391-
raw_spin_lock_init(&mm->cpus_allowed_lock);
1363+
mm->mm_cid.nr_cpus_allowed = p->nr_cpus_allowed;
1364+
raw_spin_lock_init(&mm->mm_cid.lock);
13921365
cpumask_copy(mm_cpus_allowed(mm), &p->cpus_mask);
13931366
cpumask_clear(mm_cidmask(mm));
13941367
}
13951368

13961369
static inline int mm_alloc_cid_noprof(struct mm_struct *mm, struct task_struct *p)
13971370
{
1398-
mm->pcpu_cid = alloc_percpu_noprof(struct mm_cid);
1399-
if (!mm->pcpu_cid)
1371+
mm->mm_cid.pcpu = alloc_percpu_noprof(struct mm_cid_pcpu);
1372+
if (!mm->mm_cid.pcpu)
14001373
return -ENOMEM;
14011374
mm_init_cid(mm, p);
14021375
return 0;
@@ -1405,8 +1378,8 @@ static inline int mm_alloc_cid_noprof(struct mm_struct *mm, struct task_struct *
14051378

14061379
static inline void mm_destroy_cid(struct mm_struct *mm)
14071380
{
1408-
free_percpu(mm->pcpu_cid);
1409-
mm->pcpu_cid = NULL;
1381+
free_percpu(mm->mm_cid.pcpu);
1382+
mm->mm_cid.pcpu = NULL;
14101383
}
14111384

14121385
static inline unsigned int mm_cid_size(void)
@@ -1421,10 +1394,9 @@ static inline void mm_set_cpus_allowed(struct mm_struct *mm, const struct cpumas
14211394
if (!mm)
14221395
return;
14231396
/* The mm_cpus_allowed is the union of each thread allowed CPUs masks. */
1424-
raw_spin_lock(&mm->cpus_allowed_lock);
1397+
guard(raw_spinlock)(&mm->mm_cid.lock);
14251398
cpumask_or(mm_allowed, mm_allowed, cpumask);
1426-
WRITE_ONCE(mm->nr_cpus_allowed, cpumask_weight(mm_allowed));
1427-
raw_spin_unlock(&mm->cpus_allowed_lock);
1399+
WRITE_ONCE(mm->mm_cid.nr_cpus_allowed, cpumask_weight(mm_allowed));
14281400
}
14291401
#else /* CONFIG_SCHED_MM_CID */
14301402
static inline void mm_init_cid(struct mm_struct *mm, struct task_struct *p) { }

include/linux/rseq_types.h

Lines changed: 42 additions & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -90,4 +90,46 @@ struct rseq_data {
9090
struct rseq_data { };
9191
#endif /* !CONFIG_RSEQ */
9292

93+
#ifdef CONFIG_SCHED_MM_CID
94+
95+
#define MM_CID_UNSET (~0U)
96+
97+
/**
98+
* struct sched_mm_cid - Storage for per task MM CID data
99+
* @active: MM CID is active for the task
100+
* @cid: The CID associated to the task
101+
* @last_cid: The last CID associated to the task
102+
*/
103+
struct sched_mm_cid {
104+
unsigned int active;
105+
unsigned int cid;
106+
unsigned int last_cid;
107+
};
108+
109+
/**
110+
* struct mm_cid_pcpu - Storage for per CPU MM_CID data
111+
* @cid: The CID associated to the CPU
112+
*/
113+
struct mm_cid_pcpu {
114+
unsigned int cid;
115+
};
116+
117+
/**
118+
* struct mm_mm_cid - Storage for per MM CID data
119+
* @pcpu: Per CPU storage for CIDs associated to a CPU
120+
* @nr_cpus_allowed: The number of CPUs in the per MM allowed CPUs map. The map
121+
* is growth only.
122+
* @lock: Spinlock to protect all fields except @pcpu. It also protects
123+
* the MM cid cpumask and the MM cidmask bitmap.
124+
*/
125+
struct mm_mm_cid {
126+
struct mm_cid_pcpu __percpu *pcpu;
127+
unsigned int nr_cpus_allowed;
128+
raw_spinlock_t lock;
129+
};
130+
#else /* CONFIG_SCHED_MM_CID */
131+
struct mm_mm_cid { };
132+
struct sched_mm_cid { };
133+
#endif /* !CONFIG_SCHED_MM_CID */
134+
93135
#endif

include/linux/sched.h

Lines changed: 2 additions & 9 deletions
Original file line number | Diff line number | Diff line change
@@ -1407,14 +1407,7 @@ struct task_struct {
14071407
#endif /* CONFIG_NUMA_BALANCING */
14081408

14091409
struct rseq_data rseq;
1410-
1411-
#ifdef CONFIG_SCHED_MM_CID
1412-
int mm_cid; /* Current cid in mm */
1413-
int last_mm_cid; /* Most recent cid in mm */
1414-
int migrate_from_cpu;
1415-
int mm_cid_active; /* Whether cid bitmap is active */
1416-
struct callback_head cid_work;
1417-
#endif
1410+
struct sched_mm_cid mm_cid;
14181411

14191412
struct tlbflush_unmap_batch tlb_ubc;
14201413

@@ -2308,7 +2301,7 @@ void sched_mm_cid_fork(struct task_struct *t);
23082301
void sched_mm_cid_exit_signals(struct task_struct *t);
23092302
static inline int task_mm_cid(struct task_struct *t)
23102303
{
2311-
return t->mm_cid;
2304+
return t->mm_cid.cid;
23122305
}
23132306
#else
23142307
static inline void sched_mm_cid_before_execve(struct task_struct *t) { }

init/init_task.c

Lines changed: 3 additions & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -223,6 +223,9 @@ struct task_struct init_task __aligned(L1_CACHE_BYTES) = {
223223
#ifdef CONFIG_SECCOMP_FILTER
224224
.seccomp = { .filter_count = ATOMIC_INIT(0) },
225225
#endif
226+
#ifdef CONFIG_SCHED_MM_CID
227+
.mm_cid = { .cid = MM_CID_UNSET, },
228+
#endif
226229
};
227230
EXPORT_SYMBOL(init_task);
228231

kernel/fork.c

Lines changed: 3 additions & 3 deletions
Original file line number | Diff line number | Diff line change
@@ -955,9 +955,9 @@ static struct task_struct *dup_task_struct(struct task_struct *orig, int node)
955955
#endif
956956

957957
#ifdef CONFIG_SCHED_MM_CID
958-
tsk->mm_cid = MM_CID_UNSET;
959-
tsk->last_mm_cid = MM_CID_UNSET;
960-
tsk->mm_cid_active = 0;
958+
tsk->mm_cid.cid = MM_CID_UNSET;
959+
tsk->mm_cid.last_cid = MM_CID_UNSET;
960+
tsk->mm_cid.active = 0;
961961
#endif
962962
return tsk;
963963

kernel/sched/core.c

Lines changed: 8 additions & 8 deletions
Original file line number | Diff line number | Diff line change
@@ -10376,14 +10376,14 @@ void sched_mm_cid_exit_signals(struct task_struct *t)
1037610376
{
1037710377
struct mm_struct *mm = t->mm;
1037810378

10379-
if (!mm || !t->mm_cid_active)
10379+
if (!mm || !t->mm_cid.active)
1038010380
return;
1038110381

1038210382
guard(preempt)();
10383-
t->mm_cid_active = 0;
10384-
if (t->mm_cid != MM_CID_UNSET) {
10385-
cpumask_clear_cpu(t->mm_cid, mm_cidmask(mm));
10386-
t->mm_cid = MM_CID_UNSET;
10383+
t->mm_cid.active = 0;
10384+
if (t->mm_cid.cid != MM_CID_UNSET) {
10385+
cpumask_clear_cpu(t->mm_cid.cid, mm_cidmask(mm));
10386+
t->mm_cid.cid = MM_CID_UNSET;
1038710387
}
1038810388
}
1038910389

@@ -10402,14 +10402,14 @@ void sched_mm_cid_after_execve(struct task_struct *t)
1040210402
return;
1040310403

1040410404
guard(preempt)();
10405-
t->mm_cid_active = 1;
10405+
t->mm_cid.active = 1;
1040610406
mm_cid_select(t);
1040710407
}
1040810408

1040910409
void sched_mm_cid_fork(struct task_struct *t)
1041010410
{
10411-
WARN_ON_ONCE(!t->mm || t->mm_cid != MM_CID_UNSET);
10412-
t->mm_cid_active = 1;
10411+
WARN_ON_ONCE(!t->mm || t->mm_cid.cid != MM_CID_UNSET);
10412+
t->mm_cid.active = 1;
1041310413
}
1041410414
#endif /* CONFIG_SCHED_MM_CID */
1041510415

kernel/sched/sched.h

Lines changed: 13 additions & 13 deletions
Original file line number | Diff line number | Diff line change
@@ -3549,8 +3549,8 @@ static inline void init_sched_mm_cid(struct task_struct *t)
35493549
return;
35503550

35513551
/* Preset last_mm_cid */
3552-
max_cid = min_t(int, READ_ONCE(mm->nr_cpus_allowed), atomic_read(&mm->mm_users));
3553-
t->last_mm_cid = max_cid - 1;
3552+
max_cid = min_t(int, READ_ONCE(mm->mm_cid.nr_cpus_allowed), atomic_read(&mm->mm_users));
3553+
t->mm_cid.last_cid = max_cid - 1;
35543554
}
35553555

35563556
static inline bool __mm_cid_get(struct task_struct *t, unsigned int cid, unsigned int max_cids)
@@ -3561,8 +3561,8 @@ static inline bool __mm_cid_get(struct task_struct *t, unsigned int cid, unsigne
35613561
return false;
35623562
if (cpumask_test_and_set_cpu(cid, mm_cidmask(mm)))
35633563
return false;
3564-
t->mm_cid = t->last_mm_cid = cid;
3565-
__this_cpu_write(mm->pcpu_cid->cid, cid);
3564+
t->mm_cid.cid = t->mm_cid.last_cid = cid;
3565+
__this_cpu_write(mm->mm_cid.pcpu->cid, cid);
35663566
return true;
35673567
}
35683568

@@ -3571,14 +3571,14 @@ static inline bool mm_cid_get(struct task_struct *t)
35713571
struct mm_struct *mm = t->mm;
35723572
unsigned int max_cids;
35733573

3574-
max_cids = min_t(int, READ_ONCE(mm->nr_cpus_allowed), atomic_read(&mm->mm_users));
3574+
max_cids = min_t(int, READ_ONCE(mm->mm_cid.nr_cpus_allowed), atomic_read(&mm->mm_users));
35753575

35763576
/* Try to reuse the last CID of this task */
3577-
if (__mm_cid_get(t, t->last_mm_cid, max_cids))
3577+
if (__mm_cid_get(t, t->mm_cid.last_cid, max_cids))
35783578
return true;
35793579

35803580
/* Try to reuse the last CID of this mm on this CPU */
3581-
if (__mm_cid_get(t, __this_cpu_read(mm->pcpu_cid->cid), max_cids))
3581+
if (__mm_cid_get(t, __this_cpu_read(mm->mm_cid.pcpu->cid), max_cids))
35823582
return true;
35833583

35843584
/* Try the first zero bit in the cidmask. */
@@ -3601,15 +3601,15 @@ static inline void mm_cid_select(struct task_struct *t)
36013601

36023602
static inline void switch_mm_cid(struct task_struct *prev, struct task_struct *next)
36033603
{
3604-
if (prev->mm_cid_active) {
3605-
if (prev->mm_cid != MM_CID_UNSET)
3606-
cpumask_clear_cpu(prev->mm_cid, mm_cidmask(prev->mm));
3607-
prev->mm_cid = MM_CID_UNSET;
3604+
if (prev->mm_cid.active) {
3605+
if (prev->mm_cid.cid != MM_CID_UNSET)
3606+
cpumask_clear_cpu(prev->mm_cid.cid, mm_cidmask(prev->mm));
3607+
prev->mm_cid.cid = MM_CID_UNSET;
36083608
}
36093609

3610-
if (next->mm_cid_active) {
3610+
if (next->mm_cid.active) {
36113611
mm_cid_select(next);
3612-
rseq_sched_set_task_mm_cid(next, next->mm_cid);
3612+
rseq_sched_set_task_mm_cid(next, next->mm_cid.cid);
36133613
}
36143614
}
36153615

0 commit comments

Comments (0)