Skip to content

Commit be9a227

Browse files
Sebastian Andrzej Siewior authored and KAGA-KOKO committed
fork: Redo ifdefs around task stack handling
The use of ifdef CONFIG_VMAP_STACK is confusing in terms what is actually happenning and what can happen. For instance from reading free_thread_stack() it appears that in the CONFIG_VMAP_STACK case it may receive a non-NULL vm pointer but it may also be NULL in which case __free_pages() is used to free the stack. This is however not the case because in the VMAP case a non-NULL pointer is always returned here. Since it looks like this might happen, the compiler creates the correct dead code with the invocation to __free_pages() and everything around it. Twice. Add spaces between the ifdef and the identifer to recognize the ifdef level which is currently in scope. Add the current identifer as a comment behind #else and #endif. Move the code within free_thread_stack() and alloc_thread_stack_node() into the relevant ifdef blocks. Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> Signed-off-by: Thomas Gleixner <tglx@linutronix.de> Acked-by: Andy Lutomirski <luto@kernel.org> Link: https://lore.kernel.org/r/20220217102406.3697941-2-bigeasy@linutronix.de
1 parent cfb9244 commit be9a227

1 file changed

Lines changed: 39 additions & 35 deletions

File tree

kernel/fork.c

Lines changed: 39 additions & 35 deletions
Original file line numberDiff line numberDiff line change
@@ -185,7 +185,7 @@ static inline void free_task_struct(struct task_struct *tsk)
185185
*/
186186
# if THREAD_SIZE >= PAGE_SIZE || defined(CONFIG_VMAP_STACK)
187187

188-
#ifdef CONFIG_VMAP_STACK
188+
# ifdef CONFIG_VMAP_STACK
189189
/*
190190
* vmalloc() is a bit slow, and calling vfree() enough times will force a TLB
191191
* flush. Try to minimize the number of calls by caching stacks.
@@ -210,11 +210,9 @@ static int free_vm_stack_cache(unsigned int cpu)
210210

211211
return 0;
212212
}
213-
#endif
214213

215214
static unsigned long *alloc_thread_stack_node(struct task_struct *tsk, int node)
216215
{
217-
#ifdef CONFIG_VMAP_STACK
218216
void *stack;
219217
int i;
220218

@@ -258,45 +256,53 @@ static unsigned long *alloc_thread_stack_node(struct task_struct *tsk, int node)
258256
tsk->stack = stack;
259257
}
260258
return stack;
261-
#else
262-
struct page *page = alloc_pages_node(node, THREADINFO_GFP,
263-
THREAD_SIZE_ORDER);
264-
265-
if (likely(page)) {
266-
tsk->stack = kasan_reset_tag(page_address(page));
267-
return tsk->stack;
268-
}
269-
return NULL;
270-
#endif
271259
}
272260

273-
static inline void free_thread_stack(struct task_struct *tsk)
261+
static void free_thread_stack(struct task_struct *tsk)
274262
{
275-
#ifdef CONFIG_VMAP_STACK
276263
struct vm_struct *vm = task_stack_vm_area(tsk);
264+
int i;
277265

278-
if (vm) {
279-
int i;
266+
for (i = 0; i < THREAD_SIZE / PAGE_SIZE; i++)
267+
memcg_kmem_uncharge_page(vm->pages[i], 0);
280268

281-
for (i = 0; i < THREAD_SIZE / PAGE_SIZE; i++)
282-
memcg_kmem_uncharge_page(vm->pages[i], 0);
269+
for (i = 0; i < NR_CACHED_STACKS; i++) {
270+
if (this_cpu_cmpxchg(cached_stacks[i], NULL,
271+
tsk->stack_vm_area) != NULL)
272+
continue;
283273

284-
for (i = 0; i < NR_CACHED_STACKS; i++) {
285-
if (this_cpu_cmpxchg(cached_stacks[i],
286-
NULL, tsk->stack_vm_area) != NULL)
287-
continue;
274+
tsk->stack = NULL;
275+
tsk->stack_vm_area = NULL;
276+
return;
277+
}
278+
vfree_atomic(tsk->stack);
279+
tsk->stack = NULL;
280+
tsk->stack_vm_area = NULL;
281+
}
288282

289-
return;
290-
}
283+
# else /* !CONFIG_VMAP_STACK */
291284

292-
vfree_atomic(tsk->stack);
293-
return;
285+
static unsigned long *alloc_thread_stack_node(struct task_struct *tsk, int node)
286+
{
287+
struct page *page = alloc_pages_node(node, THREADINFO_GFP,
288+
THREAD_SIZE_ORDER);
289+
290+
if (likely(page)) {
291+
tsk->stack = kasan_reset_tag(page_address(page));
292+
return tsk->stack;
294293
}
295-
#endif
294+
return NULL;
295+
}
296296

297+
static void free_thread_stack(struct task_struct *tsk)
298+
{
297299
__free_pages(virt_to_page(tsk->stack), THREAD_SIZE_ORDER);
300+
tsk->stack = NULL;
298301
}
299-
# else
302+
303+
# endif /* CONFIG_VMAP_STACK */
304+
# else /* !(THREAD_SIZE >= PAGE_SIZE || defined(CONFIG_VMAP_STACK)) */
305+
300306
static struct kmem_cache *thread_stack_cache;
301307

302308
static unsigned long *alloc_thread_stack_node(struct task_struct *tsk,
@@ -312,6 +318,7 @@ static unsigned long *alloc_thread_stack_node(struct task_struct *tsk,
312318
static void free_thread_stack(struct task_struct *tsk)
313319
{
314320
kmem_cache_free(thread_stack_cache, tsk->stack);
321+
tsk->stack = NULL;
315322
}
316323

317324
void thread_stack_cache_init(void)
@@ -321,8 +328,9 @@ void thread_stack_cache_init(void)
321328
THREAD_SIZE, NULL);
322329
BUG_ON(thread_stack_cache == NULL);
323330
}
324-
# endif
325-
#endif
331+
332+
# endif /* THREAD_SIZE >= PAGE_SIZE || defined(CONFIG_VMAP_STACK) */
333+
#endif /* !CONFIG_ARCH_THREAD_STACK_ALLOCATOR */
326334

327335
/* SLAB cache for signal_struct structures (tsk->signal) */
328336
static struct kmem_cache *signal_cachep;
@@ -432,10 +440,6 @@ static void release_task_stack(struct task_struct *tsk)
432440

433441
account_kernel_stack(tsk, -1);
434442
free_thread_stack(tsk);
435-
tsk->stack = NULL;
436-
#ifdef CONFIG_VMAP_STACK
437-
tsk->stack_vm_area = NULL;
438-
#endif
439443
}
440444

441445
#ifdef CONFIG_THREAD_INFO_IN_TASK

0 commit comments

Comments
 (0)