Skip to content

Commit 7a3b835

Browse files
pcc authored and willdeacon committed
kasan: use separate (un)poison implementation for integrated init
Currently with integrated init page_alloc.c needs to know whether kasan_alloc_pages() will zero initialize memory, but this will start becoming more complicated once we start adding tag initialization support for user pages. To avoid page_alloc.c needing to know more details of what integrated init will do, move the unpoisoning logic for integrated init into the HW tags implementation. Currently the logic is identical but it will diverge in subsequent patches. For symmetry do the same for poisoning although this logic will be unaffected by subsequent patches. Signed-off-by: Peter Collingbourne <pcc@google.com> Reviewed-by: Andrey Konovalov <andreyknvl@gmail.com> Link: https://linux-review.googlesource.com/id/I2c550234c6c4a893c48c18ff0c6ce658c7c67056 Link: https://lore.kernel.org/r/20210602235230.3928842-3-pcc@google.com Signed-off-by: Will Deacon <will@kernel.org>
1 parent 92638b4 commit 7a3b835

5 files changed

Lines changed: 95 additions & 56 deletions

File tree

include/linux/kasan.h

Lines changed: 38 additions & 26 deletions
Original file line numberDiff line numberDiff line change
@@ -2,6 +2,7 @@
22
#ifndef _LINUX_KASAN_H
33
#define _LINUX_KASAN_H
44

5+
#include <linux/bug.h>
56
#include <linux/static_key.h>
67
#include <linux/types.h>
78

@@ -79,14 +80,6 @@ static inline void kasan_disable_current(void) {}
7980

8081
#endif /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */
8182

82-
#ifdef CONFIG_KASAN
83-
84-
struct kasan_cache {
85-
int alloc_meta_offset;
86-
int free_meta_offset;
87-
bool is_kmalloc;
88-
};
89-
9083
#ifdef CONFIG_KASAN_HW_TAGS
9184

9285
DECLARE_STATIC_KEY_FALSE(kasan_flag_enabled);
@@ -101,20 +94,45 @@ static inline bool kasan_has_integrated_init(void)
10194
return kasan_enabled();
10295
}
10396

97+
void kasan_alloc_pages(struct page *page, unsigned int order, gfp_t flags);
98+
void kasan_free_pages(struct page *page, unsigned int order);
99+
104100
#else /* CONFIG_KASAN_HW_TAGS */
105101

106102
static inline bool kasan_enabled(void)
107103
{
108-
return true;
104+
return IS_ENABLED(CONFIG_KASAN);
109105
}
110106

111107
static inline bool kasan_has_integrated_init(void)
112108
{
113109
return false;
114110
}
115111

112+
static __always_inline void kasan_alloc_pages(struct page *page,
113+
unsigned int order, gfp_t flags)
114+
{
115+
/* Only available for integrated init. */
116+
BUILD_BUG();
117+
}
118+
119+
static __always_inline void kasan_free_pages(struct page *page,
120+
unsigned int order)
121+
{
122+
/* Only available for integrated init. */
123+
BUILD_BUG();
124+
}
125+
116126
#endif /* CONFIG_KASAN_HW_TAGS */
117127

128+
#ifdef CONFIG_KASAN
129+
130+
struct kasan_cache {
131+
int alloc_meta_offset;
132+
int free_meta_offset;
133+
bool is_kmalloc;
134+
};
135+
118136
slab_flags_t __kasan_never_merge(void);
119137
static __always_inline slab_flags_t kasan_never_merge(void)
120138
{
@@ -130,20 +148,20 @@ static __always_inline void kasan_unpoison_range(const void *addr, size_t size)
130148
__kasan_unpoison_range(addr, size);
131149
}
132150

133-
void __kasan_alloc_pages(struct page *page, unsigned int order, bool init);
134-
static __always_inline void kasan_alloc_pages(struct page *page,
151+
void __kasan_poison_pages(struct page *page, unsigned int order, bool init);
152+
static __always_inline void kasan_poison_pages(struct page *page,
135153
unsigned int order, bool init)
136154
{
137155
if (kasan_enabled())
138-
__kasan_alloc_pages(page, order, init);
156+
__kasan_poison_pages(page, order, init);
139157
}
140158

141-
void __kasan_free_pages(struct page *page, unsigned int order, bool init);
142-
static __always_inline void kasan_free_pages(struct page *page,
143-
unsigned int order, bool init)
159+
void __kasan_unpoison_pages(struct page *page, unsigned int order, bool init);
160+
static __always_inline void kasan_unpoison_pages(struct page *page,
161+
unsigned int order, bool init)
144162
{
145163
if (kasan_enabled())
146-
__kasan_free_pages(page, order, init);
164+
__kasan_unpoison_pages(page, order, init);
147165
}
148166

149167
void __kasan_cache_create(struct kmem_cache *cache, unsigned int *size,
@@ -285,21 +303,15 @@ void kasan_restore_multi_shot(bool enabled);
285303

286304
#else /* CONFIG_KASAN */
287305

288-
static inline bool kasan_enabled(void)
289-
{
290-
return false;
291-
}
292-
static inline bool kasan_has_integrated_init(void)
293-
{
294-
return false;
295-
}
296306
static inline slab_flags_t kasan_never_merge(void)
297307
{
298308
return 0;
299309
}
300310
static inline void kasan_unpoison_range(const void *address, size_t size) {}
301-
static inline void kasan_alloc_pages(struct page *page, unsigned int order, bool init) {}
302-
static inline void kasan_free_pages(struct page *page, unsigned int order, bool init) {}
311+
static inline void kasan_poison_pages(struct page *page, unsigned int order,
312+
bool init) {}
313+
static inline void kasan_unpoison_pages(struct page *page, unsigned int order,
314+
bool init) {}
303315
static inline void kasan_cache_create(struct kmem_cache *cache,
304316
unsigned int *size,
305317
slab_flags_t *flags) {}

mm/kasan/common.c

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -97,7 +97,7 @@ slab_flags_t __kasan_never_merge(void)
9797
return 0;
9898
}
9999

100-
void __kasan_alloc_pages(struct page *page, unsigned int order, bool init)
100+
void __kasan_unpoison_pages(struct page *page, unsigned int order, bool init)
101101
{
102102
u8 tag;
103103
unsigned long i;
@@ -111,7 +111,7 @@ void __kasan_alloc_pages(struct page *page, unsigned int order, bool init)
111111
kasan_unpoison(page_address(page), PAGE_SIZE << order, init);
112112
}
113113

114-
void __kasan_free_pages(struct page *page, unsigned int order, bool init)
114+
void __kasan_poison_pages(struct page *page, unsigned int order, bool init)
115115
{
116116
if (likely(!PageHighMem(page)))
117117
kasan_poison(page_address(page), PAGE_SIZE << order,

mm/kasan/hw_tags.c

Lines changed: 22 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -238,6 +238,28 @@ struct kasan_track *kasan_get_free_track(struct kmem_cache *cache,
238238
return &alloc_meta->free_track[0];
239239
}
240240

241+
void kasan_alloc_pages(struct page *page, unsigned int order, gfp_t flags)
242+
{
243+
/*
244+
* This condition should match the one in post_alloc_hook() in
245+
* page_alloc.c.
246+
*/
247+
bool init = !want_init_on_free() && want_init_on_alloc(flags);
248+
249+
kasan_unpoison_pages(page, order, init);
250+
}
251+
252+
void kasan_free_pages(struct page *page, unsigned int order)
253+
{
254+
/*
255+
* This condition should match the one in free_pages_prepare() in
256+
* page_alloc.c.
257+
*/
258+
bool init = want_init_on_free();
259+
260+
kasan_poison_pages(page, order, init);
261+
}
262+
241263
#if IS_ENABLED(CONFIG_KASAN_KUNIT_TEST)
242264

243265
void kasan_set_tagging_report_once(bool state)

mm/mempool.c

Lines changed: 4 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -106,15 +106,17 @@ static __always_inline void kasan_poison_element(mempool_t *pool, void *element)
106106
if (pool->alloc == mempool_alloc_slab || pool->alloc == mempool_kmalloc)
107107
kasan_slab_free_mempool(element);
108108
else if (pool->alloc == mempool_alloc_pages)
109-
kasan_free_pages(element, (unsigned long)pool->pool_data, false);
109+
kasan_poison_pages(element, (unsigned long)pool->pool_data,
110+
false);
110111
}
111112

112113
static void kasan_unpoison_element(mempool_t *pool, void *element)
113114
{
114115
if (pool->alloc == mempool_alloc_slab || pool->alloc == mempool_kmalloc)
115116
kasan_unpoison_range(element, __ksize(element));
116117
else if (pool->alloc == mempool_alloc_pages)
117-
kasan_alloc_pages(element, (unsigned long)pool->pool_data, false);
118+
kasan_unpoison_pages(element, (unsigned long)pool->pool_data,
119+
false);
118120
}
119121

120122
static __always_inline void add_element(mempool_t *pool, void *element)

mm/page_alloc.c

Lines changed: 29 additions & 26 deletions
Original file line numberDiff line numberDiff line change
@@ -382,7 +382,7 @@ int page_group_by_mobility_disabled __read_mostly;
382382
static DEFINE_STATIC_KEY_TRUE(deferred_pages);
383383

384384
/*
385-
* Calling kasan_free_pages() only after deferred memory initialization
385+
* Calling kasan_poison_pages() only after deferred memory initialization
386386
* has completed. Poisoning pages during deferred memory init will greatly
387387
* lengthen the process and cause problem in large memory systems as the
388388
* deferred pages initialization is done with interrupt disabled.
@@ -394,15 +394,11 @@ static DEFINE_STATIC_KEY_TRUE(deferred_pages);
394394
* on-demand allocation and then freed again before the deferred pages
395395
* initialization is done, but this is not likely to happen.
396396
*/
397-
static inline void kasan_free_nondeferred_pages(struct page *page, int order,
398-
bool init, fpi_t fpi_flags)
397+
static inline bool should_skip_kasan_poison(fpi_t fpi_flags)
399398
{
400-
if (static_branch_unlikely(&deferred_pages))
401-
return;
402-
if (!IS_ENABLED(CONFIG_KASAN_GENERIC) &&
403-
(fpi_flags & FPI_SKIP_KASAN_POISON))
404-
return;
405-
kasan_free_pages(page, order, init);
399+
return static_branch_unlikely(&deferred_pages) ||
400+
(!IS_ENABLED(CONFIG_KASAN_GENERIC) &&
401+
(fpi_flags & FPI_SKIP_KASAN_POISON));
406402
}
407403

408404
/* Returns true if the struct page for the pfn is uninitialised */
@@ -453,13 +449,10 @@ defer_init(int nid, unsigned long pfn, unsigned long end_pfn)
453449
return false;
454450
}
455451
#else
456-
static inline void kasan_free_nondeferred_pages(struct page *page, int order,
457-
bool init, fpi_t fpi_flags)
452+
static inline bool should_skip_kasan_poison(fpi_t fpi_flags)
458453
{
459-
if (!IS_ENABLED(CONFIG_KASAN_GENERIC) &&
460-
(fpi_flags & FPI_SKIP_KASAN_POISON))
461-
return;
462-
kasan_free_pages(page, order, init);
454+
return (!IS_ENABLED(CONFIG_KASAN_GENERIC) &&
455+
(fpi_flags & FPI_SKIP_KASAN_POISON));
463456
}
464457

465458
static inline bool early_page_uninitialised(unsigned long pfn)
@@ -1245,7 +1238,7 @@ static __always_inline bool free_pages_prepare(struct page *page,
12451238
unsigned int order, bool check_free, fpi_t fpi_flags)
12461239
{
12471240
int bad = 0;
1248-
bool init;
1241+
bool skip_kasan_poison = should_skip_kasan_poison(fpi_flags);
12491242

12501243
VM_BUG_ON_PAGE(PageTail(page), page);
12511244

@@ -1314,10 +1307,17 @@ static __always_inline bool free_pages_prepare(struct page *page,
13141307
* With hardware tag-based KASAN, memory tags must be set before the
13151308
* page becomes unavailable via debug_pagealloc or arch_free_page.
13161309
*/
1317-
init = want_init_on_free();
1318-
if (init && !kasan_has_integrated_init())
1319-
kernel_init_free_pages(page, 1 << order);
1320-
kasan_free_nondeferred_pages(page, order, init, fpi_flags);
1310+
if (kasan_has_integrated_init()) {
1311+
if (!skip_kasan_poison)
1312+
kasan_free_pages(page, order);
1313+
} else {
1314+
bool init = want_init_on_free();
1315+
1316+
if (init)
1317+
kernel_init_free_pages(page, 1 << order);
1318+
if (!skip_kasan_poison)
1319+
kasan_poison_pages(page, order, init);
1320+
}
13211321

13221322
/*
13231323
* arch_free_page() can make the page's contents inaccessible. s390
@@ -2324,8 +2324,6 @@ static bool check_new_pages(struct page *page, unsigned int order)
23242324
inline void post_alloc_hook(struct page *page, unsigned int order,
23252325
gfp_t gfp_flags)
23262326
{
2327-
bool init;
2328-
23292327
set_page_private(page, 0);
23302328
set_page_refcounted(page);
23312329

@@ -2344,10 +2342,15 @@ inline void post_alloc_hook(struct page *page, unsigned int order,
23442342
* kasan_alloc_pages and kernel_init_free_pages must be
23452343
* kept together to avoid discrepancies in behavior.
23462344
*/
2347-
init = !want_init_on_free() && want_init_on_alloc(gfp_flags);
2348-
kasan_alloc_pages(page, order, init);
2349-
if (init && !kasan_has_integrated_init())
2350-
kernel_init_free_pages(page, 1 << order);
2345+
if (kasan_has_integrated_init()) {
2346+
kasan_alloc_pages(page, order, gfp_flags);
2347+
} else {
2348+
bool init = !want_init_on_free() && want_init_on_alloc(gfp_flags);
2349+
2350+
kasan_unpoison_pages(page, order, init);
2351+
if (init)
2352+
kernel_init_free_pages(page, 1 << order);
2353+
}
23512354

23522355
set_page_owner(page, order, gfp_flags);
23532356
}

0 commit comments

Comments
 (0)