
Commit 9ddb3c1

Matthew Wilcox (Oracle) authored and torvalds committed
mm: fix struct page layout on 32-bit systems
32-bit architectures which expect 8-byte alignment for 8-byte integers and need 64-bit DMA addresses (arm, mips, ppc) had their struct page inadvertently expanded in 2019.  When the dma_addr_t was added, it forced the alignment of the union to 8 bytes, which inserted a 4 byte gap between 'flags' and the union.

Fix this by storing the dma_addr_t in one or two adjacent unsigned longs.  This restores the alignment to that of an unsigned long.  We always store the low bits in the first word to prevent the PageTail bit from being inadvertently set on a big endian platform.  If that happened, get_user_pages_fast() racing against a page which was freed and reallocated to the page_pool could dereference a bogus compound_head(), which would be hard to trace back to this cause.

Link: https://lkml.kernel.org/r/20210510153211.1504886-1-willy@infradead.org
Fixes: c25fff7 ("mm: add dma_addr_t to struct page")
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Acked-by: Ilias Apalodimas <ilias.apalodimas@linaro.org>
Acked-by: Jesper Dangaard Brouer <brouer@redhat.com>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Tested-by: Matteo Croce <mcroce@linux.microsoft.com>
Cc: <stable@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
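The layout problem is easy to reproduce outside the kernel.  Below is a minimal userspace sketch, not kernel code: kernel_ulong and kernel_dma_addr are hypothetical stand-ins that model a 32-bit unsigned long and a 64-bit dma_addr_t with the 8-byte alignment the arm/mips/ppc 32-bit ABIs give to 8-byte integers, so the demo runs on any host.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

typedef uint32_t kernel_ulong;				/* unsigned long on 32-bit */
typedef uint64_t kernel_dma_addr __attribute__((aligned(8)));	/* dma_addr_t, 8-byte ABI alignment */

struct page_2019 {		/* layout after dma_addr_t was added */
	kernel_ulong flags;
	union {
		kernel_dma_addr dma_addr;	/* forces union to 8-byte alignment */
	};
};

struct page_fixed {		/* layout after this commit */
	kernel_ulong flags;
	union {
		kernel_ulong dma_addr[2];	/* aligned like unsigned long */
	};
};

int main(void)
{
	/* The 8-byte alignment opens a 4-byte gap after 'flags'. */
	printf("2019:  union at offset %zu, struct is %zu bytes\n",
	       offsetof(struct page_2019, dma_addr), sizeof(struct page_2019));
	/* Two unsigned longs pack directly after 'flags'. */
	printf("fixed: union at offset %zu, struct is %zu bytes\n",
	       offsetof(struct page_fixed, dma_addr), sizeof(struct page_fixed));
	return 0;
}

On any host this prints an offset of 8 and a size of 16 for the 2019 layout, versus an offset of 4 and a size of 12 for the fixed one: the gap and the extra tail padding are gone.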
1 parent 6286229 commit 9ddb3c1

3 files changed: 20 additions & 8 deletions


include/linux/mm_types.h

Lines changed: 2 additions & 2 deletions
@@ -97,10 +97,10 @@ struct page {
 		};
 		struct {	/* page_pool used by netstack */
 			/**
-			 * @dma_addr: might require a 64-bit value even on
+			 * @dma_addr: might require a 64-bit value on
 			 * 32-bit architectures.
 			 */
-			dma_addr_t dma_addr;
+			unsigned long dma_addr[2];
 		};
 		struct {	/* slab, slob and slub */
 			union {
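The commit message's point about PageTail is worth unpacking: dma_addr[0] occupies the same union slot as compound_head, whose bit 0 marks a tail page.  On a big-endian 32-bit machine, a raw 64-bit store puts the high word at the lower address, i.e. into that slot.  A small sketch of the hazard, using a hypothetical overlay union and a page-aligned DMA address above 4GB (bit 32 set):

#include <stdint.h>
#include <stdio.h>

union overlay {
	uint64_t as_u64;	/* how the old code stored dma_addr */
	uint32_t word[2];	/* word[0] aliases compound_head */
};

int main(void)
{
	/* Page-aligned DMA address above 4GB, so bit 32 is set. */
	union overlay u = { .as_u64 = 0x100005000ULL };

	printf("word[0] = %#010x\n", (unsigned)u.word[0]);
	if (u.word[0] & 1)
		printf("big-endian host: bit 0 set; the page would look like a compound tail\n");
	else
		printf("little-endian host: low word first; bit 0 clear (page-aligned)\n");
	return 0;
}

Storing the low bits explicitly in dma_addr[0], as the fix does, keeps bit 0 of that word clear on both endiannesses, since the DMA addresses involved are page-aligned.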

include/net/page_pool.h

Lines changed: 11 additions & 1 deletion
@@ -198,7 +198,17 @@ static inline void page_pool_recycle_direct(struct page_pool *pool,
 
 static inline dma_addr_t page_pool_get_dma_addr(struct page *page)
 {
-	return page->dma_addr;
+	dma_addr_t ret = page->dma_addr[0];
+	if (sizeof(dma_addr_t) > sizeof(unsigned long))
+		ret |= (dma_addr_t)page->dma_addr[1] << 16 << 16;
+	return ret;
+}
+
+static inline void page_pool_set_dma_addr(struct page *page, dma_addr_t addr)
+{
+	page->dma_addr[0] = addr;
+	if (sizeof(dma_addr_t) > sizeof(unsigned long))
+		page->dma_addr[1] = upper_32_bits(addr);
 }
 
 static inline bool is_page_pool_compiled_in(void)
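The << 16 << 16 instead of << 32 is deliberate: when dma_addr_t is only 32 bits wide the branch is dead code, but a literal shift by 32 would equal the width of the type, which is undefined in C and draws a compiler warning even in a branch that is never taken; shifting by 16 twice is well defined everywhere.  A userspace round-trip sketch of the two helpers, where fake_page, kernel_ulong, and dma_addr64 are hypothetical stand-ins for the kernel types:

#include <assert.h>
#include <stdint.h>

typedef uint32_t kernel_ulong;		/* unsigned long on a 32-bit arch */
typedef uint64_t dma_addr64;		/* dma_addr_t with 64-bit DMA */

struct fake_page {
	kernel_ulong dma_addr[2];
};

static dma_addr64 get_dma_addr(struct fake_page *page)
{
	dma_addr64 ret = page->dma_addr[0];

	if (sizeof(dma_addr64) > sizeof(kernel_ulong))
		/* << 16 << 16 rather than << 32: see note above. */
		ret |= (dma_addr64)page->dma_addr[1] << 16 << 16;
	return ret;
}

static void set_dma_addr(struct fake_page *page, dma_addr64 addr)
{
	page->dma_addr[0] = addr;		/* low bits always in word 0 */
	if (sizeof(dma_addr64) > sizeof(kernel_ulong))
		page->dma_addr[1] = addr >> 32;	/* the kernel's upper_32_bits() */
}

int main(void)
{
	struct fake_page page;

	set_dma_addr(&page, 0x123456789000ULL);
	assert(get_dma_addr(&page) == 0x123456789000ULL);
	return 0;
}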

net/core/page_pool.c

Lines changed: 7 additions & 5 deletions
@@ -174,8 +174,10 @@ static void page_pool_dma_sync_for_device(struct page_pool *pool,
 					  struct page *page,
 					  unsigned int dma_sync_size)
 {
+	dma_addr_t dma_addr = page_pool_get_dma_addr(page);
+
 	dma_sync_size = min(dma_sync_size, pool->p.max_len);
-	dma_sync_single_range_for_device(pool->p.dev, page->dma_addr,
+	dma_sync_single_range_for_device(pool->p.dev, dma_addr,
 					 pool->p.offset, dma_sync_size,
 					 pool->p.dma_dir);
 }
@@ -195,7 +197,7 @@ static bool page_pool_dma_map(struct page_pool *pool, struct page *page)
 	if (dma_mapping_error(pool->p.dev, dma))
 		return false;
 
-	page->dma_addr = dma;
+	page_pool_set_dma_addr(page, dma);
 
 	if (pool->p.flags & PP_FLAG_DMA_SYNC_DEV)
 		page_pool_dma_sync_for_device(pool, page, pool->p.max_len);
@@ -331,13 +333,13 @@ void page_pool_release_page(struct page_pool *pool, struct page *page)
 	 */
 		goto skip_dma_unmap;
 
-	dma = page->dma_addr;
+	dma = page_pool_get_dma_addr(page);
 
-	/* When page is unmapped, it cannot be returned our pool */
+	/* When page is unmapped, it cannot be returned to our pool */
 	dma_unmap_page_attrs(pool->p.dev, dma,
 			     PAGE_SIZE << pool->p.order, pool->p.dma_dir,
 			     DMA_ATTR_SKIP_CPU_SYNC);
-	page->dma_addr = 0;
+	page_pool_set_dma_addr(page, 0);
 skip_dma_unmap:
 	/* This may be the last page returned, releasing the pool, so
 	 * it is not safe to reference pool afterwards.
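For context, a driver built on page_pool never touches page->dma_addr directly; it goes through the accessor, which now reassembles the address from the two words.  A hedged sketch follows: the descriptor struct and refill function are hypothetical, while page_pool_dev_alloc_pages() and page_pool_get_dma_addr() are the real page_pool API.

#include <linux/errno.h>
#include <net/page_pool.h>

struct my_rx_desc {			/* hypothetical hardware descriptor */
	__le64 buf_addr;
};

static int my_rx_refill_one(struct page_pool *pool, struct my_rx_desc *desc)
{
	struct page *page = page_pool_dev_alloc_pages(pool);

	if (!page)
		return -ENOMEM;

	/* Works unchanged on 32-bit and 64-bit: the helper hides the
	 * one-or-two-word representation inside struct page. */
	desc->buf_addr = cpu_to_le64(page_pool_get_dma_addr(page));
	return 0;
}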
