
Commit 379989e

Author: Thomas Hellström
drm/ttm/pool: Fix ttm_pool_alloc error path
When hitting an error, the error path forgot to unmap dma mappings and
could call set_pages_wb() on already uncached pages. Fix this by
introducing a common ttm_pool_free_range() function that does the right
thing.

v2:
- Simplify that common function (Christian König)
v3:
- Rename that common function to ttm_pool_free_range() (Christian König)

Fixes: d099fc8 ("drm/ttm: new TT backend allocation pool v3")
Cc: Christian König <christian.koenig@amd.com>
Cc: Dave Airlie <airlied@redhat.com>
Cc: Christian Koenig <christian.koenig@amd.com>
Cc: Huang Rui <ray.huang@amd.com>
Cc: dri-devel@lists.freedesktop.org
Signed-off-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20230404200650.11043-2-thomas.hellstrom@linux.intel.com
1 parent 864b438 commit 379989e

1 file changed: drivers/gpu/drm/ttm/ttm_pool.c (51 additions & 30 deletions)
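Before the diff, a note on the failure mode: ttm_pool_alloc() converts page caching as it goes, so on error the page vector can hold a leading range with the requested caching and a trailing range that is still cached, split at what the patch calls caching_divide. A minimal standalone sketch of that state follows; it is illustration only, with hypothetical page counts, not kernel API.

/*
 * Minimal userspace sketch (not kernel code) of the page-vector state the
 * fixed error path has to handle. The page count and split index are
 * hypothetical; "write-combined" stands in for any non-cached tt->caching.
 */
#include <stdio.h>

int main(void)
{
	int num_pages = 8;	/* hypothetical allocation size */
	int caching_divide = 5;	/* hypothetical: conversion stopped here */
	int i;

	/* Pages below the divide were converted to the requested caching
	 * mode; the rest never left the default cached state because the
	 * allocation failed first. */
	for (i = 0; i < num_pages; i++)
		printf("page %d: %s\n", i,
		       i < caching_divide ? "write-combined" : "cached");

	/* The fix frees each range with the caching mode it actually has,
	 * mirroring the two ttm_pool_free_range() calls in the patch: */
	printf("free [0, %d) as tt->caching\n", caching_divide);
	printf("free [%d, %d) as ttm_cached\n", caching_divide, num_pages);
	return 0;
}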
--- a/drivers/gpu/drm/ttm/ttm_pool.c
+++ b/drivers/gpu/drm/ttm/ttm_pool.c
@@ -367,6 +367,43 @@ static int ttm_pool_page_allocated(struct ttm_pool *pool, unsigned int order,
 	return 0;
 }
 
+/**
+ * ttm_pool_free_range() - Free a range of TTM pages
+ * @pool: The pool used for allocating.
+ * @tt: The struct ttm_tt holding the page pointers.
+ * @caching: The page caching mode used by the range.
+ * @start_page: index for first page to free.
+ * @end_page: index for last page to free + 1.
+ *
+ * During allocation the ttm_tt page-vector may be populated with ranges of
+ * pages with different attributes if allocation hit an error without being
+ * able to completely fulfill the allocation. This function can be used
+ * to free these individual ranges.
+ */
+static void ttm_pool_free_range(struct ttm_pool *pool, struct ttm_tt *tt,
+				enum ttm_caching caching,
+				pgoff_t start_page, pgoff_t end_page)
+{
+	struct page **pages = tt->pages;
+	unsigned int order;
+	pgoff_t i, nr;
+
+	for (i = start_page; i < end_page; i += nr, pages += nr) {
+		struct ttm_pool_type *pt = NULL;
+
+		order = ttm_pool_page_order(pool, *pages);
+		nr = (1UL << order);
+		if (tt->dma_address)
+			ttm_pool_unmap(pool, tt->dma_address[i], nr);
+
+		pt = ttm_pool_select_type(pool, caching, order);
+		if (pt)
+			ttm_pool_type_give(pt, *pages);
+		else
+			ttm_pool_free_page(pool, caching, order, *pages);
+	}
+}
+
 /**
  * ttm_pool_alloc - Fill a ttm_tt object
  *
@@ -382,12 +419,14 @@ static int ttm_pool_page_allocated(struct ttm_pool *pool, unsigned int order,
 int ttm_pool_alloc(struct ttm_pool *pool, struct ttm_tt *tt,
 		   struct ttm_operation_ctx *ctx)
 {
-	unsigned long num_pages = tt->num_pages;
+	pgoff_t num_pages = tt->num_pages;
 	dma_addr_t *dma_addr = tt->dma_address;
 	struct page **caching = tt->pages;
 	struct page **pages = tt->pages;
+	enum ttm_caching page_caching;
 	gfp_t gfp_flags = GFP_USER;
-	unsigned int i, order;
+	pgoff_t caching_divide;
+	unsigned int order;
 	struct page *p;
 	int r;
 
@@ -410,6 +449,7 @@ int ttm_pool_alloc(struct ttm_pool *pool, struct ttm_tt *tt,
 	     order = min_t(unsigned int, order, __fls(num_pages))) {
 		struct ttm_pool_type *pt;
 
+		page_caching = tt->caching;
 		pt = ttm_pool_select_type(pool, tt->caching, order);
 		p = pt ? ttm_pool_type_take(pt) : NULL;
 		if (p) {
@@ -418,6 +458,7 @@ int ttm_pool_alloc(struct ttm_pool *pool, struct ttm_tt *tt,
 			if (r)
 				goto error_free_page;
 
+			caching = pages;
 			do {
 				r = ttm_pool_page_allocated(pool, order, p,
 							    &dma_addr,
@@ -426,14 +467,15 @@ int ttm_pool_alloc(struct ttm_pool *pool, struct ttm_tt *tt,
 				if (r)
 					goto error_free_page;
 
+				caching = pages;
 				if (num_pages < (1 << order))
 					break;
 
 				p = ttm_pool_type_take(pt);
 			} while (p);
-			caching = pages;
 		}
 
+		page_caching = ttm_cached;
 		while (num_pages >= (1 << order) &&
 		       (p = ttm_pool_alloc_page(pool, gfp_flags, order))) {
 
@@ -442,6 +484,7 @@ int ttm_pool_alloc(struct ttm_pool *pool, struct ttm_tt *tt,
 						   tt->caching);
 				if (r)
 					goto error_free_page;
+				caching = pages;
 			}
 			r = ttm_pool_page_allocated(pool, order, p, &dma_addr,
 						    &num_pages, &pages);
@@ -468,15 +511,13 @@ int ttm_pool_alloc(struct ttm_pool *pool, struct ttm_tt *tt,
 	return 0;
 
 error_free_page:
-	ttm_pool_free_page(pool, tt->caching, order, p);
+	ttm_pool_free_page(pool, page_caching, order, p);
 
 error_free_all:
 	num_pages = tt->num_pages - num_pages;
-	for (i = 0; i < num_pages; ) {
-		order = ttm_pool_page_order(pool, tt->pages[i]);
-		ttm_pool_free_page(pool, tt->caching, order, tt->pages[i]);
-		i += 1 << order;
-	}
+	caching_divide = caching - tt->pages;
+	ttm_pool_free_range(pool, tt, tt->caching, 0, caching_divide);
+	ttm_pool_free_range(pool, tt, ttm_cached, caching_divide, num_pages);
 
 	return r;
 }
@@ -492,27 +533,7 @@ EXPORT_SYMBOL(ttm_pool_alloc);
  */
 void ttm_pool_free(struct ttm_pool *pool, struct ttm_tt *tt)
 {
-	unsigned int i;
-
-	for (i = 0; i < tt->num_pages; ) {
-		struct page *p = tt->pages[i];
-		unsigned int order, num_pages;
-		struct ttm_pool_type *pt;
-
-		order = ttm_pool_page_order(pool, p);
-		num_pages = 1ULL << order;
-		if (tt->dma_address)
-			ttm_pool_unmap(pool, tt->dma_address[i], num_pages);
-
-		pt = ttm_pool_select_type(pool, tt->caching, order);
-		if (pt)
-			ttm_pool_type_give(pt, tt->pages[i]);
-		else
-			ttm_pool_free_page(pool, tt->caching, order,
-					   tt->pages[i]);
-
-		i += num_pages;
-	}
+	ttm_pool_free_range(pool, tt, tt->caching, 0, tt->num_pages);
 
 	while (atomic_long_read(&allocated_pages) > page_pool_size)
 		ttm_pool_shrink();