@@ -256,9 +256,11 @@ int __init swiotlb_init_with_tbl(char *tlb, unsigned long nslabs,
256256 * Statically reserve bounce buffer space and initialize bounce buffer data
257257 * structures for the software IO TLB used to implement the DMA API.
258258 */
259- void __init swiotlb_init (bool addressing_limit , unsigned int flags )
259+ void __init swiotlb_init_remap (bool addressing_limit , unsigned int flags ,
260+ int (* remap )(void * tlb , unsigned long nslabs ))
260261{
261- size_t bytes = PAGE_ALIGN (default_nslabs << IO_TLB_SHIFT );
262+ unsigned long nslabs = default_nslabs ;
263+ size_t bytes ;
262264 void * tlb ;
263265
264266 if (!addressing_limit && !swiotlb_force_bounce )
@@ -271,12 +273,23 @@ void __init swiotlb_init(bool addressing_limit, unsigned int flags)
271273 * allow to pick a location everywhere for hypervisors with guest
272274 * memory encryption.
273275 */
276+ retry :
277+ bytes = PAGE_ALIGN (nslabs << IO_TLB_SHIFT );
274278 if (flags & SWIOTLB_ANY )
275279 tlb = memblock_alloc (bytes , PAGE_SIZE );
276280 else
277281 tlb = memblock_alloc_low (bytes , PAGE_SIZE );
278282 if (!tlb )
279283 goto fail ;
284+ if (remap && remap (tlb , nslabs ) < 0 ) {
285+ memblock_free (tlb , PAGE_ALIGN (bytes ));
286+
287+ nslabs = ALIGN (nslabs >> 1 , IO_TLB_SEGSIZE );
288+ if (nslabs < IO_TLB_MIN_SLABS )
289+ panic ("%s: Failed to remap %zu bytes\n" ,
290+ __func__ , bytes );
291+ goto retry ;
292+ }
280293 if (swiotlb_init_with_tbl (tlb , nslabs , flags ))
281294 goto fail_free_mem ;
282295 return ;
@@ -287,12 +300,18 @@ void __init swiotlb_init(bool addressing_limit, unsigned int flags)
287300 pr_warn ("Cannot allocate buffer" );
288301}
289302
303+ void __init swiotlb_init (bool addressing_limit , unsigned int flags )
304+ {
305+ return swiotlb_init_remap (addressing_limit , flags , NULL );
306+ }
307+
290308/*
291309 * Systems with larger DMA zones (those that don't support ISA) can
292310 * initialize the swiotlb later using the slab allocator if needed.
293311 * This should be just like above, but with some error catching.
294312 */
295- int swiotlb_init_late (size_t size , gfp_t gfp_mask )
313+ int swiotlb_init_late (size_t size , gfp_t gfp_mask ,
314+ int (* remap )(void * tlb , unsigned long nslabs ))
296315{
297316 unsigned long nslabs = ALIGN (size >> IO_TLB_SHIFT , IO_TLB_SEGSIZE );
298317 unsigned long bytes ;
@@ -303,6 +322,7 @@ int swiotlb_init_late(size_t size, gfp_t gfp_mask)
303322 if (swiotlb_force_disable )
304323 return 0 ;
305324
325+ retry :
306326 order = get_order (nslabs << IO_TLB_SHIFT );
307327 nslabs = SLABS_PER_PAGE << order ;
308328 bytes = nslabs << IO_TLB_SHIFT ;
@@ -323,6 +343,16 @@ int swiotlb_init_late(size_t size, gfp_t gfp_mask)
323343 (PAGE_SIZE << order ) >> 20 );
324344 nslabs = SLABS_PER_PAGE << order ;
325345 }
346+ if (remap )
347+ rc = remap (vstart , nslabs );
348+ if (rc ) {
349+ free_pages ((unsigned long )vstart , order );
350+
351+ nslabs = ALIGN (nslabs >> 1 , IO_TLB_SEGSIZE );
352+ if (nslabs < IO_TLB_MIN_SLABS )
353+ return rc ;
354+ goto retry ;
355+ }
326356 rc = swiotlb_late_init_with_tbl (vstart , nslabs );
327357 if (rc )
328358 free_pages ((unsigned long )vstart , order );
0 commit comments