2626
2727#include <linux/scatterlist.h>
2828
29+ #include <drm/drm_pagemap.h>
2930#include <drm/ttm/ttm_placement.h>
3031#include <drm/ttm/ttm_range_manager.h>
3132#include <drm/ttm/ttm_resource.h>
3435#include "xe_bo.h"
3536#include "xe_device.h"
3637#include "xe_macros.h"
38+ #include "xe_svm.h"
3739#include "xe_ttm_vram_mgr.h"
3840
/**
 * struct xe_res_cursor - state for walking over dma mapping, vram_mgr,
 * stolen_mgr, and gtt_mgr allocations
 */
struct xe_res_cursor {
	/** @start: Offset into the current segment of the cursor */
	u64 start;
	/** @size: Size of the current segment. */
	u64 size;
	/** @remaining: Remaining bytes in cursor */
	u64 remaining;
	/** @node: Opaque pointer to the current resource node */
	void *node;
	/** @mem_type: TTM memory type backing the cursor (XE_PL_*) */
	u32 mem_type;
	/** @sgl: Scatterlist entry for cursor, NULL unless walking an sg table */
	struct scatterlist *sgl;
	/** @dma_addr: Current element in a struct drm_pagemap_device_addr array */
	const struct drm_pagemap_device_addr *dma_addr;
	/** @mm: Buddy allocator for VRAM cursor */
	struct drm_buddy *mm;
	/**
	 * @dma_start: DMA start address for the current segment.
	 * This may be different to @dma_addr.addr since elements in
	 * the array may be coalesced to a single segment.
	 */
	u64 dma_start;
	/** @dma_seg_size: Size of the current DMA segment. */
	u64 dma_seg_size;
};
4971
5072static struct drm_buddy * xe_res_get_buddy (struct ttm_resource * res )
@@ -70,6 +92,7 @@ static inline void xe_res_first(struct ttm_resource *res,
7092 struct xe_res_cursor * cur )
7193{
7294 cur -> sgl = NULL ;
95+ cur -> dma_addr = NULL ;
7396 if (!res )
7497 goto fallback ;
7598
@@ -141,6 +164,36 @@ static inline void __xe_res_sg_next(struct xe_res_cursor *cur)
141164 cur -> sgl = sgl ;
142165}
143166
167+ /**
168+ * __xe_res_dma_next() - Advance the cursor when end-of-segment is reached
169+ * @cur: The cursor
170+ */
171+ static inline void __xe_res_dma_next (struct xe_res_cursor * cur )
172+ {
173+ const struct drm_pagemap_device_addr * addr = cur -> dma_addr ;
174+ u64 start = cur -> start ;
175+
176+ while (start >= cur -> dma_seg_size ) {
177+ start -= cur -> dma_seg_size ;
178+ addr ++ ;
179+ cur -> dma_seg_size = PAGE_SIZE << addr -> order ;
180+ }
181+ cur -> dma_start = addr -> addr ;
182+
183+ /* Coalesce array_elements */
184+ while (cur -> dma_seg_size - start < cur -> remaining ) {
185+ if (cur -> dma_start + cur -> dma_seg_size != addr [1 ].addr ||
186+ addr -> proto != addr [1 ].proto )
187+ break ;
188+ addr ++ ;
189+ cur -> dma_seg_size += PAGE_SIZE << addr -> order ;
190+ }
191+
192+ cur -> dma_addr = addr ;
193+ cur -> start = start ;
194+ cur -> size = cur -> dma_seg_size - start ;
195+ }
196+
144197/**
145198 * xe_res_first_sg - initialize a xe_res_cursor with a scatter gather table
146199 *
@@ -160,11 +213,42 @@ static inline void xe_res_first_sg(const struct sg_table *sg,
160213 cur -> start = start ;
161214 cur -> remaining = size ;
162215 cur -> size = 0 ;
216+ cur -> dma_addr = NULL ;
163217 cur -> sgl = sg -> sgl ;
164218 cur -> mem_type = XE_PL_TT ;
165219 __xe_res_sg_next (cur );
166220}
167221
222+ /**
223+ * xe_res_first_dma - initialize a xe_res_cursor with dma_addr array
224+ *
225+ * @dma_addr: struct drm_pagemap_device_addr array to walk
226+ * @start: Start of the range
227+ * @size: Size of the range
228+ * @cur: cursor object to initialize
229+ *
230+ * Start walking over the range of allocations between @start and @size.
231+ */
232+ static inline void xe_res_first_dma (const struct drm_pagemap_device_addr * dma_addr ,
233+ u64 start , u64 size ,
234+ struct xe_res_cursor * cur )
235+ {
236+ XE_WARN_ON (!dma_addr );
237+ XE_WARN_ON (!IS_ALIGNED (start , PAGE_SIZE ) ||
238+ !IS_ALIGNED (size , PAGE_SIZE ));
239+
240+ cur -> node = NULL ;
241+ cur -> start = start ;
242+ cur -> remaining = size ;
243+ cur -> dma_seg_size = PAGE_SIZE << dma_addr -> order ;
244+ cur -> dma_start = 0 ;
245+ cur -> size = 0 ;
246+ cur -> dma_addr = dma_addr ;
247+ __xe_res_dma_next (cur );
248+ cur -> sgl = NULL ;
249+ cur -> mem_type = XE_PL_TT ;
250+ }
251+
168252/**
169253 * xe_res_next - advance the cursor
170254 *
@@ -191,6 +275,12 @@ static inline void xe_res_next(struct xe_res_cursor *cur, u64 size)
191275 return ;
192276 }
193277
278+ if (cur -> dma_addr ) {
279+ cur -> start += size ;
280+ __xe_res_dma_next (cur );
281+ return ;
282+ }
283+
194284 if (cur -> sgl ) {
195285 cur -> start += size ;
196286 __xe_res_sg_next (cur );
@@ -232,6 +322,35 @@ static inline void xe_res_next(struct xe_res_cursor *cur, u64 size)
232322 */
233323static inline u64 xe_res_dma (const struct xe_res_cursor * cur )
234324{
235- return cur -> sgl ? sg_dma_address (cur -> sgl ) + cur -> start : cur -> start ;
325+ if (cur -> dma_addr )
326+ return cur -> dma_start + cur -> start ;
327+ else if (cur -> sgl )
328+ return sg_dma_address (cur -> sgl ) + cur -> start ;
329+ else
330+ return cur -> start ;
331+ }
332+
333+ /**
334+ * xe_res_is_vram() - Whether the cursor current dma address points to
335+ * same-device VRAM
336+ * @cur: The cursor.
337+ *
338+ * Return: true iff the address returned by xe_res_dma() points to internal vram.
339+ */
340+ static inline bool xe_res_is_vram (const struct xe_res_cursor * cur )
341+ {
342+ if (cur -> dma_addr )
343+ return cur -> dma_addr -> proto == XE_INTERCONNECT_VRAM ;
344+
345+ switch (cur -> mem_type ) {
346+ case XE_PL_STOLEN :
347+ case XE_PL_VRAM0 :
348+ case XE_PL_VRAM1 :
349+ return true;
350+ default :
351+ break ;
352+ }
353+
354+ return false;
236355}
237356#endif
0 commit comments