@@ -1527,7 +1527,7 @@ static bool xe_ttm_bo_lock_in_destructor(struct ttm_buffer_object *ttm_bo)
15271527 * always succeed here, as long as we hold the lru lock.
15281528 */
15291529 spin_lock (& ttm_bo -> bdev -> lru_lock );
1530- locked = dma_resv_trylock (ttm_bo -> base .resv );
1530+ locked = dma_resv_trylock (& ttm_bo -> base ._resv );
15311531 spin_unlock (& ttm_bo -> bdev -> lru_lock );
15321532 xe_assert (xe , locked );
15331533
@@ -1547,13 +1547,6 @@ static void xe_ttm_bo_release_notify(struct ttm_buffer_object *ttm_bo)
15471547 bo = ttm_to_xe_bo (ttm_bo );
15481548 xe_assert (xe_bo_device (bo ), !(bo -> created && kref_read (& ttm_bo -> base .refcount )));
15491549
1550- /*
1551- * Corner case where TTM fails to allocate memory and this BOs resv
1552- * still points the VMs resv
1553- */
1554- if (ttm_bo -> base .resv != & ttm_bo -> base ._resv )
1555- return ;
1556-
15571550 if (!xe_ttm_bo_lock_in_destructor (ttm_bo ))
15581551 return ;
15591552
@@ -1563,22 +1556,22 @@ static void xe_ttm_bo_release_notify(struct ttm_buffer_object *ttm_bo)
15631556 * TODO: Don't do this for external bos once we scrub them after
15641557 * unbind.
15651558 */
1566- dma_resv_for_each_fence (& cursor , ttm_bo -> base .resv ,
1559+ dma_resv_for_each_fence (& cursor , & ttm_bo -> base ._resv ,
15671560 DMA_RESV_USAGE_BOOKKEEP , fence ) {
15681561 if (xe_fence_is_xe_preempt (fence ) &&
15691562 !dma_fence_is_signaled (fence )) {
15701563 if (!replacement )
15711564 replacement = dma_fence_get_stub ();
15721565
1573- dma_resv_replace_fences (ttm_bo -> base .resv ,
1566+ dma_resv_replace_fences (& ttm_bo -> base ._resv ,
15741567 fence -> context ,
15751568 replacement ,
15761569 DMA_RESV_USAGE_BOOKKEEP );
15771570 }
15781571 }
15791572 dma_fence_put (replacement );
15801573
1581- dma_resv_unlock (ttm_bo -> base .resv );
1574+ dma_resv_unlock (& ttm_bo -> base ._resv );
15821575}
15831576
15841577static void xe_ttm_bo_delete_mem_notify (struct ttm_buffer_object * ttm_bo )