|
25 | 25 | #include <linux/pagemap.h> |
26 | 26 | #include <linux/sched/mm.h> |
27 | 27 | #include <linux/sched/task.h> |
| 28 | +#include <linux/fdtable.h> |
28 | 29 | #include <drm/ttm/ttm_tt.h> |
29 | 30 |
|
30 | 31 | #include <drm/drm_exec.h> |
@@ -806,13 +807,22 @@ kfd_mem_dmaunmap_attachment(struct kgd_mem *mem, |
806 | 807 | static int kfd_mem_export_dmabuf(struct kgd_mem *mem) |
807 | 808 | { |
808 | 809 | if (!mem->dmabuf) { |
809 | | - struct dma_buf *ret = amdgpu_gem_prime_export( |
810 | | - &mem->bo->tbo.base, |
| 810 | + struct amdgpu_device *bo_adev; |
| 811 | + struct dma_buf *dmabuf; |
| 812 | + int r, fd; |
| 813 | + |
| 814 | + bo_adev = amdgpu_ttm_adev(mem->bo->tbo.bdev); |
| 815 | + r = drm_gem_prime_handle_to_fd(&bo_adev->ddev, bo_adev->kfd.client.file, |
| 816 | + mem->gem_handle, |
811 | 817 | mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE ? |
812 | | - DRM_RDWR : 0); |
813 | | - if (IS_ERR(ret)) |
814 | | - return PTR_ERR(ret); |
815 | | - mem->dmabuf = ret; |
| 818 | + DRM_RDWR : 0, &fd); |
| 819 | + if (r) |
| 820 | + return r; |
| 821 | + dmabuf = dma_buf_get(fd); |
| 822 | + close_fd(fd); |
| 823 | + if (WARN_ON_ONCE(IS_ERR(dmabuf))) |
| 824 | + return PTR_ERR(dmabuf); |
| 825 | + mem->dmabuf = dmabuf; |
816 | 826 | } |
817 | 827 |
|
818 | 828 | return 0; |
@@ -1778,6 +1788,9 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu( |
1778 | 1788 | pr_debug("Failed to allow vma node access. ret %d\n", ret); |
1779 | 1789 | goto err_node_allow; |
1780 | 1790 | } |
| 1791 | + ret = drm_gem_handle_create(adev->kfd.client.file, gobj, &(*mem)->gem_handle); |
| 1792 | + if (ret) |
| 1793 | + goto err_gem_handle_create; |
1781 | 1794 | bo = gem_to_amdgpu_bo(gobj); |
1782 | 1795 | if (bo_type == ttm_bo_type_sg) { |
1783 | 1796 | bo->tbo.sg = sg; |
@@ -1829,6 +1842,8 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu( |
1829 | 1842 | err_pin_bo: |
1830 | 1843 | err_validate_bo: |
1831 | 1844 | remove_kgd_mem_from_kfd_bo_list(*mem, avm->process_info); |
| 1845 | + drm_gem_handle_delete(adev->kfd.client.file, (*mem)->gem_handle); |
| 1846 | +err_gem_handle_create: |
1832 | 1847 | drm_vma_node_revoke(&gobj->vma_node, drm_priv); |
1833 | 1848 | err_node_allow: |
1834 | 1849 | /* Don't unreserve system mem limit twice */ |
@@ -1941,8 +1956,11 @@ int amdgpu_amdkfd_gpuvm_free_memory_of_gpu( |
1941 | 1956 |
|
1942 | 1957 | /* Free the BO*/ |
1943 | 1958 | drm_vma_node_revoke(&mem->bo->tbo.base.vma_node, drm_priv); |
1944 | | - if (mem->dmabuf) |
| 1959 | + drm_gem_handle_delete(adev->kfd.client.file, mem->gem_handle); |
| 1960 | + if (mem->dmabuf) { |
1945 | 1961 | dma_buf_put(mem->dmabuf); |
| 1962 | + mem->dmabuf = NULL; |
| 1963 | + } |
1946 | 1964 | mutex_destroy(&mem->lock); |
1947 | 1965 |
|
1948 | 1966 | /* If this releases the last reference, it will end up calling |
@@ -2294,34 +2312,26 @@ int amdgpu_amdkfd_gpuvm_get_vm_fault_info(struct amdgpu_device *adev, |
2294 | 2312 | return 0; |
2295 | 2313 | } |
2296 | 2314 |
|
2297 | | -int amdgpu_amdkfd_gpuvm_import_dmabuf(struct amdgpu_device *adev, |
2298 | | - struct dma_buf *dma_buf, |
2299 | | - uint64_t va, void *drm_priv, |
2300 | | - struct kgd_mem **mem, uint64_t *size, |
2301 | | - uint64_t *mmap_offset) |
| 2315 | +static int import_obj_create(struct amdgpu_device *adev, |
| 2316 | + struct dma_buf *dma_buf, |
| 2317 | + struct drm_gem_object *obj, |
| 2318 | + uint64_t va, void *drm_priv, |
| 2319 | + struct kgd_mem **mem, uint64_t *size, |
| 2320 | + uint64_t *mmap_offset) |
2302 | 2321 | { |
2303 | 2322 | struct amdgpu_vm *avm = drm_priv_to_vm(drm_priv); |
2304 | | - struct drm_gem_object *obj; |
2305 | 2323 | struct amdgpu_bo *bo; |
2306 | 2324 | int ret; |
2307 | 2325 |
|
2308 | | - obj = amdgpu_gem_prime_import(adev_to_drm(adev), dma_buf); |
2309 | | - if (IS_ERR(obj)) |
2310 | | - return PTR_ERR(obj); |
2311 | | - |
2312 | 2326 | bo = gem_to_amdgpu_bo(obj); |
2313 | 2327 | if (!(bo->preferred_domains & (AMDGPU_GEM_DOMAIN_VRAM | |
2314 | | - AMDGPU_GEM_DOMAIN_GTT))) { |
| 2328 | + AMDGPU_GEM_DOMAIN_GTT))) |
2315 | 2329 | /* Only VRAM and GTT BOs are supported */ |
2316 | | - ret = -EINVAL; |
2317 | | - goto err_put_obj; |
2318 | | - } |
| 2330 | + return -EINVAL; |
2319 | 2331 |
|
2320 | 2332 | *mem = kzalloc(sizeof(struct kgd_mem), GFP_KERNEL); |
2321 | | - if (!*mem) { |
2322 | | - ret = -ENOMEM; |
2323 | | - goto err_put_obj; |
2324 | | - } |
| 2333 | + if (!*mem) |
| 2334 | + return -ENOMEM; |
2325 | 2335 |
|
2326 | 2336 | ret = drm_vma_node_allow(&obj->vma_node, drm_priv); |
2327 | 2337 | if (ret) |
@@ -2371,8 +2381,41 @@ int amdgpu_amdkfd_gpuvm_import_dmabuf(struct amdgpu_device *adev, |
2371 | 2381 | drm_vma_node_revoke(&obj->vma_node, drm_priv); |
2372 | 2382 | err_free_mem: |
2373 | 2383 | kfree(*mem); |
| 2384 | + return ret; |
| 2385 | +} |
| 2386 | + |
/**
 * amdgpu_amdkfd_gpuvm_import_dmabuf_fd - Import a DMA-buf fd as a KFD BO
 * @adev: device the buffer is imported into
 * @fd: DMA-buf file descriptor provided by the caller
 * @va: GPU virtual address the caller intends to map the buffer at
 * @drm_priv: DRM file-private of the importing process (supplies the VM)
 * @mem: output, newly allocated kgd_mem wrapping the imported BO
 * @size: optional output, size of the imported buffer
 * @mmap_offset: optional output, CPU mmap offset of the BO
 *
 * Imports the DMA-buf through KFD's own DRM client file so the buffer is
 * tracked by a GEM handle rather than by a raw dma_buf pointer. The handle
 * is stored in (*mem)->gem_handle and released again by the free path
 * (see amdgpu_amdkfd_gpuvm_free_memory_of_gpu, which calls
 * drm_gem_handle_delete on it).
 *
 * Return: 0 on success, negative errno on failure.
 */
int amdgpu_amdkfd_gpuvm_import_dmabuf_fd(struct amdgpu_device *adev, int fd,
					 uint64_t va, void *drm_priv,
					 struct kgd_mem **mem, uint64_t *size,
					 uint64_t *mmap_offset)
{
	struct drm_gem_object *obj;
	uint32_t handle;
	int ret;

	/* Turn the fd into a GEM handle on the KFD client's DRM file. The
	 * handle keeps a reference on the GEM object for its lifetime.
	 */
	ret = drm_gem_prime_fd_to_handle(&adev->ddev, adev->kfd.client.file, fd,
					 &handle);
	if (ret)
		return ret;
	/* Look up the object behind the handle. This takes an additional
	 * reference that is owned by the kgd_mem on success.
	 * NOTE(review): presumably dropped when the BO is released in the
	 * free path — confirm against the BO release code.
	 */
	obj = drm_gem_object_lookup(adev->kfd.client.file, handle);
	if (!obj) {
		ret = -EINVAL;
		goto err_release_handle;
	}

	ret = import_obj_create(adev, obj->dma_buf, obj, va, drm_priv, mem, size,
				mmap_offset);
	if (ret)
		goto err_put_obj;

	/* Remember the handle so the free path can delete it later. */
	(*mem)->gem_handle = handle;

	return 0;

err_put_obj:
	drm_gem_object_put(obj);
err_release_handle:
	drm_gem_handle_delete(adev->kfd.client.file, handle);
	return ret;
}
2378 | 2421 |
|
|
0 commit comments