@@ -329,7 +329,7 @@ nouveau_uvma_region_create(struct nouveau_uvmm *uvmm,
329329 struct nouveau_uvma_region * reg ;
330330 int ret ;
331331
332- if (!drm_gpuvm_interval_empty (& uvmm -> umgr , addr , range ))
332+ if (!drm_gpuvm_interval_empty (& uvmm -> base , addr , range ))
333333 return - ENOSPC ;
334334
335335 ret = nouveau_uvma_region_alloc (& reg );
@@ -384,7 +384,7 @@ nouveau_uvma_region_empty(struct nouveau_uvma_region *reg)
384384{
385385 struct nouveau_uvmm * uvmm = reg -> uvmm ;
386386
387- return drm_gpuvm_interval_empty (& uvmm -> umgr ,
387+ return drm_gpuvm_interval_empty (& uvmm -> base ,
388388 reg -> va .addr ,
389389 reg -> va .range );
390390}
@@ -589,7 +589,7 @@ op_map_prepare(struct nouveau_uvmm *uvmm,
589589 uvma -> region = args -> region ;
590590 uvma -> kind = args -> kind ;
591591
592- drm_gpuva_map (& uvmm -> umgr , & uvma -> va , op );
592+ drm_gpuva_map (& uvmm -> base , & uvma -> va , op );
593593
594594 /* Keep a reference until this uvma is destroyed. */
595595 nouveau_uvma_gem_get (uvma );
@@ -1194,7 +1194,7 @@ nouveau_uvmm_bind_job_submit(struct nouveau_job *job)
11941194 goto unwind_continue ;
11951195 }
11961196
1197- op -> ops = drm_gpuvm_sm_unmap_ops_create (& uvmm -> umgr ,
1197+ op -> ops = drm_gpuvm_sm_unmap_ops_create (& uvmm -> base ,
11981198 op -> va .addr ,
11991199 op -> va .range );
12001200 if (IS_ERR (op -> ops )) {
@@ -1205,7 +1205,7 @@ nouveau_uvmm_bind_job_submit(struct nouveau_job *job)
12051205 ret = nouveau_uvmm_sm_unmap_prepare (uvmm , & op -> new ,
12061206 op -> ops );
12071207 if (ret ) {
1208- drm_gpuva_ops_free (& uvmm -> umgr , op -> ops );
1208+ drm_gpuva_ops_free (& uvmm -> base , op -> ops );
12091209 op -> ops = NULL ;
12101210 op -> reg = NULL ;
12111211 goto unwind_continue ;
@@ -1240,7 +1240,7 @@ nouveau_uvmm_bind_job_submit(struct nouveau_job *job)
12401240 }
12411241 }
12421242
1243- op -> ops = drm_gpuvm_sm_map_ops_create (& uvmm -> umgr ,
1243+ op -> ops = drm_gpuvm_sm_map_ops_create (& uvmm -> base ,
12441244 op -> va .addr ,
12451245 op -> va .range ,
12461246 op -> gem .obj ,
@@ -1256,15 +1256,15 @@ nouveau_uvmm_bind_job_submit(struct nouveau_job *job)
12561256 op -> va .range ,
12571257 op -> flags & 0xff );
12581258 if (ret ) {
1259- drm_gpuva_ops_free (& uvmm -> umgr , op -> ops );
1259+ drm_gpuva_ops_free (& uvmm -> base , op -> ops );
12601260 op -> ops = NULL ;
12611261 goto unwind_continue ;
12621262 }
12631263
12641264 break ;
12651265 }
12661266 case OP_UNMAP :
1267- op -> ops = drm_gpuvm_sm_unmap_ops_create (& uvmm -> umgr ,
1267+ op -> ops = drm_gpuvm_sm_unmap_ops_create (& uvmm -> base ,
12681268 op -> va .addr ,
12691269 op -> va .range );
12701270 if (IS_ERR (op -> ops )) {
@@ -1275,7 +1275,7 @@ nouveau_uvmm_bind_job_submit(struct nouveau_job *job)
12751275 ret = nouveau_uvmm_sm_unmap_prepare (uvmm , & op -> new ,
12761276 op -> ops );
12771277 if (ret ) {
1278- drm_gpuva_ops_free (& uvmm -> umgr , op -> ops );
1278+ drm_gpuva_ops_free (& uvmm -> base , op -> ops );
12791279 op -> ops = NULL ;
12801280 goto unwind_continue ;
12811281 }
@@ -1404,7 +1404,7 @@ nouveau_uvmm_bind_job_submit(struct nouveau_job *job)
14041404 break ;
14051405 }
14061406
1407- drm_gpuva_ops_free (& uvmm -> umgr , op -> ops );
1407+ drm_gpuva_ops_free (& uvmm -> base , op -> ops );
14081408 op -> ops = NULL ;
14091409 op -> reg = NULL ;
14101410 }
@@ -1509,7 +1509,7 @@ nouveau_uvmm_bind_job_free_work_fn(struct work_struct *work)
15091509 }
15101510
15111511 if (!IS_ERR_OR_NULL (op -> ops ))
1512- drm_gpuva_ops_free (& uvmm -> umgr , op -> ops );
1512+ drm_gpuva_ops_free (& uvmm -> base , op -> ops );
15131513
15141514 if (obj )
15151515 drm_gem_object_put (obj );
@@ -1836,7 +1836,7 @@ nouveau_uvmm_init(struct nouveau_uvmm *uvmm, struct nouveau_cli *cli,
18361836 uvmm -> kernel_managed_addr = kernel_managed_addr ;
18371837 uvmm -> kernel_managed_size = kernel_managed_size ;
18381838
1839- drm_gpuvm_init (& uvmm -> umgr , cli -> name ,
1839+ drm_gpuvm_init (& uvmm -> base , cli -> name ,
18401840 NOUVEAU_VA_SPACE_START ,
18411841 NOUVEAU_VA_SPACE_END ,
18421842 kernel_managed_addr , kernel_managed_size ,
@@ -1855,7 +1855,7 @@ nouveau_uvmm_init(struct nouveau_uvmm *uvmm, struct nouveau_cli *cli,
18551855 return 0 ;
18561856
18571857out_free_gpuva_mgr :
1858- drm_gpuvm_destroy (& uvmm -> umgr );
1858+ drm_gpuvm_destroy (& uvmm -> base );
18591859out_unlock :
18601860 mutex_unlock (& cli -> mutex );
18611861 return ret ;
@@ -1877,11 +1877,11 @@ nouveau_uvmm_fini(struct nouveau_uvmm *uvmm)
18771877 wait_event (entity -> job .wq , list_empty (& entity -> job .list .head ));
18781878
18791879 nouveau_uvmm_lock (uvmm );
1880- drm_gpuvm_for_each_va_safe (va , next , & uvmm -> umgr ) {
1880+ drm_gpuvm_for_each_va_safe (va , next , & uvmm -> base ) {
18811881 struct nouveau_uvma * uvma = uvma_from_va (va );
18821882 struct drm_gem_object * obj = va -> gem .obj ;
18831883
1884- if (unlikely (va == & uvmm -> umgr .kernel_alloc_node ))
1884+ if (unlikely (va == & uvmm -> base .kernel_alloc_node ))
18851885 continue ;
18861886
18871887 drm_gpuva_remove (va );
@@ -1910,7 +1910,7 @@ nouveau_uvmm_fini(struct nouveau_uvmm *uvmm)
19101910
19111911 mutex_lock (& cli -> mutex );
19121912 nouveau_vmm_fini (& uvmm -> vmm );
1913- drm_gpuvm_destroy (& uvmm -> umgr );
1913+ drm_gpuvm_destroy (& uvmm -> base );
19141914 mutex_unlock (& cli -> mutex );
19151915
19161916 dma_resv_fini (& uvmm -> resv );