@@ -3974,16 +3974,13 @@ int bnxt_re_dealloc_mw(struct ib_mw *ib_mw)
 	return rc;
 }
 
-/* uverbs */
-struct ib_mr *bnxt_re_reg_user_mr(struct ib_pd *ib_pd, u64 start, u64 length,
-				  u64 virt_addr, int mr_access_flags,
-				  struct ib_udata *udata)
+static struct ib_mr *__bnxt_re_user_reg_mr(struct ib_pd *ib_pd, u64 length, u64 virt_addr,
+					   int mr_access_flags, struct ib_umem *umem)
 {
 	struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
 	struct bnxt_re_dev *rdev = pd->rdev;
-	struct bnxt_re_mr *mr;
-	struct ib_umem *umem;
 	unsigned long page_size;
+	struct bnxt_re_mr *mr;
 	int umem_pgs, rc;
 	u32 active_mrs;
 
@@ -3993,6 +3990,12 @@ struct ib_mr *bnxt_re_reg_user_mr(struct ib_pd *ib_pd, u64 start, u64 length,
 		return ERR_PTR(-ENOMEM);
 	}
 
+	page_size = ib_umem_find_best_pgsz(umem, BNXT_RE_PAGE_SIZE_SUPPORTED, virt_addr);
+	if (!page_size) {
+		ibdev_err(&rdev->ibdev, "umem page size unsupported!");
+		return ERR_PTR(-EINVAL);
+	}
+
 	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
 	if (!mr)
 		return ERR_PTR(-ENOMEM);
@@ -4004,36 +4007,23 @@ struct ib_mr *bnxt_re_reg_user_mr(struct ib_pd *ib_pd, u64 start, u64 length,
 
 	rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr);
 	if (rc) {
-		ibdev_err(&rdev->ibdev, "Failed to allocate MR");
+		ibdev_err(&rdev->ibdev, "Failed to allocate MR rc = %d", rc);
+		rc = -EIO;
 		goto free_mr;
 	}
 	/* The fixed portion of the rkey is the same as the lkey */
 	mr->ib_mr.rkey = mr->qplib_mr.rkey;
-
-	umem = ib_umem_get(&rdev->ibdev, start, length, mr_access_flags);
-	if (IS_ERR(umem)) {
-		ibdev_err(&rdev->ibdev, "Failed to get umem");
-		rc = -EFAULT;
-		goto free_mrw;
-	}
 	mr->ib_umem = umem;
-
 	mr->qplib_mr.va = virt_addr;
-	page_size = ib_umem_find_best_pgsz(
-		umem, BNXT_RE_PAGE_SIZE_SUPPORTED, virt_addr);
-	if (!page_size) {
-		ibdev_err(&rdev->ibdev, "umem page size unsupported!");
-		rc = -EFAULT;
-		goto free_umem;
-	}
 	mr->qplib_mr.total_size = length;
 
 	umem_pgs = ib_umem_num_dma_blocks(umem, page_size);
 	rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &mr->qplib_mr, umem,
 			       umem_pgs, page_size);
 	if (rc) {
-		ibdev_err(&rdev->ibdev, "Failed to register user MR");
-		goto free_umem;
+		ibdev_err(&rdev->ibdev, "Failed to register user MR - rc = %d\n", rc);
+		rc = -EIO;
+		goto free_mrw;
 	}
 
 	mr->ib_mr.lkey = mr->qplib_mr.lkey;
@@ -4043,15 +4033,56 @@ struct ib_mr *bnxt_re_reg_user_mr(struct ib_pd *ib_pd, u64 start, u64 length,
 		rdev->stats.res.mr_watermark = active_mrs;
 
 	return &mr->ib_mr;
-free_umem:
-	ib_umem_release(umem);
+
 free_mrw:
 	bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
 free_mr:
 	kfree(mr);
 	return ERR_PTR(rc);
 }
 
+struct ib_mr *bnxt_re_reg_user_mr(struct ib_pd *ib_pd, u64 start, u64 length,
+				  u64 virt_addr, int mr_access_flags,
+				  struct ib_udata *udata)
+{
+	struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
+	struct bnxt_re_dev *rdev = pd->rdev;
+	struct ib_umem *umem;
+	struct ib_mr *ib_mr;
+
+	umem = ib_umem_get(&rdev->ibdev, start, length, mr_access_flags);
+	if (IS_ERR(umem))
+		return ERR_CAST(umem);
+
+	ib_mr = __bnxt_re_user_reg_mr(ib_pd, length, virt_addr, mr_access_flags, umem);
+	if (IS_ERR(ib_mr))
+		ib_umem_release(umem);
+	return ib_mr;
+}
+
+struct ib_mr *bnxt_re_reg_user_mr_dmabuf(struct ib_pd *ib_pd, u64 start,
+					 u64 length, u64 virt_addr, int fd,
+					 int mr_access_flags, struct ib_udata *udata)
+{
+	struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
+	struct bnxt_re_dev *rdev = pd->rdev;
+	struct ib_umem_dmabuf *umem_dmabuf;
+	struct ib_umem *umem;
+	struct ib_mr *ib_mr;
+
+	umem_dmabuf = ib_umem_dmabuf_get_pinned(&rdev->ibdev, start, length,
+						fd, mr_access_flags);
+	if (IS_ERR(umem_dmabuf))
+		return ERR_CAST(umem_dmabuf);
+
+	umem = &umem_dmabuf->umem;
+
+	ib_mr = __bnxt_re_user_reg_mr(ib_pd, length, virt_addr, mr_access_flags, umem);
+	if (IS_ERR(ib_mr))
+		ib_umem_release(umem);
+	return ib_mr;
+}
+
 int bnxt_re_alloc_ucontext(struct ib_ucontext *ctx, struct ib_udata *udata)
 {
 	struct ib_device *ibdev = ctx->device;
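
For reference, a minimal userspace sketch of how the new dma-buf registration path is typically exercised (not part of this commit; the helper name and the source of the dma-buf fd are assumptions). rdma-core's ibv_reg_dmabuf_mr() verb issues the register-dmabuf-MR uverbs command, which the core dispatches to the driver's .reg_user_mr_dmabuf handler; the kernel side then pins the dma-buf pages via ib_umem_dmabuf_get_pinned() and registers them through __bnxt_re_user_reg_mr() as shown in the hunks above.

	#include <infiniband/verbs.h>
	#include <stdint.h>
	#include <stddef.h>

	/* Register a dma-buf backed memory region; dmabuf_fd is assumed to
	 * come from an exporter such as a GPU driver.
	 */
	static struct ibv_mr *reg_dmabuf_region(struct ibv_pd *pd, int dmabuf_fd,
						uint64_t offset, size_t length,
						uint64_t iova)
	{
		int access = IBV_ACCESS_LOCAL_WRITE | IBV_ACCESS_REMOTE_READ |
			     IBV_ACCESS_REMOTE_WRITE;

		return ibv_reg_dmabuf_mr(pd, offset, length, iova, dmabuf_fd, access);
	}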