@@ -153,10 +153,10 @@ static void create_mkey_callback(int status, struct mlx5_async_work *context)
 	if (status) {
 		create_mkey_warn(dev, status, mr->out);
 		kfree(mr);
-		spin_lock_irqsave(&ent->lock, flags);
+		xa_lock_irqsave(&ent->mkeys, flags);
 		ent->pending--;
 		WRITE_ONCE(dev->fill_delay, 1);
-		spin_unlock_irqrestore(&ent->lock, flags);
+		xa_unlock_irqrestore(&ent->mkeys, flags);
 		mod_timer(&dev->delay_timer, jiffies + HZ);
 		return;
 	}
@@ -168,14 +168,14 @@ static void create_mkey_callback(int status, struct mlx5_async_work *context)
 
 	WRITE_ONCE(dev->cache.last_add, jiffies);
 
-	spin_lock_irqsave(&ent->lock, flags);
+	xa_lock_irqsave(&ent->mkeys, flags);
 	list_add_tail(&mr->list, &ent->head);
 	ent->available_mrs++;
 	ent->total_mrs++;
 	/* If we are doing fill_to_high_water then keep going. */
 	queue_adjust_cache_locked(ent);
 	ent->pending--;
-	spin_unlock_irqrestore(&ent->lock, flags);
+	xa_unlock_irqrestore(&ent->mkeys, flags);
 }
 
 static int get_mkc_octo_size(unsigned int access_mode, unsigned int ndescs)
@@ -239,23 +239,23 @@ static int add_keys(struct mlx5_cache_ent *ent, unsigned int num)
 			err = -ENOMEM;
 			break;
 		}
-		spin_lock_irq(&ent->lock);
+		xa_lock_irq(&ent->mkeys);
 		if (ent->pending >= MAX_PENDING_REG_MR) {
 			err = -EAGAIN;
-			spin_unlock_irq(&ent->lock);
+			xa_unlock_irq(&ent->mkeys);
 			kfree(mr);
 			break;
 		}
 		ent->pending++;
-		spin_unlock_irq(&ent->lock);
+		xa_unlock_irq(&ent->mkeys);
 		err = mlx5_ib_create_mkey_cb(ent->dev, &mr->mmkey,
 					     &ent->dev->async_ctx, in, inlen,
 					     mr->out, sizeof(mr->out),
 					     &mr->cb_work);
 		if (err) {
-			spin_lock_irq(&ent->lock);
+			xa_lock_irq(&ent->mkeys);
 			ent->pending--;
-			spin_unlock_irq(&ent->lock);
+			xa_unlock_irq(&ent->mkeys);
 			mlx5_ib_warn(ent->dev, "create mkey failed %d\n", err);
 			kfree(mr);
 			break;
@@ -293,9 +293,9 @@ static struct mlx5_ib_mr *create_cache_mr(struct mlx5_cache_ent *ent)
 	init_waitqueue_head(&mr->mmkey.wait);
 	mr->mmkey.type = MLX5_MKEY_MR;
 	WRITE_ONCE(ent->dev->cache.last_add, jiffies);
-	spin_lock_irq(&ent->lock);
+	xa_lock_irq(&ent->mkeys);
 	ent->total_mrs++;
-	spin_unlock_irq(&ent->lock);
+	xa_unlock_irq(&ent->mkeys);
 	kfree(in);
 	return mr;
 free_mr:
@@ -309,25 +309,25 @@ static void remove_cache_mr_locked(struct mlx5_cache_ent *ent)
 {
 	struct mlx5_ib_mr *mr;
 
-	lockdep_assert_held(&ent->lock);
+	lockdep_assert_held(&ent->mkeys.xa_lock);
 	if (list_empty(&ent->head))
 		return;
 	mr = list_first_entry(&ent->head, struct mlx5_ib_mr, list);
 	list_del(&mr->list);
 	ent->available_mrs--;
 	ent->total_mrs--;
-	spin_unlock_irq(&ent->lock);
+	xa_unlock_irq(&ent->mkeys);
 	mlx5_core_destroy_mkey(ent->dev->mdev, mr->mmkey.key);
 	kfree(mr);
-	spin_lock_irq(&ent->lock);
+	xa_lock_irq(&ent->mkeys);
 }
 
 static int resize_available_mrs(struct mlx5_cache_ent *ent, unsigned int target,
				bool limit_fill)
 {
 	int err;
 
-	lockdep_assert_held(&ent->lock);
+	lockdep_assert_held(&ent->mkeys.xa_lock);
 
 	while (true) {
 		if (limit_fill)
@@ -337,11 +337,11 @@ static int resize_available_mrs(struct mlx5_cache_ent *ent, unsigned int target,
 		if (target > ent->available_mrs + ent->pending) {
 			u32 todo = target - (ent->available_mrs + ent->pending);
 
-			spin_unlock_irq(&ent->lock);
+			xa_unlock_irq(&ent->mkeys);
 			err = add_keys(ent, todo);
 			if (err == -EAGAIN)
 				usleep_range(3000, 5000);
-			spin_lock_irq(&ent->lock);
+			xa_lock_irq(&ent->mkeys);
 			if (err) {
 				if (err != -EAGAIN)
 					return err;
@@ -369,7 +369,7 @@ static ssize_t size_write(struct file *filp, const char __user *buf,
 	 * cannot free MRs that are in use. Compute the target value for
 	 * available_mrs.
 	 */
-	spin_lock_irq(&ent->lock);
+	xa_lock_irq(&ent->mkeys);
 	if (target < ent->total_mrs - ent->available_mrs) {
 		err = -EINVAL;
 		goto err_unlock;
@@ -382,12 +382,12 @@ static ssize_t size_write(struct file *filp, const char __user *buf,
 	err = resize_available_mrs(ent, target, false);
 	if (err)
 		goto err_unlock;
-	spin_unlock_irq(&ent->lock);
+	xa_unlock_irq(&ent->mkeys);
 
 	return count;
 
 err_unlock:
-	spin_unlock_irq(&ent->lock);
+	xa_unlock_irq(&ent->mkeys);
 	return err;
 }
 
@@ -427,10 +427,10 @@ static ssize_t limit_write(struct file *filp, const char __user *buf,
 	 * Upon set we immediately fill the cache to high water mark implied by
 	 * the limit.
 	 */
-	spin_lock_irq(&ent->lock);
+	xa_lock_irq(&ent->mkeys);
 	ent->limit = var;
 	err = resize_available_mrs(ent, 0, true);
-	spin_unlock_irq(&ent->lock);
+	xa_unlock_irq(&ent->mkeys);
 	if (err)
 		return err;
 	return count;
@@ -465,9 +465,9 @@ static bool someone_adding(struct mlx5_mr_cache *cache)
 		struct mlx5_cache_ent *ent = &cache->ent[i];
 		bool ret;
 
-		spin_lock_irq(&ent->lock);
+		xa_lock_irq(&ent->mkeys);
 		ret = ent->available_mrs < ent->limit;
-		spin_unlock_irq(&ent->lock);
+		xa_unlock_irq(&ent->mkeys);
 		if (ret)
 			return true;
 	}
@@ -481,7 +481,7 @@ static bool someone_adding(struct mlx5_mr_cache *cache)
  */
static void queue_adjust_cache_locked(struct mlx5_cache_ent *ent)
{
-	lockdep_assert_held(&ent->lock);
+	lockdep_assert_held(&ent->mkeys.xa_lock);
 
 	if (ent->disabled || READ_ONCE(ent->dev->fill_delay))
 		return;
@@ -514,16 +514,16 @@ static void __cache_work_func(struct mlx5_cache_ent *ent)
 	struct mlx5_mr_cache *cache = &dev->cache;
 	int err;
 
-	spin_lock_irq(&ent->lock);
+	xa_lock_irq(&ent->mkeys);
 	if (ent->disabled)
 		goto out;
 
 	if (ent->fill_to_high_water &&
 	    ent->available_mrs + ent->pending < 2 * ent->limit &&
 	    !READ_ONCE(dev->fill_delay)) {
-		spin_unlock_irq(&ent->lock);
+		xa_unlock_irq(&ent->mkeys);
 		err = add_keys(ent, 1);
-		spin_lock_irq(&ent->lock);
+		xa_lock_irq(&ent->mkeys);
 		if (ent->disabled)
 			goto out;
 		if (err) {
@@ -556,11 +556,11 @@ static void __cache_work_func(struct mlx5_cache_ent *ent)
 		 * the garbage collection work to try to run in next cycle, in
 		 * order to free CPU resources to other tasks.
 		 */
-		spin_unlock_irq(&ent->lock);
+		xa_unlock_irq(&ent->mkeys);
 		need_delay = need_resched() || someone_adding(cache) ||
 			     !time_after(jiffies,
					 READ_ONCE(cache->last_add) + 300 * HZ);
-		spin_lock_irq(&ent->lock);
+		xa_lock_irq(&ent->mkeys);
 		if (ent->disabled)
 			goto out;
 		if (need_delay) {
@@ -571,7 +571,7 @@ static void __cache_work_func(struct mlx5_cache_ent *ent)
 		queue_adjust_cache_locked(ent);
 	}
 out:
-	spin_unlock_irq(&ent->lock);
+	xa_unlock_irq(&ent->mkeys);
 }
 
static void delayed_cache_work_func(struct work_struct *work)
@@ -592,11 +592,11 @@ struct mlx5_ib_mr *mlx5_mr_cache_alloc(struct mlx5_ib_dev *dev,
 	if (!mlx5r_umr_can_reconfig(dev, 0, access_flags))
 		return ERR_PTR(-EOPNOTSUPP);
 
-	spin_lock_irq(&ent->lock);
+	xa_lock_irq(&ent->mkeys);
 	if (list_empty(&ent->head)) {
 		queue_adjust_cache_locked(ent);
 		ent->miss++;
-		spin_unlock_irq(&ent->lock);
+		xa_unlock_irq(&ent->mkeys);
 		mr = create_cache_mr(ent);
 		if (IS_ERR(mr))
 			return mr;
@@ -605,7 +605,7 @@ struct mlx5_ib_mr *mlx5_mr_cache_alloc(struct mlx5_ib_dev *dev,
 		list_del(&mr->list);
 		ent->available_mrs--;
 		queue_adjust_cache_locked(ent);
-		spin_unlock_irq(&ent->lock);
+		xa_unlock_irq(&ent->mkeys);
 
 		mlx5_clear_mr(mr);
 	}
@@ -617,11 +617,11 @@ static void mlx5_mr_cache_free(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
 	struct mlx5_cache_ent *ent = mr->cache_ent;
 
 	WRITE_ONCE(dev->cache.last_add, jiffies);
-	spin_lock_irq(&ent->lock);
+	xa_lock_irq(&ent->mkeys);
 	list_add_tail(&mr->list, &ent->head);
 	ent->available_mrs++;
 	queue_adjust_cache_locked(ent);
-	spin_unlock_irq(&ent->lock);
+	xa_unlock_irq(&ent->mkeys);
 }
 
static void clean_keys(struct mlx5_ib_dev *dev, int c)
@@ -634,16 +634,16 @@ static void clean_keys(struct mlx5_ib_dev *dev, int c)
 
 	cancel_delayed_work(&ent->dwork);
 	while (1) {
-		spin_lock_irq(&ent->lock);
+		xa_lock_irq(&ent->mkeys);
 		if (list_empty(&ent->head)) {
-			spin_unlock_irq(&ent->lock);
+			xa_unlock_irq(&ent->mkeys);
 			break;
 		}
 		mr = list_first_entry(&ent->head, struct mlx5_ib_mr, list);
 		list_move(&mr->list, &del_list);
 		ent->available_mrs--;
 		ent->total_mrs--;
-		spin_unlock_irq(&ent->lock);
+		xa_unlock_irq(&ent->mkeys);
 		mlx5_core_destroy_mkey(dev->mdev, mr->mmkey.key);
 	}
 
@@ -710,7 +710,7 @@ int mlx5_mr_cache_init(struct mlx5_ib_dev *dev)
 	for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) {
 		ent = &cache->ent[i];
 		INIT_LIST_HEAD(&ent->head);
-		spin_lock_init(&ent->lock);
+		xa_init_flags(&ent->mkeys, XA_FLAGS_LOCK_IRQ);
 		ent->order = i + 2;
 		ent->dev = dev;
 		ent->limit = 0;
@@ -734,9 +734,9 @@ int mlx5_mr_cache_init(struct mlx5_ib_dev *dev)
 			ent->limit = dev->mdev->profile.mr_cache[i].limit;
 		else
 			ent->limit = 0;
-		spin_lock_irq(&ent->lock);
+		xa_lock_irq(&ent->mkeys);
 		queue_adjust_cache_locked(ent);
-		spin_unlock_irq(&ent->lock);
+		xa_unlock_irq(&ent->mkeys);
 	}
 
 	mlx5_mr_cache_debugfs_init(dev);
@@ -754,9 +754,9 @@ int mlx5_mr_cache_cleanup(struct mlx5_ib_dev *dev)
 	for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) {
 		struct mlx5_cache_ent *ent = &dev->cache.ent[i];
 
-		spin_lock_irq(&ent->lock);
+		xa_lock_irq(&ent->mkeys);
 		ent->disabled = true;
-		spin_unlock_irq(&ent->lock);
+		xa_unlock_irq(&ent->mkeys);
 		cancel_delayed_work_sync(&ent->dwork);
 	}
 
@@ -1572,9 +1572,9 @@ int mlx5_ib_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
 	/* Stop DMA */
 	if (mr->cache_ent) {
 		if (mlx5r_umr_revoke_mr(mr)) {
-			spin_lock_irq(&mr->cache_ent->lock);
+			xa_lock_irq(&mr->cache_ent->mkeys);
 			mr->cache_ent->total_mrs--;
-			spin_unlock_irq(&mr->cache_ent->lock);
+			xa_unlock_irq(&mr->cache_ent->mkeys);
 			mr->cache_ent = NULL;
 		}
 	}
0 commit comments