Skip to content

Commit bfdc0ed

Browse files
rpearsonhpe authored and jgunthorpe committed
RDMA/rxe: Change mcg_lock to a _bh lock
rxe_mcast.c currently uses _irqsave spinlocks for rxe->mcg_lock while rxe_recv.c uses _bh spinlocks for the same lock. As there is no case where the mcg_lock can be taken from an IRQ, change these all to bh locks so we don't have confusing mismatched lock types on the same spinlock. Fixes: 6090a0c ("RDMA/rxe: Cleanup rxe_mcast.c") Link: https://lore.kernel.org/r/20220504202817.98247-1-rpearsonhpe@gmail.com Signed-off-by: Bob Pearson <rpearsonhpe@gmail.com> Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
1 parent a926a90 commit bfdc0ed

1 file changed

Lines changed: 15 additions & 21 deletions

File tree

drivers/infiniband/sw/rxe/rxe_mcast.c

Lines changed: 15 additions & 21 deletions
Original file line numberDiff line numberDiff line change
@@ -143,11 +143,10 @@ static struct rxe_mcg *__rxe_lookup_mcg(struct rxe_dev *rxe,
143143
struct rxe_mcg *rxe_lookup_mcg(struct rxe_dev *rxe, union ib_gid *mgid)
144144
{
145145
struct rxe_mcg *mcg;
146-
unsigned long flags;
147146

148-
spin_lock_irqsave(&rxe->mcg_lock, flags);
147+
spin_lock_bh(&rxe->mcg_lock);
149148
mcg = __rxe_lookup_mcg(rxe, mgid);
150-
spin_unlock_irqrestore(&rxe->mcg_lock, flags);
149+
spin_unlock_bh(&rxe->mcg_lock);
151150

152151
return mcg;
153152
}
@@ -189,7 +188,6 @@ static void __rxe_init_mcg(struct rxe_dev *rxe, union ib_gid *mgid,
189188
static struct rxe_mcg *rxe_get_mcg(struct rxe_dev *rxe, union ib_gid *mgid)
190189
{
191190
struct rxe_mcg *mcg, *tmp;
192-
unsigned long flags;
193191
int err;
194192

195193
if (rxe->attr.max_mcast_grp == 0)
@@ -211,18 +209,18 @@ static struct rxe_mcg *rxe_get_mcg(struct rxe_dev *rxe, union ib_gid *mgid)
211209
if (!mcg)
212210
return ERR_PTR(-ENOMEM);
213211

214-
spin_lock_irqsave(&rxe->mcg_lock, flags);
212+
spin_lock_bh(&rxe->mcg_lock);
215213
/* re-check to see if someone else just added it */
216214
tmp = __rxe_lookup_mcg(rxe, mgid);
217215
if (tmp) {
218-
spin_unlock_irqrestore(&rxe->mcg_lock, flags);
216+
spin_unlock_bh(&rxe->mcg_lock);
219217
atomic_dec(&rxe->mcg_num);
220218
kfree(mcg);
221219
return tmp;
222220
}
223221

224222
__rxe_init_mcg(rxe, mgid, mcg);
225-
spin_unlock_irqrestore(&rxe->mcg_lock, flags);
223+
spin_unlock_bh(&rxe->mcg_lock);
226224

227225
/* add mcast address outside of lock */
228226
err = rxe_mcast_add(rxe, mgid);
@@ -272,14 +270,12 @@ static void __rxe_destroy_mcg(struct rxe_mcg *mcg)
272270
*/
273271
static void rxe_destroy_mcg(struct rxe_mcg *mcg)
274272
{
275-
unsigned long flags;
276-
277273
/* delete mcast address outside of lock */
278274
rxe_mcast_del(mcg->rxe, &mcg->mgid);
279275

280-
spin_lock_irqsave(&mcg->rxe->mcg_lock, flags);
276+
spin_lock_bh(&mcg->rxe->mcg_lock);
281277
__rxe_destroy_mcg(mcg);
282-
spin_unlock_irqrestore(&mcg->rxe->mcg_lock, flags);
278+
spin_unlock_bh(&mcg->rxe->mcg_lock);
283279
}
284280

285281
/**
@@ -334,25 +330,24 @@ static int rxe_attach_mcg(struct rxe_mcg *mcg, struct rxe_qp *qp)
334330
{
335331
struct rxe_dev *rxe = mcg->rxe;
336332
struct rxe_mca *mca, *tmp;
337-
unsigned long flags;
338333
int err;
339334

340335
/* check to see if the qp is already a member of the group */
341-
spin_lock_irqsave(&rxe->mcg_lock, flags);
336+
spin_lock_bh(&rxe->mcg_lock);
342337
list_for_each_entry(mca, &mcg->qp_list, qp_list) {
343338
if (mca->qp == qp) {
344-
spin_unlock_irqrestore(&rxe->mcg_lock, flags);
339+
spin_unlock_bh(&rxe->mcg_lock);
345340
return 0;
346341
}
347342
}
348-
spin_unlock_irqrestore(&rxe->mcg_lock, flags);
343+
spin_unlock_bh(&rxe->mcg_lock);
349344

350345
/* speculative alloc new mca without using GFP_ATOMIC */
351346
mca = kzalloc(sizeof(*mca), GFP_KERNEL);
352347
if (!mca)
353348
return -ENOMEM;
354349

355-
spin_lock_irqsave(&rxe->mcg_lock, flags);
350+
spin_lock_bh(&rxe->mcg_lock);
356351
/* re-check to see if someone else just attached qp */
357352
list_for_each_entry(tmp, &mcg->qp_list, qp_list) {
358353
if (tmp->qp == qp) {
@@ -366,7 +361,7 @@ static int rxe_attach_mcg(struct rxe_mcg *mcg, struct rxe_qp *qp)
366361
if (err)
367362
kfree(mca);
368363
out:
369-
spin_unlock_irqrestore(&rxe->mcg_lock, flags);
364+
spin_unlock_bh(&rxe->mcg_lock);
370365
return err;
371366
}
372367

@@ -400,9 +395,8 @@ static int rxe_detach_mcg(struct rxe_mcg *mcg, struct rxe_qp *qp)
400395
{
401396
struct rxe_dev *rxe = mcg->rxe;
402397
struct rxe_mca *mca, *tmp;
403-
unsigned long flags;
404398

405-
spin_lock_irqsave(&rxe->mcg_lock, flags);
399+
spin_lock_bh(&rxe->mcg_lock);
406400
list_for_each_entry_safe(mca, tmp, &mcg->qp_list, qp_list) {
407401
if (mca->qp == qp) {
408402
__rxe_cleanup_mca(mca, mcg);
@@ -416,13 +410,13 @@ static int rxe_detach_mcg(struct rxe_mcg *mcg, struct rxe_qp *qp)
416410
if (atomic_read(&mcg->qp_num) <= 0)
417411
__rxe_destroy_mcg(mcg);
418412

419-
spin_unlock_irqrestore(&rxe->mcg_lock, flags);
413+
spin_unlock_bh(&rxe->mcg_lock);
420414
return 0;
421415
}
422416
}
423417

424418
/* we didn't find the qp on the list */
425-
spin_unlock_irqrestore(&rxe->mcg_lock, flags);
419+
spin_unlock_bh(&rxe->mcg_lock);
426420
return -EINVAL;
427421
}
428422

0 commit comments

Comments (0)