Skip to content

Commit a942ec2

Browse files
Chengchang Tang authored and rleon committed
RDMA/hns: Fix UAF for cq async event
The refcount of CQ is not protected by locks. When CQ asynchronous
events and CQ destruction are concurrent, CQ may have been released,
which will cause UAF.

Use the xa_lock() to protect the CQ refcount.

Fixes: 9a44353 ("IB/hns: Add driver files for hns RoCE driver")
Signed-off-by: Chengchang Tang <tangchengchang@huawei.com>
Signed-off-by: Junxian Huang <huangjunxian6@hisilicon.com>
Link: https://lore.kernel.org/r/20240412091616.370789-6-huangjunxian6@hisilicon.com
Signed-off-by: Leon Romanovsky <leon@kernel.org>
1 parent b46494b commit a942ec2

1 file changed

Lines changed: 13 additions & 11 deletions

File tree

drivers/infiniband/hw/hns/hns_roce_cq.c

Lines changed: 13 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -149,7 +149,7 @@ static int alloc_cqc(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq)
149149
return ret;
150150
}
151151

152-
ret = xa_err(xa_store(&cq_table->array, hr_cq->cqn, hr_cq, GFP_KERNEL));
152+
ret = xa_err(xa_store_irq(&cq_table->array, hr_cq->cqn, hr_cq, GFP_KERNEL));
153153
if (ret) {
154154
ibdev_err(ibdev, "failed to xa_store CQ, ret = %d.\n", ret);
155155
goto err_put;
@@ -163,7 +163,7 @@ static int alloc_cqc(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq)
163163
return 0;
164164

165165
err_xa:
166-
xa_erase(&cq_table->array, hr_cq->cqn);
166+
xa_erase_irq(&cq_table->array, hr_cq->cqn);
167167
err_put:
168168
hns_roce_table_put(hr_dev, &cq_table->table, hr_cq->cqn);
169169

@@ -182,7 +182,7 @@ static void free_cqc(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq)
182182
dev_err(dev, "DESTROY_CQ failed (%d) for CQN %06lx\n", ret,
183183
hr_cq->cqn);
184184

185-
xa_erase(&cq_table->array, hr_cq->cqn);
185+
xa_erase_irq(&cq_table->array, hr_cq->cqn);
186186

187187
/* Waiting interrupt process procedure carried out */
188188
synchronize_irq(hr_dev->eq_table.eq[hr_cq->vector].irq);
@@ -476,13 +476,6 @@ void hns_roce_cq_event(struct hns_roce_dev *hr_dev, u32 cqn, int event_type)
476476
struct ib_event event;
477477
struct ib_cq *ibcq;
478478

479-
hr_cq = xa_load(&hr_dev->cq_table.array,
480-
cqn & (hr_dev->caps.num_cqs - 1));
481-
if (!hr_cq) {
482-
dev_warn(dev, "async event for bogus CQ 0x%06x\n", cqn);
483-
return;
484-
}
485-
486479
if (event_type != HNS_ROCE_EVENT_TYPE_CQ_ID_INVALID &&
487480
event_type != HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR &&
488481
event_type != HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW) {
@@ -491,7 +484,16 @@ void hns_roce_cq_event(struct hns_roce_dev *hr_dev, u32 cqn, int event_type)
491484
return;
492485
}
493486

494-
refcount_inc(&hr_cq->refcount);
487+
xa_lock(&hr_dev->cq_table.array);
488+
hr_cq = xa_load(&hr_dev->cq_table.array,
489+
cqn & (hr_dev->caps.num_cqs - 1));
490+
if (hr_cq)
491+
refcount_inc(&hr_cq->refcount);
492+
xa_unlock(&hr_dev->cq_table.array);
493+
if (!hr_cq) {
494+
dev_warn(dev, "async event for bogus CQ 0x%06x\n", cqn);
495+
return;
496+
}
495497

496498
ibcq = &hr_cq->ib_cq;
497499
if (ibcq->event_handler) {

0 commit comments

Comments (0)