Skip to content

Commit af38772

Browse files
committed
Merge tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma
Pull rdma updates from Jason Gunthorpe: "Usual wide collection of unrelated items in drivers: - Driver bug fixes and treewide cleanups in hfi1, siw, qib, mlx5, rxe, usnic, bnxt_re, ocrdma, iser: - remove unnecessary NULL checks - kmap obsolescence - pci_enable_pcie_error_reporting() obsolescence - unused variables and macros - trace event related warnings - casting warnings - Code cleanups for irdma and erdma - EFA reporting of 128 byte PCIe TLP support - mlx5 more aggressively uses the out of order HW feature - Big rework of how state machines and tasks work in rxe - Fix a syzkaller found crash netdev refcount leak in siw - bnxt_re revises their HW description header - Congestion control for bnxt_re - Use mmu_notifiers more safely in hfi1 - mlx5 gets better support for PCIe relaxed ordering inside VMs" * tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma: (81 commits) RDMA/efa: Add rdma write capability to device caps RDMA/mlx5: Use correct device num_ports when modify DC RDMA/irdma: Drop spurious WQ_UNBOUND from alloc_ordered_workqueue() call RDMA/rxe: Fix spinlock recursion deadlock on requester RDMA/mlx5: Fix flow counter query via DEVX RDMA/rxe: Protect QP state with qp->state_lock RDMA/rxe: Move code to check if drained to subroutine RDMA/rxe: Remove qp->req.state RDMA/rxe: Remove qp->comp.state RDMA/rxe: Remove qp->resp.state RDMA/mlx5: Allow relaxed ordering read in VFs and VMs net/mlx5: Update relaxed ordering read HCA capabilities RDMA/mlx5: Check pcie_relaxed_ordering_enabled() in UMR RDMA/mlx5: Remove pcie_relaxed_ordering_enabled() check for RO write RDMA: Add ib_virt_dma_to_page() RDMA/rxe: Fix the error "trying to register non-static key in rxe_cleanup_task" RDMA/irdma: Slightly optimize irdma_form_ah_cm_frame() RDMA/rxe: Fix incorrect TASKLET_STATE_SCHED check in rxe_task.c IB/hfi1: Place struct mmu_rb_handler on cache line start IB/hfi1: Fix bugs with non-PAGE_SIZE-end multi-iovec user SDMA requests ...
2 parents 1ae78a1 + 531094d commit af38772

106 files changed

Lines changed: 8007 additions & 5519 deletions

Some content is hidden

Large Commits have some content hidden by default. Use the searchbox below for content that may be hidden.

drivers/infiniband/core/cm.c

Lines changed: 2 additions & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -2912,6 +2912,8 @@ static int cm_send_rej_locked(struct cm_id_private *cm_id_priv,
29122912
(ari && ari_length > IB_CM_REJ_ARI_LENGTH))
29132913
return -EINVAL;
29142914

2915+
trace_icm_send_rej(&cm_id_priv->id, reason);
2916+
29152917
switch (state) {
29162918
case IB_CM_REQ_SENT:
29172919
case IB_CM_MRA_REQ_RCVD:
@@ -2942,7 +2944,6 @@ static int cm_send_rej_locked(struct cm_id_private *cm_id_priv,
29422944
return -EINVAL;
29432945
}
29442946

2945-
trace_icm_send_rej(&cm_id_priv->id, reason);
29462947
ret = ib_post_send_mad(msg, NULL);
29472948
if (ret) {
29482949
cm_free_msg(msg);

drivers/infiniband/core/cma.c

Lines changed: 2 additions & 4 deletions
Original file line number | Diff line number | Diff line change
@@ -709,8 +709,7 @@ cma_validate_port(struct ib_device *device, u32 port,
709709
}
710710

711711
sgid_attr = rdma_find_gid_by_port(device, gid, gid_type, port, ndev);
712-
if (ndev)
713-
dev_put(ndev);
712+
dev_put(ndev);
714713
return sgid_attr;
715714
}
716715

@@ -2429,8 +2428,7 @@ static int cma_ib_req_handler(struct ib_cm_id *cm_id,
24292428
mutex_unlock(&listen_id->handler_mutex);
24302429

24312430
net_dev_put:
2432-
if (net_dev)
2433-
dev_put(net_dev);
2431+
dev_put(net_dev);
24342432

24352433
return ret;
24362434
}

drivers/infiniband/core/user_mad.c

Lines changed: 14 additions & 9 deletions
Original file line number | Diff line number | Diff line change
@@ -131,6 +131,11 @@ struct ib_umad_packet {
131131
struct ib_user_mad mad;
132132
};
133133

134+
struct ib_rmpp_mad_hdr {
135+
struct ib_mad_hdr mad_hdr;
136+
struct ib_rmpp_hdr rmpp_hdr;
137+
} __packed;
138+
134139
#define CREATE_TRACE_POINTS
135140
#include <trace/events/ib_umad.h>
136141

@@ -494,19 +499,19 @@ static ssize_t ib_umad_write(struct file *filp, const char __user *buf,
494499
size_t count, loff_t *pos)
495500
{
496501
struct ib_umad_file *file = filp->private_data;
502+
struct ib_rmpp_mad_hdr *rmpp_mad_hdr;
497503
struct ib_umad_packet *packet;
498504
struct ib_mad_agent *agent;
499505
struct rdma_ah_attr ah_attr;
500506
struct ib_ah *ah;
501-
struct ib_rmpp_mad *rmpp_mad;
502507
__be64 *tid;
503508
int ret, data_len, hdr_len, copy_offset, rmpp_active;
504509
u8 base_version;
505510

506511
if (count < hdr_size(file) + IB_MGMT_RMPP_HDR)
507512
return -EINVAL;
508513

509-
packet = kzalloc(sizeof *packet + IB_MGMT_RMPP_HDR, GFP_KERNEL);
514+
packet = kzalloc(sizeof(*packet) + IB_MGMT_RMPP_HDR, GFP_KERNEL);
510515
if (!packet)
511516
return -ENOMEM;
512517

@@ -560,13 +565,13 @@ static ssize_t ib_umad_write(struct file *filp, const char __user *buf,
560565
goto err_up;
561566
}
562567

563-
rmpp_mad = (struct ib_rmpp_mad *) packet->mad.data;
564-
hdr_len = ib_get_mad_data_offset(rmpp_mad->mad_hdr.mgmt_class);
568+
rmpp_mad_hdr = (struct ib_rmpp_mad_hdr *)packet->mad.data;
569+
hdr_len = ib_get_mad_data_offset(rmpp_mad_hdr->mad_hdr.mgmt_class);
565570

566-
if (ib_is_mad_class_rmpp(rmpp_mad->mad_hdr.mgmt_class)
571+
if (ib_is_mad_class_rmpp(rmpp_mad_hdr->mad_hdr.mgmt_class)
567572
&& ib_mad_kernel_rmpp_agent(agent)) {
568573
copy_offset = IB_MGMT_RMPP_HDR;
569-
rmpp_active = ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) &
574+
rmpp_active = ib_get_rmpp_flags(&rmpp_mad_hdr->rmpp_hdr) &
570575
IB_MGMT_RMPP_FLAG_ACTIVE;
571576
} else {
572577
copy_offset = IB_MGMT_MAD_HDR;
@@ -615,12 +620,12 @@ static ssize_t ib_umad_write(struct file *filp, const char __user *buf,
615620
tid = &((struct ib_mad_hdr *) packet->msg->mad)->tid;
616621
*tid = cpu_to_be64(((u64) agent->hi_tid) << 32 |
617622
(be64_to_cpup(tid) & 0xffffffff));
618-
rmpp_mad->mad_hdr.tid = *tid;
623+
rmpp_mad_hdr->mad_hdr.tid = *tid;
619624
}
620625

621626
if (!ib_mad_kernel_rmpp_agent(agent)
622-
&& ib_is_mad_class_rmpp(rmpp_mad->mad_hdr.mgmt_class)
623-
&& (ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) & IB_MGMT_RMPP_FLAG_ACTIVE)) {
627+
&& ib_is_mad_class_rmpp(rmpp_mad_hdr->mad_hdr.mgmt_class)
628+
&& (ib_get_rmpp_flags(&rmpp_mad_hdr->rmpp_hdr) & IB_MGMT_RMPP_FLAG_ACTIVE)) {
624629
spin_lock_irq(&file->send_lock);
625630
list_add_tail(&packet->list, &file->send_list);
626631
spin_unlock_irq(&file->send_lock);

drivers/infiniband/hw/bnxt_re/ib_verbs.c

Lines changed: 109 additions & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -2912,6 +2912,106 @@ int bnxt_re_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
29122912
return rc;
29132913
}
29142914

2915+
static void bnxt_re_resize_cq_complete(struct bnxt_re_cq *cq)
2916+
{
2917+
struct bnxt_re_dev *rdev = cq->rdev;
2918+
2919+
bnxt_qplib_resize_cq_complete(&rdev->qplib_res, &cq->qplib_cq);
2920+
2921+
cq->qplib_cq.max_wqe = cq->resize_cqe;
2922+
if (cq->resize_umem) {
2923+
ib_umem_release(cq->umem);
2924+
cq->umem = cq->resize_umem;
2925+
cq->resize_umem = NULL;
2926+
cq->resize_cqe = 0;
2927+
}
2928+
}
2929+
2930+
int bnxt_re_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata)
2931+
{
2932+
struct bnxt_qplib_sg_info sg_info = {};
2933+
struct bnxt_qplib_dpi *orig_dpi = NULL;
2934+
struct bnxt_qplib_dev_attr *dev_attr;
2935+
struct bnxt_re_ucontext *uctx = NULL;
2936+
struct bnxt_re_resize_cq_req req;
2937+
struct bnxt_re_dev *rdev;
2938+
struct bnxt_re_cq *cq;
2939+
int rc, entries;
2940+
2941+
cq = container_of(ibcq, struct bnxt_re_cq, ib_cq);
2942+
rdev = cq->rdev;
2943+
dev_attr = &rdev->dev_attr;
2944+
if (!ibcq->uobject) {
2945+
ibdev_err(&rdev->ibdev, "Kernel CQ Resize not supported");
2946+
return -EOPNOTSUPP;
2947+
}
2948+
2949+
if (cq->resize_umem) {
2950+
ibdev_err(&rdev->ibdev, "Resize CQ %#x failed - Busy",
2951+
cq->qplib_cq.id);
2952+
return -EBUSY;
2953+
}
2954+
2955+
/* Check the requested cq depth out of supported depth */
2956+
if (cqe < 1 || cqe > dev_attr->max_cq_wqes) {
2957+
ibdev_err(&rdev->ibdev, "Resize CQ %#x failed - out of range cqe %d",
2958+
cq->qplib_cq.id, cqe);
2959+
return -EINVAL;
2960+
}
2961+
2962+
entries = roundup_pow_of_two(cqe + 1);
2963+
if (entries > dev_attr->max_cq_wqes + 1)
2964+
entries = dev_attr->max_cq_wqes + 1;
2965+
2966+
uctx = rdma_udata_to_drv_context(udata, struct bnxt_re_ucontext,
2967+
ib_uctx);
2968+
/* uverbs consumer */
2969+
if (ib_copy_from_udata(&req, udata, sizeof(req))) {
2970+
rc = -EFAULT;
2971+
goto fail;
2972+
}
2973+
2974+
cq->resize_umem = ib_umem_get(&rdev->ibdev, req.cq_va,
2975+
entries * sizeof(struct cq_base),
2976+
IB_ACCESS_LOCAL_WRITE);
2977+
if (IS_ERR(cq->resize_umem)) {
2978+
rc = PTR_ERR(cq->resize_umem);
2979+
cq->resize_umem = NULL;
2980+
ibdev_err(&rdev->ibdev, "%s: ib_umem_get failed! rc = %d\n",
2981+
__func__, rc);
2982+
goto fail;
2983+
}
2984+
cq->resize_cqe = entries;
2985+
memcpy(&sg_info, &cq->qplib_cq.sg_info, sizeof(sg_info));
2986+
orig_dpi = cq->qplib_cq.dpi;
2987+
2988+
cq->qplib_cq.sg_info.umem = cq->resize_umem;
2989+
cq->qplib_cq.sg_info.pgsize = PAGE_SIZE;
2990+
cq->qplib_cq.sg_info.pgshft = PAGE_SHIFT;
2991+
cq->qplib_cq.dpi = &uctx->dpi;
2992+
2993+
rc = bnxt_qplib_resize_cq(&rdev->qplib_res, &cq->qplib_cq, entries);
2994+
if (rc) {
2995+
ibdev_err(&rdev->ibdev, "Resize HW CQ %#x failed!",
2996+
cq->qplib_cq.id);
2997+
goto fail;
2998+
}
2999+
3000+
cq->ib_cq.cqe = cq->resize_cqe;
3001+
3002+
return 0;
3003+
3004+
fail:
3005+
if (cq->resize_umem) {
3006+
ib_umem_release(cq->resize_umem);
3007+
cq->resize_umem = NULL;
3008+
cq->resize_cqe = 0;
3009+
memcpy(&cq->qplib_cq.sg_info, &sg_info, sizeof(sg_info));
3010+
cq->qplib_cq.dpi = orig_dpi;
3011+
}
3012+
return rc;
3013+
}
3014+
29153015
static u8 __req_to_ib_wc_status(u8 qstatus)
29163016
{
29173017
switch (qstatus) {
@@ -3425,6 +3525,15 @@ int bnxt_re_poll_cq(struct ib_cq *ib_cq, int num_entries, struct ib_wc *wc)
34253525
struct bnxt_re_sqp_entries *sqp_entry = NULL;
34263526
unsigned long flags;
34273527

3528+
/* User CQ; the only processing we do is to
3529+
* complete any pending CQ resize operation.
3530+
*/
3531+
if (cq->umem) {
3532+
if (cq->resize_umem)
3533+
bnxt_re_resize_cq_complete(cq);
3534+
return 0;
3535+
}
3536+
34283537
spin_lock_irqsave(&cq->cq_lock, flags);
34293538
budget = min_t(u32, num_entries, cq->max_cql);
34303539
num_entries = budget;

drivers/infiniband/hw/bnxt_re/ib_verbs.h

Lines changed: 3 additions & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -104,6 +104,8 @@ struct bnxt_re_cq {
104104
#define MAX_CQL_PER_POLL 1024
105105
u32 max_cql;
106106
struct ib_umem *umem;
107+
struct ib_umem *resize_umem;
108+
int resize_cqe;
107109
};
108110

109111
struct bnxt_re_mr {
@@ -191,6 +193,7 @@ int bnxt_re_post_recv(struct ib_qp *qp, const struct ib_recv_wr *recv_wr,
191193
const struct ib_recv_wr **bad_recv_wr);
192194
int bnxt_re_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
193195
struct ib_udata *udata);
196+
int bnxt_re_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata);
194197
int bnxt_re_destroy_cq(struct ib_cq *cq, struct ib_udata *udata);
195198
int bnxt_re_poll_cq(struct ib_cq *cq, int num_entries, struct ib_wc *wc);
196199
int bnxt_re_req_notify_cq(struct ib_cq *cq, enum ib_cq_notify_flags flags);

0 commit comments

Comments
 (0)