| author | Linus Torvalds <torvalds@linux-foundation.org> | 2025-12-04 18:54:37 -0800 |
|---|---|---|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2025-12-04 18:54:37 -0800 |
| commit | 55aa394a5ed871208eac11c5f4677cafd258c4dd | |
| tree | ffe8559c27c7b5b7ef1bf68c9cc2ce8c748e2439 /drivers/infiniband/hw/irdma/utils.c | |
| parent | 056daec2925dc200b22c30419bc7b9e01f7843c4 | |
| parent | 80a85a771deb113cfe2e295fb9e84467a43ebfe4 | |
Merge tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma
Pull rdma updates from Jason Gunthorpe:
"This has another new RDMA driver 'bng_en' for latest generation
Broadcom NICs. There might be one more new driver still to come.
Otherwise it is a fairly quite cycle. Summary:
- Minor driver bug fixes and updates to cxgb4, rxe, rdmavt, bnxt_re,
mlx5
- Many bug fix patches for irdma
- WQ_PERCPU annotations and system_dfl_wq changes
- Improved mlx5 support for "other eswitches" and multiple PFs
- 1600Gbps link speed reporting support. Four Digits Now!
- New driver bng_en for latest generation Broadcom NICs
- Bonding support for hns
- Adjust mlx5's HMM-based ODP to work with the very large address
space created by the new 5-level paging default on x86
- Lockdep fixups in rxe and siw"
* tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma: (65 commits)
RDMA/rxe: reclassify sockets in order to avoid false positives from lockdep
RDMA/siw: reclassify sockets in order to avoid false positives from lockdep
RDMA/bng_re: Remove prefetch instruction
RDMA/core: Reduce cond_resched() frequency in __ib_umem_release
RDMA/irdma: Fix SRQ shadow area address initialization
RDMA/irdma: Remove doorbell elision logic
RDMA/irdma: Do not set IBK_LOCAL_DMA_LKEY for GEN3+
RDMA/irdma: Do not directly rely on IB_PD_UNSAFE_GLOBAL_RKEY
RDMA/irdma: Add missing mutex destroy
RDMA/irdma: Fix SIGBUS in AEQ destroy
RDMA/irdma: Add a missing kfree of struct irdma_pci_f for GEN2
RDMA/irdma: Fix data race in irdma_free_pble
RDMA/irdma: Fix data race in irdma_sc_ccq_arm
RDMA/mlx5: Add support for 1600_8x lane speed
RDMA/core: Add new IB rate for XDR (8x) support
IB/mlx5: Reduce IMR KSM size when 5-level paging is enabled
RDMA/bnxt_re: Pass correct flag for dma mr creation
RDMA/bnxt_re: Fix the inline size for GenP7 devices
RDMA/hns: Support reset recovery for bond
RDMA/hns: Support link state reporting for bond
...
Diffstat (limited to 'drivers/infiniband/hw/irdma/utils.c')
| -rw-r--r-- | drivers/infiniband/hw/irdma/utils.c | 58 |
1 file changed, 19 insertions, 39 deletions
```diff
diff --git a/drivers/infiniband/hw/irdma/utils.c b/drivers/infiniband/hw/irdma/utils.c
index 8b94d87b0192..cc2a12f735d3 100644
--- a/drivers/infiniband/hw/irdma/utils.c
+++ b/drivers/infiniband/hw/irdma/utils.c
@@ -452,6 +452,7 @@ struct irdma_cqp_request *irdma_alloc_and_get_cqp_request(struct irdma_cqp *cqp,
 	cqp_request->waiting = wait;
 	refcount_set(&cqp_request->refcnt, 1);
 	memset(&cqp_request->compl_info, 0, sizeof(cqp_request->compl_info));
+	memset(&cqp_request->info, 0, sizeof(cqp_request->info));
 
 	return cqp_request;
 }
@@ -1068,7 +1069,6 @@ int irdma_cqp_qp_create_cmd(struct irdma_sc_dev *dev, struct irdma_sc_qp *qp)
 
 	cqp_info = &cqp_request->info;
 	qp_info = &cqp_request->info.in.u.qp_create.info;
-	memset(qp_info, 0, sizeof(*qp_info));
 	qp_info->cq_num_valid = true;
 	qp_info->next_iwarp_state = IRDMA_QP_STATE_RTS;
 	cqp_info->cqp_cmd = IRDMA_OP_QP_CREATE;
@@ -1343,7 +1343,6 @@ int irdma_cqp_qp_destroy_cmd(struct irdma_sc_dev *dev, struct irdma_sc_qp *qp)
 		return -ENOMEM;
 
 	cqp_info = &cqp_request->info;
-	memset(cqp_info, 0, sizeof(*cqp_info));
 	cqp_info->cqp_cmd = IRDMA_OP_QP_DESTROY;
 	cqp_info->post_sq = 1;
 	cqp_info->in.u.qp_destroy.qp = qp;
@@ -1749,7 +1748,6 @@ int irdma_cqp_gather_stats_cmd(struct irdma_sc_dev *dev,
 		return -ENOMEM;
 
 	cqp_info = &cqp_request->info;
-	memset(cqp_info, 0, sizeof(*cqp_info));
 	cqp_info->cqp_cmd = IRDMA_OP_STATS_GATHER;
 	cqp_info->post_sq = 1;
 	cqp_info->in.u.stats_gather.info = pestat->gather_info;
@@ -1789,7 +1787,6 @@ int irdma_cqp_stats_inst_cmd(struct irdma_sc_vsi *vsi, u8 cmd,
 		return -ENOMEM;
 
 	cqp_info = &cqp_request->info;
-	memset(cqp_info, 0, sizeof(*cqp_info));
 	cqp_info->cqp_cmd = cmd;
 	cqp_info->post_sq = 1;
 	cqp_info->in.u.stats_manage.info = *stats_info;
@@ -1890,7 +1887,6 @@ int irdma_cqp_ws_node_cmd(struct irdma_sc_dev *dev, u8 cmd,
 		return -ENOMEM;
 
 	cqp_info = &cqp_request->info;
-	memset(cqp_info, 0, sizeof(*cqp_info));
 	cqp_info->cqp_cmd = cmd;
 	cqp_info->post_sq = 1;
 	cqp_info->in.u.ws_node.info = *node_info;
@@ -2357,24 +2353,6 @@ void irdma_ib_qp_event(struct irdma_qp *iwqp, enum irdma_qp_event_type event)
 	iwqp->ibqp.event_handler(&ibevent, iwqp->ibqp.qp_context);
 }
 
-bool irdma_cq_empty(struct irdma_cq *iwcq)
-{
-	struct irdma_cq_uk *ukcq;
-	u64 qword3;
-	__le64 *cqe;
-	u8 polarity;
-
-	ukcq = &iwcq->sc_cq.cq_uk;
-	if (ukcq->avoid_mem_cflct)
-		cqe = IRDMA_GET_CURRENT_EXTENDED_CQ_ELEM(ukcq);
-	else
-		cqe = IRDMA_GET_CURRENT_CQ_ELEM(ukcq);
-	get_64bit_val(cqe, 24, &qword3);
-	polarity = (u8)FIELD_GET(IRDMA_CQ_VALID, qword3);
-
-	return polarity != ukcq->polarity;
-}
-
 void irdma_remove_cmpls_list(struct irdma_cq *iwcq)
 {
 	struct irdma_cmpl_gen *cmpl_node;
@@ -2436,6 +2414,8 @@ void irdma_generate_flush_completions(struct irdma_qp *iwqp)
 	struct irdma_qp_uk *qp = &iwqp->sc_qp.qp_uk;
 	struct irdma_ring *sq_ring = &qp->sq_ring;
 	struct irdma_ring *rq_ring = &qp->rq_ring;
+	struct irdma_cq *iwscq = iwqp->iwscq;
+	struct irdma_cq *iwrcq = iwqp->iwrcq;
 	struct irdma_cmpl_gen *cmpl;
 	__le64 *sw_wqe;
 	u64 wqe_qword;
@@ -2443,8 +2423,8 @@ void irdma_generate_flush_completions(struct irdma_qp *iwqp)
 	bool compl_generated = false;
 	unsigned long flags1;
 
-	spin_lock_irqsave(&iwqp->iwscq->lock, flags1);
-	if (irdma_cq_empty(iwqp->iwscq)) {
+	spin_lock_irqsave(&iwscq->lock, flags1);
+	if (irdma_uk_cq_empty(&iwscq->sc_cq.cq_uk)) {
 		unsigned long flags2;
 
 		spin_lock_irqsave(&iwqp->lock, flags2);
@@ -2452,7 +2432,7 @@ void irdma_generate_flush_completions(struct irdma_qp *iwqp)
 			cmpl = kzalloc(sizeof(*cmpl), GFP_ATOMIC);
 			if (!cmpl) {
 				spin_unlock_irqrestore(&iwqp->lock, flags2);
-				spin_unlock_irqrestore(&iwqp->iwscq->lock, flags1);
+				spin_unlock_irqrestore(&iwscq->lock, flags1);
 				return;
 			}
 
@@ -2471,24 +2451,24 @@ void irdma_generate_flush_completions(struct irdma_qp *iwqp)
 				kfree(cmpl);
 				continue;
 			}
-			ibdev_dbg(iwqp->iwscq->ibcq.device,
+			ibdev_dbg(iwscq->ibcq.device,
 				  "DEV: %s: adding wr_id = 0x%llx SQ Completion to list qp_id=%d\n",
 				  __func__, cmpl->cpi.wr_id, qp->qp_id);
-			list_add_tail(&cmpl->list, &iwqp->iwscq->cmpl_generated);
+			list_add_tail(&cmpl->list, &iwscq->cmpl_generated);
 			compl_generated = true;
 		}
 		spin_unlock_irqrestore(&iwqp->lock, flags2);
-		spin_unlock_irqrestore(&iwqp->iwscq->lock, flags1);
+		spin_unlock_irqrestore(&iwscq->lock, flags1);
 		if (compl_generated)
-			irdma_comp_handler(iwqp->iwscq);
+			irdma_comp_handler(iwscq);
 	} else {
-		spin_unlock_irqrestore(&iwqp->iwscq->lock, flags1);
+		spin_unlock_irqrestore(&iwscq->lock, flags1);
 		mod_delayed_work(iwqp->iwdev->cleanup_wq, &iwqp->dwork_flush,
 				 msecs_to_jiffies(IRDMA_FLUSH_DELAY_MS));
 	}
 
-	spin_lock_irqsave(&iwqp->iwrcq->lock, flags1);
-	if (irdma_cq_empty(iwqp->iwrcq)) {
+	spin_lock_irqsave(&iwrcq->lock, flags1);
+	if (irdma_uk_cq_empty(&iwrcq->sc_cq.cq_uk)) {
 		unsigned long flags2;
 
 		spin_lock_irqsave(&iwqp->lock, flags2);
@@ -2496,7 +2476,7 @@ void irdma_generate_flush_completions(struct irdma_qp *iwqp)
 			cmpl = kzalloc(sizeof(*cmpl), GFP_ATOMIC);
 			if (!cmpl) {
 				spin_unlock_irqrestore(&iwqp->lock, flags2);
-				spin_unlock_irqrestore(&iwqp->iwrcq->lock, flags1);
+				spin_unlock_irqrestore(&iwrcq->lock, flags1);
 				return;
 			}
 
@@ -2508,20 +2488,20 @@ void irdma_generate_flush_completions(struct irdma_qp *iwqp)
 			cmpl->cpi.q_type = IRDMA_CQE_QTYPE_RQ;
 			/* remove the RQ WR by moving RQ tail */
 			IRDMA_RING_SET_TAIL(*rq_ring, rq_ring->tail + 1);
-			ibdev_dbg(iwqp->iwrcq->ibcq.device,
+			ibdev_dbg(iwrcq->ibcq.device,
 				  "DEV: %s: adding wr_id = 0x%llx RQ Completion to list qp_id=%d, wqe_idx=%d\n",
 				  __func__, cmpl->cpi.wr_id, qp->qp_id, wqe_idx);
-			list_add_tail(&cmpl->list, &iwqp->iwrcq->cmpl_generated);
+			list_add_tail(&cmpl->list, &iwrcq->cmpl_generated);
 			compl_generated = true;
 		}
 		spin_unlock_irqrestore(&iwqp->lock, flags2);
-		spin_unlock_irqrestore(&iwqp->iwrcq->lock, flags1);
+		spin_unlock_irqrestore(&iwrcq->lock, flags1);
 		if (compl_generated)
-			irdma_comp_handler(iwqp->iwrcq);
+			irdma_comp_handler(iwrcq);
 	} else {
-		spin_unlock_irqrestore(&iwqp->iwrcq->lock, flags1);
+		spin_unlock_irqrestore(&iwrcq->lock, flags1);
 		mod_delayed_work(iwqp->iwdev->cleanup_wq, &iwqp->dwork_flush,
 				 msecs_to_jiffies(IRDMA_FLUSH_DELAY_MS));
 	}
```
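Two of the changes above generalize beyond irdma. First, the series moves zero-initialization of the per-request command info into irdma_alloc_and_get_cqp_request() itself, so every call site can drop its own memset(). A minimal sketch of that pattern, using simplified hypothetical stand-in types rather than the real driver structs:

```c
#include <stdlib.h>
#include <string.h>

/* Hypothetical stand-ins for struct irdma_cqp_request and its embedded
 * command/completion info; the real structs live in the irdma driver. */
struct cqp_request {
	int waiting;
	struct { int cqp_cmd; int post_sq; } info;
	struct { int op_ret_val; } compl_info;
};

/* After the patch: the allocator clears the whole info block once. */
static struct cqp_request *alloc_and_get_cqp_request(int wait)
{
	struct cqp_request *req = malloc(sizeof(*req));

	if (!req)
		return NULL;
	req->waiting = wait;
	memset(&req->compl_info, 0, sizeof(req->compl_info));
	memset(&req->info, 0, sizeof(req->info));	/* centralized clear */
	return req;
}

/* Call sites drop their per-site memset() and only set the fields they
 * care about; everything else is already zero. */
static void build_qp_destroy(struct cqp_request *req)
{
	req->info.cqp_cmd = 9;	/* stand-in for IRDMA_OP_QP_DESTROY */
	req->info.post_sq = 1;
}

int main(void)
{
	struct cqp_request *req = alloc_and_get_cqp_request(1);

	if (req) {
		build_qp_destroy(req);
		free(req);
	}
	return 0;
}
```

The payoff is that a recycled request can never carry stale command fields just because one call site forgot to clear them. Second, the driver-local irdma_cq_empty() removed above gives way to a shared irdma_uk_cq_empty() helper; the underlying idea is a phase (polarity) bit that hardware flips on each pass around the CQE ring. A sketch of that check, with the valid-bit position assumed for illustration:

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Software tracks the phase it expects valid CQEs to carry; hardware
 * flips the CQE valid bit each time it wraps around the ring. */
struct cq_sw_state {
	uint8_t polarity;
};

static bool cq_empty(const struct cq_sw_state *cq, uint64_t cqe_qword3)
{
	/* treating bit 63 as the valid bit is an assumption of this sketch */
	uint8_t valid = (uint8_t)(cqe_qword3 >> 63);

	/* phase mismatch: hardware has not yet written this entry */
	return valid != cq->polarity;
}

int main(void)
{
	struct cq_sw_state cq = { .polarity = 1 };

	printf("empty: %d\n", cq_empty(&cq, 0x0ULL));	  /* 1: stale entry */
	printf("empty: %d\n", cq_empty(&cq, 1ULL << 63)); /* 0: fresh entry */
	return 0;
}
```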