| author | Jay Bhat <jay.bhat@intel.com> | 2025-10-30 21:17:23 -0500 |
|---|---|---|
| committer | Leon Romanovsky <leon@kernel.org> | 2025-11-02 06:52:58 -0500 |
| commit | 0a192745551cd2e999532075f21c0cfa174bc065 (patch) | |
| tree | d689c6cf941b039e78d7508b3e42ab2abb7d59cc /drivers/infiniband/hw/irdma/utils.c | |
| parent | cd84d8001e5446f6d1a4eca15a502188207523b4 (diff) | |
RDMA/irdma: CQ size and shadow update changes for GEN3
The CQ shadow area should not be updated at the end of a page (once every
64th CQ entry), except when the CQ has no more CQEs. Software must also
increase the requested CQ size by 1 and ensure the CQ is never exactly one
page in size. This works around a quirk in the GEN3 hardware.
Signed-off-by: Jay Bhat <jay.bhat@intel.com>
Signed-off-by: Tatyana Nikolova <tatyana.e.nikolova@intel.com>
Link: https://patch.msgid.link/20251031021726.1003-4-tatyana.e.nikolova@intel.com
Signed-off-by: Leon Romanovsky <leon@kernel.org>
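For illustration only (not part of the patch): a minimal sketch of how the two GEN3 rules in the commit message could be expressed. The names ENTRIES_PER_PAGE, irdma_gen3_cq_depth() and irdma_gen3_update_shadow() are hypothetical, and 64 CQEs per page is taken from the "once every 64th CQ entry" note above; the real sizing and shadow-update logic lives elsewhere in the driver.

/* Hypothetical sketch, not the driver's implementation. */
#define ENTRIES_PER_PAGE	64

/* Rule: request one extra CQE and never end up exactly one page long. */
static u32 irdma_gen3_cq_depth(u32 requested)
{
	u32 depth = requested + 1;	/* SW adds one entry for GEN3 */

	if (depth == ENTRIES_PER_PAGE)	/* avoid a CQ of exactly one page */
		depth++;

	return depth;
}

/* Rule: skip the shadow-area write at a page boundary unless the CQ has
 * no more CQEs to report.
 */
static bool irdma_gen3_update_shadow(u32 head, bool cq_empty)
{
	bool page_end = !((head + 1) % ENTRIES_PER_PAGE);

	return !page_end || cq_empty;
}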
Diffstat (limited to 'drivers/infiniband/hw/irdma/utils.c')
| -rw-r--r-- | drivers/infiniband/hw/irdma/utils.c | 52 |
1 file changed, 18 insertions, 34 deletions
diff --git a/drivers/infiniband/hw/irdma/utils.c b/drivers/infiniband/hw/irdma/utils.c
index 3b19e2b997b0..cc2a12f735d3 100644
--- a/drivers/infiniband/hw/irdma/utils.c
+++ b/drivers/infiniband/hw/irdma/utils.c
@@ -2353,24 +2353,6 @@ void irdma_ib_qp_event(struct irdma_qp *iwqp, enum irdma_qp_event_type event)
 	iwqp->ibqp.event_handler(&ibevent, iwqp->ibqp.qp_context);
 }
 
-bool irdma_cq_empty(struct irdma_cq *iwcq)
-{
-	struct irdma_cq_uk *ukcq;
-	u64 qword3;
-	__le64 *cqe;
-	u8 polarity;
-
-	ukcq = &iwcq->sc_cq.cq_uk;
-	if (ukcq->avoid_mem_cflct)
-		cqe = IRDMA_GET_CURRENT_EXTENDED_CQ_ELEM(ukcq);
-	else
-		cqe = IRDMA_GET_CURRENT_CQ_ELEM(ukcq);
-	get_64bit_val(cqe, 24, &qword3);
-	polarity = (u8)FIELD_GET(IRDMA_CQ_VALID, qword3);
-
-	return polarity != ukcq->polarity;
-}
-
 void irdma_remove_cmpls_list(struct irdma_cq *iwcq)
 {
 	struct irdma_cmpl_gen *cmpl_node;
@@ -2432,6 +2414,8 @@ void irdma_generate_flush_completions(struct irdma_qp *iwqp)
 	struct irdma_qp_uk *qp = &iwqp->sc_qp.qp_uk;
 	struct irdma_ring *sq_ring = &qp->sq_ring;
 	struct irdma_ring *rq_ring = &qp->rq_ring;
+	struct irdma_cq *iwscq = iwqp->iwscq;
+	struct irdma_cq *iwrcq = iwqp->iwrcq;
 	struct irdma_cmpl_gen *cmpl;
 	__le64 *sw_wqe;
 	u64 wqe_qword;
@@ -2439,8 +2423,8 @@ void irdma_generate_flush_completions(struct irdma_qp *iwqp)
 	bool compl_generated = false;
 	unsigned long flags1;
 
-	spin_lock_irqsave(&iwqp->iwscq->lock, flags1);
-	if (irdma_cq_empty(iwqp->iwscq)) {
+	spin_lock_irqsave(&iwscq->lock, flags1);
+	if (irdma_uk_cq_empty(&iwscq->sc_cq.cq_uk)) {
 		unsigned long flags2;
 
 		spin_lock_irqsave(&iwqp->lock, flags2);
@@ -2448,7 +2432,7 @@ void irdma_generate_flush_completions(struct irdma_qp *iwqp)
 			cmpl = kzalloc(sizeof(*cmpl), GFP_ATOMIC);
 			if (!cmpl) {
 				spin_unlock_irqrestore(&iwqp->lock, flags2);
-				spin_unlock_irqrestore(&iwqp->iwscq->lock, flags1);
+				spin_unlock_irqrestore(&iwscq->lock, flags1);
 				return;
 			}
 
@@ -2467,24 +2451,24 @@ void irdma_generate_flush_completions(struct irdma_qp *iwqp)
 				kfree(cmpl);
 				continue;
 			}
-			ibdev_dbg(iwqp->iwscq->ibcq.device,
+			ibdev_dbg(iwscq->ibcq.device,
 				  "DEV: %s: adding wr_id = 0x%llx SQ Completion to list qp_id=%d\n",
 				  __func__, cmpl->cpi.wr_id, qp->qp_id);
-			list_add_tail(&cmpl->list, &iwqp->iwscq->cmpl_generated);
+			list_add_tail(&cmpl->list, &iwscq->cmpl_generated);
 			compl_generated = true;
 		}
 		spin_unlock_irqrestore(&iwqp->lock, flags2);
-		spin_unlock_irqrestore(&iwqp->iwscq->lock, flags1);
+		spin_unlock_irqrestore(&iwscq->lock, flags1);
 		if (compl_generated)
-			irdma_comp_handler(iwqp->iwscq);
+			irdma_comp_handler(iwscq);
 	} else {
-		spin_unlock_irqrestore(&iwqp->iwscq->lock, flags1);
+		spin_unlock_irqrestore(&iwscq->lock, flags1);
 		mod_delayed_work(iwqp->iwdev->cleanup_wq, &iwqp->dwork_flush,
 				 msecs_to_jiffies(IRDMA_FLUSH_DELAY_MS));
 	}
 
-	spin_lock_irqsave(&iwqp->iwrcq->lock, flags1);
-	if (irdma_cq_empty(iwqp->iwrcq)) {
+	spin_lock_irqsave(&iwrcq->lock, flags1);
+	if (irdma_uk_cq_empty(&iwrcq->sc_cq.cq_uk)) {
 		unsigned long flags2;
 
 		spin_lock_irqsave(&iwqp->lock, flags2);
@@ -2492,7 +2476,7 @@ void irdma_generate_flush_completions(struct irdma_qp *iwqp)
 			cmpl = kzalloc(sizeof(*cmpl), GFP_ATOMIC);
 			if (!cmpl) {
 				spin_unlock_irqrestore(&iwqp->lock, flags2);
-				spin_unlock_irqrestore(&iwqp->iwrcq->lock, flags1);
+				spin_unlock_irqrestore(&iwrcq->lock, flags1);
 				return;
 			}
 
@@ -2504,20 +2488,20 @@ void irdma_generate_flush_completions(struct irdma_qp *iwqp)
 			cmpl->cpi.q_type = IRDMA_CQE_QTYPE_RQ;
 			/* remove the RQ WR by moving RQ tail */
 			IRDMA_RING_SET_TAIL(*rq_ring, rq_ring->tail + 1);
-			ibdev_dbg(iwqp->iwrcq->ibcq.device,
+			ibdev_dbg(iwrcq->ibcq.device,
 				  "DEV: %s: adding wr_id = 0x%llx RQ Completion to list qp_id=%d, wqe_idx=%d\n",
 				  __func__, cmpl->cpi.wr_id, qp->qp_id, wqe_idx);
-			list_add_tail(&cmpl->list, &iwqp->iwrcq->cmpl_generated);
+			list_add_tail(&cmpl->list, &iwrcq->cmpl_generated);
 			compl_generated = true;
 		}
 		spin_unlock_irqrestore(&iwqp->lock, flags2);
-		spin_unlock_irqrestore(&iwqp->iwrcq->lock, flags1);
+		spin_unlock_irqrestore(&iwrcq->lock, flags1);
 		if (compl_generated)
-			irdma_comp_handler(iwqp->iwrcq);
+			irdma_comp_handler(iwrcq);
 	} else {
-		spin_unlock_irqrestore(&iwqp->iwrcq->lock, flags1);
+		spin_unlock_irqrestore(&iwrcq->lock, flags1);
 		mod_delayed_work(iwqp->iwdev->cleanup_wq, &iwqp->dwork_flush,
 				 msecs_to_jiffies(IRDMA_FLUSH_DELAY_MS));
 	}
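Note that the patch drops the utils.c helper irdma_cq_empty() and instead calls irdma_uk_cq_empty() on the cq_uk object. The uk-level implementation is not shown in this file; a plausible sketch, reconstructed from the removed body above (the actual helper may differ), would be:

/* Sketch only: reconstructed from the removed irdma_cq_empty() above; the
 * real irdma_uk_cq_empty() lives in the uk layer and may differ.
 */
bool irdma_uk_cq_empty(struct irdma_cq_uk *ukcq)
{
	__le64 *cqe;
	u64 qword3;
	u8 polarity;

	if (ukcq->avoid_mem_cflct)
		cqe = IRDMA_GET_CURRENT_EXTENDED_CQ_ELEM(ukcq);
	else
		cqe = IRDMA_GET_CURRENT_CQ_ELEM(ukcq);

	get_64bit_val(cqe, 24, &qword3);
	polarity = (u8)FIELD_GET(IRDMA_CQ_VALID, qword3);

	/* The CQ is empty when the current CQE's valid bit does not match
	 * the expected polarity, i.e. HW has not written a new entry yet.
	 */
	return polarity != ukcq->polarity;
}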