path: root/drivers/net/ethernet/intel/idpf/idpf_txrx.c
author	Joshua Hay <joshua.a.hay@intel.com>	2025-07-25 11:42:19 -0700
committer	Tony Nguyen <anthony.l.nguyen@intel.com>	2025-08-21 09:04:58 -0700
commit	f2d18e16479cac7a708d77cbfb4220a9114a71fc (patch)
tree	fb770ed552dedee9e49bec4d605a709f7adb2ee7 /drivers/net/ethernet/intel/idpf/idpf_txrx.c
parent	cb83b559bea39f207ee214ee2972657e8576ed18 (diff)
idpf: improve when to set RE bit logic
Track the gap between next_to_use and the last RE index. Set RE again
if the gap is large enough to ensure the RE bit is set frequently.

This is critical before removing the stashing mechanisms, because the
opportunistic descriptor ring cleaning from the out-of-order completions
will go away. Previously, descriptors were "cleaned" by both the
descriptor (RE) completion and the out-of-order completions. Without the
latter, we must ensure the RE bit is set more frequently; otherwise, it
is theoretically possible for the descriptor ring's next_to_clean to
never advance.

The previous implementation depended on the start of a packet falling
on a 64th index in the descriptor ring, which is not guaranteed with
large packets.

Signed-off-by: Luigi Rizzo <lrizzo@google.com>
Signed-off-by: Brian Vazquez <brianvv@google.com>
Signed-off-by: Joshua Hay <joshua.a.hay@intel.com>
Reviewed-by: Madhu Chittim <madhu.chittim@intel.com>
Tested-by: Samuel Salin <Samuel.salin@intel.com>
Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
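To make the wraparound arithmetic concrete, here is a minimal,
standalone C sketch of the same gap check. The struct, constant, and
numbers are illustrative stand-ins, not the driver's own types; the
threshold of 64 is inferred from the commit message's "64th index"
remark about the old modulo test.

/* Standalone sketch of the check in idpf_tx_splitq_need_re().
 * Everything here is a toy stand-in for the driver's definitions.
 */
#include <stdbool.h>
#include <stdio.h>

#define RE_MIN_GAP 64			/* stand-in for IDPF_TX_SPLITQ_RE_MIN_GAP */

struct toy_txq {
	int next_to_use;		/* next free descriptor index */
	int last_re;			/* index where RE was last requested */
	int desc_count;			/* ring size */
};

static bool toy_need_re(const struct toy_txq *q)
{
	int gap = q->next_to_use - q->last_re;

	/* Indices wrap at desc_count, so a negative difference means
	 * next_to_use has passed index 0 since the last RE.
	 */
	if (gap < 0)
		gap += q->desc_count;

	return gap >= RE_MIN_GAP;
}

int main(void)
{
	/* Wrapped case: RE last requested at index 480, next_to_use now
	 * 32 in a 512-entry ring: gap = 32 - 480 = -448, +512 = 64,
	 * which reaches RE_MIN_GAP, so RE must be set again.
	 */
	struct toy_txq q = { .next_to_use = 32, .last_re = 480, .desc_count = 512 };

	printf("need RE: %s\n", toy_need_re(&q) ? "yes" : "no");
	return 0;
}

Unlike the old next_to_use % IDPF_TX_SPLITQ_RE_MIN_GAP test, which fires
only when a packet's EOP descriptor happens to land exactly on a
multiple of the gap, the comparison fires on the first packet at or past
the threshold, regardless of where its descriptors fall.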
Diffstat (limited to 'drivers/net/ethernet/intel/idpf/idpf_txrx.c')
-rw-r--r--	drivers/net/ethernet/intel/idpf/idpf_txrx.c	20
1 file changed, 19 insertions(+), 1 deletion(-)
diff --git a/drivers/net/ethernet/intel/idpf/idpf_txrx.c b/drivers/net/ethernet/intel/idpf/idpf_txrx.c
index 9b63944235fb..ee59153508af 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_txrx.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_txrx.c
@@ -294,6 +294,8 @@ static int idpf_tx_desc_alloc(const struct idpf_vport *vport,
 	 */
 	idpf_queue_change(GEN_CHK, refillq);
 
+	tx_q->last_re = tx_q->desc_count - IDPF_TX_SPLITQ_RE_MIN_GAP;
+
 	return 0;
 
 err_alloc:
@@ -2913,6 +2915,21 @@ static void idpf_tx_set_tstamp_desc(union idpf_flex_tx_ctx_desc *ctx_desc,
 #endif /* CONFIG_PTP_1588_CLOCK */
 
 /**
+ * idpf_tx_splitq_need_re - check whether RE bit needs to be set
+ * @tx_q: pointer to Tx queue
+ *
+ * Return: true if RE bit needs to be set, false otherwise
+ */
+static bool idpf_tx_splitq_need_re(struct idpf_tx_queue *tx_q)
+{
+	int gap = tx_q->next_to_use - tx_q->last_re;
+
+	gap += (gap < 0) ? tx_q->desc_count : 0;
+
+	return gap >= IDPF_TX_SPLITQ_RE_MIN_GAP;
+}
+
+/**
  * idpf_tx_splitq_frame - Sends buffer on Tx ring using flex descriptors
  * @skb: send buffer
  * @tx_q: queue to send buffer on
@@ -2998,9 +3015,10 @@ static netdev_tx_t idpf_tx_splitq_frame(struct sk_buff *skb,
 		 * MIN_RING size to ensure it will be set at least once each
 		 * time around the ring.
 		 */
-		if (!(tx_q->next_to_use % IDPF_TX_SPLITQ_RE_MIN_GAP)) {
+		if (idpf_tx_splitq_need_re(tx_q)) {
 			tx_params.eop_cmd |= IDPF_TXD_FLEX_FLOW_CMD_RE;
 			tx_q->txq_grp->num_completions_pending++;
+			tx_q->last_re = tx_q->next_to_use;
 		}
 
 		if (skb->ip_summed == CHECKSUM_PARTIAL)
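A worked note on the initialization in the first hunk: seeding last_re
to desc_count - IDPF_TX_SPLITQ_RE_MIN_GAP makes the very first check
succeed. Assuming the illustrative 512-entry ring and a threshold of 64,
last_re starts at 448, so the first packet (next_to_use at or just past
0) sees gap = 0 - 448 = -448, which wraps to 64 and meets the threshold;
RE is therefore requested immediately rather than only after the first
64 descriptors are consumed.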