author    Michal Kubiak <michal.kubiak@intel.com>    2025-09-25 11:22:52 +0200
committer Tony Nguyen <anthony.l.nguyen@intel.com>   2025-10-29 13:53:12 -0700
commit    3a4f419f750946181e3d6a339a1ef1942c5b5685 (patch)
tree      ba5f2e97739b5e034fdf1d597db1098478f5acd4 /drivers/net/ethernet/intel/ice/ice_txrx.h
parent    9e314a3c525c91c1e918546f472585cdbd44fbd1 (diff)
ice: drop page splitting and recycling
As part of the transition toward Page Pool integration, remove the legacy
page splitting and recycling logic from the ice driver. This mirrors the
approach taken in commit 920d86f3c552 ("iavf: drop page splitting and
recycling").

The previous model attempted to reuse partially consumed pages by splitting
them and tracking their usage across descriptors. While this was once a
memory optimization, it introduced significant complexity and overhead in
the Rx path, including:

- Manual refcount management and page reuse heuristics;
- Per-descriptor buffer shuffling, which could involve moving dozens of
  `ice_rx_buf` structures per NAPI cycle;
- Increased branching and cache pressure in the hotpath.

This change simplifies the Rx logic by always allocating fresh pages and
letting the networking stack handle their lifecycle. Although this may
temporarily reduce performance (up to ~98% in some XDP cases), it greatly
improves maintainability and paves the way for Page Pool, which will
restore and exceed previous performance levels.

The `ice_rx_buf` array is retained for now to minimize diffstat and ease
future replacement with a shared buffer abstraction.

Co-developed-by: Alexander Lobakin <aleksander.lobakin@intel.com>
Signed-off-by: Alexander Lobakin <aleksander.lobakin@intel.com>
Reviewed-by: Alexander Lobakin <aleksander.lobakin@intel.com>
Reviewed-by: Jacob Keller <jacob.e.keller@intel.com>
Signed-off-by: Michal Kubiak <michal.kubiak@intel.com>
Tested-by: Alexander Nowlin <alexander.nowlin@intel.com>
Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
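For readers unfamiliar with the removed scheme, below is a minimal sketch of
the kind of half-page reuse heuristic this patch drops. It is an illustrative
reconstruction, not the code actually deleted: the helper name is
hypothetical, while struct ice_rx_buf (including the pagecnt_bias field that
this diff removes) and ICE_RXBUF_2048 come from ice_txrx.h; kernel context
(linux/mm.h, linux/skbuff.h) is assumed.

/* Illustrative sketch only: approximate shape of the reuse heuristic that
 * this patch removes, not the driver's exact code.
 */
static bool ice_rx_buf_can_recycle(struct ice_rx_buf *rx_buf)	/* hypothetical */
{
	struct page *page = rx_buf->page;

	/* Never recycle a page that sits on a remote NUMA node. */
	if (page_to_nid(page) != numa_mem_id())
		return false;

	/* pagecnt_bias tracks how many references the driver itself holds;
	 * if anything beyond that still references the page, the stack has
	 * not finished with it and it cannot be reused.
	 */
	if (page_count(page) - rx_buf->pagecnt_bias > 1)
		return false;

	/* Flip to the other half of the page for the next frame. */
	rx_buf->page_offset ^= ICE_RXBUF_2048;
	return true;
}

After this patch, none of that bookkeeping remains: the driver simply hands
each received page to the stack and allocates a new one for the descriptor.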
Diffstat (limited to 'drivers/net/ethernet/intel/ice/ice_txrx.h')
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_txrx.h  | 72 -
1 file changed, 0 insertions(+), 72 deletions(-)
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.h b/drivers/net/ethernet/intel/ice/ice_txrx.h
index 427f672fe053..3c7830f787de 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.h
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.h
@@ -27,71 +27,7 @@
#define ICE_MAX_TXQ_PER_TXQG 128
-/* Attempt to maximize the headroom available for incoming frames. We use a 2K
- * buffer for MTUs <= 1500 and need 1536/1534 to store the data for the frame.
- * This leaves us with 512 bytes of room. From that we need to deduct the
- * space needed for the shared info and the padding needed to IP align the
- * frame.
- *
- * Note: For cache line sizes 256 or larger this value is going to end
- * up negative. In these cases we should fall back to the legacy
- * receive path.
- */
-#if (PAGE_SIZE < 8192)
-#define ICE_2K_TOO_SMALL_WITH_PADDING \
- ((unsigned int)(NET_SKB_PAD + ICE_RXBUF_1536) > \
- SKB_WITH_OVERHEAD(ICE_RXBUF_2048))
-
-/**
- * ice_compute_pad - compute the padding
- * @rx_buf_len: buffer length
- *
- * Figure out the size of half page based on given buffer length and
- * then subtract the skb_shared_info followed by subtraction of the
- * actual buffer length; this in turn results in the actual space that
- * is left for padding usage
- */
-static inline int ice_compute_pad(int rx_buf_len)
-{
- int half_page_size;
-
- half_page_size = ALIGN(rx_buf_len, PAGE_SIZE / 2);
- return SKB_WITH_OVERHEAD(half_page_size) - rx_buf_len;
-}
-
-/**
- * ice_skb_pad - determine the padding that we can supply
- *
- * Figure out the right Rx buffer size and based on that calculate the
- * padding
- */
-static inline int ice_skb_pad(void)
-{
- int rx_buf_len;
-
- /* If a 2K buffer cannot handle a standard Ethernet frame then
- * optimize padding for a 3K buffer instead of a 1.5K buffer.
- *
- * For a 3K buffer we need to add enough padding to allow for
- * tailroom due to NET_IP_ALIGN possibly shifting us out of
- * cache-line alignment.
- */
- if (ICE_2K_TOO_SMALL_WITH_PADDING)
- rx_buf_len = ICE_RXBUF_3072 + SKB_DATA_ALIGN(NET_IP_ALIGN);
- else
- rx_buf_len = ICE_RXBUF_1536;
-
- /* if needed make room for NET_IP_ALIGN */
- rx_buf_len -= NET_IP_ALIGN;
-
- return ice_compute_pad(rx_buf_len);
-}
-
-#define ICE_SKB_PAD ice_skb_pad()
-#else
-#define ICE_2K_TOO_SMALL_WITH_PADDING false
#define ICE_SKB_PAD (NET_SKB_PAD + NET_IP_ALIGN)
-#endif
/* We are assuming that the cache line is always 64 Bytes here for ice.
* In order to make sure that is a correct assumption there is a check in probe
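
For reference, the arithmetic the removed block performed, using values
typical of an x86_64 build (64-byte cache lines, NET_SKB_PAD = 64,
NET_IP_ALIGN = 0, and SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) around
320 bytes): SKB_WITH_OVERHEAD(ICE_RXBUF_2048) = 2048 - 320 = 1728, while
NET_SKB_PAD + ICE_RXBUF_1536 = 64 + 1536 = 1600, so
ICE_2K_TOO_SMALL_WITH_PADDING evaluated to false; ice_skb_pad() then used
rx_buf_len = 1536, and ice_compute_pad(1536) =
SKB_WITH_OVERHEAD(ALIGN(1536, 2048)) - 1536 = 1728 - 1536 = 192 bytes of
headroom. The exact figures vary with kernel configuration; they are shown
only to illustrate the logic that now collapses to the single
NET_SKB_PAD + NET_IP_ALIGN definition kept above.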
@@ -202,7 +138,6 @@ struct ice_rx_buf {
struct page *page;
unsigned int page_offset;
unsigned int pgcnt;
- unsigned int pagecnt_bias;
};
struct ice_q_stats {
@@ -368,7 +303,6 @@ struct ice_rx_ring {
struct ice_tx_ring *xdp_ring;
struct ice_rx_ring *next; /* pointer to next ring in q_vector */
struct xsk_buff_pool *xsk_pool;
- u16 max_frame;
u16 rx_buf_len;
dma_addr_t dma; /* physical address of ring */
u8 dcb_tc; /* Traffic class of ring */
@@ -475,15 +409,9 @@ struct ice_coalesce_stored {
static inline unsigned int ice_rx_pg_order(struct ice_rx_ring *ring)
{
-#if (PAGE_SIZE < 8192)
- if (ring->rx_buf_len > (PAGE_SIZE / 2))
- return 1;
-#endif
return 0;
}
-#define ice_rx_pg_size(_ring) (PAGE_SIZE << ice_rx_pg_order(_ring))
-
union ice_32b_rx_flex_desc;
void ice_init_ctrl_rx_descs(struct ice_rx_ring *rx_ring, u32 num_descs);
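
With ice_rx_pg_order() pinned to 0 and the ice_rx_pg_size() wrapper gone,
every Rx buffer is backed by a single, freshly allocated page. Below is a
minimal sketch of what the refill step boils down to under this model; the
helper name is hypothetical, error handling is simplified, rx_ring->dev is
assumed to be the ring's struct device used for DMA mapping, and the real
driver additionally programs the DMA address into the Rx descriptor.

/* Illustrative sketch only: one fresh order-0 page and one DMA mapping per
 * buffer, with no splitting or recycling.
 */
static bool ice_rx_buf_refill(struct ice_rx_ring *rx_ring,	/* hypothetical */
			      struct ice_rx_buf *rx_buf,
			      dma_addr_t *dma)
{
	struct page *page = dev_alloc_pages(ice_rx_pg_order(rx_ring));

	if (unlikely(!page))
		return false;

	*dma = dma_map_page(rx_ring->dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);
	if (dma_mapping_error(rx_ring->dev, *dma)) {
		__free_pages(page, ice_rx_pg_order(rx_ring));
		return false;
	}

	rx_buf->page = page;
	rx_buf->page_offset = 0;
	/* The caller writes *dma into the Rx descriptor; once the frame has
	 * been received, the page is handed to the stack and never recycled.
	 */
	return true;
}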