author	Alexander Lobakin <aleksander.lobakin@intel.com>	2025-08-26 17:54:55 +0200
committer	Tony Nguyen <anthony.l.nguyen@intel.com>	2025-09-08 10:26:25 -0700
commit	17d370a70bae277678b6ea82d71ef5892e7aaa97 (patch)
tree	0110bb6e5ef7ee62b15b2e8935e71a5a1baadc28 /include/net/libeth
parent	c6142e1913de563ab772f7b0e4ae78d6de9cc5b1 (diff)
xdp, libeth: make the xdp_init_buff() micro-optimization generic
Oftentimes, the compilers are not able to merge two consecutive 32-bit
writes into one 64-bit write on the architectures where that is possible.
This applies to xdp_init_buff(), called for every received frame (or at
least once per each 64 frames when the frag size is fixed).

Move the not-so-pretty hack from libeth_xdp straight to xdp_init_buff(),
but using a proper union around ::frame_sz and ::flags. The optimization
is limited to LE architectures due to the structure layout.

One simple example from idpf with the XDP series applied (Clang 22-git,
CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE => -O2):

add/remove: 0/0 grow/shrink: 0/1 up/down: 0/-27 (-27)

Function                      old   new  delta
idpf_vport_splitq_napi_poll  5076  5049    -27

The perf difference with XDP_DROP is around +0.8-1%, which I see as more
than satisfying.

Suggested-by: Simon Horman <horms@kernel.org>
Signed-off-by: Alexander Lobakin <aleksander.lobakin@intel.com>
Tested-by: Ramu R <ramu.r@intel.com>
Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
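The generic form of the trick can be pictured as follows. This is a
minimal sketch, assuming the kernel's u32/u64 typedefs and byte-order
macros are available; the stripped-down struct and the union member name
frame_sz_flags are illustrative, not the exact upstream layout:

	/* Overlay the two adjacent 32-bit fields with one 64-bit word. */
	struct xdp_buff_sketch {
		union {
			struct {
				u32 frame_sz;
				u32 flags;
			};
			u64 frame_sz_flags;	/* hypothetical name */
		};
	};

	static inline void sketch_init(struct xdp_buff_sketch *xdp,
				       u32 frame_sz)
	{
	#ifdef __LITTLE_ENDIAN
		/* The u32 zero-extends to u64: the low half lands in
		 * ::frame_sz, the high half zeroes ::flags, so one store
		 * replaces two.
		 */
		xdp->frame_sz_flags = frame_sz;
	#else
		/* On big endian the halves would land swapped, hence the
		 * LE-only limitation; keep the per-field writes here.
		 */
		xdp->frame_sz = frame_sz;
		xdp->flags = 0;
	#endif
	}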
Diffstat (limited to 'include/net/libeth')
-rw-r--r--	include/net/libeth/xdp.h	11
1 file changed, 1 insertion(+), 10 deletions(-)
diff --git a/include/net/libeth/xdp.h b/include/net/libeth/xdp.h
index f4880b50e804..bc3507edd589 100644
--- a/include/net/libeth/xdp.h
+++ b/include/net/libeth/xdp.h
@@ -1274,7 +1274,6 @@ bool libeth_xdp_buff_add_frag(struct libeth_xdp_buff *xdp,
  * Internal, use libeth_xdp_process_buff() instead. Initializes XDP buffer
  * head with the Rx buffer data: data pointer, length, headroom, and
  * truesize/tailroom. Zeroes the flags.
- * Uses faster single u64 write instead of per-field access.
  */
 static inline void libeth_xdp_prepare_buff(struct libeth_xdp_buff *xdp,
					    const struct libeth_fqe *fqe,
@@ -1282,17 +1281,9 @@ static inline void libeth_xdp_prepare_buff(struct libeth_xdp_buff *xdp,
 {
 	const struct page *page = __netmem_to_page(fqe->netmem);
 
-#ifdef __LIBETH_WORD_ACCESS
-	static_assert(offsetofend(typeof(xdp->base), flags) -
-		      offsetof(typeof(xdp->base), frame_sz) ==
-		      sizeof(u64));
-
-	*(u64 *)&xdp->base.frame_sz = fqe->truesize;
-#else
-	xdp_init_buff(&xdp->base, fqe->truesize, xdp->base.rxq);
-#endif
 	xdp_prepare_buff(&xdp->base, page_address(page) + fqe->offset,
			 pp_page_to_nmdesc(page)->pp->p.offset, len, true);
+	xdp_init_buff(&xdp->base, fqe->truesize, xdp->base.rxq);
 }
 
 /**
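For reference, libeth_xdp_prepare_buff() as it reads with the patch
applied, reconstructed from the hunks above (the u32 len parameter line
falls between the two hunks and is inferred from the signature):

	static inline void libeth_xdp_prepare_buff(struct libeth_xdp_buff *xdp,
						   const struct libeth_fqe *fqe,
						   u32 len)
	{
		const struct page *page = __netmem_to_page(fqe->netmem);

		/* Set up data pointer, headroom, and length first... */
		xdp_prepare_buff(&xdp->base, page_address(page) + fqe->offset,
				 pp_page_to_nmdesc(page)->pp->p.offset, len,
				 true);
		/* ...then initialize ::frame_sz and zero ::flags, now done
		 * (word-sized where possible) inside generic xdp_init_buff().
		 */
		xdp_init_buff(&xdp->base, fqe->truesize, xdp->base.rxq);
	}

The __LIBETH_WORD_ACCESS special case is gone from the driver side; the
single-u64 write now lives behind xdp_init_buff() for every caller.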