author		Ankit Garg <nktgrg@google.com>	2025-11-06 11:27:45 -0800
committer	Jakub Kicinski <kuba@kernel.org>	2025-11-10 17:36:37 -0800
commit		d235bb213f411ace8317bcca3740a1008628ea9c (patch)
tree		c001367c17fe717dddef3777cda38591b85e6a9a
parent		091a3b6ff2b98354270cb9278faad6d17d5aa27d (diff)
gve: Allow ethtool to configure rx_buf_len
Add support for getting and setting the RX buffer length via the
ethtool ring parameters (`ethtool -g`/`-G`). The driver restricts the
allowed buffer length to 2048 (SZ_2K) by default and allows 4096
(SZ_4K) based on device options.

As XDP is only supported when the `rx_buf_len` is 2048, the driver now
enforces this in two places:

1. In `gve_xdp_set`, rejecting XDP programs if the current buffer
   length is not 2048.
2. In `gve_set_rx_buf_len_config`, rejecting buffer length changes if
   XDP is loaded and the new length is not 2048.

Signed-off-by: Ankit Garg <nktgrg@google.com>
Reviewed-by: Harshitha Ramamurthy <hramamurthy@google.com>
Reviewed-by: Jordan Rhee <jordanrhee@google.com>
Reviewed-by: Willem de Bruijn <willemb@google.com>
Signed-off-by: Joshua Washington <joshwash@google.com>
Link: https://patch.msgid.link/20251106192746.243525-4-joshwash@google.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
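As a quick orientation before the diff, the checks introduced here reduce to
the sketch below. It is a condensed restatement of the
gve_set_rx_buf_len_config hunk in gve_main.c further down, not separate
code; the wrapper name is illustrative and the netlink extack error
reporting of the real function is omitted for brevity.

/* Condensed sketch of the validation this patch adds; mirrors the
 * gve_set_rx_buf_len_config hunk in gve_main.c below.
 */
static int gve_rx_buf_len_check_sketch(struct gve_priv *priv, u32 rx_buf_len,
				       struct gve_rx_alloc_rings_cfg *cfg)
{
	if (rx_buf_len == cfg->packet_buffer_size)
		return 0;			/* no change requested */

	/* 4K buffers are only available on DQO and only when the
	 * device options advertise them.
	 */
	if (!gve_is_dqo(priv) || priv->max_rx_buffer_size < SZ_4K)
		return -EOPNOTSUPP;

	/* XDP is only supported with 2048-byte Rx buffers */
	if (priv->xdp_prog && rx_buf_len != SZ_2K)
		return -EINVAL;

	/* only the two supported sizes are accepted */
	if (rx_buf_len != SZ_2K && rx_buf_len != SZ_4K)
		return -EINVAL;

	/* staged here; applied when the rings are (re)allocated */
	cfg->packet_buffer_size = rx_buf_len;
	return 0;
}

With ETHTOOL_RING_USE_RX_BUF_LEN advertised in gve_ethtool_ops, the value
should be settable from userspace with something like
`ethtool -G <dev> rx-buf-len 4096` and read back with `ethtool -g <dev>`,
assuming an ethtool build that understands the rx-buf-len ring parameter.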
-rw-r--r--	drivers/net/ethernet/google/gve/gve.h		9
-rw-r--r--	drivers/net/ethernet/google/gve/gve_ethtool.c	13
-rw-r--r--	drivers/net/ethernet/google/gve/gve_main.c	39
3 files changed, 60 insertions(+), 1 deletion(-)
diff --git a/drivers/net/ethernet/google/gve/gve.h b/drivers/net/ethernet/google/gve/gve.h
index c237d00c5ab3..a33b44c1eb86 100644
--- a/drivers/net/ethernet/google/gve/gve.h
+++ b/drivers/net/ethernet/google/gve/gve.h
@@ -1167,6 +1167,12 @@ static inline bool gve_is_gqi(struct gve_priv *priv)
priv->queue_format == GVE_GQI_QPL_FORMAT;
}
+static inline bool gve_is_dqo(struct gve_priv *priv)
+{
+ return priv->queue_format == GVE_DQO_RDA_FORMAT ||
+ priv->queue_format == GVE_DQO_QPL_FORMAT;
+}
+
static inline u32 gve_num_tx_queues(struct gve_priv *priv)
{
return priv->tx_cfg.num_queues + priv->tx_cfg.num_xdp_queues;
@@ -1248,6 +1254,9 @@ void gve_rx_free_rings_gqi(struct gve_priv *priv,
void gve_rx_start_ring_gqi(struct gve_priv *priv, int idx);
void gve_rx_stop_ring_gqi(struct gve_priv *priv, int idx);
bool gve_header_split_supported(const struct gve_priv *priv);
+int gve_set_rx_buf_len_config(struct gve_priv *priv, u32 rx_buf_len,
+ struct netlink_ext_ack *extack,
+ struct gve_rx_alloc_rings_cfg *rx_alloc_cfg);
int gve_set_hsplit_config(struct gve_priv *priv, u8 tcp_data_split,
struct gve_rx_alloc_rings_cfg *rx_alloc_cfg);
/* rx buffer handling */
diff --git a/drivers/net/ethernet/google/gve/gve_ethtool.c b/drivers/net/ethernet/google/gve/gve_ethtool.c
index db6fc855a511..52500ae8348e 100644
--- a/drivers/net/ethernet/google/gve/gve_ethtool.c
+++ b/drivers/net/ethernet/google/gve/gve_ethtool.c
@@ -529,6 +529,8 @@ static void gve_get_ringparam(struct net_device *netdev,
cmd->rx_pending = priv->rx_desc_cnt;
cmd->tx_pending = priv->tx_desc_cnt;
+ kernel_cmd->rx_buf_len = priv->rx_cfg.packet_buffer_size;
+
if (!gve_header_split_supported(priv))
kernel_cmd->tcp_data_split = ETHTOOL_TCP_DATA_SPLIT_UNKNOWN;
else if (priv->header_split_enabled)
@@ -589,6 +591,12 @@ static int gve_set_ringparam(struct net_device *netdev,
int err;
gve_get_curr_alloc_cfgs(priv, &tx_alloc_cfg, &rx_alloc_cfg);
+
+ err = gve_set_rx_buf_len_config(priv, kernel_cmd->rx_buf_len, extack,
+ &rx_alloc_cfg);
+ if (err)
+ return err;
+
err = gve_set_hsplit_config(priv, kernel_cmd->tcp_data_split,
&rx_alloc_cfg);
if (err)
@@ -605,6 +613,8 @@ static int gve_set_ringparam(struct net_device *netdev,
return err;
} else {
/* Set ring params for the next up */
+ priv->rx_cfg.packet_buffer_size =
+ rx_alloc_cfg.packet_buffer_size;
priv->header_split_enabled = rx_alloc_cfg.enable_header_split;
priv->tx_desc_cnt = tx_alloc_cfg.ring_size;
priv->rx_desc_cnt = rx_alloc_cfg.ring_size;
@@ -944,7 +954,8 @@ static int gve_get_ts_info(struct net_device *netdev,
const struct ethtool_ops gve_ethtool_ops = {
.supported_coalesce_params = ETHTOOL_COALESCE_USECS,
- .supported_ring_params = ETHTOOL_RING_USE_TCP_DATA_SPLIT,
+ .supported_ring_params = ETHTOOL_RING_USE_TCP_DATA_SPLIT |
+ ETHTOOL_RING_USE_RX_BUF_LEN,
.get_drvinfo = gve_get_drvinfo,
.get_strings = gve_get_strings,
.get_sset_count = gve_get_sset_count,
diff --git a/drivers/net/ethernet/google/gve/gve_main.c b/drivers/net/ethernet/google/gve/gve_main.c
index 616182f4fb81..6fb8fbb38a7d 100644
--- a/drivers/net/ethernet/google/gve/gve_main.c
+++ b/drivers/net/ethernet/google/gve/gve_main.c
@@ -1725,6 +1725,13 @@ static int gve_verify_xdp_configuration(struct net_device *dev,
return -EOPNOTSUPP;
}
+ if (priv->rx_cfg.packet_buffer_size != SZ_2K) {
+ NL_SET_ERR_MSG_FMT_MOD(extack,
+ "XDP is not supported for Rx buf len %d, only %d supported.",
+ priv->rx_cfg.packet_buffer_size, SZ_2K);
+ return -EOPNOTSUPP;
+ }
+
max_xdp_mtu = priv->rx_cfg.packet_buffer_size - sizeof(struct ethhdr);
if (priv->queue_format == GVE_GQI_QPL_FORMAT)
max_xdp_mtu -= GVE_RX_PAD;
@@ -2056,6 +2063,38 @@ bool gve_header_split_supported(const struct gve_priv *priv)
priv->queue_format == GVE_DQO_RDA_FORMAT && !priv->xdp_prog;
}
+int gve_set_rx_buf_len_config(struct gve_priv *priv, u32 rx_buf_len,
+ struct netlink_ext_ack *extack,
+ struct gve_rx_alloc_rings_cfg *rx_alloc_cfg)
+{
+ u32 old_rx_buf_len = rx_alloc_cfg->packet_buffer_size;
+
+ if (rx_buf_len == old_rx_buf_len)
+ return 0;
+
+ /* device options may not always contain support for 4K buffers */
+ if (!gve_is_dqo(priv) || priv->max_rx_buffer_size < SZ_4K) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Modifying Rx buf len is not supported");
+ return -EOPNOTSUPP;
+ }
+
+ if (priv->xdp_prog && rx_buf_len != SZ_2K) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Rx buf len can only be 2048 when XDP is on");
+ return -EINVAL;
+ }
+
+ if (rx_buf_len != SZ_2K && rx_buf_len != SZ_4K) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Rx buf len can only be 2048 or 4096");
+ return -EINVAL;
+ }
+ rx_alloc_cfg->packet_buffer_size = rx_buf_len;
+
+ return 0;
+}
+
int gve_set_hsplit_config(struct gve_priv *priv, u8 tcp_data_split,
struct gve_rx_alloc_rings_cfg *rx_alloc_cfg)
{