author     Takashi Iwai <tiwai@suse.de>  2025-12-02 07:12:56 +0100
committer  Takashi Iwai <tiwai@suse.de>  2025-12-02 07:12:56 +0100
commit     9747b22a417d2a7c478678143863b9777de104e4 (patch)
tree       2690242ac1e95570c3e56a3106752af4cc2b5472 /drivers/net/veth.c
parent     ef5e0a02d842b2c6dfcfd9b80feb185769b892ef (diff)
parent     c5fae31f60a91dbe884ef2789fb3440bb4cddf05 (diff)
Merge tag 'asoc-v6.19' of https://git.kernel.org/pub/scm/linux/kernel/git/broonie/sound into for-linus
ASoC: Updates for v6.19

This is a very large set of updates. As well as some more extensive cleanup work from Morimoto-san, we've also added a generic SDCA class driver for SoundWire devices, enabling us to support many chips with no custom code. There's also a batch of new drivers added for both SoCs and CODECs.

 - Added a SoundWire SDCA generic class driver, pulling in a little regmap work to support it.
 - A *lot* of cleanup and API improvement work from Morimoto-san.
 - Lots of work on the existing Cirrus, Intel, Maxim and Qualcomm drivers.
 - Support for Allwinner A523, Mediatek MT8189, Qualcomm QCM2290, QRB2210 and SM6115, SpacemiT K1, and TI TAS2568, TAS5802, TAS5806, TAS5815, TAS5828 and TAS5830.

This also pulls in some gpiolib changes supporting shared GPIOs in the core, so that we can convert some of the ASoC drivers open-coding that handling over to the core functionality.
Diffstat (limited to 'drivers/net/veth.c')
-rw-r--r--  drivers/net/veth.c | 38
1 file changed, 20 insertions, 18 deletions
diff --git a/drivers/net/veth.c b/drivers/net/veth.c
index a3046142cb8e..35dd89aff4a9 100644
--- a/drivers/net/veth.c
+++ b/drivers/net/veth.c
@@ -392,14 +392,12 @@ static netdev_tx_t veth_xmit(struct sk_buff *skb, struct net_device *dev)
}
/* Restore Eth hdr pulled by dev_forward_skb/eth_type_trans */
__skb_push(skb, ETH_HLEN);
- /* Depend on prior success packets started NAPI consumer via
- * __veth_xdp_flush(). Cancel TXQ stop if consumer stopped,
- * paired with empty check in veth_poll().
- */
netif_tx_stop_queue(txq);
- smp_mb__after_atomic();
- if (unlikely(__ptr_ring_empty(&rq->xdp_ring)))
- netif_tx_wake_queue(txq);
+ /* Makes sure NAPI peer consumer runs. Consumer is responsible
+ * for starting txq again, until then ndo_start_xmit (this
+ * function) will not be invoked by the netstack again.
+ */
+ __veth_xdp_flush(rq);
break;
case NET_RX_DROP: /* same as NET_XMIT_DROP */
drop:
@@ -900,17 +898,9 @@ static int veth_xdp_rcv(struct veth_rq *rq, int budget,
struct veth_xdp_tx_bq *bq,
struct veth_stats *stats)
{
- struct veth_priv *priv = netdev_priv(rq->dev);
- int queue_idx = rq->xdp_rxq.queue_index;
- struct netdev_queue *peer_txq;
- struct net_device *peer_dev;
int i, done = 0, n_xdpf = 0;
void *xdpf[VETH_XDP_BATCH];
- /* NAPI functions as RCU section */
- peer_dev = rcu_dereference_check(priv->peer, rcu_read_lock_bh_held());
- peer_txq = peer_dev ? netdev_get_tx_queue(peer_dev, queue_idx) : NULL;
-
for (i = 0; i < budget; i++) {
void *ptr = __ptr_ring_consume(&rq->xdp_ring);
@@ -959,9 +949,6 @@ static int veth_xdp_rcv(struct veth_rq *rq, int budget,
rq->stats.vs.xdp_packets += done;
u64_stats_update_end(&rq->stats.syncp);
- if (peer_txq && unlikely(netif_tx_queue_stopped(peer_txq)))
- netif_tx_wake_queue(peer_txq);
-
return done;
}
@@ -969,12 +956,20 @@ static int veth_poll(struct napi_struct *napi, int budget)
{
struct veth_rq *rq =
container_of(napi, struct veth_rq, xdp_napi);
+ struct veth_priv *priv = netdev_priv(rq->dev);
+ int queue_idx = rq->xdp_rxq.queue_index;
+ struct netdev_queue *peer_txq;
struct veth_stats stats = {};
+ struct net_device *peer_dev;
struct veth_xdp_tx_bq bq;
int done;
bq.count = 0;
+ /* NAPI functions as RCU section */
+ peer_dev = rcu_dereference_check(priv->peer, rcu_read_lock_bh_held());
+ peer_txq = peer_dev ? netdev_get_tx_queue(peer_dev, queue_idx) : NULL;
+
xdp_set_return_frame_no_direct();
done = veth_xdp_rcv(rq, budget, &bq, &stats);
@@ -996,6 +991,13 @@ static int veth_poll(struct napi_struct *napi, int budget)
veth_xdp_flush(rq, &bq);
xdp_clear_return_frame_no_direct();
+ /* Release backpressure per NAPI poll */
+ smp_rmb(); /* Paired with netif_tx_stop_queue set_bit */
+ if (peer_txq && netif_tx_queue_stopped(peer_txq)) {
+ txq_trans_cond_update(peer_txq);
+ netif_tx_wake_queue(peer_txq);
+ }
+
return done;
}
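For context on the pattern the hunks above implement: the producer side (veth_xmit) now stops its txq and unconditionally kicks the peer's NAPI consumer via __veth_xdp_flush(), while the consumer side (veth_poll) releases the backpressure once per poll, with smp_rmb() pairing against the set_bit() inside netif_tx_stop_queue(). Below is a minimal, hedged sketch of that stop/kick/wake handshake, not the actual veth code; my_priv, my_xmit(), my_poll(), the single queue index 0 and the placeholder consume step are all illustrative assumptions.

/* Sketch only: stop/kick/wake backpressure between an ndo_start_xmit
 * producer and a NAPI consumer, in the shape the diff above gives veth.
 * All my_* names are invented for illustration.
 */
#include <linux/netdevice.h>
#include <linux/ptr_ring.h>
#include <linux/skbuff.h>

struct my_priv {
	struct ptr_ring ring;		/* producer/consumer ring */
	struct napi_struct napi;	/* consumer context */
	struct net_device *dev;		/* device owning the txq */
};

static netdev_tx_t my_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct my_priv *priv = netdev_priv(dev);
	struct netdev_queue *txq = netdev_get_tx_queue(dev, 0);

	if (unlikely(ptr_ring_produce(&priv->ring, skb))) {
		/* Ring full: stop the queue, then make sure the NAPI
		 * consumer runs.  Only the consumer wakes the queue again,
		 * so the stack will not re-enter this function until then.
		 */
		netif_tx_stop_queue(txq);
		napi_schedule(&priv->napi);
		return NETDEV_TX_BUSY;	/* stack requeues the skb */
	}

	/* Normal case: entry queued, kick the consumer. */
	napi_schedule(&priv->napi);
	return NETDEV_TX_OK;
}

static int my_poll(struct napi_struct *napi, int budget)
{
	struct my_priv *priv = container_of(napi, struct my_priv, napi);
	struct netdev_queue *txq = netdev_get_tx_queue(priv->dev, 0);
	int done = 0;
	void *ptr;

	while (done < budget && (ptr = __ptr_ring_consume(&priv->ring))) {
		dev_kfree_skb_any(ptr);	/* stand-in for the real RX work */
		done++;
	}

	/* Release backpressure once per poll.  The read barrier pairs with
	 * the set_bit() inside netif_tx_stop_queue() on the producer side,
	 * so a stopped queue is observed only after the entries produced
	 * before it was stopped.
	 */
	smp_rmb();
	if (netif_tx_queue_stopped(txq)) {
		txq_trans_cond_update(txq);	/* avoid spurious TX timeouts */
		netif_tx_wake_queue(txq);
	}

	if (done < budget)
		napi_complete_done(napi, done);

	return done;
}

As the removed and added comments in the diff indicate, the pre-patch producer stopped the queue, issued smp_mb__after_atomic() and re-woke itself if the ring had already been drained; after the patch the producer simply stops the queue and guarantees the consumer is scheduled, and the wake-up happens in one place, at the end of the poll loop.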