commit 4de44542991ed4cb8c9fb2ccd766d6e6015101b0 (patch)
tree   aa93b5a498684431c97d4b57d65a7b036387f084 /net
parent 9954464d737dd12f12b274d3da46397e3656f079 (diff)
parent 108f9405ce81085284c7ab09b84784b94b611435 (diff)
Author:    Jakub Kicinski <kuba@kernel.org>  2025-12-02 15:37:48 -0800
Committer: Jakub Kicinski <kuba@kernel.org>  2025-12-02 15:37:53 -0800

    Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net

    Merge in late fixes in preparation for the net-next PR.

    Signed-off-by: Jakub Kicinski <kuba@kernel.org>
Diffstat (limited to 'net')
 net/core/netpoll.c   |  2 +-
 net/sched/sch_cake.c | 58 ++++++++++++++++++++++++-----------------------
 2 files changed, 33 insertions(+), 27 deletions(-)
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index 331764845e8f..09f72f10813c 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -554,6 +554,7 @@ int __netpoll_setup(struct netpoll *np, struct net_device *ndev)
         int err;
 
         skb_queue_head_init(&np->skb_pool);
+        INIT_WORK(&np->refill_wq, refill_skbs_work_handler);
 
         if (ndev->priv_flags & IFF_DISABLE_NETPOLL) {
                 np_err(np, "%s doesn't support polling, aborting\n",
@@ -591,7 +592,6 @@ int __netpoll_setup(struct netpoll *np, struct net_device *ndev)
 
         /* fill up the skb queue */
         refill_skbs(np);
-        INIT_WORK(&np->refill_wq, refill_skbs_work_handler);
 
         /* last thing to do is link it to the net device structure */
         rcu_assign_pointer(ndev->npinfo, npinfo);
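
Editor's note on the netpoll hunk: this is purely an ordering fix. INIT_WORK() moves from the tail of __netpoll_setup() up to just after skb_queue_head_init(), ahead of the first check that can bail out. The plausible rationale, inferred from the diff alone, is that teardown cancels np->refill_wq regardless of how far setup got, and cancelling a never-initialized work item is a bug. A minimal userspace sketch of the pattern, with all names hypothetical:

    #include <stdbool.h>
    #include <stdio.h>
    #include <string.h>

    /* Toy analogue: cleanup always "cancels" the work item, so setup
     * must initialize it before any error exit. */
    struct work_item { bool initialized; };
    struct netpoll_like { struct work_item refill_wq; };

    static void init_work(struct work_item *w) { w->initialized = true; }

    static void cancel_work(struct work_item *w)
    {
        /* In the kernel, cancelling an uninitialized work_struct is
         * undefined; here we merely detect the misuse. */
        if (!w->initialized)
            printf("BUG: cancelling uninitialized work\n");
    }

    static int setup(struct netpoll_like *np, bool dev_supports_polling)
    {
        memset(np, 0, sizeof(*np));
        init_work(&np->refill_wq);      /* before the first failure point */
        if (!dev_supports_polling)
            return -1;                  /* error path; cleanup stays safe */
        return 0;
    }

    int main(void)
    {
        struct netpoll_like np;
        if (setup(&np, false))
            cancel_work(&np.refill_wq); /* prints nothing: work was set up */
        return 0;
    }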
diff --git a/net/sched/sch_cake.c b/net/sched/sch_cake.c
index 0ea9440f68c6..4a64d6397b6f 100644
--- a/net/sched/sch_cake.c
+++ b/net/sched/sch_cake.c
@@ -1591,7 +1591,6 @@ static unsigned int cake_drop(struct Qdisc *sch, struct sk_buff **to_free)
         qdisc_drop_reason(skb, sch, to_free, SKB_DROP_REASON_QDISC_OVERLIMIT);
         sch->q.qlen--;
-        qdisc_tree_reduce_backlog(sch, 1, len);
 
         cake_heapify(q, 0);
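
Editor's note: cake_drop() no longer calls qdisc_tree_reduce_backlog() for every packet it evicts; only the upward propagation to parent qdiscs moves, while the local qlen and backlog bookkeeping stays. The enqueue path below now measures how many packets and bytes the overlimit loop removed and reports them to the hierarchy in one batched call, which also lets it exclude the just-enqueued packet in the NET_XMIT_CN case (see the sketch after the final hunk).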
@@ -1737,14 +1736,14 @@ static void cake_reconfigure(struct Qdisc *sch);
 static s32 cake_enqueue(struct sk_buff *skb, struct Qdisc *sch,
                         struct sk_buff **to_free)
 {
+        u32 idx, tin, prev_qlen, prev_backlog, drop_id;
         struct cake_sched_data *q = qdisc_priv(sch);
-        int len = qdisc_pkt_len(skb);
-        int ret;
+        int len = qdisc_pkt_len(skb), ret;
         struct sk_buff *ack = NULL;
         ktime_t now = ktime_get();
         struct cake_tin_data *b;
         struct cake_flow *flow;
-        u32 idx, tin;
+        bool same_flow = false;
 
         /* choose flow to insert into */
         idx = cake_classify(sch, &b, skb, q->flow_mode, &ret);
@@ -1818,6 +1817,8 @@ static s32 cake_enqueue(struct sk_buff *skb, struct Qdisc *sch,
                 consume_skb(skb);
         } else {
                 /* not splitting */
+                int ack_pkt_len = 0;
+
                 cobalt_set_enqueue_time(skb, now);
                 get_cobalt_cb(skb)->adjusted_len = cake_overhead(q, skb);
                 flow_queue_add(flow, skb);
@@ -1828,13 +1829,13 @@ static s32 cake_enqueue(struct sk_buff *skb, struct Qdisc *sch,
 
                 if (ack) {
                         b->ack_drops++;
                         sch->qstats.drops++;
-                        b->bytes += qdisc_pkt_len(ack);
-                        len -= qdisc_pkt_len(ack);
+                        ack_pkt_len = qdisc_pkt_len(ack);
+                        b->bytes += ack_pkt_len;
                         q->buffer_used += skb->truesize - ack->truesize;
                         if (q->rate_flags & CAKE_FLAG_INGRESS)
                                 cake_advance_shaper(q, b, ack, now, true);
-                        qdisc_tree_reduce_backlog(sch, 1, qdisc_pkt_len(ack));
+                        qdisc_tree_reduce_backlog(sch, 1, ack_pkt_len);
                         consume_skb(ack);
                 } else {
                         sch->q.qlen++;
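
Editor's note: when the ACK filter supersedes a queued ACK, its length is now read once into ack_pkt_len rather than being folded into len via len -= qdisc_pkt_len(ack). Keeping len untouched matters later: the batched accounting after the overlimit loop subtracts the full length of the packet just enqueued, so len must still describe that packet rather than a net value. The immediate qdisc_tree_reduce_backlog(sch, 1, ack_pkt_len) is unchanged in effect, since the replaced ACK was already counted by the parent qdiscs and must be removed from their totals right away.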
@@ -1843,11 +1844,11 @@ static s32 cake_enqueue(struct sk_buff *skb, struct Qdisc *sch,
                 /* stats */
                 b->packets++;
-                b->bytes += len;
-                b->backlogs[idx] += len;
-                b->tin_backlog += len;
-                sch->qstats.backlog += len;
-                q->avg_window_bytes += len;
+                b->bytes += len - ack_pkt_len;
+                b->backlogs[idx] += len - ack_pkt_len;
+                b->tin_backlog += len - ack_pkt_len;
+                sch->qstats.backlog += len - ack_pkt_len;
+                q->avg_window_bytes += len - ack_pkt_len;
         }
 
         if (q->overflow_timeout)
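
Editor's note: because len is no longer decremented, each stats line subtracts ack_pkt_len explicitly; it is 0 when no ACK was filtered, so the common path is unchanged. The net effect matches the old code: for, say, a 1500-byte packet enqueued while a 66-byte ACK is filtered out, sch->qstats.backlog grows by 1500 - 66 = 1434 (new packet in, old ACK out), and b->bytes still grows by the full 1500 because the ACK's 66 bytes were credited separately in the if (ack) branch above.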
@@ -1922,24 +1923,29 @@ static s32 cake_enqueue(struct sk_buff *skb, struct Qdisc *sch,
         if (q->buffer_used > q->buffer_max_used)
                 q->buffer_max_used = q->buffer_used;
 
-        if (q->buffer_used > q->buffer_limit) {
-                bool same_flow = false;
-                u32 dropped = 0;
-                u32 drop_id;
+        if (q->buffer_used <= q->buffer_limit)
+                return NET_XMIT_SUCCESS;
 
-                while (q->buffer_used > q->buffer_limit) {
-                        dropped++;
-                        drop_id = cake_drop(sch, to_free);
+        prev_qlen = sch->q.qlen;
+        prev_backlog = sch->qstats.backlog;
 
-                        if ((drop_id >> 16) == tin &&
-                            (drop_id & 0xFFFF) == idx)
-                                same_flow = true;
-                }
-                b->drop_overlimit += dropped;
+        while (q->buffer_used > q->buffer_limit) {
+                drop_id = cake_drop(sch, to_free);
+                if ((drop_id >> 16) == tin &&
+                    (drop_id & 0xFFFF) == idx)
+                        same_flow = true;
+        }
+
+        prev_qlen -= sch->q.qlen;
+        prev_backlog -= sch->qstats.backlog;
+        b->drop_overlimit += prev_qlen;
 
-                if (same_flow)
-                        return NET_XMIT_CN;
+        if (same_flow) {
+                qdisc_tree_reduce_backlog(sch, prev_qlen - 1,
+                                          prev_backlog - len);
+                return NET_XMIT_CN;
         }
+        qdisc_tree_reduce_backlog(sch, prev_qlen, prev_backlog);
 
         return NET_XMIT_SUCCESS;
 }
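
Editor's note on the final hunk: the overflow handling becomes an early return, and the hand-maintained dropped counter is replaced by before/after deltas of sch->q.qlen and sch->qstats.backlog, both of which cake_drop() already updates. A single batched qdisc_tree_reduce_backlog() then reconciles the parents. The subtle case is NET_XMIT_CN: when the drop loop evicted from the very flow we just enqueued to, the caller treats the new packet as never queued, so it is excluded from the reduction (prev_qlen - 1 packets, prev_backlog - len bytes). A toy walk-through of that arithmetic, with illustrative numbers only:

    #include <stdio.h>

    /* Mirrors the delta bookkeeping in the patched cake_enqueue();
     * all numbers are made up for illustration. */
    int main(void)
    {
        unsigned int qlen = 10, backlog = 15000; /* state after enqueue */
        unsigned int len = 1500;                 /* the packet just queued */
        unsigned int prev_qlen = qlen, prev_backlog = backlog;
        int same_flow = 1;                       /* drop loop hit our flow */

        /* pretend the overlimit loop dropped 3 packets, 4500 bytes */
        qlen -= 3;
        backlog -= 4500;

        prev_qlen -= qlen;       /* packets dropped: 3 */
        prev_backlog -= backlog; /* bytes dropped: 4500 */

        if (same_flow)
            /* NET_XMIT_CN: the parent never counted the new packet,
             * so leave it out of the batched reduction. */
            printf("reduce parents by %u pkts, %u bytes\n",
                   prev_qlen - 1, prev_backlog - len);
        else
            printf("reduce parents by %u pkts, %u bytes\n",
                   prev_qlen, prev_backlog);
        return 0;
    }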