Diffstat (limited to 'net/mptcp/protocol.c')
-rw-r--r--  net/mptcp/protocol.c  78
1 file changed, 76 insertions(+), 2 deletions(-)
diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c
index f5526855a2e5..dfed036e0591 100644
--- a/net/mptcp/protocol.c
+++ b/net/mptcp/protocol.c
@@ -659,6 +659,39 @@ static void mptcp_dss_corruption(struct mptcp_sock *msk, struct sock *ssk)
         }
 }
 
+static void __mptcp_add_backlog(struct sock *sk,
+                                struct mptcp_subflow_context *subflow,
+                                struct sk_buff *skb)
+{
+        struct mptcp_sock *msk = mptcp_sk(sk);
+        struct sk_buff *tail = NULL;
+        bool fragstolen;
+        int delta;
+
+        if (unlikely(sk->sk_state == TCP_CLOSE)) {
+                kfree_skb_reason(skb, SKB_DROP_REASON_SOCKET_CLOSE);
+                return;
+        }
+
+        /* Try to coalesce with the last skb in our backlog */
+        if (!list_empty(&msk->backlog_list))
+                tail = list_last_entry(&msk->backlog_list, struct sk_buff, list);
+
+        if (tail && MPTCP_SKB_CB(skb)->map_seq == MPTCP_SKB_CB(tail)->end_seq &&
+            skb->sk == tail->sk &&
+            __mptcp_try_coalesce(sk, tail, skb, &fragstolen, &delta)) {
+                skb->truesize -= delta;
+                kfree_skb_partial(skb, fragstolen);
+                __mptcp_subflow_lend_fwdmem(subflow, delta);
+                WRITE_ONCE(msk->backlog_len, msk->backlog_len + delta);
+                return;
+        }
+
+        list_add_tail(&skb->list, &msk->backlog_list);
+        mptcp_subflow_lend_fwdmem(subflow, skb);
+        WRITE_ONCE(msk->backlog_len, msk->backlog_len + skb->truesize);
+}
+
 static bool __mptcp_move_skbs_from_subflow(struct mptcp_sock *msk,
                                            struct sock *ssk)
 {
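
The helper added above implements a coalesce-or-append policy: when the incoming skb starts exactly at the backlog tail's end_seq, comes from the same subflow socket, and __mptcp_try_coalesce() succeeds, it is merged into the tail and only the truesize delta is accounted; otherwise it is appended and charged in full. Below is a self-contained userspace sketch of that policy; every type and function name in it is invented for illustration, none of it is kernel API.

    /* Coalesce-or-append, modeled on __mptcp_add_backlog(): merge a new
     * segment into the tail when it is contiguous and from the same
     * source, otherwise queue a new node. Illustrative names throughout.
     */
    #include <stdio.h>
    #include <stdlib.h>

    struct seg {
            unsigned long long seq;         /* first sequence number covered */
            unsigned long long end_seq;     /* one past the last byte */
            int src;                        /* stands in for skb->sk */
            struct seg *next;
    };

    static struct seg *tail;                /* backlog tail; head elided */

    static void backlog_add(unsigned long long seq, unsigned long long end_seq,
                            int src)
    {
            /* Same three-part test as the kernel code: a tail exists, the
             * new segment starts where the tail ends, the source matches.
             */
            if (tail && seq == tail->end_seq && src == tail->src) {
                    tail->end_seq = end_seq;        /* coalesce: extend in place */
                    return;
            }

            struct seg *s = malloc(sizeof(*s));

            s->seq = seq;
            s->end_seq = end_seq;
            s->src = src;
            s->next = NULL;
            if (tail)
                    tail->next = s;
            tail = s;
    }

    int main(void)
    {
            backlog_add(0, 100, 1);
            backlog_add(100, 250, 1);       /* contiguous, same src: merged */
            backlog_add(250, 320, 2);       /* different src: new node */
            printf("tail covers [%llu, %llu) from src %d\n",
                   tail->seq, tail->end_seq, tail->src);
            return 0;
    }

Coalescing contiguous segments keeps the backlog list short, which presumably makes the later bulk move into the msk receive queue cheaper, at the cost of a few extra comparisons per enqueue.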
@@ -705,8 +738,13 @@ static bool __mptcp_move_skbs_from_subflow(struct mptcp_sock *msk,
                 size_t len = skb->len - offset;
 
                 mptcp_init_skb(ssk, skb, offset, len);
-                mptcp_subflow_lend_fwdmem(subflow, skb);
-                ret = __mptcp_move_skb(sk, skb) || ret;
+
+                if (true) {
+                        mptcp_subflow_lend_fwdmem(subflow, skb);
+                        ret |= __mptcp_move_skb(sk, skb);
+                } else {
+                        __mptcp_add_backlog(sk, subflow, skb);
+                }
                 seq += len;
 
                 if (unlikely(map_remaining < len)) {
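
Two details in the hunk above are worth calling out. First, ret |= __mptcp_move_skb(sk, skb) is behaviorally equivalent to the old ret = __mptcp_move_skb(sk, skb) || ret, since both forms evaluate the move unconditionally. Second, the if (true) guard reads as deliberate scaffolding: at this point the __mptcp_add_backlog() branch is dead code, presumably until a follow-up patch in the series replaces the constant with a real test for whether the skb can be moved into the msk receive queue directly.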
@@ -2531,6 +2569,9 @@ out:
 void mptcp_close_ssk(struct sock *sk, struct sock *ssk,
                      struct mptcp_subflow_context *subflow)
 {
+        struct mptcp_sock *msk = mptcp_sk(sk);
+        struct sk_buff *skb;
+
         /* The first subflow can already be closed and still in the list */
         if (subflow->close_event_done)
                 return;
@@ -2540,6 +2581,17 @@ void mptcp_close_ssk(struct sock *sk, struct sock *ssk,
         if (sk->sk_state == TCP_ESTABLISHED)
                 mptcp_event(MPTCP_EVENT_SUB_CLOSED, mptcp_sk(sk), ssk, GFP_KERNEL);
 
+        /* Remove any reference from the backlog to this ssk; backlog skbs consume
+         * space in the msk receive queue, no need to touch sk->sk_rmem_alloc
+         */
+        list_for_each_entry(skb, &msk->backlog_list, list) {
+                if (skb->sk != ssk)
+                        continue;
+
+                atomic_sub(skb->truesize, &skb->sk->sk_rmem_alloc);
+                skb->sk = NULL;
+        }
+
         /* subflow aborted before reaching the fully_established status
          * attempt the creation of the next subflow
          */
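
The loop above disassociates from the closing ssk every backlog skb it still owns: the skb's truesize is subtracted back from the subflow's sk_rmem_alloc and skb->sk is cleared, so no backlog entry can dangle once the ssk goes away; as the comment notes, the payload already counts against the msk receive queue. A minimal userspace sketch of the same disown-and-uncharge walk follows; the types and names are illustrative only, not kernel API.

    /* Disown-and-uncharge, modeled on the loop in mptcp_close_ssk(): when
     * a source closes, every node it owns returns its memory charge and
     * drops the owner pointer.
     */
    #include <stdatomic.h>
    #include <stdio.h>

    struct src {
            atomic_int rmem_alloc;          /* stands in for sk->sk_rmem_alloc */
    };

    struct node {
            int truesize;
            struct src *sk;                 /* owning source, NULL once disowned */
            struct node *next;
    };

    static void disown_from(struct node *head, struct src *closing)
    {
            for (struct node *n = head; n; n = n->next) {
                    if (n->sk != closing)
                            continue;

                    /* Give the charge back before the owner disappears. */
                    atomic_fetch_sub(&n->sk->rmem_alloc, n->truesize);
                    n->sk = NULL;
            }
    }

    int main(void)
    {
            struct src a = { 768 };
            struct node n2 = { 512, &a, NULL };
            struct node n1 = { 256, &a, &n2 };

            disown_from(&n1, &a);
            printf("rmem after disown: %d, owner: %p\n",
                   atomic_load(&a.rmem_alloc), (void *)n1.sk);
            return 0;
    }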
@@ -2769,12 +2821,31 @@ static void mptcp_mp_fail_no_response(struct mptcp_sock *msk)
         unlock_sock_fast(ssk, slow);
 }
 
+static void mptcp_backlog_purge(struct sock *sk)
+{
+        struct mptcp_sock *msk = mptcp_sk(sk);
+        struct sk_buff *tmp, *skb;
+        LIST_HEAD(backlog);
+
+        mptcp_data_lock(sk);
+        list_splice_init(&msk->backlog_list, &backlog);
+        msk->backlog_len = 0;
+        mptcp_data_unlock(sk);
+
+        list_for_each_entry_safe(skb, tmp, &backlog, list) {
+                mptcp_borrow_fwdmem(sk, skb);
+                kfree_skb_reason(skb, SKB_DROP_REASON_SOCKET_CLOSE);
+        }
+        sk_mem_reclaim(sk);
+}
+
 static void mptcp_do_fastclose(struct sock *sk)
 {
         struct mptcp_subflow_context *subflow, *tmp;
         struct mptcp_sock *msk = mptcp_sk(sk);
 
         mptcp_set_state(sk, TCP_CLOSE);
+        mptcp_backlog_purge(sk);
 
         /* Explicitly send the fastclose reset as need */
         if (__mptcp_check_fallback(msk))
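
mptcp_backlog_purge() follows the classic detach-then-free shape: the whole backlog is spliced onto a local list in O(1) under the msk data lock and the length is reset, then the skbs are freed with the lock already dropped, each one first handing its forward memory back to the msk so the final sk_mem_reclaim() can return it. Below is a hedged userspace sketch of the same pattern using pthreads; nothing in it is the kernel locking API.

    /* Detach-then-free, modeled on mptcp_backlog_purge(): splice the whole
     * list out under the lock, free the nodes with the lock dropped.
     */
    #include <pthread.h>
    #include <stdlib.h>

    struct node {
            struct node *next;
    };

    static pthread_mutex_t backlog_lock = PTHREAD_MUTEX_INITIALIZER;
    static struct node *backlog_head;
    static size_t backlog_len;

    static void backlog_purge(void)
    {
            struct node *batch;

            pthread_mutex_lock(&backlog_lock);
            batch = backlog_head;           /* O(1) splice of the whole list */
            backlog_head = NULL;
            backlog_len = 0;
            pthread_mutex_unlock(&backlog_lock);

            while (batch) {                 /* free outside the critical section */
                    struct node *next = batch->next;

                    free(batch);
                    batch = next;
            }
    }

    int main(void)
    {
            for (int i = 0; i < 3; i++) {
                    struct node *n = malloc(sizeof(*n));

                    n->next = backlog_head;
                    backlog_head = n;
                    backlog_len++;
            }
            backlog_purge();
            return 0;
    }

Keeping the frees out of the locked region keeps the critical section short, which can matter on a lock shared with the data path.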
@@ -2853,11 +2924,13 @@ static void __mptcp_init_sock(struct sock *sk)
         INIT_LIST_HEAD(&msk->conn_list);
         INIT_LIST_HEAD(&msk->join_list);
         INIT_LIST_HEAD(&msk->rtx_queue);
+        INIT_LIST_HEAD(&msk->backlog_list);
         INIT_WORK(&msk->work, mptcp_worker);
         msk->out_of_order_queue = RB_ROOT;
         msk->first_pending = NULL;
         msk->timer_ival = TCP_RTO_MIN;
         msk->scaling_ratio = TCP_DEFAULT_SCALING_RATIO;
+        msk->backlog_len = 0;
         WRITE_ONCE(msk->first, NULL);
 
         inet_csk(sk)->icsk_sync_mss = mptcp_sync_mss;
@@ -3234,6 +3307,7 @@ static void mptcp_destroy_common(struct mptcp_sock *msk)
         struct sock *sk = (struct sock *)msk;
 
         __mptcp_clear_xmit(sk);
+        mptcp_backlog_purge(sk);
 
         /* join list will be eventually flushed (with rst) at sock lock release time */
         mptcp_for_each_subflow_safe(msk, subflow, tmp)
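
Taken together, the backlog lifecycle is symmetric: __mptcp_init_sock() initializes backlog_list and zeroes backlog_len, and mptcp_backlog_purge() runs on both the fastclose and the destroy paths. Since list_splice_init() leaves the source list empty and reinitialized, a second purge degenerates to a no-op, so the two teardown paths are safe to run back to back.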