| author | David Howells <dhowells@redhat.com> | 2024-12-04 07:47:01 +0000 |
|---|---|---|
| committer | Jakub Kicinski <kuba@kernel.org> | 2024-12-09 13:48:31 -0800 |
| commit | a2ea9a9072607c2fd6442bd1ffb4dbdbf882aed7 (patch) | |
| tree | 06b5cb2f466316d1479762cd903a7e2a055c8c10 /net/rxrpc/input.c | |
| parent | 08d55d7cf3f33c730ce2694393efe16b7983a9c8 (diff) | |
rxrpc: Use irq-disabling spinlocks between app and I/O thread
Where a spinlock is used by both the application thread and the I/O thread,
use irq-disabling locking so that an interrupt cannot land on the app thread
while it holds the lock and thereby also slow down the I/O thread spinning
to acquire it.
Signed-off-by: David Howells <dhowells@redhat.com>
cc: Marc Dionne <marc.dionne@auristor.com>
cc: linux-afs@lists.infradead.org
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
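The pattern, in a minimal sketch (the struct and function names here are hypothetical, not taken from the rxrpc code): a lock shared between process context and the I/O thread is taken with spin_lock_irq() rather than spin_lock(), so no interrupt can land inside the critical section and stretch the hold time the other thread has to spin through.

```c
#include <linux/spinlock.h>

/* Hypothetical shared object; "lock" stands in for any spinlock
 * touched by both the application thread and the I/O thread. */
struct demo_shared {
	spinlock_t	lock;
	int		state;
};

/* App side (process context): spin_lock_irq() disables local IRQs
 * for the critical section, so an interrupt cannot fire while the
 * lock is held and lengthen the I/O thread's spin on it. */
static void demo_app_update(struct demo_shared *s)
{
	spin_lock_irq(&s->lock);
	s->state++;
	spin_unlock_irq(&s->lock);
}

/* The I/O-thread side uses the same discipline, keeping hold times
 * short and predictable on both ends. */
static void demo_io_update(struct demo_shared *s)
{
	spin_lock_irq(&s->lock);
	s->state--;
	spin_unlock_irq(&s->lock);
}
```

spin_lock_irq() is only safe where the caller is known to run with IRQs enabled; code that might already have IRQs disabled would use spin_lock_irqsave()/spin_unlock_irqrestore() instead.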
Diffstat (limited to 'net/rxrpc/input.c')
| -rw-r--r-- | net/rxrpc/input.c | 5 |
1 file changed, 1 insertion(+), 4 deletions(-)
```diff
diff --git a/net/rxrpc/input.c b/net/rxrpc/input.c
index a7a249872a54..821e10c03086 100644
--- a/net/rxrpc/input.c
+++ b/net/rxrpc/input.c
@@ -424,7 +424,7 @@ static void rxrpc_input_queue_data(struct rxrpc_call *call, struct sk_buff *skb,
 	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
 	bool last = sp->hdr.flags & RXRPC_LAST_PACKET;
 
-	__skb_queue_tail(&call->recvmsg_queue, skb);
+	skb_queue_tail(&call->recvmsg_queue, skb);
 	rxrpc_input_update_ack_window(call, window, wtop);
 	trace_rxrpc_receive(call, last ? why + 1 : why, sp->hdr.serial, sp->hdr.seq);
 	if (last)
@@ -501,7 +501,6 @@ static void rxrpc_input_data_one(struct rxrpc_call *call, struct sk_buff *skb,
 
 		rxrpc_get_skb(skb, rxrpc_skb_get_to_recvmsg);
 
-		spin_lock(&call->recvmsg_queue.lock);
 		rxrpc_input_queue_data(call, skb, window, wtop, rxrpc_receive_queue);
 		*_notify = true;
 
@@ -523,8 +522,6 @@ static void rxrpc_input_data_one(struct rxrpc_call *call, struct sk_buff *skb,
 					       rxrpc_receive_queue_oos);
 		}
 
-		spin_unlock(&call->recvmsg_queue.lock);
-
 		call->ackr_sack_base = sack;
 	} else {
 		unsigned int slot;
```
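For the recvmsg_queue hunks, the irq-disabling behaviour comes for free: skb_queue_tail() takes the queue's own sk_buff_head lock internally with spin_lock_irqsave(), so the explicit spin_lock()/spin_unlock() around the unlocked __skb_queue_tail() can simply be dropped. A simplified before/after sketch (the helper names are illustrative, not from the patch):

```c
#include <linux/skbuff.h>
#include <linux/spinlock.h>

/* Before: the caller held the queue lock explicitly around the
 * unlocked variant, and a plain spin_lock() does not disable IRQs. */
static void enqueue_old(struct sk_buff_head *q, struct sk_buff *skb)
{
	spin_lock(&q->lock);
	__skb_queue_tail(q, skb);
	spin_unlock(&q->lock);
}

/* After: skb_queue_tail() acquires q->lock itself with
 * spin_lock_irqsave(), giving the irq-disabling locking the commit
 * message calls for with no explicit lock in the caller. */
static void enqueue_new(struct sk_buff_head *q, struct sk_buff *skb)
{
	skb_queue_tail(q, skb);
}
```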