author    Jakub Kicinski <kuba@kernel.org>  2025-10-16 10:53:13 -0700
committer Jakub Kicinski <kuba@kernel.org>  2025-10-31 06:46:03 -0700
commit    1a2352ad82b515035efe563f997ef8f5ca4f8080
tree      5ab09f8bb295fef3b0ae901438132fbe757e9d95 /io_uring/kbuf.c
parent    0d0eb186421d0886ac466008235f6d9eedaf918e
parent    e5763491237ffee22d9b554febc2d00669f81dee
Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net

Cross-merge networking fixes after downstream PR (net-6.18-rc4).

No conflicts, adjacent changes:

drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
  ded9813d17d3 ("net: stmmac: Consider Tx VLAN offload tag length for maxSDU")
  26ab9830beab ("net: stmmac: replace has_xxxx with core_type")

Signed-off-by: Jakub Kicinski <kuba@kernel.org>
Diffstat (limited to 'io_uring/kbuf.c')
-rw-r--r--  io_uring/kbuf.c  33
1 file changed, 22 insertions, 11 deletions
diff --git a/io_uring/kbuf.c b/io_uring/kbuf.c
index aad655e38672..a727e020fe03 100644
--- a/io_uring/kbuf.c
+++ b/io_uring/kbuf.c
@@ -155,6 +155,27 @@ static int io_provided_buffers_select(struct io_kiocb *req, size_t *len,
	return 1;
}
+static bool io_should_commit(struct io_kiocb *req, unsigned int issue_flags)
+{
+	/*
+	 * If we came in unlocked, we have no choice but to consume the
+	 * buffer here, otherwise nothing ensures that the buffer won't
+	 * get used by others. This does mean it'll be pinned until the
+	 * IO completes, coming in unlocked means we're being called from
+	 * io-wq context and there may be further retries in async hybrid
+	 * mode. For the locked case, the caller must call commit when
+	 * the transfer completes (or if we get -EAGAIN and must poll or
+	 * retry).
+	 */
+	if (issue_flags & IO_URING_F_UNLOCKED)
+		return true;
+
+	/* uring_cmd commits kbuf upfront, no need to auto-commit */
+	if (!io_file_can_poll(req) && req->opcode != IORING_OP_URING_CMD)
+		return true;
+	return false;
+}
+
static struct io_br_sel io_ring_buffer_select(struct io_kiocb *req, size_t *len,
struct io_buffer_list *bl,
unsigned int issue_flags)
@@ -181,17 +202,7 @@ static struct io_br_sel io_ring_buffer_select(struct io_kiocb *req, size_t *len,
	sel.buf_list = bl;
	sel.addr = u64_to_user_ptr(buf->addr);
-	if (issue_flags & IO_URING_F_UNLOCKED || !io_file_can_poll(req)) {
-		/*
-		 * If we came in unlocked, we have no choice but to consume the
-		 * buffer here, otherwise nothing ensures that the buffer won't
-		 * get used by others. This does mean it'll be pinned until the
-		 * IO completes, coming in unlocked means we're being called from
-		 * io-wq context and there may be further retries in async hybrid
-		 * mode. For the locked case, the caller must call commit when
-		 * the transfer completes (or if we get -EAGAIN and must poll or
-		 * retry).
-		 */
+	if (io_should_commit(req, issue_flags)) {
		io_kbuf_commit(req, sel.buf_list, *len, 1);
		sel.buf_list = NULL;
	}
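
For readers who want to sanity-check the refactored condition, the following is a minimal standalone sketch of the decision the new io_should_commit() helper encodes. It is a userspace model built on assumed stand-ins, not kernel code: struct fake_req, should_commit(), FAKE_F_UNLOCKED and FAKE_OP_URING_CMD are illustrative substitutes for the kernel's io_kiocb, io_should_commit(), IO_URING_F_UNLOCKED and IORING_OP_URING_CMD, and the file_can_poll field stands in for io_file_can_poll(req).

#include <stdbool.h>
#include <stdio.h>

#define FAKE_F_UNLOCKED    (1u << 0)  /* stand-in for IO_URING_F_UNLOCKED */
#define FAKE_OP_URING_CMD  46u        /* stand-in for IORING_OP_URING_CMD */

struct fake_req {
	unsigned int opcode;
	bool file_can_poll;           /* models io_file_can_poll(req) */
};

/* Mirrors the branch structure of the helper added in the hunk above. */
static bool should_commit(const struct fake_req *req, unsigned int issue_flags)
{
	/* Unlocked (io-wq) callers must consume the buffer right away. */
	if (issue_flags & FAKE_F_UNLOCKED)
		return true;

	/*
	 * Locked callers normally commit later themselves; the exception is
	 * a non-pollable file, unless the request is a uring_cmd, which
	 * commits its kbuf upfront.
	 */
	if (!req->file_can_poll && req->opcode != FAKE_OP_URING_CMD)
		return true;

	return false;
}

int main(void)
{
	const struct fake_req cases[] = {
		{ .opcode = 0,                 .file_can_poll = false },
		{ .opcode = 0,                 .file_can_poll = true  },
		{ .opcode = FAKE_OP_URING_CMD, .file_can_poll = false },
	};

	for (unsigned int flags = 0; flags <= FAKE_F_UNLOCKED; flags++)
		for (size_t i = 0; i < sizeof(cases) / sizeof(cases[0]); i++)
			printf("unlocked=%u can_poll=%d uring_cmd=%d -> commit now=%d\n",
			       flags & FAKE_F_UNLOCKED ? 1 : 0,
			       cases[i].file_can_poll,
			       cases[i].opcode == FAKE_OP_URING_CMD,
			       should_commit(&cases[i], flags));
	return 0;
}

Compiled with any C99 compiler, the sketch prints the full truth table: an unlocked caller always commits the buffer immediately, a locked caller commits immediately only when the file cannot poll and the request is not a uring_cmd, and in every other case the commit is deferred to the caller, matching the behavior described in the comment block above.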