diff options

 io_uring/opdef.c     |  1 +
 io_uring/uring_cmd.c | 21 ++++++++++---------
 io_uring/uring_cmd.h |  1 +
 3 files changed, 14 insertions(+), 9 deletions(-)
diff --git a/io_uring/opdef.c b/io_uring/opdef.c
index 6e0882b051f9..287f9a23b816 100644
--- a/io_uring/opdef.c
+++ b/io_uring/opdef.c
@@ -759,6 +759,7 @@ const struct io_cold_def io_cold_defs[] = {
 	},
 	[IORING_OP_URING_CMD] = {
 		.name			= "URING_CMD",
+		.sqe_copy		= io_uring_cmd_sqe_copy,
 		.cleanup		= io_uring_cmd_cleanup,
 	},
 	[IORING_OP_SEND_ZC] = {
diff --git a/io_uring/uring_cmd.c b/io_uring/uring_cmd.c
index e204f4941d72..9ad0ea5398c2 100644
--- a/io_uring/uring_cmd.c
+++ b/io_uring/uring_cmd.c
@@ -205,17 +205,20 @@ int io_uring_cmd_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 	if (!ac)
 		return -ENOMEM;
 	ac->data.op_data = NULL;
+	ioucmd->sqe = sqe;
+	return 0;
+}
+
+void io_uring_cmd_sqe_copy(struct io_kiocb *req)
+{
+	struct io_uring_cmd *ioucmd = io_kiocb_to_cmd(req, struct io_uring_cmd);
+	struct io_async_cmd *ac = req->async_data;
 
-	/*
-	 * Unconditionally cache the SQE for now - this is only needed for
-	 * requests that go async, but prep handlers must ensure that any
-	 * sqe data is stable beyond prep. Since uring_cmd is special in
-	 * that it doesn't read in per-op data, play it safe and ensure that
-	 * any SQE data is stable beyond prep. This can later get relaxed.
-	 */
-	memcpy(ac->sqes, sqe, uring_sqe_size(req->ctx));
+	/* Should not happen, as REQ_F_SQE_COPIED covers this */
+	if (WARN_ON_ONCE(ioucmd->sqe == ac->sqes))
+		return;
+	memcpy(ac->sqes, ioucmd->sqe, uring_sqe_size(req->ctx));
 	ioucmd->sqe = ac->sqes;
-	return 0;
 }
 
 int io_uring_cmd(struct io_kiocb *req, unsigned int issue_flags)
diff --git a/io_uring/uring_cmd.h b/io_uring/uring_cmd.h
index e6a5142c890e..a6dad47afc6b 100644
--- a/io_uring/uring_cmd.h
+++ b/io_uring/uring_cmd.h
@@ -11,6 +11,7 @@ struct io_async_cmd {
 
 int io_uring_cmd(struct io_kiocb *req, unsigned int issue_flags);
 int io_uring_cmd_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
+void io_uring_cmd_sqe_copy(struct io_kiocb *req);
 void io_uring_cmd_cleanup(struct io_kiocb *req);
 
 bool io_uring_try_cancel_uring_cmd(struct io_ring_ctx *ctx,