Diffstat (limited to 'io_uring/rw.c')
 -rw-r--r--  io_uring/rw.c | 14 +++++++-------
 1 file changed, 7 insertions(+), 7 deletions(-)
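
Note: as the hunks below show, this change threads the request's buffer list
(req->buf_list) through to the kbuf helpers instead of the helpers looking it
up themselves. A minimal sketch of the prototypes implied by the new call
sites follows; the exact declarations live in io_uring/kbuf.h and may differ:

	struct io_kiocb;
	struct io_buffer_list;

	/* Release the selected/ring-provided buffer after 'len' bytes of
	 * transfer and return the cflags to post with the CQE. */
	unsigned int io_put_kbuf(struct io_kiocb *req, int len,
				 struct io_buffer_list *bl);

	/* Return an unconsumed provided buffer to its list so it can be
	 * re-selected; returns true if the buffer was recycled. */
	bool io_kbuf_recycle(struct io_kiocb *req, struct io_buffer_list *bl,
			     unsigned issue_flags);

Passing the list explicitly makes the data flow visible at each call site
rather than relying on the helpers to re-read req->buf_list.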
diff --git a/io_uring/rw.c b/io_uring/rw.c
index ae5229ae7dca..7fe188872279 100644
--- a/io_uring/rw.c
+++ b/io_uring/rw.c
@@ -576,7 +576,7 @@ void io_req_rw_complete(struct io_kiocb *req, io_tw_token_t tw)
 	io_req_io_end(req);
 
 	if (req->flags & (REQ_F_BUFFER_SELECTED|REQ_F_BUFFER_RING))
-		req->cqe.flags |= io_put_kbuf(req, req->cqe.res);
+		req->cqe.flags |= io_put_kbuf(req, req->cqe.res, req->buf_list);
 
 	io_req_rw_cleanup(req, 0);
 	io_req_task_complete(req, tw);
@@ -659,7 +659,7 @@ static int kiocb_done(struct io_kiocb *req, ssize_t ret,
 		 * from the submission path.
 		 */
 		io_req_io_end(req);
-		io_req_set_res(req, final_ret, io_put_kbuf(req, ret));
+		io_req_set_res(req, final_ret, io_put_kbuf(req, ret, req->buf_list));
 		io_req_rw_cleanup(req, issue_flags);
 		return IOU_COMPLETE;
 	} else {
@@ -1049,15 +1049,15 @@ int io_read_mshot(struct io_kiocb *req, unsigned int issue_flags)
 		 * Reset rw->len to 0 again to avoid clamping future mshot
 		 * reads, in case the buffer size varies.
 		 */
-		if (io_kbuf_recycle(req, issue_flags))
+		if (io_kbuf_recycle(req, req->buf_list, issue_flags))
 			rw->len = 0;
 		return IOU_RETRY;
 	} else if (ret <= 0) {
-		io_kbuf_recycle(req, issue_flags);
+		io_kbuf_recycle(req, req->buf_list, issue_flags);
 		if (ret < 0)
 			req_set_fail(req);
 	} else if (!(req->flags & REQ_F_APOLL_MULTISHOT)) {
-		cflags = io_put_kbuf(req, ret);
+		cflags = io_put_kbuf(req, ret, req->buf_list);
 	} else {
 		/*
 		 * Any successful return value will keep the multishot read
@@ -1065,7 +1065,7 @@ int io_read_mshot(struct io_kiocb *req, unsigned int issue_flags)
 		 * we fail to post a CQE, or multishot is no longer set, then
 		 * jump to the termination path. This request is then done.
 		 */
-		cflags = io_put_kbuf(req, ret);
+		cflags = io_put_kbuf(req, ret, req->buf_list);
 		rw->len = 0; /* similarly to above, reset len to 0 */
 
 		if (io_req_post_cqe(req, ret, cflags | IORING_CQE_F_MORE)) {
@@ -1362,7 +1362,7 @@ int io_do_iopoll(struct io_ring_ctx *ctx, bool force_nonspin)
 		if (!smp_load_acquire(&req->iopoll_completed))
 			break;
 		nr_events++;
-		req->cqe.flags = io_put_kbuf(req, req->cqe.res);
+		req->cqe.flags = io_put_kbuf(req, req->cqe.res, req->buf_list);
 		if (req->opcode != IORING_OP_URING_CMD)
 			io_req_rw_cleanup(req, 0);
 	}