-rw-r--r--  Documentation/filesystems/iomap/operations.rst |   4
-rw-r--r--  fs/backing-file.c                               |   6
-rw-r--r--  fs/iomap/direct-io.c                            | 197
-rw-r--r--  include/linux/fs.h                              |  43
-rw-r--r--  io_uring/rw.c                                   |  16
5 files changed, 114 insertions, 152 deletions
diff --git a/Documentation/filesystems/iomap/operations.rst b/Documentation/filesystems/iomap/operations.rst
index 64f4baf5750e..da982ca7e413 100644
--- a/Documentation/filesystems/iomap/operations.rst
+++ b/Documentation/filesystems/iomap/operations.rst
@@ -490,10 +490,6 @@ These ``struct kiocb`` flags are significant for direct I/O with iomap:
Only meaningful for asynchronous I/O, and only if the entire I/O can
be issued as a single ``struct bio``.
- * ``IOCB_DIO_CALLER_COMP``: Try to run I/O completion from the caller's
- process context.
- See ``linux/fs.h`` for more details.
-
Filesystems should call ``iomap_dio_rw`` from ``->read_iter`` and
``->write_iter``, and set ``FMODE_CAN_ODIRECT`` in the ``->open``
function for the file.
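
[Editorial note] The paragraph above describes the expected call pattern for iomap-based direct I/O. As a rough illustration (not part of this patch, and assuming the current iomap_dio_rw() signature), a filesystem's ->write_iter could forward O_DIRECT writes roughly like this; myfs_iomap_ops and myfs_dio_write_ops are hypothetical placeholders:

static ssize_t myfs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	ssize_t ret;

	/* Hypothetical locking; real filesystems have their own rules. */
	inode_lock(inode);
	/* Direct I/O path only; the buffered fallback is elided in this sketch. */
	ret = iomap_dio_rw(iocb, from, &myfs_iomap_ops,
			   &myfs_dio_write_ops, 0, NULL, 0);
	inode_unlock(inode);
	return ret;
}

FMODE_CAN_ODIRECT would be set on the file in the filesystem's ->open, as the documentation above notes.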
diff --git a/fs/backing-file.c b/fs/backing-file.c
index 15a7f8031084..2a86bb6fcd13 100644
--- a/fs/backing-file.c
+++ b/fs/backing-file.c
@@ -227,12 +227,6 @@ ssize_t backing_file_write_iter(struct file *file, struct iov_iter *iter,
!(file->f_mode & FMODE_CAN_ODIRECT))
return -EINVAL;
- /*
- * Stacked filesystems don't support deferred completions, don't copy
- * this property in case it is set by the issuer.
- */
- flags &= ~IOCB_DIO_CALLER_COMP;
-
old_cred = override_creds(ctx->cred);
if (is_sync_kiocb(iocb)) {
rwf_t rwf = iocb_to_rw_flags(flags);
diff --git a/fs/iomap/direct-io.c b/fs/iomap/direct-io.c
index 8b2f9fb89eb3..00ab58d55c54 100644
--- a/fs/iomap/direct-io.c
+++ b/fs/iomap/direct-io.c
@@ -16,9 +16,8 @@
* Private flags for iomap_dio, must not overlap with the public ones in
* iomap.h:
*/
-#define IOMAP_DIO_NO_INVALIDATE (1U << 25)
-#define IOMAP_DIO_CALLER_COMP (1U << 26)
-#define IOMAP_DIO_INLINE_COMP (1U << 27)
+#define IOMAP_DIO_NO_INVALIDATE (1U << 26)
+#define IOMAP_DIO_COMP_WORK (1U << 27)
#define IOMAP_DIO_WRITE_THROUGH (1U << 28)
#define IOMAP_DIO_NEED_SYNC (1U << 29)
#define IOMAP_DIO_WRITE (1U << 30)
@@ -140,11 +139,6 @@ ssize_t iomap_dio_complete(struct iomap_dio *dio)
}
EXPORT_SYMBOL_GPL(iomap_dio_complete);
-static ssize_t iomap_dio_deferred_complete(void *data)
-{
- return iomap_dio_complete(data);
-}
-
static void iomap_dio_complete_work(struct work_struct *work)
{
struct iomap_dio *dio = container_of(work, struct iomap_dio, aio.work);
@@ -179,33 +173,33 @@ static void iomap_dio_done(struct iomap_dio *dio)
WRITE_ONCE(dio->submit.waiter, NULL);
blk_wake_io_task(waiter);
- } else if (dio->flags & IOMAP_DIO_INLINE_COMP) {
- WRITE_ONCE(iocb->private, NULL);
- iomap_dio_complete_work(&dio->aio.work);
- } else if (dio->flags & IOMAP_DIO_CALLER_COMP) {
- /*
- * If this dio is flagged with IOMAP_DIO_CALLER_COMP, then
- * schedule our completion that way to avoid an async punt to a
- * workqueue.
- */
- /* only polled IO cares about private cleared */
- iocb->private = dio;
- iocb->dio_complete = iomap_dio_deferred_complete;
+ return;
+ }
- /*
- * Invoke ->ki_complete() directly. We've assigned our
- * dio_complete callback handler, and since the issuer set
- * IOCB_DIO_CALLER_COMP, we know their ki_complete handler will
- * notice ->dio_complete being set and will defer calling that
- * handler until it can be done from a safe task context.
- *
- * Note that the 'res' being passed in here is not important
- * for this case. The actual completion value of the request
- * will be gotten from dio_complete when that is run by the
- * issuer.
- */
- iocb->ki_complete(iocb, 0);
- } else {
+ /*
+ * Always run error completions in user context. These are not
+ * performance critical and some code relies on taking sleeping locks
+ * for error handling.
+ */
+ if (dio->error)
+ dio->flags |= IOMAP_DIO_COMP_WORK;
+
+ /*
+ * Never invalidate pages from this context to avoid deadlocks with
+ * buffered I/O completions when called from the ioend workqueue,
+ * or to avoid sleeping when called directly from ->bi_end_io.
+ * Tough luck if you hit the tiny race with someone dirtying the range
+ * right between this check and the actual completion.
+ */
+ if ((dio->flags & IOMAP_DIO_WRITE) &&
+ !(dio->flags & IOMAP_DIO_COMP_WORK)) {
+ if (dio->iocb->ki_filp->f_mapping->nrpages)
+ dio->flags |= IOMAP_DIO_COMP_WORK;
+ else
+ dio->flags |= IOMAP_DIO_NO_INVALIDATE;
+ }
+
+ if (dio->flags & IOMAP_DIO_COMP_WORK) {
struct inode *inode = file_inode(iocb->ki_filp);
/*
@@ -216,7 +210,11 @@ static void iomap_dio_done(struct iomap_dio *dio)
*/
INIT_WORK(&dio->aio.work, iomap_dio_complete_work);
queue_work(inode->i_sb->s_dio_done_wq, &dio->aio.work);
+ return;
}
+
+ WRITE_ONCE(iocb->private, NULL);
+ iomap_dio_complete_work(&dio->aio.work);
}
void iomap_dio_bio_end_io(struct bio *bio)
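
[Editorial note] Taken together, the hunks above make iomap_dio_done() the single decision point for how a direct I/O completes. A condensed sketch of the resulting flow (not literal kernel code; the in-flight waiter handling shown above is omitted):

static void iomap_dio_done_sketch(struct iomap_dio *dio)
{
	struct kiocb *iocb = dio->iocb;

	/* Errors always complete from process context via the workqueue. */
	if (dio->error)
		dio->flags |= IOMAP_DIO_COMP_WORK;

	/* Writes racing with cached pages must invalidate from the workqueue. */
	if ((dio->flags & IOMAP_DIO_WRITE) &&
	    !(dio->flags & IOMAP_DIO_COMP_WORK)) {
		if (iocb->ki_filp->f_mapping->nrpages)
			dio->flags |= IOMAP_DIO_COMP_WORK;
		else
			dio->flags |= IOMAP_DIO_NO_INVALIDATE;
	}

	if (dio->flags & IOMAP_DIO_COMP_WORK) {
		struct inode *inode = file_inode(iocb->ki_filp);

		INIT_WORK(&dio->aio.work, iomap_dio_complete_work);
		queue_work(inode->i_sb->s_dio_done_wq, &dio->aio.work);
		return;
	}

	/* Otherwise complete inline in the current context. */
	WRITE_ONCE(iocb->private, NULL);
	iomap_dio_complete_work(&dio->aio.work);
}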
@@ -252,16 +250,9 @@ u32 iomap_finish_ioend_direct(struct iomap_ioend *ioend)
/*
* Try to avoid another context switch for the completion given
* that we are already called from the ioend completion
- * workqueue, but never invalidate pages from this thread to
- * avoid deadlocks with buffered I/O completions. Tough luck if
- * you hit the tiny race with someone dirtying the range now
- * between this check and the actual completion.
+ * workqueue.
*/
- if (!dio->iocb->ki_filp->f_mapping->nrpages) {
- dio->flags |= IOMAP_DIO_INLINE_COMP;
- dio->flags |= IOMAP_DIO_NO_INVALIDATE;
- }
- dio->flags &= ~IOMAP_DIO_CALLER_COMP;
+ dio->flags &= ~IOMAP_DIO_COMP_WORK;
iomap_dio_done(dio);
}
@@ -306,23 +297,6 @@ static int iomap_dio_zero(const struct iomap_iter *iter, struct iomap_dio *dio,
return 0;
}
-/*
- * Use a FUA write if we need datasync semantics and this is a pure data I/O
- * that doesn't require any metadata updates (including after I/O completion
- * such as unwritten extent conversion) and the underlying device either
- * doesn't have a volatile write cache or supports FUA.
- * This allows us to avoid cache flushes on I/O completion.
- */
-static inline bool iomap_dio_can_use_fua(const struct iomap *iomap,
- struct iomap_dio *dio)
-{
- if (iomap->flags & (IOMAP_F_SHARED | IOMAP_F_DIRTY))
- return false;
- if (!(dio->flags & IOMAP_DIO_WRITE_THROUGH))
- return false;
- return !bdev_write_cache(iomap->bdev) || bdev_fua(iomap->bdev);
-}
-
static int iomap_dio_bio_iter(struct iomap_iter *iter, struct iomap_dio *dio)
{
const struct iomap *iomap = &iter->iomap;
@@ -351,7 +325,24 @@ static int iomap_dio_bio_iter(struct iomap_iter *iter, struct iomap_dio *dio)
return -EINVAL;
if (dio->flags & IOMAP_DIO_WRITE) {
- bio_opf |= REQ_OP_WRITE;
+ bool need_completion_work = true;
+
+ switch (iomap->type) {
+ case IOMAP_MAPPED:
+ /*
+ * Directly mapped I/O does not inherently need to do
+ * work at I/O completion time. But there are various
+ * cases below where this will get set again.
+ */
+ need_completion_work = false;
+ break;
+ case IOMAP_UNWRITTEN:
+ dio->flags |= IOMAP_DIO_UNWRITTEN;
+ need_zeroout = true;
+ break;
+ default:
+ break;
+ }
if (iomap->flags & IOMAP_F_ATOMIC_BIO) {
/*
@@ -364,35 +355,54 @@ static int iomap_dio_bio_iter(struct iomap_iter *iter, struct iomap_dio *dio)
bio_opf |= REQ_ATOMIC;
}
- if (iomap->type == IOMAP_UNWRITTEN) {
- dio->flags |= IOMAP_DIO_UNWRITTEN;
- need_zeroout = true;
- }
-
- if (iomap->flags & IOMAP_F_SHARED)
+ if (iomap->flags & IOMAP_F_SHARED) {
+ /*
+ * Unsharing needs to update metadata at I/O
+ * completion time.
+ */
+ need_completion_work = true;
dio->flags |= IOMAP_DIO_COW;
+ }
- if (iomap->flags & IOMAP_F_NEW)
+ if (iomap->flags & IOMAP_F_NEW) {
+ /*
+ * Newly allocated blocks might need recording in
+ * metadata at I/O completion time.
+ */
+ need_completion_work = true;
need_zeroout = true;
- else if (iomap->type == IOMAP_MAPPED &&
- iomap_dio_can_use_fua(iomap, dio))
- bio_opf |= REQ_FUA;
+ }
- if (!(bio_opf & REQ_FUA))
- dio->flags &= ~IOMAP_DIO_WRITE_THROUGH;
+ /*
+ * Use a FUA write if we need datasync semantics and this is a
+ * pure overwrite that doesn't require any metadata updates.
+ *
+ * This allows us to avoid cache flushes on I/O completion.
+ */
+ if (dio->flags & IOMAP_DIO_WRITE_THROUGH) {
+ if (!need_completion_work &&
+ !(iomap->flags & IOMAP_F_DIRTY) &&
+ (!bdev_write_cache(iomap->bdev) ||
+ bdev_fua(iomap->bdev)))
+ bio_opf |= REQ_FUA;
+ else
+ dio->flags &= ~IOMAP_DIO_WRITE_THROUGH;
+ }
/*
- * We can only do deferred completion for pure overwrites that
+ * We can only do inline completion for pure overwrites that
* don't require additional I/O at completion time.
*
- * This rules out writes that need zeroing or extent conversion,
- * extend the file size, or issue metadata I/O or cache flushes
- * during completion processing.
+ * This rules out writes that need zeroing or metadata updates to
+ * convert unwritten or shared extents.
+ *
+ * Writes that extend i_size are also not supported, but this is
+ * handled in __iomap_dio_rw().
*/
- if (need_zeroout || (pos >= i_size_read(inode)) ||
- ((dio->flags & IOMAP_DIO_NEED_SYNC) &&
- !(bio_opf & REQ_FUA)))
- dio->flags &= ~IOMAP_DIO_CALLER_COMP;
+ if (need_completion_work)
+ dio->flags |= IOMAP_DIO_COMP_WORK;
+
+ bio_opf |= REQ_OP_WRITE;
} else {
bio_opf |= REQ_OP_READ;
}
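
[Editorial note] The write-through branch above folds the removed iomap_dio_can_use_fua() helper into iomap_dio_bio_iter(), keyed off need_completion_work instead of re-checking the mapping flags. A restatement of the resulting FUA rule (sketch only, not from the patch text):

/*
 * A FUA write is used only for write-through requests that are pure
 * overwrites (no completion work, so no unwritten/shared/new extents),
 * on an inode with no dirty metadata, and only when the device either
 * has no volatile write cache or supports FUA.
 */
static bool iomap_dio_want_fua_sketch(const struct iomap *iomap,
				      bool need_completion_work,
				      struct iomap_dio *dio)
{
	if (!(dio->flags & IOMAP_DIO_WRITE_THROUGH))
		return false;
	if (need_completion_work || (iomap->flags & IOMAP_F_DIRTY))
		return false;
	return !bdev_write_cache(iomap->bdev) || bdev_fua(iomap->bdev);
}

If FUA cannot be used, IOMAP_DIO_WRITE_THROUGH is dropped so that __iomap_dio_rw() knows a cache flush is still needed at completion time.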
@@ -413,7 +423,7 @@ static int iomap_dio_bio_iter(struct iomap_iter *iter, struct iomap_dio *dio)
* ones we set for inline and deferred completions. If none of those
* are available for this IO, clear the polled flag.
*/
- if (!(dio->flags & (IOMAP_DIO_INLINE_COMP|IOMAP_DIO_CALLER_COMP)))
+ if (dio->flags & IOMAP_DIO_COMP_WORK)
dio->iocb->ki_flags &= ~IOCB_HIPRI;
if (need_zeroout) {
@@ -653,9 +663,6 @@ __iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
dio->flags |= IOMAP_DIO_FSBLOCK_ALIGNED;
if (iov_iter_rw(iter) == READ) {
- /* reads can always complete inline */
- dio->flags |= IOMAP_DIO_INLINE_COMP;
-
if (iomi.pos >= dio->i_size)
goto out_free_dio;
@@ -669,15 +676,6 @@ __iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
iomi.flags |= IOMAP_WRITE;
dio->flags |= IOMAP_DIO_WRITE;
- /*
- * Flag as supporting deferred completions, if the issuer
- * groks it. This can avoid a workqueue punt for writes.
- * We may later clear this flag if we need to do other IO
- * as part of this IO completion.
- */
- if (iocb->ki_flags & IOCB_DIO_CALLER_COMP)
- dio->flags |= IOMAP_DIO_CALLER_COMP;
-
if (dio_flags & IOMAP_DIO_OVERWRITE_ONLY) {
ret = -EAGAIN;
if (iomi.pos >= dio->i_size ||
@@ -707,6 +705,12 @@ __iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
}
/*
+ * i_size updates must happen from process context.
+ */
+ if (iomi.pos + iomi.len > dio->i_size)
+ dio->flags |= IOMAP_DIO_COMP_WORK;
+
+ /*
* Try to invalidate cache pages for the range we are writing.
* If this invalidation fails, let the caller fall back to
* buffered I/O.
@@ -778,9 +782,14 @@ __iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
* If all the writes we issued were already written through to the
* media, we don't need to flush the cache on IO completion. Clear the
* sync flag for this case.
+ *
+ * Otherwise force workqueue completion if any sync work is
+ * needed, as that needs to be performed from process context.
*/
if (dio->flags & IOMAP_DIO_WRITE_THROUGH)
dio->flags &= ~IOMAP_DIO_NEED_SYNC;
+ else if (dio->flags & IOMAP_DIO_NEED_SYNC)
+ dio->flags |= IOMAP_DIO_COMP_WORK;
/*
* We are about to drop our additional submission reference, which
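
[Editorial note] Summarizing the submission-side changes to fs/iomap/direct-io.c: IOMAP_DIO_COMP_WORK is now set whenever completion cannot safely run in the current context. A checklist-style sketch of those conditions (editorial; the real checks are spread across iomap_dio_bio_iter(), __iomap_dio_rw() and iomap_dio_done() above):

/*
 * Submission-side conditions that force workqueue completion after this
 * series. Completion-side conditions (I/O errors, pages present in the
 * page cache) are handled in iomap_dio_done(), see the earlier sketch.
 */
static bool iomap_dio_submit_needs_comp_work_sketch(bool mapped_overwrite,
						    bool extends_i_size,
						    bool need_sync,
						    bool write_through)
{
	if (!mapped_overwrite)		/* zeroing, unwritten/shared/new extents */
		return true;
	if (extends_i_size)		/* i_size update needs process context */
		return true;
	if (need_sync && !write_through)
		return true;		/* cache flush needed at completion */
	return false;
}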
diff --git a/include/linux/fs.h b/include/linux/fs.h
index c895146c1444..e210d2d8af53 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -367,23 +367,9 @@ struct readahead_control;
#define IOCB_NOIO (1 << 20)
/* can use bio alloc cache */
#define IOCB_ALLOC_CACHE (1 << 21)
-/*
- * IOCB_DIO_CALLER_COMP can be set by the iocb owner, to indicate that the
- * iocb completion can be passed back to the owner for execution from a safe
- * context rather than needing to be punted through a workqueue. If this
- * flag is set, the bio completion handling may set iocb->dio_complete to a
- * handler function and iocb->private to context information for that handler.
- * The issuer should call the handler with that context information from task
- * context to complete the processing of the iocb. Note that while this
- * provides a task context for the dio_complete() callback, it should only be
- * used on the completion side for non-IO generating completions. It's fine to
- * call blocking functions from this callback, but they should not wait for
- * unrelated IO (like cache flushing, new IO generation, etc).
- */
-#define IOCB_DIO_CALLER_COMP (1 << 22)
/* kiocb is a read or write operation submitted by fs/aio.c. */
-#define IOCB_AIO_RW (1 << 23)
-#define IOCB_HAS_METADATA (1 << 24)
+#define IOCB_AIO_RW (1 << 22)
+#define IOCB_HAS_METADATA (1 << 23)
/* for use in trace events */
#define TRACE_IOCB_STRINGS \
@@ -400,7 +386,6 @@ struct readahead_control;
{ IOCB_WAITQ, "WAITQ" }, \
{ IOCB_NOIO, "NOIO" }, \
{ IOCB_ALLOC_CACHE, "ALLOC_CACHE" }, \
- { IOCB_DIO_CALLER_COMP, "CALLER_COMP" }, \
{ IOCB_AIO_RW, "AIO_RW" }, \
{ IOCB_HAS_METADATA, "AIO_HAS_METADATA" }
@@ -412,23 +397,13 @@ struct kiocb {
int ki_flags;
u16 ki_ioprio; /* See linux/ioprio.h */
u8 ki_write_stream;
- union {
- /*
- * Only used for async buffered reads, where it denotes the
- * page waitqueue associated with completing the read. Valid
- * IFF IOCB_WAITQ is set.
- */
- struct wait_page_queue *ki_waitq;
- /*
- * Can be used for O_DIRECT IO, where the completion handling
- * is punted back to the issuer of the IO. May only be set
- * if IOCB_DIO_CALLER_COMP is set by the issuer, and the issuer
- * must then check for presence of this handler when ki_complete
- * is invoked. The data passed in to this handler must be
- * assigned to ->private when dio_complete is assigned.
- */
- ssize_t (*dio_complete)(void *data);
- };
+
+ /*
+ * Only used for async buffered reads, where it denotes the page
+ * waitqueue associated with completing the read.
+ * Valid IFF IOCB_WAITQ is set.
+ */
+ struct wait_page_queue *ki_waitq;
};
static inline bool is_sync_kiocb(struct kiocb *kiocb)
diff --git a/io_uring/rw.c b/io_uring/rw.c
index 08882648d569..4d0ab8f50d14 100644
--- a/io_uring/rw.c
+++ b/io_uring/rw.c
@@ -277,7 +277,6 @@ static int __io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe,
} else {
rw->kiocb.ki_ioprio = get_current_ioprio();
}
- rw->kiocb.dio_complete = NULL;
rw->kiocb.ki_flags = 0;
rw->kiocb.ki_write_stream = READ_ONCE(sqe->write_stream);
@@ -566,15 +565,6 @@ static inline int io_fixup_rw_res(struct io_kiocb *req, long res)
void io_req_rw_complete(struct io_kiocb *req, io_tw_token_t tw)
{
- struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
- struct kiocb *kiocb = &rw->kiocb;
-
- if ((kiocb->ki_flags & IOCB_DIO_CALLER_COMP) && kiocb->dio_complete) {
- long res = kiocb->dio_complete(rw->kiocb.private);
-
- io_req_set_res(req, io_fixup_rw_res(req, res), 0);
- }
-
io_req_io_end(req);
if (req->flags & (REQ_F_BUFFER_SELECTED|REQ_F_BUFFER_RING))
@@ -589,10 +579,8 @@ static void io_complete_rw(struct kiocb *kiocb, long res)
struct io_rw *rw = container_of(kiocb, struct io_rw, kiocb);
struct io_kiocb *req = cmd_to_io_kiocb(rw);
- if (!kiocb->dio_complete || !(kiocb->ki_flags & IOCB_DIO_CALLER_COMP)) {
- __io_complete_rw_common(req, res);
- io_req_set_res(req, io_fixup_rw_res(req, res), 0);
- }
+ __io_complete_rw_common(req, res);
+ io_req_set_res(req, io_fixup_rw_res(req, res), 0);
req->io_task_work.func = io_req_rw_complete;
__io_req_task_work_add(req, IOU_F_TWQ_LAZY_WAKE);
}