author     Joanne Koong <joannelkoong@gmail.com>  2025-11-18 13:11:11 -0800
committer  Christian Brauner <brauner@kernel.org>  2025-11-25 10:22:19 +0100
commit     d7ff85d4b899e02b4b8a8ca9f44f54a06aee1b4d (patch)
tree       f42fdbcff8eb8c7432c993ce8125ecf8d1a536d1 /fs
parent     5ec58e6acdecb0c2b1707002123883fe1da29a98 (diff)
iomap: fix iomap_read_end() for already uptodate folios
There are some cases where, by the time iomap_read_end() is called, the
folio has already been marked uptodate. For example, if the iomap block
needed zeroing, the folio may have been marked uptodate after the
zeroing. In these cases iomap_read_end() should unlock the folio instead
of calling folio_end_read(), which is how they were handled prior to
commit f8eaf79406fe ("iomap: simplify ->read_folio_range() error
handling for reads"). Calling folio_end_read() on an already-uptodate
folio is buggy: the uptodate flag is applied with an XOR, so "setting"
it on a folio that already has it toggles the folio back to
non-uptodate.

Fixes: f8eaf79406fe ("iomap: simplify ->read_folio_range() error handling for reads")
Signed-off-by: Joanne Koong <joannelkoong@gmail.com>
Link: https://patch.msgid.link/20251118211111.1027272-2-joannelkoong@gmail.com
Tested-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reported-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Christian Brauner <brauner@kernel.org>
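For illustration only, and as a sketch rather than the kernel implementation:
folio_end_read() clears the lock bit and sets the uptodate bit with a single
XOR of the folio flags, so "setting" uptodate on a folio whose bit is already
set toggles it off again. A minimal userspace model of that XOR behavior (the
bit positions below are illustrative, not the real page flag values):

#include <stdio.h>

#define PG_locked   (1UL << 0)  /* illustrative bit positions */
#define PG_uptodate (1UL << 1)

int main(void)
{
	/* folio was zeroed and already marked uptodate, still locked */
	unsigned long flags = PG_locked | PG_uptodate;

	/* end-of-read path: one XOR meant to "unlock + mark uptodate" */
	flags ^= PG_locked | PG_uptodate;

	/* the uptodate bit has been toggled off, not set */
	printf("uptodate after end_read: %s\n",
	       (flags & PG_uptodate) ? "yes" : "no (bug)");
	return 0;
}

This is why the fix below takes the plain folio_unlock() path when the folio
is already uptodate instead of going through folio_end_read().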
Diffstat (limited to 'fs')
-rw-r--r--  fs/iomap/buffered-io.c  37
1 file changed, 19 insertions(+), 18 deletions(-)
diff --git a/fs/iomap/buffered-io.c b/fs/iomap/buffered-io.c
index 089566a36cff..f68fc6ac15e0 100644
--- a/fs/iomap/buffered-io.c
+++ b/fs/iomap/buffered-io.c
@@ -458,25 +458,26 @@ static void iomap_read_end(struct folio *folio, size_t bytes_submitted)
spin_lock_irq(&ifs->state_lock);
if (!ifs->read_bytes_pending) {
WARN_ON_ONCE(bytes_submitted);
- end_read = true;
- } else {
- /*
- * Subtract any bytes that were initially accounted to
- * read_bytes_pending but skipped for IO. The +1
- * accounts for the bias we added in iomap_read_init().
- */
- size_t bytes_not_submitted = folio_size(folio) + 1 -
- bytes_submitted;
- ifs->read_bytes_pending -= bytes_not_submitted;
- /*
- * If !ifs->read_bytes_pending, this means all pending
- * reads by the IO helper have already completed, which
- * means we need to end the folio read here. If
- * ifs->read_bytes_pending != 0, the IO helper will end
- * the folio read.
- */
- end_read = !ifs->read_bytes_pending;
+ spin_unlock_irq(&ifs->state_lock);
+ folio_unlock(folio);
+ return;
}
+
+ /*
+ * Subtract any bytes that were initially accounted to
+ * read_bytes_pending but skipped for IO. The +1 accounts for
+ * the bias we added in iomap_read_init().
+ */
+ ifs->read_bytes_pending -=
+ (folio_size(folio) + 1 - bytes_submitted);
+
+ /*
+ * If !ifs->read_bytes_pending, this means all pending reads by
+ * the IO helper have already completed, which means we need to
+ * end the folio read here. If ifs->read_bytes_pending != 0,
+ * the IO helper will end the folio read.
+ */
+ end_read = !ifs->read_bytes_pending;
if (end_read)
uptodate = ifs_is_fully_uptodate(folio, ifs);
spin_unlock_irq(&ifs->state_lock);
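
As a follow-on note, here is a toy model of the read_bytes_pending accounting
that the comments in the hunk above describe. This is a sketch under the
stated assumptions only, not the iomap code; the 1024-byte submitted range is
made up for the example:

#include <stdio.h>

int main(void)
{
	size_t folio_size = 4096;
	size_t bytes_submitted = 1024;   /* one range actually sent for IO */

	/* iomap_read_init(): bias the counter by folio_size + 1 so the
	 * read cannot complete while ranges are still being submitted */
	size_t read_bytes_pending = folio_size + 1;

	/* iomap_read_end(): drop the bias plus the bytes skipped for IO */
	read_bytes_pending -= folio_size + 1 - bytes_submitted;
	printf("after iomap_read_end: %zu pending\n", read_bytes_pending);

	/* IO completion: the helper subtracts the submitted bytes and,
	 * on reaching zero, ends the folio read */
	read_bytes_pending -= bytes_submitted;
	printf("after IO completion: %zu pending -> end folio read\n",
	       read_bytes_pending);
	return 0;
}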