author     Kairui Song <kasong@tencent.com>  2025-08-12 01:20:17 +0800
committer  Andrew Morton <akpm@linux-foundation.org>  2025-09-13 16:54:49 -0700
commit     27763edac9288bbb35a9feecb82652de04e637fd
tree       9af7c647bdfef9faa68942874e899b3ab53b9c1b
parent     7bca1760cd86b9ef62d4c2baf168b68a708011bd
mm/mincore, swap: consolidate swap cache checking for mincore
Patch series "mm/mincore: minor clean up for swap cache checking".

This series cleans up a swap cache helper that is only used by mincore,
moving it back into the mincore code.  It also separates the swap cache
logic from the shmem / page cache logic in mincore.  With this series we
have fewer lines of code and better performance.

Before this series:
mincore on a swapped-out 16G anon mmap range: Took 488220 us
mincore on a 16G shmem mmap range: Took 530272 us

After this series:
mincore on a swapped-out 16G anon mmap range: Took 446763 us
mincore on a 16G shmem mmap range: Took 460496 us

About ~10% faster.

This patch (of 2):

The filemap_get_incore_folio (previously find_get_incore_page) helper
was introduced by commit 61ef18655704 ("mm: factor find_get_incore_page
out of mincore_page") to be used by the later commit f5df8635c5a3 ("mm:
use find_get_incore_page in memcontrol"), so the memory cgroup charge
move code could be simplified.  But commit 6b611388b626 ("memcg-v1:
remove charge move code") removed that user completely; the helper is
now only used by mincore.

So this commit basically reverts commit 61ef18655704 ("mm: factor
find_get_incore_page out of mincore_page") and moves the helper back to
the mincore side to simplify the code.

Link: https://lkml.kernel.org/r/20250811172018.48901-1-ryncsn@gmail.com
Link: https://lkml.kernel.org/r/20250811172018.48901-2-ryncsn@gmail.com
Signed-off-by: Kairui Song <kasong@tencent.com>
Acked-by: Nhat Pham <nphamcs@gmail.com>
Cc: Baoquan He <bhe@redhat.com>
Cc: Barry Song <baohua@kernel.org>
Cc: Chris Li <chrisl@kernel.org>
Cc: David Hildenbrand <david@redhat.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Jann Horn <jannh@google.com>
Cc: Kemeng Shi <shikemeng@huaweicloud.com>
Cc: Liam Howlett <liam.howlett@oracle.com>
Cc: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
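The numbers above come from timing a single mincore(2) call over a large
mapping.  A minimal userspace sketch of such a measurement, assuming a
64-bit system with ample free swap and MADV_PAGEOUT support (kernel 5.4
or later); the 16G size matches the commit message, everything else here
is illustrative and not part of the patch:

/* Hypothetical benchmark: time mincore(2) over a swapped-out
 * anonymous mapping.  Build: cc -O2 -o mincore-bench bench.c
 */
#define _GNU_SOURCE
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/time.h>
#include <unistd.h>

int main(void)
{
	size_t len = 16UL << 30;	/* 16G, as in the commit message */
	long page = sysconf(_SC_PAGESIZE);
	struct timeval t0, t1;
	unsigned char *vec;
	char *buf;

	buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
		   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (buf == MAP_FAILED) { perror("mmap"); return 1; }

	memset(buf, 1, len);		/* fault every page in */
	/* Push the range out to swap; needs MADV_PAGEOUT and enough
	 * free swap, otherwise the pages simply stay resident. */
	if (madvise(buf, len, MADV_PAGEOUT))
		perror("madvise(MADV_PAGEOUT)");

	vec = malloc(len / page);	/* one status byte per page */
	gettimeofday(&t0, NULL);
	if (mincore(buf, len, vec)) { perror("mincore"); return 1; }
	gettimeofday(&t1, NULL);

	printf("mincore took %ld us\n",
	       (t1.tv_sec - t0.tv_sec) * 1000000L +
	       (t1.tv_usec - t0.tv_usec));
	return 0;
}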
-rw-r--r--  mm/mincore.c     29
-rw-r--r--  mm/swap.h        10
-rw-r--r--  mm/swap_state.c  38
3 files changed, 27 insertions, 50 deletions
diff --git a/mm/mincore.c b/mm/mincore.c
index 10dabefc3acc..20fd0967d3cb 100644
--- a/mm/mincore.c
+++ b/mm/mincore.c
@@ -64,8 +64,33 @@ static unsigned char mincore_page(struct address_space *mapping, pgoff_t index)
* any other file mapping (ie. marked !present and faulted in with
* tmpfs's .fault). So swapped out tmpfs mappings are tested here.
*/
- folio = filemap_get_incore_folio(mapping, index);
- if (!IS_ERR(folio)) {
+ if (IS_ENABLED(CONFIG_SWAP) && shmem_mapping(mapping)) {
+ folio = filemap_get_entry(mapping, index);
+ /*
+ * shmem/tmpfs may return swap: account for swapcache
+ * page too.
+ */
+ if (xa_is_value(folio)) {
+ struct swap_info_struct *si;
+ swp_entry_t swp = radix_to_swp_entry(folio);
+ /* There might be swapin error entries in shmem mapping. */
+ if (non_swap_entry(swp))
+ return 0;
+ /* Prevent the swap device from being swapped off under us */
+ si = get_swap_device(swp);
+ if (si) {
+ folio = filemap_get_folio(swap_address_space(swp),
+ swap_cache_index(swp));
+ put_swap_device(si);
+ } else {
+ return 0;
+ }
+ }
+ } else {
+ folio = filemap_get_folio(mapping, index);
+ }
+
+ if (!IS_ERR_OR_NULL(folio)) {
present = folio_test_uptodate(folio);
folio_put(folio);
}
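
The shmem branch above exists because a swapped-out tmpfs page does not
vanish from its mapping: the mapping keeps a swap entry (an xarray value
entry), and mincore(2) must translate that entry and probe the swap cache
before deciding whether to report the page as resident.  The user-visible
contract can be exercised from userspace with memfd_create(2); the sketch
below is a hypothetical illustration, not part of the patch:

/* Hypothetical demo: mincore(2) residency on a shmem (memfd) page.
 * A freshly dirtied tmpfs page is reported resident; once reclaimed
 * to swap it is only reported resident while the folio sits in the
 * swap cache, which is exactly the lookup the new branch performs.
 * Build: cc -O2 -o shmem-mincore demo.c  (glibc >= 2.27)
 */
#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	long page = sysconf(_SC_PAGESIZE);
	unsigned char vec;
	char *p;
	int fd;

	fd = memfd_create("mincore-demo", 0);
	if (fd < 0) { perror("memfd_create"); return 1; }
	if (ftruncate(fd, page)) { perror("ftruncate"); return 1; }

	p = mmap(NULL, page, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (p == MAP_FAILED) { perror("mmap"); return 1; }

	memset(p, 1, page);		/* dirty the tmpfs page */
	if (mincore(p, page, &vec)) { perror("mincore"); return 1; }
	printf("resident: %d\n", vec & 1);	/* expected: 1 */

	munmap(p, page);
	close(fd);
	return 0;
}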
diff --git a/mm/swap.h b/mm/swap.h
index 911ad5ff0f89..1ae44d4193b1 100644
--- a/mm/swap.h
+++ b/mm/swap.h
@@ -64,9 +64,6 @@ void clear_shadow_from_swap_cache(int type, unsigned long begin,
void swapcache_clear(struct swap_info_struct *si, swp_entry_t entry, int nr);
struct folio *swap_cache_get_folio(swp_entry_t entry,
struct vm_area_struct *vma, unsigned long addr);
-struct folio *filemap_get_incore_folio(struct address_space *mapping,
- pgoff_t index);
-
struct folio *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
struct vm_area_struct *vma, unsigned long addr,
struct swap_iocb **plug);
@@ -178,13 +175,6 @@ static inline struct folio *swap_cache_get_folio(swp_entry_t entry,
return NULL;
}
-static inline
-struct folio *filemap_get_incore_folio(struct address_space *mapping,
- pgoff_t index)
-{
- return filemap_get_folio(mapping, index);
-}
-
static inline void *get_shadow_from_swap_cache(swp_entry_t entry)
{
return NULL;
diff --git a/mm/swap_state.c b/mm/swap_state.c
index c354435a0923..99513b74b5d8 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -323,44 +323,6 @@ struct folio *swap_cache_get_folio(swp_entry_t entry,
return folio;
}
-/**
- * filemap_get_incore_folio - Find and get a folio from the page or swap caches.
- * @mapping: The address_space to search.
- * @index: The page cache index.
- *
- * This differs from filemap_get_folio() in that it will also look for the
- * folio in the swap cache.
- *
- * Return: The found folio or %NULL.
- */
-struct folio *filemap_get_incore_folio(struct address_space *mapping,
- pgoff_t index)
-{
- swp_entry_t swp;
- struct swap_info_struct *si;
- struct folio *folio = filemap_get_entry(mapping, index);
-
- if (!folio)
- return ERR_PTR(-ENOENT);
- if (!xa_is_value(folio))
- return folio;
- if (!shmem_mapping(mapping))
- return ERR_PTR(-ENOENT);
-
- swp = radix_to_swp_entry(folio);
- /* There might be swapin error entries in shmem mapping. */
- if (non_swap_entry(swp))
- return ERR_PTR(-ENOENT);
- /* Prevent swapoff from happening to us */
- si = get_swap_device(swp);
- if (!si)
- return ERR_PTR(-ENOENT);
- index = swap_cache_index(swp);
- folio = filemap_get_folio(swap_address_space(swp), index);
- put_swap_device(si);
- return folio;
-}
-
struct folio *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
struct mempolicy *mpol, pgoff_t ilx, bool *new_page_allocated,
bool skip_if_exists)