path: root/mm
author    Lorenzo Stoakes <lorenzo.stoakes@oracle.com>    2025-11-10 22:21:20 +0000
committer Andrew Morton <akpm@linux-foundation.org>       2025-11-24 15:08:50 -0800
commit    68aa2fdbf57f769e552f472ddb762aba028a207e (patch)
tree      92d35788666246103dec44f20867c675110ff694 /mm
parent    c093cf451094a9a03c4d4929bc30122a53038b7b (diff)
mm: introduce leaf entry type and use to simplify leaf entry logic
The kernel maintains leaf page table entries which contain either:

- Nothing ('none' entries)
- Present entries*
- Everything else that will cause a fault which the kernel handles

* Present entries are either entries the hardware can navigate without page fault or special cases like NUMA hint protnone or PMD with cleared present bit which contain hardware-valid entries modulo the present bit.

In the 'everything else' group we include swap entries, but we also include a number of other things such as migration entries, device private entries and marker entries.

Unfortunately this 'everything else' group expresses everything through a swp_entry_t type, and these entries are referred to as swap entries even though they may well not contain a... swap entry.

This is compounded by the rather mind-boggling concept of a non-swap swap entry (checked via non_swap_entry()) and the means by which we twist and turn to satisfy this.

This patch lays the foundation for reducing this confusion. We refer to 'everything else' as a 'software-defined leaf entry', or 'softleaf' for short. And in fact we scoop up the 'none' entries into this concept also, so we are left with:

- Present entries.
- Softleaf entries (which may be empty).

This allows for radical simplification across the board - one can simply convert any leaf page table entry to a leaf entry via softleaf_from_pte(). If the entry is present, we return an empty leaf entry, so it is assumed the caller is aware that they must differentiate between the two categories of page table entries, checking for the former via pte_present().

As a result, we can eliminate a number of places where we would otherwise need to use predicates to see if we can proceed with leaf page table entry conversion and instead just go ahead and do it unconditionally. We do so where we can, adjusting surrounding logic as necessary to integrate the new softleaf_t logic as far as seems reasonable at this stage.

We typedef swp_entry_t to softleaf_t for the time being until the conversion can be completed, meaning everything remains compatible regardless of which type is used. We will eventually remove swp_entry_t when the conversion is complete.

We introduce a new header file to keep things clear - leafops.h - this imports swapops.h so it can directly replace swapops imports without issue, and we do so in all the files that require it.

Additionally, add the new leafops.h file to the core mm MAINTAINERS entry.
Link: https://lkml.kernel.org/r/c879383aac77d96a03e4d38f7daba893cd35fc76.1762812360.git.lorenzo.stoakes@oracle.com
Signed-off-by: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
Acked-by: Zi Yan <ziy@nvidia.com>
Reviewed-by: Vlastimil Babka <vbabka@suse.cz>
Cc: Alexander Gordeev <agordeev@linux.ibm.com>
Cc: Alistair Popple <apopple@nvidia.com>
Cc: Al Viro <viro@zeniv.linux.org.uk>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Axel Rasmussen <axelrasmussen@google.com>
Cc: Baolin Wang <baolin.wang@linux.alibaba.com>
Cc: Baoquan He <bhe@redhat.com>
Cc: Barry Song <baohua@kernel.org>
Cc: Byungchul Park <byungchul@sk.com>
Cc: Chengming Zhou <chengming.zhou@linux.dev>
Cc: Chris Li <chrisl@kernel.org>
Cc: Christian Borntraeger <borntraeger@linux.ibm.com>
Cc: Christian Brauner <brauner@kernel.org>
Cc: Claudio Imbrenda <imbrenda@linux.ibm.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: Dev Jain <dev.jain@arm.com>
Cc: Gerald Schaefer <gerald.schaefer@linux.ibm.com>
Cc: Gregory Price <gourry@gourry.net>
Cc: Heiko Carstens <hca@linux.ibm.com>
Cc: "Huang, Ying" <ying.huang@linux.alibaba.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Jan Kara <jack@suse.cz>
Cc: Jann Horn <jannh@google.com>
Cc: Janosch Frank <frankja@linux.ibm.com>
Cc: Jason Gunthorpe <jgg@ziepe.ca>
Cc: Joshua Hahn <joshua.hahnjy@gmail.com>
Cc: Kairui Song <kasong@tencent.com>
Cc: Kemeng Shi <shikemeng@huaweicloud.com>
Cc: Lance Yang <lance.yang@linux.dev>
Cc: Leon Romanovsky <leon@kernel.org>
Cc: Liam Howlett <liam.howlett@oracle.com>
Cc: Mathew Brost <matthew.brost@intel.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Miaohe Lin <linmiaohe@huawei.com>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Mike Rapoport <rppt@kernel.org>
Cc: Muchun Song <muchun.song@linux.dev>
Cc: Naoya Horiguchi <nao.horiguchi@gmail.com>
Cc: Nhat Pham <nphamcs@gmail.com>
Cc: Nico Pache <npache@redhat.com>
Cc: Oscar Salvador <osalvador@suse.de>
Cc: Pasha Tatashin <pasha.tatashin@soleen.com>
Cc: Peter Xu <peterx@redhat.com>
Cc: Rakie Kim <rakie.kim@sk.com>
Cc: Rik van Riel <riel@surriel.com>
Cc: Ryan Roberts <ryan.roberts@arm.com>
Cc: SeongJae Park <sj@kernel.org>
Cc: Suren Baghdasaryan <surenb@google.com>
Cc: Sven Schnelle <svens@linux.ibm.com>
Cc: Vasily Gorbik <gor@linux.ibm.com>
Cc: Wei Xu <weixugc@google.com>
Cc: xu xin <xu.xin16@zte.com.cn>
Cc: Yuanchu Xie <yuanchu@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
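To make the taxonomy above concrete, here is a minimal standalone C model of the two-category scheme the message describes. It is illustrative only: softleaf_from_pte() and softleaf_t are the real names this patch introduces (in the new leafops.h), but the *_model types, the bit layout and the kind encoding below are invented for the sketch and do not reflect the kernel's actual arch-specific swap encoding.

    #include <stdbool.h>
    #include <stdio.h>

    typedef unsigned long pte_model_t;  /* stand-in for the kernel's pte_t */

    /* Kinds of software-defined leaf ("softleaf") entry; illustrative subset. */
    enum softleaf_kind {
            SOFTLEAF_NONE,              /* empty: none PTEs and all present PTEs */
            SOFTLEAF_SWAP,              /* a genuine swap entry */
            SOFTLEAF_MIGRATION,
            SOFTLEAF_DEVICE_PRIVATE,
            SOFTLEAF_MARKER,            /* uffd-wp, guard and poison markers */
    };

    typedef struct {
            enum softleaf_kind kind;
            unsigned long val;
    } softleaf_model_t;

    static bool pte_present_model(pte_model_t pte)
    {
            return pte & 1;             /* assumed: bit 0 is the present bit */
    }

    /*
     * Any leaf PTE converts unconditionally; present entries (which the
     * caller is expected to have handled via pte_present()) simply decode
     * to the empty softleaf, as the commit message describes.
     */
    static softleaf_model_t softleaf_from_pte_model(pte_model_t pte)
    {
            if (pte == 0 || pte_present_model(pte))
                    return (softleaf_model_t){ .kind = SOFTLEAF_NONE };
            return (softleaf_model_t){
                    .kind = (enum softleaf_kind)((pte >> 1) & 0x7), /* assumed layout */
                    .val = pte >> 4,
            };
    }

    int main(void)
    {
            const pte_model_t present = 0x1001;                    /* present bit set */
            const pte_model_t swap = (SOFTLEAF_SWAP << 1) | (42UL << 4);

            printf("present PTE -> kind %d (empty)\n",
                   softleaf_from_pte_model(present).kind);
            printf("swap PTE    -> kind %d, offset %lu\n",
                   softleaf_from_pte_model(swap).kind,
                   softleaf_from_pte_model(swap).val);
            return 0;
    }

The point of the model is the first branch: conversion needs no predicate guard, because present entries land in the empty bucket rather than being an error.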
Diffstat (limited to 'mm')
-rw-r--r--  mm/hmm.c              2
-rw-r--r--  mm/hugetlb.c         37
-rw-r--r--  mm/madvise.c         16
-rw-r--r--  mm/memory.c          41
-rw-r--r--  mm/mincore.c          6
-rw-r--r--  mm/mprotect.c         6
-rw-r--r--  mm/mremap.c           4
-rw-r--r--  mm/page_vma_mapped.c 11
-rw-r--r--  mm/shmem.c            7
-rw-r--r--  mm/userfaultfd.c      6
10 files changed, 68 insertions, 68 deletions
diff --git a/mm/hmm.c b/mm/hmm.c
index 387a38bbaf6a..e350d0cc9d41 100644
--- a/mm/hmm.c
+++ b/mm/hmm.c
@@ -249,7 +249,7 @@ static int hmm_vma_handle_pte(struct mm_walk *walk, unsigned long addr,
* that will be correctly handled, so we need only check for UFFD WP
* here.
*/
- if (pte_none(pte) || pte_marker_uffd_wp(pte)) {
+ if (pte_none(pte) || pte_is_uffd_wp_marker(pte)) {
required_fault =
hmm_pte_need_fault(hmm_vma_walk, pfn_req_flags, 0);
if (required_fault)
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 96c991f54f7a..12853cdefc9b 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -26,7 +26,7 @@
#include <linux/string_choices.h>
#include <linux/string_helpers.h>
#include <linux/swap.h>
-#include <linux/swapops.h>
+#include <linux/leafops.h>
#include <linux/jhash.h>
#include <linux/numa.h>
#include <linux/llist.h>
@@ -4956,17 +4956,17 @@ again:
entry = huge_pte_clear_uffd_wp(entry);
set_huge_pte_at(dst, addr, dst_pte, entry, sz);
} else if (unlikely(is_hugetlb_entry_migration(entry))) {
- swp_entry_t swp_entry = pte_to_swp_entry(entry);
+ softleaf_t softleaf = softleaf_from_pte(entry);
bool uffd_wp = pte_swp_uffd_wp(entry);
- if (!is_readable_migration_entry(swp_entry) && cow) {
+ if (!is_readable_migration_entry(softleaf) && cow) {
/*
* COW mappings require pages in both
* parent and child to be set to read.
*/
- swp_entry = make_readable_migration_entry(
- swp_offset(swp_entry));
- entry = swp_entry_to_pte(swp_entry);
+ softleaf = make_readable_migration_entry(
+ swp_offset(softleaf));
+ entry = swp_entry_to_pte(softleaf);
if (userfaultfd_wp(src_vma) && uffd_wp)
entry = pte_swp_mkuffd_wp(entry);
set_huge_pte_at(src, addr, src_pte, entry, sz);
@@ -4974,9 +4974,9 @@ again:
if (!userfaultfd_wp(dst_vma))
entry = huge_pte_clear_uffd_wp(entry);
set_huge_pte_at(dst, addr, dst_pte, entry, sz);
- } else if (unlikely(is_pte_marker(entry))) {
- pte_marker marker = copy_pte_marker(
- pte_to_swp_entry(entry), dst_vma);
+ } else if (unlikely(pte_is_marker(entry))) {
+ const softleaf_t softleaf = softleaf_from_pte(entry);
+ const pte_marker marker = copy_pte_marker(softleaf, dst_vma);
if (marker)
set_huge_pte_at(dst, addr, dst_pte,
@@ -5092,7 +5092,7 @@ static void move_huge_pte(struct vm_area_struct *vma, unsigned long old_addr,
pte = huge_ptep_get_and_clear(mm, old_addr, src_pte, sz);
- if (need_clear_uffd_wp && pte_marker_uffd_wp(pte))
+ if (need_clear_uffd_wp && pte_is_uffd_wp_marker(pte))
huge_pte_clear(mm, new_addr, dst_pte, sz);
else {
if (need_clear_uffd_wp) {
@@ -5911,7 +5911,7 @@ static vm_fault_t hugetlb_no_page(struct address_space *mapping,
* If this pte was previously wr-protected, keep it wr-protected even
* if populated.
*/
- if (unlikely(pte_marker_uffd_wp(vmf->orig_pte)))
+ if (unlikely(pte_is_uffd_wp_marker(vmf->orig_pte)))
new_pte = huge_pte_mkuffd_wp(new_pte);
set_huge_pte_at(mm, vmf->address, vmf->pte, new_pte, huge_page_size(h));
@@ -6044,9 +6044,9 @@ vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
*/
return hugetlb_no_page(mapping, &vmf);
- if (is_pte_marker(vmf.orig_pte)) {
+ if (pte_is_marker(vmf.orig_pte)) {
const pte_marker marker =
- pte_marker_get(pte_to_swp_entry(vmf.orig_pte));
+ softleaf_to_marker(softleaf_from_pte(vmf.orig_pte));
if (marker & PTE_MARKER_POISONED) {
ret = VM_FAULT_HWPOISON_LARGE |
@@ -6374,7 +6374,7 @@ int hugetlb_mfill_atomic_pte(pte_t *dst_pte,
* See comment about UFFD marker overwriting in
* mfill_atomic_install_pte().
*/
- if (!huge_pte_none(dst_ptep) && !is_uffd_pte_marker(dst_ptep))
+ if (!huge_pte_none(dst_ptep) && !pte_is_uffd_marker(dst_ptep))
goto out_release_unlock;
if (folio_in_pagecache)
@@ -6495,8 +6495,9 @@ long hugetlb_change_protection(struct vm_area_struct *vma,
if (unlikely(is_hugetlb_entry_hwpoisoned(pte))) {
/* Nothing to do. */
} else if (unlikely(is_hugetlb_entry_migration(pte))) {
- swp_entry_t entry = pte_to_swp_entry(pte);
- struct folio *folio = pfn_swap_entry_folio(entry);
+ softleaf_t entry = softleaf_from_pte(pte);
+
+ struct folio *folio = softleaf_to_folio(entry);
pte_t newpte = pte;
if (is_writable_migration_entry(entry)) {
@@ -6516,14 +6517,14 @@ long hugetlb_change_protection(struct vm_area_struct *vma,
newpte = pte_swp_clear_uffd_wp(newpte);
if (!pte_same(pte, newpte))
set_huge_pte_at(mm, address, ptep, newpte, psize);
- } else if (unlikely(is_pte_marker(pte))) {
+ } else if (unlikely(pte_is_marker(pte))) {
/*
* Do nothing on a poison marker; page is
* corrupted, permissions do not apply. Here
* pte_marker_uffd_wp()==true implies !poison
* because they're mutual exclusive.
*/
- if (pte_marker_uffd_wp(pte) && uffd_wp_resolve)
+ if (pte_is_uffd_wp_marker(pte) && uffd_wp_resolve)
/* Safe to modify directly (non-present->none). */
huge_pte_clear(mm, address, ptep, psize);
} else if (!huge_pte_none(pte)) {
diff --git a/mm/madvise.c b/mm/madvise.c
index 2a165e9beb5b..c8381b954235 100644
--- a/mm/madvise.c
+++ b/mm/madvise.c
@@ -29,7 +29,7 @@
#include <linux/backing-dev.h>
#include <linux/pagewalk.h>
#include <linux/swap.h>
-#include <linux/swapops.h>
+#include <linux/leafops.h>
#include <linux/shmem_fs.h>
#include <linux/mmu_notifier.h>
@@ -690,17 +690,16 @@ static int madvise_free_pte_range(pmd_t *pmd, unsigned long addr,
* (page allocation + zeroing).
*/
if (!pte_present(ptent)) {
- swp_entry_t entry;
+ softleaf_t entry = softleaf_from_pte(ptent);
- entry = pte_to_swp_entry(ptent);
- if (!non_swap_entry(entry)) {
+ if (softleaf_is_swap(entry)) {
max_nr = (end - addr) / PAGE_SIZE;
nr = swap_pte_batch(pte, max_nr, ptent);
nr_swap -= nr;
free_swap_and_cache_nr(entry, nr);
clear_not_present_full_ptes(mm, addr, pte, nr, tlb->fullmm);
- } else if (is_hwpoison_entry(entry) ||
- is_poisoned_swp_entry(entry)) {
+ } else if (softleaf_is_hwpoison(entry) ||
+ softleaf_is_poison_marker(entry)) {
pte_clear_not_present_full(mm, addr, pte, tlb->fullmm);
}
continue;
@@ -1071,8 +1070,9 @@ static bool is_valid_guard_vma(struct vm_area_struct *vma, bool allow_locked)
static bool is_guard_pte_marker(pte_t ptent)
{
- return is_swap_pte(ptent) &&
- is_guard_swp_entry(pte_to_swp_entry(ptent));
+ const softleaf_t entry = softleaf_from_pte(ptent);
+
+ return softleaf_is_guard_marker(entry);
}
static int guard_install_pud_entry(pud_t *pud, unsigned long addr,
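The is_guard_pte_marker() hunk above shows the payoff of the empty-softleaf convention: the is_swap_pte() pre-check disappears because present (and none) PTEs decode to an empty entry that fails every marker predicate. Below is a tiny self-contained model of that reasoning, with an invented GUARD bit encoding; the kernel's real predicate is softleaf_is_guard_marker() from leafops.h.

    #include <assert.h>
    #include <stdbool.h>

    typedef unsigned long pte_model_t;
    typedef unsigned long softleaf_model_t;     /* 0 == the empty entry */

    #define GUARD_MARKER_BIT (1UL << 2)         /* assumed encoding */

    static bool pte_present_model(pte_model_t pte)
    {
            return pte & 1;                     /* assumed present bit */
    }

    /* Present PTEs collapse to the empty softleaf, none PTEs are already 0. */
    static softleaf_model_t softleaf_from_pte_model(pte_model_t pte)
    {
            return pte_present_model(pte) ? 0 : pte;
    }

    /* After the patch: a single predicate, no is_swap_pte()-style guard. */
    static bool is_guard_marker_model(pte_model_t pte)
    {
            return softleaf_from_pte_model(pte) & GUARD_MARKER_BIT;
    }

    int main(void)
    {
            assert(!is_guard_marker_model(0));                /* none PTE */
            assert(!is_guard_marker_model(0x5));              /* present PTE */
            assert(is_guard_marker_model(GUARD_MARKER_BIT));  /* guard marker */
            return 0;
    }

The same collapse is what lets the memory.c and page_vma_mapped.c hunks below convert first and classify afterwards.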
diff --git a/mm/memory.c b/mm/memory.c
index 732414852570..0caf8c5c8c68 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -60,7 +60,7 @@
#include <linux/writeback.h>
#include <linux/memcontrol.h>
#include <linux/mmu_notifier.h>
-#include <linux/swapops.h>
+#include <linux/leafops.h>
#include <linux/elf.h>
#include <linux/gfp.h>
#include <linux/migrate.h>
@@ -109,7 +109,7 @@ static __always_inline bool vmf_orig_pte_uffd_wp(struct vm_fault *vmf)
if (!(vmf->flags & FAULT_FLAG_ORIG_PTE_VALID))
return false;
- return pte_marker_uffd_wp(vmf->orig_pte);
+ return pte_is_uffd_wp_marker(vmf->orig_pte);
}
/*
@@ -927,10 +927,10 @@ copy_nonpresent_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
{
vm_flags_t vm_flags = dst_vma->vm_flags;
pte_t orig_pte = ptep_get(src_pte);
+ softleaf_t entry = softleaf_from_pte(orig_pte);
pte_t pte = orig_pte;
struct folio *folio;
struct page *page;
- swp_entry_t entry = pte_to_swp_entry(orig_pte);
if (likely(!non_swap_entry(entry))) {
if (swap_duplicate(entry) < 0)
@@ -1016,7 +1016,7 @@ copy_nonpresent_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
if (try_restore_exclusive_pte(src_vma, addr, src_pte, orig_pte))
return -EBUSY;
return -ENOENT;
- } else if (is_pte_marker_entry(entry)) {
+ } else if (softleaf_is_marker(entry)) {
pte_marker marker = copy_pte_marker(entry, dst_vma);
if (marker)
@@ -1711,14 +1711,14 @@ static inline int zap_nonpresent_ptes(struct mmu_gather *tlb,
unsigned int max_nr, unsigned long addr,
struct zap_details *details, int *rss, bool *any_skipped)
{
- swp_entry_t entry;
+ softleaf_t entry;
int nr = 1;
*any_skipped = true;
- entry = pte_to_swp_entry(ptent);
- if (is_device_private_entry(entry) ||
- is_device_exclusive_entry(entry)) {
- struct page *page = pfn_swap_entry_to_page(entry);
+ entry = softleaf_from_pte(ptent);
+ if (softleaf_is_device_private(entry) ||
+ softleaf_is_device_exclusive(entry)) {
+ struct page *page = softleaf_to_page(entry);
struct folio *folio = page_folio(page);
if (unlikely(!should_zap_folio(details, folio)))
@@ -1733,7 +1733,7 @@ static inline int zap_nonpresent_ptes(struct mmu_gather *tlb,
rss[mm_counter(folio)]--;
folio_remove_rmap_pte(folio, page, vma);
folio_put(folio);
- } else if (!non_swap_entry(entry)) {
+ } else if (softleaf_is_swap(entry)) {
/* Genuine swap entries, hence a private anon pages */
if (!should_zap_cows(details))
return 1;
@@ -1741,20 +1741,20 @@ static inline int zap_nonpresent_ptes(struct mmu_gather *tlb,
nr = swap_pte_batch(pte, max_nr, ptent);
rss[MM_SWAPENTS] -= nr;
free_swap_and_cache_nr(entry, nr);
- } else if (is_migration_entry(entry)) {
- struct folio *folio = pfn_swap_entry_folio(entry);
+ } else if (softleaf_is_migration(entry)) {
+ struct folio *folio = softleaf_to_folio(entry);
if (!should_zap_folio(details, folio))
return 1;
rss[mm_counter(folio)]--;
- } else if (pte_marker_entry_uffd_wp(entry)) {
+ } else if (softleaf_is_uffd_wp_marker(entry)) {
/*
* For anon: always drop the marker; for file: only
* drop the marker if explicitly requested.
*/
if (!vma_is_anonymous(vma) && !zap_drop_markers(details))
return 1;
- } else if (is_guard_swp_entry(entry)) {
+ } else if (softleaf_is_guard_marker(entry)) {
/*
* Ordinary zapping should not remove guard PTE
* markers. Only do so if we should remove PTE markers
@@ -1762,7 +1762,8 @@ static inline int zap_nonpresent_ptes(struct mmu_gather *tlb,
*/
if (!zap_drop_markers(details))
return 1;
- } else if (is_hwpoison_entry(entry) || is_poisoned_swp_entry(entry)) {
+ } else if (softleaf_is_hwpoison(entry) ||
+ softleaf_is_poison_marker(entry)) {
if (!should_zap_cows(details))
return 1;
} else {
@@ -4380,7 +4381,7 @@ static vm_fault_t pte_marker_clear(struct vm_fault *vmf)
*
* This should also cover the case where e.g. the pte changed
* quickly from a PTE_MARKER_UFFD_WP into PTE_MARKER_POISONED.
- * So is_pte_marker() check is not enough to safely drop the pte.
+ * So pte_is_marker() check is not enough to safely drop the pte.
*/
if (pte_same(vmf->orig_pte, ptep_get(vmf->pte)))
pte_clear(vmf->vma->vm_mm, vmf->address, vmf->pte);
@@ -4414,8 +4415,8 @@ static vm_fault_t pte_marker_handle_uffd_wp(struct vm_fault *vmf)
static vm_fault_t handle_pte_marker(struct vm_fault *vmf)
{
- swp_entry_t entry = pte_to_swp_entry(vmf->orig_pte);
- unsigned long marker = pte_marker_get(entry);
+ const softleaf_t entry = softleaf_from_pte(vmf->orig_pte);
+ const pte_marker marker = softleaf_to_marker(entry);
/*
* PTE markers should never be empty. If anything weird happened,
@@ -4432,7 +4433,7 @@ static vm_fault_t handle_pte_marker(struct vm_fault *vmf)
if (marker & PTE_MARKER_GUARD)
return VM_FAULT_SIGSEGV;
- if (pte_marker_entry_uffd_wp(entry))
+ if (softleaf_is_uffd_wp_marker(entry))
return pte_marker_handle_uffd_wp(vmf);
/* This is an unknown pte marker */
@@ -4680,7 +4681,7 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
}
} else if (is_hwpoison_entry(entry)) {
ret = VM_FAULT_HWPOISON;
- } else if (is_pte_marker_entry(entry)) {
+ } else if (softleaf_is_marker(entry)) {
ret = handle_pte_marker(vmf);
} else {
print_bad_pte(vma, vmf->address, vmf->orig_pte, NULL);
diff --git a/mm/mincore.c b/mm/mincore.c
index fb80becd6119..b3682488a65d 100644
--- a/mm/mincore.c
+++ b/mm/mincore.c
@@ -14,7 +14,7 @@
#include <linux/mman.h>
#include <linux/syscalls.h>
#include <linux/swap.h>
-#include <linux/swapops.h>
+#include <linux/leafops.h>
#include <linux/shmem_fs.h>
#include <linux/hugetlb.h>
#include <linux/pgtable.h>
@@ -42,7 +42,7 @@ static int mincore_hugetlb(pte_t *pte, unsigned long hmask, unsigned long addr,
} else {
const pte_t ptep = huge_ptep_get(walk->mm, addr, pte);
- if (huge_pte_none(ptep) || is_pte_marker(ptep))
+ if (huge_pte_none(ptep) || pte_is_marker(ptep))
present = 0;
else
present = 1;
@@ -187,7 +187,7 @@ static int mincore_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
step = 1;
/* We need to do cache lookup too for markers */
- if (pte_none(pte) || is_pte_marker(pte))
+ if (pte_none(pte) || pte_is_marker(pte))
__mincore_unmapped_range(addr, addr + PAGE_SIZE,
vma, vec);
else if (pte_present(pte)) {
diff --git a/mm/mprotect.c b/mm/mprotect.c
index db93d3bb1a5e..918a64cc6033 100644
--- a/mm/mprotect.c
+++ b/mm/mprotect.c
@@ -326,14 +326,14 @@ static long change_pte_range(struct mmu_gather *tlb,
newpte = swp_entry_to_pte(entry);
if (pte_swp_uffd_wp(oldpte))
newpte = pte_swp_mkuffd_wp(newpte);
- } else if (is_pte_marker_entry(entry)) {
+ } else if (softleaf_is_marker(entry)) {
/*
* Ignore error swap entries unconditionally,
* because any access should sigbus/sigsegv
* anyway.
*/
- if (is_poisoned_swp_entry(entry) ||
- is_guard_swp_entry(entry))
+ if (softleaf_is_poison_marker(entry) ||
+ softleaf_is_guard_marker(entry))
continue;
/*
* If this is uffd-wp pte marker and we'd like
diff --git a/mm/mremap.c b/mm/mremap.c
index 8ad06cf50783..7c21b2ad13f6 100644
--- a/mm/mremap.c
+++ b/mm/mremap.c
@@ -17,7 +17,7 @@
#include <linux/swap.h>
#include <linux/capability.h>
#include <linux/fs.h>
-#include <linux/swapops.h>
+#include <linux/leafops.h>
#include <linux/highmem.h>
#include <linux/security.h>
#include <linux/syscalls.h>
@@ -288,7 +288,7 @@ static int move_ptes(struct pagetable_move_control *pmc,
pte = move_pte(pte, old_addr, new_addr);
pte = move_soft_dirty_pte(pte);
- if (need_clear_uffd_wp && pte_marker_uffd_wp(pte))
+ if (need_clear_uffd_wp && pte_is_uffd_wp_marker(pte))
pte_clear(mm, new_addr, new_ptep);
else {
if (need_clear_uffd_wp) {
diff --git a/mm/page_vma_mapped.c b/mm/page_vma_mapped.c
index 137ce27ff68c..be20468fb5a9 100644
--- a/mm/page_vma_mapped.c
+++ b/mm/page_vma_mapped.c
@@ -3,7 +3,7 @@
#include <linux/rmap.h>
#include <linux/hugetlb.h>
#include <linux/swap.h>
-#include <linux/swapops.h>
+#include <linux/leafops.h>
#include "internal.h"
@@ -107,15 +107,12 @@ static bool check_pte(struct page_vma_mapped_walk *pvmw, unsigned long pte_nr)
pte_t ptent = ptep_get(pvmw->pte);
if (pvmw->flags & PVMW_MIGRATION) {
- swp_entry_t entry;
- if (!is_swap_pte(ptent))
- return false;
- entry = pte_to_swp_entry(ptent);
+ const softleaf_t entry = softleaf_from_pte(ptent);
- if (!is_migration_entry(entry))
+ if (!softleaf_is_migration(entry))
return false;
- pfn = swp_offset_pfn(entry);
+ pfn = softleaf_to_pfn(entry);
} else if (is_swap_pte(ptent)) {
swp_entry_t entry;
diff --git a/mm/shmem.c b/mm/shmem.c
index 6580f3cd24bb..395ca58ac4a5 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -66,7 +66,7 @@ static struct vfsmount *shm_mnt __ro_after_init;
#include <linux/falloc.h>
#include <linux/splice.h>
#include <linux/security.h>
-#include <linux/swapops.h>
+#include <linux/leafops.h>
#include <linux/mempolicy.h>
#include <linux/namei.h>
#include <linux/ctype.h>
@@ -2286,7 +2286,8 @@ static int shmem_swapin_folio(struct inode *inode, pgoff_t index,
struct address_space *mapping = inode->i_mapping;
struct mm_struct *fault_mm = vma ? vma->vm_mm : NULL;
struct shmem_inode_info *info = SHMEM_I(inode);
- swp_entry_t swap, index_entry;
+ swp_entry_t swap;
+ softleaf_t index_entry;
struct swap_info_struct *si;
struct folio *folio = NULL;
bool skip_swapcache = false;
@@ -2298,7 +2299,7 @@ static int shmem_swapin_folio(struct inode *inode, pgoff_t index,
swap = index_entry;
*foliop = NULL;
- if (is_poisoned_swp_entry(index_entry))
+ if (softleaf_is_poison_marker(index_entry))
return -EIO;
si = get_swap_device(index_entry);
diff --git a/mm/userfaultfd.c b/mm/userfaultfd.c
index cc4ce205bbec..055ec1050776 100644
--- a/mm/userfaultfd.c
+++ b/mm/userfaultfd.c
@@ -10,7 +10,7 @@
#include <linux/pagemap.h>
#include <linux/rmap.h>
#include <linux/swap.h>
-#include <linux/swapops.h>
+#include <linux/leafops.h>
#include <linux/userfaultfd_k.h>
#include <linux/mmu_notifier.h>
#include <linux/hugetlb.h>
@@ -208,7 +208,7 @@ int mfill_atomic_install_pte(pmd_t *dst_pmd,
* MISSING|WP registered, we firstly wr-protect a none pte which has no
* page cache page backing it, then access the page.
*/
- if (!pte_none(dst_ptep) && !is_uffd_pte_marker(dst_ptep))
+ if (!pte_none(dst_ptep) && !pte_is_uffd_marker(dst_ptep))
goto out_unlock;
if (page_in_cache) {
@@ -590,7 +590,7 @@ retry:
if (!uffd_flags_mode_is(flags, MFILL_ATOMIC_CONTINUE)) {
const pte_t ptep = huge_ptep_get(dst_mm, dst_addr, dst_pte);
- if (!huge_pte_none(ptep) && !is_uffd_pte_marker(ptep)) {
+ if (!huge_pte_none(ptep) && !pte_is_uffd_marker(ptep)) {
err = -EEXIST;
hugetlb_vma_unlock_read(dst_vma);
mutex_unlock(&hugetlb_fault_mutex_table[hash]);