| author | Lorenzo Stoakes <lorenzo.stoakes@oracle.com> | 2025-11-10 22:21:33 +0000 |
|---|---|---|
| committer | Andrew Morton <akpm@linux-foundation.org> | 2025-11-24 15:08:52 -0800 |
| commit | 93976a20345b4aff1ac7598ec1223d65ca33d49c | |
| tree | 3a5dde127c5342d889182b4d642ea520bc8bab7c /mm | |
| parent | 03bfbc3ad6e496fb576ca9ace08211943232fdf9 | |
mm: eliminate further swapops predicates
Having converted so much of the code base to software leaf entries, we can
mop up some remaining cases.
We replace is_pfn_swap_entry(), pfn_swap_entry_to_page(),
is_writable_device_private_entry(), is_device_exclusive_entry(),
is_migration_entry(), is_writable_migration_entry(),
is_readable_migration_entry(), swp_offset_pfn() and pfn_swap_entry_folio()
with softleaf equivalents.
No functional change intended.
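
To make the pattern concrete, here is a minimal sketch of the before/after shape of these conversions. The wrapper function and the <linux/leafops.h> include are illustrative assumptions, not part of the patch; the softleaf_*() calls themselves are the ones used throughout the diff below.

```c
#include <linux/leafops.h>	/* assumed location of the softleaf_*() helpers */

/*
 * Hypothetical example, not part of this patch. The old swapops style:
 *
 *	swp_entry_t entry = pte_to_swp_entry(pte);
 *	if (is_migration_entry(entry))
 *		page = pfn_swap_entry_to_page(entry);
 *
 * becomes the softleaf equivalent: decode the non-present PTE once into
 * a softleaf_t, then apply the softleaf predicates and accessors.
 */
static struct page *example_migration_page(pte_t pte)
{
	const softleaf_t entry = softleaf_from_pte(pte);

	if (softleaf_is_migration(entry))
		return softleaf_to_page(entry);

	return NULL;
}
```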
Link: https://lkml.kernel.org/r/956bc9c031604811c0070d2f4bf2f1373f230213.1762812360.git.lorenzo.stoakes@oracle.com
Signed-off-by: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
Cc: Alexander Gordeev <agordeev@linux.ibm.com>
Cc: Alistair Popple <apopple@nvidia.com>
Cc: Al Viro <viro@zeniv.linux.org.uk>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Axel Rasmussen <axelrasmussen@google.com>
Cc: Baolin Wang <baolin.wang@linux.alibaba.com>
Cc: Baoquan He <bhe@redhat.com>
Cc: Barry Song <baohua@kernel.org>
Cc: Byungchul Park <byungchul@sk.com>
Cc: Chengming Zhou <chengming.zhou@linux.dev>
Cc: Chris Li <chrisl@kernel.org>
Cc: Christian Borntraeger <borntraeger@linux.ibm.com>
Cc: Christian Brauner <brauner@kernel.org>
Cc: Claudio Imbrenda <imbrenda@linux.ibm.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: Dev Jain <dev.jain@arm.com>
Cc: Gerald Schaefer <gerald.schaefer@linux.ibm.com>
Cc: Gregory Price <gourry@gourry.net>
Cc: Heiko Carstens <hca@linux.ibm.com>
Cc: "Huang, Ying" <ying.huang@linux.alibaba.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Jan Kara <jack@suse.cz>
Cc: Jann Horn <jannh@google.com>
Cc: Janosch Frank <frankja@linux.ibm.com>
Cc: Jason Gunthorpe <jgg@ziepe.ca>
Cc: Joshua Hahn <joshua.hahnjy@gmail.com>
Cc: Kairui Song <kasong@tencent.com>
Cc: Kemeng Shi <shikemeng@huaweicloud.com>
Cc: Lance Yang <lance.yang@linux.dev>
Cc: Leon Romanovsky <leon@kernel.org>
Cc: Liam Howlett <liam.howlett@oracle.com>
Cc: Matthew Brost <matthew.brost@intel.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Miaohe Lin <linmiaohe@huawei.com>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Mike Rapoport <rppt@kernel.org>
Cc: Muchun Song <muchun.song@linux.dev>
Cc: Naoya Horiguchi <nao.horiguchi@gmail.com>
Cc: Nhat Pham <nphamcs@gmail.com>
Cc: Nico Pache <npache@redhat.com>
Cc: Oscar Salvador <osalvador@suse.de>
Cc: Pasha Tatashin <pasha.tatashin@soleen.com>
Cc: Peter Xu <peterx@redhat.com>
Cc: Rakie Kim <rakie.kim@sk.com>
Cc: Rik van Riel <riel@surriel.com>
Cc: Ryan Roberts <ryan.roberts@arm.com>
Cc: SeongJae Park <sj@kernel.org>
Cc: Suren Baghdasaryan <surenb@google.com>
Cc: Sven Schnelle <svens@linux.ibm.com>
Cc: Vasily Gorbik <gor@linux.ibm.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Wei Xu <weixugc@google.com>
Cc: xu xin <xu.xin16@zte.com.cn>
Cc: Yuanchu Xie <yuanchu@google.com>
Cc: Zi Yan <ziy@nvidia.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Diffstat (limited to 'mm')
| -rw-r--r-- | mm/debug_vm_pgtable.c | 20 |
| -rw-r--r-- | mm/hmm.c | 2 |
| -rw-r--r-- | mm/hugetlb.c | 2 |
| -rw-r--r-- | mm/ksm.c | 6 |
| -rw-r--r-- | mm/memory-failure.c | 6 |
| -rw-r--r-- | mm/memory.c | 3 |
| -rw-r--r-- | mm/mempolicy.c | 4 |
| -rw-r--r-- | mm/migrate.c | 6 |
| -rw-r--r-- | mm/migrate_device.c | 10 |
| -rw-r--r-- | mm/mprotect.c | 8 |
| -rw-r--r-- | mm/page_vma_mapped.c | 8 |
| -rw-r--r-- | mm/pagewalk.c | 7 |
| -rw-r--r-- | mm/rmap.c | 9 |
13 files changed, 48 insertions, 43 deletions
diff --git a/mm/debug_vm_pgtable.c b/mm/debug_vm_pgtable.c
index 608d1011ce03..64db85a80558 100644
--- a/mm/debug_vm_pgtable.c
+++ b/mm/debug_vm_pgtable.c
@@ -844,7 +844,7 @@ static void __init pmd_softleaf_tests(struct pgtable_debug_args *args) { }
 static void __init swap_migration_tests(struct pgtable_debug_args *args)
 {
 	struct page *page;
-	swp_entry_t swp;
+	softleaf_t entry;
 
 	if (!IS_ENABLED(CONFIG_MIGRATION))
 		return;
@@ -867,17 +867,17 @@ static void __init swap_migration_tests(struct pgtable_debug_args *args)
 	 * be locked, otherwise it stumbles upon a BUG_ON().
 	 */
 	__SetPageLocked(page);
-	swp = make_writable_migration_entry(page_to_pfn(page));
-	WARN_ON(!is_migration_entry(swp));
-	WARN_ON(!is_writable_migration_entry(swp));
+	entry = make_writable_migration_entry(page_to_pfn(page));
+	WARN_ON(!softleaf_is_migration(entry));
+	WARN_ON(!softleaf_is_migration_write(entry));
 
-	swp = make_readable_migration_entry(swp_offset(swp));
-	WARN_ON(!is_migration_entry(swp));
-	WARN_ON(is_writable_migration_entry(swp));
+	entry = make_readable_migration_entry(swp_offset(entry));
+	WARN_ON(!softleaf_is_migration(entry));
+	WARN_ON(softleaf_is_migration_write(entry));
 
-	swp = make_readable_migration_entry(page_to_pfn(page));
-	WARN_ON(!is_migration_entry(swp));
-	WARN_ON(is_writable_migration_entry(swp));
+	entry = make_readable_migration_entry(page_to_pfn(page));
+	WARN_ON(!softleaf_is_migration(entry));
+	WARN_ON(softleaf_is_migration_write(entry));
 
 	__ClearPageLocked(page);
 }
diff --git a/mm/hmm.c b/mm/hmm.c
--- a/mm/hmm.c
+++ b/mm/hmm.c
@@ -270,7 +270,7 @@ static int hmm_vma_handle_pte(struct mm_walk *walk, unsigned long addr,
 			cpu_flags = HMM_PFN_VALID;
 			if (softleaf_is_device_private_write(entry))
 				cpu_flags |= HMM_PFN_WRITE;
-			new_pfn_flags = swp_offset_pfn(entry) | cpu_flags;
+			new_pfn_flags = softleaf_to_pfn(entry) | cpu_flags;
 			goto out;
 		}
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 311c5d601310..9e7815b4f058 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -4934,7 +4934,7 @@ again:
 		} else if (unlikely(softleaf_is_migration(softleaf))) {
 			bool uffd_wp = pte_swp_uffd_wp(entry);
 
-			if (!is_readable_migration_entry(softleaf) && cow) {
+			if (!softleaf_is_migration_read(softleaf) && cow) {
 				/*
 				 * COW mappings require pages in both
 				 * parent and child to be set to read.
diff --git a/mm/ksm.c b/mm/ksm.c
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -632,14 +632,14 @@ static int break_ksm_pmd_entry(pmd_t *pmdp, unsigned long addr, unsigned long en
 	if (pte_present(pte)) {
 		folio = vm_normal_folio(walk->vma, addr, pte);
 	} else if (!pte_none(pte)) {
-		swp_entry_t entry = pte_to_swp_entry(pte);
+		const softleaf_t entry = softleaf_from_pte(pte);
 
 		/*
 		 * As KSM pages remain KSM pages until freed, no need to wait
 		 * here for migration to end.
 		 */
-		if (is_migration_entry(entry))
-			folio = pfn_swap_entry_folio(entry);
+		if (softleaf_is_migration(entry))
+			folio = softleaf_to_folio(entry);
 	}
 	/* return 1 if the page is an normal ksm page or KSM-placed zero page */
 	found = (folio && folio_test_ksm(folio)) ||
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index 1f7fb9bf287a..71652cfedcdf 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -693,10 +693,10 @@ static int check_hwpoisoned_entry(pte_t pte, unsigned long addr, short shift,
 	if (pte_present(pte)) {
 		pfn = pte_pfn(pte);
 	} else {
-		swp_entry_t swp = pte_to_swp_entry(pte);
+		const softleaf_t entry = softleaf_from_pte(pte);
 
-		if (is_hwpoison_entry(swp))
-			pfn = swp_offset_pfn(swp);
+		if (softleaf_is_hwpoison(entry))
+			pfn = softleaf_to_pfn(entry);
 	}
 
 	if (!pfn || pfn != poisoned_pfn)
diff --git a/mm/memory.c b/mm/memory.c
index a3f001a47ecf..525da4479228 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -902,7 +902,8 @@ static void restore_exclusive_pte(struct vm_area_struct *vma,
 static int try_restore_exclusive_pte(struct vm_area_struct *vma,
 		unsigned long addr, pte_t *ptep, pte_t orig_pte)
 {
-	struct page *page = pfn_swap_entry_to_page(pte_to_swp_entry(orig_pte));
+	const softleaf_t entry = softleaf_from_pte(orig_pte);
+	struct page *page = softleaf_to_page(entry);
 	struct folio *folio = page_folio(page);
 
 	if (folio_trylock(folio)) {
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index dee95d5ecfd4..acb9bf89f619 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -705,7 +705,9 @@ static int queue_folios_pte_range(pmd_t *pmd, unsigned long addr,
 		if (pte_none(ptent))
 			continue;
 		if (!pte_present(ptent)) {
-			if (is_migration_entry(pte_to_swp_entry(ptent)))
+			const softleaf_t entry = softleaf_from_pte(ptent);
+
+			if (softleaf_is_migration(entry))
 				qp->nr_failed++;
 			continue;
 		}
diff --git a/mm/migrate.c b/mm/migrate.c
index 5edfd0b2f63d..c39dfea1a925 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -483,7 +483,7 @@ void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
 	spinlock_t *ptl;
 	pte_t *ptep;
 	pte_t pte;
-	swp_entry_t entry;
+	softleaf_t entry;
 
 	ptep = pte_offset_map_lock(mm, pmd, address, &ptl);
 	if (!ptep)
@@ -495,8 +495,8 @@ void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
 	if (pte_none(pte) || pte_present(pte))
 		goto out;
 
-	entry = pte_to_swp_entry(pte);
-	if (!is_migration_entry(entry))
+	entry = softleaf_from_pte(pte);
+	if (!softleaf_is_migration(entry))
 		goto out;
 
 	migration_entry_wait_on_locked(entry, ptl);
diff --git a/mm/migrate_device.c b/mm/migrate_device.c
index 592b4561507c..b1ce6e3478d6 100644
--- a/mm/migrate_device.c
+++ b/mm/migrate_device.c
@@ -279,7 +279,7 @@ again:
 		unsigned long mpfn = 0, pfn;
 		struct folio *folio;
 		struct page *page;
-		swp_entry_t entry;
+		softleaf_t entry;
 		pte_t pte;
 
 		pte = ptep_get(ptep);
@@ -298,11 +298,11 @@ again:
 			 * page table entry. Other special swap entries are not
 			 * migratable, and we ignore regular swapped page.
 			 */
-			entry = pte_to_swp_entry(pte);
-			if (!is_device_private_entry(entry))
+			entry = softleaf_from_pte(pte);
+			if (!softleaf_is_device_private(entry))
 				goto next;
 
-			page = pfn_swap_entry_to_page(entry);
+			page = softleaf_to_page(entry);
 			pgmap = page_pgmap(page);
 			if (!(migrate->flags &
 			      MIGRATE_VMA_SELECT_DEVICE_PRIVATE) ||
@@ -330,7 +330,7 @@ again:
 			mpfn = migrate_pfn(page_to_pfn(page)) |
 					MIGRATE_PFN_MIGRATE;
-			if (is_writable_device_private_entry(entry))
+			if (softleaf_is_device_private_write(entry))
 				mpfn |= MIGRATE_PFN_WRITE;
 		} else {
 			pfn = pte_pfn(pte);
diff --git a/mm/mprotect.c b/mm/mprotect.c
index f910cbf41442..283889e4f1ce 100644
--- a/mm/mprotect.c
+++ b/mm/mprotect.c
@@ -317,11 +317,11 @@ static long change_pte_range(struct mmu_gather *tlb,
 				pages++;
 			}
 		} else {
-			swp_entry_t entry = pte_to_swp_entry(oldpte);
+			softleaf_t entry = softleaf_from_pte(oldpte);
 			pte_t newpte;
 
-			if (is_writable_migration_entry(entry)) {
-				struct folio *folio = pfn_swap_entry_folio(entry);
+			if (softleaf_is_migration_write(entry)) {
+				const struct folio *folio = softleaf_to_folio(entry);
 
 				/*
 				 * A protection check is difficult so
@@ -335,7 +335,7 @@ static long change_pte_range(struct mmu_gather *tlb,
 				newpte = swp_entry_to_pte(entry);
 				if (pte_swp_soft_dirty(oldpte))
 					newpte = pte_swp_mksoft_dirty(newpte);
-			} else if (is_writable_device_private_entry(entry)) {
+			} else if (softleaf_is_device_private_write(entry)) {
 				/*
 				 * We do not preserve soft-dirtiness. See
 				 * copy_nonpresent_pte() for explanation.
diff --git a/mm/page_vma_mapped.c b/mm/page_vma_mapped.c
index 8137d2366722..b38a1d00c971 100644
--- a/mm/page_vma_mapped.c
+++ b/mm/page_vma_mapped.c
@@ -49,7 +49,7 @@ again:
 			if (is_migration)
 				return false;
 		} else if (!is_migration) {
-			swp_entry_t entry;
+			softleaf_t entry;
 
 			/*
 			 * Handle un-addressable ZONE_DEVICE memory.
@@ -67,9 +67,9 @@ again:
 			 * For more details on device private memory see HMM
 			 * (include/linux/hmm.h or mm/hmm.c).
 			 */
-			entry = pte_to_swp_entry(ptent);
-			if (!is_device_private_entry(entry) &&
-			    !is_device_exclusive_entry(entry))
+			entry = softleaf_from_pte(ptent);
+			if (!softleaf_is_device_private(entry) &&
+			    !softleaf_is_device_exclusive(entry))
 				return false;
 		}
 		spin_lock(*ptlp);
diff --git a/mm/pagewalk.c b/mm/pagewalk.c
index 378c774795fc..90cc346a6ecf 100644
--- a/mm/pagewalk.c
+++ b/mm/pagewalk.c
@@ -1007,11 +1007,10 @@ pte_table:
 			goto found;
 		}
 	} else if (!pte_none(pte)) {
-		swp_entry_t entry = pte_to_swp_entry(pte);
+		const softleaf_t entry = softleaf_from_pte(pte);
 
-		if ((flags & FW_MIGRATION) &&
-		    is_migration_entry(entry)) {
-			page = pfn_swap_entry_to_page(entry);
+		if ((flags & FW_MIGRATION) && softleaf_is_migration(entry)) {
+			page = softleaf_to_page(entry);
 			expose_page = false;
 			goto found;
 		}
diff --git a/mm/rmap.c b/mm/rmap.c
index 775710115a41..345466ad396b 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1969,7 +1969,7 @@ static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma,
 		if (likely(pte_present(pteval))) {
 			pfn = pte_pfn(pteval);
 		} else {
-			pfn = swp_offset_pfn(pte_to_swp_entry(pteval));
+			pfn = softleaf_to_pfn(pte_to_swp_entry(pteval));
 			VM_WARN_ON_FOLIO(folio_test_hugetlb(folio), folio);
 		}
 
@@ -2368,7 +2368,7 @@ static bool try_to_migrate_one(struct folio *folio, struct vm_area_struct *vma,
 		if (likely(pte_present(pteval))) {
 			pfn = pte_pfn(pteval);
 		} else {
-			pfn = swp_offset_pfn(pte_to_swp_entry(pteval));
+			pfn = softleaf_to_pfn(pte_to_swp_entry(pteval));
 			VM_WARN_ON_FOLIO(folio_test_hugetlb(folio), folio);
 		}
 
@@ -2453,8 +2453,11 @@ static bool try_to_migrate_one(struct folio *folio, struct vm_area_struct *vma,
 				folio_mark_dirty(folio);
 			writable = pte_write(pteval);
 		} else {
+			const softleaf_t entry = softleaf_from_pte(pteval);
+
 			pte_clear(mm, address, pvmw.pte);
-			writable = is_writable_device_private_entry(pte_to_swp_entry(pteval));
+
+			writable = softleaf_is_device_private_write(entry);
 		}
 
 		VM_WARN_ON_FOLIO(writable && folio_test_anon(folio) &&