Diffstat (limited to 'mm')
 mm/debug_vm_pgtable.c | 20
 mm/hmm.c              |  2
 mm/hugetlb.c          |  2
 mm/ksm.c              |  6
 mm/memory-failure.c   |  6
 mm/memory.c           |  3
 mm/mempolicy.c        |  4
 mm/migrate.c          |  6
 mm/migrate_device.c   | 10
 mm/mprotect.c         |  8
 mm/page_vma_mapped.c  |  8
 mm/pagewalk.c         |  7
 mm/rmap.c             |  9
 13 files changed, 48 insertions(+), 43 deletions(-)
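
The conversion below is largely mechanical: each raw swp_entry_t helper is replaced by its softleaf_t counterpart. The mapping, distilled from the hunks that follow (the softleaf helper definitions belong to the wider series, not to this diff), is:

/*
 * Old swp_entry_t API                      New softleaf_t API
 *
 * pte_to_swp_entry(pte)                ->  softleaf_from_pte(pte)
 * is_migration_entry(e)                ->  softleaf_is_migration(e)
 * is_writable_migration_entry(e)       ->  softleaf_is_migration_write(e)
 * is_readable_migration_entry(e)       ->  softleaf_is_migration_read(e)
 * is_device_private_entry(e)           ->  softleaf_is_device_private(e)
 * is_writable_device_private_entry(e)  ->  softleaf_is_device_private_write(e)
 * is_device_exclusive_entry(e)         ->  softleaf_is_device_exclusive(e)
 * is_hwpoison_entry(e)                 ->  softleaf_is_hwpoison(e)
 * swp_offset_pfn(e)                    ->  softleaf_to_pfn(e)
 * pfn_swap_entry_to_page(e)            ->  softleaf_to_page(e)
 * pfn_swap_entry_folio(e)              ->  softleaf_to_folio(e)
 */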
diff --git a/mm/debug_vm_pgtable.c b/mm/debug_vm_pgtable.c
index 608d1011ce03..64db85a80558 100644
--- a/mm/debug_vm_pgtable.c
+++ b/mm/debug_vm_pgtable.c
@@ -844,7 +844,7 @@ static void __init pmd_softleaf_tests(struct pgtable_debug_args *args) { }
static void __init swap_migration_tests(struct pgtable_debug_args *args)
{
struct page *page;
- swp_entry_t swp;
+ softleaf_t entry;
if (!IS_ENABLED(CONFIG_MIGRATION))
return;
@@ -867,17 +867,17 @@ static void __init swap_migration_tests(struct pgtable_debug_args *args)
* be locked, otherwise it stumbles upon a BUG_ON().
*/
__SetPageLocked(page);
- swp = make_writable_migration_entry(page_to_pfn(page));
- WARN_ON(!is_migration_entry(swp));
- WARN_ON(!is_writable_migration_entry(swp));
+ entry = make_writable_migration_entry(page_to_pfn(page));
+ WARN_ON(!softleaf_is_migration(entry));
+ WARN_ON(!softleaf_is_migration_write(entry));
- swp = make_readable_migration_entry(swp_offset(swp));
- WARN_ON(!is_migration_entry(swp));
- WARN_ON(is_writable_migration_entry(swp));
+ entry = make_readable_migration_entry(swp_offset(entry));
+ WARN_ON(!softleaf_is_migration(entry));
+ WARN_ON(softleaf_is_migration_write(entry));
- swp = make_readable_migration_entry(page_to_pfn(page));
- WARN_ON(!is_migration_entry(swp));
- WARN_ON(is_writable_migration_entry(swp));
+ entry = make_readable_migration_entry(page_to_pfn(page));
+ WARN_ON(!softleaf_is_migration(entry));
+ WARN_ON(softleaf_is_migration_write(entry));
__ClearPageLocked(page);
}
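
In short, the test round-trips a pfn through the migration-entry constructors and checks that the new predicates track writability. The asserted invariants, condensed (a sketch assuming the helper signatures implied by the hunk):

	softleaf_t w = make_writable_migration_entry(pfn);
	softleaf_t r = make_readable_migration_entry(swp_offset(w));

	WARN_ON(!softleaf_is_migration(w) || !softleaf_is_migration_write(w));
	WARN_ON(!softleaf_is_migration(r) || softleaf_is_migration_write(r));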
diff --git a/mm/hmm.c b/mm/hmm.c
index 0158f2d1e027..3912d92a2b9a 100644
--- a/mm/hmm.c
+++ b/mm/hmm.c
@@ -270,7 +270,7 @@ static int hmm_vma_handle_pte(struct mm_walk *walk, unsigned long addr,
cpu_flags = HMM_PFN_VALID;
if (softleaf_is_device_private_write(entry))
cpu_flags |= HMM_PFN_WRITE;
- new_pfn_flags = swp_offset_pfn(entry) | cpu_flags;
+ new_pfn_flags = softleaf_to_pfn(entry) | cpu_flags;
goto out;
}
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 311c5d601310..9e7815b4f058 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -4934,7 +4934,7 @@ again:
} else if (unlikely(softleaf_is_migration(softleaf))) {
bool uffd_wp = pte_swp_uffd_wp(entry);
- if (!is_readable_migration_entry(softleaf) && cow) {
+ if (!softleaf_is_migration_read(softleaf) && cow) {
/*
* COW mappings require pages in both
* parent and child to be set to read.
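
The comment states the rule being enforced: on fork() of a COW mapping, a writable migration entry must be downgraded so that neither parent nor child keeps write access. The remainder of this branch is elided by the hunk; a sketch of what the downgrade plausibly looks like, assuming it mirrors the mprotect.c pattern later in this diff:

	/* Assumption: the elided code follows the usual downgrade pattern. */
	softleaf = make_readable_migration_entry(swp_offset(softleaf));
	entry = swp_entry_to_pte(softleaf);
	if (uffd_wp)
		entry = pte_swp_mkuffd_wp(entry);
	/* ... both parent and child PTEs are then set from 'entry' ... */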
diff --git a/mm/ksm.c b/mm/ksm.c
index f9a1a3658ead..cfc182255c7b 100644
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -632,14 +632,14 @@ static int break_ksm_pmd_entry(pmd_t *pmdp, unsigned long addr, unsigned long en
if (pte_present(pte)) {
folio = vm_normal_folio(walk->vma, addr, pte);
} else if (!pte_none(pte)) {
- swp_entry_t entry = pte_to_swp_entry(pte);
+ const softleaf_t entry = softleaf_from_pte(pte);
/*
* As KSM pages remain KSM pages until freed, no need to wait
* here for migration to end.
*/
- if (is_migration_entry(entry))
- folio = pfn_swap_entry_folio(entry);
+ if (softleaf_is_migration(entry))
+ folio = softleaf_to_folio(entry);
}
/* return 1 if the page is a normal ksm page or KSM-placed zero page */
found = (folio && folio_test_ksm(folio)) ||
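
The comment carries the key insight: a KSM page stays a KSM page until it is freed, so the walker may inspect the folio behind a migration entry without waiting for migration to complete. As a standalone sketch (peek_migration_folio() is a hypothetical name, not part of this patch):

	/* Hypothetical helper: peek at the folio behind a non-present PTE. */
	static struct folio *peek_migration_folio(pte_t pte)
	{
		const softleaf_t entry = softleaf_from_pte(pte);

		/* Only migration entries carry a usable folio here. */
		if (!softleaf_is_migration(entry))
			return NULL;
		return softleaf_to_folio(entry);
	}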
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index 1f7fb9bf287a..71652cfedcdf 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -693,10 +693,10 @@ static int check_hwpoisoned_entry(pte_t pte, unsigned long addr, short shift,
if (pte_present(pte)) {
pfn = pte_pfn(pte);
} else {
- swp_entry_t swp = pte_to_swp_entry(pte);
+ const softleaf_t entry = softleaf_from_pte(pte);
- if (is_hwpoison_entry(swp))
- pfn = swp_offset_pfn(swp);
+ if (softleaf_is_hwpoison(entry))
+ pfn = softleaf_to_pfn(entry);
}
if (!pfn || pfn != poisoned_pfn)
diff --git a/mm/memory.c b/mm/memory.c
index a3f001a47ecf..525da4479228 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -902,7 +902,8 @@ static void restore_exclusive_pte(struct vm_area_struct *vma,
static int try_restore_exclusive_pte(struct vm_area_struct *vma,
unsigned long addr, pte_t *ptep, pte_t orig_pte)
{
- struct page *page = pfn_swap_entry_to_page(pte_to_swp_entry(orig_pte));
+ const softleaf_t entry = softleaf_from_pte(orig_pte);
+ struct page *page = softleaf_to_page(entry);
struct folio *folio = page_folio(page);
if (folio_trylock(folio)) {
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index dee95d5ecfd4..acb9bf89f619 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -705,7 +705,9 @@ static int queue_folios_pte_range(pmd_t *pmd, unsigned long addr,
if (pte_none(ptent))
continue;
if (!pte_present(ptent)) {
- if (is_migration_entry(pte_to_swp_entry(ptent)))
+ const softleaf_t entry = softleaf_from_pte(ptent);
+
+ if (softleaf_is_migration(entry))
qp->nr_failed++;
continue;
}
diff --git a/mm/migrate.c b/mm/migrate.c
index 5edfd0b2f63d..c39dfea1a925 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -483,7 +483,7 @@ void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
spinlock_t *ptl;
pte_t *ptep;
pte_t pte;
- swp_entry_t entry;
+ softleaf_t entry;
ptep = pte_offset_map_lock(mm, pmd, address, &ptl);
if (!ptep)
@@ -495,8 +495,8 @@ void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
if (pte_none(pte) || pte_present(pte))
goto out;
- entry = pte_to_swp_entry(pte);
- if (!is_migration_entry(entry))
+ entry = softleaf_from_pte(pte);
+ if (!softleaf_is_migration(entry))
goto out;
migration_entry_wait_on_locked(entry, ptl);
diff --git a/mm/migrate_device.c b/mm/migrate_device.c
index 592b4561507c..b1ce6e3478d6 100644
--- a/mm/migrate_device.c
+++ b/mm/migrate_device.c
@@ -279,7 +279,7 @@ again:
unsigned long mpfn = 0, pfn;
struct folio *folio;
struct page *page;
- swp_entry_t entry;
+ softleaf_t entry;
pte_t pte;
pte = ptep_get(ptep);
@@ -298,11 +298,11 @@ again:
* page table entry. Other special swap entries are not
migratable, and we ignore regular swapped pages.
*/
- entry = pte_to_swp_entry(pte);
- if (!is_device_private_entry(entry))
+ entry = softleaf_from_pte(pte);
+ if (!softleaf_is_device_private(entry))
goto next;
- page = pfn_swap_entry_to_page(entry);
+ page = softleaf_to_page(entry);
pgmap = page_pgmap(page);
if (!(migrate->flags &
MIGRATE_VMA_SELECT_DEVICE_PRIVATE) ||
@@ -330,7 +330,7 @@ again:
mpfn = migrate_pfn(page_to_pfn(page)) |
MIGRATE_PFN_MIGRATE;
- if (is_writable_device_private_entry(entry))
+ if (softleaf_is_device_private_write(entry))
mpfn |= MIGRATE_PFN_WRITE;
} else {
pfn = pte_pfn(pte);
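
As the comment in the hunk notes, device-private entries are the only non-present entries worth collecting here; every other softleaf kind (plain swap, migration, hwpoison) is skipped. The filter, pulled out as a sketch (want_device_private() is a hypothetical name, not part of this patch):

	/* Hypothetical helper: should this non-present PTE be migrated? */
	static bool want_device_private(pte_t pte, unsigned long migrate_flags)
	{
		const softleaf_t entry = softleaf_from_pte(pte);

		if (!softleaf_is_device_private(entry))
			return false;
		return migrate_flags & MIGRATE_VMA_SELECT_DEVICE_PRIVATE;
	}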
diff --git a/mm/mprotect.c b/mm/mprotect.c
index f910cbf41442..283889e4f1ce 100644
--- a/mm/mprotect.c
+++ b/mm/mprotect.c
@@ -317,11 +317,11 @@ static long change_pte_range(struct mmu_gather *tlb,
pages++;
}
} else {
- swp_entry_t entry = pte_to_swp_entry(oldpte);
+ softleaf_t entry = softleaf_from_pte(oldpte);
pte_t newpte;
- if (is_writable_migration_entry(entry)) {
- struct folio *folio = pfn_swap_entry_folio(entry);
+ if (softleaf_is_migration_write(entry)) {
+ const struct folio *folio = softleaf_to_folio(entry);
/*
* A protection check is difficult so
@@ -335,7 +335,7 @@ static long change_pte_range(struct mmu_gather *tlb,
newpte = swp_entry_to_pte(entry);
if (pte_swp_soft_dirty(oldpte))
newpte = pte_swp_mksoft_dirty(newpte);
- } else if (is_writable_device_private_entry(entry)) {
+ } else if (softleaf_is_device_private_write(entry)) {
/*
* We do not preserve soft-dirtiness. See
* copy_nonpresent_pte() for explanation.
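
The interesting move in this hunk is the write-to-read downgrade: a protection check cannot be performed against a non-present entry, so mprotect() conservatively rewrites a writable migration entry as a readable one. Condensed, with the elided middle reconstructed as an assumption rather than a quote:

	softleaf_t entry = softleaf_from_pte(oldpte);
	pte_t newpte;

	if (softleaf_is_migration_write(entry)) {
		/* Assumed: downgrade before re-encoding as a PTE. */
		entry = make_readable_migration_entry(swp_offset(entry));
		newpte = swp_entry_to_pte(entry);
		if (pte_swp_soft_dirty(oldpte))
			newpte = pte_swp_mksoft_dirty(newpte);
		/* ... newpte then replaces oldpte in the page table ... */
	}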
diff --git a/mm/page_vma_mapped.c b/mm/page_vma_mapped.c
index 8137d2366722..b38a1d00c971 100644
--- a/mm/page_vma_mapped.c
+++ b/mm/page_vma_mapped.c
@@ -49,7 +49,7 @@ again:
if (is_migration)
return false;
} else if (!is_migration) {
- swp_entry_t entry;
+ softleaf_t entry;
/*
* Handle un-addressable ZONE_DEVICE memory.
@@ -67,9 +67,9 @@ again:
* For more details on device private memory see HMM
* (include/linux/hmm.h or mm/hmm.c).
*/
- entry = pte_to_swp_entry(ptent);
- if (!is_device_private_entry(entry) &&
- !is_device_exclusive_entry(entry))
+ entry = softleaf_from_pte(ptent);
+ if (!softleaf_is_device_private(entry) &&
+ !softleaf_is_device_exclusive(entry))
return false;
}
spin_lock(*ptlp);
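
Spelled out, the early-exit logic: a non-present PTE only keeps page_vma_mapped_walk() interested if it encodes device private or device exclusive memory; any other softleaf kind means the page is not mapped here. As a sketch (pvmw_wants_softleaf() is a hypothetical name):

	/* Hypothetical helper: is this non-present PTE relevant to pvmw? */
	static bool pvmw_wants_softleaf(pte_t ptent)
	{
		const softleaf_t entry = softleaf_from_pte(ptent);

		return softleaf_is_device_private(entry) ||
		       softleaf_is_device_exclusive(entry);
	}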
diff --git a/mm/pagewalk.c b/mm/pagewalk.c
index 378c774795fc..90cc346a6ecf 100644
--- a/mm/pagewalk.c
+++ b/mm/pagewalk.c
@@ -1007,11 +1007,10 @@ pte_table:
goto found;
}
} else if (!pte_none(pte)) {
- swp_entry_t entry = pte_to_swp_entry(pte);
+ const softleaf_t entry = softleaf_from_pte(pte);
- if ((flags & FW_MIGRATION) &&
- is_migration_entry(entry)) {
- page = pfn_swap_entry_to_page(entry);
+ if ((flags & FW_MIGRATION) && softleaf_is_migration(entry)) {
+ page = softleaf_to_page(entry);
expose_page = false;
goto found;
}
diff --git a/mm/rmap.c b/mm/rmap.c
index 775710115a41..345466ad396b 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1969,7 +1969,7 @@ static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma,
if (likely(pte_present(pteval))) {
pfn = pte_pfn(pteval);
} else {
- pfn = swp_offset_pfn(pte_to_swp_entry(pteval));
+ pfn = softleaf_to_pfn(softleaf_from_pte(pteval));
VM_WARN_ON_FOLIO(folio_test_hugetlb(folio), folio);
}
@@ -2368,7 +2368,7 @@ static bool try_to_migrate_one(struct folio *folio, struct vm_area_struct *vma,
if (likely(pte_present(pteval))) {
pfn = pte_pfn(pteval);
} else {
- pfn = swp_offset_pfn(pte_to_swp_entry(pteval));
+ pfn = softleaf_to_pfn(softleaf_from_pte(pteval));
VM_WARN_ON_FOLIO(folio_test_hugetlb(folio), folio);
}
@@ -2453,8 +2453,11 @@ static bool try_to_migrate_one(struct folio *folio, struct vm_area_struct *vma,
folio_mark_dirty(folio);
writable = pte_write(pteval);
} else {
+ const softleaf_t entry = softleaf_from_pte(pteval);
+
pte_clear(mm, address, pvmw.pte);
- writable = is_writable_device_private_entry(pte_to_swp_entry(pteval));
+
+ writable = softleaf_is_device_private_write(entry);
}
VM_WARN_ON_FOLIO(writable && folio_test_anon(folio) &&
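
One pattern repeats across the rmap hunks: the pfn behind a present PTE is read directly, while a non-present PTE is decoded through the softleaf helpers first (and, in the last hunk, decoded before pte_clear() wipes the PTE it came from). Condensed into a single sketch (pvmw_pte_pfn() is a hypothetical name, not part of this patch):

	/* Hypothetical helper: pfn extraction for present and non-present PTEs. */
	static unsigned long pvmw_pte_pfn(pte_t pteval)
	{
		if (pte_present(pteval))
			return pte_pfn(pteval);
		/* Non-present: decode the softleaf and take its pfn. */
		return softleaf_to_pfn(softleaf_from_pte(pteval));
	}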