Diffstat (limited to 'mm')
 mm/debug_vm_pgtable.c |  2 +-
 mm/internal.h         |  7 +++++--
 mm/memory-failure.c   |  2 +-
 mm/memory.c           | 16 ++++++++--------
 mm/migrate.c          |  2 +-
 mm/mincore.c          |  4 +++-
 mm/rmap.c             |  8 ++++++--
 mm/swapfile.c         | 13 +++++++++++--
 8 files changed, 36 insertions(+), 18 deletions(-)
diff --git a/mm/debug_vm_pgtable.c b/mm/debug_vm_pgtable.c
index 64db85a80558..1eae87dbef73 100644
--- a/mm/debug_vm_pgtable.c
+++ b/mm/debug_vm_pgtable.c
@@ -1229,7 +1229,7 @@ static int __init init_args(struct pgtable_debug_args *args)
 	init_fixed_pfns(args);
 
 	/* See generic_max_swapfile_size(): probe the maximum offset */
-	max_swap_offset = swp_offset(pte_to_swp_entry(swp_entry_to_pte(swp_entry(0, ~0UL))));
+	max_swap_offset = swp_offset(softleaf_from_pte(softleaf_to_pte(swp_entry(0, ~0UL))));
 	/* Create a swp entry with all possible bits set while still being swap. */
 	args->swp_entry = swp_entry(MAX_SWAPFILES - 1, max_swap_offset);
 	/* Create a non-present migration entry. */
diff --git a/mm/internal.h b/mm/internal.h
index 2ed041e6ebc3..929bc4a5dd98 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -334,7 +334,7 @@ unsigned int folio_pte_batch(struct folio *folio, pte_t *ptep, pte_t pte,
  */
 static inline pte_t pte_move_swp_offset(pte_t pte, long delta)
 {
-	swp_entry_t entry = pte_to_swp_entry(pte);
+	const softleaf_t entry = softleaf_from_pte(pte);
 	pte_t new = __swp_entry_to_pte(__swp_entry(swp_type(entry),
 						   (swp_offset(entry) + delta)));
 
@@ -389,11 +389,14 @@ static inline int swap_pte_batch(pte_t *start_ptep, int max_nr, pte_t pte)
 
 	cgroup_id = lookup_swap_cgroup_id(entry);
 	while (ptep < end_ptep) {
+		softleaf_t entry;
+
 		pte = ptep_get(ptep);
 
 		if (!pte_same(pte, expected_pte))
 			break;
-		if (lookup_swap_cgroup_id(pte_to_swp_entry(pte)) != cgroup_id)
+		entry = softleaf_from_pte(pte);
+		if (lookup_swap_cgroup_id(entry) != cgroup_id)
 			break;
 		expected_pte = pte_next_swp_offset(expected_pte);
 		ptep++;
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index 71652cfedcdf..7f908ad795ad 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -51,7 +51,7 @@
 #include <linux/backing-dev.h>
 #include <linux/migrate.h>
 #include <linux/slab.h>
-#include <linux/swapops.h>
+#include <linux/leafops.h>
 #include <linux/hugetlb.h>
 #include <linux/memory_hotplug.h>
 #include <linux/mm_inline.h>
diff --git a/mm/memory.c b/mm/memory.c
index 525da4479228..50b93b45b174 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1218,7 +1218,7 @@ copy_pte_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
 	spinlock_t *src_ptl, *dst_ptl;
 	int progress, max_nr, ret = 0;
 	int rss[NR_MM_COUNTERS];
-	swp_entry_t entry = (swp_entry_t){0};
+	softleaf_t entry = softleaf_mk_none();
 	struct folio *prealloc = NULL;
 	int nr;
 
@@ -1282,7 +1282,7 @@ again:
 					  dst_vma, src_vma, addr, rss);
 		if (ret == -EIO) {
-			entry = pte_to_swp_entry(ptep_get(src_pte));
+			entry = softleaf_from_pte(ptep_get(src_pte));
 			break;
 		} else if (ret == -EBUSY) {
 			break;
@@ -4446,13 +4446,13 @@ static struct folio *__alloc_swap_folio(struct vm_fault *vmf)
 {
 	struct vm_area_struct *vma = vmf->vma;
 	struct folio *folio;
-	swp_entry_t entry;
+	softleaf_t entry;
 
 	folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0, vma, vmf->address);
 	if (!folio)
 		return NULL;
 
-	entry = pte_to_swp_entry(vmf->orig_pte);
+	entry = softleaf_from_pte(vmf->orig_pte);
 	if (mem_cgroup_swapin_charge_folio(folio, vma->vm_mm,
 					   GFP_KERNEL, entry)) {
 		folio_put(folio);
@@ -4470,7 +4470,7 @@ static struct folio *__alloc_swap_folio(struct vm_fault *vmf)
 static bool can_swapin_thp(struct vm_fault *vmf, pte_t *ptep, int nr_pages)
 {
 	unsigned long addr;
-	swp_entry_t entry;
+	softleaf_t entry;
 	int idx;
 	pte_t pte;
 
@@ -4480,7 +4480,7 @@ static bool can_swapin_thp(struct vm_fault *vmf, pte_t *ptep, int nr_pages)
 	if (!pte_same(pte, pte_move_swp_offset(vmf->orig_pte, -idx)))
 		return false;
-	entry = pte_to_swp_entry(pte);
+	entry = softleaf_from_pte(pte);
 	if (swap_pte_batch(ptep, nr_pages, pte) != nr_pages)
 		return false;
 
@@ -4526,7 +4526,7 @@ static struct folio *alloc_swap_folio(struct vm_fault *vmf)
 	unsigned long orders;
 	struct folio *folio;
 	unsigned long addr;
-	swp_entry_t entry;
+	softleaf_t entry;
 	spinlock_t *ptl;
 	pte_t *pte;
 	gfp_t gfp;
@@ -4547,7 +4547,7 @@ static struct folio *alloc_swap_folio(struct vm_fault *vmf)
 	if (!zswap_never_enabled())
 		goto fallback;
 
-	entry = pte_to_swp_entry(vmf->orig_pte);
+	entry = softleaf_from_pte(vmf->orig_pte);
 	/*
 	 * Get a list of all the (large) orders below PMD_ORDER that are enabled
 	 * and suitable for swapping THP.
diff --git a/mm/migrate.c b/mm/migrate.c
index c39dfea1a925..b2ad78bf85d5 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -534,7 +534,7 @@ void migration_entry_wait_huge(struct vm_area_struct *vma, unsigned long addr, p
 		 * lock release in migration_entry_wait_on_locked().
 		 */
 		hugetlb_vma_unlock_read(vma);
-		migration_entry_wait_on_locked(pte_to_swp_entry(pte), ptl);
+		migration_entry_wait_on_locked(entry, ptl);
 		return;
 	}
diff --git a/mm/mincore.c b/mm/mincore.c
index 9a908d8bb706..e5d13eea9234 100644
--- a/mm/mincore.c
+++ b/mm/mincore.c
@@ -202,7 +202,9 @@ static int mincore_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
 			for (i = 0; i < step; i++)
 				vec[i] = 1;
 		} else { /* pte is a swap entry */
-			*vec = mincore_swap(pte_to_swp_entry(pte), false);
+			const softleaf_t entry = softleaf_from_pte(pte);
+
+			*vec = mincore_swap(entry, false);
 		}
 		vec += step;
 	}
diff --git a/mm/rmap.c b/mm/rmap.c
index 345466ad396b..d871f2eb821c 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1969,7 +1969,9 @@ static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma,
 		if (likely(pte_present(pteval))) {
 			pfn = pte_pfn(pteval);
 		} else {
-			pfn = softleaf_to_pfn(pte_to_swp_entry(pteval));
+			const softleaf_t entry = softleaf_from_pte(pteval);
+
+			pfn = softleaf_to_pfn(entry);
 			VM_WARN_ON_FOLIO(folio_test_hugetlb(folio), folio);
 		}
 
@@ -2368,7 +2370,9 @@ static bool try_to_migrate_one(struct folio *folio, struct vm_area_struct *vma,
 		if (likely(pte_present(pteval))) {
 			pfn = pte_pfn(pteval);
 		} else {
-			pfn = softleaf_to_pfn(pte_to_swp_entry(pteval));
+			const softleaf_t entry = softleaf_from_pte(pteval);
+
+			pfn = softleaf_to_pfn(entry);
 			VM_WARN_ON_FOLIO(folio_test_hugetlb(folio), folio);
 		}
 
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 8c7f14061f5b..94e0f0c54168 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -3202,8 +3202,17 @@ static int claim_swapfile(struct swap_info_struct *si, struct inode *inode)
  */
 unsigned long generic_max_swapfile_size(void)
 {
-	return swp_offset(pte_to_swp_entry(
-			swp_entry_to_pte(swp_entry(0, ~0UL)))) + 1;
+	swp_entry_t entry = swp_entry(0, ~0UL);
+	const pte_t pte = softleaf_to_pte(entry);
+
+	/*
+	 * Since the PTE can be an invalid softleaf entry (e.g. the none PTE),
+	 * we need to do this manually.
+	 */
+	entry = __pte_to_swp_entry(pte);
+	entry = swp_entry(__swp_type(entry), __swp_offset(entry));
+
+	return swp_offset(entry) + 1;
 }
 
 /* Can be overridden by an architecture for additional checks. */
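
The one non-mechanical change in this series is the generic_max_swapfile_size() rewrite in mm/swapfile.c. As the in-diff comment notes, the probe deliberately encodes an offset with every bit set, and the resulting PTE can be an invalid softleaf entry (e.g. the none PTE), so it is decoded manually with the raw __pte_to_swp_entry()/__swp_type()/__swp_offset() helpers rather than softleaf_from_pte(). A minimal standalone sketch of that probe, mirroring the hunk (the helper name probe_max_swap_offset is hypothetical, not part of the patch):

/*
 * Encode a swap entry whose offset has all bits set, then decode it with
 * the raw helpers to see how many offset bits the architecture's swap-PTE
 * format actually preserves. Offset bits the PTE cannot hold are lost in
 * the round trip, so the decoded offset is the maximum representable one.
 */
static unsigned long probe_max_swap_offset(void)
{
	swp_entry_t entry = swp_entry(0, ~0UL);		/* type 0, all-ones offset */
	const pte_t pte = softleaf_to_pte(entry);	/* arch-specific encoding */

	/* Raw decode: the PTE may not be a valid softleaf entry here. */
	entry = __pte_to_swp_entry(pte);
	entry = swp_entry(__swp_type(entry), __swp_offset(entry));

	return swp_offset(entry);
}

generic_max_swapfile_size() returns this value plus one; mm/debug_vm_pgtable.c performs a similar round trip to derive max_swap_offset for its test entries.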