-rw-r--r--  mm/filemap.c        |  2
-rw-r--r--  mm/memory-failure.c |  2
-rw-r--r--  mm/memory.c         |  2
-rw-r--r--  mm/shmem.c          | 10
-rw-r--r--  mm/swap.h           | 48
-rw-r--r--  mm/swap_state.c     | 86
-rw-r--r--  mm/swapfile.c       |  8
-rw-r--r--  mm/vmscan.c         |  2
-rw-r--r--  mm/zswap.c          |  2
9 files changed, 103 insertions, 59 deletions
diff --git a/mm/filemap.c b/mm/filemap.c
index 8d078aa2738a..2a05b1fdd445 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -4525,7 +4525,7 @@ static void filemap_cachestat(struct address_space *mapping,
 				 * invalidation, so there might not be
 				 * a shadow in the swapcache (yet).
 				 */
-				shadow = get_shadow_from_swap_cache(swp);
+				shadow = swap_cache_get_shadow(swp);
 				if (!shadow)
 					goto resched;
 			}
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index 6d9134e3d115..3edebb0cda30 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -1127,7 +1127,7 @@ static int me_swapcache_clean(struct page_state *ps, struct page *p)
 	struct folio *folio = page_folio(p);
 	int ret;
 
-	delete_from_swap_cache(folio);
+	swap_cache_del_folio(folio);
 
 	ret = delete_from_lru_cache(folio) ? MF_FAILED : MF_RECOVERED;
 	folio_unlock(folio);
diff --git a/mm/memory.c b/mm/memory.c
index 5808c4ef21b3..41e641823558 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -4699,7 +4699,7 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
 
 				memcg1_swapin(entry, nr_pages);
 
-				shadow = get_shadow_from_swap_cache(entry);
+				shadow = swap_cache_get_shadow(entry);
 				if (shadow)
 					workingset_refault(folio, shadow);
 
diff --git a/mm/shmem.c b/mm/shmem.c
index 410f27bc4752..077744a9e9da 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -1661,13 +1661,13 @@ try_split:
 		}
 
 		/*
-		 * The delete_from_swap_cache() below could be left for
+		 * The swap_cache_del_folio() below could be left for
 		 * shrink_folio_list()'s folio_free_swap() to dispose of;
 		 * but I'm a little nervous about letting this folio out of
 		 * shmem_writeout() in a hybrid half-tmpfs-half-swap state
 		 * e.g. folio_mapping(folio) might give an unexpected answer.
 		 */
-		delete_from_swap_cache(folio);
+		swap_cache_del_folio(folio);
 		goto redirty;
 	}
 	if (nr_pages > 1)
@@ -2045,7 +2045,7 @@ retry:
 	new->swap = entry;
 
 	memcg1_swapin(entry, nr_pages);
-	shadow = get_shadow_from_swap_cache(entry);
+	shadow = swap_cache_get_shadow(entry);
 	if (shadow)
 		workingset_refault(new, shadow);
 	folio_add_lru(new);
@@ -2183,7 +2183,7 @@ static void shmem_set_folio_swapin_error(struct inode *inode, pgoff_t index,
 	nr_pages = folio_nr_pages(folio);
 	folio_wait_writeback(folio);
 	if (!skip_swapcache)
-		delete_from_swap_cache(folio);
+		swap_cache_del_folio(folio);
 	/*
 	 * Don't treat swapin error folio as alloced. Otherwise inode->i_blocks
 	 * won't be 0 when inode is released and thus trigger WARN_ON(i_blocks)
@@ -2422,7 +2422,7 @@ static int shmem_swapin_folio(struct inode *inode, pgoff_t index,
 		folio->swap.val = 0;
 		swapcache_clear(si, swap, nr_pages);
 	} else {
-		delete_from_swap_cache(folio);
+		swap_cache_del_folio(folio);
 	}
 	folio_mark_dirty(folio);
 	swap_free_nr(swap, nr_pages);
diff --git a/mm/swap.h b/mm/swap.h
index 30b1039c27fe..6c4acb549bec 100644
--- a/mm/swap.h
+++ b/mm/swap.h
@@ -167,17 +167,29 @@ static inline bool folio_matches_swap_entry(const struct folio *folio,
 	return folio_entry.val == round_down(entry.val, nr_pages);
 }
 
+/*
+ * All swap cache helpers below require the caller to ensure the swap entries
+ * used are valid and stabilize the device by any of the following ways:
+ * - Hold a reference by get_swap_device(): this ensures a single entry is
+ *   valid and increases the swap device's refcount.
+ * - Locking a folio in the swap cache: this ensures the folio's swap entries
+ *   are valid and pinned, also implies reference to the device.
+ * - Locking anything referencing the swap entry: e.g. PTL that protects
+ *   swap entries in the page table, similar to locking swap cache folio.
+ * - See the comment of get_swap_device() for more complex usage.
+ */
+struct folio *swap_cache_get_folio(swp_entry_t entry);
+void *swap_cache_get_shadow(swp_entry_t entry);
+int swap_cache_add_folio(struct folio *folio, swp_entry_t entry,
+		gfp_t gfp, void **shadow);
+void swap_cache_del_folio(struct folio *folio);
+void __swap_cache_del_folio(struct folio *folio,
+		swp_entry_t entry, void *shadow);
+void swap_cache_clear_shadow(int type, unsigned long begin,
+		unsigned long end);
+
 void show_swap_cache_info(void);
-void *get_shadow_from_swap_cache(swp_entry_t entry);
-int add_to_swap_cache(struct folio *folio, swp_entry_t entry,
-		gfp_t gfp, void **shadowp);
-void __delete_from_swap_cache(struct folio *folio,
-		swp_entry_t entry, void *shadow);
-void delete_from_swap_cache(struct folio *folio);
-void clear_shadow_from_swap_cache(int type, unsigned long begin,
-		unsigned long end);
 void swapcache_clear(struct swap_info_struct *si, swp_entry_t entry, int nr);
-struct folio *swap_cache_get_folio(swp_entry_t entry);
 struct folio *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
 		struct vm_area_struct *vma, unsigned long addr,
 		struct swap_iocb **plug);
@@ -305,28 +317,22 @@ static inline struct folio *swap_cache_get_folio(swp_entry_t entry)
 	return NULL;
 }
 
-static inline void *get_shadow_from_swap_cache(swp_entry_t entry)
+static inline void *swap_cache_get_shadow(swp_entry_t entry)
 {
 	return NULL;
 }
 
-static inline int add_to_swap_cache(struct folio *folio, swp_entry_t entry,
-		gfp_t gfp_mask, void **shadowp)
-{
-	return -1;
-}
-
-static inline void __delete_from_swap_cache(struct folio *folio,
-		swp_entry_t entry, void *shadow)
+static inline int swap_cache_add_folio(struct folio *folio, swp_entry_t entry,
+		gfp_t gfp, void **shadow)
 {
+	return -EINVAL;
 }
 
-static inline void delete_from_swap_cache(struct folio *folio)
+static inline void swap_cache_del_folio(struct folio *folio)
 {
 }
 
-static inline void clear_shadow_from_swap_cache(int type, unsigned long begin,
-		unsigned long end)
+static inline void __swap_cache_del_folio(struct folio *folio, swp_entry_t entry, void *shadow)
 {
 }
 
diff --git a/mm/swap_state.c b/mm/swap_state.c
index 0ad4f3b41f1b..f3a32a06a950 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -78,8 +78,8 @@ void show_swap_cache_info(void)
  * Context: Caller must ensure @entry is valid and protect the swap device
  * with reference count or locks.
  * Return: Returns the found folio on success, NULL otherwise. The caller
- * must lock and check if the folio still matches the swap entry before
- * use (e.g. with folio_matches_swap_entry).
+ * must lock and check if the folio still matches the swap entry before
+ * use (e.g., folio_matches_swap_entry).
  */
 struct folio *swap_cache_get_folio(swp_entry_t entry)
 {
@@ -90,7 +90,15 @@ struct folio *swap_cache_get_folio(swp_entry_t entry)
 	return folio;
 }
 
-void *get_shadow_from_swap_cache(swp_entry_t entry)
+/**
+ * swap_cache_get_shadow - Looks up a shadow in the swap cache.
+ * @entry: swap entry used for the lookup.
+ *
+ * Context: Caller must ensure @entry is valid and protect the swap device
+ * with reference count or locks.
+ * Return: Returns either NULL or an XA_VALUE (shadow).
+ */
+void *swap_cache_get_shadow(swp_entry_t entry)
 {
 	struct address_space *address_space = swap_address_space(entry);
 	pgoff_t idx = swap_cache_index(entry);
@@ -102,12 +110,21 @@ void *get_shadow_from_swap_cache(swp_entry_t entry)
 	return NULL;
 }
 
-/*
- * add_to_swap_cache resembles filemap_add_folio on swapper_space,
- * but sets SwapCache flag and 'swap' instead of mapping and index.
+/**
+ * swap_cache_add_folio - Add a folio into the swap cache.
+ * @folio: The folio to be added.
+ * @entry: The swap entry corresponding to the folio.
+ * @gfp: gfp_mask for XArray node allocation.
+ * @shadowp: If a shadow is found, return the shadow.
+ *
+ * Context: Caller must ensure @entry is valid and protect the swap device
+ * with reference count or locks.
+ * The caller also needs to mark the corresponding swap_map slots with
+ * SWAP_HAS_CACHE to avoid race or conflict.
+ * Return: Returns 0 on success, error code otherwise.
  */
-int add_to_swap_cache(struct folio *folio, swp_entry_t entry,
-		gfp_t gfp, void **shadowp)
+int swap_cache_add_folio(struct folio *folio, swp_entry_t entry,
+		gfp_t gfp, void **shadowp)
 {
 	struct address_space *address_space = swap_address_space(entry);
 	pgoff_t idx = swap_cache_index(entry);
@@ -155,12 +172,20 @@ unlock:
 	return xas_error(&xas);
 }
 
-/*
- * This must be called only on folios that have
- * been verified to be in the swap cache.
+/**
+ * __swap_cache_del_folio - Removes a folio from the swap cache.
+ * @folio: The folio.
+ * @entry: The first swap entry that the folio corresponds to.
+ * @shadow: shadow value to be filled in the swap cache.
+ *
+ * Removes a folio from the swap cache and fills a shadow in place.
+ * This won't put the folio's refcount. The caller has to do that.
+ *
+ * Context: Caller must hold the xa_lock, ensure the folio is
+ * locked and in the swap cache, using the index of @entry.
  */
-void __delete_from_swap_cache(struct folio *folio,
-		swp_entry_t entry, void *shadow)
+void __swap_cache_del_folio(struct folio *folio,
+		swp_entry_t entry, void *shadow)
 {
 	struct address_space *address_space = swap_address_space(entry);
 	int i;
@@ -186,27 +211,40 @@ void __delete_from_swap_cache(struct folio *folio,
 	__lruvec_stat_mod_folio(folio, NR_SWAPCACHE, -nr);
 }
 
-/*
- * This must be called only on folios that have
- * been verified to be in the swap cache and locked.
- * It will never put the folio into the free list,
- * the caller has a reference on the folio.
+/**
+ * swap_cache_del_folio - Removes a folio from the swap cache.
+ * @folio: The folio.
+ *
+ * Same as __swap_cache_del_folio, but handles lock and refcount. The
+ * caller must ensure the folio is either clean or has a swap count
+ * equal to zero, or it may cause data loss.
+ *
+ * Context: Caller must ensure the folio is locked and in the swap cache.
  */
-void delete_from_swap_cache(struct folio *folio)
+void swap_cache_del_folio(struct folio *folio)
 {
 	swp_entry_t entry = folio->swap;
 	struct address_space *address_space = swap_address_space(entry);
 
 	xa_lock_irq(&address_space->i_pages);
-	__delete_from_swap_cache(folio, entry, NULL);
+	__swap_cache_del_folio(folio, entry, NULL);
 	xa_unlock_irq(&address_space->i_pages);
 
 	put_swap_folio(folio, entry);
 	folio_ref_sub(folio, folio_nr_pages(folio));
 }
 
-void clear_shadow_from_swap_cache(int type, unsigned long begin,
-		unsigned long end)
+/**
+ * swap_cache_clear_shadow - Clears a set of shadows in the swap cache.
+ * @type: Indicates the swap device.
+ * @begin: Beginning offset of the range.
+ * @end: Ending offset of the range.
+ *
+ * Context: Caller must ensure the range is valid and hold a reference to
+ * the swap device.
+ */
+void swap_cache_clear_shadow(int type, unsigned long begin,
+		unsigned long end)
 {
 	unsigned long curr = begin;
 	void *old;
@@ -393,7 +431,7 @@ struct folio *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
 			goto put_and_return;
 
 		/*
-		 * We might race against __delete_from_swap_cache(), and
+		 * We might race against __swap_cache_del_folio(), and
 		 * stumble across a swap_map entry whose SWAP_HAS_CACHE
 		 * has not yet been cleared. Or race against another
 		 * __read_swap_cache_async(), which has set SWAP_HAS_CACHE
@@ -412,7 +450,7 @@ struct folio *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
 		goto fail_unlock;
 
 	/* May fail (-ENOMEM) if XArray node allocation failed. */
-	if (add_to_swap_cache(new_folio, entry, gfp_mask & GFP_RECLAIM_MASK, &shadow))
+	if (swap_cache_add_folio(new_folio, entry, gfp_mask & GFP_RECLAIM_MASK, &shadow))
 		goto fail_unlock;
 
 	memcg1_swapin(entry, 1);
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 6f7a8c98d14d..51f781c43537 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -267,7 +267,7 @@ again:
 	if (!need_reclaim)
 		goto out_unlock;
 
-	delete_from_swap_cache(folio);
+	swap_cache_del_folio(folio);
 	folio_set_dirty(folio);
 	ret = nr_pages;
 out_unlock:
@@ -1124,7 +1124,7 @@ static void swap_range_free(struct swap_info_struct *si, unsigned long offset,
 			swap_slot_free_notify(si->bdev, offset);
 		offset++;
 	}
-	clear_shadow_from_swap_cache(si->type, begin, end);
+	swap_cache_clear_shadow(si->type, begin, end);
 
 	/*
 	 * Make sure that try_to_unuse() observes si->inuse_pages reaching 0
@@ -1289,7 +1289,7 @@ int folio_alloc_swap(struct folio *folio, gfp_t gfp)
 	 * TODO: this could cause a theoretical memory reclaim
 	 * deadlock in the swap out path.
 	 */
-	if (add_to_swap_cache(folio, entry, gfp | __GFP_NOMEMALLOC, NULL))
+	if (swap_cache_add_folio(folio, entry, gfp | __GFP_NOMEMALLOC, NULL))
 		goto out_free;
 
 	return 0;
@@ -1759,7 +1759,7 @@ bool folio_free_swap(struct folio *folio)
 	if (folio_swapped(folio))
 		return false;
 
-	delete_from_swap_cache(folio);
+	swap_cache_del_folio(folio);
 	folio_set_dirty(folio);
 	return true;
 }
diff --git a/mm/vmscan.c b/mm/vmscan.c
index ca9e1cd3cd68..c79c6806560b 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -776,7 +776,7 @@ static int __remove_mapping(struct address_space *mapping, struct folio *folio,
 
 		if (reclaimed && !mapping_exiting(mapping))
 			shadow = workingset_eviction(folio, target_memcg);
-		__delete_from_swap_cache(folio, swap, shadow);
+		__swap_cache_del_folio(folio, swap, shadow);
 		memcg1_swapout(folio, swap);
 		xa_unlock_irq(&mapping->i_pages);
 		put_swap_folio(folio, swap);
diff --git a/mm/zswap.c b/mm/zswap.c
index 63045e3fb1f5..1b1edecde6a7 100644
--- a/mm/zswap.c
+++ b/mm/zswap.c
@@ -1069,7 +1069,7 @@ static int zswap_writeback_entry(struct zswap_entry *entry,
 
 out:
 	if (ret && ret != -EEXIST) {
-		delete_from_swap_cache(folio);
+		swap_cache_del_folio(folio);
 		folio_unlock(folio);
 	}
 	folio_put(folio);
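The comment block added to mm/swap.h above spells out the caller contract for these helpers, but no single call site in the diff shows the whole sequence. The sketch below is illustrative only and not part of the patch; example_swapin_lookup() is a hypothetical function. It combines the renamed helpers under that contract: pin the device with get_swap_device(), look up the folio with swap_cache_get_folio(), then lock it and re-check it with folio_matches_swap_entry() before use, as the swap_cache_get_folio() kerneldoc requires.

/* Illustrative sketch only -- not part of the patch. */
static struct folio *example_swapin_lookup(swp_entry_t entry)
{
	struct swap_info_struct *si;
	struct folio *folio;

	/* Pin the swap device so the entry stays valid for the lookup. */
	si = get_swap_device(entry);
	if (!si)
		return NULL;

	/* The folio is returned unlocked with its refcount raised. */
	folio = swap_cache_get_folio(entry);
	if (folio) {
		folio_lock(folio);
		/* It may have been freed or reused meanwhile; verify before use. */
		if (!folio_matches_swap_entry(folio, entry)) {
			folio_unlock(folio);
			folio_put(folio);
			folio = NULL;
		}
	}

	put_swap_device(si);
	return folio;
}

The re-check step is the point of the note in swap_cache_get_folio()'s kerneldoc: between the unlocked lookup and taking the folio lock, the folio can be removed from the swap cache or reused for a different entry.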