| author | David Hildenbrand <david@redhat.com> | 2025-07-04 12:25:01 +0200 |
|---|---|---|
| committer | Andrew Morton <akpm@linux-foundation.org> | 2025-07-13 16:38:26 -0700 |
| commit | 6ef0c1976b8fab938e732c2fb751fa8965153b2e | |
| tree | 03ff677b3c39e8153cf55eeaae2c61259a7d3217 | |
| parent | 5ec3583309ef94fcceed6807aed93b50e801b84a | |
mm/migrate: rename isolate_movable_page() to isolate_movable_ops_page()
... and start moving back to per-page things that will absolutely not be
folio things in the future. Add documentation and a comment that the
remaining folio stuff (lock, refcount) will have to be reworked as well.
While at it, convert the VM_BUG_ON() into a WARN_ON_ONCE() and handle the
failure gracefully (relevant with further changes), and convert a
WARN_ON_ONCE() into a VM_WARN_ON_ONCE_PAGE().
Note that we will leave anything that needs a rework (lock, refcount,
->lru) using folios for now: that perfectly highlights the
problematic bits.
Link: https://lkml.kernel.org/r/20250704102524.326966-8-david@redhat.com
Signed-off-by: David Hildenbrand <david@redhat.com>
Reviewed-by: Zi Yan <ziy@nvidia.com>
Reviewed-by: Harry Yoo <harry.yoo@oracle.com>
Reviewed-by: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
Cc: Alistair Popple <apopple@nvidia.com>
Cc: Al Viro <viro@zeniv.linux.org.uk>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Brendan Jackman <jackmanb@google.com>
Cc: Byungchul Park <byungchul@sk.com>
Cc: Chengming Zhou <chengming.zhou@linux.dev>
Cc: Christian Brauner <brauner@kernel.org>
Cc: Christophe Leroy <christophe.leroy@csgroup.eu>
Cc: Eugenio Pérez <eperezma@redhat.com>
Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Cc: Gregory Price <gourry@gourry.net>
Cc: "Huang, Ying" <ying.huang@linux.alibaba.com>
Cc: Jan Kara <jack@suse.cz>
Cc: Jason Gunthorpe <jgg@ziepe.ca>
Cc: Jason Wang <jasowang@redhat.com>
Cc: Jerrin Shaji George <jerrin.shaji-george@broadcom.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: John Hubbard <jhubbard@nvidia.com>
Cc: Jonathan Corbet <corbet@lwn.net>
Cc: Joshua Hahn <joshua.hahnjy@gmail.com>
Cc: Liam Howlett <liam.howlett@oracle.com>
Cc: Madhavan Srinivasan <maddy@linux.ibm.com>
Cc: Matthew Brost <matthew.brost@intel.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Miaohe Lin <linmiaohe@huawei.com>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: "Michael S. Tsirkin" <mst@redhat.com>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Mike Rapoport <rppt@kernel.org>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Naoya Horiguchi <nao.horiguchi@gmail.com>
Cc: Nicholas Piggin <npiggin@gmail.com>
Cc: Oscar Salvador <osalvador@suse.de>
Cc: Peter Xu <peterx@redhat.com>
Cc: Qi Zheng <zhengqi.arch@bytedance.com>
Cc: Rakie Kim <rakie.kim@sk.com>
Cc: Rik van Riel <riel@surriel.com>
Cc: Sergey Senozhatsky <senozhatsky@chromium.org>
Cc: Shakeel Butt <shakeel.butt@linux.dev>
Cc: Suren Baghdasaryan <surenb@google.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Xuan Zhuo <xuanzhuo@linux.alibaba.com>
Cc: xu xin <xu.xin16@zte.com.cn>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
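The VM_BUG_ON() → WARN_ON_ONCE() conversion described above trades a hard assertion (a crash on CONFIG_DEBUG_VM kernels) for a one-time warning whose return value lets the caller bail out. As a rough illustration of that pattern, here is a minimal userspace sketch; the WARN_ON_ONCE() macro below is a simplified stand-in for the kernel's, and lookup_mops_stub() is invented purely so the failure path has something to trip over:

```c
#include <stdbool.h>
#include <stdio.h>

/*
 * Simplified stand-in for the kernel's WARN_ON_ONCE(): print a warning
 * the first time the condition is true, and always hand the condition
 * back so the caller can fail gracefully instead of crashing.
 */
#define WARN_ON_ONCE(cond) ({					\
	static bool warned;					\
	bool c = (cond);					\
	if (c && !warned) {					\
		warned = true;					\
		fprintf(stderr, "WARNING: %s:%d: %s\n",		\
			__FILE__, __LINE__, #cond);		\
	}							\
	c;							\
})

/* Invented stub standing in for page_movable_ops(); returns NULL to
 * simulate the "no movable_operations registered" corner case. */
static const void *lookup_mops_stub(void)
{
	return NULL;
}

/* Old style: VM_BUG_ON(!mops) would bring the whole system down here.
 * New style: warn once, then report failure to the caller. */
static bool try_isolate(void)
{
	const void *mops = lookup_mops_stub();

	if (WARN_ON_ONCE(!mops))
		return false;	/* treated as "isolation failed" */
	return true;
}

int main(void)
{
	/* Both calls fail gracefully; only the first prints a warning. */
	printf("first : %d\n", try_isolate());
	printf("second: %d\n", try_isolate());
	return 0;
}
```

Because each macro expansion carries its own static flag, every call site warns at most once, which mirrors the kernel semantics the patch relies on when it turns the missing-mops case into a graceful isolation failure.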
| -rw-r--r-- | include/linux/migrate.h | 4 |
|---|---|---|
| -rw-r--r-- | mm/compaction.c | 2 |
| -rw-r--r-- | mm/migrate.c | 39 |
3 files changed, 32 insertions, 13 deletions
```diff
diff --git a/include/linux/migrate.h b/include/linux/migrate.h
index aaa2114498d6..c0ec7422837b 100644
--- a/include/linux/migrate.h
+++ b/include/linux/migrate.h
@@ -69,7 +69,7 @@ int migrate_pages(struct list_head *l, new_folio_t new, free_folio_t free,
 		unsigned long private, enum migrate_mode mode, int reason,
 		unsigned int *ret_succeeded);
 struct folio *alloc_migration_target(struct folio *src, unsigned long private);
-bool isolate_movable_page(struct page *page, isolate_mode_t mode);
+bool isolate_movable_ops_page(struct page *page, isolate_mode_t mode);
 bool isolate_folio_to_list(struct folio *folio, struct list_head *list);
 
 int migrate_huge_page_move_mapping(struct address_space *mapping,
@@ -90,7 +90,7 @@ static inline int migrate_pages(struct list_head *l, new_folio_t new,
 static inline struct folio *alloc_migration_target(struct folio *src,
 		unsigned long private)
 	{ return NULL; }
-static inline bool isolate_movable_page(struct page *page, isolate_mode_t mode)
+static inline bool isolate_movable_ops_page(struct page *page, isolate_mode_t mode)
 	{ return false; }
 static inline bool isolate_folio_to_list(struct folio *folio, struct list_head *list)
 	{ return false; }
diff --git a/mm/compaction.c b/mm/compaction.c
index 3925cb61dbb8..17455c5a4be0 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -1093,7 +1093,7 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
 			locked = NULL;
 		}
 
-		if (isolate_movable_page(page, mode)) {
+		if (isolate_movable_ops_page(page, mode)) {
 			folio = page_folio(page);
 			goto isolate_success;
 		}
diff --git a/mm/migrate.c b/mm/migrate.c
index 208d2d4a2f8d..2e648d75248e 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -51,8 +51,26 @@
 #include "internal.h"
 #include "swap.h"
 
-bool isolate_movable_page(struct page *page, isolate_mode_t mode)
+/**
+ * isolate_movable_ops_page - isolate a movable_ops page for migration
+ * @page: The page.
+ * @mode: The isolation mode.
+ *
+ * Try to isolate a movable_ops page for migration. Will fail if the page is
+ * not a movable_ops page, if the page is already isolated for migration
+ * or if the page was just released by its owner.
+ *
+ * Once isolated, the page cannot get freed until it is either putback
+ * or migrated.
+ *
+ * Returns true if isolation succeeded, otherwise false.
+ */
+bool isolate_movable_ops_page(struct page *page, isolate_mode_t mode)
 {
+	/*
+	 * TODO: these pages will not be folios in the future. All
+	 * folio dependencies will have to be removed.
+	 */
 	struct folio *folio = folio_get_nontail_page(page);
 	const struct movable_operations *mops;
 
@@ -73,7 +91,7 @@ bool isolate_movable_page(struct page *page, isolate_mode_t mode)
 	 * we use non-atomic bitops on newly allocated page flags so
 	 * unconditionally grabbing the lock ruins page's owner side.
 	 */
-	if (unlikely(!__folio_test_movable(folio)))
+	if (unlikely(!__PageMovable(page)))
 		goto out_putfolio;
 
 	/*
@@ -90,18 +108,19 @@ bool isolate_movable_page(struct page *page, isolate_mode_t mode)
 	if (unlikely(!folio_trylock(folio)))
 		goto out_putfolio;
 
-	if (!folio_test_movable(folio) || folio_test_isolated(folio))
+	if (!PageMovable(page) || PageIsolated(page))
 		goto out_no_isolated;
 
-	mops = folio_movable_ops(folio);
-	VM_BUG_ON_FOLIO(!mops, folio);
+	mops = page_movable_ops(page);
+	if (WARN_ON_ONCE(!mops))
+		goto out_no_isolated;
 
-	if (!mops->isolate_page(&folio->page, mode))
+	if (!mops->isolate_page(page, mode))
 		goto out_no_isolated;
 
 	/* Driver shouldn't use the isolated flag */
-	WARN_ON_ONCE(folio_test_isolated(folio));
-	folio_set_isolated(folio);
+	VM_WARN_ON_ONCE_PAGE(PageIsolated(page), page);
+	SetPageIsolated(page);
 	folio_unlock(folio);
 
 	return true;
@@ -175,8 +194,8 @@ bool isolate_folio_to_list(struct folio *folio, struct list_head *list)
 	if (lru)
 		isolated = folio_isolate_lru(folio);
 	else
-		isolated = isolate_movable_page(&folio->page,
-						ISOLATE_UNEVICTABLE);
+		isolated = isolate_movable_ops_page(&folio->page,
+						    ISOLATE_UNEVICTABLE);
 
 	if (!isolated)
 		return false;
```
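For readers unfamiliar with the mops->isolate_page() indirection in the diff above: movable_ops pages dispatch migration through struct movable_operations, declared in include/linux/migrate.h. The sketch below shows the shape of a driver-side implementation; the my_drv_* names and the empty callback bodies are illustrative assumptions, not code from any real driver:

```c
#include <linux/migrate.h>

/*
 * Hypothetical driver callbacks. isolate_page() is what
 * isolate_movable_ops_page() invokes through mops->isolate_page();
 * it runs with the page locked and returns false to veto isolation.
 */
static bool my_drv_isolate_page(struct page *page, isolate_mode_t mode)
{
	/* e.g. pin driver-private state so the contents stay stable */
	return true;
}

static int my_drv_migrate_page(struct page *dst, struct page *src,
			       enum migrate_mode mode)
{
	/* copy contents/metadata from src to dst; 0 means success */
	return 0;
}

static void my_drv_putback_page(struct page *page)
{
	/* undo whatever isolate_page() did */
}

static const struct movable_operations my_drv_mops = {
	.isolate_page	= my_drv_isolate_page,
	.migrate_page	= my_drv_migrate_page,
	.putback_page	= my_drv_putback_page,
};
```

In-tree users of this interface, such as zsmalloc and the balloon drivers, wire up their movable_operations along these lines when marking their pages movable, which is why the core code can stay agnostic about what the page actually contains.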