| author | Balbir Singh <balbirs@nvidia.com> | 2025-10-01 16:57:02 +1000 |
|---|---|---|
| committer | Andrew Morton <akpm@linux-foundation.org> | 2025-11-24 15:08:48 -0800 |
| commit | 4265d67e405a41562634279ca1ededf79fdadcd7 | |
| tree | 62c4f2568c9a23e147b13b93ae9fa7a601fc55e3 /mm | |
| parent | 56ef398996435a0021569b86293d376649f12540 | |
mm/migrate_device: add THP splitting during migration
Implement migrate_vma_split_unmapped_folio() to handle THP splitting
during the migration process when the destination cannot allocate
compound pages.  This addresses the common scenario where
migrate_vma_setup() succeeds with MIGRATE_PFN_COMPOUND pages, but the
destination device cannot allocate large pages during the migration
phase.
Key changes:
- migrate_vma_split_unmapped_folio(): split an already-unmapped THP
  during the migration phase
- extend __folio_split() and __split_huge_page_to_list_to_order() with
  an "unmapped" parameter to avoid redundant unmap/remap operations
This provides a fallback mechanism to ensure migration succeeds even
when large page allocation fails at the destination.
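Seen from a driver, the fallback is transparent: when the destination allocator cannot produce a compound page, the driver fills order-0 dst entries and the core splits the source THP instead of failing the range. Below is a minimal sketch of such an allocation step; dev_alloc_huge_page() and dev_alloc_page() are hypothetical driver helpers, not part of this patch — only migrate_pfn(), MIGRATE_PFN_* and struct migrate_vma come from the migrate_vma API.

```c
/*
 * Hedged sketch of a driver's destination-allocation step.
 * dev_alloc_huge_page()/dev_alloc_page() are illustrative helpers.
 */
static void fill_dst_pfns(struct migrate_vma *args)
{
        unsigned long i;

        for (i = 0; i < args->npages; i++) {
                struct page *dpage;

                args->dst[i] = 0;
                if (!(args->src[i] & MIGRATE_PFN_MIGRATE))
                        continue;

                if (args->src[i] & MIGRATE_PFN_COMPOUND) {
                        dpage = dev_alloc_huge_page();
                        if (dpage) {
                                args->dst[i] = migrate_pfn(page_to_pfn(dpage)) |
                                               MIGRATE_PFN_COMPOUND;
                                continue;
                        }
                        /*
                         * Large allocation failed: fall through to an
                         * order-0 page and leave MIGRATE_PFN_COMPOUND
                         * clear in dst.  The core notices the mismatch
                         * and splits the source THP instead of aborting.
                         */
                }

                dpage = dev_alloc_page();
                if (dpage)
                        args->dst[i] = migrate_pfn(page_to_pfn(dpage));
        }
}
```

The point of the sketch is that the driver never has to re-drive migrate_vma_setup(): as the __migrate_device_pages() hunks below suggest, a src entry carrying MIGRATE_PFN_COMPOUND paired with a non-compound dst entry is enough to trigger the split path.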
[matthew.brost@intel.com: add THP splitting during migration]
Link: https://lkml.kernel.org/r/20251120230825.181072-2-matthew.brost@intel.com
Link: https://lkml.kernel.org/r/20251001065707.920170-12-balbirs@nvidia.com
Signed-off-by: Balbir Singh <balbirs@nvidia.com>
Signed-off-by: Matthew Brost <matthew.brost@intel.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: Zi Yan <ziy@nvidia.com>
Cc: Joshua Hahn <joshua.hahnjy@gmail.com>
Cc: Rakie Kim <rakie.kim@sk.com>
Cc: Byungchul Park <byungchul@sk.com>
Cc: Gregory Price <gourry@gourry.net>
Cc: Ying Huang <ying.huang@linux.alibaba.com>
Cc: Alistair Popple <apopple@nvidia.com>
Cc: Oscar Salvador <osalvador@suse.de>
Cc: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
Cc: Baolin Wang <baolin.wang@linux.alibaba.com>
Cc: "Liam R. Howlett" <Liam.Howlett@oracle.com>
Cc: Nico Pache <npache@redhat.com>
Cc: Ryan Roberts <ryan.roberts@arm.com>
Cc: Dev Jain <dev.jain@arm.com>
Cc: Barry Song <baohua@kernel.org>
Cc: Lyude Paul <lyude@redhat.com>
Cc: Danilo Krummrich <dakr@kernel.org>
Cc: David Airlie <airlied@gmail.com>
Cc: Simona Vetter <simona@ffwll.ch>
Cc: Ralph Campbell <rcampbell@nvidia.com>
Cc: Mika Penttilä <mpenttil@redhat.com>
Cc: Francois Dugast <francois.dugast@intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Diffstat (limited to 'mm')
| -rw-r--r-- | mm/huge_memory.c | 46 |
| -rw-r--r-- | mm/migrate_device.c | 87 |
2 files changed, 101 insertions, 32 deletions
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index ded707a50af8..81e511f1ed26 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -3452,15 +3452,6 @@ static void __split_folio_to_order(struct folio *folio, int old_order,
 		new_folio->mapping = folio->mapping;
 		new_folio->index = folio->index + i;
 
-		/*
-		 * page->private should not be set in tail pages. Fix up and warn once
-		 * if private is unexpectedly set.
-		 */
-		if (unlikely(new_folio->private)) {
-			VM_WARN_ON_ONCE_PAGE(true, new_head);
-			new_folio->private = NULL;
-		}
-
 		if (folio_test_swapcache(folio))
 			new_folio->swap.val = folio->swap.val + i;
@@ -3661,6 +3652,7 @@ bool uniform_split_supported(struct folio *folio, unsigned int new_order,
  * @lock_at: a page within @folio to be left locked to caller
  * @list: after-split folios will be put on it if non NULL
  * @uniform_split: perform uniform split or not (non-uniform split)
+ * @unmapped: The pages are already unmapped, they are migration entries.
  *
  * It calls __split_unmapped_folio() to perform uniform and non-uniform split.
  * It is in charge of checking whether the split is supported or not and
@@ -3676,7 +3668,7 @@ bool uniform_split_supported(struct folio *folio, unsigned int new_order,
  */
 static int __folio_split(struct folio *folio, unsigned int new_order,
 		struct page *split_at, struct page *lock_at,
-		struct list_head *list, bool uniform_split)
+		struct list_head *list, bool uniform_split, bool unmapped)
 {
 	struct deferred_split *ds_queue = get_deferred_split_queue(folio);
 	XA_STATE(xas, &folio->mapping->i_pages, folio->index);
@@ -3736,13 +3728,15 @@ static int __folio_split(struct folio *folio, unsigned int new_order,
 		 * is taken to serialise against parallel split or collapse
 		 * operations.
 		 */
-		anon_vma = folio_get_anon_vma(folio);
-		if (!anon_vma) {
-			ret = -EBUSY;
-			goto out;
+		if (!unmapped) {
+			anon_vma = folio_get_anon_vma(folio);
+			if (!anon_vma) {
+				ret = -EBUSY;
+				goto out;
+			}
+			anon_vma_lock_write(anon_vma);
 		}
 		mapping = NULL;
-		anon_vma_lock_write(anon_vma);
 	} else {
 		unsigned int min_order;
 		gfp_t gfp;
@@ -3795,7 +3789,8 @@ static int __folio_split(struct folio *folio, unsigned int new_order,
 		goto out_unlock;
 	}
 
-	unmap_folio(folio);
+	if (!unmapped)
+		unmap_folio(folio);
 
 	/* block interrupt reentry in xa_lock and spinlock */
 	local_irq_disable();
@@ -3882,10 +3877,13 @@ static int __folio_split(struct folio *folio, unsigned int new_order,
 
 			next = folio_next(new_folio);
 
+			zone_device_private_split_cb(folio, new_folio);
+
 			expected_refs = folio_expected_ref_count(new_folio) + 1;
 			folio_ref_unfreeze(new_folio, expected_refs);
 
-			lru_add_split_folio(folio, new_folio, lruvec, list);
+			if (!unmapped)
+				lru_add_split_folio(folio, new_folio, lruvec, list);
 
 			/*
 			 * Anonymous folio with swap cache.
@@ -3916,6 +3914,8 @@ static int __folio_split(struct folio *folio, unsigned int new_order,
 				__filemap_remove_folio(new_folio, NULL);
 				folio_put_refs(new_folio, nr_pages);
 			}
+
+		zone_device_private_split_cb(folio, NULL);
 		/*
 		 * Unfreeze @folio only after all page cache entries, which
 		 * used to point to it, have been updated with new folios.
@@ -3939,6 +3939,9 @@ fail:
 
 	local_irq_enable();
 
+	if (unmapped)
+		return ret;
+
 	if (nr_shmem_dropped)
 		shmem_uncharge(mapping->host, nr_shmem_dropped);
 
@@ -4029,12 +4032,13 @@ out:
  * Returns -EINVAL when trying to split to an order that is incompatible
  * with the folio. Splitting to order 0 is compatible with all folios.
  */
-int split_huge_page_to_list_to_order(struct page *page, struct list_head *list,
-				     unsigned int new_order)
+int __split_huge_page_to_list_to_order(struct page *page, struct list_head *list,
+				       unsigned int new_order, bool unmapped)
 {
 	struct folio *folio = page_folio(page);
 
-	return __folio_split(folio, new_order, &folio->page, page, list, true);
+	return __folio_split(folio, new_order, &folio->page, page, list, true,
+			     unmapped);
 }
 
 /*
@@ -4063,7 +4067,7 @@ int folio_split(struct folio *folio, unsigned int new_order,
 		struct page *split_at, struct list_head *list)
 {
 	return __folio_split(folio, new_order, split_at, &folio->page, list,
-			false);
+			false, false);
 }
 
 int min_order_for_split(struct folio *folio)
diff --git a/mm/migrate_device.c b/mm/migrate_device.c
index a0a315f3572a..ab373fd38961 100644
--- a/mm/migrate_device.c
+++ b/mm/migrate_device.c
@@ -309,6 +309,25 @@ again:
 			    pgmap->owner != migrate->pgmap_owner)
 				goto next;
 
+			folio = page_folio(page);
+			if (folio_test_large(folio)) {
+				int ret;
+
+				arch_leave_lazy_mmu_mode();
+				pte_unmap_unlock(ptep, ptl);
+				ret = migrate_vma_split_folio(folio,
+							      migrate->fault_page);
+
+				if (ret) {
+					if (unmapped)
+						flush_tlb_range(walk->vma, start, end);
+
+					return migrate_vma_collect_skip(addr, end, walk);
+				}
+
+				goto again;
+			}
+
 			mpfn = migrate_pfn(page_to_pfn(page)) |
 			       MIGRATE_PFN_MIGRATE;
 			if (is_writable_device_private_entry(entry))
@@ -885,6 +904,29 @@ abort:
 	src[i] &= ~MIGRATE_PFN_MIGRATE;
 	return 0;
 }
+
+static int migrate_vma_split_unmapped_folio(struct migrate_vma *migrate,
+					    unsigned long idx, unsigned long addr,
+					    struct folio *folio)
+{
+	unsigned long i;
+	unsigned long pfn;
+	unsigned long flags;
+	int ret = 0;
+
+	folio_get(folio);
+	split_huge_pmd_address(migrate->vma, addr, true);
+	ret = __split_huge_page_to_list_to_order(folio_page(folio, 0), NULL,
+						 0, true);
+	if (ret)
+		return ret;
+	migrate->src[idx] &= ~MIGRATE_PFN_COMPOUND;
+	flags = migrate->src[idx] & ((1UL << MIGRATE_PFN_SHIFT) - 1);
+	pfn = migrate->src[idx] >> MIGRATE_PFN_SHIFT;
+	for (i = 1; i < HPAGE_PMD_NR; i++)
+		migrate->src[i+idx] = migrate_pfn(pfn + i) | flags;
+	return ret;
+}
 #else /* !CONFIG_ARCH_ENABLE_THP_MIGRATION */
 static int migrate_vma_insert_huge_pmd_page(struct migrate_vma *migrate,
 					    unsigned long addr,
@@ -894,6 +936,13 @@ static int migrate_vma_insert_huge_pmd_page(struct migrate_vma *migrate,
 {
 	return 0;
 }
+
+static int migrate_vma_split_unmapped_folio(struct migrate_vma *migrate,
+					    unsigned long idx, unsigned long addr,
+					    struct folio *folio)
+{
+	return 0;
+}
 #endif
 
 static unsigned long migrate_vma_nr_pages(unsigned long *src)
@@ -1055,8 +1104,9 @@ static void __migrate_device_pages(unsigned long *src_pfns,
 				   struct migrate_vma *migrate)
 {
 	struct mmu_notifier_range range;
-	unsigned long i;
+	unsigned long i, j;
 	bool notified = false;
+	unsigned long addr;
 
 	for (i = 0; i < npages; ) {
 		struct page *newpage = migrate_pfn_to_page(dst_pfns[i]);
@@ -1098,12 +1148,16 @@ static void __migrate_device_pages(unsigned long *src_pfns,
 			    (!(dst_pfns[i] & MIGRATE_PFN_COMPOUND))) {
 				nr = migrate_vma_nr_pages(&src_pfns[i]);
 				src_pfns[i] &= ~MIGRATE_PFN_COMPOUND;
-				src_pfns[i] &= ~MIGRATE_PFN_MIGRATE;
-				goto next;
+			} else {
+				nr = 1;
 			}
 
-			migrate_vma_insert_page(migrate, addr, &dst_pfns[i],
-						&src_pfns[i]);
+			for (j = 0; j < nr && i + j < npages; j++) {
+				src_pfns[i+j] |= MIGRATE_PFN_MIGRATE;
+				migrate_vma_insert_page(migrate,
+					addr + j * PAGE_SIZE,
+					&dst_pfns[i+j], &src_pfns[i+j]);
+			}
 			goto next;
 		}
 
@@ -1125,7 +1179,13 @@ static void __migrate_device_pages(unsigned long *src_pfns,
 						MIGRATE_PFN_COMPOUND);
 					goto next;
 				}
-				src_pfns[i] &= ~MIGRATE_PFN_MIGRATE;
+				nr = 1 << folio_order(folio);
+				addr = migrate->start + i * PAGE_SIZE;
+				if (migrate_vma_split_unmapped_folio(migrate, i, addr, folio)) {
+					src_pfns[i] &= ~(MIGRATE_PFN_MIGRATE |
+							 MIGRATE_PFN_COMPOUND);
+					goto next;
+				}
 			} else if ((src_pfns[i] & MIGRATE_PFN_MIGRATE) &&
 				   (dst_pfns[i] & MIGRATE_PFN_COMPOUND) &&
 				   !(src_pfns[i] & MIGRATE_PFN_COMPOUND)) {
@@ -1161,11 +1221,16 @@ static void __migrate_device_pages(unsigned long *src_pfns,
 		if (migrate && migrate->fault_page == page)
 			extra_cnt = 1;
 
-		r = folio_migrate_mapping(mapping, newfolio, folio, extra_cnt);
-		if (r)
-			src_pfns[i] &= ~MIGRATE_PFN_MIGRATE;
-		else
-			folio_migrate_flags(newfolio, folio);
+		for (j = 0; j < nr && i + j < npages; j++) {
+			folio = page_folio(migrate_pfn_to_page(src_pfns[i+j]));
+			newfolio = page_folio(migrate_pfn_to_page(dst_pfns[i+j]));
+
+			r = folio_migrate_mapping(mapping, newfolio, folio, extra_cnt);
+			if (r)
+				src_pfns[i+j] &= ~MIGRATE_PFN_MIGRATE;
+			else
+				folio_migrate_flags(newfolio, folio);
+		}
 next:
 		i += nr;
 	}
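To make the new calling convention concrete: __split_huge_page_to_list_to_order(page, NULL, 0, true) is only valid for a folio that is locked and already unmapped (its PTEs hold migration entries), which is the state migrate_vma_setup() leaves pages in; with unmapped == true, __folio_split() skips the anon_vma locking, unmap_folio() and the LRU fixups, and returns before the remap step. The net effect of migrate_vma_split_unmapped_folio() on the pfn array can be pictured as below; the index and pfn values are illustrative only, not taken from the patch.

```c
/*
 * Illustration: splitting a PMD-sized THP recorded at src[idx] fans
 * the entry out over the HPAGE_PMD_NR slots that migrate_vma_setup()
 * left empty.
 *
 * Before: src[idx]     = migrate_pfn(pfn) | MIGRATE_PFN_MIGRATE
 *                                         | MIGRATE_PFN_COMPOUND
 *         src[idx + k] = 0                     (1 <= k < HPAGE_PMD_NR)
 *
 * After:  src[idx]     = migrate_pfn(pfn)     | MIGRATE_PFN_MIGRATE
 *         src[idx + k] = migrate_pfn(pfn + k) | MIGRATE_PFN_MIGRATE
 *
 * MIGRATE_PFN_COMPOUND is cleared, and each order-0 page is then
 * migrated individually by the folio_migrate_mapping() loop above.
 */
```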