Diffstat (limited to 'mm/huge_memory.c')
 mm/huge_memory.c | 46 +++++++++++++++++++++++++---------------------
 1 file changed, 25 insertions(+), 21 deletions(-)
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index ded707a50af8..81e511f1ed26 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -3452,15 +3452,6 @@ static void __split_folio_to_order(struct folio *folio, int old_order,
new_folio->mapping = folio->mapping;
new_folio->index = folio->index + i;
- /*
- * page->private should not be set in tail pages. Fix up and warn once
- * if private is unexpectedly set.
- */
- if (unlikely(new_folio->private)) {
- VM_WARN_ON_ONCE_PAGE(true, new_head);
- new_folio->private = NULL;
- }
-
if (folio_test_swapcache(folio))
new_folio->swap.val = folio->swap.val + i;
@@ -3661,6 +3652,7 @@ bool uniform_split_supported(struct folio *folio, unsigned int new_order,
* @lock_at: a page within @folio to be left locked to caller
* @list: after-split folios will be put on it if non-NULL
* @uniform_split: perform uniform split or not (non-uniform split)
+ * @unmapped: The pages are already unmapped; their page table entries have been replaced with migration entries.
*
* It calls __split_unmapped_folio() to perform uniform and non-uniform split.
* It is in charge of checking whether the split is supported or not and
@@ -3676,7 +3668,7 @@ bool uniform_split_supported(struct folio *folio, unsigned int new_order,
*/
static int __folio_split(struct folio *folio, unsigned int new_order,
struct page *split_at, struct page *lock_at,
- struct list_head *list, bool uniform_split)
+ struct list_head *list, bool uniform_split, bool unmapped)
{
struct deferred_split *ds_queue = get_deferred_split_queue(folio);
XA_STATE(xas, &folio->mapping->i_pages, folio->index);
@@ -3736,13 +3728,15 @@ static int __folio_split(struct folio *folio, unsigned int new_order,
* is taken to serialise against parallel split or collapse
* operations.
*/
- anon_vma = folio_get_anon_vma(folio);
- if (!anon_vma) {
- ret = -EBUSY;
- goto out;
+ if (!unmapped) {
+ anon_vma = folio_get_anon_vma(folio);
+ if (!anon_vma) {
+ ret = -EBUSY;
+ goto out;
+ }
+ anon_vma_lock_write(anon_vma);
}
mapping = NULL;
- anon_vma_lock_write(anon_vma);
} else {
unsigned int min_order;
gfp_t gfp;
@@ -3795,7 +3789,8 @@ static int __folio_split(struct folio *folio, unsigned int new_order,
goto out_unlock;
}
- unmap_folio(folio);
+ if (!unmapped)
+ unmap_folio(folio);
/* block interrupt reentry in xa_lock and spinlock */
local_irq_disable();
@@ -3882,10 +3877,13 @@ static int __folio_split(struct folio *folio, unsigned int new_order,
next = folio_next(new_folio);
+ zone_device_private_split_cb(folio, new_folio);
+
expected_refs = folio_expected_ref_count(new_folio) + 1;
folio_ref_unfreeze(new_folio, expected_refs);
- lru_add_split_folio(folio, new_folio, lruvec, list);
+ if (!unmapped)
+ lru_add_split_folio(folio, new_folio, lruvec, list);
/*
* Anonymous folio with swap cache.
@@ -3916,6 +3914,8 @@ static int __folio_split(struct folio *folio, unsigned int new_order,
__filemap_remove_folio(new_folio, NULL);
folio_put_refs(new_folio, nr_pages);
}
+
+ zone_device_private_split_cb(folio, NULL);
/*
* Unfreeze @folio only after all page cache entries, which
* used to point to it, have been updated with new folios.
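
The two zone_device_private_split_cb() calls above (once per new folio, then once with NULL after all page cache entries are settled) let a device-private memory owner update its own metadata for each split-out folio. A minimal sketch of what such a hook could look like, assuming it forwards to a folio_split operation on the pgmap; the op name and the folio->pgmap access are assumptions, not shown in this diff:

/*
 * Sketch only: dispatch each split notification to the owning
 * device-private pagemap, if it registered a handler. The
 * ->folio_split() op is an assumed interface, not part of this patch.
 */
static inline void zone_device_private_split_cb(struct folio *original_folio,
						struct folio *new_folio)
{
	if (folio_is_device_private(original_folio) &&
	    original_folio->pgmap->ops->folio_split)
		original_folio->pgmap->ops->folio_split(original_folio,
							new_folio);
}
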
@@ -3939,6 +3939,9 @@ fail:
local_irq_enable();
+ if (unmapped)
+ return ret;
+
if (nr_shmem_dropped)
shmem_uncharge(mapping->host, nr_shmem_dropped);
@@ -4029,12 +4032,13 @@ out:
* Returns -EINVAL when trying to split to an order that is incompatible
* with the folio. Splitting to order 0 is compatible with all folios.
*/
-int split_huge_page_to_list_to_order(struct page *page, struct list_head *list,
- unsigned int new_order)
+int __split_huge_page_to_list_to_order(struct page *page, struct list_head *list,
+ unsigned int new_order, bool unmapped)
{
struct folio *folio = page_folio(page);
- return __folio_split(folio, new_order, &folio->page, page, list, true);
+ return __folio_split(folio, new_order, &folio->page, page, list, true,
+ unmapped);
}
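
With the extra flag on the renamed __split_huge_page_to_list_to_order(), the old entry point presumably survives as a thin wrapper that passes unmapped == false, so existing callers keep their behaviour. A hypothetical sketch of that wrapper (e.g. in huge_mm.h, not shown in this diff):

/*
 * Hypothetical compatibility wrapper: callers that have not unmapped
 * the folio themselves get the default (mapped) split path.
 */
static inline int split_huge_page_to_list_to_order(struct page *page,
		struct list_head *list, unsigned int new_order)
{
	return __split_huge_page_to_list_to_order(page, list, new_order,
						  false);
}
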
/*
@@ -4063,7 +4067,7 @@ int folio_split(struct folio *folio, unsigned int new_order,
struct page *split_at, struct list_head *list)
{
return __folio_split(folio, new_order, split_at, &folio->page, list,
- false);
+ false, false);
}
int min_order_for_split(struct folio *folio)
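
The unmapped == true path is aimed at callers that have already replaced the folio's page table entries with migration entries: __folio_split() then skips folio_get_anon_vma()/anon_vma_lock_write(), skips unmap_folio() and LRU placement, and returns right after re-enabling interrupts instead of remapping. A hypothetical call site, with the function name and surrounding context being illustrative assumptions only:

/*
 * Sketch only: split a large folio mid-migration, after its PTEs were
 * already converted to migration entries. Assumes the caller holds the
 * folio lock and a reference, as required for any folio split.
 */
static int split_unmapped_folio_for_migration(struct folio *folio)
{
	VM_WARN_ON_ONCE_FOLIO(!folio_test_locked(folio), folio);

	/*
	 * list == NULL: the unmapped path does not touch the LRU anyway;
	 * new_order == 0 splits all the way down to base pages.
	 */
	return __split_huge_page_to_list_to_order(&folio->page, NULL, 0, true);
}
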