summaryrefslogtreecommitdiff
path: root/mm
diff options
context:
space:
mode:
authorZi Yan <ziy@nvidia.com>2025-10-31 12:20:01 -0400
committerAndrew Morton <akpm@linux-foundation.org>2025-11-24 15:08:49 -0800
commit50d0598cf2c9d33e1f08c3b1a357752ea8a9b94a (patch)
tree148685fffa3914d2b08dd06deaf2238a74178ac4 /mm
parent689b8986776c823161fb4955cc7dc303f78a1962 (diff)
mm/huge_memory: fix kernel-doc comments for folio_split() and related
try_folio_split_to_order(), folio_split, __folio_split(), and __split_unmapped_folio() do not have correct kernel-doc comment format. Fix them. [ziy@nvidia.com: kernel-doc fixup] Link: https://lkml.kernel.org/r/BE7AC5F3-9E64-4923-861D-C2C4E0CB91EB@nvidia.com [ziy@nvidia.com: add newline to fix an error and a warning from docutils] Link: https://lkml.kernel.org/r/040B38C0-23C6-4AEA-B069-69AE6DAA828B@nvidia.com Link: https://lkml.kernel.org/r/20251031162001.670503-4-ziy@nvidia.com Signed-off-by: Zi Yan <ziy@nvidia.com> Reviewed-by: Lorenzo Stoakes <lorenzo.stoakes@oracle.com> Reviewed-by: Lance Yang <lance.yang@linux.dev> Reviewed-by: Barry Song <baohua@kernel.org> Reviewed-by: Miaohe Lin <linmiaohe@huawei.com> Acked-by: David Hildenbrand (Red Hat) <david@kernel.org> Reviewed-by: Wei Yang <richard.weiyang@gmail.com> Cc: Baolin Wang <baolin.wang@linux.alibaba.com> Cc: Dev Jain <dev.jain@arm.com> Cc: Jane Chu <jane.chu@oracle.com> Cc: Liam Howlett <liam.howlett@oracle.com> Cc: Luis Chamberlain <mcgrof@kernel.org> Cc: Matthew Wilcox (Oracle) <willy@infradead.org> Cc: Naoya Horiguchi <nao.horiguchi@gmail.com> Cc: Nico Pache <npache@redhat.com> Cc: Pankaj Raghav <kernel@pankajraghav.com> Cc: Ryan Roberts <ryan.roberts@arm.com> Cc: Yang Shi <shy828301@gmail.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Diffstat (limited to 'mm')
-rw-r--r--mm/huge_memory.c52
1 file changed, 28 insertions, 24 deletions
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 30d6afc79016..3d87127c02cf 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -3493,8 +3493,9 @@ static void __split_folio_to_order(struct folio *folio, int old_order,
ClearPageCompound(&folio->page);
}
-/*
- * It splits an unmapped @folio to lower order smaller folios in two ways.
+/**
+ * __split_unmapped_folio() - splits an unmapped @folio to lower order folios in
+ * two ways: uniform split or non-uniform split.
* @folio: the to-be-split folio
* @new_order: the smallest order of the after split folios (since buddy
* allocator like split generates folios with orders from @folio's
@@ -3511,26 +3512,27 @@ static void __split_folio_to_order(struct folio *folio, int old_order,
* uniform_split is true.
* 2. buddy allocator like (non-uniform) split: the given @folio is split into
* half and one of the half (containing the given page) is split into half
- * until the given @page's order becomes @new_order. This is done when
+ * until the given @folio's order becomes @new_order. This is done when
* uniform_split is false.
*
* The high level flow for these two methods are:
- * 1. uniform split: a single __split_folio_to_order() is called to split the
- * @folio into @new_order, then we traverse all the resulting folios one by
- * one in PFN ascending order and perform stats, unfreeze, adding to list,
- * and file mapping index operations.
- * 2. non-uniform split: in general, folio_order - @new_order calls to
- * __split_folio_to_order() are made in a for loop to split the @folio
- * to one lower order at a time. The resulting small folios are processed
- * like what is done during the traversal in 1, except the one containing
- * @page, which is split in next for loop.
+ *
+ * 1. uniform split: @xas is split with no expectation of failure and a single
+ * __split_folio_to_order() is called to split the @folio into @new_order
+ * along with stats update.
+ * 2. non-uniform split: folio_order - @new_order calls to
+ * __split_folio_to_order() are expected to be made in a for loop to split
+ * the @folio to one lower order at a time. The folio containing @split_at
+ * is split in each iteration. @xas is split into half in each iteration and
+ * can fail. A failed @xas split leaves split folios as is without merging
+ * them back.
*
* After splitting, the caller's folio reference will be transferred to the
- * folio containing @page. The caller needs to unlock and/or free after-split
- * folios if necessary.
+ * folio containing @split_at. The caller needs to unlock and/or free
+ * after-split folios if necessary.
*
- * For !uniform_split, when -ENOMEM is returned, the original folio might be
- * split. The caller needs to check the input folio.
+ * Return: 0 - successful, <0 - failed (if -ENOMEM is returned, @folio might be
+ * split but not to @new_order, the caller needs to check)
*/
static int __split_unmapped_folio(struct folio *folio, int new_order,
struct page *split_at, struct xa_state *xas,
@@ -3650,8 +3652,8 @@ bool uniform_split_supported(struct folio *folio, unsigned int new_order,
return true;
}
-/*
- * __folio_split: split a folio at @split_at to a @new_order folio
+/**
+ * __folio_split() - split a folio at @split_at to a @new_order folio
* @folio: folio to split
* @new_order: the order of the new folio
* @split_at: a page within the new folio
@@ -3669,7 +3671,7 @@ bool uniform_split_supported(struct folio *folio, unsigned int new_order,
* 1. for uniform split, @lock_at points to one of @folio's subpages;
* 2. for buddy allocator like (non-uniform) split, @lock_at points to @folio.
*
- * return: 0: successful, <0 failed (if -ENOMEM is returned, @folio might be
+ * Return: 0 - successful, <0 - failed (if -ENOMEM is returned, @folio might be
* split but not to @new_order, the caller needs to check)
*/
static int __folio_split(struct folio *folio, unsigned int new_order,
@@ -4047,14 +4049,13 @@ int __split_huge_page_to_list_to_order(struct page *page, struct list_head *list
unmapped);
}
-/*
- * folio_split: split a folio at @split_at to a @new_order folio
+/**
+ * folio_split() - split a folio at @split_at to a @new_order folio
* @folio: folio to split
* @new_order: the order of the new folio
* @split_at: a page within the new folio
- *
- * return: 0: successful, <0 failed (if -ENOMEM is returned, @folio might be
- * split but not to @new_order, the caller needs to check)
+ * @list: after-split folios are added to @list if not null, otherwise to LRU
+ * list
*
* It has the same prerequisites and returns as
* split_huge_page_to_list_to_order().
@@ -4068,6 +4069,9 @@ int __split_huge_page_to_list_to_order(struct page *page, struct list_head *list
* [order-4, {order-3}, order-3, order-5, order-6, order-7, order-8].
*
* After split, folio is left locked for caller.
+ *
+ * Return: 0 - successful, <0 - failed (if -ENOMEM is returned, @folio might be
+ * split but not to @new_order, the caller needs to check)
*/
int folio_split(struct folio *folio, unsigned int new_order,
struct page *split_at, struct list_head *list)