author     Linus Torvalds <torvalds@linux-foundation.org>  2025-10-04 11:03:10 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>  2025-10-04 11:03:10 -0700
commit     b41048485ee395edbbb69fc83491d314268f7bdb (patch)
tree       437ed25ffc82f13dea8f503ec4af344fc8e20920 /mm/mm_init.c
parent     c4c8bcab18821e0c2852c38dece918512c60c732 (diff)
parent     e68f150bc11d0a05cbe984a4e5c0f72a95cae07d (diff)
Merge tag 'memblock-v6.18-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/rppt/memblock
Pull mm-init update from Mike Rapoport:
 "Simplify deferred initialization of struct pages

  Refactor and simplify deferred initialization of the memory map.

  Besides the negative diffstat, it gives a 3ms (55ms vs 58ms) reduction
  in the initialization of deferred pages on a single-node system with
  64GiB of RAM"

* tag 'memblock-v6.18-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/rppt/memblock:
  memblock: drop for_each_free_mem_pfn_range_in_zone_from()
  mm/mm_init: drop deferred_init_maxorder()
  mm/mm_init: deferred_init_memmap: use a job per zone
  mm/mm_init: use deferred_init_memmap_chunk() in deferred_grow_zone()
Diffstat (limited to 'mm/mm_init.c')
-rw-r--r--  mm/mm_init.c | 197
1 file changed, 65 insertions(+), 132 deletions(-)
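Before the diff itself, a minimal self-contained C sketch of the MAX_ORDER-aligned chunking that the new deferred_init_memmap_chunk() applies to each free range. This is an illustration under simplified assumptions, not kernel code: align_up() stands in for ALIGN(), init_and_free() for deferred_init_pages() plus deferred_free_pages(), and MAX_ORDER_NR_PAGES uses an arbitrary value.

/*
 * Illustration only -- not kernel code.
 */
#include <stdio.h>

#define MAX_ORDER_NR_PAGES 1024UL	/* illustrative: pages per MAX_ORDER block */

static unsigned long align_up(unsigned long x, unsigned long a)
{
	return (x + a - 1) & ~(a - 1);
}

/* Stand-in for initializing struct pages and handing them to the buddy. */
static unsigned long init_and_free(unsigned long spfn, unsigned long epfn)
{
	printf("init+free pfns [%lu, %lu)\n", spfn, epfn);
	return epfn - spfn;
}

/* Walk one free range in chunks that never cross a MAX_ORDER boundary. */
static unsigned long chunk_range(unsigned long spfn, unsigned long epfn)
{
	unsigned long nr_pages = 0;

	while (spfn < epfn) {
		/* Next MAX_ORDER boundary strictly past spfn. */
		unsigned long mo_pfn = align_up(spfn + 1, MAX_ORDER_NR_PAGES);
		unsigned long chunk_end = mo_pfn < epfn ? mo_pfn : epfn;

		nr_pages += init_and_free(spfn, chunk_end);
		spfn = chunk_end;
	}
	return nr_pages;
}

int main(void)
{
	/* A free range that straddles three MAX_ORDER blocks. */
	printf("total: %lu pages\n", chunk_range(500, 2600));
	return 0;
}

In the new code this chunking is kept mainly for CPU cache locality, as the rewritten comment in the hunk below explains.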
diff --git a/mm/mm_init.c b/mm/mm_init.c
index df614556741a..3db2dea7db4c 100644
--- a/mm/mm_init.c
+++ b/mm/mm_init.c
@@ -2045,112 +2045,63 @@ static unsigned long __init deferred_init_pages(struct zone *zone,
}
/*
- * This function is meant to pre-load the iterator for the zone init from
- * a given point.
- * Specifically it walks through the ranges starting with initial index
- * passed to it until we are caught up to the first_init_pfn value and
- * exits there. If we never encounter the value we return false indicating
- * there are no valid ranges left.
- */
-static bool __init
-deferred_init_mem_pfn_range_in_zone(u64 *i, struct zone *zone,
- unsigned long *spfn, unsigned long *epfn,
- unsigned long first_init_pfn)
-{
- u64 j = *i;
-
- if (j == 0)
- __next_mem_pfn_range_in_zone(&j, zone, spfn, epfn);
-
- /*
- * Start out by walking through the ranges in this zone that have
- * already been initialized. We don't need to do anything with them
- * so we just need to flush them out of the system.
- */
- for_each_free_mem_pfn_range_in_zone_from(j, zone, spfn, epfn) {
- if (*epfn <= first_init_pfn)
- continue;
- if (*spfn < first_init_pfn)
- *spfn = first_init_pfn;
- *i = j;
- return true;
- }
-
- return false;
-}
-
-/*
- * Initialize and free pages. We do it in two loops: first we initialize
- * struct page, then free to buddy allocator, because while we are
- * freeing pages we can access pages that are ahead (computing buddy
- * page in __free_one_page()).
+ * Initialize and free pages.
+ *
+ * At this point reserved pages and struct pages that correspond to holes in
+ * memblock.memory are already initialized so every free range has a valid
+ * memory map around it.
+ * This ensures that access of pages that are ahead of the range being
+ * initialized (computing buddy page in __free_one_page()) always reads a valid
+ * struct page.
*
- * In order to try and keep some memory in the cache we have the loop
- * broken along max page order boundaries. This way we will not cause
- * any issues with the buddy page computation.
+ * In order to try and improve CPU cache locality we have the loop broken along
+ * max page order boundaries.
*/
static unsigned long __init
-deferred_init_maxorder(u64 *i, struct zone *zone, unsigned long *start_pfn,
- unsigned long *end_pfn)
+deferred_init_memmap_chunk(unsigned long start_pfn, unsigned long end_pfn,
+ struct zone *zone)
{
- unsigned long mo_pfn = ALIGN(*start_pfn + 1, MAX_ORDER_NR_PAGES);
- unsigned long spfn = *start_pfn, epfn = *end_pfn;
+ int nid = zone_to_nid(zone);
unsigned long nr_pages = 0;
- u64 j = *i;
-
- /* First we loop through and initialize the page values */
- for_each_free_mem_pfn_range_in_zone_from(j, zone, start_pfn, end_pfn) {
- unsigned long t;
-
- if (mo_pfn <= *start_pfn)
- break;
+ phys_addr_t start, end;
+ u64 i = 0;
- t = min(mo_pfn, *end_pfn);
- nr_pages += deferred_init_pages(zone, *start_pfn, t);
+ for_each_free_mem_range(i, nid, 0, &start, &end, NULL) {
+ unsigned long spfn = PFN_UP(start);
+ unsigned long epfn = PFN_DOWN(end);
- if (mo_pfn < *end_pfn) {
- *start_pfn = mo_pfn;
+ if (spfn >= end_pfn)
break;
- }
- }
- /* Reset values and now loop through freeing pages as needed */
- swap(j, *i);
+ spfn = max(spfn, start_pfn);
+ epfn = min(epfn, end_pfn);
- for_each_free_mem_pfn_range_in_zone_from(j, zone, &spfn, &epfn) {
- unsigned long t;
+ while (spfn < epfn) {
+ unsigned long mo_pfn = ALIGN(spfn + 1, MAX_ORDER_NR_PAGES);
+ unsigned long chunk_end = min(mo_pfn, epfn);
- if (mo_pfn <= spfn)
- break;
+ nr_pages += deferred_init_pages(zone, spfn, chunk_end);
+ deferred_free_pages(spfn, chunk_end - spfn);
- t = min(mo_pfn, epfn);
- deferred_free_pages(spfn, t - spfn);
+ spfn = chunk_end;
- if (mo_pfn <= epfn)
- break;
+ if (irqs_disabled())
+ touch_nmi_watchdog();
+ else
+ cond_resched();
+ }
}
return nr_pages;
}
static void __init
-deferred_init_memmap_chunk(unsigned long start_pfn, unsigned long end_pfn,
- void *arg)
+deferred_init_memmap_job(unsigned long start_pfn, unsigned long end_pfn,
+ void *arg)
{
- unsigned long spfn, epfn;
struct zone *zone = arg;
- u64 i = 0;
-
- deferred_init_mem_pfn_range_in_zone(&i, zone, &spfn, &epfn, start_pfn);
- /*
- * Initialize and free pages in MAX_PAGE_ORDER sized increments so that
- * we can avoid introducing any issues with the buddy allocator.
- */
- while (spfn < end_pfn) {
- deferred_init_maxorder(&i, zone, &spfn, &epfn);
- cond_resched();
- }
+ deferred_init_memmap_chunk(start_pfn, end_pfn, zone);
}
static unsigned int __init
@@ -2164,12 +2115,10 @@ static int __init deferred_init_memmap(void *data)
{
pg_data_t *pgdat = data;
const struct cpumask *cpumask = cpumask_of_node(pgdat->node_id);
- unsigned long spfn = 0, epfn = 0;
- unsigned long first_init_pfn, flags;
+ int max_threads = deferred_page_init_max_threads(cpumask);
+ unsigned long first_init_pfn, last_pfn, flags;
unsigned long start = jiffies;
struct zone *zone;
- int max_threads;
- u64 i = 0;
/* Bind memory initialisation thread to a local node if possible */
if (!cpumask_empty(cpumask))
@@ -2197,24 +2146,20 @@ static int __init deferred_init_memmap(void *data)
/* Only the highest zone is deferred */
zone = pgdat->node_zones + pgdat->nr_zones - 1;
+ last_pfn = SECTION_ALIGN_UP(zone_end_pfn(zone));
- max_threads = deferred_page_init_max_threads(cpumask);
+ struct padata_mt_job job = {
+ .thread_fn = deferred_init_memmap_job,
+ .fn_arg = zone,
+ .start = first_init_pfn,
+ .size = last_pfn - first_init_pfn,
+ .align = PAGES_PER_SECTION,
+ .min_chunk = PAGES_PER_SECTION,
+ .max_threads = max_threads,
+ .numa_aware = false,
+ };
- while (deferred_init_mem_pfn_range_in_zone(&i, zone, &spfn, &epfn, first_init_pfn)) {
- first_init_pfn = ALIGN(epfn, PAGES_PER_SECTION);
- struct padata_mt_job job = {
- .thread_fn = deferred_init_memmap_chunk,
- .fn_arg = zone,
- .start = spfn,
- .size = first_init_pfn - spfn,
- .align = PAGES_PER_SECTION,
- .min_chunk = PAGES_PER_SECTION,
- .max_threads = max_threads,
- .numa_aware = false,
- };
-
- padata_do_multithreaded(&job);
- }
+ padata_do_multithreaded(&job);
/* Sanity check that the next zone really is unpopulated */
WARN_ON(pgdat->nr_zones < MAX_NR_ZONES && populated_zone(++zone));
@@ -2239,12 +2184,11 @@ static int __init deferred_init_memmap(void *data)
*/
bool __init deferred_grow_zone(struct zone *zone, unsigned int order)
{
- unsigned long nr_pages_needed = ALIGN(1 << order, PAGES_PER_SECTION);
+ unsigned long nr_pages_needed = SECTION_ALIGN_UP(1 << order);
pg_data_t *pgdat = zone->zone_pgdat;
unsigned long first_deferred_pfn = pgdat->first_deferred_pfn;
unsigned long spfn, epfn, flags;
unsigned long nr_pages = 0;
- u64 i = 0;
/* Only the last zone may have deferred pages */
if (zone_end_pfn(zone) != pgdat_end_pfn(pgdat))
@@ -2261,37 +2205,26 @@ bool __init deferred_grow_zone(struct zone *zone, unsigned int order)
return true;
}
- /* If the zone is empty somebody else may have cleared out the zone */
- if (!deferred_init_mem_pfn_range_in_zone(&i, zone, &spfn, &epfn,
- first_deferred_pfn)) {
- pgdat->first_deferred_pfn = ULONG_MAX;
- pgdat_resize_unlock(pgdat, &flags);
- /* Retry only once. */
- return first_deferred_pfn != ULONG_MAX;
+ /*
+ * Initialize at least nr_pages_needed in section chunks.
+ * If a section has less free memory than nr_pages_needed, the next
+ * section will be also initialized.
+ * Note, that it still does not guarantee that allocation of order can
+ * be satisfied if the sections are fragmented because of memblock
+ * allocations.
+ */
+ for (spfn = first_deferred_pfn, epfn = SECTION_ALIGN_UP(spfn + 1);
+ nr_pages < nr_pages_needed && spfn < zone_end_pfn(zone);
+ spfn = epfn, epfn += PAGES_PER_SECTION) {
+ nr_pages += deferred_init_memmap_chunk(spfn, epfn, zone);
}
/*
- * Initialize and free pages in MAX_PAGE_ORDER sized increments so
- * that we can avoid introducing any issues with the buddy
- * allocator.
+ * There were no pages to initialize and free which means the zone's
+ * memory map is completely initialized.
*/
- while (spfn < epfn) {
- /* update our first deferred PFN for this section */
- first_deferred_pfn = spfn;
-
- nr_pages += deferred_init_maxorder(&i, zone, &spfn, &epfn);
- touch_nmi_watchdog();
-
- /* We should only stop along section boundaries */
- if ((first_deferred_pfn ^ spfn) < PAGES_PER_SECTION)
- continue;
-
- /* If our quota has been met we can stop here */
- if (nr_pages >= nr_pages_needed)
- break;
- }
+ pgdat->first_deferred_pfn = nr_pages ? spfn : ULONG_MAX;
- pgdat->first_deferred_pfn = spfn;
pgdat_resize_unlock(pgdat, &flags);
return nr_pages > 0;
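To complement the deferred_grow_zone() hunk above, a minimal userspace-style sketch of the new section-by-section growth loop, under stated assumptions: section_align_up() stands in for SECTION_ALIGN_UP(), init_chunk() for deferred_init_memmap_chunk(), and the PFN values and PAGES_PER_SECTION are made up for illustration.

/*
 * Illustration only -- not kernel code.
 */
#include <limits.h>
#include <stdio.h>

#define PAGES_PER_SECTION 32768UL	/* illustrative */

static unsigned long section_align_up(unsigned long pfn)
{
	return (pfn + PAGES_PER_SECTION - 1) & ~(PAGES_PER_SECTION - 1);
}

/* Stand-in for deferred_init_memmap_chunk(): pretend every pfn was free. */
static unsigned long init_chunk(unsigned long spfn, unsigned long epfn)
{
	printf("init section [%lu, %lu)\n", spfn, epfn);
	return epfn - spfn;
}

int main(void)
{
	unsigned long first_deferred_pfn = 1000000UL;
	unsigned long zone_end = 1200000UL;
	unsigned long nr_pages_needed = section_align_up(1 << 9);	/* order-9 request */
	unsigned long spfn, epfn, nr_pages = 0;

	/* Initialize whole sections until the request is covered or the zone ends. */
	for (spfn = first_deferred_pfn, epfn = section_align_up(spfn + 1);
	     nr_pages < nr_pages_needed && spfn < zone_end;
	     spfn = epfn, epfn += PAGES_PER_SECTION)
		nr_pages += init_chunk(spfn, epfn);

	printf("grew zone by %lu pages, new first_deferred_pfn = %lu\n",
	       nr_pages, nr_pages ? spfn : ULONG_MAX);
	return 0;
}

As in the hunk above, first_deferred_pfn advances to the end of the last section processed when any pages were initialized, and becomes ULONG_MAX otherwise to mark the zone's memory map as completely initialized.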