summaryrefslogtreecommitdiff
path: root/mm/vmalloc.c
diff options
context:
space:
mode:
authorUladzislau Rezki (Sony) <urezki@gmail.com>2025-10-07 14:20:29 +0200
committerAndrew Morton <akpm@linux-foundation.org>2025-11-16 17:27:53 -0800
commit9c47753167a6a585d0305663c6912f042e131c2d (patch)
tree928961c875509967fff841076fde5a6c72a78a75 /mm/vmalloc.c
parent86e968d8ca6dc823086b4436721a6e3a10241503 (diff)
mm/vmalloc: defer freeing partly initialized vm_struct
__vmalloc_area_node() may call free_vmap_area() or vfree() on error paths, both of which can sleep. This becomes problematic if the function is invoked from an atomic context, such as when GFP_ATOMIC or GFP_NOWAIT is passed via gfp_mask. To fix this, unify error paths and defer the cleanup of partly initialized vm_struct objects to a workqueue. This ensures that freeing happens in a process context and avoids invalid sleeps in atomic regions. Link: https://lkml.kernel.org/r/20251007122035.56347-5-urezki@gmail.com Signed-off-by: Uladzislau Rezki (Sony) <urezki@gmail.com> Acked-by: Michal Hocko <mhocko@suse.com> Reviewed-by: Baoquan He <bhe@redhat.com> Cc: Alexander Potapenko <glider@google.com> Cc: Andrey Ryabinin <ryabinin.a.a@gmail.com> Cc: Marco Elver <elver@google.com> Cc: Michal Hocko <mhocko@kernel.org> Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Diffstat (limited to 'mm/vmalloc.c')
-rw-r--r--  mm/vmalloc.c | 34
1 file changed, 31 insertions(+), 3 deletions(-)
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index d83c01caaabe..9e29dd767c41 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -3687,6 +3687,35 @@ vm_area_alloc_pages(gfp_t gfp, int nid,
return nr_allocated;
}
+static LLIST_HEAD(pending_vm_area_cleanup);
+static void cleanup_vm_area_work(struct work_struct *work)
+{
+ struct vm_struct *area, *tmp;
+ struct llist_node *head;
+
+ head = llist_del_all(&pending_vm_area_cleanup);
+ if (!head)
+ return;
+
+ llist_for_each_entry_safe(area, tmp, head, llnode) {
+ if (!area->pages)
+ free_vm_area(area);
+ else
+ vfree(area->addr);
+ }
+}
+
+/*
+ * Helper for __vmalloc_area_node() to defer cleanup
+ * of partially initialized vm_struct in error paths.
+ */
+static DECLARE_WORK(cleanup_vm_area, cleanup_vm_area_work);
+static void defer_vm_area_cleanup(struct vm_struct *area)
+{
+ if (llist_add(&area->llnode, &pending_vm_area_cleanup))
+ schedule_work(&cleanup_vm_area);
+}
+
static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
pgprot_t prot, unsigned int page_shift,
int node)
@@ -3718,8 +3747,7 @@ static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
warn_alloc(gfp_mask, NULL,
"vmalloc error: size %lu, failed to allocated page array size %lu",
nr_small_pages * PAGE_SIZE, array_size);
- free_vm_area(area);
- return NULL;
+ goto fail;
}
set_vm_area_page_order(area, page_shift - PAGE_SHIFT);
@@ -3796,7 +3824,7 @@ static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
return area->addr;
fail:
- vfree(area->addr);
+ defer_vm_area_cleanup(area);
return NULL;
}