| author | Linus Torvalds <torvalds@linux-foundation.org> | 2025-10-22 14:57:35 -1000 |
|---|---|---|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2025-10-22 14:57:35 -1000 |
| commit | 0f3ad9c6105f32d1755c0bd54a7f98c892f3ceb7 | |
| tree | 2f57d57e09e41c26747cd6a68ea99a628b73de77 | |
| parent | dd72c8fcf6d35de5d6d976f20dc1ae84ce7af08b | |
| parent | 9aa12167ef1149d9980713b120ddcb31cf17222d | |
Merge tag 'mm-hotfixes-stable-2025-10-22-12-43' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm
Pull hotfixes from Andrew Morton:
"17 hotfixes. 12 are cc:stable and 14 are for MM.
There's a two-patch DAMON series from SeongJae Park which addresses a
missed check and possible memory leak. Apart from that it's all
singletons - please see the changelogs for details"
* tag 'mm-hotfixes-stable-2025-10-22-12-43' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm:
csky: abiv2: adapt to new folio flags field
mm/damon/core: use damos_commit_quota_goal() for new goal commit
mm/damon/core: fix potential memory leak by cleaning ops_filter in damon_destroy_scheme
hugetlbfs: move lock assertions after early returns in huge_pmd_unshare()
vmw_balloon: indicate success when effectively deflating during migration
mm/damon/core: fix list_add_tail() call on damon_call()
mm/mremap: correctly account old mapping after MREMAP_DONTUNMAP remap
mm: prevent poison consumption when splitting THP
ocfs2: clear extent cache after moving/defragmenting extents
mm: don't spin in add_stack_record when gfp flags don't allow
dma-debug: don't report false positives with DMA_BOUNCE_UNALIGNED_KMALLOC
mm/damon/sysfs: dealloc commit test ctx always
mm/damon/sysfs: catch commit test ctx alloc failure
hung_task: fix warnings caused by unaligned lock pointers
| -rw-r--r-- | arch/csky/abiv2/cacheflush.c | 2 |
| -rw-r--r-- | arch/csky/abiv2/inc/abi/cacheflush.h | 4 |
| -rw-r--r-- | drivers/misc/vmw_balloon.c | 8 |
| -rw-r--r-- | fs/hugetlbfs/inode.c | 9 |
| -rw-r--r-- | fs/ocfs2/move_extents.c | 5 |
| -rw-r--r-- | include/linux/hung_task.h | 8 |
| -rw-r--r-- | kernel/dma/debug.c | 5 |
| -rw-r--r-- | mm/damon/core.c | 7 |
| -rw-r--r-- | mm/damon/sysfs.c | 7 |
| -rw-r--r-- | mm/huge_memory.c | 3 |
| -rw-r--r-- | mm/hugetlb.c | 5 |
| -rw-r--r-- | mm/migrate.c | 3 |
| -rw-r--r-- | mm/mremap.c | 15 |
| -rw-r--r-- | mm/page_owner.c | 3 |
14 files changed, 45 insertions, 39 deletions
diff --git a/arch/csky/abiv2/cacheflush.c b/arch/csky/abiv2/cacheflush.c
index 876028b1083f..064b0f0f95ca 100644
--- a/arch/csky/abiv2/cacheflush.c
+++ b/arch/csky/abiv2/cacheflush.c
@@ -21,7 +21,7 @@ void update_mmu_cache_range(struct vm_fault *vmf, struct vm_area_struct *vma,
 	folio = page_folio(pfn_to_page(pfn));
 
-	if (test_and_set_bit(PG_dcache_clean, &folio->flags))
+	if (test_and_set_bit(PG_dcache_clean, &folio->flags.f))
 		return;
 
 	icache_inv_range(address, address + nr*PAGE_SIZE);
diff --git a/arch/csky/abiv2/inc/abi/cacheflush.h b/arch/csky/abiv2/inc/abi/cacheflush.h
index 6513ac5d2578..da51a0f02391 100644
--- a/arch/csky/abiv2/inc/abi/cacheflush.h
+++ b/arch/csky/abiv2/inc/abi/cacheflush.h
@@ -20,8 +20,8 @@
 static inline void flush_dcache_folio(struct folio *folio)
 {
-	if (test_bit(PG_dcache_clean, &folio->flags))
-		clear_bit(PG_dcache_clean, &folio->flags);
+	if (test_bit(PG_dcache_clean, &folio->flags.f))
+		clear_bit(PG_dcache_clean, &folio->flags.f);
 }
 
 #define flush_dcache_folio flush_dcache_folio
diff --git a/drivers/misc/vmw_balloon.c b/drivers/misc/vmw_balloon.c
index 6df51ee8db62..cc1d18b3df5c 100644
--- a/drivers/misc/vmw_balloon.c
+++ b/drivers/misc/vmw_balloon.c
@@ -1737,7 +1737,7 @@ static int vmballoon_migratepage(struct balloon_dev_info *b_dev_info,
 {
 	unsigned long status, flags;
 	struct vmballoon *b;
-	int ret;
+	int ret = 0;
 
 	b = container_of(b_dev_info, struct vmballoon, b_dev_info);
@@ -1796,17 +1796,15 @@ static int vmballoon_migratepage(struct balloon_dev_info *b_dev_info,
 		 * A failure happened. While we can deflate the page we just
 		 * inflated, this deflation can also encounter an error. Instead
 		 * we will decrease the size of the balloon to reflect the
-		 * change and report failure.
+		 * change.
 		 */
 		atomic64_dec(&b->size);
-		ret = -EBUSY;
 	} else {
 		/*
 		 * Success. Take a reference for the page, and we will add it to
 		 * the list after acquiring the lock.
 		 */
 		get_page(newpage);
-		ret = 0;
 	}
 
 	/* Update the balloon list under the @pages_lock */
@@ -1817,7 +1815,7 @@ static int vmballoon_migratepage(struct balloon_dev_info *b_dev_info,
 	 * If we succeed just insert it to the list and update the statistics
 	 * under the lock.
 	 */
-	if (!ret) {
+	if (status == VMW_BALLOON_SUCCESS) {
 		balloon_page_insert(&b->b_dev_info, newpage);
 		__count_vm_event(BALLOON_MIGRATE);
 	}
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index 9c94ed8c3ab0..f42548ee9083 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -478,14 +478,6 @@ hugetlb_vmdelete_list(struct rb_root_cached *root, pgoff_t start, pgoff_t end,
 		if (!hugetlb_vma_trylock_write(vma))
 			continue;
 
-		/*
-		 * Skip VMAs without shareable locks. Per the design in commit
-		 * 40549ba8f8e0, these will be handled by remove_inode_hugepages()
-		 * called after this function with proper locking.
-		 */
-		if (!__vma_shareable_lock(vma))
-			goto skip;
-
 		v_start = vma_offset_start(vma, start);
 		v_end = vma_offset_end(vma, end);
@@ -496,7 +488,6 @@ hugetlb_vmdelete_list(struct rb_root_cached *root, pgoff_t start, pgoff_t end,
 		 * vmas. Therefore, lock is not held when calling
 		 * unmap_hugepage_range for private vmas.
 		 */
-skip:
 		hugetlb_vma_unlock_write(vma);
 	}
 }
diff --git a/fs/ocfs2/move_extents.c b/fs/ocfs2/move_extents.c
index 86f2631e6360..10923bf7c8b8 100644
--- a/fs/ocfs2/move_extents.c
+++ b/fs/ocfs2/move_extents.c
@@ -867,6 +867,11 @@ static int __ocfs2_move_extents_range(struct buffer_head *di_bh,
 			mlog_errno(ret);
 			goto out;
 		}
+		/*
+		 * Invalidate extent cache after moving/defragging to prevent
+		 * stale cached data with outdated extent flags.
+		 */
+		ocfs2_extent_map_trunc(inode, cpos);
 
 		context->clusters_moved += alloc_size;
 next:
diff --git a/include/linux/hung_task.h b/include/linux/hung_task.h
index 34e615c76ca5..c4403eeb7144 100644
--- a/include/linux/hung_task.h
+++ b/include/linux/hung_task.h
@@ -20,6 +20,10 @@
 * always zero. So we can use these bits to encode the specific blocking
 * type.
 *
+ * Note that on architectures where this is not guaranteed, or for any
+ * unaligned lock, this tracking mechanism is silently skipped for that
+ * lock.
+ *
 * Type encoding:
 * 00 - Blocked on mutex (BLOCKER_TYPE_MUTEX)
 * 01 - Blocked on semaphore (BLOCKER_TYPE_SEM)
@@ -45,7 +49,7 @@ static inline void hung_task_set_blocker(void *lock, unsigned long type)
	 * If the lock pointer matches the BLOCKER_TYPE_MASK, return
	 * without writing anything.
	 */
-	if (WARN_ON_ONCE(lock_ptr & BLOCKER_TYPE_MASK))
+	if (lock_ptr & BLOCKER_TYPE_MASK)
 		return;
 
 	WRITE_ONCE(current->blocker, lock_ptr | type);
@@ -53,8 +57,6 @@ static inline void hung_task_set_blocker(void *lock, unsigned long type)
 
 static inline void hung_task_clear_blocker(void)
 {
-	WARN_ON_ONCE(!READ_ONCE(current->blocker));
-
 	WRITE_ONCE(current->blocker, 0UL);
 }
diff --git a/kernel/dma/debug.c b/kernel/dma/debug.c
index 1e5c64cb6a42..138ede653de4 100644
--- a/kernel/dma/debug.c
+++ b/kernel/dma/debug.c
@@ -23,6 +23,7 @@
 #include <linux/ctype.h>
 #include <linux/list.h>
 #include <linux/slab.h>
+#include <linux/swiotlb.h>
 #include <asm/sections.h>
 
 #include "debug.h"
@@ -594,7 +595,9 @@ static void add_dma_entry(struct dma_debug_entry *entry, unsigned long attrs)
 	if (rc == -ENOMEM) {
 		pr_err_once("cacheline tracking ENOMEM, dma-debug disabled\n");
 		global_disable = true;
-	} else if (rc == -EEXIST && !(attrs & DMA_ATTR_SKIP_CPU_SYNC)) {
+	} else if (rc == -EEXIST && !(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
+		   !(IS_ENABLED(CONFIG_DMA_BOUNCE_UNALIGNED_KMALLOC) &&
+		     is_swiotlb_active(entry->dev))) {
 		err_printk(entry->dev, entry,
 			   "cacheline tracking EEXIST, overlapping mappings aren't supported\n");
 	}
diff --git a/mm/damon/core.c b/mm/damon/core.c
index 93848b4c6944..109b050c795a 100644
--- a/mm/damon/core.c
+++ b/mm/damon/core.c
@@ -452,6 +452,9 @@ void damon_destroy_scheme(struct damos *s)
 	damos_for_each_filter_safe(f, next, s)
 		damos_destroy_filter(f);
 
+	damos_for_each_ops_filter_safe(f, next, s)
+		damos_destroy_filter(f);
+
 	kfree(s->migrate_dests.node_id_arr);
 	kfree(s->migrate_dests.weight_arr);
 	damon_del_scheme(s);
@@ -832,7 +835,7 @@ int damos_commit_quota_goals(struct damos_quota *dst, struct damos_quota *src)
 				src_goal->metric, src_goal->target_value);
 		if (!new_goal)
 			return -ENOMEM;
-		damos_commit_quota_goal_union(new_goal, src_goal);
+		damos_commit_quota_goal(new_goal, src_goal);
 		damos_add_quota_goal(dst, new_goal);
 	}
 	return 0;
@@ -1450,7 +1453,7 @@ int damon_call(struct damon_ctx *ctx, struct damon_call_control *control)
 	INIT_LIST_HEAD(&control->list);
 
 	mutex_lock(&ctx->call_controls_lock);
-	list_add_tail(&ctx->call_controls, &control->list);
+	list_add_tail(&control->list, &ctx->call_controls);
 	mutex_unlock(&ctx->call_controls_lock);
 	if (!damon_is_running(ctx))
 		return -EINVAL;
diff --git a/mm/damon/sysfs.c b/mm/damon/sysfs.c
index 2fc722f998f8..cd6815ecc04e 100644
--- a/mm/damon/sysfs.c
+++ b/mm/damon/sysfs.c
@@ -1473,13 +1473,14 @@ static int damon_sysfs_commit_input(void *data)
 	if (IS_ERR(param_ctx))
 		return PTR_ERR(param_ctx);
 	test_ctx = damon_new_ctx();
+	if (!test_ctx)
+		return -ENOMEM;
 	err = damon_commit_ctx(test_ctx, param_ctx);
-	if (err) {
-		damon_destroy_ctx(test_ctx);
+	if (err)
 		goto out;
-	}
 	err = damon_commit_ctx(kdamond->damon_ctx, param_ctx);
 out:
+	damon_destroy_ctx(test_ctx);
 	damon_destroy_ctx(param_ctx);
 	return err;
 }
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 1b81680b4225..1d1b74950332 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -4109,6 +4109,9 @@ static bool thp_underused(struct folio *folio)
 	if (khugepaged_max_ptes_none == HPAGE_PMD_NR - 1)
 		return false;
 
+	if (folio_contain_hwpoisoned_page(folio))
+		return false;
+
 	for (i = 0; i < folio_nr_pages(folio); i++) {
 		if (pages_identical(folio_page(folio, i), ZERO_PAGE(0))) {
 			if (++num_zero_pages > khugepaged_max_ptes_none)
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 795ee393eac0..0455119716ec 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -7614,13 +7614,12 @@ int huge_pmd_unshare(struct mm_struct *mm, struct vm_area_struct *vma,
 	p4d_t *p4d = p4d_offset(pgd, addr);
 	pud_t *pud = pud_offset(p4d, addr);
 
-	i_mmap_assert_write_locked(vma->vm_file->f_mapping);
-	hugetlb_vma_assert_locked(vma);
 	if (sz != PMD_SIZE)
 		return 0;
 	if (!ptdesc_pmd_is_shared(virt_to_ptdesc(ptep)))
 		return 0;
-
+	i_mmap_assert_write_locked(vma->vm_file->f_mapping);
+	hugetlb_vma_assert_locked(vma);
 	pud_clear(pud);
 	/*
 	 * Once our caller drops the rmap lock, some other process might be
diff --git a/mm/migrate.c b/mm/migrate.c
index e3065c9edb55..c0e9f15be2a2 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -301,8 +301,9 @@ static bool try_to_map_unused_to_zeropage(struct page_vma_mapped_walk *pvmw,
 	struct page *page = folio_page(folio, idx);
 	pte_t newpte;
 
-	if (PageCompound(page))
+	if (PageCompound(page) || PageHWPoison(page))
 		return false;
+
 	VM_BUG_ON_PAGE(!PageAnon(page), page);
 	VM_BUG_ON_PAGE(!PageLocked(page), page);
 	VM_BUG_ON_PAGE(pte_present(old_pte), page);
diff --git a/mm/mremap.c b/mm/mremap.c
index 35de0a7b910e..bd7314898ec5 100644
--- a/mm/mremap.c
+++ b/mm/mremap.c
@@ -1237,10 +1237,10 @@ static int copy_vma_and_data(struct vma_remap_struct *vrm,
 }
 
 /*
- * Perform final tasks for MADV_DONTUNMAP operation, clearing mlock() and
- * account flags on remaining VMA by convention (it cannot be mlock()'d any
- * longer, as pages in range are no longer mapped), and removing anon_vma_chain
- * links from it (if the entire VMA was copied over).
+ * Perform final tasks for MADV_DONTUNMAP operation, clearing mlock() flag on
+ * remaining VMA by convention (it cannot be mlock()'d any longer, as pages in
+ * range are no longer mapped), and removing anon_vma_chain links from it if the
+ * entire VMA was copied over.
 */
 static void dontunmap_complete(struct vma_remap_struct *vrm,
 			       struct vm_area_struct *new_vma)
@@ -1250,11 +1250,8 @@ static void dontunmap_complete(struct vma_remap_struct *vrm,
 	unsigned long old_start = vrm->vma->vm_start;
 	unsigned long old_end = vrm->vma->vm_end;
 
-	/*
-	 * We always clear VM_LOCKED[ONFAULT] | VM_ACCOUNT on the old
-	 * vma.
-	 */
-	vm_flags_clear(vrm->vma, VM_LOCKED_MASK | VM_ACCOUNT);
+	/* We always clear VM_LOCKED[ONFAULT] on the old VMA. */
+	vm_flags_clear(vrm->vma, VM_LOCKED_MASK);
 
 	/*
 	 * anon_vma links of the old vma is no longer needed after its page
diff --git a/mm/page_owner.c b/mm/page_owner.c
index c3ca21132c2c..589ec37c94aa 100644
--- a/mm/page_owner.c
+++ b/mm/page_owner.c
@@ -168,6 +168,9 @@ static void add_stack_record_to_list(struct stack_record *stack_record,
 	unsigned long flags;
 	struct stack *stack;
 
+	if (!gfpflags_allow_spinning(gfp_mask))
+		return;
+
 	set_current_in_page_owner();
 	stack = kmalloc(sizeof(*stack), gfp_nested_mask(gfp_mask));
 	if (!stack) {
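The include/linux/hung_task.h hunk above leans on the pointer-tagging convention its comment block documents: lock objects are word-aligned, so the two low bits of a lock's address are always zero and can carry the blocker type alongside the pointer in current->blocker. Below is a minimal userspace sketch of that scheme; the BLOCKER_TYPE_* values mirror the kernel's documented encoding for mutex and semaphore, while set_blocker() and the blocker variable are hypothetical stand-ins for the kernel's hung_task_set_blocker() and current->blocker.

```c
/*
 * Minimal userspace sketch (not kernel code) of the low-bit pointer
 * tagging used by the hung_task blocker tracking: lock objects are at
 * least word-aligned, so the two least-significant bits of a lock's
 * address are zero and can carry a blocker-type tag with the pointer.
 */
#include <assert.h>
#include <stdio.h>

#define BLOCKER_TYPE_MUTEX	0x0UL	/* 00 - blocked on mutex */
#define BLOCKER_TYPE_SEM	0x1UL	/* 01 - blocked on semaphore */
#define BLOCKER_TYPE_MASK	0x3UL	/* low two bits hold the type */

static unsigned long blocker;		/* stands in for current->blocker */

static void set_blocker(void *lock, unsigned long type)
{
	unsigned long lock_ptr = (unsigned long)lock;

	/* An unaligned lock cannot carry a tag: skip tracking silently. */
	if (lock_ptr & BLOCKER_TYPE_MASK)
		return;

	blocker = lock_ptr | type;
}

int main(void)
{
	long lock;	/* word-aligned, so its low two bits are zero */

	set_blocker(&lock, BLOCKER_TYPE_SEM);

	/* Masking off the tag bits recovers the original pointer. */
	assert((void *)(blocker & ~BLOCKER_TYPE_MASK) == (void *)&lock);
	assert((blocker & BLOCKER_TYPE_MASK) == BLOCKER_TYPE_SEM);

	printf("lock=%p type=%lu\n",
	       (void *)(blocker & ~BLOCKER_TYPE_MASK),
	       blocker & BLOCKER_TYPE_MASK);
	return 0;
}
```

The sketch's unaligned-pointer bailout mirrors the hotfix itself: the patch downgrades the WARN_ON_ONCE to a plain check, so an unaligned lock pointer now means "skip tracking for this lock" rather than a kernel warning, matching the "silently skipped" behavior the new comment documents.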