| author | Andrew Morton <akpm@linux-foundation.org> | 2025-11-24 15:07:34 -0800 |
|---|---|---|
| committer | Andrew Morton <akpm@linux-foundation.org> | 2025-11-24 15:07:34 -0800 |
| commit | 87fcafc4e2b47da41300a0db558afc74d2e418b7 (patch) | |
| tree | e4e8918cad2944759fc48ab3c900d560c93c8e28 /mm | |
| parent | c7ba92bcfea34f6b4afc744c3b65c8f7420fefe0 (diff) | |
| parent | 7c9580f44f90f7a4c11fc7831efe323ebe446091 (diff) | |
Merge branch 'mm-hotfixes-stable' into mm-stable in order to merge
"mm/huge_memory: only get folio_order() once during __folio_split()" into
mm-stable.
Diffstat (limited to 'mm')
| mode | file | lines |
|---|---|---|
| -rw-r--r-- | mm/filemap.c | 27 |
| -rw-r--r-- | mm/huge_memory.c | 22 |
| -rw-r--r-- | mm/memfd.c | 27 |
| -rw-r--r-- | mm/mmap_lock.c | 1 |
| -rw-r--r-- | mm/swapfile.c | 4 |
5 files changed, 53 insertions, 28 deletions
```diff
diff --git a/mm/filemap.c b/mm/filemap.c
index ff75bd89b68c..f0c36df1def7 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -3691,8 +3691,9 @@ static vm_fault_t filemap_map_folio_range(struct vm_fault *vmf,
 			struct folio *folio, unsigned long start,
 			unsigned long addr, unsigned int nr_pages,
 			unsigned long *rss, unsigned short *mmap_miss,
-			bool can_map_large)
+			pgoff_t file_end)
 {
+	struct address_space *mapping = folio->mapping;
 	unsigned int ref_from_caller = 1;
 	vm_fault_t ret = 0;
 	struct page *page = folio_page(folio, start);
@@ -3701,12 +3702,16 @@ static vm_fault_t filemap_map_folio_range(struct vm_fault *vmf,
 	unsigned long addr0;
 
 	/*
-	 * Map the large folio fully where possible.
+	 * Map the large folio fully where possible:
 	 *
-	 * The folio must not cross VMA or page table boundary.
+	 * - The folio is fully within size of the file or belong
+	 *   to shmem/tmpfs;
+	 * - The folio doesn't cross VMA boundary;
+	 * - The folio doesn't cross page table boundary;
 	 */
 	addr0 = addr - start * PAGE_SIZE;
-	if (can_map_large && folio_within_vma(folio, vmf->vma) &&
+	if ((file_end >= folio_next_index(folio) || shmem_mapping(mapping)) &&
+	    folio_within_vma(folio, vmf->vma) &&
 	    (addr0 & PMD_MASK) == ((addr0 + folio_size(folio) - 1) & PMD_MASK)) {
 		vmf->pte -= start;
 		page -= start;
@@ -3821,7 +3826,6 @@ vm_fault_t filemap_map_pages(struct vm_fault *vmf,
 	unsigned long rss = 0;
 	unsigned int nr_pages = 0, folio_type;
 	unsigned short mmap_miss = 0, mmap_miss_saved;
-	bool can_map_large;
 
 	rcu_read_lock();
 	folio = next_uptodate_folio(&xas, mapping, end_pgoff);
@@ -3832,16 +3836,14 @@ vm_fault_t filemap_map_pages(struct vm_fault *vmf,
 	end_pgoff = min(end_pgoff, file_end);
 
 	/*
-	 * Do not allow to map with PTEs beyond i_size and with PMD
-	 * across i_size to preserve SIGBUS semantics.
+	 * Do not allow to map with PMD across i_size to preserve
+	 * SIGBUS semantics.
 	 *
 	 * Make an exception for shmem/tmpfs that for long time
 	 * intentionally mapped with PMDs across i_size.
 	 */
-	can_map_large = shmem_mapping(mapping) ||
-		file_end >= folio_next_index(folio);
-
-	if (can_map_large && filemap_map_pmd(vmf, folio, start_pgoff)) {
+	if ((file_end >= folio_next_index(folio) || shmem_mapping(mapping)) &&
+	    filemap_map_pmd(vmf, folio, start_pgoff)) {
 		ret = VM_FAULT_NOPAGE;
 		goto out;
 	}
@@ -3870,8 +3872,7 @@ vm_fault_t filemap_map_pages(struct vm_fault *vmf,
 		else
 			ret |= filemap_map_folio_range(vmf, folio,
 					xas.xa_index - folio->index, addr,
-					nr_pages, &rss, &mmap_miss,
-					can_map_large);
+					nr_pages, &rss, &mmap_miss, file_end);
 
 		folio_unlock(folio);
 	} while ((folio = next_uptodate_folio(&xas, mapping, end_pgoff)) != NULL);
```
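The batch-mapping test in the filemap.c hunk is easiest to check with concrete numbers. The following is a minimal userspace sketch, assuming x86-64's 2 MiB PMD span; `PMD_SHIFT`, `PMD_MASK` and `within_one_page_table()` here are local stand-ins rather than the kernel's arch macros.

```c
#include <stdbool.h>
#include <stdio.h>

/* Stand-ins for the arch macros; values assume x86-64 (2 MiB PMDs). */
#define PMD_SHIFT 21
#define PMD_MASK  (~((1UL << PMD_SHIFT) - 1))

/* Mirrors the check in filemap_map_folio_range(): the folio mapped at
 * addr0 fits in one page table iff its first and last bytes fall in
 * the same PMD-aligned region. */
static bool within_one_page_table(unsigned long addr0, unsigned long size)
{
	return (addr0 & PMD_MASK) == ((addr0 + size - 1) & PMD_MASK);
}

int main(void)
{
	/* 64 KiB folio ending exactly at a 2 MiB boundary: OK to batch. */
	printf("%d\n", within_one_page_table(0x200000 - 0x10000, 0x10000));
	/* Same folio shifted to straddle the boundary: fall back. */
	printf("%d\n", within_one_page_table(0x200000 - 0x8000, 0x10000));
	return 0;
}
```

The first call prints 1 and the second prints 0; only in the first case may the whole folio be mapped in one batched walk of vmf->pte.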
```diff
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index d716c6965e27..3ae16b4a82de 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -3560,6 +3560,16 @@ static int __folio_split(struct folio *folio, unsigned int new_order,
 	if (folio != page_folio(split_at) || folio != page_folio(lock_at))
 		return -EINVAL;
 
+	/*
+	 * Folios that just got truncated cannot get split. Signal to the
+	 * caller that there was a race.
+	 *
+	 * TODO: this will also currently refuse shmem folios that are in the
+	 * swapcache.
+	 */
+	if (!is_anon && !folio->mapping)
+		return -EBUSY;
+
 	if (new_order >= folio_order(folio))
 		return -EINVAL;
 
@@ -3600,18 +3610,6 @@ static int __folio_split(struct folio *folio, unsigned int new_order,
 		gfp_t gfp;
 
 		mapping = folio->mapping;
-
-		/* Truncated ? */
-		/*
-		 * TODO: add support for large shmem folio in swap cache.
-		 * When shmem is in swap cache, mapping is NULL and
-		 * folio_test_swapcache() is true.
-		 */
-		if (!mapping) {
-			ret = -EBUSY;
-			goto out;
-		}
-
 		min_order = mapping_min_folio_order(folio->mapping);
 		if (new_order < min_order) {
 			ret = -EINVAL;
diff --git a/mm/memfd.c b/mm/memfd.c
index 1d109c1acf21..a405eaa451ee 100644
--- a/mm/memfd.c
+++ b/mm/memfd.c
@@ -96,9 +96,36 @@ struct folio *memfd_alloc_folio(struct file *memfd, pgoff_t idx)
 						NULL,
 						gfp_mask);
 		if (folio) {
+			u32 hash;
+
+			/*
+			 * Zero the folio to prevent information leaks to userspace.
+			 * Use folio_zero_user() which is optimized for huge/gigantic
+			 * pages. Pass 0 as addr_hint since this is not a faulting path
+			 * and we don't have a user virtual address yet.
+			 */
+			folio_zero_user(folio, 0);
+
+			/*
+			 * Mark the folio uptodate before adding to page cache,
+			 * as required by filemap.c and other hugetlb paths.
+			 */
+			__folio_mark_uptodate(folio);
+
+			/*
+			 * Serialize hugepage allocation and instantiation to prevent
+			 * races with concurrent allocations, as required by all other
+			 * callers of hugetlb_add_to_page_cache().
+			 */
+			hash = hugetlb_fault_mutex_hash(memfd->f_mapping, idx);
+			mutex_lock(&hugetlb_fault_mutex_table[hash]);
+
 			err = hugetlb_add_to_page_cache(folio,
 							memfd->f_mapping,
 							idx);
+
+			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
+
 			if (err) {
 				folio_put(folio);
 				goto err_unresv;
diff --git a/mm/mmap_lock.c b/mm/mmap_lock.c
index 39f341caf32c..f2532af6208c 100644
--- a/mm/mmap_lock.c
+++ b/mm/mmap_lock.c
@@ -257,6 +257,7 @@ retry:
 	if (PTR_ERR(vma) == -EAGAIN) {
 		count_vm_vma_lock_event(VMA_LOCK_MISS);
 		/* The area was replaced with another one */
+		mas_set(&mas, address);
 		goto retry;
 	}
 
diff --git a/mm/swapfile.c b/mm/swapfile.c
index c35bb8593f50..04435f7ae7bf 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -2023,10 +2023,8 @@ swp_entry_t get_swap_page_of_type(int type)
 		local_lock(&percpu_swap_cluster.lock);
 		offset = cluster_alloc_swap_entry(si, 0, 1);
 		local_unlock(&percpu_swap_cluster.lock);
-		if (offset) {
+		if (offset)
 			entry = swp_entry(si->type, offset);
-			atomic_long_dec(&nr_swap_pages);
-		}
 	}
 	put_swap_device(si);
 }
```
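The huge_memory.c change moves the truncation check ahead of the order checks and reports the race as -EBUSY for both the anon and pagecache branches. Below is a minimal userspace model of that ordering; `struct folio_model` and `split_model()` are illustrative stand-ins, not kernel APIs.

```c
#include <errno.h>
#include <stdio.h>

struct folio_model {
	void *mapping;	/* NULL once truncation disconnects the folio */
	int   is_anon;
};

static int split_model(struct folio_model *f)
{
	/* Truncated pagecache folio: lost the race, nothing to split. */
	if (!f->is_anon && !f->mapping)
		return -EBUSY;

	/* ...order checks, freezing and the actual split would follow... */
	return 0;
}

int main(void)
{
	int dummy;
	struct folio_model truncated = { .mapping = NULL,   .is_anon = 0 };
	struct folio_model live      = { .mapping = &dummy, .is_anon = 0 };

	printf("truncated: %d\n", split_model(&truncated));	/* -EBUSY */
	printf("live:      %d\n", split_model(&live));		/* 0 */
	return 0;
}
```

A caller that sees -EBUSY knows truncation won a benign race and can simply skip the folio.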
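The memfd.c hunk serializes page-cache insertion through a hash-indexed mutex table rather than one global lock. Here is a userspace pthreads model of that pattern; `fault_mutex_hash()` and `add_to_cache_locked()` are illustrative stand-ins for `hugetlb_fault_mutex_hash()` and the now-locked `hugetlb_add_to_page_cache()` call.

```c
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

#define TABLE_SIZE 64

/* Stand-in for hugetlb_fault_mutex_table[]: one mutex per hash bucket. */
static pthread_mutex_t fault_mutex_table[TABLE_SIZE];

/* Stand-in for hugetlb_fault_mutex_hash(): mix the mapping pointer and
 * the file index so the same (mapping, idx) always picks one bucket. */
static uint32_t fault_mutex_hash(const void *mapping, uint64_t idx)
{
	uint64_t h = (uintptr_t)mapping ^ (idx * 0x9e3779b97f4a7c15ULL);

	return (uint32_t)(h % TABLE_SIZE);
}

/* Model of the locked section added to memfd_alloc_folio(): everything
 * between lock and unlock is serialized against other users of the
 * same (mapping, idx). */
static int add_to_cache_locked(const void *mapping, uint64_t idx)
{
	uint32_t hash = fault_mutex_hash(mapping, idx);

	pthread_mutex_lock(&fault_mutex_table[hash]);
	/* ...zero, mark uptodate and insert the folio here... */
	pthread_mutex_unlock(&fault_mutex_table[hash]);
	return 0;
}

int main(void)
{
	int mapping;	/* any stable address works as a mapping identity */

	for (int i = 0; i < TABLE_SIZE; i++)
		pthread_mutex_init(&fault_mutex_table[i], NULL);

	printf("err=%d\n", add_to_cache_locked(&mapping, 42));
	return 0;
}
```

Because the bucket is chosen by hashing (mapping, idx), two allocators racing on the same file offset serialize on one mutex while unrelated offsets proceed in parallel.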
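The one-line mmap_lock.c fix re-arms the maple tree state with `mas_set(&mas, address)` before retrying; otherwise the retry resumes from whatever position the failed walk left the iterator at. A compilable sketch of that general pattern, where `struct cursor`, `cursor_set()` and `lookup_once()` are hypothetical stand-ins for the mas_* API:

```c
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Stand-in for the maple tree cursor (struct ma_state). */
struct cursor {
	size_t pos;
};

/* Stand-in for mas_set(): re-arm the walk at a given key. */
static void cursor_set(struct cursor *c, size_t key)
{
	c->pos = key;
}

/* One walk attempt; may report that a concurrent writer replaced the
 * entry, leaving the cursor parked at an arbitrary position. */
static void *lookup_once(struct cursor *c, bool *raced)
{
	*raced = false;		/* no concurrency in this model */
	return NULL;
}

static void *lookup_retry(struct cursor *c, size_t key)
{
	void *ret;
	bool raced;

	cursor_set(c, key);
retry:
	ret = lookup_once(c, &raced);
	if (raced) {
		/* The fix: without this reset, the retry would resume
		 * from the stale cursor position, not from `key`. */
		cursor_set(c, key);
		goto retry;
	}
	return ret;
}

int main(void)
{
	struct cursor c;

	printf("%p\n", lookup_retry(&c, 0x1000));
	return 0;
}
```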
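The swapfile.c hunk removes a second decrement of nr_swap_pages in get_swap_page_of_type(); the allocation path entered via cluster_alloc_swap_entry() presumably performs that accounting already, so the extra atomic_long_dec() double-counted. A tiny C11 model of the hazard, with `nr_free` and `alloc_entry()` as illustrative names:

```c
#include <stdatomic.h>
#include <stdio.h>

/* Stand-in for the global nr_swap_pages counter. */
static atomic_long nr_free = 100;

/* Stand-in for the allocator: it already accounts for the entry
 * it hands out. */
static long alloc_entry(void)
{
	atomic_fetch_sub(&nr_free, 1);
	return 1;	/* some nonzero offset */
}

int main(void)
{
	long offset = alloc_entry();

	if (offset) {
		/* The removed line would repeat the accounting here:
		 * atomic_fetch_sub(&nr_free, 1);
		 */
	}
	printf("nr_free = %ld\n", atomic_load(&nr_free));	/* 99, not 98 */
	return 0;
}
```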