author    Dan Williams <dan.j.williams@intel.com>    2025-05-12 22:12:44 -0700
committer Dan Williams <dan.j.williams@intel.com>    2025-05-12 22:12:44 -0700
commit    15ff5d0e90bb044b94d9a4ac57276829861a954d (patch)
tree      28bcf60949db629b0b772f6eebf026714dc38fe9 /mm/memory.c
parent    a0372b3831785e15de605cd13d2ed78a79a9b435 (diff)
parent    7c3f259dfe03f5dcd898126602818a8fbe94d3c5 (diff)
Merge branch 'for-6.16/tsm-mr' into tsm-next
Merge measurement-register infrastructure for v6.16. Resolve conflicts with the establishment of drivers/virt/coco/guest/ for cross-vendor common TSM functionality. Address a mis-merge with a fixup from Lukas:

Link: http://lore.kernel.org/20250509134031.70559-1-lukas.bulwahn@redhat.com
Diffstat (limited to 'mm/memory.c')
-rw-r--r-- mm/memory.c | 10
1 file changed, 5 insertions, 5 deletions
diff --git a/mm/memory.c b/mm/memory.c
index 2d8c265fc7d6..ba3ea0a82f7f 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1361,7 +1361,7 @@ copy_page_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma)
struct mm_struct *dst_mm = dst_vma->vm_mm;
struct mm_struct *src_mm = src_vma->vm_mm;
struct mmu_notifier_range range;
- unsigned long next, pfn;
+ unsigned long next, pfn = 0;
bool is_cow;
int ret;
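The hunk above zero-initializes pfn in copy_page_range(). In the full function (not visible in this excerpt) pfn is only assigned on the VM_PFNMAP tracking path and is later handed back to the untracking helper on the error path, so the explicit pfn = 0 is presumably there to keep that conditionally-assigned local well defined and free of maybe-uninitialized warnings. A standalone sketch of the same pattern, with invented names (needs_tracking(), start_tracking(), undo_tracking(), do_copy()) standing in for the kernel helpers:

#include <stdbool.h>
#include <stdio.h>

static bool needs_tracking(int flags)
{
        return flags & 0x1;
}

static unsigned long start_tracking(void)
{
        return 42;      /* stand-in for the cookie/pfn returned by the tracking helper */
}

static void undo_tracking(unsigned long cookie)
{
        printf("undoing tracking for cookie %lu\n", cookie);
}

static int do_copy(int flags, bool fail)
{
        unsigned long cookie = 0;       /* like pfn = 0: assigned only on one branch below */
        int ret = 0;

        if (needs_tracking(flags))
                cookie = start_tracking();

        if (fail)                       /* stand-in for the copy itself failing */
                ret = -1;

        /*
         * The use is guarded by the same condition as the assignment, but a
         * compiler cannot always prove that, so the zero init above keeps
         * the error path well defined and warning-free.
         */
        if (ret && needs_tracking(flags))
                undo_tracking(cookie);

        return ret;
}

int main(void)
{
        return do_copy(0x1, true) == -1 ? 0 : 1;
}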
@@ -2938,11 +2938,11 @@ static int apply_to_pte_range(struct mm_struct *mm, pmd_t *pmd,
if (fn) {
do {
if (create || !pte_none(ptep_get(pte))) {
- err = fn(pte++, addr, data);
+ err = fn(pte, addr, data);
if (err)
break;
}
- } while (addr += PAGE_SIZE, addr != end);
+ } while (pte++, addr += PAGE_SIZE, addr != end);
}
*mask |= PGTBL_PTE_MODIFIED;
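In the hunk above, the PTE cursor used to advance only when fn() was actually invoked (err = fn(pte++, ...)), so with create false a skipped pte_none() slot left pte lagging behind addr for the rest of the walk; the fix advances pte unconditionally in the loop-advance expression, in lockstep with addr. A minimal userspace sketch of the corrected pattern, where walk(), visit() and the int table are invented stand-ins for apply_to_pte_range(), fn and the PTE array:

#include <stdio.h>

/* visit() stands in for the fn() callback; a zero slot stands in for a
 * pte_none() entry that is skipped when 'create' is false. */
static int visit(int *slot, unsigned long addr)
{
        printf("addr %lu -> slot value %d\n", addr, *slot);
        return 0;
}

static int walk(int *table, unsigned long addr, unsigned long end, int create)
{
        int *cur = table;
        int err = 0;

        do {
                if (create || *cur != 0) {
                        err = visit(cur, addr);         /* no cur++ here ... */
                        if (err)
                                break;
                }
        } while (cur++, addr += 1, addr != end);        /* ... cursor and address move together */

        return err;
}

int main(void)
{
        int table[4] = { 1, 0, 3, 4 };

        /*
         * With the old placement (advancing cur only inside the if), skipping
         * the zero slot would leave cur one entry behind addr, and every
         * later visit() would report the wrong slot.
         */
        return walk(table, 0, 4, 0);
}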
@@ -3734,8 +3734,6 @@ static bool __wp_can_reuse_large_anon_folio(struct folio *folio,
return false;
VM_WARN_ON_ONCE(folio_test_ksm(folio));
- VM_WARN_ON_ONCE(folio_mapcount(folio) > folio_nr_pages(folio));
- VM_WARN_ON_ONCE(folio_entire_mapcount(folio));
if (unlikely(folio_test_swapcache(folio))) {
/*
@@ -3760,6 +3758,8 @@ static bool __wp_can_reuse_large_anon_folio(struct folio *folio,
if (folio_large_mapcount(folio) != folio_ref_count(folio))
goto unlock;
+ VM_WARN_ON_ONCE_FOLIO(folio_large_mapcount(folio) > folio_nr_pages(folio), folio);
+ VM_WARN_ON_ONCE_FOLIO(folio_entire_mapcount(folio), folio);
VM_WARN_ON_ONCE(folio_mm_id(folio, 0) != vma->vm_mm->mm_id &&
folio_mm_id(folio, 1) != vma->vm_mm->mm_id);
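The last two hunks move the large-folio mapcount sanity checks in __wp_can_reuse_large_anon_folio() so they run only after the mapcount has been compared against the reference count, and switch them to the folio-reporting VM_WARN_ON_ONCE_FOLIO() variant. Read as a general pattern, invariants about shared counters are asserted only once the code knows no other user can still be changing them underneath it; the sketch below illustrates that ordering with invented names (struct obj, can_reuse(), users and refs standing in for the mapcount and reference count), not the kernel's actual locking rules:

#include <assert.h>
#include <stdatomic.h>
#include <stdio.h>

/* In the real code the counters change concurrently; this sketch is
 * single-threaded and only shows the ordering of the checks. */
struct obj {
        atomic_int users;       /* stand-in for the mapcount */
        atomic_int refs;        /* stand-in for the reference count */
        int max;                /* stand-in for the number of pages in the folio */
};

static int can_reuse(struct obj *o)
{
        if (atomic_load(&o->users) != atomic_load(&o->refs))
                return 0;       /* someone else still holds a reference: bail out */

        /*
         * Only now, with the counts known to match, are the sanity checks
         * meaningful; asserting earlier could trip on a transient state.
         */
        assert(atomic_load(&o->users) <= o->max);
        return 1;
}

int main(void)
{
        struct obj o = { .users = 2, .refs = 2, .max = 4 };

        printf("can reuse: %d\n", can_reuse(&o));
        return 0;
}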