Diffstat (limited to 'mm/hugetlb.c')
-rw-r--r--  mm/hugetlb.c  47
1 file changed, 24 insertions(+), 23 deletions(-)
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 106e61f6e12c..96c991f54f7a 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -6037,29 +6037,28 @@ vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 	}
 
 	vmf.orig_pte = huge_ptep_get(mm, vmf.address, vmf.pte);
-	if (huge_pte_none_mostly(vmf.orig_pte)) {
-		if (is_pte_marker(vmf.orig_pte)) {
-			pte_marker marker =
-				pte_marker_get(pte_to_swp_entry(vmf.orig_pte));
-
-			if (marker & PTE_MARKER_POISONED) {
-				ret = VM_FAULT_HWPOISON_LARGE |
-				      VM_FAULT_SET_HINDEX(hstate_index(h));
-				goto out_mutex;
-			} else if (WARN_ON_ONCE(marker & PTE_MARKER_GUARD)) {
-				/* This isn't supported in hugetlb. */
-				ret = VM_FAULT_SIGSEGV;
-				goto out_mutex;
-			}
-		}
-
+	if (huge_pte_none(vmf.orig_pte))
 		/*
-		 * Other PTE markers should be handled the same way as none PTE.
-		 *
 		 * hugetlb_no_page will drop vma lock and hugetlb fault
 		 * mutex internally, which make us return immediately.
 		 */
 		return hugetlb_no_page(mapping, &vmf);
+
+	if (is_pte_marker(vmf.orig_pte)) {
+		const pte_marker marker =
+			pte_marker_get(pte_to_swp_entry(vmf.orig_pte));
+
+		if (marker & PTE_MARKER_POISONED) {
+			ret = VM_FAULT_HWPOISON_LARGE |
+			      VM_FAULT_SET_HINDEX(hstate_index(h));
+			goto out_mutex;
+		} else if (WARN_ON_ONCE(marker & PTE_MARKER_GUARD)) {
+			/* This isn't supported in hugetlb. */
+			ret = VM_FAULT_SIGSEGV;
+			goto out_mutex;
+		}
+
+		return hugetlb_no_page(mapping, &vmf);
 	}
 
 	ret = 0;
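The hunk above splits the old huge_pte_none_mostly() test into its two
constituent cases, so a genuinely empty entry short-circuits to
hugetlb_no_page() while marker entries are first inspected for poison and
guard markers before taking the same call. For orientation, the "mostly
none" helper is essentially the disjunction of the two checks now
open-coded here; a minimal illustrative sketch of that relationship
(hypothetical name, not a verbatim copy of the kernel header):

/*
 * Sketch only: an entry is "mostly none" when it is either truly empty
 * or carries a PTE marker. The hunk above open-codes this split so each
 * case gets its own handling.
 */
static inline bool sketch_huge_pte_none_mostly(pte_t pte)
{
	return huge_pte_none(pte) || is_pte_marker(pte);
}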
@@ -6228,6 +6227,7 @@ int hugetlb_mfill_atomic_pte(pte_t *dst_pte,
 	int ret = -ENOMEM;
 	struct folio *folio;
 	bool folio_in_pagecache = false;
+	pte_t dst_ptep;
 
 	if (uffd_flags_mode_is(flags, MFILL_ATOMIC_POISON)) {
 		ptl = huge_pte_lock(h, dst_mm, dst_pte);
@@ -6367,13 +6367,14 @@ int hugetlb_mfill_atomic_pte(pte_t *dst_pte,
 	if (folio_test_hwpoison(folio))
 		goto out_release_unlock;
 
+	ret = -EEXIST;
+
+	dst_ptep = huge_ptep_get(dst_mm, dst_addr, dst_pte);
 	/*
-	 * We allow to overwrite a pte marker: consider when both MISSING|WP
-	 * registered, we firstly wr-protect a none pte which has no page cache
-	 * page backing it, then access the page.
+	 * See comment about UFFD marker overwriting in
+	 * mfill_atomic_install_pte().
 	 */
-	ret = -EEXIST;
-	if (!huge_pte_none_mostly(huge_ptep_get(dst_mm, dst_addr, dst_pte)))
+	if (!huge_pte_none(dst_ptep) && !is_uffd_pte_marker(dst_ptep))
 		goto out_release_unlock;
 
 	if (folio_in_pagecache)
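The final hunk narrows which existing destination entries
hugetlb_mfill_atomic_pte() may overwrite: where the old check accepted any
"mostly none" entry (empty or carrying any marker), the new one accepts only
empty entries and userfaultfd-installed markers, e.g. a write-protect marker
laid down before any page backed the address. A hedged sketch of the
resulting policy, using a hypothetical helper name for illustration:

/*
 * Sketch only (not a kernel helper): an existing destination entry
 * blocks the atomic fill with -EEXIST unless it is truly empty or is a
 * marker that userfaultfd itself installed and may therefore replace.
 */
static bool sketch_mfill_may_overwrite(pte_t pte)
{
	return huge_pte_none(pte) || is_uffd_pte_marker(pte);
}

One visible effect of the predicate change is that entries carrying a
non-UFFD marker now take the -EEXIST path instead of being overwritten.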