author	David Hildenbrand <david@redhat.com>	2025-02-26 14:22:53 +0100
committer	Andrew Morton <akpm@linux-foundation.org>	2025-03-17 00:05:29 -0700
commit	66add5e9093b4b4112aebacbc91d43ae4e030456 (patch)
tree	858b887a33c1e2dba77c05657e430cbba5a16a1a /lib/test_hmm.c
parent	173a3dc051bda746e284ff3933fcf6771d7247b8 (diff)
lib/test_hmm: make dmirror_atomic_map() consume a single page
Patch series "mm: cleanups for device-exclusive entries (hmm)", v2.

Some smaller device-exclusive cleanups I have lying around.

This patch (of 5):

The caller now always passes a single page; let's simplify, and return
"0" on success.

Link: https://lkml.kernel.org/r/20250226132257.2826043-1-david@redhat.com
Link: https://lkml.kernel.org/r/20250226132257.2826043-2-david@redhat.com
Signed-off-by: David Hildenbrand <david@redhat.com>
Reviewed-by: Alistair Popple <apopple@nvidia.com>
Cc: Jason Gunthorpe <jgg@nvidia.com>
Cc: Jérôme Glisse <jglisse@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
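For reference, dmirror_atomic_map() as it reads after this patch, pieced together from the hunk below (not new code, just the post-patch function assembled from the diff):

static int dmirror_atomic_map(unsigned long addr, struct page *page,
			      struct dmirror *dmirror)
{
	void *entry;

	/* Map the migrated pages into the device's page tables. */
	mutex_lock(&dmirror->mutex);

	entry = xa_tag_pointer(page, DPT_XA_TAG_ATOMIC);
	entry = xa_store(&dmirror->pt, addr >> PAGE_SHIFT, entry, GFP_ATOMIC);
	if (xa_is_err(entry)) {
		mutex_unlock(&dmirror->mutex);
		return xa_err(entry);
	}

	mutex_unlock(&dmirror->mutex);
	return 0;
}

The single-page variant drops the per-iteration bookkeeping (pfn, mapped, i) and the "number of pages mapped" return convention, which is what lets the caller's "ret == 1 ? 0 : -EBUSY" translation go away in the second hunk.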
Diffstat (limited to 'lib/test_hmm.c')
-rw-r--r--	lib/test_hmm.c	32
1 file changed, 10 insertions(+), 22 deletions(-)
diff --git a/lib/test_hmm.c b/lib/test_hmm.c
index e4afca8d1880..39a2286f8592 100644
--- a/lib/test_hmm.c
+++ b/lib/test_hmm.c
@@ -706,34 +706,23 @@ static int dmirror_check_atomic(struct dmirror *dmirror, unsigned long start,
 	return 0;
 }
 
-static int dmirror_atomic_map(unsigned long start, unsigned long end,
-			      struct page **pages, struct dmirror *dmirror)
+static int dmirror_atomic_map(unsigned long addr, struct page *page,
+			      struct dmirror *dmirror)
 {
-	unsigned long pfn, mapped = 0;
-	int i;
+	void *entry;
 
 	/* Map the migrated pages into the device's page tables. */
 	mutex_lock(&dmirror->mutex);
 
-	for (i = 0, pfn = start >> PAGE_SHIFT; pfn < (end >> PAGE_SHIFT); pfn++, i++) {
-		void *entry;
-
-		if (!pages[i])
-			continue;
-
-		entry = pages[i];
-		entry = xa_tag_pointer(entry, DPT_XA_TAG_ATOMIC);
-		entry = xa_store(&dmirror->pt, pfn, entry, GFP_ATOMIC);
-		if (xa_is_err(entry)) {
-			mutex_unlock(&dmirror->mutex);
-			return xa_err(entry);
-		}
-
-		mapped++;
+	entry = xa_tag_pointer(page, DPT_XA_TAG_ATOMIC);
+	entry = xa_store(&dmirror->pt, addr >> PAGE_SHIFT, entry, GFP_ATOMIC);
+	if (xa_is_err(entry)) {
+		mutex_unlock(&dmirror->mutex);
+		return xa_err(entry);
 	}
 
 	mutex_unlock(&dmirror->mutex);
-	return mapped;
+	return 0;
 }
 
 static int dmirror_migrate_finalize_and_map(struct migrate_vma *args,
@@ -803,8 +792,7 @@ static int dmirror_exclusive(struct dmirror *dmirror,
 			break;
 		}
 
-		ret = dmirror_atomic_map(addr, addr + PAGE_SIZE, &page, dmirror);
-		ret = ret == 1 ? 0 : -EBUSY;
+		ret = dmirror_atomic_map(addr, page, dmirror);
 		folio_unlock(folio);
 		folio_put(folio);
 	}