author     Linus Torvalds <torvalds@linux-foundation.org>    2025-12-05 13:52:43 -0800
committer  Linus Torvalds <torvalds@linux-foundation.org>    2025-12-05 13:52:43 -0800
commit     7203ca412fc8e8a0588e9adc0f777d3163f8dff3 (patch)
tree       7cbdcdb0bc0533f0133d472f95629099c123c3f9 /drivers
parent     ac20755937e037e586b1ca18a6717d31b1cbce93 (diff)
parent     faf3c923523e5c8fc3baaa413d62e913774ae52f (diff)
Merge tag 'mm-stable-2025-12-03-21-26' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm
Pull MM updates from Andrew Morton:

 - "__vmalloc()/kvmalloc() and no-block support" (Uladzislau Rezki)
   Rework the vmalloc() code to support non-blocking allocations
   (GFP_ATOMIC, GFP_NOWAIT)

 - "ksm: fix exec/fork inheritance" (xu xin)
   Fix a rare case where the KSM MMF_VM_MERGE_ANY prctl state is not
   inherited across fork/exec

 - "mm/zswap: misc cleanup of code and documentations" (SeongJae Park)
   Some light maintenance work on the zswap code

 - "mm/page_owner: add debugfs files 'show_handles' and 'show_stacks_handles'" (Mauricio Faria de Oliveira)
   Enhance the /sys/kernel/debug/page_owner debug feature by adding unique
   identifiers to differentiate the various stack traces so that userspace
   monitoring tools can better match stack traces over time

 - "mm/page_alloc: pcp->batch cleanups" (Joshua Hahn)
   Minor alterations to the page allocator's per-cpu-pages feature

 - "Improve UFFDIO_MOVE scalability by removing anon_vma lock" (Lokesh Gidra)
   Address a scalability issue in userfaultfd's UFFDIO_MOVE operation

 - "kasan: cleanups for kasan_enabled() checks" (Sabyrzhan Tasbolatov)

 - "drivers/base/node: fold node register and unregister functions" (Donet Tom)
   Clean up the NUMA node handling code a little

 - "mm: some optimizations for prot numa" (Kefeng Wang)
   Cleanups and small optimizations to the NUMA allocation hinting code

 - "mm/page_alloc: Batch callers of free_pcppages_bulk" (Joshua Hahn)
   Address long lock hold times at boot on large machines. These were
   causing (harmless) softlockup warnings

 - "optimize the logic for handling dirty file folios during reclaim" (Baolin Wang)
   Remove some now-unnecessary work from page reclaim

 - "mm/damon: allow DAMOS auto-tuned for per-memcg per-node memory usage" (SeongJae Park)
   Enhance the DAMOS auto-tuning feature

 - "mm/damon: fixes for address alignment issues in DAMON_LRU_SORT and DAMON_RECLAIM" (Quanmin Yan)
   Fix DAMON_LRU_SORT and DAMON_RECLAIM with certain userspace configuration

 - "expand mmap_prepare functionality, port more users" (Lorenzo Stoakes)
   Enhance the new(ish) file_operations.mmap_prepare() method and port
   additional callsites from the old ->mmap() over to ->mmap_prepare()

 - "Fix stale IOTLB entries for kernel address space" (Lu Baolu)
   Fix a bug (and possible security issue on non-x86) in the IOMMU code.
   In some situations the IOMMU could be left hanging onto a stale kernel
   pagetable entry

 - "mm/huge_memory: cleanup __split_unmapped_folio()" (Wei Yang)
   Clean up and optimize the folio splitting code

 - "mm, swap: misc cleanup and bugfix" (Kairui Song)
   Some cleanups and a minor fix in the swap discard code

 - "mm/damon: misc documentation fixups" (SeongJae Park)

 - "mm/damon: support pin-point targets removal" (SeongJae Park)
   Permit userspace to remove a specific monitoring target in the middle
   of the current targets list

 - "mm: MISC follow-up patches for linux/pgalloc.h" (Harry Yoo)
   A couple of cleanups related to mm header file inclusion

 - "mm/swapfile.c: select swap devices of default priority round robin" (Baoquan He)
   Improve the selection of swap devices for NUMA machines

 - "mm: Convert memory block states (MEM_*) macros to enums" (Israel Batista)
   Change the memory block labels from macros to enums so they will appear
   in kernel debug info

 - "ksm: perform a range-walk to jump over holes in break_ksm" (Pedro Demarchi Gomes)
   Address an inefficiency when KSM unmerges an address range

 - "mm/damon/tests: fix memory bugs in kunit tests" (SeongJae Park)
   Fix leaks and unhandled malloc() failures in DAMON userspace unit tests

 - "some cleanups for pageout()" (Baolin Wang)
   Clean up a couple of minor things in the page scanner's
   writeback-for-eviction code

 - "mm/hugetlb: refactor sysfs/sysctl interfaces" (Hui Zhu)
   Move hugetlb's sysfs/sysctl handling code into a new file

 - "introduce VM_MAYBE_GUARD and make it sticky" (Lorenzo Stoakes)
   Make the VMA guard regions available in /proc/pid/smaps and improve the
   mergeability of guarded VMAs

 - "mm: perform guard region install/remove under VMA lock" (Lorenzo Stoakes)
   Reduce mmap lock contention for callers performing VMA guard region
   operations

 - "vma_start_write_killable" (Matthew Wilcox)
   Start work on permitting applications to be killed when they are
   waiting on a read_lock on the VMA lock

 - "mm/damon/tests: add more tests for online parameters commit" (SeongJae Park)
   Add additional userspace testing of DAMON's "commit" feature

 - "mm/damon: misc cleanups" (SeongJae Park)

 - "make VM_SOFTDIRTY a sticky VMA flag" (Lorenzo Stoakes)
   Address the possible loss of a VMA's VM_SOFTDIRTY flag when that VMA is
   merged with another

 - "mm: support device-private THP" (Balbir Singh)
   Introduce support for Transparent Huge Page (THP) migration in zone
   device-private memory

 - "Optimize folio split in memory failure" (Zi Yan)

 - "mm/huge_memory: Define split_type and consolidate split support checks" (Wei Yang)
   Some more cleanups in the folio splitting code

 - "mm: remove is_swap_[pte, pmd]() + non-swap entries, introduce leaf entries" (Lorenzo Stoakes)
   Clean up our handling of pagetable leaf entries by introducing the
   concept of 'software leaf entries', of type softleaf_t

 - "reparent the THP split queue" (Muchun Song)
   Reparent the THP split queue to its parent memcg. This is in
   preparation for addressing the long-standing "dying memcg" problem,
   wherein dead memcgs linger for too long, consuming memory resources

 - "unify PMD scan results and remove redundant cleanup" (Wei Yang)
   A little cleanup in the hugepage collapse code

 - "zram: introduce writeback bio batching" (Sergey Senozhatsky)
   Improve zram writeback efficiency by introducing batched bio writeback
   support

 - "memcg: cleanup the memcg stats interfaces" (Shakeel Butt)
   Clean up our handling of the interrupt safety of some memcg stats

 - "make vmalloc gfp flags usage more apparent" (Vishal Moola)
   Clean up vmalloc's handling of incoming GFP flags

 - "mm: Add soft-dirty and uffd-wp support for RISC-V" (Chunyan Zhang)
   Teach soft dirty and userfaultfd write protect tracking to use RISC-V's
   Svrsw60t59b extension

 - "mm: swap: small fixes and comment cleanups" (Youngjun Park)
   Fix a small bug and clean up some of the swap code

 - "initial work on making VMA flags a bitmap" (Lorenzo Stoakes)
   Start work on converting the vma struct's flags to a bitmap, so we stop
   running out of them, especially on 32-bit

 - "mm/swapfile: fix and cleanup swap list iterations" (Youngjun Park)
   Address a possible bug in the swap discard code and clean things up a
   little

[ This merge also reverts commit ebb9aeb980e5 ("vfio/nvgrace-gpu: register
  device memory for poison handling") because it looks broken to me, I've
  asked for clarification - Linus ]

* tag 'mm-stable-2025-12-03-21-26' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm: (321 commits)
  mm: fix vma_start_write_killable() signal handling
  mm/swapfile: use plist_for_each_entry in __folio_throttle_swaprate
  mm/swapfile: fix list iteration when next node is removed during discard
  fs/proc/task_mmu.c: fix make_uffd_wp_huge_pte() huge pte handling
  mm/kfence: add reboot notifier to disable KFENCE on shutdown
  memcg: remove inc/dec_lruvec_kmem_state helpers
  selftests/mm/uffd: initialize char variable to Null
  mm: fix DEBUG_RODATA_TEST indentation in Kconfig
  mm: introduce VMA flags bitmap type
  tools/testing/vma: eliminate dependency on vma->__vm_flags
  mm: simplify and rename mm flags function for clarity
  mm: declare VMA flags by bit
  zram: fix a spelling mistake
  mm/page_alloc: optimize lowmem_reserve max lookup using its semantic monotonicity
  mm/vmscan: skip increasing kswapd_failures when reclaim was boosted
  pagemap: update BUDDY flag documentation
  mm: swap: remove scan_swap_map_slots() references from comments
  mm: swap: change swap_alloc_slow() to void
  mm, swap: remove redundant comment for read_swap_cache_async
  mm, swap: use SWP_SOLIDSTATE to determine if swap is rotational
  ...
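For the vmalloc no-block series above, here is a minimal caller-side sketch (not taken from the patches; try_nowait_buffer is an invented name) of what a non-blocking allocation attempt looks like under the described semantics:

/*
 * Illustrative only: with the no-block support described above, a caller
 * that must not sleep can attempt the allocation with GFP_NOWAIT and
 * treat NULL as "retry later from process context".
 */
static void *try_nowait_buffer(unsigned long size)
{
	/* Fails instead of sleeping when memory is not immediately available. */
	return __vmalloc(size, GFP_NOWAIT | __GFP_NOWARN);
}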
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/acpi/apei/ghes.c                  |   6
-rw-r--r--  drivers/base/memory.c                     |   6
-rw-r--r--  drivers/base/node.c                       |  90
-rw-r--r--  drivers/block/zram/zram_drv.c             | 483
-rw-r--r--  drivers/block/zram/zram_drv.h             |   2
-rw-r--r--  drivers/char/mem.c                        |  86
-rw-r--r--  drivers/dax/device.c                      |  37
-rw-r--r--  drivers/firmware/efi/arm-runtime.c        |   4
-rw-r--r--  drivers/firmware/efi/riscv-runtime.c      |   4
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_migrate.c  |   7
-rw-r--r--  drivers/gpu/drm/drm_pagemap.c             |  12
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_dmem.c    | 311
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_svm.c     |   6
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_svm.h     |   3
-rw-r--r--  drivers/iommu/iommu-sva.c                 |  29
-rw-r--r--  drivers/pci/p2pdma.c                      |   5
-rw-r--r--  drivers/s390/char/sclp_sd.c               |   3
-rw-r--r--  drivers/vdpa/vdpa_user/iova_domain.h      |   2
18 files changed, 770 insertions, 326 deletions
diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c
index 56107aa00274..626908491d8f 100644
--- a/drivers/acpi/apei/ghes.c
+++ b/drivers/acpi/apei/ghes.c
@@ -506,12 +506,6 @@ static bool ghes_do_memory_failure(u64 physical_addr, int flags)
return false;
pfn = PHYS_PFN(physical_addr);
- if (!pfn_valid(pfn) && !arch_is_platform_page(physical_addr)) {
- pr_warn_ratelimited(FW_WARN GHES_PFX
- "Invalid address in generic error data: %#llx\n",
- physical_addr);
- return false;
- }
if (flags == MF_ACTION_REQUIRED && current->mm) {
twcb = (void *)gen_pool_alloc(ghes_estatus_pool, sizeof(*twcb));
diff --git a/drivers/base/memory.c b/drivers/base/memory.c
index fc43f2703ae0..751f248ca4a8 100644
--- a/drivers/base/memory.c
+++ b/drivers/base/memory.c
@@ -198,15 +198,15 @@ static ssize_t state_show(struct device *dev, struct device_attribute *attr,
break;
default:
WARN_ON(1);
- return sysfs_emit(buf, "ERROR-UNKNOWN-%ld\n", mem->state);
+ return sysfs_emit(buf, "ERROR-UNKNOWN-%d\n", mem->state);
}
return sysfs_emit(buf, "%s\n", output);
}
-int memory_notify(unsigned long val, void *v)
+int memory_notify(enum memory_block_state state, void *v)
{
- return blocking_notifier_call_chain(&memory_chain, val, v);
+ return blocking_notifier_call_chain(&memory_chain, state, v);
}
#if defined(CONFIG_MEMORY_FAILURE) && defined(CONFIG_MEMORY_HOTPLUG)
diff --git a/drivers/base/node.c b/drivers/base/node.c
index 83aeb0518e1d..00cf4532f121 100644
--- a/drivers/base/node.c
+++ b/drivers/base/node.c
@@ -676,50 +676,6 @@ static void node_device_release(struct device *dev)
kfree(to_node(dev));
}
-/*
- * register_node - Setup a sysfs device for a node.
- * @num - Node number to use when creating the device.
- *
- * Initialize and register the node device.
- */
-static int register_node(struct node *node, int num)
-{
- int error;
-
- node->dev.id = num;
- node->dev.bus = &node_subsys;
- node->dev.release = node_device_release;
- node->dev.groups = node_dev_groups;
- error = device_register(&node->dev);
-
- if (error) {
- put_device(&node->dev);
- } else {
- hugetlb_register_node(node);
- compaction_register_node(node);
- reclaim_register_node(node);
- }
-
- return error;
-}
-
-/**
- * unregister_node - unregister a node device
- * @node: node going away
- *
- * Unregisters a node device @node. All the devices on the node must be
- * unregistered before calling this function.
- */
-void unregister_node(struct node *node)
-{
- hugetlb_unregister_node(node);
- compaction_unregister_node(node);
- reclaim_unregister_node(node);
- node_remove_accesses(node);
- node_remove_caches(node);
- device_unregister(&node->dev);
-}
-
struct node *node_devices[MAX_NUMNODES];
/*
@@ -907,7 +863,13 @@ void register_memory_blocks_under_node_hotplug(int nid, unsigned long start_pfn,
}
#endif /* CONFIG_MEMORY_HOTPLUG */
-int register_one_node(int nid)
+/**
+ * register_node - Initialize and register the node device.
+ * @nid: Node number to use when creating the device.
+ *
+ * Return: 0 on success, -errno otherwise
+ */
+int register_node(int nid)
{
int error;
int cpu;
@@ -918,14 +880,23 @@ int register_one_node(int nid)
return -ENOMEM;
INIT_LIST_HEAD(&node->access_list);
- node_devices[nid] = node;
- error = register_node(node_devices[nid], nid);
+ node->dev.id = nid;
+ node->dev.bus = &node_subsys;
+ node->dev.release = node_device_release;
+ node->dev.groups = node_dev_groups;
+
+ error = device_register(&node->dev);
if (error) {
- node_devices[nid] = NULL;
+ put_device(&node->dev);
return error;
}
+ node_devices[nid] = node;
+ hugetlb_register_node(node);
+ compaction_register_node(node);
+ reclaim_register_node(node);
+
/* link cpu under this node */
for_each_present_cpu(cpu) {
if (cpu_to_node(cpu) == nid)
@@ -936,13 +907,26 @@ int register_one_node(int nid)
return error;
}
-
-void unregister_one_node(int nid)
+/**
+ * unregister_node - unregister a node device
+ * @nid: nid of the node going away
+ *
+ * Unregisters the node device at node id @nid. All the devices on the
+ * node must be unregistered before calling this function.
+ */
+void unregister_node(int nid)
{
- if (!node_devices[nid])
+ struct node *node = node_devices[nid];
+
+ if (!node)
return;
- unregister_node(node_devices[nid]);
+ hugetlb_unregister_node(node);
+ compaction_unregister_node(node);
+ reclaim_unregister_node(node);
+ node_remove_accesses(node);
+ node_remove_caches(node);
+ device_unregister(&node->dev);
node_devices[nid] = NULL;
}
@@ -1018,7 +1002,7 @@ void __init node_dev_init(void)
* to already created cpu devices.
*/
for_each_online_node(i) {
- ret = register_one_node(i);
+ ret = register_node(i);
if (ret)
panic("%s() failed to add node: %d\n", __func__, ret);
}
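The fold above keeps the usual device_register() error rule: once device_register() has been called, the node's lifetime is owned by its embedded kobject, so the failure path drops the reference with put_device() (which ends in node_device_release() freeing the node) rather than calling kfree() directly. A minimal sketch of that idiom, with invented my_* names:

struct my_obj {
	struct device dev;
	/* ... driver data ... */
};

static void my_obj_release(struct device *dev)
{
	kfree(container_of(dev, struct my_obj, dev));
}

static int my_register(struct my_obj *obj)
{
	int err;

	obj->dev.release = my_obj_release;
	err = device_register(&obj->dev);
	if (err) {
		/* device_register() took over the kobject: drop the
		 * reference instead of kfree()ing obj directly. */
		put_device(&obj->dev);
		return err;
	}
	return 0;
}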
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index a43074657531..5759823d6314 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -500,8 +500,31 @@ out:
}
#ifdef CONFIG_ZRAM_WRITEBACK
+#define INVALID_BDEV_BLOCK (~0UL)
+
+struct zram_wb_ctl {
+ /* idle list is accessed only by the writeback task, no concurrency */
+ struct list_head idle_reqs;
+ /* done list is accessed concurrently, protected by done_lock */
+ struct list_head done_reqs;
+ wait_queue_head_t done_wait;
+ spinlock_t done_lock;
+ atomic_t num_inflight;
+};
+
+struct zram_wb_req {
+ unsigned long blk_idx;
+ struct page *page;
+ struct zram_pp_slot *pps;
+ struct bio_vec bio_vec;
+ struct bio bio;
+
+ struct list_head entry;
+};
+
static ssize_t writeback_limit_enable_store(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t len)
+ struct device_attribute *attr,
+ const char *buf, size_t len)
{
struct zram *zram = dev_to_zram(dev);
u64 val;
@@ -510,33 +533,31 @@ static ssize_t writeback_limit_enable_store(struct device *dev,
if (kstrtoull(buf, 10, &val))
return ret;
- down_read(&zram->init_lock);
- spin_lock(&zram->wb_limit_lock);
+ down_write(&zram->init_lock);
zram->wb_limit_enable = val;
- spin_unlock(&zram->wb_limit_lock);
- up_read(&zram->init_lock);
+ up_write(&zram->init_lock);
ret = len;
return ret;
}
static ssize_t writeback_limit_enable_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+ struct device_attribute *attr,
+ char *buf)
{
bool val;
struct zram *zram = dev_to_zram(dev);
down_read(&zram->init_lock);
- spin_lock(&zram->wb_limit_lock);
val = zram->wb_limit_enable;
- spin_unlock(&zram->wb_limit_lock);
up_read(&zram->init_lock);
return sysfs_emit(buf, "%d\n", val);
}
static ssize_t writeback_limit_store(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t len)
+ struct device_attribute *attr,
+ const char *buf, size_t len)
{
struct zram *zram = dev_to_zram(dev);
u64 val;
@@ -545,31 +566,71 @@ static ssize_t writeback_limit_store(struct device *dev,
if (kstrtoull(buf, 10, &val))
return ret;
- down_read(&zram->init_lock);
- spin_lock(&zram->wb_limit_lock);
+ /*
+ * When the page size is greater than 4KB, a bd_wb_limit value that is
+ * not a multiple of PAGE_SIZE / 4096 would wrap. For example, with
+ * 16KB pages each writeback decrements the limit by 4, so a limit of 3
+ * would underflow and, since bd_wb_limit is unsigned, wrap to a huge
+ * value. Round the value down to avoid this.
+ */
+ val = rounddown(val, PAGE_SIZE / 4096);
+
+ down_write(&zram->init_lock);
zram->bd_wb_limit = val;
- spin_unlock(&zram->wb_limit_lock);
- up_read(&zram->init_lock);
+ up_write(&zram->init_lock);
ret = len;
return ret;
}
static ssize_t writeback_limit_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+ struct device_attribute *attr, char *buf)
{
u64 val;
struct zram *zram = dev_to_zram(dev);
down_read(&zram->init_lock);
- spin_lock(&zram->wb_limit_lock);
val = zram->bd_wb_limit;
- spin_unlock(&zram->wb_limit_lock);
up_read(&zram->init_lock);
return sysfs_emit(buf, "%llu\n", val);
}
+static ssize_t writeback_batch_size_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct zram *zram = dev_to_zram(dev);
+ u32 val;
+
+ if (kstrtouint(buf, 10, &val))
+ return -EINVAL;
+
+ if (!val)
+ return -EINVAL;
+
+ down_write(&zram->init_lock);
+ zram->wb_batch_size = val;
+ up_write(&zram->init_lock);
+
+ return len;
+}
+
+static ssize_t writeback_batch_size_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ u32 val;
+ struct zram *zram = dev_to_zram(dev);
+
+ down_read(&zram->init_lock);
+ val = zram->wb_batch_size;
+ up_read(&zram->init_lock);
+
+ return sysfs_emit(buf, "%u\n", val);
+}
+
static void reset_bdev(struct zram *zram)
{
if (!zram->backing_dev)
@@ -697,23 +758,20 @@ out:
return err;
}
-static unsigned long alloc_block_bdev(struct zram *zram)
+static unsigned long zram_reserve_bdev_block(struct zram *zram)
{
- unsigned long blk_idx = 1;
-retry:
- /* skip 0 bit to confuse zram.handle = 0 */
- blk_idx = find_next_zero_bit(zram->bitmap, zram->nr_pages, blk_idx);
- if (blk_idx == zram->nr_pages)
- return 0;
+ unsigned long blk_idx;
- if (test_and_set_bit(blk_idx, zram->bitmap))
- goto retry;
+ blk_idx = find_next_zero_bit(zram->bitmap, zram->nr_pages, 0);
+ if (blk_idx == zram->nr_pages)
+ return INVALID_BDEV_BLOCK;
+ set_bit(blk_idx, zram->bitmap);
atomic64_inc(&zram->stats.bd_count);
return blk_idx;
}
-static void free_block_bdev(struct zram *zram, unsigned long blk_idx)
+static void zram_release_bdev_block(struct zram *zram, unsigned long blk_idx)
{
int was_set;
@@ -734,32 +792,249 @@ static void read_from_bdev_async(struct zram *zram, struct page *page,
submit_bio(bio);
}
-static int zram_writeback_slots(struct zram *zram, struct zram_pp_ctl *ctl)
+static void release_wb_req(struct zram_wb_req *req)
{
- unsigned long blk_idx = 0;
- struct page *page = NULL;
- struct zram_pp_slot *pps;
- struct bio_vec bio_vec;
- struct bio bio;
+ __free_page(req->page);
+ kfree(req);
+}
+
+static void release_wb_ctl(struct zram_wb_ctl *wb_ctl)
+{
+ if (!wb_ctl)
+ return;
+
+ /* We should never have inflight requests at this point */
+ WARN_ON(atomic_read(&wb_ctl->num_inflight));
+ WARN_ON(!list_empty(&wb_ctl->done_reqs));
+
+ while (!list_empty(&wb_ctl->idle_reqs)) {
+ struct zram_wb_req *req;
+
+ req = list_first_entry(&wb_ctl->idle_reqs,
+ struct zram_wb_req, entry);
+ list_del(&req->entry);
+ release_wb_req(req);
+ }
+
+ kfree(wb_ctl);
+}
+
+static struct zram_wb_ctl *init_wb_ctl(struct zram *zram)
+{
+ struct zram_wb_ctl *wb_ctl;
+ int i;
+
+ wb_ctl = kmalloc(sizeof(*wb_ctl), GFP_KERNEL);
+ if (!wb_ctl)
+ return NULL;
+
+ INIT_LIST_HEAD(&wb_ctl->idle_reqs);
+ INIT_LIST_HEAD(&wb_ctl->done_reqs);
+ atomic_set(&wb_ctl->num_inflight, 0);
+ init_waitqueue_head(&wb_ctl->done_wait);
+ spin_lock_init(&wb_ctl->done_lock);
+
+ for (i = 0; i < zram->wb_batch_size; i++) {
+ struct zram_wb_req *req;
+
+ /*
+ * This is fatal condition only if we couldn't allocate
+ * any requests at all. Otherwise we just work with the
+ * requests that we have successfully allocated, so that
+ * writeback can still proceed, even if there is only one
+ * request on the idle list.
+ */
+ req = kzalloc(sizeof(*req), GFP_KERNEL | __GFP_NOWARN);
+ if (!req)
+ break;
+
+ req->page = alloc_page(GFP_KERNEL | __GFP_NOWARN);
+ if (!req->page) {
+ kfree(req);
+ break;
+ }
+
+ list_add(&req->entry, &wb_ctl->idle_reqs);
+ }
+
+ /* We couldn't allocate any requests, so writeback is not possible */
+ if (list_empty(&wb_ctl->idle_reqs))
+ goto release_wb_ctl;
+
+ return wb_ctl;
+
+release_wb_ctl:
+ release_wb_ctl(wb_ctl);
+ return NULL;
+}
+
+static void zram_account_writeback_rollback(struct zram *zram)
+{
+ lockdep_assert_held_read(&zram->init_lock);
+
+ if (zram->wb_limit_enable)
+ zram->bd_wb_limit += 1UL << (PAGE_SHIFT - 12);
+}
+
+static void zram_account_writeback_submit(struct zram *zram)
+{
+ lockdep_assert_held_read(&zram->init_lock);
+
+ if (zram->wb_limit_enable && zram->bd_wb_limit > 0)
+ zram->bd_wb_limit -= 1UL << (PAGE_SHIFT - 12);
+}
+
+static int zram_writeback_complete(struct zram *zram, struct zram_wb_req *req)
+{
+ u32 index = req->pps->index;
+ int err;
+
+ err = blk_status_to_errno(req->bio.bi_status);
+ if (err) {
+ /*
+ * Failed wb requests should not be accounted in wb_limit
+ * (if enabled).
+ */
+ zram_account_writeback_rollback(zram);
+ zram_release_bdev_block(zram, req->blk_idx);
+ return err;
+ }
+
+ atomic64_inc(&zram->stats.bd_writes);
+ zram_slot_lock(zram, index);
+ /*
+ * We release slot lock during writeback so slot can change under us:
+ * slot_free() or slot_free() and zram_write_page(). In both cases
+ * slot loses ZRAM_PP_SLOT flag. No concurrent post-processing can
+ * set ZRAM_PP_SLOT on such slots until current post-processing
+ * finishes.
+ */
+ if (!zram_test_flag(zram, index, ZRAM_PP_SLOT)) {
+ zram_release_bdev_block(zram, req->blk_idx);
+ goto out;
+ }
+
+ zram_free_page(zram, index);
+ zram_set_flag(zram, index, ZRAM_WB);
+ zram_set_handle(zram, index, req->blk_idx);
+ atomic64_inc(&zram->stats.pages_stored);
+
+out:
+ zram_slot_unlock(zram, index);
+ return 0;
+}
+
+static void zram_writeback_endio(struct bio *bio)
+{
+ struct zram_wb_req *req = container_of(bio, struct zram_wb_req, bio);
+ struct zram_wb_ctl *wb_ctl = bio->bi_private;
+ unsigned long flags;
+
+ spin_lock_irqsave(&wb_ctl->done_lock, flags);
+ list_add(&req->entry, &wb_ctl->done_reqs);
+ spin_unlock_irqrestore(&wb_ctl->done_lock, flags);
+
+ wake_up(&wb_ctl->done_wait);
+}
+
+static void zram_submit_wb_request(struct zram *zram,
+ struct zram_wb_ctl *wb_ctl,
+ struct zram_wb_req *req)
+{
+ /*
+ * wb_limit (if enabled) should be adjusted before submission,
+ * so that we don't over-submit.
+ */
+ zram_account_writeback_submit(zram);
+ atomic_inc(&wb_ctl->num_inflight);
+ req->bio.bi_private = wb_ctl;
+ submit_bio(&req->bio);
+}
+
+static int zram_complete_done_reqs(struct zram *zram,
+ struct zram_wb_ctl *wb_ctl)
+{
+ struct zram_wb_req *req;
+ unsigned long flags;
int ret = 0, err;
- u32 index;
- page = alloc_page(GFP_KERNEL);
- if (!page)
- return -ENOMEM;
+ while (atomic_read(&wb_ctl->num_inflight) > 0) {
+ spin_lock_irqsave(&wb_ctl->done_lock, flags);
+ req = list_first_entry_or_null(&wb_ctl->done_reqs,
+ struct zram_wb_req, entry);
+ if (req)
+ list_del(&req->entry);
+ spin_unlock_irqrestore(&wb_ctl->done_lock, flags);
+
+ /* ->num_inflight > 0 doesn't mean we have done requests */
+ if (!req)
+ break;
+
+ err = zram_writeback_complete(zram, req);
+ if (err)
+ ret = err;
+
+ atomic_dec(&wb_ctl->num_inflight);
+ release_pp_slot(zram, req->pps);
+ req->pps = NULL;
+
+ list_add(&req->entry, &wb_ctl->idle_reqs);
+ }
+
+ return ret;
+}
+
+static struct zram_wb_req *zram_select_idle_req(struct zram_wb_ctl *wb_ctl)
+{
+ struct zram_wb_req *req;
+
+ req = list_first_entry_or_null(&wb_ctl->idle_reqs,
+ struct zram_wb_req, entry);
+ if (req)
+ list_del(&req->entry);
+ return req;
+}
+
+static int zram_writeback_slots(struct zram *zram,
+ struct zram_pp_ctl *ctl,
+ struct zram_wb_ctl *wb_ctl)
+{
+ unsigned long blk_idx = INVALID_BDEV_BLOCK;
+ struct zram_wb_req *req = NULL;
+ struct zram_pp_slot *pps;
+ int ret = 0, err = 0;
+ u32 index = 0;
while ((pps = select_pp_slot(ctl))) {
- spin_lock(&zram->wb_limit_lock);
if (zram->wb_limit_enable && !zram->bd_wb_limit) {
- spin_unlock(&zram->wb_limit_lock);
ret = -EIO;
break;
}
- spin_unlock(&zram->wb_limit_lock);
- if (!blk_idx) {
- blk_idx = alloc_block_bdev(zram);
- if (!blk_idx) {
+ while (!req) {
+ req = zram_select_idle_req(wb_ctl);
+ if (req)
+ break;
+
+ wait_event(wb_ctl->done_wait,
+ !list_empty(&wb_ctl->done_reqs));
+
+ err = zram_complete_done_reqs(zram, wb_ctl);
+ /*
+ * BIO errors are not fatal, we continue and simply
+ * attempt to writeback the remaining objects (pages).
+ * At the same time we need to signal user-space that
+ * some writes (at least one, but also could be all of
+ * them) were not successful and we do so by returning
+ * the most recent BIO error.
+ */
+ if (err)
+ ret = err;
+ }
+
+ if (blk_idx == INVALID_BDEV_BLOCK) {
+ blk_idx = zram_reserve_bdev_block(zram);
+ if (blk_idx == INVALID_BDEV_BLOCK) {
ret = -ENOSPC;
break;
}
@@ -768,74 +1043,54 @@ static int zram_writeback_slots(struct zram *zram, struct zram_pp_ctl *ctl)
index = pps->index;
zram_slot_lock(zram, index);
/*
- * scan_slots() sets ZRAM_PP_SLOT and relases slot lock, so
+ * scan_slots() sets ZRAM_PP_SLOT and releases slot lock, so
* slots can change in the meantime. If slots are accessed or
* freed they lose ZRAM_PP_SLOT flag and hence we don't
* post-process them.
*/
if (!zram_test_flag(zram, index, ZRAM_PP_SLOT))
goto next;
- if (zram_read_from_zspool(zram, page, index))
+ if (zram_read_from_zspool(zram, req->page, index))
goto next;
zram_slot_unlock(zram, index);
- bio_init(&bio, zram->bdev, &bio_vec, 1,
- REQ_OP_WRITE | REQ_SYNC);
- bio.bi_iter.bi_sector = blk_idx * (PAGE_SIZE >> 9);
- __bio_add_page(&bio, page, PAGE_SIZE, 0);
-
- /*
- * XXX: A single page IO would be inefficient for write
- * but it would be not bad as starter.
- */
- err = submit_bio_wait(&bio);
- if (err) {
- release_pp_slot(zram, pps);
- /*
- * BIO errors are not fatal, we continue and simply
- * attempt to writeback the remaining objects (pages).
- * At the same time we need to signal user-space that
- * some writes (at least one, but also could be all of
- * them) were not successful and we do so by returning
- * the most recent BIO error.
- */
- ret = err;
- continue;
- }
-
- atomic64_inc(&zram->stats.bd_writes);
- zram_slot_lock(zram, index);
/*
- * Same as above, we release slot lock during writeback so
- * slot can change under us: slot_free() or slot_free() and
- * reallocation (zram_write_page()). In both cases slot loses
- * ZRAM_PP_SLOT flag. No concurrent post-processing can set
- * ZRAM_PP_SLOT on such slots until current post-processing
- * finishes.
+ * From now on pp-slot is owned by the req, remove it from
+ * its pp bucket.
*/
- if (!zram_test_flag(zram, index, ZRAM_PP_SLOT))
- goto next;
+ list_del_init(&pps->entry);
+
+ req->blk_idx = blk_idx;
+ req->pps = pps;
+ bio_init(&req->bio, zram->bdev, &req->bio_vec, 1, REQ_OP_WRITE);
+ req->bio.bi_iter.bi_sector = req->blk_idx * (PAGE_SIZE >> 9);
+ req->bio.bi_end_io = zram_writeback_endio;
+ __bio_add_page(&req->bio, req->page, PAGE_SIZE, 0);
+
+ zram_submit_wb_request(zram, wb_ctl, req);
+ blk_idx = INVALID_BDEV_BLOCK;
+ req = NULL;
+ cond_resched();
+ continue;
- zram_free_page(zram, index);
- zram_set_flag(zram, index, ZRAM_WB);
- zram_set_handle(zram, index, blk_idx);
- blk_idx = 0;
- atomic64_inc(&zram->stats.pages_stored);
- spin_lock(&zram->wb_limit_lock);
- if (zram->wb_limit_enable && zram->bd_wb_limit > 0)
- zram->bd_wb_limit -= 1UL << (PAGE_SHIFT - 12);
- spin_unlock(&zram->wb_limit_lock);
next:
zram_slot_unlock(zram, index);
release_pp_slot(zram, pps);
-
- cond_resched();
}
- if (blk_idx)
- free_block_bdev(zram, blk_idx);
- if (page)
- __free_page(page);
+ /*
+ * Selected idle req, but never submitted it due to some error or
+ * wb limit.
+ */
+ if (req)
+ release_wb_req(req);
+
+ while (atomic_read(&wb_ctl->num_inflight) > 0) {
+ wait_event(wb_ctl->done_wait, !list_empty(&wb_ctl->done_reqs));
+ err = zram_complete_done_reqs(zram, wb_ctl);
+ if (err)
+ ret = err;
+ }
return ret;
}
@@ -948,7 +1203,8 @@ static ssize_t writeback_store(struct device *dev,
struct zram *zram = dev_to_zram(dev);
u64 nr_pages = zram->disksize >> PAGE_SHIFT;
unsigned long lo = 0, hi = nr_pages;
- struct zram_pp_ctl *ctl = NULL;
+ struct zram_pp_ctl *pp_ctl = NULL;
+ struct zram_wb_ctl *wb_ctl = NULL;
char *args, *param, *val;
ssize_t ret = len;
int err, mode = 0;
@@ -970,8 +1226,14 @@ static ssize_t writeback_store(struct device *dev,
goto release_init_lock;
}
- ctl = init_pp_ctl();
- if (!ctl) {
+ pp_ctl = init_pp_ctl();
+ if (!pp_ctl) {
+ ret = -ENOMEM;
+ goto release_init_lock;
+ }
+
+ wb_ctl = init_wb_ctl(zram);
+ if (!wb_ctl) {
ret = -ENOMEM;
goto release_init_lock;
}
@@ -1000,7 +1262,7 @@ static ssize_t writeback_store(struct device *dev,
goto release_init_lock;
}
- scan_slots_for_writeback(zram, mode, lo, hi, ctl);
+ scan_slots_for_writeback(zram, mode, lo, hi, pp_ctl);
break;
}
@@ -1011,7 +1273,7 @@ static ssize_t writeback_store(struct device *dev,
goto release_init_lock;
}
- scan_slots_for_writeback(zram, mode, lo, hi, ctl);
+ scan_slots_for_writeback(zram, mode, lo, hi, pp_ctl);
break;
}
@@ -1022,7 +1284,7 @@ static ssize_t writeback_store(struct device *dev,
goto release_init_lock;
}
- scan_slots_for_writeback(zram, mode, lo, hi, ctl);
+ scan_slots_for_writeback(zram, mode, lo, hi, pp_ctl);
continue;
}
@@ -1033,17 +1295,18 @@ static ssize_t writeback_store(struct device *dev,
goto release_init_lock;
}
- scan_slots_for_writeback(zram, mode, lo, hi, ctl);
+ scan_slots_for_writeback(zram, mode, lo, hi, pp_ctl);
continue;
}
}
- err = zram_writeback_slots(zram, ctl);
+ err = zram_writeback_slots(zram, pp_ctl, wb_ctl);
if (err)
ret = err;
release_init_lock:
- release_pp_ctl(zram, ctl);
+ release_pp_ctl(zram, pp_ctl);
+ release_wb_ctl(wb_ctl);
atomic_set(&zram->pp_in_progress, 0);
up_read(&zram->init_lock);
@@ -1112,7 +1375,9 @@ static int read_from_bdev(struct zram *zram, struct page *page,
return -EIO;
}
-static void free_block_bdev(struct zram *zram, unsigned long blk_idx) {};
+static void zram_release_bdev_block(struct zram *zram, unsigned long blk_idx)
+{
+}
#endif
#ifdef CONFIG_ZRAM_MEMORY_TRACKING
@@ -1634,7 +1899,7 @@ static void zram_free_page(struct zram *zram, size_t index)
if (zram_test_flag(zram, index, ZRAM_WB)) {
zram_clear_flag(zram, index, ZRAM_WB);
- free_block_bdev(zram, zram_get_handle(zram, index));
+ zram_release_bdev_block(zram, zram_get_handle(zram, index));
goto out;
}
@@ -1740,14 +2005,14 @@ static int zram_read_page(struct zram *zram, struct page *page, u32 index,
ret = zram_read_from_zspool(zram, page, index);
zram_slot_unlock(zram, index);
} else {
+ unsigned long blk_idx = zram_get_handle(zram, index);
+
/*
* The slot should be unlocked before reading from the backing
* device.
*/
zram_slot_unlock(zram, index);
-
- ret = read_from_bdev(zram, page, zram_get_handle(zram, index),
- parent);
+ ret = read_from_bdev(zram, page, blk_idx, parent);
}
/* Should NEVER happen. Return bio error if it does. */
@@ -2610,6 +2875,7 @@ static DEVICE_ATTR_RW(backing_dev);
static DEVICE_ATTR_WO(writeback);
static DEVICE_ATTR_RW(writeback_limit);
static DEVICE_ATTR_RW(writeback_limit_enable);
+static DEVICE_ATTR_RW(writeback_batch_size);
#endif
#ifdef CONFIG_ZRAM_MULTI_COMP
static DEVICE_ATTR_RW(recomp_algorithm);
@@ -2631,6 +2897,7 @@ static struct attribute *zram_disk_attrs[] = {
&dev_attr_writeback.attr,
&dev_attr_writeback_limit.attr,
&dev_attr_writeback_limit_enable.attr,
+ &dev_attr_writeback_batch_size.attr,
#endif
&dev_attr_io_stat.attr,
&dev_attr_mm_stat.attr,
@@ -2692,7 +2959,7 @@ static int zram_add(void)
init_rwsem(&zram->init_lock);
#ifdef CONFIG_ZRAM_WRITEBACK
- spin_lock_init(&zram->wb_limit_lock);
+ zram->wb_batch_size = 32;
#endif
/* gendisk structure */
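Condensing the zram hunks above, the batched writeback loop cycles requests between an idle list and a done list. All identifiers below are from the patch; this is a summary of the control flow rather than standalone code:

while ((pps = select_pp_slot(ctl))) {
	/* Block until a request from the preallocated pool is free. */
	while (!(req = zram_select_idle_req(wb_ctl))) {
		wait_event(wb_ctl->done_wait, !list_empty(&wb_ctl->done_reqs));
		/* Recycle finished bios back to the idle list. */
		zram_complete_done_reqs(zram, wb_ctl);
	}
	/* Fill req->page from the slot, then submit asynchronously;
	 * zram_writeback_endio() later moves req to the done list. */
	zram_submit_wb_request(zram, wb_ctl, req);
}
/* Drain: wait for the remaining in-flight bios and complete them. */
while (atomic_read(&wb_ctl->num_inflight) > 0) {
	wait_event(wb_ctl->done_wait, !list_empty(&wb_ctl->done_reqs));
	zram_complete_done_reqs(zram, wb_ctl);
}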
diff --git a/drivers/block/zram/zram_drv.h b/drivers/block/zram/zram_drv.h
index 6cee93f9c0d0..c6d94501376c 100644
--- a/drivers/block/zram/zram_drv.h
+++ b/drivers/block/zram/zram_drv.h
@@ -127,8 +127,8 @@ struct zram {
bool claim; /* Protected by disk->open_mutex */
#ifdef CONFIG_ZRAM_WRITEBACK
struct file *backing_dev;
- spinlock_t wb_limit_lock;
bool wb_limit_enable;
+ u32 wb_batch_size;
u64 bd_wb_limit;
struct block_device *bdev;
unsigned long *bitmap;
diff --git a/drivers/char/mem.c b/drivers/char/mem.c
index 34b815901b20..52039fae1594 100644
--- a/drivers/char/mem.c
+++ b/drivers/char/mem.c
@@ -304,13 +304,13 @@ static unsigned zero_mmap_capabilities(struct file *file)
}
/* can't do an in-place private mapping if there's no MMU */
-static inline int private_mapping_ok(struct vm_area_struct *vma)
+static inline int private_mapping_ok(struct vm_area_desc *desc)
{
- return is_nommu_shared_mapping(vma->vm_flags);
+ return is_nommu_shared_mapping(desc->vm_flags);
}
#else
-static inline int private_mapping_ok(struct vm_area_struct *vma)
+static inline int private_mapping_ok(struct vm_area_desc *desc)
{
return 1;
}
@@ -322,46 +322,49 @@ static const struct vm_operations_struct mmap_mem_ops = {
#endif
};
-static int mmap_mem(struct file *file, struct vm_area_struct *vma)
+static int mmap_filter_error(int err)
{
- size_t size = vma->vm_end - vma->vm_start;
- phys_addr_t offset = (phys_addr_t)vma->vm_pgoff << PAGE_SHIFT;
+ return -EAGAIN;
+}
+
+static int mmap_mem_prepare(struct vm_area_desc *desc)
+{
+ struct file *file = desc->file;
+ const size_t size = vma_desc_size(desc);
+ const phys_addr_t offset = (phys_addr_t)desc->pgoff << PAGE_SHIFT;
/* Does it even fit in phys_addr_t? */
- if (offset >> PAGE_SHIFT != vma->vm_pgoff)
+ if (offset >> PAGE_SHIFT != desc->pgoff)
return -EINVAL;
/* It's illegal to wrap around the end of the physical address space. */
if (offset + (phys_addr_t)size - 1 < offset)
return -EINVAL;
- if (!valid_mmap_phys_addr_range(vma->vm_pgoff, size))
+ if (!valid_mmap_phys_addr_range(desc->pgoff, size))
return -EINVAL;
- if (!private_mapping_ok(vma))
+ if (!private_mapping_ok(desc))
return -ENOSYS;
- if (!range_is_allowed(vma->vm_pgoff, size))
+ if (!range_is_allowed(desc->pgoff, size))
return -EPERM;
- if (!phys_mem_access_prot_allowed(file, vma->vm_pgoff, size,
- &vma->vm_page_prot))
+ if (!phys_mem_access_prot_allowed(file, desc->pgoff, size,
+ &desc->page_prot))
return -EINVAL;
- vma->vm_page_prot = phys_mem_access_prot(file, vma->vm_pgoff,
- size,
- vma->vm_page_prot);
+ desc->page_prot = phys_mem_access_prot(file, desc->pgoff,
+ size,
+ desc->page_prot);
- vma->vm_ops = &mmap_mem_ops;
+ desc->vm_ops = &mmap_mem_ops;
+
+ /* Remap-pfn-range will mark the range VM_IO. */
+ mmap_action_remap_full(desc, desc->pgoff);
+ /* We filter remap errors to -EAGAIN. */
+ desc->action.error_hook = mmap_filter_error;
- /* Remap-pfn-range will mark the range VM_IO */
- if (remap_pfn_range(vma,
- vma->vm_start,
- vma->vm_pgoff,
- size,
- vma->vm_page_prot)) {
- return -EAGAIN;
- }
return 0;
}
@@ -501,14 +504,26 @@ static ssize_t read_zero(struct file *file, char __user *buf,
return cleared;
}
-static int mmap_zero(struct file *file, struct vm_area_struct *vma)
+static int mmap_zero_private_success(const struct vm_area_struct *vma)
+{
+ /*
+ * This is a highly unique situation where we mark a MAP_PRIVATE mapping
+ * of /dev/zero anonymous, despite it not being.
+ */
+ vma_set_anonymous((struct vm_area_struct *)vma);
+
+ return 0;
+}
+
+static int mmap_zero_prepare(struct vm_area_desc *desc)
{
#ifndef CONFIG_MMU
return -ENOSYS;
#endif
- if (vma->vm_flags & VM_SHARED)
- return shmem_zero_setup(vma);
- vma_set_anonymous(vma);
+ if (desc->vm_flags & VM_SHARED)
+ return shmem_zero_setup_desc(desc);
+
+ desc->action.success_hook = mmap_zero_private_success;
return 0;
}
@@ -526,10 +541,11 @@ static unsigned long get_unmapped_area_zero(struct file *file,
{
if (flags & MAP_SHARED) {
/*
- * mmap_zero() will call shmem_zero_setup() to create a file,
- * so use shmem's get_unmapped_area in case it can be huge;
- * and pass NULL for file as in mmap.c's get_unmapped_area(),
- * so as not to confuse shmem with our handle on "/dev/zero".
+ * mmap_zero_prepare() will call shmem_zero_setup() to create a
+ * file, so use shmem's get_unmapped_area in case it can be
+ * huge; and pass NULL for file as in mmap.c's
+ * get_unmapped_area(), so as not to confuse shmem with our
+ * handle on "/dev/zero".
*/
return shmem_get_unmapped_area(NULL, addr, len, pgoff, flags);
}
@@ -542,7 +558,7 @@ static unsigned long get_unmapped_area_zero(struct file *file,
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
return thp_get_unmapped_area(file, addr, len, pgoff, flags);
#else
- return mm_get_unmapped_area(current->mm, file, addr, len, pgoff, flags);
+ return mm_get_unmapped_area(file, addr, len, pgoff, flags);
#endif
}
#endif /* CONFIG_MMU */
@@ -632,7 +648,7 @@ static const struct file_operations __maybe_unused mem_fops = {
.llseek = memory_lseek,
.read = read_mem,
.write = write_mem,
- .mmap = mmap_mem,
+ .mmap_prepare = mmap_mem_prepare,
.open = open_mem,
#ifndef CONFIG_MMU
.get_unmapped_area = get_unmapped_area_mem,
@@ -668,7 +684,7 @@ static const struct file_operations zero_fops = {
.write_iter = write_iter_zero,
.splice_read = copy_splice_read,
.splice_write = splice_write_zero,
- .mmap = mmap_zero,
+ .mmap_prepare = mmap_zero_prepare,
.get_unmapped_area = get_unmapped_area_zero,
#ifndef CONFIG_MMU
.mmap_capabilities = zero_mmap_capabilities,
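The mem.c hunks above follow the general shape of an .mmap_prepare conversion: validate the request from the vm_area_desc, describe the mapping (flags, vm_ops, page protection), and return before any VMA exists. A minimal sketch for a hypothetical driver, using only desc fields exercised in this diff (MY_DEV_NR_PAGES, my_vm_ops and my_fops are invented):

static const struct vm_operations_struct my_vm_ops = {
	/* .fault = my_fault, etc. */
};

#define MY_DEV_NR_PAGES 256	/* invented device size */

static int my_mmap_prepare(struct vm_area_desc *desc)
{
	const size_t size = vma_desc_size(desc);

	/* Reject mappings that extend past the (hypothetical) device region. */
	if (desc->pgoff + (size >> PAGE_SHIFT) > MY_DEV_NR_PAGES)
		return -EINVAL;

	desc->vm_flags |= VM_HUGEPAGE;
	desc->vm_ops = &my_vm_ops;
	return 0;
}

static const struct file_operations my_fops = {
	.mmap_prepare = my_mmap_prepare,	/* replaces the old .mmap hook */
};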
diff --git a/drivers/dax/device.c b/drivers/dax/device.c
index 2bb40a6060af..22999a402e02 100644
--- a/drivers/dax/device.c
+++ b/drivers/dax/device.c
@@ -13,8 +13,9 @@
#include "dax-private.h"
#include "bus.h"
-static int check_vma(struct dev_dax *dev_dax, struct vm_area_struct *vma,
- const char *func)
+static int __check_vma(struct dev_dax *dev_dax, vm_flags_t vm_flags,
+ unsigned long start, unsigned long end, struct file *file,
+ const char *func)
{
struct device *dev = &dev_dax->dev;
unsigned long mask;
@@ -23,7 +24,7 @@ static int check_vma(struct dev_dax *dev_dax, struct vm_area_struct *vma,
return -ENXIO;
/* prevent private mappings from being established */
- if ((vma->vm_flags & VM_MAYSHARE) != VM_MAYSHARE) {
+ if ((vm_flags & VM_MAYSHARE) != VM_MAYSHARE) {
dev_info_ratelimited(dev,
"%s: %s: fail, attempted private mapping\n",
current->comm, func);
@@ -31,15 +32,15 @@ static int check_vma(struct dev_dax *dev_dax, struct vm_area_struct *vma,
}
mask = dev_dax->align - 1;
- if (vma->vm_start & mask || vma->vm_end & mask) {
+ if (start & mask || end & mask) {
dev_info_ratelimited(dev,
"%s: %s: fail, unaligned vma (%#lx - %#lx, %#lx)\n",
- current->comm, func, vma->vm_start, vma->vm_end,
+ current->comm, func, start, end,
mask);
return -EINVAL;
}
- if (!vma_is_dax(vma)) {
+ if (!file_is_dax(file)) {
dev_info_ratelimited(dev,
"%s: %s: fail, vma is not DAX capable\n",
current->comm, func);
@@ -49,6 +50,13 @@ static int check_vma(struct dev_dax *dev_dax, struct vm_area_struct *vma,
return 0;
}
+static int check_vma(struct dev_dax *dev_dax, struct vm_area_struct *vma,
+ const char *func)
+{
+ return __check_vma(dev_dax, vma->vm_flags, vma->vm_start, vma->vm_end,
+ vma->vm_file, func);
+}
+
/* see "strong" declaration in tools/testing/nvdimm/dax-dev.c */
__weak phys_addr_t dax_pgoff_to_phys(struct dev_dax *dev_dax, pgoff_t pgoff,
unsigned long size)
@@ -285,8 +293,9 @@ static const struct vm_operations_struct dax_vm_ops = {
.pagesize = dev_dax_pagesize,
};
-static int dax_mmap(struct file *filp, struct vm_area_struct *vma)
+static int dax_mmap_prepare(struct vm_area_desc *desc)
{
+ struct file *filp = desc->file;
struct dev_dax *dev_dax = filp->private_data;
int rc, id;
@@ -297,13 +306,14 @@ static int dax_mmap(struct file *filp, struct vm_area_struct *vma)
* fault time.
*/
id = dax_read_lock();
- rc = check_vma(dev_dax, vma, __func__);
+ rc = __check_vma(dev_dax, desc->vm_flags, desc->start, desc->end, filp,
+ __func__);
dax_read_unlock(id);
if (rc)
return rc;
- vma->vm_ops = &dax_vm_ops;
- vm_flags_set(vma, VM_HUGEPAGE);
+ desc->vm_ops = &dax_vm_ops;
+ desc->vm_flags |= VM_HUGEPAGE;
return 0;
}
@@ -330,14 +340,13 @@ static unsigned long dax_get_unmapped_area(struct file *filp,
if ((off + len_align) < off)
goto out;
- addr_align = mm_get_unmapped_area(current->mm, filp, addr, len_align,
- pgoff, flags);
+ addr_align = mm_get_unmapped_area(filp, addr, len_align, pgoff, flags);
if (!IS_ERR_VALUE(addr_align)) {
addr_align += (off - addr_align) & (align - 1);
return addr_align;
}
out:
- return mm_get_unmapped_area(current->mm, filp, addr, len, pgoff, flags);
+ return mm_get_unmapped_area(filp, addr, len, pgoff, flags);
}
static const struct address_space_operations dev_dax_aops = {
@@ -377,7 +386,7 @@ static const struct file_operations dax_fops = {
.open = dax_open,
.release = dax_release,
.get_unmapped_area = dax_get_unmapped_area,
- .mmap = dax_mmap,
+ .mmap_prepare = dax_mmap_prepare,
.fop_flags = FOP_MMAP_SYNC,
};
diff --git a/drivers/firmware/efi/arm-runtime.c b/drivers/firmware/efi/arm-runtime.c
index 83092d93f36a..53a5336cde5a 100644
--- a/drivers/firmware/efi/arm-runtime.c
+++ b/drivers/firmware/efi/arm-runtime.c
@@ -12,18 +12,18 @@
#include <linux/io.h>
#include <linux/memblock.h>
#include <linux/mm_types.h>
+#include <linux/pgalloc.h>
+#include <linux/pgtable.h>
#include <linux/preempt.h>
#include <linux/rbtree.h>
#include <linux/rwsem.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
-#include <linux/pgtable.h>
#include <asm/cacheflush.h>
#include <asm/efi.h>
#include <asm/mmu.h>
-#include <asm/pgalloc.h>
#if defined(CONFIG_PTDUMP_DEBUGFS) || defined(CONFIG_ARM_PTDUMP_DEBUGFS)
#include <asm/ptdump.h>
diff --git a/drivers/firmware/efi/riscv-runtime.c b/drivers/firmware/efi/riscv-runtime.c
index 4a2588358be2..66f584a228d0 100644
--- a/drivers/firmware/efi/riscv-runtime.c
+++ b/drivers/firmware/efi/riscv-runtime.c
@@ -14,18 +14,18 @@
#include <linux/io.h>
#include <linux/memblock.h>
#include <linux/mm_types.h>
+#include <linux/pgalloc.h>
+#include <linux/pgtable.h>
#include <linux/preempt.h>
#include <linux/rbtree.h>
#include <linux/rwsem.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
-#include <linux/pgtable.h>
#include <asm/cacheflush.h>
#include <asm/efi.h>
#include <asm/mmu.h>
-#include <asm/pgalloc.h>
static bool __init efi_virtmap_init(void)
{
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
index 46c84fc60af1..af53e796ea1b 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
@@ -217,7 +217,7 @@ svm_migrate_get_vram_page(struct svm_range *prange, unsigned long pfn)
page = pfn_to_page(pfn);
svm_range_bo_ref(prange->svm_bo);
page->zone_device_data = prange->svm_bo;
- zone_device_page_init(page);
+ zone_device_page_init(page, 0);
}
static void
@@ -567,8 +567,9 @@ out:
return r < 0 ? r : 0;
}
-static void svm_migrate_page_free(struct page *page)
+static void svm_migrate_folio_free(struct folio *folio)
{
+ struct page *page = &folio->page;
struct svm_range_bo *svm_bo = page->zone_device_data;
if (svm_bo) {
@@ -1008,7 +1009,7 @@ out_mmput:
}
static const struct dev_pagemap_ops svm_migrate_pgmap_ops = {
- .page_free = svm_migrate_page_free,
+ .folio_free = svm_migrate_folio_free,
.migrate_to_ram = svm_migrate_to_ram,
};
diff --git a/drivers/gpu/drm/drm_pagemap.c b/drivers/gpu/drm/drm_pagemap.c
index 22c44807e3fe..37d7cfbbb3e8 100644
--- a/drivers/gpu/drm/drm_pagemap.c
+++ b/drivers/gpu/drm/drm_pagemap.c
@@ -196,7 +196,7 @@ static void drm_pagemap_get_devmem_page(struct page *page,
struct drm_pagemap_zdd *zdd)
{
page->zone_device_data = drm_pagemap_zdd_get(zdd);
- zone_device_page_init(page);
+ zone_device_page_init(page, 0);
}
/**
@@ -752,15 +752,15 @@ err_out:
}
/**
- * drm_pagemap_page_free() - Put GPU SVM zone device data associated with a page
- * @page: Pointer to the page
+ * drm_pagemap_folio_free() - Put GPU SVM zone device data associated with a folio
+ * @folio: Pointer to the folio
*
* This function is a callback used to put the GPU SVM zone device data
* associated with a page when it is being released.
*/
-static void drm_pagemap_page_free(struct page *page)
+static void drm_pagemap_folio_free(struct folio *folio)
{
- drm_pagemap_zdd_put(page->zone_device_data);
+ drm_pagemap_zdd_put(folio->page.zone_device_data);
}
/**
@@ -788,7 +788,7 @@ static vm_fault_t drm_pagemap_migrate_to_ram(struct vm_fault *vmf)
}
static const struct dev_pagemap_ops drm_pagemap_pagemap_ops = {
- .page_free = drm_pagemap_page_free,
+ .folio_free = drm_pagemap_folio_free,
.migrate_to_ram = drm_pagemap_migrate_to_ram,
};
diff --git a/drivers/gpu/drm/nouveau/nouveau_dmem.c b/drivers/gpu/drm/nouveau/nouveau_dmem.c
index ca4932a150e3..58071652679d 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dmem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dmem.c
@@ -50,6 +50,7 @@
*/
#define DMEM_CHUNK_SIZE (2UL << 20)
#define DMEM_CHUNK_NPAGES (DMEM_CHUNK_SIZE >> PAGE_SHIFT)
+#define NR_CHUNKS (128)
enum nouveau_aper {
NOUVEAU_APER_VIRT,
@@ -83,9 +84,15 @@ struct nouveau_dmem {
struct list_head chunks;
struct mutex mutex;
struct page *free_pages;
+ struct folio *free_folios;
spinlock_t lock;
};
+struct nouveau_dmem_dma_info {
+ dma_addr_t dma_addr;
+ size_t size;
+};
+
static struct nouveau_dmem_chunk *nouveau_page_to_chunk(struct page *page)
{
return container_of(page_pgmap(page), struct nouveau_dmem_chunk,
@@ -108,14 +115,20 @@ unsigned long nouveau_dmem_page_addr(struct page *page)
return chunk->bo->offset + off;
}
-static void nouveau_dmem_page_free(struct page *page)
+static void nouveau_dmem_folio_free(struct folio *folio)
{
+ struct page *page = &folio->page;
struct nouveau_dmem_chunk *chunk = nouveau_page_to_chunk(page);
struct nouveau_dmem *dmem = chunk->drm->dmem;
spin_lock(&dmem->lock);
- page->zone_device_data = dmem->free_pages;
- dmem->free_pages = page;
+ if (folio_order(folio)) {
+ page->zone_device_data = dmem->free_folios;
+ dmem->free_folios = folio;
+ } else {
+ page->zone_device_data = dmem->free_pages;
+ dmem->free_pages = page;
+ }
WARN_ON(!chunk->callocated);
chunk->callocated--;
@@ -139,20 +152,28 @@ static void nouveau_dmem_fence_done(struct nouveau_fence **fence)
}
}
-static int nouveau_dmem_copy_one(struct nouveau_drm *drm, struct page *spage,
- struct page *dpage, dma_addr_t *dma_addr)
+static int nouveau_dmem_copy_folio(struct nouveau_drm *drm,
+ struct folio *sfolio, struct folio *dfolio,
+ struct nouveau_dmem_dma_info *dma_info)
{
struct device *dev = drm->dev->dev;
+ struct page *dpage = folio_page(dfolio, 0);
+ struct page *spage = folio_page(sfolio, 0);
- lock_page(dpage);
+ folio_lock(dfolio);
- *dma_addr = dma_map_page(dev, dpage, 0, PAGE_SIZE, DMA_BIDIRECTIONAL);
- if (dma_mapping_error(dev, *dma_addr))
+ dma_info->dma_addr = dma_map_page(dev, dpage, 0, page_size(dpage),
+ DMA_BIDIRECTIONAL);
+ dma_info->size = page_size(dpage);
+ if (dma_mapping_error(dev, dma_info->dma_addr))
return -EIO;
- if (drm->dmem->migrate.copy_func(drm, 1, NOUVEAU_APER_HOST, *dma_addr,
- NOUVEAU_APER_VRAM, nouveau_dmem_page_addr(spage))) {
- dma_unmap_page(dev, *dma_addr, PAGE_SIZE, DMA_BIDIRECTIONAL);
+ if (drm->dmem->migrate.copy_func(drm, folio_nr_pages(sfolio),
+ NOUVEAU_APER_HOST, dma_info->dma_addr,
+ NOUVEAU_APER_VRAM,
+ nouveau_dmem_page_addr(spage))) {
+ dma_unmap_page(dev, dma_info->dma_addr, page_size(dpage),
+ DMA_BIDIRECTIONAL);
return -EIO;
}
@@ -165,21 +186,48 @@ static vm_fault_t nouveau_dmem_migrate_to_ram(struct vm_fault *vmf)
struct nouveau_dmem *dmem = drm->dmem;
struct nouveau_fence *fence;
struct nouveau_svmm *svmm;
- struct page *spage, *dpage;
- unsigned long src = 0, dst = 0;
- dma_addr_t dma_addr = 0;
+ struct page *dpage;
vm_fault_t ret = 0;
+ int err;
struct migrate_vma args = {
.vma = vmf->vma,
- .start = vmf->address,
- .end = vmf->address + PAGE_SIZE,
- .src = &src,
- .dst = &dst,
.pgmap_owner = drm->dev,
.fault_page = vmf->page,
- .flags = MIGRATE_VMA_SELECT_DEVICE_PRIVATE,
+ .flags = MIGRATE_VMA_SELECT_DEVICE_PRIVATE |
+ MIGRATE_VMA_SELECT_COMPOUND,
+ .src = NULL,
+ .dst = NULL,
};
+ unsigned int order, nr;
+ struct folio *sfolio, *dfolio;
+ struct nouveau_dmem_dma_info dma_info;
+
+ sfolio = page_folio(vmf->page);
+ order = folio_order(sfolio);
+ nr = 1 << order;
+
+ /*
+ * Handle partial unmap faults, where the folio is large, but
+ * the pmd is split.
+ */
+ if (vmf->pte) {
+ order = 0;
+ nr = 1;
+ }
+
+ if (order)
+ args.flags |= MIGRATE_VMA_SELECT_COMPOUND;
+ args.start = ALIGN_DOWN(vmf->address, (PAGE_SIZE << order));
+ args.vma = vmf->vma;
+ args.end = args.start + (PAGE_SIZE << order);
+ args.src = kcalloc(nr, sizeof(*args.src), GFP_KERNEL);
+ args.dst = kcalloc(nr, sizeof(*args.dst), GFP_KERNEL);
+
+ if (!args.src || !args.dst) {
+ ret = VM_FAULT_OOM;
+ goto err;
+ }
/*
* FIXME what we really want is to find some heuristic to migrate more
* than just one page on CPU fault. When such fault happens it is very
@@ -190,22 +238,28 @@ static vm_fault_t nouveau_dmem_migrate_to_ram(struct vm_fault *vmf)
if (!args.cpages)
return 0;
- spage = migrate_pfn_to_page(src);
- if (!spage || !(src & MIGRATE_PFN_MIGRATE))
- goto done;
-
- dpage = alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO, vmf->vma, vmf->address);
- if (!dpage)
+ if (order)
+ dpage = folio_page(vma_alloc_folio(GFP_HIGHUSER | __GFP_ZERO,
+ order, vmf->vma, vmf->address), 0);
+ else
+ dpage = alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO, vmf->vma,
+ vmf->address);
+ if (!dpage) {
+ ret = VM_FAULT_OOM;
goto done;
+ }
- dst = migrate_pfn(page_to_pfn(dpage));
+ args.dst[0] = migrate_pfn(page_to_pfn(dpage));
+ if (order)
+ args.dst[0] |= MIGRATE_PFN_COMPOUND;
+ dfolio = page_folio(dpage);
- svmm = spage->zone_device_data;
+ svmm = folio_zone_device_data(sfolio);
mutex_lock(&svmm->mutex);
nouveau_svmm_invalidate(svmm, args.start, args.end);
- ret = nouveau_dmem_copy_one(drm, spage, dpage, &dma_addr);
+ err = nouveau_dmem_copy_folio(drm, sfolio, dfolio, &dma_info);
mutex_unlock(&svmm->mutex);
- if (ret) {
+ if (err) {
ret = VM_FAULT_SIGBUS;
goto done;
}
@@ -213,25 +267,40 @@ static vm_fault_t nouveau_dmem_migrate_to_ram(struct vm_fault *vmf)
nouveau_fence_new(&fence, dmem->migrate.chan);
migrate_vma_pages(&args);
nouveau_dmem_fence_done(&fence);
- dma_unmap_page(drm->dev->dev, dma_addr, PAGE_SIZE, DMA_BIDIRECTIONAL);
+ dma_unmap_page(drm->dev->dev, dma_info.dma_addr, PAGE_SIZE,
+ DMA_BIDIRECTIONAL);
done:
migrate_vma_finalize(&args);
+err:
+ kfree(args.src);
+ kfree(args.dst);
return ret;
}
+static void nouveau_dmem_folio_split(struct folio *head, struct folio *tail)
+{
+ if (tail == NULL)
+ return;
+ tail->pgmap = head->pgmap;
+ tail->mapping = head->mapping;
+ folio_set_zone_device_data(tail, folio_zone_device_data(head));
+}
+
static const struct dev_pagemap_ops nouveau_dmem_pagemap_ops = {
- .page_free = nouveau_dmem_page_free,
+ .folio_free = nouveau_dmem_folio_free,
.migrate_to_ram = nouveau_dmem_migrate_to_ram,
+ .folio_split = nouveau_dmem_folio_split,
};
static int
-nouveau_dmem_chunk_alloc(struct nouveau_drm *drm, struct page **ppage)
+nouveau_dmem_chunk_alloc(struct nouveau_drm *drm, struct page **ppage,
+ bool is_large)
{
struct nouveau_dmem_chunk *chunk;
struct resource *res;
struct page *page;
void *ptr;
- unsigned long i, pfn_first;
+ unsigned long i, pfn_first, pfn;
int ret;
chunk = kzalloc(sizeof(*chunk), GFP_KERNEL);
@@ -241,7 +310,7 @@ nouveau_dmem_chunk_alloc(struct nouveau_drm *drm, struct page **ppage)
}
/* Allocate unused physical address space for device private pages. */
- res = request_free_mem_region(&iomem_resource, DMEM_CHUNK_SIZE,
+ res = request_free_mem_region(&iomem_resource, DMEM_CHUNK_SIZE * NR_CHUNKS,
"nouveau_dmem");
if (IS_ERR(res)) {
ret = PTR_ERR(res);
@@ -274,16 +343,40 @@ nouveau_dmem_chunk_alloc(struct nouveau_drm *drm, struct page **ppage)
pfn_first = chunk->pagemap.range.start >> PAGE_SHIFT;
page = pfn_to_page(pfn_first);
spin_lock(&drm->dmem->lock);
- for (i = 0; i < DMEM_CHUNK_NPAGES - 1; ++i, ++page) {
- page->zone_device_data = drm->dmem->free_pages;
- drm->dmem->free_pages = page;
+
+ pfn = pfn_first;
+ for (i = 0; i < NR_CHUNKS; i++) {
+ int j;
+
+ if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) || !is_large) {
+ for (j = 0; j < DMEM_CHUNK_NPAGES - 1; j++, pfn++) {
+ page = pfn_to_page(pfn);
+ page->zone_device_data = drm->dmem->free_pages;
+ drm->dmem->free_pages = page;
+ }
+ } else {
+ page = pfn_to_page(pfn);
+ page->zone_device_data = drm->dmem->free_folios;
+ drm->dmem->free_folios = page_folio(page);
+ pfn += DMEM_CHUNK_NPAGES;
+ }
+ }
+
+ /* Move to next page */
+ if (is_large) {
+ *ppage = &drm->dmem->free_folios->page;
+ drm->dmem->free_folios = (*ppage)->zone_device_data;
+ } else {
+ *ppage = drm->dmem->free_pages;
+ drm->dmem->free_pages = (*ppage)->zone_device_data;
}
- *ppage = page;
+
chunk->callocated++;
spin_unlock(&drm->dmem->lock);
- NV_INFO(drm, "DMEM: registered %ldMB of device memory\n",
- DMEM_CHUNK_SIZE >> 20);
+ NV_INFO(drm, "DMEM: registered %ldMB of %sdevice memory %lx %lx\n",
+ NR_CHUNKS * DMEM_CHUNK_SIZE >> 20, is_large ? "THP " : "", pfn_first,
+ nouveau_dmem_page_addr(page));
return 0;
@@ -298,27 +391,41 @@ out:
}
static struct page *
-nouveau_dmem_page_alloc_locked(struct nouveau_drm *drm)
+nouveau_dmem_page_alloc_locked(struct nouveau_drm *drm, bool is_large)
{
struct nouveau_dmem_chunk *chunk;
struct page *page = NULL;
+ struct folio *folio = NULL;
int ret;
+ unsigned int order = 0;
spin_lock(&drm->dmem->lock);
- if (drm->dmem->free_pages) {
+ if (is_large && drm->dmem->free_folios) {
+ folio = drm->dmem->free_folios;
+ page = &folio->page;
+ drm->dmem->free_folios = page->zone_device_data;
+ chunk = nouveau_page_to_chunk(&folio->page);
+ chunk->callocated++;
+ spin_unlock(&drm->dmem->lock);
+ order = ilog2(DMEM_CHUNK_NPAGES);
+ } else if (!is_large && drm->dmem->free_pages) {
page = drm->dmem->free_pages;
drm->dmem->free_pages = page->zone_device_data;
chunk = nouveau_page_to_chunk(page);
chunk->callocated++;
spin_unlock(&drm->dmem->lock);
+ folio = page_folio(page);
} else {
spin_unlock(&drm->dmem->lock);
- ret = nouveau_dmem_chunk_alloc(drm, &page);
+ ret = nouveau_dmem_chunk_alloc(drm, &page, is_large);
if (ret)
return NULL;
+ folio = page_folio(page);
+ if (is_large)
+ order = ilog2(DMEM_CHUNK_NPAGES);
}
- zone_device_page_init(page);
+ zone_device_folio_init(folio, order);
return page;
}
@@ -369,12 +476,12 @@ nouveau_dmem_evict_chunk(struct nouveau_dmem_chunk *chunk)
{
unsigned long i, npages = range_len(&chunk->pagemap.range) >> PAGE_SHIFT;
unsigned long *src_pfns, *dst_pfns;
- dma_addr_t *dma_addrs;
+ struct nouveau_dmem_dma_info *dma_info;
struct nouveau_fence *fence;
src_pfns = kvcalloc(npages, sizeof(*src_pfns), GFP_KERNEL | __GFP_NOFAIL);
dst_pfns = kvcalloc(npages, sizeof(*dst_pfns), GFP_KERNEL | __GFP_NOFAIL);
- dma_addrs = kvcalloc(npages, sizeof(*dma_addrs), GFP_KERNEL | __GFP_NOFAIL);
+ dma_info = kvcalloc(npages, sizeof(*dma_info), GFP_KERNEL | __GFP_NOFAIL);
migrate_device_range(src_pfns, chunk->pagemap.range.start >> PAGE_SHIFT,
npages);
@@ -382,17 +489,28 @@ nouveau_dmem_evict_chunk(struct nouveau_dmem_chunk *chunk)
for (i = 0; i < npages; i++) {
if (src_pfns[i] & MIGRATE_PFN_MIGRATE) {
struct page *dpage;
+ struct folio *folio = page_folio(
+ migrate_pfn_to_page(src_pfns[i]));
+ unsigned int order = folio_order(folio);
+
+ if (src_pfns[i] & MIGRATE_PFN_COMPOUND) {
+ dpage = folio_page(
+ folio_alloc(
+ GFP_HIGHUSER_MOVABLE, order), 0);
+ } else {
+ /*
+ * _GFP_NOFAIL because the GPU is going away and there
+ * is nothing sensible we can do if we can't copy the
+ * data back.
+ */
+ dpage = alloc_page(GFP_HIGHUSER | __GFP_NOFAIL);
+ }
- /*
- * _GFP_NOFAIL because the GPU is going away and there
- * is nothing sensible we can do if we can't copy the
- * data back.
- */
- dpage = alloc_page(GFP_HIGHUSER | __GFP_NOFAIL);
dst_pfns[i] = migrate_pfn(page_to_pfn(dpage));
- nouveau_dmem_copy_one(chunk->drm,
- migrate_pfn_to_page(src_pfns[i]), dpage,
- &dma_addrs[i]);
+ nouveau_dmem_copy_folio(chunk->drm,
+ page_folio(migrate_pfn_to_page(src_pfns[i])),
+ page_folio(dpage),
+ &dma_info[i]);
}
}
@@ -403,8 +521,9 @@ nouveau_dmem_evict_chunk(struct nouveau_dmem_chunk *chunk)
kvfree(src_pfns);
kvfree(dst_pfns);
for (i = 0; i < npages; i++)
- dma_unmap_page(chunk->drm->dev->dev, dma_addrs[i], PAGE_SIZE, DMA_BIDIRECTIONAL);
- kvfree(dma_addrs);
+ dma_unmap_page(chunk->drm->dev->dev, dma_info[i].dma_addr,
+ dma_info[i].size, DMA_BIDIRECTIONAL);
+ kvfree(dma_info);
}
void
@@ -607,31 +726,36 @@ nouveau_dmem_init(struct nouveau_drm *drm)
static unsigned long nouveau_dmem_migrate_copy_one(struct nouveau_drm *drm,
struct nouveau_svmm *svmm, unsigned long src,
- dma_addr_t *dma_addr, u64 *pfn)
+ struct nouveau_dmem_dma_info *dma_info, u64 *pfn)
{
struct device *dev = drm->dev->dev;
struct page *dpage, *spage;
unsigned long paddr;
+ bool is_large = false;
+ unsigned long mpfn;
spage = migrate_pfn_to_page(src);
if (!(src & MIGRATE_PFN_MIGRATE))
goto out;
- dpage = nouveau_dmem_page_alloc_locked(drm);
+ is_large = src & MIGRATE_PFN_COMPOUND;
+ dpage = nouveau_dmem_page_alloc_locked(drm, is_large);
if (!dpage)
goto out;
paddr = nouveau_dmem_page_addr(dpage);
if (spage) {
- *dma_addr = dma_map_page(dev, spage, 0, page_size(spage),
+ dma_info->dma_addr = dma_map_page(dev, spage, 0, page_size(spage),
DMA_BIDIRECTIONAL);
- if (dma_mapping_error(dev, *dma_addr))
+ dma_info->size = page_size(spage);
+ if (dma_mapping_error(dev, dma_info->dma_addr))
goto out_free_page;
- if (drm->dmem->migrate.copy_func(drm, 1,
- NOUVEAU_APER_VRAM, paddr, NOUVEAU_APER_HOST, *dma_addr))
+ if (drm->dmem->migrate.copy_func(drm, folio_nr_pages(page_folio(spage)),
+ NOUVEAU_APER_VRAM, paddr, NOUVEAU_APER_HOST,
+ dma_info->dma_addr))
goto out_dma_unmap;
} else {
- *dma_addr = DMA_MAPPING_ERROR;
+ dma_info->dma_addr = DMA_MAPPING_ERROR;
if (drm->dmem->migrate.clear_func(drm, page_size(dpage),
NOUVEAU_APER_VRAM, paddr))
goto out_free_page;
@@ -642,10 +766,13 @@ static unsigned long nouveau_dmem_migrate_copy_one(struct nouveau_drm *drm,
((paddr >> PAGE_SHIFT) << NVIF_VMM_PFNMAP_V0_ADDR_SHIFT);
if (src & MIGRATE_PFN_WRITE)
*pfn |= NVIF_VMM_PFNMAP_V0_W;
- return migrate_pfn(page_to_pfn(dpage));
+ mpfn = migrate_pfn(page_to_pfn(dpage));
+ if (folio_order(page_folio(dpage)))
+ mpfn |= MIGRATE_PFN_COMPOUND;
+ return mpfn;
out_dma_unmap:
- dma_unmap_page(dev, *dma_addr, PAGE_SIZE, DMA_BIDIRECTIONAL);
+ dma_unmap_page(dev, dma_info->dma_addr, PAGE_SIZE, DMA_BIDIRECTIONAL);
out_free_page:
nouveau_dmem_page_free_locked(drm, dpage);
out:
@@ -655,27 +782,38 @@ out:
static void nouveau_dmem_migrate_chunk(struct nouveau_drm *drm,
struct nouveau_svmm *svmm, struct migrate_vma *args,
- dma_addr_t *dma_addrs, u64 *pfns)
+ struct nouveau_dmem_dma_info *dma_info, u64 *pfns)
{
struct nouveau_fence *fence;
unsigned long addr = args->start, nr_dma = 0, i;
+ unsigned long order = 0;
+
+ for (i = 0; addr < args->end; ) {
+ struct folio *folio;
- for (i = 0; addr < args->end; i++) {
args->dst[i] = nouveau_dmem_migrate_copy_one(drm, svmm,
- args->src[i], dma_addrs + nr_dma, pfns + i);
- if (!dma_mapping_error(drm->dev->dev, dma_addrs[nr_dma]))
+ args->src[i], dma_info + nr_dma, pfns + i);
+ if (!args->dst[i]) {
+ i++;
+ addr += PAGE_SIZE;
+ continue;
+ }
+ if (!dma_mapping_error(drm->dev->dev, dma_info[nr_dma].dma_addr))
nr_dma++;
- addr += PAGE_SIZE;
+ folio = page_folio(migrate_pfn_to_page(args->dst[i]));
+ order = folio_order(folio);
+ i += 1 << order;
+ addr += (1 << order) * PAGE_SIZE;
}
nouveau_fence_new(&fence, drm->dmem->migrate.chan);
migrate_vma_pages(args);
nouveau_dmem_fence_done(&fence);
- nouveau_pfns_map(svmm, args->vma->vm_mm, args->start, pfns, i);
+ nouveau_pfns_map(svmm, args->vma->vm_mm, args->start, pfns, i, order);
while (nr_dma--) {
- dma_unmap_page(drm->dev->dev, dma_addrs[nr_dma], PAGE_SIZE,
- DMA_BIDIRECTIONAL);
+ dma_unmap_page(drm->dev->dev, dma_info[nr_dma].dma_addr,
+ dma_info[nr_dma].size, DMA_BIDIRECTIONAL);
}
migrate_vma_finalize(args);
}
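
Because each destination can now be either a base page or a PMD-sized folio, the loop above advances i and addr by the order of what was actually allocated. A short illustration of that stride, assuming 4 KiB base pages and PMD order 9:

	/*
	 * Index arithmetic used in the loop above (illustrative values only,
	 * assuming 4 KiB base pages and PMD order 9):
	 *
	 *   order == 0 -> i += 1,   addr += 4 KiB
	 *   order == 9 -> i += 512, addr += 2 MiB
	 *
	 * i.e. one dst[]/pfns[] slot per base page, and one folio's worth of
	 * slots consumed for each PMD-sized allocation.
	 */
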
@@ -688,20 +826,27 @@ nouveau_dmem_migrate_vma(struct nouveau_drm *drm,
unsigned long end)
{
unsigned long npages = (end - start) >> PAGE_SHIFT;
- unsigned long max = min(SG_MAX_SINGLE_ALLOC, npages);
- dma_addr_t *dma_addrs;
+ unsigned long max = npages;
struct migrate_vma args = {
.vma = vma,
.start = start,
.pgmap_owner = drm->dev,
- .flags = MIGRATE_VMA_SELECT_SYSTEM,
+ .flags = MIGRATE_VMA_SELECT_SYSTEM
+ | MIGRATE_VMA_SELECT_COMPOUND,
};
unsigned long i;
u64 *pfns;
int ret = -ENOMEM;
+ struct nouveau_dmem_dma_info *dma_info;
- if (drm->dmem == NULL)
- return -ENODEV;
+ if (drm->dmem == NULL) {
+ ret = -ENODEV;
+ goto out;
+ }
+
+ if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE))
+ if (max > (unsigned long)HPAGE_PMD_NR)
+ max = (unsigned long)HPAGE_PMD_NR;
args.src = kcalloc(max, sizeof(*args.src), GFP_KERNEL);
if (!args.src)
@@ -710,8 +855,8 @@ nouveau_dmem_migrate_vma(struct nouveau_drm *drm,
if (!args.dst)
goto out_free_src;
- dma_addrs = kmalloc_array(max, sizeof(*dma_addrs), GFP_KERNEL);
- if (!dma_addrs)
+ dma_info = kmalloc_array(max, sizeof(*dma_info), GFP_KERNEL);
+ if (!dma_info)
goto out_free_dst;
pfns = nouveau_pfns_alloc(max);
@@ -729,7 +874,7 @@ nouveau_dmem_migrate_vma(struct nouveau_drm *drm,
goto out_free_pfns;
if (args.cpages)
- nouveau_dmem_migrate_chunk(drm, svmm, &args, dma_addrs,
+ nouveau_dmem_migrate_chunk(drm, svmm, &args, dma_info,
pfns);
args.start = args.end;
}
@@ -738,7 +883,7 @@ nouveau_dmem_migrate_vma(struct nouveau_drm *drm,
out_free_pfns:
nouveau_pfns_free(pfns);
out_free_dma:
- kfree(dma_addrs);
+ kfree(dma_info);
out_free_dst:
kfree(args.dst);
out_free_src:
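
The nouveau_dmem_dma_info bookkeeping introduced above exists so the unmap side uses the same size as the original mapping, which may be a whole THP rather than PAGE_SIZE. A minimal sketch of the map/record/unmap pairing; the field names (dma_addr, size) are taken from how the diff uses the struct, while the helpers themselves are hypothetical:

	/* Sketch of the map/record/unmap pattern used by the diff above. */
	static int map_one_sketch(struct device *dev, struct page *page,
				  struct nouveau_dmem_dma_info *info)
	{
		info->size = page_size(page);	/* PAGE_SIZE or a full THP */
		info->dma_addr = dma_map_page(dev, page, 0, info->size,
					      DMA_BIDIRECTIONAL);
		return dma_mapping_error(dev, info->dma_addr) ? -ENOMEM : 0;
	}

	static void unmap_one_sketch(struct device *dev,
				     struct nouveau_dmem_dma_info *info)
	{
		/* must use the recorded size, not PAGE_SIZE, for compound pages */
		dma_unmap_page(dev, info->dma_addr, info->size, DMA_BIDIRECTIONAL);
	}
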
diff --git a/drivers/gpu/drm/nouveau/nouveau_svm.c b/drivers/gpu/drm/nouveau/nouveau_svm.c
index 6fa387da0637..b8a3378154d5 100644
--- a/drivers/gpu/drm/nouveau/nouveau_svm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_svm.c
@@ -921,12 +921,14 @@ nouveau_pfns_free(u64 *pfns)
void
nouveau_pfns_map(struct nouveau_svmm *svmm, struct mm_struct *mm,
- unsigned long addr, u64 *pfns, unsigned long npages)
+ unsigned long addr, u64 *pfns, unsigned long npages,
+ unsigned int page_shift)
{
struct nouveau_pfnmap_args *args = nouveau_pfns_to_args(pfns);
args->p.addr = addr;
- args->p.size = npages << PAGE_SHIFT;
+ args->p.size = npages << page_shift;
+ args->p.page = page_shift;
mutex_lock(&svmm->mutex);
diff --git a/drivers/gpu/drm/nouveau/nouveau_svm.h b/drivers/gpu/drm/nouveau/nouveau_svm.h
index e7d63d7f0c2d..3fd78662f17e 100644
--- a/drivers/gpu/drm/nouveau/nouveau_svm.h
+++ b/drivers/gpu/drm/nouveau/nouveau_svm.h
@@ -33,7 +33,8 @@ void nouveau_svmm_invalidate(struct nouveau_svmm *svmm, u64 start, u64 limit);
u64 *nouveau_pfns_alloc(unsigned long npages);
void nouveau_pfns_free(u64 *pfns);
void nouveau_pfns_map(struct nouveau_svmm *svmm, struct mm_struct *mm,
- unsigned long addr, u64 *pfns, unsigned long npages);
+ unsigned long addr, u64 *pfns, unsigned long npages,
+ unsigned int page_shift);
#else /* IS_ENABLED(CONFIG_DRM_NOUVEAU_SVM) */
static inline void nouveau_svm_init(struct nouveau_drm *drm) {}
static inline void nouveau_svm_fini(struct nouveau_drm *drm) {}
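
With the extra page_shift argument, nouveau_svm.c computes the mapped size as npages << page_shift instead of always using PAGE_SHIFT. As a purely illustrative example (assuming 4 KiB base pages and 2 MiB PMDs; the exact values a caller passes are not fully shown in this diff):

	/*
	 * Two equivalent ways to describe a 2 MiB mapping with the widened
	 * nouveau_pfns_map() interface (illustrative, 4 KiB base pages assumed):
	 *
	 *   npages = 512, page_shift = 12 -> size = 512 << 12 = 2 MiB
	 *   npages = 1,   page_shift = 21 -> size =   1 << 21 = 2 MiB
	 *
	 * args->p.page is set to page_shift so the GPU VMM maps at that
	 * granularity.
	 */
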
diff --git a/drivers/iommu/iommu-sva.c b/drivers/iommu/iommu-sva.c
index 1a51cfd82808..d236aef80a8d 100644
--- a/drivers/iommu/iommu-sva.c
+++ b/drivers/iommu/iommu-sva.c
@@ -10,6 +10,8 @@
#include "iommu-priv.h"
static DEFINE_MUTEX(iommu_sva_lock);
+static bool iommu_sva_present;
+static LIST_HEAD(iommu_sva_mms);
static struct iommu_domain *iommu_sva_domain_alloc(struct device *dev,
struct mm_struct *mm);
@@ -42,6 +44,7 @@ static struct iommu_mm_data *iommu_alloc_mm_data(struct mm_struct *mm, struct de
return ERR_PTR(-ENOSPC);
}
iommu_mm->pasid = pasid;
+ iommu_mm->mm = mm;
INIT_LIST_HEAD(&iommu_mm->sva_domains);
/*
* Make sure the write to mm->iommu_mm is not reordered in front of
@@ -132,8 +135,13 @@ struct iommu_sva *iommu_sva_bind_device(struct device *dev, struct mm_struct *mm
if (ret)
goto out_free_domain;
domain->users = 1;
- list_add(&domain->next, &mm->iommu_mm->sva_domains);
+ if (list_empty(&iommu_mm->sva_domains)) {
+ if (list_empty(&iommu_sva_mms))
+ iommu_sva_present = true;
+ list_add(&iommu_mm->mm_list_elm, &iommu_sva_mms);
+ }
+ list_add(&domain->next, &iommu_mm->sva_domains);
out:
refcount_set(&handle->users, 1);
mutex_unlock(&iommu_sva_lock);
@@ -175,6 +183,13 @@ void iommu_sva_unbind_device(struct iommu_sva *handle)
list_del(&domain->next);
iommu_domain_free(domain);
}
+
+ if (list_empty(&iommu_mm->sva_domains)) {
+ list_del(&iommu_mm->mm_list_elm);
+ if (list_empty(&iommu_sva_mms))
+ iommu_sva_present = false;
+ }
+
mutex_unlock(&iommu_sva_lock);
kfree(handle);
}
@@ -312,3 +327,15 @@ static struct iommu_domain *iommu_sva_domain_alloc(struct device *dev,
return domain;
}
+
+void iommu_sva_invalidate_kva_range(unsigned long start, unsigned long end)
+{
+ struct iommu_mm_data *iommu_mm;
+
+ guard(mutex)(&iommu_sva_lock);
+ if (!iommu_sva_present)
+ return;
+
+ list_for_each_entry(iommu_mm, &iommu_sva_mms, mm_list_elm)
+ mmu_notifier_arch_invalidate_secondary_tlbs(iommu_mm->mm, start, end);
+}
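
The new helper gives kernel-address-space TLB flushing a hook into SVA: every mm with at least one bound SVA domain is kept on iommu_sva_mms, and iommu_sva_invalidate_kva_range() pushes an invalidation of the given kernel range to each of them via mmu_notifier_arch_invalidate_secondary_tlbs(). A hypothetical caller (the real call site lives in core mm/arch code, not in this diff) would look roughly like:

	/*
	 * Hypothetical caller sketch: after tearing down a kernel mapping, flush
	 * the CPU TLB as usual and then tell SVA-capable IOMMUs to drop any
	 * cached kernel translations for the same range.
	 */
	static void kernel_unmap_flush_sketch(unsigned long start, unsigned long end)
	{
		flush_tlb_kernel_range(start, end);		/* CPU TLBs */
		iommu_sva_invalidate_kva_range(start, end);	/* device IOTLBs */
	}
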
diff --git a/drivers/pci/p2pdma.c b/drivers/pci/p2pdma.c
index 981a76b6b7c0..4a2fc7ab42c3 100644
--- a/drivers/pci/p2pdma.c
+++ b/drivers/pci/p2pdma.c
@@ -200,8 +200,9 @@ static const struct attribute_group p2pmem_group = {
.name = "p2pmem",
};
-static void p2pdma_page_free(struct page *page)
+static void p2pdma_folio_free(struct folio *folio)
{
+ struct page *page = &folio->page;
struct pci_p2pdma_pagemap *pgmap = to_p2p_pgmap(page_pgmap(page));
/* safe to dereference while a reference is held to the percpu ref */
struct pci_p2pdma *p2pdma = rcu_dereference_protected(
@@ -214,7 +215,7 @@ static void p2pdma_page_free(struct page *page)
}
static const struct dev_pagemap_ops p2pdma_pgmap_ops = {
- .page_free = p2pdma_page_free,
+ .folio_free = p2pdma_folio_free,
};
static void pci_p2pdma_release(void *data)
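
The p2pdma hunk is part of a wider conversion of dev_pagemap_ops from a per-page free callback to a per-folio one. A minimal sketch of what an affected driver ends up with; only the .folio_free member is taken from the diff, all other names are placeholders:

	/* Placeholder driver sketch of the .folio_free conversion shown above. */
	static void my_zone_device_folio_free(struct folio *folio)
	{
		/*
		 * The folio's last reference has been dropped; return the
		 * device-private memory backing it to the driver's allocator here.
		 */
	}

	static const struct dev_pagemap_ops my_pgmap_ops = {
		.folio_free = my_zone_device_folio_free,
	};
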
diff --git a/drivers/s390/char/sclp_sd.c b/drivers/s390/char/sclp_sd.c
index bb1bce70ec00..e4a90a14028a 100644
--- a/drivers/s390/char/sclp_sd.c
+++ b/drivers/s390/char/sclp_sd.c
@@ -16,8 +16,7 @@
#include <linux/vmalloc.h>
#include <linux/async.h>
#include <linux/mutex.h>
-
-#include <asm/pgalloc.h>
+#include <linux/pgalloc.h>
#include "sclp.h"
diff --git a/drivers/vdpa/vdpa_user/iova_domain.h b/drivers/vdpa/vdpa_user/iova_domain.h
index 775cad5238f3..a923971a64f5 100644
--- a/drivers/vdpa/vdpa_user/iova_domain.h
+++ b/drivers/vdpa/vdpa_user/iova_domain.h
@@ -17,8 +17,6 @@
#define IOVA_START_PFN 1
-#define INVALID_PHYS_ADDR (~(phys_addr_t)0)
-
#define BOUNCE_MAP_SHIFT 12
#define BOUNCE_MAP_SIZE (1 << BOUNCE_MAP_SHIFT)
#define BOUNCE_MAP_MASK (~(BOUNCE_MAP_SIZE - 1))