author    jianyun.gao <jianyungao89@gmail.com>    2025-09-29 08:26:08 +0800
committer Andrew Morton <akpm@linux-foundation.org>    2025-11-16 17:27:52 -0800
commit    b6c46600bfb28b4be4e9cff7bad4f2cf357e0fb7 (patch)
tree      162c5f2a2bef1cb2b013b79334197872d6d4e94d /mm
parent    37d17925480404f1293f24d027fbf3c9975603d7 (diff)
mm: fix some typos in mm module
Below are some typos in the code comments:

  intevals ==> intervals
  addesses ==> addresses
  unavaliable ==> unavailable
  facor ==> factor
  droping ==> dropping
  exlusive ==> exclusive
  decription ==> description
  confict ==> conflict
  desriptions ==> descriptions
  otherwize ==> otherwise
  vlaue ==> value
  cheching ==> checking
  exisitng ==> existing
  modifed ==> modified
  differenciate ==> differentiate
  refernece ==> reference
  permissons ==> permissions
  indepdenent ==> independent
  spliting ==> splitting

Just fix it.

Link: https://lkml.kernel.org/r/20250929002608.1633825-1-jianyungao89@gmail.com
Signed-off-by: jianyun.gao <jianyungao89@gmail.com>
Reviewed-by: SeongJae Park <sj@kernel.org>
Reviewed-by: Wei Yang <richard.weiyang@gmail.com>
Reviewed-by: Dev Jain <dev.jain@arm.com>
Reviewed-by: Liam R. Howlett <Liam.Howlett@oracle.com>
Acked-by: Chris Li <chrisl@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Diffstat (limited to 'mm')
-rw-r--r--    mm/damon/sysfs.c         2
-rw-r--r--    mm/gup.c                 2
-rw-r--r--    mm/hugetlb.c             6
-rw-r--r--    mm/hugetlb_vmemmap.c     6
-rw-r--r--    mm/kmsan/core.c          2
-rw-r--r--    mm/ksm.c                 2
-rw-r--r--    mm/memory-tiers.c        2
-rw-r--r--    mm/memory.c              4
-rw-r--r--    mm/secretmem.c           2
-rw-r--r--    mm/slab_common.c         2
-rw-r--r--    mm/slub.c                2
-rw-r--r--    mm/swapfile.c            2
-rw-r--r--    mm/userfaultfd.c         2
-rw-r--r--    mm/vma.c                 4
14 files changed, 20 insertions, 20 deletions
diff --git a/mm/damon/sysfs.c b/mm/damon/sysfs.c
index 3c0d727788c8..0ecd8fb84101 100644
--- a/mm/damon/sysfs.c
+++ b/mm/damon/sysfs.c
@@ -1264,7 +1264,7 @@ enum damon_sysfs_cmd {
DAMON_SYSFS_CMD_UPDATE_SCHEMES_EFFECTIVE_QUOTAS,
/*
* @DAMON_SYSFS_CMD_UPDATE_TUNED_INTERVALS: Update the tuned monitoring
- * intevals.
+ * intervals.
*/
DAMON_SYSFS_CMD_UPDATE_TUNED_INTERVALS,
/*
diff --git a/mm/gup.c b/mm/gup.c
index a8ba5112e4d0..d2524fe09338 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -2710,7 +2710,7 @@ EXPORT_SYMBOL(get_user_pages_unlocked);
*
* *) ptes can be read atomically by the architecture.
*
- * *) valid user addesses are below TASK_MAX_SIZE
+ * *) valid user addresses are below TASK_MAX_SIZE
*
* The last two assumptions can be relaxed by the addition of helper functions.
*
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 0455119716ec..4e016433e32e 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -2934,7 +2934,7 @@ typedef enum {
* NOTE: This is mostly identical to MAP_CHG_NEEDED, except
* that currently vma_needs_reservation() has an unwanted side
* effect to either use end() or commit() to complete the
- * transaction. Hence it needs to differenciate from NEEDED.
+ * transaction. Hence it needs to differentiate from NEEDED.
*/
MAP_CHG_ENFORCED = 2,
} map_chg_state;
@@ -6007,7 +6007,7 @@ void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
/*
* If we unshared PMDs, the TLB flush was not recorded in mmu_gather. We
* could defer the flush until now, since by holding i_mmap_rwsem we
- * guaranteed that the last refernece would not be dropped. But we must
+ * guaranteed that the last reference would not be dropped. But we must
* do the flushing before we return, as otherwise i_mmap_rwsem will be
* dropped and the last reference to the shared PMDs page might be
* dropped as well.
@@ -7193,7 +7193,7 @@ long hugetlb_change_protection(struct vm_area_struct *vma,
} else if (unlikely(is_pte_marker(pte))) {
/*
* Do nothing on a poison marker; page is
- * corrupted, permissons do not apply. Here
+ * corrupted, permissions do not apply. Here
* pte_marker_uffd_wp()==true implies !poison
* because they're mutual exclusive.
*/
diff --git a/mm/hugetlb_vmemmap.c b/mm/hugetlb_vmemmap.c
index ba0fb1b6a5a8..96ee2bd16ee1 100644
--- a/mm/hugetlb_vmemmap.c
+++ b/mm/hugetlb_vmemmap.c
@@ -75,7 +75,7 @@ static int vmemmap_split_pmd(pmd_t *pmd, struct page *head, unsigned long start,
if (likely(pmd_leaf(*pmd))) {
/*
* Higher order allocations from buddy allocator must be able to
- * be treated as indepdenent small pages (as they can be freed
+ * be treated as independent small pages (as they can be freed
* individually).
*/
if (!PageReserved(head))
@@ -684,7 +684,7 @@ static void __hugetlb_vmemmap_optimize_folios(struct hstate *h,
ret = hugetlb_vmemmap_split_folio(h, folio);
/*
- * Spliting the PMD requires allocating a page, thus lets fail
+ * Splitting the PMD requires allocating a page, thus let's fail
* early once we encounter the first OOM. No point in retrying
* as it can be dynamically done on remap with the memory
* we get back from the vmemmap deduplication.
@@ -715,7 +715,7 @@ static void __hugetlb_vmemmap_optimize_folios(struct hstate *h,
/*
* Pages to be freed may have been accumulated. If we
* encounter an ENOMEM, free what we have and try again.
- * This can occur in the case that both spliting fails
+ * This can occur in the case that both splitting fails
* halfway and head page allocation also failed. In this
* case __hugetlb_vmemmap_optimize_folio() would free memory
* allowing more vmemmap remaps to occur.
diff --git a/mm/kmsan/core.c b/mm/kmsan/core.c
index 35ceaa8adb41..90f427b95a21 100644
--- a/mm/kmsan/core.c
+++ b/mm/kmsan/core.c
@@ -33,7 +33,7 @@ bool kmsan_enabled __read_mostly;
/*
* Per-CPU KMSAN context to be used in interrupts, where current->kmsan is
- * unavaliable.
+ * unavailable.
*/
DEFINE_PER_CPU(struct kmsan_ctx, kmsan_percpu_ctx);
diff --git a/mm/ksm.c b/mm/ksm.c
index c4e730409949..cdefba633856 100644
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -389,7 +389,7 @@ static unsigned long ewma(unsigned long prev, unsigned long curr)
* exponentially weighted moving average. The new pages_to_scan value is
* multiplied with that change factor:
*
- * new_pages_to_scan *= change facor
+ * new_pages_to_scan *= change factor
*
* The new_pages_to_scan value is limited by the cpu min and max values. It
* calculates the cpu percent for the last scan and calculates the new
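For readers unfamiliar with the smoothing step this comment describes, here is a minimal standalone C sketch; the weight constant, sample values, and helper name are illustrative assumptions, not the kernel's definitions:

	#include <stdio.h>

	/* Illustrative weight: percent of the previous average that is kept. */
	#define EWMA_WEIGHT 30

	/* Exponentially weighted moving average of the previous value and a
	 * new sample, as described in the comment above. */
	static unsigned long ewma(unsigned long prev, unsigned long curr)
	{
		return (EWMA_WEIGHT * prev + (100 - EWMA_WEIGHT) * curr) / 100;
	}

	int main(void)
	{
		unsigned long pages_to_scan = 100;
		unsigned long change = 150;	/* change factor, in percent */

		/* new_pages_to_scan *= change factor */
		pages_to_scan = pages_to_scan * change / 100;
		printf("pages_to_scan=%lu, smoothed=%lu\n",
		       pages_to_scan, ewma(100, 200));
		return 0;
	}

With EWMA_WEIGHT at 30, ewma(100, 200) yields 170: the average leans toward the newer sample while damping sudden swings, which is why the scan-time change is fed through it before scaling pages_to_scan.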
diff --git a/mm/memory-tiers.c b/mm/memory-tiers.c
index 0ea5c13f10a2..864811fff409 100644
--- a/mm/memory-tiers.c
+++ b/mm/memory-tiers.c
@@ -519,7 +519,7 @@ static inline void __init_node_memory_type(int node, struct memory_dev_type *mem
* for each device getting added in the same NUMA node
* with this specific memtype, bump the map count. We
* Only take memtype device reference once, so that
- * changing a node memtype can be done by droping the
+ * changing a node memtype can be done by dropping the
* only reference count taken here.
*/
diff --git a/mm/memory.c b/mm/memory.c
index b59ae7ce42eb..61748b762876 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -4328,7 +4328,7 @@ static inline bool should_try_to_free_swap(struct folio *folio,
* If we want to map a page that's in the swapcache writable, we
* have to detect via the refcount if we're really the exclusive
* user. Try freeing the swapcache to get rid of the swapcache
- * reference only in case it's likely that we'll be the exlusive user.
+ * reference only in case it's likely that we'll be the exclusive user.
*/
return (fault_flags & FAULT_FLAG_WRITE) && !folio_test_ksm(folio) &&
folio_ref_count(folio) == (1 + folio_nr_pages(folio));
@@ -5405,7 +5405,7 @@ vm_fault_t do_set_pmd(struct vm_fault *vmf, struct folio *folio, struct page *pa
/**
* set_pte_range - Set a range of PTEs to point to pages in a folio.
- * @vmf: Fault decription.
+ * @vmf: Fault description.
* @folio: The folio that contains @page.
* @page: The first page to create a PTE for.
* @nr: The number of PTEs to create.
diff --git a/mm/secretmem.c b/mm/secretmem.c
index b59350daffe3..9b0f5d9ec6f4 100644
--- a/mm/secretmem.c
+++ b/mm/secretmem.c
@@ -227,7 +227,7 @@ SYSCALL_DEFINE1(memfd_secret, unsigned int, flags)
struct file *file;
int fd, err;
- /* make sure local flags do not confict with global fcntl.h */
+ /* make sure local flags do not conflict with global fcntl.h */
BUILD_BUG_ON(SECRETMEM_FLAGS_MASK & O_CLOEXEC);
if (!secretmem_enable || !can_set_direct_map())
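The BUILD_BUG_ON above is a compile-time guard against flag collisions. Outside the kernel the same idea can be expressed with C11's _Static_assert; the mask value below is an illustrative stand-in, not the kernel's definition:

	#include <fcntl.h>

	#define SECRETMEM_FLAGS_MASK 0x3	/* stand-in mask for illustration */

	/* Compilation fails if any local flag bit overlaps O_CLOEXEC. */
	_Static_assert((SECRETMEM_FLAGS_MASK & O_CLOEXEC) == 0,
		       "secretmem flags overlap O_CLOEXEC");

Because the check runs at compile time, a future flag that collides with the fcntl.h namespace is caught before the code can ever ship.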
diff --git a/mm/slab_common.c b/mm/slab_common.c
index 932d13ada36c..d2824daa98cf 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -259,7 +259,7 @@ out:
* @object_size: The size of objects to be created in this cache.
* @args: Additional arguments for the cache creation (see
* &struct kmem_cache_args).
- * @flags: See the desriptions of individual flags. The common ones are listed
+ * @flags: See the descriptions of individual flags. The common ones are listed
* in the description below.
*
* Not to be called directly, use the kmem_cache_create() wrapper with the same
diff --git a/mm/slub.c b/mm/slub.c
index 1bf65c421325..927ca64b6cbe 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2533,7 +2533,7 @@ bool slab_free_hook(struct kmem_cache *s, void *x, bool init,
memset((char *)kasan_reset_tag(x) + inuse, 0,
s->size - inuse - rsize);
/*
- * Restore orig_size, otherwize kmalloc redzone overwritten
+ * Restore orig_size, otherwise kmalloc redzone overwritten
* would be reported
*/
set_orig_size(s, x, orig_size);
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 10760240a3a2..cb2392ed8e0e 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -1677,7 +1677,7 @@ static bool swap_entries_put_map_nr(struct swap_info_struct *si,
/*
* Check if it's the last ref of swap entry in the freeing path.
- * Qualified vlaue includes 1, SWAP_HAS_CACHE or SWAP_MAP_SHMEM.
+ * Qualified value includes 1, SWAP_HAS_CACHE or SWAP_MAP_SHMEM.
*/
static inline bool __maybe_unused swap_is_last_ref(unsigned char count)
{
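The hunk cuts the function body off, but the comment fully specifies the predicate. As a hedged sketch only (the constant values are stand-ins and the kernel's actual body may differ), the described check reduces to:

	#include <stdbool.h>

	/* Stand-in values for illustration; the kernel defines its own. */
	#define SWAP_HAS_CACHE	0x40
	#define SWAP_MAP_SHMEM	0xbf

	/* Last-reference test as the comment describes it: a plain count
	 * of 1, or one of the special single-owner markers. */
	static inline bool swap_is_last_ref_sketch(unsigned char count)
	{
		return count == 1 || count == SWAP_HAS_CACHE ||
		       count == SWAP_MAP_SHMEM;
	}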
diff --git a/mm/userfaultfd.c b/mm/userfaultfd.c
index af61b95c89e4..0630f188c847 100644
--- a/mm/userfaultfd.c
+++ b/mm/userfaultfd.c
@@ -1578,7 +1578,7 @@ static int validate_move_areas(struct userfaultfd_ctx *ctx,
/*
* For now, we keep it simple and only move between writable VMAs.
- * Access flags are equal, therefore cheching only the source is enough.
+ * Access flags are equal, therefore checking only the source is enough.
*/
if (!(src_vma->vm_flags & VM_WRITE))
return -EINVAL;
diff --git a/mm/vma.c b/mm/vma.c
index abe0da33c844..9127eaeea93f 100644
--- a/mm/vma.c
+++ b/mm/vma.c
@@ -109,7 +109,7 @@ static inline bool is_mergeable_vma(struct vma_merge_struct *vmg, bool merge_nex
static bool is_mergeable_anon_vma(struct vma_merge_struct *vmg, bool merge_next)
{
struct vm_area_struct *tgt = merge_next ? vmg->next : vmg->prev;
- struct vm_area_struct *src = vmg->middle; /* exisitng merge case. */
+ struct vm_area_struct *src = vmg->middle; /* existing merge case. */
struct anon_vma *tgt_anon = tgt->anon_vma;
struct anon_vma *src_anon = vmg->anon_vma;
@@ -798,7 +798,7 @@ static bool can_merge_remove_vma(struct vm_area_struct *vma)
* Returns: The merged VMA if merge succeeds, or NULL otherwise.
*
* ASSUMPTIONS:
- * - The caller must assign the VMA to be modifed to @vmg->middle.
+ * - The caller must assign the VMA to be modified to @vmg->middle.
* - The caller must have set @vmg->prev to the previous VMA, if there is one.
* - The caller must not set @vmg->next, as we determine this.
* - The caller must hold a WRITE lock on the mm_struct->mmap_lock.