Diffstat (limited to 'mm')
-rw-r--r--  mm/memcontrol.c  2
-rw-r--r--  mm/zswap.c       4
2 files changed, 3 insertions, 3 deletions
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 4deda33625f4..3ae5cbcaed75 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -5443,7 +5443,7 @@ bool obj_cgroup_may_zswap(struct obj_cgroup *objcg)
  * @size: size of compressed object
  *
  * This forces the charge after obj_cgroup_may_zswap() allowed
- * compression and storage in zwap for this cgroup to go ahead.
+ * compression and storage in zswap for this cgroup to go ahead.
  */
 void obj_cgroup_charge_zswap(struct obj_cgroup *objcg, size_t size)
 {
diff --git a/mm/zswap.c b/mm/zswap.c
index 80619c8589a7..f6b1c8832a4f 100644
--- a/mm/zswap.c
+++ b/mm/zswap.c
@@ -879,7 +879,7 @@ static bool zswap_compress(struct page *page, struct zswap_entry *entry,
  * acomp instance, then get those requests done simultaneously. but in this
  * case, zswap actually does store and load page by page, there is no
  * existing method to send the second page before the first page is done
- * in one thread doing zwap.
+ * in one thread doing zswap.
  * but in different threads running on different cpu, we have different
  * acomp instance, so multiple threads can do (de)compression in parallel.
  */
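
The comment in this hunk describes zswap's per-CPU compression contexts. A minimal sketch of that pattern, assuming the kernel crypto acomp API; the struct acomp_ctx layout and compress_one() here are illustrative, not the verbatim upstream code:

/*
 * Minimal sketch, not the verbatim zswap code: one transform/request pair
 * per CPU lets threads on different CPUs (de)compress in parallel, while
 * the per-context mutex serializes users of any single instance.
 * Context setup (allocation, crypto_init_wait(), wiring the request
 * callback to crypto_req_done with &ctx->wait) is assumed done elsewhere.
 */
#include <crypto/acompress.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/percpu.h>
#include <linux/scatterlist.h>

struct acomp_ctx {
	struct crypto_acomp *acomp;	/* per-CPU compression transform */
	struct acomp_req *req;		/* per-CPU request */
	struct crypto_wait wait;	/* completion for the async acomp API */
	struct mutex mutex;		/* serializes this CPU's instance */
};

static int compress_one(struct acomp_ctx __percpu *ctxs, struct page *page,
			void *dst, unsigned int *dlen)
{
	struct acomp_ctx *ctx = raw_cpu_ptr(ctxs);
	struct scatterlist input, output;
	int ret;

	mutex_lock(&ctx->mutex);
	sg_init_table(&input, 1);
	sg_set_page(&input, page, PAGE_SIZE, 0);
	sg_init_one(&output, dst, PAGE_SIZE);
	acomp_request_set_params(ctx->req, &input, &output, PAGE_SIZE, *dlen);
	/* submit asynchronously, wait synchronously: one page at a time */
	ret = crypto_wait_req(crypto_acomp_compress(ctx->req), &ctx->wait);
	*dlen = ctx->req->dlen;
	mutex_unlock(&ctx->mutex);
	return ret;
}
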
@@ -1128,7 +1128,7 @@ static enum lru_status shrink_memcg_cb(struct list_head *item, struct list_lru_o
  *
  * 1. We extract the swp_entry_t to the stack, allowing
  *    zswap_writeback_entry() to pin the swap entry and
- *    then validate the zwap entry against that swap entry's
+ *    then validate the zswap entry against that swap entry's
  *    tree using pointer value comparison. Only when that
  *    is successful can the entry be dereferenced.
  *
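
The revalidation step described in point 1 of that comment can be sketched as follows; lookup_tree() and tree_search() are hypothetical stand-ins for zswap's internal per-swapfile tree helpers, and the struct zswap_tree layout is simplified:

#include <linux/errno.h>
#include <linux/rbtree.h>
#include <linux/spinlock.h>
#include <linux/swap.h>
#include <linux/swapops.h>

struct zswap_tree {			/* simplified per-swapfile tree */
	struct rb_root rbroot;
	spinlock_t lock;
};

/* hypothetical stand-ins for zswap's internal tree helpers */
struct zswap_tree *lookup_tree(swp_entry_t swpentry);
struct zswap_entry *tree_search(struct zswap_tree *tree, pgoff_t offset);

static int writeback_one(struct zswap_entry *entry, swp_entry_t swpentry)
{
	struct zswap_tree *tree = lookup_tree(swpentry);

	/*
	 * The caller copied entry->swpentry to the stack while the entry was
	 * still known to be valid, then dropped its lock; the entry may have
	 * been freed since. Revalidate by pointer comparison before touching
	 * it again.
	 */
	spin_lock(&tree->lock);
	if (tree_search(tree, swp_offset(swpentry)) != entry) {
		spin_unlock(&tree->lock);
		return -EAGAIN;		/* entry went away; skip it */
	}
	/* same pointer at the same swap slot: safe to use under the lock */
	spin_unlock(&tree->lock);
	return 0;
}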