author	Nhat Pham <nphamcs@gmail.com>	2025-08-20 11:15:47 -0700
committer	Andrew Morton <akpm@linux-foundation.org>	2025-09-13 16:55:15 -0700
commit	0b1bf60c324a8135f780d50bffb53d54f8863f88 (patch)
tree	94169a535aceb028ec7f135fbc605778edde7497
parent	0cd01c4a5cc140efb9fc203dd05ffccf3c2197d0 (diff)
mm/zswap: reduce the size of the compression buffer to a single page
Reduce the compression buffer size from 2 * PAGE_SIZE to a single page, since the compression output (in the success case) should not exceed the length of the input.

In the past, Chengming tried to reduce the compression buffer size (see [1]), but ran into issues with the LZO algorithm (see [2]). Herbert Xu reported that the issue has been fixed (see [3]). We now have the guarantee that a compressor's output will not exceed one page in the success case, and that the algorithm simply reports failure otherwise.

With this patch, we save one page per CPU (per compression algorithm).

Link: https://lkml.kernel.org/r/20250820181547.3794167-1-nphamcs@gmail.com
Link: https://lore.kernel.org/linux-mm/20231213-zswap-dstmem-v4-1-f228b059dd89@bytedance.com/ [1]
Link: https://lore.kernel.org/lkml/0000000000000b05cd060d6b5511@google.com/ [2]
Link: https://lore.kernel.org/linux-mm/aKUmyl5gUFCdXGn-@gondor.apana.org.au/ [3]
Co-developed-by: Chengming Zhou <chengming.zhou@linux.dev>
Signed-off-by: Chengming Zhou <chengming.zhou@linux.dev>
Signed-off-by: Nhat Pham <nphamcs@gmail.com>
Acked-by: SeongJae Park <sj@kernel.org>
Reviewed-by: Chengming Zhou <chengming.zhou@linux.dev>
Cc: Herbert Xu <herbert@gondor.apana.org.au>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
-rw-r--r--	mm/zswap.c	9
1 file changed, 2 insertions(+), 7 deletions(-)
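To illustrate the contract this change relies on (a compressor that reports failure when its output would not fit in the destination buffer, instead of writing past it), here is a minimal, hypothetical userspace C sketch. The names compress_fn and compress_one_page are stand-ins for illustration only; they are not zswap or crypto acomp API names.

/*
 * Minimal sketch (not kernel code) of the invariant behind this patch:
 * the compressor must fail rather than overrun a one-page destination,
 * so callers never need more than a single PAGE_SIZE dst buffer.
 */
#include <stddef.h>
#include <stdbool.h>

#define PAGE_SIZE 4096

/* Hypothetical compressor: returns compressed length, or 0 on failure. */
typedef size_t (*compress_fn)(const void *src, size_t slen,
			      void *dst, size_t dlen);

static bool compress_one_page(compress_fn compress, const void *page,
			      void *dst, size_t *out_len)
{
	/*
	 * dst is exactly one page. If the data is incompressible, the
	 * algorithm must report failure rather than overrun dst; the
	 * caller then falls back to storing the page uncompressed.
	 */
	size_t dlen = compress(page, PAGE_SIZE, dst, PAGE_SIZE);

	if (dlen == 0 || dlen >= PAGE_SIZE)
		return false;	/* compression failed or saved no space */

	*out_len = dlen;
	return true;
}

With this contract, the over-compression case degenerates into an ordinary failure path, which is why both the dst scatterlist and the per-CPU buffer in the diff below can shrink from 2 * PAGE_SIZE to PAGE_SIZE.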
diff --git a/mm/zswap.c b/mm/zswap.c
index 0c8dd8876d8e..e5e1f5687f5e 100644
--- a/mm/zswap.c
+++ b/mm/zswap.c
@@ -831,7 +831,7 @@ static int zswap_cpu_comp_prepare(unsigned int cpu, struct hlist_node *node)
u8 *buffer = NULL;
int ret;
- buffer = kmalloc_node(PAGE_SIZE * 2, GFP_KERNEL, cpu_to_node(cpu));
+ buffer = kmalloc_node(PAGE_SIZE, GFP_KERNEL, cpu_to_node(cpu));
if (!buffer) {
ret = -ENOMEM;
goto fail;
@@ -959,12 +959,7 @@ static bool zswap_compress(struct page *page, struct zswap_entry *entry,
sg_init_table(&input, 1);
sg_set_page(&input, page, PAGE_SIZE, 0);
- /*
- * We need PAGE_SIZE * 2 here since there maybe over-compression case,
- * and hardware-accelerators may won't check the dst buffer size, so
- * giving the dst buffer with enough length to avoid buffer overflow.
- */
- sg_init_one(&output, dst, PAGE_SIZE * 2);
+ sg_init_one(&output, dst, PAGE_SIZE);
acomp_request_set_params(acomp_ctx->req, &input, &output, PAGE_SIZE, dlen);
/*