commit 1cf9c4f115019ebc77c6c1f6b379537bb1a48908
tree 752ec814a60e0c43c9b1c16f88c776b9b2b969d3
parent 35e4a69b2003f20a69e7d19ae96ab1eef1aa8e8d
parent 8e4ec90701efec7f2814c89b398d6d4272636814
author Rafael J. Wysocki <rafael.j.wysocki@intel.com> 2025-10-31 11:33:01 +0100
committer Rafael J. Wysocki <rafael.j.wysocki@intel.com> 2025-10-31 11:33:01 +0100

    Merge back system sleep material for 6.19
Diffstat (limited to 'kernel/power')
 -rw-r--r--  kernel/power/console.c  |   8
 -rw-r--r--  kernel/power/snapshot.c |  13
 -rw-r--r--  kernel/power/swap.c     | 121
 3 files changed, 115 insertions(+), 27 deletions(-)
diff --git a/kernel/power/console.c b/kernel/power/console.c
index 19c48aa5355d..a906a0ac0f9b 100644
--- a/kernel/power/console.c
+++ b/kernel/power/console.c
@@ -44,9 +44,10 @@ static LIST_HEAD(pm_vt_switch_list);
* no_console_suspend argument has been passed on the command line, VT
* switches will occur.
*/
-void pm_vt_switch_required(struct device *dev, bool required)
+int pm_vt_switch_required(struct device *dev, bool required)
{
struct pm_vt_switch *entry, *tmp;
+ int ret = 0;
mutex_lock(&vt_switch_mutex);
list_for_each_entry(tmp, &pm_vt_switch_list, head) {
@@ -58,8 +59,10 @@ void pm_vt_switch_required(struct device *dev, bool required)
}
entry = kmalloc(sizeof(*entry), GFP_KERNEL);
- if (!entry)
+ if (!entry) {
+ ret = -ENOMEM;
goto out;
+ }
entry->required = required;
entry->dev = dev;
@@ -67,6 +70,7 @@ void pm_vt_switch_required(struct device *dev, bool required)
list_add(&entry->head, &pm_vt_switch_list);
out:
mutex_unlock(&vt_switch_mutex);
+ return ret;
}
EXPORT_SYMBOL(pm_vt_switch_required);
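
With pm_vt_switch_required() now returning an int, a registration failure
(-ENOMEM when the list entry cannot be allocated) can be propagated instead
of being silently dropped. A minimal caller sketch follows; example_fb_probe()
is hypothetical and not part of this diff:

	/* Hypothetical caller; only pm_vt_switch_required() is from the patch. */
	static int example_fb_probe(struct device *dev)
	{
		int ret;

		/* Record that this device does not need a VT switch. */
		ret = pm_vt_switch_required(dev, false);
		if (ret)
			return ret;	/* -ENOMEM: preference was not recorded */

		return 0;
	}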
diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c
index 645f42e40478..0a946932d5c1 100644
--- a/kernel/power/snapshot.c
+++ b/kernel/power/snapshot.c
@@ -2110,22 +2110,20 @@ asmlinkage __visible int swsusp_save(void)
{
unsigned int nr_pages, nr_highmem;
- pr_info("Creating image:\n");
+ pm_deferred_pr_dbg("Creating image\n");
drain_local_pages(NULL);
nr_pages = count_data_pages();
nr_highmem = count_highmem_pages();
- pr_info("Need to copy %u pages\n", nr_pages + nr_highmem);
+ pm_deferred_pr_dbg("Need to copy %u pages\n", nr_pages + nr_highmem);
if (!enough_free_mem(nr_pages, nr_highmem)) {
- pr_err("Not enough free memory\n");
+ pm_deferred_pr_dbg("Not enough free memory for image creation\n");
return -ENOMEM;
}
- if (swsusp_alloc(&copy_bm, nr_pages, nr_highmem)) {
- pr_err("Memory allocation failed\n");
+ if (swsusp_alloc(&copy_bm, nr_pages, nr_highmem))
return -ENOMEM;
- }
/*
* During allocating of suspend pagedir, new cold pages may appear.
@@ -2144,7 +2142,8 @@ asmlinkage __visible int swsusp_save(void)
nr_zero_pages = nr_pages - nr_copy_pages;
nr_meta_pages = DIV_ROUND_UP(nr_pages * sizeof(long), PAGE_SIZE);
- pr_info("Image created (%d pages copied, %d zero pages)\n", nr_copy_pages, nr_zero_pages);
+ pm_deferred_pr_dbg("Image created (%d pages copied, %d zero pages)\n",
+ nr_copy_pages, nr_zero_pages);
return 0;
}
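
swsusp_save() runs late in hibernation, with a single CPU online and devices
frozen, so the patch switches its pr_info()/pr_err() calls to
pm_deferred_pr_dbg(), which queues the text for later output. The macro is
defined elsewhere in this merge; the sketch below only illustrates the idea
of deferring, and the buffer size, names, and flush point are assumptions:

	/* Illustrative sketch only; the in-tree macro may differ. */
	static char pm_dbg_buf[4096];
	static size_t pm_dbg_pos;

	#define pm_deferred_pr_dbg(fmt, ...)					\
		(pm_dbg_pos += scnprintf(pm_dbg_buf + pm_dbg_pos,		\
					 sizeof(pm_dbg_buf) - pm_dbg_pos,	\
					 fmt, ##__VA_ARGS__))

	/* Flushed once printing is safe again, e.g. after resume. */
	static void pm_deferred_flush(void)
	{
		if (pm_dbg_pos)
			pr_debug("%s", pm_dbg_buf);
		pm_dbg_pos = 0;
	}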
diff --git a/kernel/power/swap.c b/kernel/power/swap.c
index 0beff7eeaaba..d173e276b494 100644
--- a/kernel/power/swap.c
+++ b/kernel/power/swap.c
@@ -519,8 +519,9 @@ static int swap_writer_finish(struct swap_map_handle *handle,
CMP_HEADER, PAGE_SIZE)
#define CMP_SIZE (CMP_PAGES * PAGE_SIZE)
-/* Maximum number of threads for compression/decompression. */
-#define CMP_THREADS 3
+/* Default maximum number of threads for compression/decompression. */
+#define CMP_THREADS 3
+static unsigned int hibernate_compression_threads = CMP_THREADS;
/* Minimum/maximum number of pages for read buffering. */
#define CMP_MIN_RD_PAGES 1024
@@ -585,10 +586,48 @@ struct crc_data {
wait_queue_head_t go; /* start crc update */
wait_queue_head_t done; /* crc update done */
u32 *crc32; /* points to handle's crc32 */
- size_t *unc_len[CMP_THREADS]; /* uncompressed lengths */
- unsigned char *unc[CMP_THREADS]; /* uncompressed data */
+ size_t **unc_len; /* uncompressed lengths */
+ unsigned char **unc; /* uncompressed data */
};
+static struct crc_data *alloc_crc_data(int nr_threads)
+{
+ struct crc_data *crc;
+
+ crc = kzalloc(sizeof(*crc), GFP_KERNEL);
+ if (!crc)
+ return NULL;
+
+ crc->unc = kcalloc(nr_threads, sizeof(*crc->unc), GFP_KERNEL);
+ if (!crc->unc)
+ goto err_free_crc;
+
+ crc->unc_len = kcalloc(nr_threads, sizeof(*crc->unc_len), GFP_KERNEL);
+ if (!crc->unc_len)
+ goto err_free_unc;
+
+ return crc;
+
+err_free_unc:
+ kfree(crc->unc);
+err_free_crc:
+ kfree(crc);
+ return NULL;
+}
+
+static void free_crc_data(struct crc_data *crc)
+{
+ if (!crc)
+ return;
+
+ if (crc->thr)
+ kthread_stop(crc->thr);
+
+ kfree(crc->unc_len);
+ kfree(crc->unc);
+ kfree(crc);
+}
+
/*
* CRC32 update function that runs in its own thread.
*/
@@ -703,7 +742,7 @@ static int save_compressed_image(struct swap_map_handle *handle,
* footprint.
*/
nr_threads = num_online_cpus() - 1;
- nr_threads = clamp_val(nr_threads, 1, CMP_THREADS);
+ nr_threads = clamp_val(nr_threads, 1, hibernate_compression_threads);
page = (void *)__get_free_page(GFP_NOIO | __GFP_HIGH);
if (!page) {
@@ -719,7 +758,7 @@ static int save_compressed_image(struct swap_map_handle *handle,
goto out_clean;
}
- crc = kzalloc(sizeof(*crc), GFP_KERNEL);
+ crc = alloc_crc_data(nr_threads);
if (!crc) {
pr_err("Failed to allocate crc\n");
ret = -ENOMEM;
@@ -885,11 +924,7 @@ out_finish:
out_clean:
hib_finish_batch(&hb);
- if (crc) {
- if (crc->thr)
- kthread_stop(crc->thr);
- kfree(crc);
- }
+ free_crc_data(crc);
if (data) {
for (thr = 0; thr < nr_threads; thr++) {
if (data[thr].thr)
@@ -1223,7 +1258,7 @@ static int load_compressed_image(struct swap_map_handle *handle,
* footprint.
*/
nr_threads = num_online_cpus() - 1;
- nr_threads = clamp_val(nr_threads, 1, CMP_THREADS);
+ nr_threads = clamp_val(nr_threads, 1, hibernate_compression_threads);
page = vmalloc_array(CMP_MAX_RD_PAGES, sizeof(*page));
if (!page) {
@@ -1239,7 +1274,7 @@ static int load_compressed_image(struct swap_map_handle *handle,
goto out_clean;
}
- crc = kzalloc(sizeof(*crc), GFP_KERNEL);
+ crc = alloc_crc_data(nr_threads);
if (!crc) {
pr_err("Failed to allocate crc\n");
ret = -ENOMEM;
@@ -1506,11 +1541,7 @@ out_clean:
hib_finish_batch(&hb);
for (i = 0; i < ring_size; i++)
free_page((unsigned long)page[i]);
- if (crc) {
- if (crc->thr)
- kthread_stop(crc->thr);
- kfree(crc);
- }
+ free_crc_data(crc);
if (data) {
for (thr = 0; thr < nr_threads; thr++) {
if (data[thr].thr)
@@ -1658,8 +1689,46 @@ int swsusp_unmark(void)
}
#endif
+static ssize_t hibernate_compression_threads_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+	return sysfs_emit(buf, "%u\n", hibernate_compression_threads);
+}
+
+static ssize_t hibernate_compression_threads_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf, size_t n)
+{
+	unsigned int val;
+
+	if (kstrtouint(buf, 0, &val))
+ return -EINVAL;
+
+ if (val < 1)
+ return -EINVAL;
+
+ hibernate_compression_threads = val;
+ return n;
+}
+power_attr(hibernate_compression_threads);
+
+static struct attribute *g[] = {
+ &hibernate_compression_threads_attr.attr,
+ NULL,
+};
+
+static const struct attribute_group attr_group = {
+ .attrs = g,
+};
+
static int __init swsusp_header_init(void)
{
+ int error;
+
+ error = sysfs_create_group(power_kobj, &attr_group);
+ if (error)
+		return error;
+
swsusp_header = (struct swsusp_header*) __get_free_page(GFP_KERNEL);
if (!swsusp_header)
panic("Could not allocate memory for swsusp_header\n");
@@ -1667,3 +1736,19 @@ static int __init swsusp_header_init(void)
}
core_initcall(swsusp_header_init);
+
+static int __init hibernate_compression_threads_setup(char *str)
+{
+	int rc = kstrtouint(str, 0, &hibernate_compression_threads);
+
+	if (rc || hibernate_compression_threads < 1)
+		hibernate_compression_threads = CMP_THREADS;
+
+	/* __setup() handlers return 1 once the option has been consumed. */
+	return 1;
+}
+
+__setup("hibernate_compression_threads=", hibernate_compression_threads_setup);