author    Linus Torvalds <torvalds@linux-foundation.org>  2025-10-01 16:08:37 -0700
committer Linus Torvalds <torvalds@linux-foundation.org>  2025-10-01 16:08:37 -0700
commit    991053178e08fb4d1f80398367db05c2cc4f20b4 (patch)
tree      4d96d4f0dfd7b8b062df66fcd8a557039841d385
parent    080ffb4bec4d49cdedca11810395f8cad812471e (diff)
parent    40d2cf9c3c1a5a1a9a443389d6b57a87362e4237 (diff)
Merge tag 'pm-6.18-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm
Pull power management updates from Rafael Wysocki:
 "The majority of these are cpufreq changes, which has been a recurring
  pattern for a few recent cycles. Those changes include new hardware
  support (AN7583 SoC support in the airoha cpufreq driver, ipq5424
  support in the qcom-nvmem cpufreq driver, MT8196 support in the
  mediatek cpufreq driver, AM62D2 support in the ti cpufreq driver),
  DT bindings and Rust code updates, cleanups of the core and
  governors, and multiple driver fixes and cleanups.

  Beyond that, there are hibernation fixes (some remaining 6.16 cycle
  fallout and an issue related to hybrid suspend in the amdgpu driver),
  cleanups of the PM core code, a runtime PM documentation update,
  cpuidle and power capping cleanups, and tooling updates.

  Specifics:

   - Rearrange variable declarations involving __free() in the cpufreq core and intel_pstate driver to follow common coding style (Rafael Wysocki)
   - Fix object lifecycle issue in update_qos_request(), rearrange freq QoS updates using __free(), and adjust frequency percentage computations in the intel_pstate driver (Rafael Wysocki)
   - Update intel_pstate to allow it to enable HWP without EPP if the new DEC (Dynamic Efficiency Control) HW feature is enabled (Rafael Wysocki)
   - Use on_each_cpu_mask() in drv_write() in the ACPI cpufreq driver to simplify the code (Rafael Wysocki)
   - Use likely() optimization in intel_pstate_sample() (Yaxiong Tian)
   - Remove dead EPB-related code from intel_pstate (Srinivas Pandruvada)
   - Use scope-based cleanup for cpufreq policy references in multiple cpufreq drivers (Zihuan Zhang)
   - Avoid calling get_governor() for the first policy in the cpufreq core to simplify the initial policy path (Zihuan Zhang)
   - Clean up the cpufreq core in multiple places (Zihuan Zhang)
   - Use int type to store negative error codes in the cpufreq core and update the speedstep-lib to use int for error codes (Qianfeng Rong)
   - Update the efficient idle check for Intel extended Families in the ondemand cpufreq governor (Sohil Mehta)
   - Replace sscanf() with kstrtouint() in the conservative cpufreq governor (Kaushlendra Kumar)
   - Rename CpumaskVar::as[_mut]_ref to from_raw[_mut] in the cpumask Rust code and mark CpumaskVar as transparent (Alice Ryhl, Baptiste Lepers)
   - Update ARef and AlwaysRefCounted imports from sync::aref in the OPP Rust code (Shankari Anand)
   - Add support for the AN7583 SoC to the airoha cpufreq driver (Christian Marangi)
   - Enable cpufreq for ipq5424 in the qcom-nvmem cpufreq driver (Md Sadre Alam)
   - Add support for MT8196 to the mediatek-hw cpufreq driver, refactor that driver and add the mediatek,mt8196-cpufreq-hw DT binding (Nicolas Frattaroli)
   - Avoid redundant conditions in the mediatek cpufreq driver (Liao Yuanhong)
   - Add support for AM62D2 to the ti cpufreq driver and blocklist the ti,am62d2 SoC in dt-platdev (Paresh Bhagat)
   - Support more speed grades on the AM62Px SoC in the ti cpufreq driver, allow all silicon revisions to support OPPs in it, and fix supported hardware for the 1GHz OPP (Judith Mendez)
   - Add QCS615 compatible to DT bindings for cpufreq-qcom-hw (Taniya Das)
   - Minor assorted updates of the scmi, longhaul, CPPC, and armada-37xx cpufreq drivers (Akhilesh Patil, BowenYu, Dennis Beier, and Florian Fainelli)
   - Remove outdated cpufreq-dt.txt (Frank Li)
   - Fix python gnuplot package names in the amd_pstate_tracer utility (Kuan-Wei Chiu)
   - Saravana Kannan will maintain the virtual-cpufreq driver (Saravana Kannan)
   - Prevent CPU capacity updates after registering a perf domain from failing on a first CPU that is not present (Christian Loehle)
   - Add support for the cases in which frequency alone is not sufficient to uniquely identify an OPP (Krishna Chaitanya Chundru)
   - Use to_result() for OPP error handling in Rust (Onur Özkan)
   - Add support for LPDDR5 on the Rockchip RK3588 SoC to the rockchip-dfi devfreq-event driver (Nicolas Frattaroli)
   - Fix an issue where DDR cycle counts on RK3588/RK3528 with LPDDR4(X) are reported as half by adding a cycle multiplier to the rockchip-dfi devfreq-event driver (Nicolas Frattaroli)
   - Fix a missing error pointer dereference check of the regulator instance in the mtk-cci devfreq driver probe and remove a redundant condition from an if () statement in that driver (Dan Carpenter, Liao Yuanhong)
   - Fail cpuidle device registration if there is one already to avoid sysfs-related issues (Rafael Wysocki)
   - Use sysfs_emit()/sysfs_emit_at() instead of sprintf()/scnprintf() in cpuidle (Vivek Yadav)
   - Fix device and OF node leaks at probe in the qcom-spm cpuidle driver and drop unnecessary initializations from it (Johan Hovold)
   - Remove unnecessary address-of operators from the intel_idle cpuidle driver (Kaushlendra Kumar)
   - Rearrange the main loop in menu_select() to make the code in that function easier to follow (Rafael Wysocki)
   - Convert values in microseconds to ktime using us_to_ktime() where applicable in the idle_inject power capping driver (Xichao Zhao)
   - Annotate loops walking device links in the power management core code as _srcu and add macros for walking device links to reduce the likelihood of coding mistakes related to them (Rafael Wysocki)
   - Document time units for *_time functions in the runtime PM API (Brian Norris)
   - Clear power.must_resume in the noirq suspend error path to avoid resuming a dependent device under a suspended parent or supplier (Rafael Wysocki)
   - Fix GFP mask handling during hybrid suspend and make the amdgpu driver handle hybrid suspend correctly (Mario Limonciello, Rafael Wysocki)
   - Fix GFP mask handling after aborted hibernation in platform mode and combine exit paths in power_down() to avoid code duplication (Rafael Wysocki)
   - Use vmalloc_array() and vcalloc() in the hibernation core to avoid open-coded size computations (Qianfeng Rong)
   - Fix a typo in a hibernation core code comment (Li Jun)
   - Call pm_wakeup_clear() in the same place where other functions that do bookkeeping prior to suspend_prepare() are called (Samuel Wu)
   - Fix and clean up the x86_energy_perf_policy utility and update its documentation (Len Brown, Kaushlendra Kumar)
   - Fix incorrect sorting of PMT telemetry in turbostat (Kaushlendra Kumar)
   - Fix an incorrect size in cpuidle_state_disable() and the error return value of cpupower_write_sysfs() in cpupower (Kaushlendra Kumar)"

* tag 'pm-6.18-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm: (86 commits)
  PM: hibernate: Combine return paths in power_down()
  PM: hibernate: Restrict GFP mask in power_down()
  PM: hibernate: Fix pm_hibernation_mode_is_suspend() build breakage
  PM: runtime: Documentation: ABI: Document time units for *_time
  tools/power x86_energy_perf_policy.8: Emphasize preference for SW interfaces
  tools/power x86_energy_perf_policy: Add make snapshot target
  tools/power x86_energy_perf_policy: Prefer driver HWP limits
  tools/power x86_energy_perf_policy: EPB access is only via sysfs
  tools/power x86_energy_perf_policy: Prepare for MSR/sysfs refactoring
  tools/power x86_energy_perf_policy: Enhance HWP enable
  tools/power x86_energy_perf_policy: Enhance HWP enabled check
  tools/power x86_energy_perf_policy: Fix incorrect fopen mode usage
  tools/power turbostat: Fix incorrect sorting of PMT telemetry
  drm/amd: Fix hybrid sleep
  PM: hibernate: Add pm_hibernation_mode_is_suspend()
  PM: hibernate: Fix hybrid-sleep
  tools/cpupower: Fix incorrect size in cpuidle_state_disable()
  tools/power/x86/amd_pstate_tracer: Fix python gnuplot package names
  cpufreq: Replace pointer subtraction with iteration macro
  cpuidle: Fail cpuidle device registration if there is one already
  ...
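Most of the scope-based cleanup conversions in this pull follow one pattern: a manual cpufreq_cpu_get()/cpufreq_cpu_put() pair becomes a __free()-annotated pointer whose reference is dropped automatically on scope exit. A minimal sketch of the idiom, assuming the put_cpufreq_policy cleanup class declared in <linux/cpufreq.h> (the function name example_get_cur_freq is hypothetical):

	unsigned int example_get_cur_freq(unsigned int cpu)
	{
		struct cpufreq_policy *policy __free(put_cpufreq_policy) =
						cpufreq_cpu_get(cpu);

		if (!policy)
			return 0;

		/* cpufreq_cpu_put() runs automatically on every return path */
		return policy->cur;
	}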
-rw-r--r--  Documentation/ABI/testing/sysfs-devices-power | 8
-rw-r--r--  Documentation/devicetree/bindings/cpufreq/cpufreq-dt.txt | 61
-rw-r--r--  Documentation/devicetree/bindings/cpufreq/cpufreq-qcom-hw.yaml | 2
-rw-r--r--  Documentation/devicetree/bindings/cpufreq/mediatek,mt8196-cpufreq-hw.yaml | 82
-rw-r--r--  MAINTAINERS | 6
-rw-r--r--  arch/arm64/boot/dts/ti/k3-am62p5.dtsi | 2
-rw-r--r--  drivers/base/base.h | 8
-rw-r--r--  drivers/base/power/main.c | 32
-rw-r--r--  drivers/base/power/runtime.c | 3
-rw-r--r--  drivers/cpufreq/acpi-cpufreq.c | 9
-rw-r--r--  drivers/cpufreq/airoha-cpufreq.c | 1
-rw-r--r--  drivers/cpufreq/armada-37xx-cpufreq.c | 4
-rw-r--r--  drivers/cpufreq/brcmstb-avs-cpufreq.c | 4
-rw-r--r--  drivers/cpufreq/cppc_cpufreq.c | 16
-rw-r--r--  drivers/cpufreq/cpufreq-dt-platdev.c | 3
-rw-r--r--  drivers/cpufreq/cpufreq.c | 44
-rw-r--r--  drivers/cpufreq/cpufreq_conservative.c | 24
-rw-r--r--  drivers/cpufreq/cpufreq_ondemand.c | 25
-rw-r--r--  drivers/cpufreq/cpufreq_ondemand.h | 23
-rw-r--r--  drivers/cpufreq/freq_table.c | 22
-rw-r--r--  drivers/cpufreq/intel_pstate.c | 182
-rw-r--r--  drivers/cpufreq/longhaul.c | 3
-rw-r--r--  drivers/cpufreq/mediatek-cpufreq-hw.c | 134
-rw-r--r--  drivers/cpufreq/mediatek-cpufreq.c | 11
-rw-r--r--  drivers/cpufreq/qcom-cpufreq-nvmem.c | 5
-rw-r--r--  drivers/cpufreq/s5pv210-cpufreq.c | 4
-rw-r--r--  drivers/cpufreq/scmi-cpufreq.c | 10
-rw-r--r--  drivers/cpufreq/sh-cpufreq.c | 6
-rw-r--r--  drivers/cpufreq/speedstep-lib.c | 12
-rw-r--r--  drivers/cpufreq/speedstep-lib.h | 10
-rw-r--r--  drivers/cpufreq/tegra186-cpufreq.c | 4
-rw-r--r--  drivers/cpufreq/ti-cpufreq.c | 12
-rw-r--r--  drivers/cpufreq/virtual-cpufreq.c | 2
-rw-r--r--  drivers/cpuidle/cpuidle-qcom-spm.c | 11
-rw-r--r--  drivers/cpuidle/cpuidle.c | 8
-rw-r--r--  drivers/cpuidle/governors/menu.c | 70
-rw-r--r--  drivers/cpuidle/sysfs.c | 34
-rw-r--r--  drivers/devfreq/event/rockchip-dfi.c | 91
-rw-r--r--  drivers/devfreq/mtk-cci-devfreq.c | 5
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 2
-rw-r--r--  drivers/idle/intel_idle.c | 256
-rw-r--r--  drivers/opp/core.c | 99
-rw-r--r--  drivers/powercap/idle_inject.c | 5
-rw-r--r--  include/linux/cpufreq.h | 7
-rw-r--r--  include/linux/pm_opp.h | 30
-rw-r--r--  include/linux/suspend.h | 6
-rw-r--r--  include/soc/rockchip/rk3588_grf.h | 8
-rw-r--r--  include/soc/rockchip/rockchip_grf.h | 1
-rw-r--r--  kernel/power/energy_model.c | 11
-rw-r--r--  kernel/power/hibernate.c | 39
-rw-r--r--  kernel/power/process.c | 1
-rw-r--r--  kernel/power/snapshot.c | 2
-rw-r--r--  kernel/power/suspend.c | 1
-rw-r--r--  kernel/power/swap.c | 6
-rw-r--r--  rust/kernel/cpufreq.rs | 2
-rw-r--r--  rust/kernel/cpumask.rs | 5
-rw-r--r--  rust/kernel/opp.rs | 29
-rw-r--r--  tools/power/cpupower/lib/cpuidle.c | 5
-rw-r--r--  tools/power/cpupower/lib/cpupower.c | 2
-rwxr-xr-x  tools/power/x86/amd_pstate_tracer/amd_pstate_trace.py | 2
-rw-r--r--  tools/power/x86/turbostat/turbostat.c | 2
-rw-r--r--  tools/power/x86/x86_energy_perf_policy/Makefile | 29
-rw-r--r--  tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.8 | 15
-rw-r--r--  tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c | 133
64 files changed, 1050 insertions, 641 deletions
diff --git a/Documentation/ABI/testing/sysfs-devices-power b/Documentation/ABI/testing/sysfs-devices-power
index e4ec5de9a5dd..9bf7c8a267c5 100644
--- a/Documentation/ABI/testing/sysfs-devices-power
+++ b/Documentation/ABI/testing/sysfs-devices-power
@@ -274,15 +274,15 @@ What: /sys/devices/.../power/runtime_active_time
Date: Jul 2010
Contact: Arjan van de Ven <arjan@linux.intel.com>
Description:
- Reports the total time that the device has been active.
- Used for runtime PM statistics.
+ Reports the total time that the device has been active, in
+ milliseconds. Used for runtime PM statistics.
What: /sys/devices/.../power/runtime_suspended_time
Date: Jul 2010
Contact: Arjan van de Ven <arjan@linux.intel.com>
Description:
- Reports total time that the device has been suspended.
- Used for runtime PM statistics.
+ Reports total time that the device has been suspended, in
+ milliseconds. Used for runtime PM statistics.
What: /sys/devices/.../power/runtime_usage
Date: Apr 2010
diff --git a/Documentation/devicetree/bindings/cpufreq/cpufreq-dt.txt b/Documentation/devicetree/bindings/cpufreq/cpufreq-dt.txt
deleted file mode 100644
index 1d7e49167666..000000000000
--- a/Documentation/devicetree/bindings/cpufreq/cpufreq-dt.txt
+++ /dev/null
@@ -1,61 +0,0 @@
-Generic cpufreq driver
-
-It is a generic DT based cpufreq driver for frequency management. It supports
-both uniprocessor (UP) and symmetric multiprocessor (SMP) systems which share
-clock and voltage across all CPUs.
-
-Both required and optional properties listed below must be defined
-under node /cpus/cpu@0.
-
-Required properties:
-- None
-
-Optional properties:
-- operating-points: Refer to Documentation/devicetree/bindings/opp/opp-v1.yaml for
- details. OPPs *must* be supplied either via DT, i.e. this property, or
- populated at runtime.
-- clock-latency: Specify the possible maximum transition latency for clock,
- in unit of nanoseconds.
-- voltage-tolerance: Specify the CPU voltage tolerance in percentage.
-- #cooling-cells:
- Please refer to
- Documentation/devicetree/bindings/thermal/thermal-cooling-devices.yaml.
-
-Examples:
-
-cpus {
- #address-cells = <1>;
- #size-cells = <0>;
-
- cpu@0 {
- compatible = "arm,cortex-a9";
- reg = <0>;
- next-level-cache = <&L2>;
- operating-points = <
- /* kHz uV */
- 792000 1100000
- 396000 950000
- 198000 850000
- >;
- clock-latency = <61036>; /* two CLK32 periods */
- #cooling-cells = <2>;
- };
-
- cpu@1 {
- compatible = "arm,cortex-a9";
- reg = <1>;
- next-level-cache = <&L2>;
- };
-
- cpu@2 {
- compatible = "arm,cortex-a9";
- reg = <2>;
- next-level-cache = <&L2>;
- };
-
- cpu@3 {
- compatible = "arm,cortex-a9";
- reg = <3>;
- next-level-cache = <&L2>;
- };
-};
diff --git a/Documentation/devicetree/bindings/cpufreq/cpufreq-qcom-hw.yaml b/Documentation/devicetree/bindings/cpufreq/cpufreq-qcom-hw.yaml
index e0242bed3342..2d42fc3d8ef8 100644
--- a/Documentation/devicetree/bindings/cpufreq/cpufreq-qcom-hw.yaml
+++ b/Documentation/devicetree/bindings/cpufreq/cpufreq-qcom-hw.yaml
@@ -22,6 +22,7 @@ properties:
items:
- enum:
- qcom,qcm2290-cpufreq-hw
+ - qcom,qcs615-cpufreq-hw
- qcom,sc7180-cpufreq-hw
- qcom,sc8180x-cpufreq-hw
- qcom,sdm670-cpufreq-hw
@@ -132,6 +133,7 @@ allOf:
compatible:
contains:
enum:
+ - qcom,qcs615-cpufreq-hw
- qcom,qdu1000-cpufreq-epss
- qcom,sa8255p-cpufreq-epss
- qcom,sa8775p-cpufreq-epss
diff --git a/Documentation/devicetree/bindings/cpufreq/mediatek,mt8196-cpufreq-hw.yaml b/Documentation/devicetree/bindings/cpufreq/mediatek,mt8196-cpufreq-hw.yaml
new file mode 100644
index 000000000000..5f3c7db3f3aa
--- /dev/null
+++ b/Documentation/devicetree/bindings/cpufreq/mediatek,mt8196-cpufreq-hw.yaml
@@ -0,0 +1,82 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/cpufreq/mediatek,mt8196-cpufreq-hw.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: MediaTek Hybrid CPUFreq for MT8196/MT6991 series SoCs
+
+maintainers:
+ - Nicolas Frattaroli <nicolas.frattaroli@collabora.com>
+
+description:
+ MT8196 uses CPUFreq management hardware that supports dynamic voltage
+ frequency scaling (dvfs), and can support several performance domains.
+
+properties:
+ compatible:
+ const: mediatek,mt8196-cpufreq-hw
+
+ reg:
+ items:
+ - description: FDVFS control register region
+ - description: OPP tables and control for performance domain 0
+ - description: OPP tables and control for performance domain 1
+ - description: OPP tables and control for performance domain 2
+
+ "#performance-domain-cells":
+ const: 1
+
+required:
+ - compatible
+ - reg
+ - "#performance-domain-cells"
+
+additionalProperties: false
+
+examples:
+ - |
+ cpus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ cpu0: cpu@0 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a720";
+ enable-method = "psci";
+ performance-domains = <&performance 0>;
+ reg = <0x000>;
+ };
+
+ /* ... */
+
+ cpu6: cpu@600 {
+ device_type = "cpu";
+ compatible = "arm,cortex-x4";
+ enable-method = "psci";
+ performance-domains = <&performance 1>;
+ reg = <0x600>;
+ };
+
+ cpu7: cpu@700 {
+ device_type = "cpu";
+ compatible = "arm,cortex-x925";
+ enable-method = "psci";
+ performance-domains = <&performance 2>;
+ reg = <0x700>;
+ };
+ };
+
+ /* ... */
+
+ soc {
+ #address-cells = <2>;
+ #size-cells = <2>;
+
+ performance: performance-controller@c2c2034 {
+ compatible = "mediatek,mt8196-cpufreq-hw";
+ reg = <0 0xc220400 0 0x20>, <0 0xc2c0f20 0 0x120>,
+ <0 0xc2c1040 0 0x120>, <0 0xc2c1160 0 0x120>;
+ #performance-domain-cells = <1>;
+ };
+ };
diff --git a/MAINTAINERS b/MAINTAINERS
index a6229d7a1483..9d7bd7b1a87c 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -6360,6 +6360,12 @@ F: kernel/sched/cpufreq*.c
F: rust/kernel/cpufreq.rs
F: tools/testing/selftests/cpufreq/
+CPU FREQUENCY DRIVERS - VIRTUAL MACHINE CPUFREQ
+M: Saravana Kannan <saravanak@google.com>
+L: linux-pm@vger.kernel.org
+S: Maintained
+F: drivers/cpufreq/virtual-cpufreq.c
+
CPU HOTPLUG
M: Thomas Gleixner <tglx@linutronix.de>
M: Peter Zijlstra <peterz@infradead.org>
diff --git a/arch/arm64/boot/dts/ti/k3-am62p5.dtsi b/arch/arm64/boot/dts/ti/k3-am62p5.dtsi
index 202378d9d5cf..8982a7b9f1a6 100644
--- a/arch/arm64/boot/dts/ti/k3-am62p5.dtsi
+++ b/arch/arm64/boot/dts/ti/k3-am62p5.dtsi
@@ -135,7 +135,7 @@
opp-1000000000 {
opp-hz = /bits/ 64 <1000000000>;
- opp-supported-hw = <0x01 0x0006>;
+ opp-supported-hw = <0x01 0x0007>;
clock-latency-ns = <6000000>;
};
diff --git a/drivers/base/base.h b/drivers/base/base.h
index 123031a757d9..700aecd22fd3 100644
--- a/drivers/base/base.h
+++ b/drivers/base/base.h
@@ -251,6 +251,14 @@ void device_links_unbind_consumers(struct device *dev);
void fw_devlink_drivers_done(void);
void fw_devlink_probing_done(void);
+#define dev_for_each_link_to_supplier(__link, __dev) \
+ list_for_each_entry_srcu(__link, &(__dev)->links.suppliers, c_node, \
+ device_links_read_lock_held())
+
+#define dev_for_each_link_to_consumer(__link, __dev) \
+ list_for_each_entry_srcu(__link, &(__dev)->links.consumers, s_node, \
+ device_links_read_lock_held())
+
/* device pm support */
void device_pm_move_to_tail(struct device *dev);
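As a usage sketch (mirroring the converted dpm_wait_for_suppliers() below), the new walkers bundle the SRCU iteration with the device_links_read_lock_held() lockdep check, so a caller only needs to take the device-links read lock around the loop:

	int idx = device_links_read_lock();
	struct device_link *link;

	dev_for_each_link_to_supplier(link, dev)
		if (READ_ONCE(link->status) != DL_STATE_DORMANT)
			dpm_wait(link->supplier, async);

	device_links_read_unlock(idx);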
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index 2ea6e05e6ec9..b9a34c3425ec 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -40,10 +40,6 @@
typedef int (*pm_callback_t)(struct device *);
-#define list_for_each_entry_rcu_locked(pos, head, member) \
- list_for_each_entry_rcu(pos, head, member, \
- device_links_read_lock_held())
-
/*
* The entries in the dpm_list list are in a depth first order, simply
* because children are guaranteed to be discovered after parents, and
@@ -281,7 +277,7 @@ static void dpm_wait_for_suppliers(struct device *dev, bool async)
* callbacks freeing the link objects for the links in the list we're
* walking.
*/
- list_for_each_entry_rcu_locked(link, &dev->links.suppliers, c_node)
+ dev_for_each_link_to_supplier(link, dev)
if (READ_ONCE(link->status) != DL_STATE_DORMANT)
dpm_wait(link->supplier, async);
@@ -338,7 +334,7 @@ static void dpm_wait_for_consumers(struct device *dev, bool async)
* continue instead of trying to continue in parallel with its
* unregistration).
*/
- list_for_each_entry_rcu_locked(link, &dev->links.consumers, s_node)
+ dev_for_each_link_to_consumer(link, dev)
if (READ_ONCE(link->status) != DL_STATE_DORMANT)
dpm_wait(link->consumer, async);
@@ -675,7 +671,7 @@ static void dpm_async_resume_subordinate(struct device *dev, async_func_t func)
idx = device_links_read_lock();
/* Start processing the device's "async" consumers. */
- list_for_each_entry_rcu_locked(link, &dev->links.consumers, s_node)
+ dev_for_each_link_to_consumer(link, dev)
if (READ_ONCE(link->status) != DL_STATE_DORMANT)
dpm_async_with_cleanup(link->consumer, func);
@@ -724,8 +720,20 @@ static void device_resume_noirq(struct device *dev, pm_message_t state, bool asy
if (dev->power.syscore || dev->power.direct_complete)
goto Out;
- if (!dev->power.is_noirq_suspended)
+ if (!dev->power.is_noirq_suspended) {
+ /*
+ * This means that system suspend has been aborted in the noirq
+ * phase before invoking the noirq suspend callback for the
+ * device, so if device_suspend_late() has left it in suspend,
+ * device_resume_early() should leave it in suspend as well, in
+ * case the early resume of the device depends on the noirq
+ * resume that has not run.
+ */
+ if (dev_pm_skip_suspend(dev))
+ dev->power.must_resume = false;
+
goto Out;
+ }
if (!dpm_wait_for_superior(dev, async))
goto Out;
@@ -1330,7 +1338,7 @@ static void dpm_async_suspend_superior(struct device *dev, async_func_t func)
idx = device_links_read_lock();
/* Start processing the device's "async" suppliers. */
- list_for_each_entry_rcu_locked(link, &dev->links.suppliers, c_node)
+ dev_for_each_link_to_supplier(link, dev)
if (READ_ONCE(link->status) != DL_STATE_DORMANT)
dpm_async_with_cleanup(link->supplier, func);
@@ -1384,7 +1392,7 @@ static void dpm_superior_set_must_resume(struct device *dev)
idx = device_links_read_lock();
- list_for_each_entry_rcu_locked(link, &dev->links.suppliers, c_node)
+ dev_for_each_link_to_supplier(link, dev)
link->supplier->power.must_resume = true;
device_links_read_unlock(idx);
@@ -1813,7 +1821,7 @@ static void dpm_clear_superiors_direct_complete(struct device *dev)
idx = device_links_read_lock();
- list_for_each_entry_rcu_locked(link, &dev->links.suppliers, c_node) {
+ dev_for_each_link_to_supplier(link, dev) {
spin_lock_irq(&link->supplier->power.lock);
link->supplier->power.direct_complete = false;
spin_unlock_irq(&link->supplier->power.lock);
@@ -2065,7 +2073,7 @@ static bool device_prepare_smart_suspend(struct device *dev)
idx = device_links_read_lock();
- list_for_each_entry_rcu_locked(link, &dev->links.suppliers, c_node) {
+ dev_for_each_link_to_supplier(link, dev) {
if (!device_link_test(link, DL_FLAG_PM_RUNTIME))
continue;
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
index 3e84dc4122de..7420b9851fe0 100644
--- a/drivers/base/power/runtime.c
+++ b/drivers/base/power/runtime.c
@@ -1903,8 +1903,7 @@ void pm_runtime_get_suppliers(struct device *dev)
idx = device_links_read_lock();
- list_for_each_entry_rcu(link, &dev->links.suppliers, c_node,
- device_links_read_lock_held())
+ dev_for_each_link_to_supplier(link, dev)
if (device_link_test(link, DL_FLAG_PM_RUNTIME)) {
link->supplier_preactivated = true;
pm_runtime_get_sync(link->supplier);
diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c
index 4f7f9201598d..083d8369a591 100644
--- a/drivers/cpufreq/acpi-cpufreq.c
+++ b/drivers/cpufreq/acpi-cpufreq.c
@@ -318,7 +318,6 @@ static u32 drv_read(struct acpi_cpufreq_data *data, const struct cpumask *mask)
return cmd.val;
}
-/* Called via smp_call_function_many(), on the target CPUs */
static void do_drv_write(void *_cmd)
{
struct drv_cmd *cmd = _cmd;
@@ -335,14 +334,8 @@ static void drv_write(struct acpi_cpufreq_data *data,
.val = val,
.func.write = data->cpu_freq_write,
};
- int this_cpu;
- this_cpu = get_cpu();
- if (cpumask_test_cpu(this_cpu, mask))
- do_drv_write(&cmd);
-
- smp_call_function_many(mask, do_drv_write, &cmd, 1);
- put_cpu();
+ on_each_cpu_mask(mask, do_drv_write, &cmd, true);
}
static u32 get_cur_val(const struct cpumask *mask, struct acpi_cpufreq_data *data)
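The on_each_cpu_mask() conversion above is behavior-preserving: the helper runs the function locally when the calling CPU is in the mask and cross-calls the remaining CPUs, handling preemption internally, with the final argument requesting that the caller wait for completion. In sketch form:

	/* before: run locally if applicable, then IPI the rest and wait */
	this_cpu = get_cpu();
	if (cpumask_test_cpu(this_cpu, mask))
		do_drv_write(&cmd);
	smp_call_function_many(mask, do_drv_write, &cmd, 1);
	put_cpu();

	/* after: one helper with the same semantics (true == wait) */
	on_each_cpu_mask(mask, do_drv_write, &cmd, true);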
diff --git a/drivers/cpufreq/airoha-cpufreq.c b/drivers/cpufreq/airoha-cpufreq.c
index 4fe39eadd163..b6b1cdc4d11d 100644
--- a/drivers/cpufreq/airoha-cpufreq.c
+++ b/drivers/cpufreq/airoha-cpufreq.c
@@ -107,6 +107,7 @@ static struct platform_driver airoha_cpufreq_driver = {
};
static const struct of_device_id airoha_cpufreq_match_list[] __initconst = {
+ { .compatible = "airoha,an7583" },
{ .compatible = "airoha,en7581" },
{},
};
diff --git a/drivers/cpufreq/armada-37xx-cpufreq.c b/drivers/cpufreq/armada-37xx-cpufreq.c
index f28a4435fba7..0efe403a5980 100644
--- a/drivers/cpufreq/armada-37xx-cpufreq.c
+++ b/drivers/cpufreq/armada-37xx-cpufreq.c
@@ -265,7 +265,7 @@ static void __init armada37xx_cpufreq_avs_configure(struct regmap *base,
*/
target_vm = avs_map[l0_vdd_min] - 100;
- target_vm = target_vm > MIN_VOLT_MV ? target_vm : MIN_VOLT_MV;
+ target_vm = max(target_vm, MIN_VOLT_MV);
dvfs->avs[1] = armada_37xx_avs_val_match(target_vm);
/*
@@ -273,7 +273,7 @@ static void __init armada37xx_cpufreq_avs_configure(struct regmap *base,
* be larger than 1000mv
*/
target_vm = avs_map[l0_vdd_min] - 150;
- target_vm = target_vm > MIN_VOLT_MV ? target_vm : MIN_VOLT_MV;
+ target_vm = max(target_vm, MIN_VOLT_MV);
dvfs->avs[2] = dvfs->avs[3] = armada_37xx_avs_val_match(target_vm);
/*
diff --git a/drivers/cpufreq/brcmstb-avs-cpufreq.c b/drivers/cpufreq/brcmstb-avs-cpufreq.c
index 5940d262374f..71450cca8e9f 100644
--- a/drivers/cpufreq/brcmstb-avs-cpufreq.c
+++ b/drivers/cpufreq/brcmstb-avs-cpufreq.c
@@ -480,7 +480,7 @@ static bool brcm_avs_is_firmware_loaded(struct private_data *priv)
static unsigned int brcm_avs_cpufreq_get(unsigned int cpu)
{
- struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
+ struct cpufreq_policy *policy __free(put_cpufreq_policy) = cpufreq_cpu_get(cpu);
struct private_data *priv;
if (!policy)
@@ -488,8 +488,6 @@ static unsigned int brcm_avs_cpufreq_get(unsigned int cpu)
priv = policy->driver_data;
- cpufreq_cpu_put(policy);
-
return brcm_avs_get_frequency(priv->base);
}
diff --git a/drivers/cpufreq/cppc_cpufreq.c b/drivers/cpufreq/cppc_cpufreq.c
index 4a17162a392d..12de0ac7bbaf 100644
--- a/drivers/cpufreq/cppc_cpufreq.c
+++ b/drivers/cpufreq/cppc_cpufreq.c
@@ -50,8 +50,7 @@ struct cppc_freq_invariance {
static DEFINE_PER_CPU(struct cppc_freq_invariance, cppc_freq_inv);
static struct kthread_worker *kworker_fie;
-static int cppc_perf_from_fbctrs(struct cppc_cpudata *cpu_data,
- struct cppc_perf_fb_ctrs *fb_ctrs_t0,
+static int cppc_perf_from_fbctrs(struct cppc_perf_fb_ctrs *fb_ctrs_t0,
struct cppc_perf_fb_ctrs *fb_ctrs_t1);
/**
@@ -87,8 +86,7 @@ static void cppc_scale_freq_workfn(struct kthread_work *work)
return;
}
- perf = cppc_perf_from_fbctrs(cpu_data, &cppc_fi->prev_perf_fb_ctrs,
- &fb_ctrs);
+ perf = cppc_perf_from_fbctrs(&cppc_fi->prev_perf_fb_ctrs, &fb_ctrs);
if (!perf)
return;
@@ -684,8 +682,7 @@ static inline u64 get_delta(u64 t1, u64 t0)
return (u32)t1 - (u32)t0;
}
-static int cppc_perf_from_fbctrs(struct cppc_cpudata *cpu_data,
- struct cppc_perf_fb_ctrs *fb_ctrs_t0,
+static int cppc_perf_from_fbctrs(struct cppc_perf_fb_ctrs *fb_ctrs_t0,
struct cppc_perf_fb_ctrs *fb_ctrs_t1)
{
u64 delta_reference, delta_delivered;
@@ -725,8 +722,8 @@ static int cppc_get_perf_ctrs_sample(int cpu,
static unsigned int cppc_cpufreq_get_rate(unsigned int cpu)
{
+ struct cpufreq_policy *policy __free(put_cpufreq_policy) = cpufreq_cpu_get(cpu);
struct cppc_perf_fb_ctrs fb_ctrs_t0 = {0}, fb_ctrs_t1 = {0};
- struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
struct cppc_cpudata *cpu_data;
u64 delivered_perf;
int ret;
@@ -736,8 +733,6 @@ static unsigned int cppc_cpufreq_get_rate(unsigned int cpu)
cpu_data = policy->driver_data;
- cpufreq_cpu_put(policy);
-
ret = cppc_get_perf_ctrs_sample(cpu, &fb_ctrs_t0, &fb_ctrs_t1);
if (ret) {
if (ret == -EFAULT)
@@ -747,8 +742,7 @@ static unsigned int cppc_cpufreq_get_rate(unsigned int cpu)
return 0;
}
- delivered_perf = cppc_perf_from_fbctrs(cpu_data, &fb_ctrs_t0,
- &fb_ctrs_t1);
+ delivered_perf = cppc_perf_from_fbctrs(&fb_ctrs_t0, &fb_ctrs_t1);
if (!delivered_perf)
goto out_invalid_counters;
diff --git a/drivers/cpufreq/cpufreq-dt-platdev.c b/drivers/cpufreq/cpufreq-dt-platdev.c
index 015dd393eaba..cd1816a12bb9 100644
--- a/drivers/cpufreq/cpufreq-dt-platdev.c
+++ b/drivers/cpufreq/cpufreq-dt-platdev.c
@@ -103,6 +103,7 @@ static const struct of_device_id allowlist[] __initconst = {
* platforms using "operating-points-v2" property.
*/
static const struct of_device_id blocklist[] __initconst = {
+ { .compatible = "airoha,an7583", },
{ .compatible = "airoha,en7581", },
{ .compatible = "allwinner,sun50i-a100" },
@@ -188,9 +189,11 @@ static const struct of_device_id blocklist[] __initconst = {
{ .compatible = "ti,omap3", },
{ .compatible = "ti,am625", },
{ .compatible = "ti,am62a7", },
+ { .compatible = "ti,am62d2", },
{ .compatible = "ti,am62p5", },
{ .compatible = "qcom,ipq5332", },
+ { .compatible = "qcom,ipq5424", },
{ .compatible = "qcom,ipq6018", },
{ .compatible = "qcom,ipq8064", },
{ .compatible = "qcom,ipq8074", },
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 58e3839a2140..852e024facc3 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -664,10 +664,10 @@ unlock:
static unsigned int cpufreq_parse_policy(char *str_governor)
{
- if (!strncasecmp(str_governor, "performance", CPUFREQ_NAME_LEN))
+ if (!strncasecmp(str_governor, "performance", strlen("performance")))
return CPUFREQ_POLICY_PERFORMANCE;
- if (!strncasecmp(str_governor, "powersave", CPUFREQ_NAME_LEN))
+ if (!strncasecmp(str_governor, "powersave", strlen("powersave")))
return CPUFREQ_POLICY_POWERSAVE;
return CPUFREQ_POLICY_UNKNOWN;
@@ -914,7 +914,7 @@ static ssize_t store_scaling_setspeed(struct cpufreq_policy *policy,
const char *buf, size_t count)
{
unsigned int freq = 0;
- unsigned int ret;
+ int ret;
if (!policy->governor || !policy->governor->store_setspeed)
return -EINVAL;
@@ -1121,7 +1121,8 @@ static int cpufreq_init_policy(struct cpufreq_policy *policy)
if (has_target()) {
/* Update policy governor to the one used before hotplug. */
- gov = get_governor(policy->last_governor);
+ if (policy->last_governor[0] != '\0')
+ gov = get_governor(policy->last_governor);
if (gov) {
pr_debug("Restoring governor %s for cpu %d\n",
gov->name, policy->cpu);
@@ -1844,7 +1845,6 @@ static unsigned int cpufreq_verify_current_freq(struct cpufreq_policy *policy, b
*/
unsigned int cpufreq_quick_get(unsigned int cpu)
{
- struct cpufreq_policy *policy __free(put_cpufreq_policy) = NULL;
unsigned long flags;
read_lock_irqsave(&cpufreq_driver_lock, flags);
@@ -1859,7 +1859,7 @@ unsigned int cpufreq_quick_get(unsigned int cpu)
read_unlock_irqrestore(&cpufreq_driver_lock, flags);
- policy = cpufreq_cpu_get(cpu);
+ struct cpufreq_policy *policy __free(put_cpufreq_policy) = cpufreq_cpu_get(cpu);
if (policy)
return policy->cur;
@@ -1875,9 +1875,7 @@ EXPORT_SYMBOL(cpufreq_quick_get);
*/
unsigned int cpufreq_quick_get_max(unsigned int cpu)
{
- struct cpufreq_policy *policy __free(put_cpufreq_policy);
-
- policy = cpufreq_cpu_get(cpu);
+ struct cpufreq_policy *policy __free(put_cpufreq_policy) = cpufreq_cpu_get(cpu);
if (policy)
return policy->max;
@@ -1893,9 +1891,7 @@ EXPORT_SYMBOL(cpufreq_quick_get_max);
*/
__weak unsigned int cpufreq_get_hw_max_freq(unsigned int cpu)
{
- struct cpufreq_policy *policy __free(put_cpufreq_policy);
-
- policy = cpufreq_cpu_get(cpu);
+ struct cpufreq_policy *policy __free(put_cpufreq_policy) = cpufreq_cpu_get(cpu);
if (policy)
return policy->cpuinfo.max_freq;
@@ -1919,9 +1915,7 @@ static unsigned int __cpufreq_get(struct cpufreq_policy *policy)
*/
unsigned int cpufreq_get(unsigned int cpu)
{
- struct cpufreq_policy *policy __free(put_cpufreq_policy);
-
- policy = cpufreq_cpu_get(cpu);
+ struct cpufreq_policy *policy __free(put_cpufreq_policy) = cpufreq_cpu_get(cpu);
if (!policy)
return 0;
@@ -2750,9 +2744,7 @@ static void cpufreq_policy_refresh(struct cpufreq_policy *policy)
*/
void cpufreq_update_policy(unsigned int cpu)
{
- struct cpufreq_policy *policy __free(put_cpufreq_policy);
-
- policy = cpufreq_cpu_get(cpu);
+ struct cpufreq_policy *policy __free(put_cpufreq_policy) = cpufreq_cpu_get(cpu);
if (!policy)
return;
@@ -2769,9 +2761,7 @@ EXPORT_SYMBOL(cpufreq_update_policy);
*/
void cpufreq_update_limits(unsigned int cpu)
{
- struct cpufreq_policy *policy __free(put_cpufreq_policy);
-
- policy = cpufreq_cpu_get(cpu);
+ struct cpufreq_policy *policy __free(put_cpufreq_policy) = cpufreq_cpu_get(cpu);
if (!policy)
return;
@@ -2792,7 +2782,7 @@ int cpufreq_boost_set_sw(struct cpufreq_policy *policy, int state)
if (!policy->freq_table)
return -ENXIO;
- ret = cpufreq_frequency_table_cpuinfo(policy, policy->freq_table);
+ ret = cpufreq_frequency_table_cpuinfo(policy);
if (ret) {
pr_err("%s: Policy frequency update failed\n", __func__);
return ret;
@@ -2921,10 +2911,8 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
return -EPROBE_DEFER;
if (!driver_data || !driver_data->verify || !driver_data->init ||
- !(driver_data->setpolicy || driver_data->target_index ||
- driver_data->target) ||
- (driver_data->setpolicy && (driver_data->target_index ||
- driver_data->target)) ||
+ (driver_data->target_index && driver_data->target) ||
+ (!!driver_data->setpolicy == (driver_data->target_index || driver_data->target)) ||
(!driver_data->get_intermediate != !driver_data->target_intermediate) ||
(!driver_data->online != !driver_data->offline) ||
(driver_data->adjust_perf && !driver_data->fast_switch))
@@ -3058,9 +3046,7 @@ static int __init cpufreq_core_init(void)
static bool cpufreq_policy_is_good_for_eas(unsigned int cpu)
{
- struct cpufreq_policy *policy __free(put_cpufreq_policy);
-
- policy = cpufreq_cpu_get(cpu);
+ struct cpufreq_policy *policy __free(put_cpufreq_policy) = cpufreq_cpu_get(cpu);
if (!policy) {
pr_debug("cpufreq policy not set for CPU: %d\n", cpu);
return false;
diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c
index 56500b25d77c..cce6a8d113e1 100644
--- a/drivers/cpufreq/cpufreq_conservative.c
+++ b/drivers/cpufreq/cpufreq_conservative.c
@@ -152,9 +152,9 @@ static ssize_t sampling_down_factor_store(struct gov_attr_set *attr_set,
struct dbs_data *dbs_data = to_dbs_data(attr_set);
unsigned int input;
int ret;
- ret = sscanf(buf, "%u", &input);
+ ret = kstrtouint(buf, 0, &input);
- if (ret != 1 || input > MAX_SAMPLING_DOWN_FACTOR || input < 1)
+ if (ret || input > MAX_SAMPLING_DOWN_FACTOR || input < 1)
return -EINVAL;
dbs_data->sampling_down_factor = input;
@@ -168,9 +168,9 @@ static ssize_t up_threshold_store(struct gov_attr_set *attr_set,
struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
unsigned int input;
int ret;
- ret = sscanf(buf, "%u", &input);
+ ret = kstrtouint(buf, 0, &input);
- if (ret != 1 || input > 100 || input <= cs_tuners->down_threshold)
+ if (ret || input > 100 || input <= cs_tuners->down_threshold)
return -EINVAL;
dbs_data->up_threshold = input;
@@ -184,10 +184,10 @@ static ssize_t down_threshold_store(struct gov_attr_set *attr_set,
struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
unsigned int input;
int ret;
- ret = sscanf(buf, "%u", &input);
+ ret = kstrtouint(buf, 0, &input);
/* cannot be lower than 1 otherwise freq will not fall */
- if (ret != 1 || input < 1 || input >= dbs_data->up_threshold)
+ if (ret || input < 1 || input >= dbs_data->up_threshold)
return -EINVAL;
cs_tuners->down_threshold = input;
@@ -201,9 +201,9 @@ static ssize_t ignore_nice_load_store(struct gov_attr_set *attr_set,
unsigned int input;
int ret;
- ret = sscanf(buf, "%u", &input);
- if (ret != 1)
- return -EINVAL;
+ ret = kstrtouint(buf, 0, &input);
+ if (ret)
+ return ret;
if (input > 1)
input = 1;
@@ -226,10 +226,10 @@ static ssize_t freq_step_store(struct gov_attr_set *attr_set, const char *buf,
struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
unsigned int input;
int ret;
- ret = sscanf(buf, "%u", &input);
+ ret = kstrtouint(buf, 0, &input);
- if (ret != 1)
- return -EINVAL;
+ if (ret)
+ return ret;
if (input > 100)
input = 100;
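Note the changed return-value convention driving these edits: sscanf() returns the number of fields converted (hence the old ret != 1 checks), while kstrtouint() returns 0 on success or a negative errno (-EINVAL for malformed input, -ERANGE for overflow), and base 0 auto-detects decimal, hex, and octal. A rough sketch of the resulting store-handler shape (example_store is hypothetical):

	static ssize_t example_store(struct gov_attr_set *attr_set,
				     const char *buf, size_t count)
	{
		unsigned int input;
		int ret;

		ret = kstrtouint(buf, 0, &input);
		if (ret)
			return ret;	/* -EINVAL or -ERANGE */

		if (input > 100)	/* tunable-specific range check */
			return -EINVAL;

		/* store 'input' into the tunable here */
		return count;
	}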
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index 0e65d37c9231..a6ecc203f7b7 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -30,29 +30,6 @@ static struct od_ops od_ops;
static unsigned int default_powersave_bias;
/*
- * Not all CPUs want IO time to be accounted as busy; this depends on how
- * efficient idling at a higher frequency/voltage is.
- * Pavel Machek says this is not so for various generations of AMD and old
- * Intel systems.
- * Mike Chan (android.com) claims this is also not true for ARM.
- * Because of this, whitelist specific known (series) of CPUs by default, and
- * leave all others up to the user.
- */
-static int should_io_be_busy(void)
-{
-#if defined(CONFIG_X86)
- /*
- * For Intel, Core 2 (model 15) and later have an efficient idle.
- */
- if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
- boot_cpu_data.x86 == 6 &&
- boot_cpu_data.x86_model >= 15)
- return 1;
-#endif
- return 0;
-}
-
-/*
* Find right freq to be set now with powersave_bias on.
* Returns the freq_hi to be used right now and will set freq_hi_delay_us,
* freq_lo, and freq_lo_delay_us in percpu area for averaging freqs.
@@ -377,7 +354,7 @@ static int od_init(struct dbs_data *dbs_data)
dbs_data->sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR;
dbs_data->ignore_nice_load = 0;
tuners->powersave_bias = default_powersave_bias;
- dbs_data->io_is_busy = should_io_be_busy();
+ dbs_data->io_is_busy = od_should_io_be_busy();
dbs_data->tuners = tuners;
return 0;
diff --git a/drivers/cpufreq/cpufreq_ondemand.h b/drivers/cpufreq/cpufreq_ondemand.h
index 1af8e5c4b86f..2ca8f1aaf2e3 100644
--- a/drivers/cpufreq/cpufreq_ondemand.h
+++ b/drivers/cpufreq/cpufreq_ondemand.h
@@ -24,3 +24,26 @@ static inline struct od_policy_dbs_info *to_dbs_info(struct policy_dbs_info *pol
struct od_dbs_tuners {
unsigned int powersave_bias;
};
+
+#ifdef CONFIG_X86
+#include <asm/cpu_device_id.h>
+
+/*
+ * Not all CPUs want IO time to be accounted as busy; this depends on
+ * how efficient idling at a higher frequency/voltage is.
+ *
+ * Pavel Machek says this is not so for various generations of AMD and
+ * old Intel systems. Mike Chan (android.com) claims this is also not
+ * true for ARM.
+ *
+ * Because of this, select a known series of Intel CPUs (Family 6 and
+ * later) by default, and leave all others up to the user.
+ */
+static inline bool od_should_io_be_busy(void)
+{
+ return (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
+ boot_cpu_data.x86_vfm >= INTEL_PENTIUM_PRO);
+}
+#else
+static inline bool od_should_io_be_busy(void) { return false; }
+#endif
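The x86_vfm comparison works because the VFM encoding in <asm/cpu_device_id.h> packs vendor, family, and model into one integer (model in bits 7:0, family in bits 15:8, vendor in bits 23:16), so for a fixed vendor a single >= test orders CPUs by (family, model) and naturally covers later extended families; roughly:

	/* INTEL_PENTIUM_PRO encodes vendor Intel, Family 6, Model 1 */
	bool efficient_idle = boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
			      boot_cpu_data.x86_vfm >= INTEL_PENTIUM_PRO;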
diff --git a/drivers/cpufreq/freq_table.c b/drivers/cpufreq/freq_table.c
index 35de513af6c9..7f251daf03ce 100644
--- a/drivers/cpufreq/freq_table.c
+++ b/drivers/cpufreq/freq_table.c
@@ -28,22 +28,21 @@ static bool policy_has_boost_freq(struct cpufreq_policy *policy)
return false;
}
-int cpufreq_frequency_table_cpuinfo(struct cpufreq_policy *policy,
- struct cpufreq_frequency_table *table)
+int cpufreq_frequency_table_cpuinfo(struct cpufreq_policy *policy)
{
- struct cpufreq_frequency_table *pos;
+ struct cpufreq_frequency_table *pos, *table = policy->freq_table;
unsigned int min_freq = ~0;
unsigned int max_freq = 0;
- unsigned int freq;
+ unsigned int freq, i;
- cpufreq_for_each_valid_entry(pos, table) {
+ cpufreq_for_each_valid_entry_idx(pos, table, i) {
freq = pos->frequency;
if ((!cpufreq_boost_enabled() || !policy->boost_enabled)
&& (pos->flags & CPUFREQ_BOOST_FREQ))
continue;
- pr_debug("table entry %u: %u kHz\n", (int)(pos - table), freq);
+ pr_debug("table entry %u: %u kHz\n", i, freq);
if (freq < min_freq)
min_freq = freq;
if (freq > max_freq)
@@ -65,10 +64,9 @@ int cpufreq_frequency_table_cpuinfo(struct cpufreq_policy *policy,
return 0;
}
-int cpufreq_frequency_table_verify(struct cpufreq_policy_data *policy,
- struct cpufreq_frequency_table *table)
+int cpufreq_frequency_table_verify(struct cpufreq_policy_data *policy)
{
- struct cpufreq_frequency_table *pos;
+ struct cpufreq_frequency_table *pos, *table = policy->freq_table;
unsigned int freq, prev_smaller = 0;
bool found = false;
@@ -110,7 +108,7 @@ int cpufreq_generic_frequency_table_verify(struct cpufreq_policy_data *policy)
if (!policy->freq_table)
return -ENODEV;
- return cpufreq_frequency_table_verify(policy, policy->freq_table);
+ return cpufreq_frequency_table_verify(policy);
}
EXPORT_SYMBOL_GPL(cpufreq_generic_frequency_table_verify);
@@ -128,7 +126,7 @@ int cpufreq_table_index_unsorted(struct cpufreq_policy *policy,
};
struct cpufreq_frequency_table *pos;
struct cpufreq_frequency_table *table = policy->freq_table;
- unsigned int freq, diff, i = 0;
+ unsigned int freq, diff, i;
int index;
pr_debug("request for target %u kHz (relation: %u) for cpu %u\n",
@@ -354,7 +352,7 @@ int cpufreq_table_validate_and_sort(struct cpufreq_policy *policy)
return 0;
}
- ret = cpufreq_frequency_table_cpuinfo(policy, policy->freq_table);
+ ret = cpufreq_frequency_table_cpuinfo(policy);
if (ret)
return ret;
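The index-tracking iterator is what makes the (pos - table) pointer arithmetic above unnecessary; cpufreq_for_each_valid_entry_idx() advances an index alongside the cursor while skipping CPUFREQ_ENTRY_INVALID entries, along the lines of:

	struct cpufreq_frequency_table *pos;
	unsigned int i;

	cpufreq_for_each_valid_entry_idx(pos, policy->freq_table, i)
		pr_debug("table entry %u: %u kHz\n", i, pos->frequency);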
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index 0d5d283a5429..38897bb14a2c 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -620,24 +620,9 @@ static int min_perf_pct_min(void)
(cpu->pstate.min_pstate * 100 / turbo_pstate) : 0;
}
-static s16 intel_pstate_get_epb(struct cpudata *cpu_data)
-{
- u64 epb;
- int ret;
-
- if (!boot_cpu_has(X86_FEATURE_EPB))
- return -ENXIO;
-
- ret = rdmsrq_on_cpu(cpu_data->cpu, MSR_IA32_ENERGY_PERF_BIAS, &epb);
- if (ret)
- return (s16)ret;
-
- return (s16)(epb & 0x0f);
-}
-
static s16 intel_pstate_get_epp(struct cpudata *cpu_data, u64 hwp_req_data)
{
- s16 epp;
+ s16 epp = -EOPNOTSUPP;
if (boot_cpu_has(X86_FEATURE_HWP_EPP)) {
/*
@@ -651,34 +636,13 @@ static s16 intel_pstate_get_epp(struct cpudata *cpu_data, u64 hwp_req_data)
return epp;
}
epp = (hwp_req_data >> 24) & 0xff;
- } else {
- /* When there is no EPP present, HWP uses EPB settings */
- epp = intel_pstate_get_epb(cpu_data);
}
return epp;
}
-static int intel_pstate_set_epb(int cpu, s16 pref)
-{
- u64 epb;
- int ret;
-
- if (!boot_cpu_has(X86_FEATURE_EPB))
- return -ENXIO;
-
- ret = rdmsrq_on_cpu(cpu, MSR_IA32_ENERGY_PERF_BIAS, &epb);
- if (ret)
- return ret;
-
- epb = (epb & ~0x0f) | pref;
- wrmsrq_on_cpu(cpu, MSR_IA32_ENERGY_PERF_BIAS, epb);
-
- return 0;
-}
-
/*
- * EPP/EPB display strings corresponding to EPP index in the
+ * EPP display strings corresponding to EPP index in the
* energy_perf_strings[]
* index String
*-------------------------------------
@@ -782,7 +746,7 @@ static int intel_pstate_set_energy_pref_index(struct cpudata *cpu_data,
u32 raw_epp)
{
int epp = -EINVAL;
- int ret;
+ int ret = -EOPNOTSUPP;
if (!pref_index)
epp = cpu_data->epp_default;
@@ -802,10 +766,6 @@ static int intel_pstate_set_energy_pref_index(struct cpudata *cpu_data,
return -EBUSY;
ret = intel_pstate_set_epp(cpu_data, epp);
- } else {
- if (epp == -EINVAL)
- epp = (pref_index - 1) << 2;
- ret = intel_pstate_set_epb(cpu_data->cpu, epp);
}
return ret;
@@ -937,11 +897,19 @@ static ssize_t show_base_frequency(struct cpufreq_policy *policy, char *buf)
cpufreq_freq_attr_ro(base_frequency);
+enum hwp_cpufreq_attr_index {
+ HWP_BASE_FREQUENCY_INDEX = 0,
+ HWP_PERFORMANCE_PREFERENCE_INDEX,
+ HWP_PERFORMANCE_AVAILABLE_PREFERENCES_INDEX,
+ HWP_CPUFREQ_ATTR_COUNT,
+};
+
static struct freq_attr *hwp_cpufreq_attrs[] = {
- &energy_performance_preference,
- &energy_performance_available_preferences,
- &base_frequency,
- NULL,
+ [HWP_BASE_FREQUENCY_INDEX] = &base_frequency,
+ [HWP_PERFORMANCE_PREFERENCE_INDEX] = &energy_performance_preference,
+ [HWP_PERFORMANCE_AVAILABLE_PREFERENCES_INDEX] =
+ &energy_performance_available_preferences,
+ [HWP_CPUFREQ_ATTR_COUNT] = NULL,
};
static bool no_cas __ro_after_init;
@@ -1337,9 +1305,8 @@ static void intel_pstate_hwp_set(unsigned int cpu)
if (boot_cpu_has(X86_FEATURE_HWP_EPP)) {
value &= ~GENMASK_ULL(31, 24);
value |= (u64)epp << 24;
- } else {
- intel_pstate_set_epb(cpu, epp);
}
+
skip_epp:
WRITE_ONCE(cpu_data->hwp_req_cached, value);
wrmsrq_on_cpu(cpu, MSR_HWP_REQUEST, value);
@@ -1411,6 +1378,9 @@ static void intel_pstate_hwp_offline(struct cpudata *cpu)
#define POWER_CTL_EE_ENABLE 1
#define POWER_CTL_EE_DISABLE 2
+/* Enable bit for Dynamic Efficiency Control (DEC) */
+#define POWER_CTL_DEC_ENABLE 27
+
static int power_ctl_ee_state;
static void set_power_ctl_ee_state(bool input)
@@ -1502,9 +1472,7 @@ static void __intel_pstate_update_max_freq(struct cpufreq_policy *policy,
static bool intel_pstate_update_max_freq(struct cpudata *cpudata)
{
- struct cpufreq_policy *policy __free(put_cpufreq_policy);
-
- policy = cpufreq_cpu_get(cpudata->cpu);
+ struct cpufreq_policy *policy __free(put_cpufreq_policy) = cpufreq_cpu_get(cpudata->cpu);
if (!policy)
return false;
@@ -1695,41 +1663,40 @@ unlock_driver:
return count;
}
-static void update_qos_request(enum freq_qos_req_type type)
+static void update_cpu_qos_request(int cpu, enum freq_qos_req_type type)
{
+ struct cpudata *cpudata = all_cpu_data[cpu];
+ unsigned int freq = cpudata->pstate.turbo_freq;
struct freq_qos_request *req;
- struct cpufreq_policy *policy;
- int i;
-
- for_each_possible_cpu(i) {
- struct cpudata *cpu = all_cpu_data[i];
- unsigned int freq, perf_pct;
- policy = cpufreq_cpu_get(i);
- if (!policy)
- continue;
+ struct cpufreq_policy *policy __free(put_cpufreq_policy) = cpufreq_cpu_get(cpu);
+ if (!policy)
+ return;
- req = policy->driver_data;
- cpufreq_cpu_put(policy);
+ req = policy->driver_data;
+ if (!req)
+ return;
- if (!req)
- continue;
+ if (hwp_active)
+ intel_pstate_get_hwp_cap(cpudata);
- if (hwp_active)
- intel_pstate_get_hwp_cap(cpu);
+ if (type == FREQ_QOS_MIN) {
+ freq = DIV_ROUND_UP(freq * global.min_perf_pct, 100);
+ } else {
+ req++;
+ freq = (freq * global.max_perf_pct) / 100;
+ }
- if (type == FREQ_QOS_MIN) {
- perf_pct = global.min_perf_pct;
- } else {
- req++;
- perf_pct = global.max_perf_pct;
- }
+ if (freq_qos_update_request(req, freq) < 0)
+ pr_warn("Failed to update freq constraint: CPU%d\n", cpu);
+}
- freq = DIV_ROUND_UP(cpu->pstate.turbo_freq * perf_pct, 100);
+static void update_qos_requests(enum freq_qos_req_type type)
+{
+ int i;
- if (freq_qos_update_request(req, freq) < 0)
- pr_warn("Failed to update freq constraint: CPU%d\n", i);
- }
+ for_each_possible_cpu(i)
+ update_cpu_qos_request(i, type);
}
static ssize_t store_max_perf_pct(struct kobject *a, struct kobj_attribute *b,
@@ -1758,7 +1725,7 @@ static ssize_t store_max_perf_pct(struct kobject *a, struct kobj_attribute *b,
if (intel_pstate_driver == &intel_pstate)
intel_pstate_update_policies();
else
- update_qos_request(FREQ_QOS_MAX);
+ update_qos_requests(FREQ_QOS_MAX);
mutex_unlock(&intel_pstate_driver_lock);
@@ -1792,7 +1759,7 @@ static ssize_t store_min_perf_pct(struct kobject *a, struct kobj_attribute *b,
if (intel_pstate_driver == &intel_pstate)
intel_pstate_update_policies();
else
- update_qos_request(FREQ_QOS_MIN);
+ update_qos_requests(FREQ_QOS_MIN);
mutex_unlock(&intel_pstate_driver_lock);
@@ -2575,7 +2542,7 @@ static inline bool intel_pstate_sample(struct cpudata *cpu, u64 time)
* that sample.time will always be reset before setting the utilization
* update hook and make the caller skip the sample then.
*/
- if (cpu->last_sample_time) {
+ if (likely(cpu->last_sample_time)) {
intel_pstate_calc_avg_perf(cpu);
return true;
}
@@ -3802,6 +3769,26 @@ static const struct x86_cpu_id intel_hybrid_scaling_factor[] = {
{}
};
+static bool hwp_check_epp(void)
+{
+ if (boot_cpu_has(X86_FEATURE_HWP_EPP))
+ return true;
+
+ /* Without EPP support, don't expose EPP-related sysfs attributes. */
+ hwp_cpufreq_attrs[HWP_PERFORMANCE_PREFERENCE_INDEX] = NULL;
+ hwp_cpufreq_attrs[HWP_PERFORMANCE_AVAILABLE_PREFERENCES_INDEX] = NULL;
+
+ return false;
+}
+
+static bool hwp_check_dec(void)
+{
+ u64 power_ctl;
+
+ rdmsrq(MSR_IA32_POWER_CTL, power_ctl);
+ return !!(power_ctl & BIT(POWER_CTL_DEC_ENABLE));
+}
+
static int __init intel_pstate_init(void)
{
static struct cpudata **_all_cpu_data;
@@ -3822,23 +3809,32 @@ static int __init intel_pstate_init(void)
id = x86_match_cpu(hwp_support_ids);
if (id) {
- hwp_forced = intel_pstate_hwp_is_enabled();
+ bool epp_present = hwp_check_epp();
- if (hwp_forced)
+ /*
+ * If HWP is enabled already, there is no choice but to deal
+ * with it.
+ */
+ hwp_forced = intel_pstate_hwp_is_enabled();
+ if (hwp_forced) {
pr_info("HWP enabled by BIOS\n");
- else if (no_load)
+ no_hwp = 0;
+ } else if (no_load) {
return -ENODEV;
+ } else if (!epp_present && !hwp_check_dec()) {
+ /*
+ * Avoid enabling HWP for processors without EPP support
+ * unless the Dynamic Efficiency Control (DEC) enable
+ * bit (MSR_IA32_POWER_CTL, bit 27) is set because that
+ * means incomplete HWP implementation which is a corner
+ * case and supporting it is generally problematic.
+ */
+ no_hwp = 1;
+ }
copy_cpu_funcs(&core_funcs);
- /*
- * Avoid enabling HWP for processors without EPP support,
- * because that means incomplete HWP implementation which is a
- * corner case and supporting it is generally problematic.
- *
- * If HWP is enabled already, though, there is no choice but to
- * deal with it.
- */
- if ((!no_hwp && boot_cpu_has(X86_FEATURE_HWP_EPP)) || hwp_forced) {
+
+ if (!no_hwp) {
hwp_active = true;
hwp_mode_bdw = id->driver_data;
intel_pstate.attr = hwp_cpufreq_attrs;
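Condensing the reworked initialization logic above: HWP stays in use whenever the BIOS has already enabled it, and is otherwise avoided only when both EPP and DEC are absent. A rough restatement of the decision:

	if (hwp_forced)
		no_hwp = 0;	/* HWP already on, no choice but to use it */
	else if (!epp_present && !hwp_check_dec())
		no_hwp = 1;	/* EPP-less and DEC-less HWP is incomplete */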
diff --git a/drivers/cpufreq/longhaul.c b/drivers/cpufreq/longhaul.c
index ba0e08c8486a..49e76b44468a 100644
--- a/drivers/cpufreq/longhaul.c
+++ b/drivers/cpufreq/longhaul.c
@@ -953,6 +953,9 @@ static void __exit longhaul_exit(void)
struct cpufreq_policy *policy = cpufreq_cpu_get(0);
int i;
+ if (unlikely(!policy))
+ return;
+
for (i = 0; i < numscales; i++) {
if (mults[i] == maxmult) {
struct cpufreq_freqs freqs;
diff --git a/drivers/cpufreq/mediatek-cpufreq-hw.c b/drivers/cpufreq/mediatek-cpufreq-hw.c
index 74f1b4c796e4..fce5aa5ceea0 100644
--- a/drivers/cpufreq/mediatek-cpufreq-hw.c
+++ b/drivers/cpufreq/mediatek-cpufreq-hw.c
@@ -24,6 +24,8 @@
#define POLL_USEC 1000
#define TIMEOUT_USEC 300000
+#define FDVFS_FDIV_HZ (26 * 1000)
+
enum {
REG_FREQ_LUT_TABLE,
REG_FREQ_ENABLE,
@@ -35,7 +37,14 @@ enum {
REG_ARRAY_SIZE,
};
-struct mtk_cpufreq_data {
+struct mtk_cpufreq_priv {
+ struct device *dev;
+ const struct mtk_cpufreq_variant *variant;
+ void __iomem *fdvfs;
+};
+
+struct mtk_cpufreq_domain {
+ struct mtk_cpufreq_priv *parent;
struct cpufreq_frequency_table *table;
void __iomem *reg_bases[REG_ARRAY_SIZE];
struct resource *res;
@@ -43,20 +52,51 @@ struct mtk_cpufreq_data {
int nr_opp;
};
-static const u16 cpufreq_mtk_offsets[REG_ARRAY_SIZE] = {
- [REG_FREQ_LUT_TABLE] = 0x0,
- [REG_FREQ_ENABLE] = 0x84,
- [REG_FREQ_PERF_STATE] = 0x88,
- [REG_FREQ_HW_STATE] = 0x8c,
- [REG_EM_POWER_TBL] = 0x90,
- [REG_FREQ_LATENCY] = 0x110,
+struct mtk_cpufreq_variant {
+ int (*init)(struct mtk_cpufreq_priv *priv);
+ const u16 reg_offsets[REG_ARRAY_SIZE];
+ const bool is_hybrid_dvfs;
+};
+
+static const struct mtk_cpufreq_variant cpufreq_mtk_base_variant = {
+ .reg_offsets = {
+ [REG_FREQ_LUT_TABLE] = 0x0,
+ [REG_FREQ_ENABLE] = 0x84,
+ [REG_FREQ_PERF_STATE] = 0x88,
+ [REG_FREQ_HW_STATE] = 0x8c,
+ [REG_EM_POWER_TBL] = 0x90,
+ [REG_FREQ_LATENCY] = 0x110,
+ },
+};
+
+static int mtk_cpufreq_hw_mt8196_init(struct mtk_cpufreq_priv *priv)
+{
+ priv->fdvfs = devm_of_iomap(priv->dev, priv->dev->of_node, 0, NULL);
+ if (IS_ERR(priv->fdvfs))
+ return dev_err_probe(priv->dev, PTR_ERR(priv->fdvfs),
+ "failed to get fdvfs iomem\n");
+
+ return 0;
+}
+
+static const struct mtk_cpufreq_variant cpufreq_mtk_mt8196_variant = {
+ .init = mtk_cpufreq_hw_mt8196_init,
+ .reg_offsets = {
+ [REG_FREQ_LUT_TABLE] = 0x0,
+ [REG_FREQ_ENABLE] = 0x84,
+ [REG_FREQ_PERF_STATE] = 0x88,
+ [REG_FREQ_HW_STATE] = 0x8c,
+ [REG_EM_POWER_TBL] = 0x90,
+ [REG_FREQ_LATENCY] = 0x114,
+ },
+ .is_hybrid_dvfs = true,
};
static int __maybe_unused
mtk_cpufreq_get_cpu_power(struct device *cpu_dev, unsigned long *uW,
unsigned long *KHz)
{
- struct mtk_cpufreq_data *data;
+ struct mtk_cpufreq_domain *data;
struct cpufreq_policy *policy;
int i;
@@ -80,19 +120,38 @@ mtk_cpufreq_get_cpu_power(struct device *cpu_dev, unsigned long *uW,
return 0;
}
+static void mtk_cpufreq_hw_fdvfs_switch(unsigned int target_freq,
+ struct cpufreq_policy *policy)
+{
+ struct mtk_cpufreq_domain *data = policy->driver_data;
+ struct mtk_cpufreq_priv *priv = data->parent;
+ unsigned int cpu;
+
+ target_freq = DIV_ROUND_UP(target_freq, FDVFS_FDIV_HZ);
+ for_each_cpu(cpu, policy->real_cpus) {
+ writel_relaxed(target_freq, priv->fdvfs + cpu * 4);
+ }
+}
+
static int mtk_cpufreq_hw_target_index(struct cpufreq_policy *policy,
unsigned int index)
{
- struct mtk_cpufreq_data *data = policy->driver_data;
-
- writel_relaxed(index, data->reg_bases[REG_FREQ_PERF_STATE]);
+ struct mtk_cpufreq_domain *data = policy->driver_data;
+ unsigned int target_freq;
+
+ if (data->parent->fdvfs) {
+ target_freq = policy->freq_table[index].frequency;
+ mtk_cpufreq_hw_fdvfs_switch(target_freq, policy);
+ } else {
+ writel_relaxed(index, data->reg_bases[REG_FREQ_PERF_STATE]);
+ }
return 0;
}
static unsigned int mtk_cpufreq_hw_get(unsigned int cpu)
{
- struct mtk_cpufreq_data *data;
+ struct mtk_cpufreq_domain *data;
struct cpufreq_policy *policy;
unsigned int index;
@@ -111,18 +170,21 @@ static unsigned int mtk_cpufreq_hw_get(unsigned int cpu)
static unsigned int mtk_cpufreq_hw_fast_switch(struct cpufreq_policy *policy,
unsigned int target_freq)
{
- struct mtk_cpufreq_data *data = policy->driver_data;
+ struct mtk_cpufreq_domain *data = policy->driver_data;
unsigned int index;
index = cpufreq_table_find_index_dl(policy, target_freq, false);
- writel_relaxed(index, data->reg_bases[REG_FREQ_PERF_STATE]);
+ if (data->parent->fdvfs)
+ mtk_cpufreq_hw_fdvfs_switch(target_freq, policy);
+ else
+ writel_relaxed(index, data->reg_bases[REG_FREQ_PERF_STATE]);
return policy->freq_table[index].frequency;
}
static int mtk_cpu_create_freq_table(struct platform_device *pdev,
- struct mtk_cpufreq_data *data)
+ struct mtk_cpufreq_domain *data)
{
struct device *dev = &pdev->dev;
u32 temp, i, freq, prev_freq = 0;
@@ -157,9 +219,9 @@ static int mtk_cpu_create_freq_table(struct platform_device *pdev,
static int mtk_cpu_resources_init(struct platform_device *pdev,
struct cpufreq_policy *policy,
- const u16 *offsets)
+ struct mtk_cpufreq_priv *priv)
{
- struct mtk_cpufreq_data *data;
+ struct mtk_cpufreq_domain *data;
struct device *dev = &pdev->dev;
struct resource *res;
struct of_phandle_args args;
@@ -180,6 +242,15 @@ static int mtk_cpu_resources_init(struct platform_device *pdev,
index = args.args[0];
of_node_put(args.np);
+ /*
+ * In a cpufreq with hybrid DVFS, such as the MT8196, the first declared
+ * register range is for FDVFS, followed by the frequency domain MMIOs.
+ */
+ if (priv->variant->is_hybrid_dvfs)
+ index++;
+
+ data->parent = priv;
+
res = platform_get_resource(pdev, IORESOURCE_MEM, index);
if (!res) {
dev_err(dev, "failed to get mem resource %d\n", index);
@@ -202,7 +273,7 @@ static int mtk_cpu_resources_init(struct platform_device *pdev,
data->res = res;
for (i = REG_FREQ_LUT_TABLE; i < REG_ARRAY_SIZE; i++)
- data->reg_bases[i] = base + offsets[i];
+ data->reg_bases[i] = base + priv->variant->reg_offsets[i];
ret = mtk_cpu_create_freq_table(pdev, data);
if (ret) {
@@ -223,7 +294,7 @@ static int mtk_cpufreq_hw_cpu_init(struct cpufreq_policy *policy)
{
struct platform_device *pdev = cpufreq_get_driver_data();
int sig, pwr_hw = CPUFREQ_HW_STATUS | SVS_HW_STATUS;
- struct mtk_cpufreq_data *data;
+ struct mtk_cpufreq_domain *data;
unsigned int latency;
int ret;
@@ -262,7 +333,7 @@ static int mtk_cpufreq_hw_cpu_init(struct cpufreq_policy *policy)
static void mtk_cpufreq_hw_cpu_exit(struct cpufreq_policy *policy)
{
- struct mtk_cpufreq_data *data = policy->driver_data;
+ struct mtk_cpufreq_domain *data = policy->driver_data;
struct resource *res = data->res;
void __iomem *base = data->base;
@@ -275,7 +346,7 @@ static void mtk_cpufreq_hw_cpu_exit(struct cpufreq_policy *policy)
static void mtk_cpufreq_register_em(struct cpufreq_policy *policy)
{
struct em_data_callback em_cb = EM_DATA_CB(mtk_cpufreq_get_cpu_power);
- struct mtk_cpufreq_data *data = policy->driver_data;
+ struct mtk_cpufreq_domain *data = policy->driver_data;
em_dev_register_perf_domain(get_cpu_device(policy->cpu), data->nr_opp,
&em_cb, policy->cpus, true);
@@ -297,6 +368,7 @@ static struct cpufreq_driver cpufreq_mtk_hw_driver = {
static int mtk_cpufreq_hw_driver_probe(struct platform_device *pdev)
{
+ struct mtk_cpufreq_priv *priv;
const void *data;
int ret, cpu;
struct device *cpu_dev;
@@ -320,7 +392,20 @@ static int mtk_cpufreq_hw_driver_probe(struct platform_device *pdev)
if (!data)
return -EINVAL;
- platform_set_drvdata(pdev, (void *) data);
+ priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->variant = data;
+ priv->dev = &pdev->dev;
+
+ if (priv->variant->init) {
+ ret = priv->variant->init(priv);
+ if (ret)
+ return ret;
+ }
+
+ platform_set_drvdata(pdev, priv);
cpufreq_mtk_hw_driver.driver_data = pdev;
ret = cpufreq_register_driver(&cpufreq_mtk_hw_driver);
@@ -336,7 +421,8 @@ static void mtk_cpufreq_hw_driver_remove(struct platform_device *pdev)
}
static const struct of_device_id mtk_cpufreq_hw_match[] = {
- { .compatible = "mediatek,cpufreq-hw", .data = &cpufreq_mtk_offsets },
+ { .compatible = "mediatek,cpufreq-hw", .data = &cpufreq_mtk_base_variant },
+ { .compatible = "mediatek,mt8196-cpufreq-hw", .data = &cpufreq_mtk_mt8196_variant },
{}
};
MODULE_DEVICE_TABLE(of, mtk_cpufreq_hw_match);
diff --git a/drivers/cpufreq/mediatek-cpufreq.c b/drivers/cpufreq/mediatek-cpufreq.c
index f3f02c4b6888..00de1166188a 100644
--- a/drivers/cpufreq/mediatek-cpufreq.c
+++ b/drivers/cpufreq/mediatek-cpufreq.c
@@ -123,7 +123,7 @@ static int mtk_cpufreq_voltage_tracking(struct mtk_cpu_dvfs_info *info,
soc_data->sram_max_volt);
return ret;
}
- } else if (pre_vproc > new_vproc) {
+ } else {
vproc = max(new_vproc,
pre_vsram - soc_data->max_volt_shift);
ret = regulator_set_voltage(proc_reg, vproc,
@@ -320,7 +320,6 @@ static int mtk_cpufreq_opp_notifier(struct notifier_block *nb,
struct dev_pm_opp *new_opp;
struct mtk_cpu_dvfs_info *info;
unsigned long freq, volt;
- struct cpufreq_policy *policy;
int ret = 0;
info = container_of(nb, struct mtk_cpu_dvfs_info, opp_nb);
@@ -353,12 +352,12 @@ static int mtk_cpufreq_opp_notifier(struct notifier_block *nb,
}
dev_pm_opp_put(new_opp);
- policy = cpufreq_cpu_get(info->opp_cpu);
- if (policy) {
+
+ struct cpufreq_policy *policy __free(put_cpufreq_policy)
+ = cpufreq_cpu_get(info->opp_cpu);
+ if (policy)
cpufreq_driver_target(policy, freq / 1000,
CPUFREQ_RELATION_L);
- cpufreq_cpu_put(policy);
- }
}
}
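/*
 * A minimal sketch of the scope-based cleanup pattern the hunk above
 * converts to: __free(put_cpufreq_policy) ties cpufreq_cpu_put() to the
 * variable's scope, so every return path drops the policy reference.
 * The function name and target frequency are illustrative only.
 */
static void example_retarget_cpu0(unsigned int target_khz)
{
	struct cpufreq_policy *policy __free(put_cpufreq_policy) =
		cpufreq_cpu_get(0);

	if (!policy)
		return;

	/* reference released automatically when policy leaves scope */
	cpufreq_driver_target(policy, target_khz, CPUFREQ_RELATION_L);
}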
diff --git a/drivers/cpufreq/qcom-cpufreq-nvmem.c b/drivers/cpufreq/qcom-cpufreq-nvmem.c
index 54f8117103c8..765a5bb81829 100644
--- a/drivers/cpufreq/qcom-cpufreq-nvmem.c
+++ b/drivers/cpufreq/qcom-cpufreq-nvmem.c
@@ -200,6 +200,10 @@ static int qcom_cpufreq_kryo_name_version(struct device *cpu_dev,
case QCOM_ID_IPQ9574:
drv->versions = 1 << (unsigned int)(*speedbin);
break;
+ case QCOM_ID_IPQ5424:
+ case QCOM_ID_IPQ5404:
+ drv->versions = (*speedbin == 0x3b) ? BIT(1) : BIT(0);
+ break;
case QCOM_ID_MSM8996SG:
case QCOM_ID_APQ8096SG:
drv->versions = 1 << ((unsigned int)(*speedbin) + 4);
@@ -591,6 +595,7 @@ static const struct of_device_id qcom_cpufreq_match_list[] __initconst __maybe_u
{ .compatible = "qcom,msm8996", .data = &match_data_kryo },
{ .compatible = "qcom,qcs404", .data = &match_data_qcs404 },
{ .compatible = "qcom,ipq5332", .data = &match_data_kryo },
+ { .compatible = "qcom,ipq5424", .data = &match_data_kryo },
{ .compatible = "qcom,ipq6018", .data = &match_data_ipq6018 },
{ .compatible = "qcom,ipq8064", .data = &match_data_ipq8064 },
{ .compatible = "qcom,ipq8074", .data = &match_data_ipq8074 },
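/*
 * Sketch of how the new IPQ5424 case feeds OPP selection: drv->versions
 * is a bitmask later matched against the opp-supported-hw values in the
 * device tree, so decoding the fuse to BIT(n) picks which OPP rows
 * apply. The helper name is illustrative; 0x3b is the fuse value from
 * the hunk above.
 */
static u32 example_ipq5424_versions(u8 speedbin)
{
	return (speedbin == 0x3b) ? BIT(1) : BIT(0);
}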
diff --git a/drivers/cpufreq/s5pv210-cpufreq.c b/drivers/cpufreq/s5pv210-cpufreq.c
index 76c888ed8d16..4215621deb3f 100644
--- a/drivers/cpufreq/s5pv210-cpufreq.c
+++ b/drivers/cpufreq/s5pv210-cpufreq.c
@@ -554,17 +554,15 @@ out_dmc0:
static int s5pv210_cpufreq_reboot_notifier_event(struct notifier_block *this,
unsigned long event, void *ptr)
{
+ struct cpufreq_policy *policy __free(put_cpufreq_policy) = cpufreq_cpu_get(0);
int ret;
- struct cpufreq_policy *policy;
- policy = cpufreq_cpu_get(0);
if (!policy) {
pr_debug("cpufreq: get no policy for cpu0\n");
return NOTIFY_BAD;
}
ret = cpufreq_driver_target(policy, SLEEP_FREQ, 0);
- cpufreq_cpu_put(policy);
if (ret < 0)
return NOTIFY_BAD;
diff --git a/drivers/cpufreq/scmi-cpufreq.c b/drivers/cpufreq/scmi-cpufreq.c
index ef078426bfd5..38c165d526d1 100644
--- a/drivers/cpufreq/scmi-cpufreq.c
+++ b/drivers/cpufreq/scmi-cpufreq.c
@@ -15,6 +15,7 @@
#include <linux/energy_model.h>
#include <linux/export.h>
#include <linux/module.h>
+#include <linux/of.h>
#include <linux/pm_opp.h>
#include <linux/pm_qos.h>
#include <linux/slab.h>
@@ -424,6 +425,15 @@ static bool scmi_dev_used_by_cpus(struct device *scmi_dev)
return true;
}
+ /*
+ * Older Broadcom STB chips had a "clocks" property on the CPU node(s)
+ * that did not match the SCMI performance protocol node. If we get
+ * here, we are running with such an older Device Tree, so return true
+ * to preserve backwards compatibility.
+ */
+ if (of_machine_is_compatible("brcm,brcmstb"))
+ return true;
+
return false;
}
diff --git a/drivers/cpufreq/sh-cpufreq.c b/drivers/cpufreq/sh-cpufreq.c
index 9c0b01e00508..642ddb9ea217 100644
--- a/drivers/cpufreq/sh-cpufreq.c
+++ b/drivers/cpufreq/sh-cpufreq.c
@@ -89,11 +89,9 @@ static int sh_cpufreq_target(struct cpufreq_policy *policy,
static int sh_cpufreq_verify(struct cpufreq_policy_data *policy)
{
struct clk *cpuclk = &per_cpu(sh_cpuclk, policy->cpu);
- struct cpufreq_frequency_table *freq_table;
- freq_table = cpuclk->nr_freqs ? cpuclk->freq_table : NULL;
- if (freq_table)
- return cpufreq_frequency_table_verify(policy, freq_table);
+ if (policy->freq_table)
+ return cpufreq_frequency_table_verify(policy);
cpufreq_verify_within_cpu_limits(policy);
diff --git a/drivers/cpufreq/speedstep-lib.c b/drivers/cpufreq/speedstep-lib.c
index 0b66df4ed513..f8b42e981635 100644
--- a/drivers/cpufreq/speedstep-lib.c
+++ b/drivers/cpufreq/speedstep-lib.c
@@ -378,16 +378,16 @@ EXPORT_SYMBOL_GPL(speedstep_detect_processor);
* DETECT SPEEDSTEP SPEEDS *
*********************************************************************/
-unsigned int speedstep_get_freqs(enum speedstep_processor processor,
- unsigned int *low_speed,
- unsigned int *high_speed,
- unsigned int *transition_latency,
- void (*set_state) (unsigned int state))
+int speedstep_get_freqs(enum speedstep_processor processor,
+ unsigned int *low_speed,
+ unsigned int *high_speed,
+ unsigned int *transition_latency,
+ void (*set_state)(unsigned int state))
{
unsigned int prev_speed;
- unsigned int ret = 0;
unsigned long flags;
ktime_t tv1, tv2;
+ int ret = 0;
if ((!processor) || (!low_speed) || (!high_speed) || (!set_state))
return -EINVAL;
diff --git a/drivers/cpufreq/speedstep-lib.h b/drivers/cpufreq/speedstep-lib.h
index dc762ea786be..48329647d4c4 100644
--- a/drivers/cpufreq/speedstep-lib.h
+++ b/drivers/cpufreq/speedstep-lib.h
@@ -41,8 +41,8 @@ extern unsigned int speedstep_get_frequency(enum speedstep_processor processor);
* SPEEDSTEP_LOW; the second argument is zero so that no
* cpufreq_notify_transition calls are initiated.
*/
-extern unsigned int speedstep_get_freqs(enum speedstep_processor processor,
- unsigned int *low_speed,
- unsigned int *high_speed,
- unsigned int *transition_latency,
- void (*set_state) (unsigned int state));
+extern int speedstep_get_freqs(enum speedstep_processor processor,
+ unsigned int *low_speed,
+ unsigned int *high_speed,
+ unsigned int *transition_latency,
+ void (*set_state)(unsigned int state));
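/*
 * Why the signature change above matters: returning -EINVAL through an
 * unsigned int wraps to a large positive value, so "if (ret < 0)" at a
 * call site can never fire. Minimal illustration (function names are
 * made up):
 */
static unsigned int lossy_err(void)
{
	return -EINVAL;		/* caller sees 4294967274, never < 0 */
}

static int faithful_err(void)
{
	return -EINVAL;		/* "ret < 0" works as intended */
}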
diff --git a/drivers/cpufreq/tegra186-cpufreq.c b/drivers/cpufreq/tegra186-cpufreq.c
index cbabb726c664..4270686fc3e3 100644
--- a/drivers/cpufreq/tegra186-cpufreq.c
+++ b/drivers/cpufreq/tegra186-cpufreq.c
@@ -103,13 +103,12 @@ static int tegra186_cpufreq_set_target(struct cpufreq_policy *policy,
static unsigned int tegra186_cpufreq_get(unsigned int cpu)
{
+ struct cpufreq_policy *policy __free(put_cpufreq_policy) = cpufreq_cpu_get(cpu);
struct tegra186_cpufreq_data *data = cpufreq_get_driver_data();
struct tegra186_cpufreq_cluster *cluster;
- struct cpufreq_policy *policy;
unsigned int edvd_offset, cluster_id;
u32 ndiv;
- policy = cpufreq_cpu_get(cpu);
if (!policy)
return 0;
@@ -117,7 +116,6 @@ static unsigned int tegra186_cpufreq_get(unsigned int cpu)
ndiv = readl(data->regs + edvd_offset) & EDVD_CORE_VOLT_FREQ_F_MASK;
cluster_id = data->cpus[policy->cpu].bpmp_cluster_id;
cluster = &data->clusters[cluster_id];
- cpufreq_cpu_put(policy);
return (cluster->ref_clk_khz * ndiv) / cluster->div;
}
diff --git a/drivers/cpufreq/ti-cpufreq.c b/drivers/cpufreq/ti-cpufreq.c
index 5a5147277cd0..6ee76f5fe9c5 100644
--- a/drivers/cpufreq/ti-cpufreq.c
+++ b/drivers/cpufreq/ti-cpufreq.c
@@ -72,7 +72,9 @@ enum {
#define AM62P5_EFUSE_O_MPU_OPP 15
#define AM62P5_EFUSE_S_MPU_OPP 19
+#define AM62P5_EFUSE_T_MPU_OPP 20
#define AM62P5_EFUSE_U_MPU_OPP 21
+#define AM62P5_EFUSE_V_MPU_OPP 22
#define AM62P5_SUPPORT_O_MPU_OPP BIT(0)
#define AM62P5_SUPPORT_U_MPU_OPP BIT(2)
@@ -153,7 +155,9 @@ static unsigned long am62p5_efuse_xlate(struct ti_cpufreq_data *opp_data,
unsigned long calculated_efuse = AM62P5_SUPPORT_O_MPU_OPP;
switch (efuse) {
+ case AM62P5_EFUSE_V_MPU_OPP:
case AM62P5_EFUSE_U_MPU_OPP:
+ case AM62P5_EFUSE_T_MPU_OPP:
case AM62P5_EFUSE_S_MPU_OPP:
calculated_efuse |= AM62P5_SUPPORT_U_MPU_OPP;
fallthrough;
@@ -307,9 +311,10 @@ static struct ti_cpufreq_soc_data am3517_soc_data = {
};
static const struct soc_device_attribute k3_cpufreq_soc[] = {
- { .family = "AM62X", .revision = "SR1.0" },
- { .family = "AM62AX", .revision = "SR1.0" },
- { .family = "AM62PX", .revision = "SR1.0" },
+ { .family = "AM62X", },
+ { .family = "AM62AX", },
+ { .family = "AM62PX", },
+ { .family = "AM62DX", },
{ /* sentinel */ }
};
@@ -457,6 +462,7 @@ static const struct of_device_id ti_cpufreq_of_match[] __maybe_unused = {
{ .compatible = "ti,omap36xx", .data = &omap36xx_soc_data, },
{ .compatible = "ti,am625", .data = &am625_soc_data, },
{ .compatible = "ti,am62a7", .data = &am62a7_soc_data, },
+ { .compatible = "ti,am62d2", .data = &am62a7_soc_data, },
{ .compatible = "ti,am62p5", .data = &am62p5_soc_data, },
/* legacy */
{ .compatible = "ti,omap3430", .data = &omap34xx_soc_data, },
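/*
 * The am62p5_efuse_xlate() switch above relies on cumulative
 * fallthrough: a faster speed grade adds its own OPP-support bit and
 * then falls into every slower grade's case. Generic sketch of the
 * idiom with made-up grade and support names:
 */
enum example_grade { GRADE_MID, GRADE_FAST };

#define SUPPORT_BASE	BIT(0)
#define SUPPORT_MID	BIT(1)
#define SUPPORT_FAST	BIT(2)

static u32 example_supported_opps(enum example_grade grade)
{
	u32 supported = SUPPORT_BASE;	/* every grade supports the base OPP */

	switch (grade) {
	case GRADE_FAST:
		supported |= SUPPORT_FAST;
		fallthrough;
	case GRADE_MID:
		supported |= SUPPORT_MID;
		break;
	}

	return supported;
}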
diff --git a/drivers/cpufreq/virtual-cpufreq.c b/drivers/cpufreq/virtual-cpufreq.c
index 7dd1b0c263c7..6ffa16d239b2 100644
--- a/drivers/cpufreq/virtual-cpufreq.c
+++ b/drivers/cpufreq/virtual-cpufreq.c
@@ -250,7 +250,7 @@ static int virt_cpufreq_offline(struct cpufreq_policy *policy)
static int virt_cpufreq_verify_policy(struct cpufreq_policy_data *policy)
{
if (policy->freq_table)
- return cpufreq_frequency_table_verify(policy, policy->freq_table);
+ return cpufreq_frequency_table_verify(policy);
cpufreq_verify_within_cpu_limits(policy);
return 0;
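/*
 * With cpufreq_frequency_table_verify() now taking the table from
 * policy->freq_table itself, a driver ->verify callback reduces to the
 * shape below (shown for an illustrative driver):
 */
static int example_verify_policy(struct cpufreq_policy_data *policy)
{
	if (policy->freq_table)
		return cpufreq_frequency_table_verify(policy);

	cpufreq_verify_within_cpu_limits(policy);
	return 0;
}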
diff --git a/drivers/cpuidle/cpuidle-qcom-spm.c b/drivers/cpuidle/cpuidle-qcom-spm.c
index 5f386761b156..7ab6f68b96a8 100644
--- a/drivers/cpuidle/cpuidle-qcom-spm.c
+++ b/drivers/cpuidle/cpuidle-qcom-spm.c
@@ -86,9 +86,9 @@ static const struct of_device_id qcom_idle_state_match[] = {
static int spm_cpuidle_register(struct device *cpuidle_dev, int cpu)
{
- struct platform_device *pdev = NULL;
+ struct platform_device *pdev;
struct device_node *cpu_node, *saw_node;
- struct cpuidle_qcom_spm_data *data = NULL;
+ struct cpuidle_qcom_spm_data *data;
int ret;
cpu_node = of_cpu_device_node_get(cpu);
@@ -96,20 +96,23 @@ static int spm_cpuidle_register(struct device *cpuidle_dev, int cpu)
return -ENODEV;
saw_node = of_parse_phandle(cpu_node, "qcom,saw", 0);
+ of_node_put(cpu_node);
if (!saw_node)
return -ENODEV;
pdev = of_find_device_by_node(saw_node);
of_node_put(saw_node);
- of_node_put(cpu_node);
if (!pdev)
return -ENODEV;
data = devm_kzalloc(cpuidle_dev, sizeof(*data), GFP_KERNEL);
- if (!data)
+ if (!data) {
+ put_device(&pdev->dev);
return -ENOMEM;
+ }
data->spm = dev_get_drvdata(&pdev->dev);
+ put_device(&pdev->dev);
if (!data->spm)
return -EINVAL;
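/*
 * Reference discipline behind the fixes above: of_parse_phandle() and
 * of_find_device_by_node() each take a reference that must be dropped
 * on every path, including early errors. A sketch of the corrected
 * ordering (the helper name is illustrative):
 */
static struct platform_device *example_get_saw_pdev(int cpu)
{
	struct device_node *cpu_node, *saw_node;
	struct platform_device *pdev;

	cpu_node = of_cpu_device_node_get(cpu);
	if (!cpu_node)
		return NULL;

	saw_node = of_parse_phandle(cpu_node, "qcom,saw", 0);
	of_node_put(cpu_node);		/* done with cpu_node either way */
	if (!saw_node)
		return NULL;

	pdev = of_find_device_by_node(saw_node);
	of_node_put(saw_node);

	return pdev;	/* caller must put_device(&pdev->dev) when done */
}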
diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c
index 0835da449db8..56132e843c99 100644
--- a/drivers/cpuidle/cpuidle.c
+++ b/drivers/cpuidle/cpuidle.c
@@ -635,8 +635,14 @@ static void __cpuidle_device_init(struct cpuidle_device *dev)
static int __cpuidle_register_device(struct cpuidle_device *dev)
{
struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);
+ unsigned int cpu = dev->cpu;
int i, ret;
+ if (per_cpu(cpuidle_devices, cpu)) {
+ pr_info("CPU%d: cpuidle device already registered\n", cpu);
+ return -EEXIST;
+ }
+
if (!try_module_get(drv->owner))
return -EINVAL;
@@ -648,7 +654,7 @@ static int __cpuidle_register_device(struct cpuidle_device *dev)
dev->states_usage[i].disable |= CPUIDLE_STATE_DISABLED_BY_USER;
}
- per_cpu(cpuidle_devices, dev->cpu) = dev;
+ per_cpu(cpuidle_devices, cpu) = dev;
list_add(&dev->device_list, &cpuidle_detected_devices);
ret = cpuidle_coupled_register_device(dev);
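/*
 * The guard added above is the usual per-CPU registry idiom: the
 * per_cpu slot doubles as the "already registered" flag, so checking it
 * first keeps a duplicate registration from corrupting the device list.
 * Generic sketch (registry and type names are illustrative):
 */
struct example_dev;
static DEFINE_PER_CPU(struct example_dev *, example_devices);

static int example_register(struct example_dev *dev, unsigned int cpu)
{
	if (per_cpu(example_devices, cpu))
		return -EEXIST;

	per_cpu(example_devices, cpu) = dev;
	return 0;
}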
diff --git a/drivers/cpuidle/governors/menu.c b/drivers/cpuidle/governors/menu.c
index b2e3d0b0a116..4d9aa5ce31f0 100644
--- a/drivers/cpuidle/governors/menu.c
+++ b/drivers/cpuidle/governors/menu.c
@@ -314,45 +314,47 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
if (s->exit_latency_ns > latency_req)
break;
- if (s->target_residency_ns > predicted_ns) {
- /*
- * Use a physical idle state, not busy polling, unless
- * a timer is going to trigger soon enough.
- */
- if ((drv->states[idx].flags & CPUIDLE_FLAG_POLLING) &&
- s->target_residency_ns <= data->next_timer_ns) {
- predicted_ns = s->target_residency_ns;
- idx = i;
- break;
- }
- if (predicted_ns < TICK_NSEC)
- break;
-
- if (!tick_nohz_tick_stopped()) {
- /*
- * If the state selected so far is shallow,
- * waking up early won't hurt, so retain the
- * tick in that case and let the governor run
- * again in the next iteration of the loop.
- */
- predicted_ns = drv->states[idx].target_residency_ns;
- break;
- }
+ if (s->target_residency_ns <= predicted_ns) {
+ idx = i;
+ continue;
+ }
+
+ /*
+ * Use a physical idle state, not busy polling, unless a timer
+ * is going to trigger soon enough.
+ */
+ if ((drv->states[idx].flags & CPUIDLE_FLAG_POLLING) &&
+ s->target_residency_ns <= data->next_timer_ns) {
+ predicted_ns = s->target_residency_ns;
+ idx = i;
+ break;
+ }
+ if (predicted_ns < TICK_NSEC)
+ break;
+
+ if (!tick_nohz_tick_stopped()) {
/*
- * If the state selected so far is shallow and this
- * state's target residency matches the time till the
- * closest timer event, select this one to avoid getting
- * stuck in the shallow one for too long.
+ * If the state selected so far is shallow, waking up
+ * early won't hurt, so retain the tick in that case and
+ * let the governor run again in the next iteration of
+ * the idle loop.
*/
- if (drv->states[idx].target_residency_ns < TICK_NSEC &&
- s->target_residency_ns <= delta_tick)
- idx = i;
-
- return idx;
+ predicted_ns = drv->states[idx].target_residency_ns;
+ break;
}
- idx = i;
+ /*
+ * If the state selected so far is shallow and this state's
+ * target residency matches the time till the closest timer
+ * event, select this one to avoid getting stuck in the shallow
+ * one for too long.
+ */
+ if (drv->states[idx].target_residency_ns < TICK_NSEC &&
+ s->target_residency_ns <= delta_tick)
+ idx = i;
+
+ return idx;
}
if (idx == -1)
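/*
 * The menu_select() rewrite above is a pure control-flow flattening:
 * the old "if (s->target_residency_ns > predicted_ns) { deep block }"
 * is inverted into an early "if (... <= predicted_ns) { idx = i;
 * continue; }", so the remaining logic loses one indentation level
 * without changing behaviour. The same shape, reduced to a toy
 * selector:
 */
static int example_pick(const int *cost, int n, int budget)
{
	int i, idx = -1;

	for (i = 0; i < n; i++) {
		if (cost[i] <= budget) {	/* was the nested arm */
			idx = i;
			continue;
		}
		break;		/* formerly nested handling, now top level */
	}

	return idx;
}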
diff --git a/drivers/cpuidle/sysfs.c b/drivers/cpuidle/sysfs.c
index d6f5da61cb7d..61de64817604 100644
--- a/drivers/cpuidle/sysfs.c
+++ b/drivers/cpuidle/sysfs.c
@@ -27,14 +27,14 @@ static ssize_t show_available_governors(struct device *dev,
mutex_lock(&cpuidle_lock);
list_for_each_entry(tmp, &cpuidle_governors, governor_list) {
- if (i >= (ssize_t) (PAGE_SIZE - (CPUIDLE_NAME_LEN + 2)))
+ if (i >= (ssize_t)(PAGE_SIZE - (CPUIDLE_NAME_LEN + 2)))
goto out;
- i += scnprintf(&buf[i], CPUIDLE_NAME_LEN + 1, "%s ", tmp->name);
+ i += sysfs_emit_at(buf, i, "%.*s ", CPUIDLE_NAME_LEN, tmp->name);
}
out:
- i+= sprintf(&buf[i], "\n");
+ i += sysfs_emit_at(buf, i, "\n");
mutex_unlock(&cpuidle_lock);
return i;
}
@@ -49,9 +49,9 @@ static ssize_t show_current_driver(struct device *dev,
spin_lock(&cpuidle_driver_lock);
drv = cpuidle_get_driver();
if (drv)
- ret = sprintf(buf, "%s\n", drv->name);
+ ret = sysfs_emit(buf, "%s\n", drv->name);
else
- ret = sprintf(buf, "none\n");
+ ret = sysfs_emit(buf, "none\n");
spin_unlock(&cpuidle_driver_lock);
return ret;
@@ -65,9 +65,9 @@ static ssize_t show_current_governor(struct device *dev,
mutex_lock(&cpuidle_lock);
if (cpuidle_curr_governor)
- ret = sprintf(buf, "%s\n", cpuidle_curr_governor->name);
+ ret = sysfs_emit(buf, "%s\n", cpuidle_curr_governor->name);
else
- ret = sprintf(buf, "none\n");
+ ret = sysfs_emit(buf, "none\n");
mutex_unlock(&cpuidle_lock);
return ret;
@@ -230,7 +230,7 @@ static struct cpuidle_state_attr attr_##_name = __ATTR(_name, 0644, show, store)
static ssize_t show_state_##_name(struct cpuidle_state *state, \
struct cpuidle_state_usage *state_usage, char *buf) \
{ \
- return sprintf(buf, "%u\n", state->_name);\
+ return sysfs_emit(buf, "%u\n", state->_name);\
}
#define define_show_state_ull_function(_name) \
@@ -238,7 +238,7 @@ static ssize_t show_state_##_name(struct cpuidle_state *state, \
struct cpuidle_state_usage *state_usage, \
char *buf) \
{ \
- return sprintf(buf, "%llu\n", state_usage->_name);\
+ return sysfs_emit(buf, "%llu\n", state_usage->_name);\
}
#define define_show_state_str_function(_name) \
@@ -247,8 +247,8 @@ static ssize_t show_state_##_name(struct cpuidle_state *state, \
char *buf) \
{ \
if (state->_name[0] == '\0')\
- return sprintf(buf, "<null>\n");\
- return sprintf(buf, "%s\n", state->_name);\
+ return sysfs_emit(buf, "<null>\n");\
+ return sysfs_emit(buf, "%s\n", state->_name);\
}
#define define_show_state_time_function(_name) \
@@ -256,7 +256,7 @@ static ssize_t show_state_##_name(struct cpuidle_state *state, \
struct cpuidle_state_usage *state_usage, \
char *buf) \
{ \
- return sprintf(buf, "%llu\n", ktime_to_us(state->_name##_ns)); \
+ return sysfs_emit(buf, "%llu\n", ktime_to_us(state->_name##_ns)); \
}
define_show_state_time_function(exit_latency)
@@ -273,14 +273,14 @@ static ssize_t show_state_time(struct cpuidle_state *state,
struct cpuidle_state_usage *state_usage,
char *buf)
{
- return sprintf(buf, "%llu\n", ktime_to_us(state_usage->time_ns));
+ return sysfs_emit(buf, "%llu\n", ktime_to_us(state_usage->time_ns));
}
static ssize_t show_state_disable(struct cpuidle_state *state,
struct cpuidle_state_usage *state_usage,
char *buf)
{
- return sprintf(buf, "%llu\n",
+ return sysfs_emit(buf, "%llu\n",
state_usage->disable & CPUIDLE_STATE_DISABLED_BY_USER);
}
@@ -310,7 +310,7 @@ static ssize_t show_state_default_status(struct cpuidle_state *state,
struct cpuidle_state_usage *state_usage,
char *buf)
{
- return sprintf(buf, "%s\n",
+ return sysfs_emit(buf, "%s\n",
state->flags & CPUIDLE_FLAG_OFF ? "disabled" : "enabled");
}
@@ -358,7 +358,7 @@ static ssize_t show_state_s2idle_##_name(struct cpuidle_state *state, \
struct cpuidle_state_usage *state_usage, \
char *buf) \
{ \
- return sprintf(buf, "%llu\n", state_usage->s2idle_##_name);\
+ return sysfs_emit(buf, "%llu\n", state_usage->s2idle_##_name);\
}
define_show_state_s2idle_ull_function(usage);
@@ -550,7 +550,7 @@ static ssize_t show_driver_name(struct cpuidle_driver *drv, char *buf)
ssize_t ret;
spin_lock(&cpuidle_driver_lock);
- ret = sprintf(buf, "%s\n", drv ? drv->name : "none");
+ ret = sysfs_emit(buf, "%s\n", drv ? drv->name : "none");
spin_unlock(&cpuidle_driver_lock);
return ret;
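/*
 * sysfs_emit() and sysfs_emit_at() are the preferred replacements for
 * sprintf() in sysfs ->show() callbacks: they know the buffer is a full
 * page and refuse to run past it. Minimal sketch of a converted
 * callback (the attribute is illustrative):
 */
static ssize_t example_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%s\n", "example");
}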
diff --git a/drivers/devfreq/event/rockchip-dfi.c b/drivers/devfreq/event/rockchip-dfi.c
index 0470d7c175f4..5a2c9badcc64 100644
--- a/drivers/devfreq/event/rockchip-dfi.c
+++ b/drivers/devfreq/event/rockchip-dfi.c
@@ -34,15 +34,18 @@
/* DDRMON_CTRL */
#define DDRMON_CTRL 0x04
+#define DDRMON_CTRL_LPDDR5 BIT(6)
#define DDRMON_CTRL_DDR4 BIT(5)
#define DDRMON_CTRL_LPDDR4 BIT(4)
#define DDRMON_CTRL_HARDWARE_EN BIT(3)
#define DDRMON_CTRL_LPDDR23 BIT(2)
#define DDRMON_CTRL_SOFTWARE_EN BIT(1)
#define DDRMON_CTRL_TIMER_CNT_EN BIT(0)
-#define DDRMON_CTRL_DDR_TYPE_MASK (DDRMON_CTRL_DDR4 | \
+#define DDRMON_CTRL_DDR_TYPE_MASK (DDRMON_CTRL_LPDDR5 | \
+ DDRMON_CTRL_DDR4 | \
DDRMON_CTRL_LPDDR4 | \
DDRMON_CTRL_LPDDR23)
+#define DDRMON_CTRL_LP5_BANK_MODE_MASK GENMASK(8, 7)
#define DDRMON_CH0_WR_NUM 0x20
#define DDRMON_CH0_RD_NUM 0x24
@@ -116,12 +119,60 @@ struct rockchip_dfi {
int buswidth[DMC_MAX_CHANNELS];
int ddrmon_stride;
bool ddrmon_ctrl_single;
+ u32 lp5_bank_mode;
+ bool lp5_ckr; /* true if in 4:1 command-to-data clock ratio mode */
+ unsigned int count_multiplier; /* number of data clocks per count */
};
+static int rockchip_dfi_ddrtype_to_ctrl(struct rockchip_dfi *dfi, u32 *ctrl,
+ u32 *mask)
+{
+ u32 ddrmon_ver;
+
+ *mask = DDRMON_CTRL_DDR_TYPE_MASK;
+
+ switch (dfi->ddr_type) {
+ case ROCKCHIP_DDRTYPE_LPDDR2:
+ case ROCKCHIP_DDRTYPE_LPDDR3:
+ *ctrl = DDRMON_CTRL_LPDDR23;
+ break;
+ case ROCKCHIP_DDRTYPE_LPDDR4:
+ case ROCKCHIP_DDRTYPE_LPDDR4X:
+ *ctrl = DDRMON_CTRL_LPDDR4;
+ break;
+ case ROCKCHIP_DDRTYPE_LPDDR5:
+ ddrmon_ver = readl_relaxed(dfi->regs);
+ if (ddrmon_ver < 0x40) {
+ *ctrl = DDRMON_CTRL_LPDDR5 | dfi->lp5_bank_mode;
+ *mask |= DDRMON_CTRL_LP5_BANK_MODE_MASK;
+ break;
+ }
+
+ /*
+ * It is unknown whether the unpleasant special-case
+ * behaviour of the vendor kernel is needed on any
+ * shipping hardware, so ask users with such hardware
+ * to report it.
+ */
+ dev_err(&dfi->edev->dev,
+ "unsupported DDRMON version 0x%04X, please let linux-rockchip know!\n",
+ ddrmon_ver);
+ return -EOPNOTSUPP;
+ default:
+ dev_err(&dfi->edev->dev, "unsupported memory type 0x%X\n",
+ dfi->ddr_type);
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
static int rockchip_dfi_enable(struct rockchip_dfi *dfi)
{
void __iomem *dfi_regs = dfi->regs;
int i, ret = 0;
+ u32 ctrl;
+ u32 ctrl_mask;
mutex_lock(&dfi->mutex);
@@ -135,8 +186,11 @@ static int rockchip_dfi_enable(struct rockchip_dfi *dfi)
goto out;
}
+ ret = rockchip_dfi_ddrtype_to_ctrl(dfi, &ctrl, &ctrl_mask);
+ if (ret)
+ goto out;
+
for (i = 0; i < dfi->max_channels; i++) {
- u32 ctrl = 0;
if (!(dfi->channel_mask & BIT(i)))
continue;
@@ -146,21 +200,7 @@ static int rockchip_dfi_enable(struct rockchip_dfi *dfi)
DDRMON_CTRL_SOFTWARE_EN | DDRMON_CTRL_HARDWARE_EN),
dfi_regs + i * dfi->ddrmon_stride + DDRMON_CTRL);
- /* set ddr type to dfi */
- switch (dfi->ddr_type) {
- case ROCKCHIP_DDRTYPE_LPDDR2:
- case ROCKCHIP_DDRTYPE_LPDDR3:
- ctrl = DDRMON_CTRL_LPDDR23;
- break;
- case ROCKCHIP_DDRTYPE_LPDDR4:
- case ROCKCHIP_DDRTYPE_LPDDR4X:
- ctrl = DDRMON_CTRL_LPDDR4;
- break;
- default:
- break;
- }
-
- writel_relaxed(HIWORD_UPDATE(ctrl, DDRMON_CTRL_DDR_TYPE_MASK),
+ writel_relaxed(HIWORD_UPDATE(ctrl, ctrl_mask),
dfi_regs + i * dfi->ddrmon_stride + DDRMON_CTRL);
/* enable count, use software mode */
@@ -435,7 +475,7 @@ static u64 rockchip_ddr_perf_event_get_count(struct perf_event *event)
switch (event->attr.config) {
case PERF_EVENT_CYCLES:
- count = total.c[0].clock_cycles;
+ count = total.c[0].clock_cycles * dfi->count_multiplier;
break;
case PERF_EVENT_READ_BYTES:
for (i = 0; i < dfi->max_channels; i++)
@@ -651,10 +691,14 @@ static int rockchip_ddr_perf_init(struct rockchip_dfi *dfi)
break;
case ROCKCHIP_DDRTYPE_LPDDR4:
case ROCKCHIP_DDRTYPE_LPDDR4X:
+ case ROCKCHIP_DDRTYPE_LPDDR5:
dfi->burst_len = 16;
break;
}
+ if (!dfi->count_multiplier)
+ dfi->count_multiplier = 1;
+
ret = perf_pmu_register(pmu, "rockchip_ddr", -1);
if (ret)
return ret;
@@ -726,7 +770,7 @@ static int rk3568_dfi_init(struct rockchip_dfi *dfi)
static int rk3588_dfi_init(struct rockchip_dfi *dfi)
{
struct regmap *regmap_pmu = dfi->regmap_pmu;
- u32 reg2, reg3, reg4;
+ u32 reg2, reg3, reg4, reg6;
regmap_read(regmap_pmu, RK3588_PMUGRF_OS_REG2, &reg2);
regmap_read(regmap_pmu, RK3588_PMUGRF_OS_REG3, &reg3);
@@ -751,6 +795,15 @@ static int rk3588_dfi_init(struct rockchip_dfi *dfi)
dfi->max_channels = 4;
dfi->ddrmon_stride = 0x4000;
+ dfi->count_multiplier = 2;
+
+ if (dfi->ddr_type == ROCKCHIP_DDRTYPE_LPDDR5) {
+ regmap_read(regmap_pmu, RK3588_PMUGRF_OS_REG6, &reg6);
+ dfi->lp5_bank_mode = FIELD_GET(RK3588_PMUGRF_OS_REG6_LP5_BANK_MODE, reg6) << 7;
+ dfi->lp5_ckr = FIELD_GET(RK3588_PMUGRF_OS_REG6_LP5_CKR, reg6);
+ if (dfi->lp5_ckr)
+ dfi->count_multiplier *= 2;
+ }
return 0;
};
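/*
 * The DDRMON_CTRL writes above use the Rockchip "hiword mask" register
 * convention: bits [31:16] are a write-enable mask for bits [15:0], so
 * a single 32-bit write updates only the selected fields without a
 * read-modify-write. Widening the mask (as done for LPDDR5) lets the
 * same write also set the bank-mode field. Sketch of the macro as
 * commonly defined in Rockchip drivers (name prefixed to mark it as
 * illustrative):
 */
#define EXAMPLE_HIWORD_UPDATE(val, mask)	((val) | ((mask) << 16))

/*
 * e.g. update only the DDR-type and LP5 bank-mode fields:
 *	writel_relaxed(EXAMPLE_HIWORD_UPDATE(ctrl, ctrl_mask),
 *		       dfi_regs + DDRMON_CTRL);
 */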
diff --git a/drivers/devfreq/mtk-cci-devfreq.c b/drivers/devfreq/mtk-cci-devfreq.c
index 22fe9e631f8a..4c22be728f6a 100644
--- a/drivers/devfreq/mtk-cci-devfreq.c
+++ b/drivers/devfreq/mtk-cci-devfreq.c
@@ -86,7 +86,7 @@ static int mtk_ccifreq_set_voltage(struct mtk_ccifreq_drv *drv, int new_voltage)
soc_data->sram_max_volt);
return ret;
}
- } else if (pre_voltage > new_voltage) {
+ } else {
voltage = max(new_voltage,
pre_vsram - soc_data->max_volt_shift);
ret = regulator_set_voltage(drv->proc_reg, voltage,
@@ -386,7 +386,8 @@ out_disable_cci_clk:
out_free_resources:
if (regulator_is_enabled(drv->proc_reg))
regulator_disable(drv->proc_reg);
- if (drv->sram_reg && regulator_is_enabled(drv->sram_reg))
+ if (!IS_ERR_OR_NULL(drv->sram_reg) &&
+ regulator_is_enabled(drv->sram_reg))
regulator_disable(drv->sram_reg);
return ret;
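/*
 * Why the IS_ERR_OR_NULL() check above is needed: optional supplies
 * come back from the regulator API as ERR_PTR() values rather than
 * NULL, and handing an ERR_PTR to regulator_is_enabled() dereferences
 * a bogus pointer. A defensive helper sketch:
 */
static void example_disable_optional(struct regulator *reg)
{
	if (!IS_ERR_OR_NULL(reg) && regulator_is_enabled(reg))
		regulator_disable(reg);
}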
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index 395c6be901ce..dcea66aadfa3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -2665,7 +2665,7 @@ static int amdgpu_pmops_thaw(struct device *dev)
struct drm_device *drm_dev = dev_get_drvdata(dev);
/* do not resume device if it's normal hibernation */
- if (!pm_hibernate_is_recovering())
+ if (!pm_hibernate_is_recovering() && !pm_hibernation_mode_is_suspend())
return 0;
return amdgpu_device_resume(drm_dev, true);
diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c
index 91a7b7e7c0c8..9ba83954c255 100644
--- a/drivers/idle/intel_idle.c
+++ b/drivers/idle/intel_idle.c
@@ -259,7 +259,7 @@ static struct cpuidle_state nehalem_cstates[] __initdata = {
.flags = MWAIT2flg(0x00),
.exit_latency = 3,
.target_residency = 6,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C1E",
@@ -267,7 +267,7 @@ static struct cpuidle_state nehalem_cstates[] __initdata = {
.flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE,
.exit_latency = 10,
.target_residency = 20,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C3",
@@ -275,7 +275,7 @@ static struct cpuidle_state nehalem_cstates[] __initdata = {
.flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 20,
.target_residency = 80,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C6",
@@ -283,7 +283,7 @@ static struct cpuidle_state nehalem_cstates[] __initdata = {
.flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 200,
.target_residency = 800,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.enter = NULL }
@@ -296,7 +296,7 @@ static struct cpuidle_state snb_cstates[] __initdata = {
.flags = MWAIT2flg(0x00),
.exit_latency = 2,
.target_residency = 2,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C1E",
@@ -304,7 +304,7 @@ static struct cpuidle_state snb_cstates[] __initdata = {
.flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE,
.exit_latency = 10,
.target_residency = 20,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C3",
@@ -312,7 +312,7 @@ static struct cpuidle_state snb_cstates[] __initdata = {
.flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 80,
.target_residency = 211,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C6",
@@ -320,7 +320,7 @@ static struct cpuidle_state snb_cstates[] __initdata = {
.flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 104,
.target_residency = 345,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C7",
@@ -328,7 +328,7 @@ static struct cpuidle_state snb_cstates[] __initdata = {
.flags = MWAIT2flg(0x30) | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 109,
.target_residency = 345,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.enter = NULL }
@@ -341,7 +341,7 @@ static struct cpuidle_state byt_cstates[] __initdata = {
.flags = MWAIT2flg(0x00),
.exit_latency = 1,
.target_residency = 1,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C6N",
@@ -349,7 +349,7 @@ static struct cpuidle_state byt_cstates[] __initdata = {
.flags = MWAIT2flg(0x58) | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 300,
.target_residency = 275,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C6S",
@@ -357,7 +357,7 @@ static struct cpuidle_state byt_cstates[] __initdata = {
.flags = MWAIT2flg(0x52) | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 500,
.target_residency = 560,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C7",
@@ -365,7 +365,7 @@ static struct cpuidle_state byt_cstates[] __initdata = {
.flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 1200,
.target_residency = 4000,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C7S",
@@ -373,7 +373,7 @@ static struct cpuidle_state byt_cstates[] __initdata = {
.flags = MWAIT2flg(0x64) | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 10000,
.target_residency = 20000,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.enter = NULL }
@@ -386,7 +386,7 @@ static struct cpuidle_state cht_cstates[] __initdata = {
.flags = MWAIT2flg(0x00),
.exit_latency = 1,
.target_residency = 1,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C6N",
@@ -394,7 +394,7 @@ static struct cpuidle_state cht_cstates[] __initdata = {
.flags = MWAIT2flg(0x58) | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 80,
.target_residency = 275,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C6S",
@@ -402,7 +402,7 @@ static struct cpuidle_state cht_cstates[] __initdata = {
.flags = MWAIT2flg(0x52) | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 200,
.target_residency = 560,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C7",
@@ -410,7 +410,7 @@ static struct cpuidle_state cht_cstates[] __initdata = {
.flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 1200,
.target_residency = 4000,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C7S",
@@ -418,7 +418,7 @@ static struct cpuidle_state cht_cstates[] __initdata = {
.flags = MWAIT2flg(0x64) | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 10000,
.target_residency = 20000,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.enter = NULL }
@@ -431,7 +431,7 @@ static struct cpuidle_state ivb_cstates[] __initdata = {
.flags = MWAIT2flg(0x00),
.exit_latency = 1,
.target_residency = 1,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C1E",
@@ -439,7 +439,7 @@ static struct cpuidle_state ivb_cstates[] __initdata = {
.flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE,
.exit_latency = 10,
.target_residency = 20,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C3",
@@ -447,7 +447,7 @@ static struct cpuidle_state ivb_cstates[] __initdata = {
.flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 59,
.target_residency = 156,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C6",
@@ -455,7 +455,7 @@ static struct cpuidle_state ivb_cstates[] __initdata = {
.flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 80,
.target_residency = 300,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C7",
@@ -463,7 +463,7 @@ static struct cpuidle_state ivb_cstates[] __initdata = {
.flags = MWAIT2flg(0x30) | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 87,
.target_residency = 300,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.enter = NULL }
@@ -476,7 +476,7 @@ static struct cpuidle_state ivt_cstates[] __initdata = {
.flags = MWAIT2flg(0x00),
.exit_latency = 1,
.target_residency = 1,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C1E",
@@ -484,7 +484,7 @@ static struct cpuidle_state ivt_cstates[] __initdata = {
.flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE,
.exit_latency = 10,
.target_residency = 80,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C3",
@@ -492,7 +492,7 @@ static struct cpuidle_state ivt_cstates[] __initdata = {
.flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 59,
.target_residency = 156,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C6",
@@ -500,7 +500,7 @@ static struct cpuidle_state ivt_cstates[] __initdata = {
.flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 82,
.target_residency = 300,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.enter = NULL }
@@ -513,7 +513,7 @@ static struct cpuidle_state ivt_cstates_4s[] __initdata = {
.flags = MWAIT2flg(0x00),
.exit_latency = 1,
.target_residency = 1,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C1E",
@@ -521,7 +521,7 @@ static struct cpuidle_state ivt_cstates_4s[] __initdata = {
.flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE,
.exit_latency = 10,
.target_residency = 250,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C3",
@@ -529,7 +529,7 @@ static struct cpuidle_state ivt_cstates_4s[] __initdata = {
.flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 59,
.target_residency = 300,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C6",
@@ -537,7 +537,7 @@ static struct cpuidle_state ivt_cstates_4s[] __initdata = {
.flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 84,
.target_residency = 400,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.enter = NULL }
@@ -550,7 +550,7 @@ static struct cpuidle_state ivt_cstates_8s[] __initdata = {
.flags = MWAIT2flg(0x00),
.exit_latency = 1,
.target_residency = 1,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C1E",
@@ -558,7 +558,7 @@ static struct cpuidle_state ivt_cstates_8s[] __initdata = {
.flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE,
.exit_latency = 10,
.target_residency = 500,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C3",
@@ -566,7 +566,7 @@ static struct cpuidle_state ivt_cstates_8s[] __initdata = {
.flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 59,
.target_residency = 600,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C6",
@@ -574,7 +574,7 @@ static struct cpuidle_state ivt_cstates_8s[] __initdata = {
.flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 88,
.target_residency = 700,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.enter = NULL }
@@ -587,7 +587,7 @@ static struct cpuidle_state hsw_cstates[] __initdata = {
.flags = MWAIT2flg(0x00),
.exit_latency = 2,
.target_residency = 2,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C1E",
@@ -595,7 +595,7 @@ static struct cpuidle_state hsw_cstates[] __initdata = {
.flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE,
.exit_latency = 10,
.target_residency = 20,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C3",
@@ -603,7 +603,7 @@ static struct cpuidle_state hsw_cstates[] __initdata = {
.flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 33,
.target_residency = 100,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C6",
@@ -611,7 +611,7 @@ static struct cpuidle_state hsw_cstates[] __initdata = {
.flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 133,
.target_residency = 400,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C7s",
@@ -619,7 +619,7 @@ static struct cpuidle_state hsw_cstates[] __initdata = {
.flags = MWAIT2flg(0x32) | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 166,
.target_residency = 500,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C8",
@@ -627,7 +627,7 @@ static struct cpuidle_state hsw_cstates[] __initdata = {
.flags = MWAIT2flg(0x40) | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 300,
.target_residency = 900,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C9",
@@ -635,7 +635,7 @@ static struct cpuidle_state hsw_cstates[] __initdata = {
.flags = MWAIT2flg(0x50) | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 600,
.target_residency = 1800,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C10",
@@ -643,7 +643,7 @@ static struct cpuidle_state hsw_cstates[] __initdata = {
.flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 2600,
.target_residency = 7700,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.enter = NULL }
@@ -655,7 +655,7 @@ static struct cpuidle_state bdw_cstates[] __initdata = {
.flags = MWAIT2flg(0x00),
.exit_latency = 2,
.target_residency = 2,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C1E",
@@ -663,7 +663,7 @@ static struct cpuidle_state bdw_cstates[] __initdata = {
.flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE,
.exit_latency = 10,
.target_residency = 20,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C3",
@@ -671,7 +671,7 @@ static struct cpuidle_state bdw_cstates[] __initdata = {
.flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 40,
.target_residency = 100,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C6",
@@ -679,7 +679,7 @@ static struct cpuidle_state bdw_cstates[] __initdata = {
.flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 133,
.target_residency = 400,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C7s",
@@ -687,7 +687,7 @@ static struct cpuidle_state bdw_cstates[] __initdata = {
.flags = MWAIT2flg(0x32) | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 166,
.target_residency = 500,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C8",
@@ -695,7 +695,7 @@ static struct cpuidle_state bdw_cstates[] __initdata = {
.flags = MWAIT2flg(0x40) | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 300,
.target_residency = 900,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C9",
@@ -703,7 +703,7 @@ static struct cpuidle_state bdw_cstates[] __initdata = {
.flags = MWAIT2flg(0x50) | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 600,
.target_residency = 1800,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C10",
@@ -711,7 +711,7 @@ static struct cpuidle_state bdw_cstates[] __initdata = {
.flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 2600,
.target_residency = 7700,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.enter = NULL }
@@ -724,7 +724,7 @@ static struct cpuidle_state skl_cstates[] __initdata = {
.flags = MWAIT2flg(0x00),
.exit_latency = 2,
.target_residency = 2,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C1E",
@@ -732,7 +732,7 @@ static struct cpuidle_state skl_cstates[] __initdata = {
.flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE,
.exit_latency = 10,
.target_residency = 20,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C3",
@@ -740,7 +740,7 @@ static struct cpuidle_state skl_cstates[] __initdata = {
.flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 70,
.target_residency = 100,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C6",
@@ -748,7 +748,7 @@ static struct cpuidle_state skl_cstates[] __initdata = {
.flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED | CPUIDLE_FLAG_IBRS,
.exit_latency = 85,
.target_residency = 200,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C7s",
@@ -756,7 +756,7 @@ static struct cpuidle_state skl_cstates[] __initdata = {
.flags = MWAIT2flg(0x33) | CPUIDLE_FLAG_TLB_FLUSHED | CPUIDLE_FLAG_IBRS,
.exit_latency = 124,
.target_residency = 800,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C8",
@@ -764,7 +764,7 @@ static struct cpuidle_state skl_cstates[] __initdata = {
.flags = MWAIT2flg(0x40) | CPUIDLE_FLAG_TLB_FLUSHED | CPUIDLE_FLAG_IBRS,
.exit_latency = 200,
.target_residency = 800,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C9",
@@ -772,7 +772,7 @@ static struct cpuidle_state skl_cstates[] __initdata = {
.flags = MWAIT2flg(0x50) | CPUIDLE_FLAG_TLB_FLUSHED | CPUIDLE_FLAG_IBRS,
.exit_latency = 480,
.target_residency = 5000,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C10",
@@ -780,7 +780,7 @@ static struct cpuidle_state skl_cstates[] __initdata = {
.flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TLB_FLUSHED | CPUIDLE_FLAG_IBRS,
.exit_latency = 890,
.target_residency = 5000,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.enter = NULL }
@@ -793,7 +793,7 @@ static struct cpuidle_state skx_cstates[] __initdata = {
.flags = MWAIT2flg(0x00) | CPUIDLE_FLAG_IRQ_ENABLE,
.exit_latency = 2,
.target_residency = 2,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C1E",
@@ -801,7 +801,7 @@ static struct cpuidle_state skx_cstates[] __initdata = {
.flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE,
.exit_latency = 10,
.target_residency = 20,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C6",
@@ -809,7 +809,7 @@ static struct cpuidle_state skx_cstates[] __initdata = {
.flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED | CPUIDLE_FLAG_IBRS,
.exit_latency = 133,
.target_residency = 600,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.enter = NULL }
@@ -822,7 +822,7 @@ static struct cpuidle_state icx_cstates[] __initdata = {
.flags = MWAIT2flg(0x00) | CPUIDLE_FLAG_IRQ_ENABLE,
.exit_latency = 1,
.target_residency = 1,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C1E",
@@ -830,7 +830,7 @@ static struct cpuidle_state icx_cstates[] __initdata = {
.flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE,
.exit_latency = 4,
.target_residency = 4,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C6",
@@ -838,7 +838,7 @@ static struct cpuidle_state icx_cstates[] __initdata = {
.flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 170,
.target_residency = 600,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.enter = NULL }
@@ -861,7 +861,7 @@ static struct cpuidle_state adl_cstates[] __initdata = {
.flags = MWAIT2flg(0x00) | CPUIDLE_FLAG_UNUSABLE,
.exit_latency = 1,
.target_residency = 1,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C1E",
@@ -869,7 +869,7 @@ static struct cpuidle_state adl_cstates[] __initdata = {
.flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE,
.exit_latency = 2,
.target_residency = 4,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C6",
@@ -877,7 +877,7 @@ static struct cpuidle_state adl_cstates[] __initdata = {
.flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 220,
.target_residency = 600,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C8",
@@ -885,7 +885,7 @@ static struct cpuidle_state adl_cstates[] __initdata = {
.flags = MWAIT2flg(0x40) | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 280,
.target_residency = 800,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C10",
@@ -893,7 +893,7 @@ static struct cpuidle_state adl_cstates[] __initdata = {
.flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 680,
.target_residency = 2000,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.enter = NULL }
@@ -906,7 +906,7 @@ static struct cpuidle_state adl_l_cstates[] __initdata = {
.flags = MWAIT2flg(0x00) | CPUIDLE_FLAG_UNUSABLE,
.exit_latency = 1,
.target_residency = 1,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C1E",
@@ -914,7 +914,7 @@ static struct cpuidle_state adl_l_cstates[] __initdata = {
.flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE,
.exit_latency = 2,
.target_residency = 4,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C6",
@@ -922,7 +922,7 @@ static struct cpuidle_state adl_l_cstates[] __initdata = {
.flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 170,
.target_residency = 500,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C8",
@@ -930,7 +930,7 @@ static struct cpuidle_state adl_l_cstates[] __initdata = {
.flags = MWAIT2flg(0x40) | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 200,
.target_residency = 600,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C10",
@@ -938,7 +938,7 @@ static struct cpuidle_state adl_l_cstates[] __initdata = {
.flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 230,
.target_residency = 700,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.enter = NULL }
@@ -951,7 +951,7 @@ static struct cpuidle_state mtl_l_cstates[] __initdata = {
.flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE,
.exit_latency = 1,
.target_residency = 1,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C6",
@@ -959,7 +959,7 @@ static struct cpuidle_state mtl_l_cstates[] __initdata = {
.flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 140,
.target_residency = 420,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C10",
@@ -967,7 +967,7 @@ static struct cpuidle_state mtl_l_cstates[] __initdata = {
.flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 310,
.target_residency = 930,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.enter = NULL }
@@ -980,7 +980,7 @@ static struct cpuidle_state gmt_cstates[] __initdata = {
.flags = MWAIT2flg(0x00) | CPUIDLE_FLAG_UNUSABLE,
.exit_latency = 1,
.target_residency = 1,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C1E",
@@ -988,7 +988,7 @@ static struct cpuidle_state gmt_cstates[] __initdata = {
.flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE,
.exit_latency = 2,
.target_residency = 4,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C6",
@@ -996,7 +996,7 @@ static struct cpuidle_state gmt_cstates[] __initdata = {
.flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 195,
.target_residency = 585,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C8",
@@ -1004,7 +1004,7 @@ static struct cpuidle_state gmt_cstates[] __initdata = {
.flags = MWAIT2flg(0x40) | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 260,
.target_residency = 1040,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C10",
@@ -1012,7 +1012,7 @@ static struct cpuidle_state gmt_cstates[] __initdata = {
.flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 660,
.target_residency = 1980,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.enter = NULL }
@@ -1025,7 +1025,7 @@ static struct cpuidle_state spr_cstates[] __initdata = {
.flags = MWAIT2flg(0x00),
.exit_latency = 1,
.target_residency = 1,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C1E",
@@ -1033,7 +1033,7 @@ static struct cpuidle_state spr_cstates[] __initdata = {
.flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE,
.exit_latency = 2,
.target_residency = 4,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C6",
@@ -1042,7 +1042,7 @@ static struct cpuidle_state spr_cstates[] __initdata = {
CPUIDLE_FLAG_INIT_XSTATE,
.exit_latency = 290,
.target_residency = 800,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.enter = NULL }
@@ -1055,7 +1055,7 @@ static struct cpuidle_state gnr_cstates[] __initdata = {
.flags = MWAIT2flg(0x00),
.exit_latency = 1,
.target_residency = 1,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C1E",
@@ -1063,7 +1063,7 @@ static struct cpuidle_state gnr_cstates[] __initdata = {
.flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE,
.exit_latency = 4,
.target_residency = 4,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C6",
@@ -1073,7 +1073,7 @@ static struct cpuidle_state gnr_cstates[] __initdata = {
CPUIDLE_FLAG_PARTIAL_HINT_MATCH,
.exit_latency = 170,
.target_residency = 650,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C6P",
@@ -1083,7 +1083,7 @@ static struct cpuidle_state gnr_cstates[] __initdata = {
CPUIDLE_FLAG_PARTIAL_HINT_MATCH,
.exit_latency = 210,
.target_residency = 1000,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.enter = NULL }
@@ -1096,7 +1096,7 @@ static struct cpuidle_state gnrd_cstates[] __initdata = {
.flags = MWAIT2flg(0x00),
.exit_latency = 1,
.target_residency = 1,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C1E",
@@ -1104,7 +1104,7 @@ static struct cpuidle_state gnrd_cstates[] __initdata = {
.flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE,
.exit_latency = 4,
.target_residency = 4,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C6",
@@ -1114,7 +1114,7 @@ static struct cpuidle_state gnrd_cstates[] __initdata = {
CPUIDLE_FLAG_PARTIAL_HINT_MATCH,
.exit_latency = 220,
.target_residency = 650,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C6P",
@@ -1124,7 +1124,7 @@ static struct cpuidle_state gnrd_cstates[] __initdata = {
CPUIDLE_FLAG_PARTIAL_HINT_MATCH,
.exit_latency = 240,
.target_residency = 750,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.enter = NULL }
@@ -1137,7 +1137,7 @@ static struct cpuidle_state atom_cstates[] __initdata = {
.flags = MWAIT2flg(0x00),
.exit_latency = 10,
.target_residency = 20,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C2",
@@ -1145,7 +1145,7 @@ static struct cpuidle_state atom_cstates[] __initdata = {
.flags = MWAIT2flg(0x10),
.exit_latency = 20,
.target_residency = 80,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C4",
@@ -1153,7 +1153,7 @@ static struct cpuidle_state atom_cstates[] __initdata = {
.flags = MWAIT2flg(0x30) | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 100,
.target_residency = 400,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C6",
@@ -1161,7 +1161,7 @@ static struct cpuidle_state atom_cstates[] __initdata = {
.flags = MWAIT2flg(0x52) | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 140,
.target_residency = 560,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.enter = NULL }
@@ -1173,7 +1173,7 @@ static struct cpuidle_state tangier_cstates[] __initdata = {
.flags = MWAIT2flg(0x00),
.exit_latency = 1,
.target_residency = 4,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C4",
@@ -1181,7 +1181,7 @@ static struct cpuidle_state tangier_cstates[] __initdata = {
.flags = MWAIT2flg(0x30) | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 100,
.target_residency = 400,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C6",
@@ -1189,7 +1189,7 @@ static struct cpuidle_state tangier_cstates[] __initdata = {
.flags = MWAIT2flg(0x52) | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 140,
.target_residency = 560,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C7",
@@ -1197,7 +1197,7 @@ static struct cpuidle_state tangier_cstates[] __initdata = {
.flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 1200,
.target_residency = 4000,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C9",
@@ -1205,7 +1205,7 @@ static struct cpuidle_state tangier_cstates[] __initdata = {
.flags = MWAIT2flg(0x64) | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 10000,
.target_residency = 20000,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.enter = NULL }
@@ -1217,7 +1217,7 @@ static struct cpuidle_state avn_cstates[] __initdata = {
.flags = MWAIT2flg(0x00),
.exit_latency = 2,
.target_residency = 2,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C6",
@@ -1225,7 +1225,7 @@ static struct cpuidle_state avn_cstates[] __initdata = {
.flags = MWAIT2flg(0x51) | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 15,
.target_residency = 45,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.enter = NULL }
@@ -1237,7 +1237,7 @@ static struct cpuidle_state knl_cstates[] __initdata = {
.flags = MWAIT2flg(0x00),
.exit_latency = 1,
.target_residency = 2,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle },
{
.name = "C6",
@@ -1245,7 +1245,7 @@ static struct cpuidle_state knl_cstates[] __initdata = {
.flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 120,
.target_residency = 500,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle },
{
.enter = NULL }
@@ -1258,7 +1258,7 @@ static struct cpuidle_state bxt_cstates[] __initdata = {
.flags = MWAIT2flg(0x00),
.exit_latency = 2,
.target_residency = 2,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C1E",
@@ -1266,7 +1266,7 @@ static struct cpuidle_state bxt_cstates[] __initdata = {
.flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE,
.exit_latency = 10,
.target_residency = 20,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C6",
@@ -1274,7 +1274,7 @@ static struct cpuidle_state bxt_cstates[] __initdata = {
.flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 133,
.target_residency = 133,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C7s",
@@ -1282,7 +1282,7 @@ static struct cpuidle_state bxt_cstates[] __initdata = {
.flags = MWAIT2flg(0x31) | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 155,
.target_residency = 155,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C8",
@@ -1290,7 +1290,7 @@ static struct cpuidle_state bxt_cstates[] __initdata = {
.flags = MWAIT2flg(0x40) | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 1000,
.target_residency = 1000,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C9",
@@ -1298,7 +1298,7 @@ static struct cpuidle_state bxt_cstates[] __initdata = {
.flags = MWAIT2flg(0x50) | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 2000,
.target_residency = 2000,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C10",
@@ -1306,7 +1306,7 @@ static struct cpuidle_state bxt_cstates[] __initdata = {
.flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 10000,
.target_residency = 10000,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.enter = NULL }
@@ -1319,7 +1319,7 @@ static struct cpuidle_state dnv_cstates[] __initdata = {
.flags = MWAIT2flg(0x00),
.exit_latency = 2,
.target_residency = 2,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C1E",
@@ -1327,7 +1327,7 @@ static struct cpuidle_state dnv_cstates[] __initdata = {
.flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE,
.exit_latency = 10,
.target_residency = 20,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C6",
@@ -1335,7 +1335,7 @@ static struct cpuidle_state dnv_cstates[] __initdata = {
.flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 50,
.target_residency = 500,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.enter = NULL }
@@ -1352,7 +1352,7 @@ static struct cpuidle_state snr_cstates[] __initdata = {
.flags = MWAIT2flg(0x00),
.exit_latency = 2,
.target_residency = 2,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C1E",
@@ -1360,7 +1360,7 @@ static struct cpuidle_state snr_cstates[] __initdata = {
.flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE,
.exit_latency = 15,
.target_residency = 25,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C6",
@@ -1368,7 +1368,7 @@ static struct cpuidle_state snr_cstates[] __initdata = {
.flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 130,
.target_residency = 500,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.enter = NULL }
@@ -1381,7 +1381,7 @@ static struct cpuidle_state grr_cstates[] __initdata = {
.flags = MWAIT2flg(0x00) | CPUIDLE_FLAG_ALWAYS_ENABLE,
.exit_latency = 1,
.target_residency = 1,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C1E",
@@ -1389,7 +1389,7 @@ static struct cpuidle_state grr_cstates[] __initdata = {
.flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE,
.exit_latency = 2,
.target_residency = 10,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C6S",
@@ -1397,7 +1397,7 @@ static struct cpuidle_state grr_cstates[] __initdata = {
.flags = MWAIT2flg(0x22) | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 140,
.target_residency = 500,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.enter = NULL }
@@ -1410,7 +1410,7 @@ static struct cpuidle_state srf_cstates[] __initdata = {
.flags = MWAIT2flg(0x00) | CPUIDLE_FLAG_ALWAYS_ENABLE,
.exit_latency = 1,
.target_residency = 1,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C1E",
@@ -1418,7 +1418,7 @@ static struct cpuidle_state srf_cstates[] __initdata = {
.flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE,
.exit_latency = 2,
.target_residency = 10,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C6S",
@@ -1427,7 +1427,7 @@ static struct cpuidle_state srf_cstates[] __initdata = {
CPUIDLE_FLAG_PARTIAL_HINT_MATCH,
.exit_latency = 270,
.target_residency = 700,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C6SP",
@@ -1436,7 +1436,7 @@ static struct cpuidle_state srf_cstates[] __initdata = {
CPUIDLE_FLAG_PARTIAL_HINT_MATCH,
.exit_latency = 310,
.target_residency = 900,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.enter = NULL }
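The "&intel_idle" to "intel_idle" conversions above are behavior-neutral: in C, a
function designator used in an expression decays to a pointer to the function, so
"f" and "&f" evaluate to the same value. A minimal stand-alone sketch of the
equivalence (the enter_fn type and demo_enter function are hypothetical,
illustration only):

#include <assert.h>
#include <stdio.h>

typedef int (*enter_fn)(int);

static int demo_enter(int state)
{
	return state + 1;
}

int main(void)
{
	enter_fn a = demo_enter;	/* function designator decays to a pointer */
	enter_fn b = &demo_enter;	/* explicit & is redundant */

	assert(a == b);			/* both point at demo_enter */
	printf("%d\n", a(41));		/* prints 42 */
	return 0;
}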
diff --git a/drivers/opp/core.c b/drivers/opp/core.c
index edbd60501cf0..bba4f7daff8c 100644
--- a/drivers/opp/core.c
+++ b/drivers/opp/core.c
@@ -476,6 +476,16 @@ static unsigned long _read_bw(struct dev_pm_opp *opp, int index)
return opp->bandwidth[index].peak;
}
+static unsigned long _read_opp_key(struct dev_pm_opp *opp, int index,
+ struct dev_pm_opp_key *key)
+{
+ key->bw = opp->bandwidth ? opp->bandwidth[index].peak : 0;
+ key->freq = opp->rates[index];
+ key->level = opp->level;
+
+ return true;
+}
+
/* Generic comparison helpers */
static bool _compare_exact(struct dev_pm_opp **opp, struct dev_pm_opp *temp_opp,
unsigned long opp_key, unsigned long key)
@@ -509,6 +519,22 @@ static bool _compare_floor(struct dev_pm_opp **opp, struct dev_pm_opp *temp_opp,
return false;
}
+static bool _compare_opp_key_exact(struct dev_pm_opp **opp,
+ struct dev_pm_opp *temp_opp, struct dev_pm_opp_key *opp_key,
+ struct dev_pm_opp_key *key)
+{
+ bool level_match = (key->level == OPP_LEVEL_UNSET || opp_key->level == key->level);
+ bool freq_match = (key->freq == 0 || opp_key->freq == key->freq);
+ bool bw_match = (key->bw == 0 || opp_key->bw == key->bw);
+
+ if (freq_match && level_match && bw_match) {
+ *opp = temp_opp;
+ return true;
+ }
+
+ return false;
+}
+
/* Generic key finding helpers */
static struct dev_pm_opp *_opp_table_find_key(struct opp_table *opp_table,
unsigned long *key, int index, bool available,
@@ -541,6 +567,37 @@ static struct dev_pm_opp *_opp_table_find_key(struct opp_table *opp_table,
return opp;
}
+static struct dev_pm_opp *_opp_table_find_opp_key(struct opp_table *opp_table,
+ struct dev_pm_opp_key *key, bool available,
+ unsigned long (*read)(struct dev_pm_opp *opp, int index,
+ struct dev_pm_opp_key *key),
+ bool (*compare)(struct dev_pm_opp **opp, struct dev_pm_opp *temp_opp,
+ struct dev_pm_opp_key *opp_key, struct dev_pm_opp_key *key),
+ bool (*assert)(struct opp_table *opp_table, unsigned int index))
+{
+ struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);
+ struct dev_pm_opp_key temp_key;
+
+ /* Assert that the requirement is met */
+ if (!assert(opp_table, 0))
+ return ERR_PTR(-EINVAL);
+
+ guard(mutex)(&opp_table->lock);
+
+ list_for_each_entry(temp_opp, &opp_table->opp_list, node) {
+ if (temp_opp->available == available) {
+ read(temp_opp, 0, &temp_key);
+ if (compare(&opp, temp_opp, &temp_key, key)) {
+ /* Increment the reference count of OPP */
+ dev_pm_opp_get(opp);
+ break;
+ }
+ }
+ }
+
+ return opp;
+}
+
static struct dev_pm_opp *
_find_key(struct device *dev, unsigned long *key, int index, bool available,
unsigned long (*read)(struct dev_pm_opp *opp, int index),
@@ -633,6 +690,48 @@ struct dev_pm_opp *dev_pm_opp_find_freq_exact(struct device *dev,
EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_exact);
/**
+ * dev_pm_opp_find_key_exact() - Search for an OPP with exact key set
+ * @dev: Device for which the OPP is being searched
+ * @key: OPP key set to match
+ * @available: true/false - match for available OPP
+ *
+ * Search for an exact match of the key set in the OPP table.
+ *
+ * Return: A matching opp on success, else ERR_PTR in case of error.
+ * Possible error values:
+ * EINVAL: for bad pointers
+ * ERANGE: no match found for search
+ * ENODEV: if device not found in list of registered devices
+ *
+ * Note: 'available' is a modifier for the search. If 'available' == true,
+ * then the match is for an exact key that is available in the stored
+ * OPP table. If false, the match is for an exact key that is not available.
+ *
+ * This provides a mechanism to enable an OPP which is not available currently
+ * or the opposite as well.
+ *
+ * The callers are required to call dev_pm_opp_put() for the returned OPP after
+ * use.
+ */
+struct dev_pm_opp *dev_pm_opp_find_key_exact(struct device *dev,
+ struct dev_pm_opp_key *key,
+ bool available)
+{
+ struct opp_table *opp_table __free(put_opp_table) = _find_opp_table(dev);
+
+ if (IS_ERR(opp_table)) {
+ dev_err(dev, "%s: OPP table not found (%ld)\n", __func__,
+ PTR_ERR(opp_table));
+ return ERR_CAST(opp_table);
+ }
+
+ return _opp_table_find_opp_key(opp_table, key, available,
+ _read_opp_key, _compare_opp_key_exact,
+ assert_single_clk);
+}
+EXPORT_SYMBOL_GPL(dev_pm_opp_find_key_exact);
+
+/**
* dev_pm_opp_find_freq_exact_indexed() - Search for an exact freq for the
* clock corresponding to the index
* @dev: Device for which we do this operation
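A hedged usage sketch of the new lookup: per the kernel-doc above, each field of
struct dev_pm_opp_key can be left at its NOP value (0 for freq and bw,
OPP_LEVEL_UNSET for level) to exclude it from the match. The device pointer and
the 800 MHz frequency below are placeholders, not taken from this patch:

	/* Find an available OPP at exactly 800 MHz; ignore level and bandwidth. */
	struct dev_pm_opp_key key = {
		.freq = 800000000,		/* Hz */
		.level = OPP_LEVEL_UNSET,	/* not matched */
		.bw = 0,			/* not matched */
	};
	struct dev_pm_opp *opp;

	opp = dev_pm_opp_find_key_exact(dev, &key, true);
	if (IS_ERR(opp))
		return PTR_ERR(opp);

	/* ... use the OPP ... */

	dev_pm_opp_put(opp);	/* callers must drop the returned reference */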
diff --git a/drivers/powercap/idle_inject.c b/drivers/powercap/idle_inject.c
index 5ad7cc438068..a25eb2018acd 100644
--- a/drivers/powercap/idle_inject.c
+++ b/drivers/powercap/idle_inject.c
@@ -133,7 +133,7 @@ static enum hrtimer_restart idle_inject_timer_fn(struct hrtimer *timer)
duration_us = READ_ONCE(ii_dev->run_duration_us);
duration_us += READ_ONCE(ii_dev->idle_duration_us);
- hrtimer_forward_now(timer, ns_to_ktime(duration_us * NSEC_PER_USEC));
+ hrtimer_forward_now(timer, us_to_ktime(duration_us));
return HRTIMER_RESTART;
}
@@ -232,8 +232,7 @@ int idle_inject_start(struct idle_inject_device *ii_dev)
idle_inject_wakeup(ii_dev);
hrtimer_start(&ii_dev->timer,
- ns_to_ktime((idle_duration_us + run_duration_us) *
- NSEC_PER_USEC),
+ us_to_ktime(idle_duration_us + run_duration_us),
HRTIMER_MODE_REL);
return 0;
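us_to_ktime() folds the NSEC_PER_USEC multiplication into the helper, so the
unit conversion is spelled once instead of at every call site. Assuming the
usual ktime_t representation (signed nanoseconds), the two forms are equivalent:

	ktime_t t_old = ns_to_ktime(duration_us * NSEC_PER_USEC);	/* before */
	ktime_t t_new = us_to_ktime(duration_us);			/* after */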
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
index 95f3807c8c55..40966512ea18 100644
--- a/include/linux/cpufreq.h
+++ b/include/linux/cpufreq.h
@@ -780,11 +780,10 @@ struct cpufreq_frequency_table {
else
-int cpufreq_frequency_table_cpuinfo(struct cpufreq_policy *policy,
- struct cpufreq_frequency_table *table);
+int cpufreq_frequency_table_cpuinfo(struct cpufreq_policy *policy);
+
+int cpufreq_frequency_table_verify(struct cpufreq_policy_data *policy);
-int cpufreq_frequency_table_verify(struct cpufreq_policy_data *policy,
- struct cpufreq_frequency_table *table);
int cpufreq_generic_frequency_table_verify(struct cpufreq_policy_data *policy);
int cpufreq_table_index_unsorted(struct cpufreq_policy *policy,
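With the explicit table argument dropped, these helpers presumably take the
frequency table from the policy itself (policy->freq_table), as
cpufreq_generic_frequency_table_verify() already does. A hedged sketch of a
driver ->verify() callback after this change (foo_cpufreq_verify is a
hypothetical driver function, not part of this patch):

	static int foo_cpufreq_verify(struct cpufreq_policy_data *policy)
	{
		/* Assumes policy->freq_table was populated at driver init. */
		return cpufreq_frequency_table_verify(policy);
	}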
diff --git a/include/linux/pm_opp.h b/include/linux/pm_opp.h
index cf477beae4bb..789406d95e69 100644
--- a/include/linux/pm_opp.h
+++ b/include/linux/pm_opp.h
@@ -98,6 +98,25 @@ struct dev_pm_opp_data {
unsigned long u_volt;
};
+/**
+ * struct dev_pm_opp_key - Key used to identify OPP entries
+ * @freq: Frequency in Hz. Use 0 if frequency is not to be matched.
+ * @level: Performance level associated with the OPP entry.
+ * Use OPP_LEVEL_UNSET if level is not to be matched.
+ * @bw: Bandwidth associated with the OPP entry.
+ * Use 0 if bandwidth is not to be matched.
+ *
+ * This structure is used to uniquely identify an OPP entry based on
+ * frequency, performance level, and bandwidth. Each field can be
+ * selectively ignored during matching by setting it to its respective
+ * NOP value.
+ */
+struct dev_pm_opp_key {
+ unsigned long freq;
+ unsigned int level;
+ u32 bw;
+};
+
#if defined(CONFIG_PM_OPP)
struct opp_table *dev_pm_opp_get_opp_table(struct device *dev);
@@ -131,6 +150,10 @@ struct dev_pm_opp *dev_pm_opp_find_freq_exact(struct device *dev,
unsigned long freq,
bool available);
+struct dev_pm_opp *dev_pm_opp_find_key_exact(struct device *dev,
+ struct dev_pm_opp_key *key,
+ bool available);
+
struct dev_pm_opp *
dev_pm_opp_find_freq_exact_indexed(struct device *dev, unsigned long freq,
u32 index, bool available);
@@ -289,6 +312,13 @@ static inline struct dev_pm_opp *dev_pm_opp_find_freq_exact(struct device *dev,
return ERR_PTR(-EOPNOTSUPP);
}
+static inline struct dev_pm_opp *dev_pm_opp_find_key_exact(struct device *dev,
+ struct dev_pm_opp_key *key,
+ bool available)
+{
+ return ERR_PTR(-EOPNOTSUPP);
+}
+
static inline struct dev_pm_opp *
dev_pm_opp_find_freq_exact_indexed(struct device *dev, unsigned long freq,
u32 index, bool available)
diff --git a/include/linux/suspend.h b/include/linux/suspend.h
index 317ae31e89b3..b02876f1ae38 100644
--- a/include/linux/suspend.h
+++ b/include/linux/suspend.h
@@ -418,6 +418,12 @@ static inline int hibernate_quiet_exec(int (*func)(void *data), void *data) {
}
#endif /* CONFIG_HIBERNATION */
+#if defined(CONFIG_HIBERNATION) && defined(CONFIG_SUSPEND)
+bool pm_hibernation_mode_is_suspend(void);
+#else
+static inline bool pm_hibernation_mode_is_suspend(void) { return false; }
+#endif
+
int arch_resume_nosmt(void);
#ifdef CONFIG_HIBERNATION_SNAPSHOT_DEV
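The new predicate lets callers distinguish "hibernation" that will actually
suspend the system (HIBERNATION_SUSPEND, i.e. hybrid suspend) from a true
power-off hibernation. A hedged sketch of the call pattern (the foo_* names
are hypothetical, illustration only):

	static int foo_pm_freeze(struct device *dev)
	{
		/* Hybrid suspend: the system will suspend, not power off. */
		if (pm_hibernation_mode_is_suspend())
			return foo_do_suspend(dev);

		return foo_do_hibernate_freeze(dev);
	}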
diff --git a/include/soc/rockchip/rk3588_grf.h b/include/soc/rockchip/rk3588_grf.h
index 630b35a55064..02a7b2432d99 100644
--- a/include/soc/rockchip/rk3588_grf.h
+++ b/include/soc/rockchip/rk3588_grf.h
@@ -12,7 +12,11 @@
#define RK3588_PMUGRF_OS_REG3_DRAMTYPE_INFO_V3 GENMASK(13, 12)
#define RK3588_PMUGRF_OS_REG3_SYSREG_VERSION GENMASK(31, 28)
-#define RK3588_PMUGRF_OS_REG4 0x210
-#define RK3588_PMUGRF_OS_REG5 0x214
+#define RK3588_PMUGRF_OS_REG4 0x210
+#define RK3588_PMUGRF_OS_REG5 0x214
+#define RK3588_PMUGRF_OS_REG6 0x218
+#define RK3588_PMUGRF_OS_REG6_LP5_BANK_MODE GENMASK(2, 1)
+/* Whether the LPDDR5 is in 2:1 (= 0) or 4:1 (= 1) CKR a.k.a. DQS mode */
+#define RK3588_PMUGRF_OS_REG6_LP5_CKR BIT(0)
#endif /* __SOC_RK3588_GRF_H */
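The new OS_REG6 fields are GENMASK()/BIT() masks, so they decode with the usual
bitfield helpers. A hedged sketch, assuming the register value has already been
read into "reg" elsewhere (e.g. via regmap):

	#include <linux/bitfield.h>

	u32 bank_mode = FIELD_GET(RK3588_PMUGRF_OS_REG6_LP5_BANK_MODE, reg);
	bool ckr_4to1 = FIELD_GET(RK3588_PMUGRF_OS_REG6_LP5_CKR, reg);
	/* ckr_4to1: 0 => 2:1 CKR (DQS) mode, 1 => 4:1, per the comment above */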
diff --git a/include/soc/rockchip/rockchip_grf.h b/include/soc/rockchip/rockchip_grf.h
index e46fd72aea8d..41c7bb26fd53 100644
--- a/include/soc/rockchip/rockchip_grf.h
+++ b/include/soc/rockchip/rockchip_grf.h
@@ -13,6 +13,7 @@ enum {
ROCKCHIP_DDRTYPE_LPDDR3 = 6,
ROCKCHIP_DDRTYPE_LPDDR4 = 7,
ROCKCHIP_DDRTYPE_LPDDR4X = 8,
+ ROCKCHIP_DDRTYPE_LPDDR5 = 9,
};
#endif /* __SOC_ROCKCHIP_GRF_H */
diff --git a/kernel/power/energy_model.c b/kernel/power/energy_model.c
index 8df55397414a..5f17d2e8e954 100644
--- a/kernel/power/energy_model.c
+++ b/kernel/power/energy_model.c
@@ -799,7 +799,7 @@ void em_adjust_cpu_capacity(unsigned int cpu)
static void em_check_capacity_update(void)
{
cpumask_var_t cpu_done_mask;
- int cpu;
+ int cpu, failed_cpus = 0;
if (!zalloc_cpumask_var(&cpu_done_mask, GFP_KERNEL)) {
pr_warn("no free memory\n");
@@ -817,10 +817,8 @@ static void em_check_capacity_update(void)
policy = cpufreq_cpu_get(cpu);
if (!policy) {
- pr_debug("Accessing cpu%d policy failed\n", cpu);
- schedule_delayed_work(&em_update_work,
- msecs_to_jiffies(1000));
- break;
+ failed_cpus++;
+ continue;
}
cpufreq_cpu_put(policy);
@@ -835,6 +833,9 @@ static void em_check_capacity_update(void)
em_adjust_new_capacity(cpu, dev, pd);
}
+ if (failed_cpus)
+ schedule_delayed_work(&em_update_work, msecs_to_jiffies(1000));
+
free_cpumask_var(cpu_done_mask);
}
diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c
index 2f66ab453823..14e85ff23551 100644
--- a/kernel/power/hibernate.c
+++ b/kernel/power/hibernate.c
@@ -80,6 +80,17 @@ static const struct platform_hibernation_ops *hibernation_ops;
static atomic_t hibernate_atomic = ATOMIC_INIT(1);
+#ifdef CONFIG_SUSPEND
+/**
+ * pm_hibernation_mode_is_suspend - Check if hibernation has been set to suspend
+ */
+bool pm_hibernation_mode_is_suspend(void)
+{
+ return hibernation_mode == HIBERNATION_SUSPEND;
+}
+EXPORT_SYMBOL_GPL(pm_hibernation_mode_is_suspend);
+#endif
+
bool hibernate_acquire(void)
{
return atomic_add_unless(&hibernate_atomic, -1, 0);
@@ -695,19 +706,13 @@ static void power_down(void)
#ifdef CONFIG_SUSPEND
if (hibernation_mode == HIBERNATION_SUSPEND) {
+ pm_restore_gfp_mask();
error = suspend_devices_and_enter(mem_sleep_current);
- if (error) {
- hibernation_mode = hibernation_ops ?
- HIBERNATION_PLATFORM :
- HIBERNATION_SHUTDOWN;
- } else {
- /* Restore swap signature. */
- error = swsusp_unmark();
- if (error)
- pr_err("Swap will be unusable! Try swapon -a.\n");
+ if (!error)
+ goto exit;
- return;
- }
+ hibernation_mode = hibernation_ops ? HIBERNATION_PLATFORM :
+ HIBERNATION_SHUTDOWN;
}
#endif
@@ -718,10 +723,9 @@ static void power_down(void)
case HIBERNATION_PLATFORM:
error = hibernation_platform_enter();
if (error == -EAGAIN || error == -EBUSY) {
- swsusp_unmark();
events_check_enabled = false;
pr_info("Wakeup event detected during hibernation, rolling back.\n");
- return;
+ goto exit;
}
fallthrough;
case HIBERNATION_SHUTDOWN:
@@ -740,6 +744,15 @@ static void power_down(void)
pr_crit("Power down manually\n");
while (1)
cpu_relax();
+
+exit:
+ /* Match the pm_restore_gfp_mask() call in hibernate(). */
+ pm_restrict_gfp_mask();
+
+ /* Restore swap signature. */
+ error = swsusp_unmark();
+ if (error)
+ pr_err("Swap will be unusable! Try swapon -a.\n");
}
static int load_image_and_restore(void)
diff --git a/kernel/power/process.c b/kernel/power/process.c
index dc0dfc349f22..8ff68ebaa1e0 100644
--- a/kernel/power/process.c
+++ b/kernel/power/process.c
@@ -132,7 +132,6 @@ int freeze_processes(void)
if (!pm_freezing)
static_branch_inc(&freezer_active);
- pm_wakeup_clear(0);
pm_freezing = true;
error = try_to_freeze_tasks(true);
if (!error)
diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c
index 501df0676a61..645f42e40478 100644
--- a/kernel/power/snapshot.c
+++ b/kernel/power/snapshot.c
@@ -363,7 +363,7 @@ static void *chain_alloc(struct chain_allocator *ca, unsigned int size)
*
* One radix tree is represented by one struct mem_zone_bm_rtree. There are
* two linked lists for the nodes of the tree, one for the inner nodes and
- * one for the leave nodes. The linked leave nodes are used for fast linear
+ * one for the leaf nodes. The linked leaf nodes are used for fast linear
* access of the memory bitmap.
*
* The struct rtree_node represents one node of the radix tree.
diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c
index b4ca17c2fecf..4bb4686c1c08 100644
--- a/kernel/power/suspend.c
+++ b/kernel/power/suspend.c
@@ -595,6 +595,7 @@ static int enter_state(suspend_state_t state)
}
pm_pr_dbg("Preparing system for sleep (%s)\n", mem_sleep_labels[state]);
+ pm_wakeup_clear(0);
pm_suspend_clear_flags();
error = suspend_prepare(state);
if (error)
diff --git a/kernel/power/swap.c b/kernel/power/swap.c
index ad13c461b657..0beff7eeaaba 100644
--- a/kernel/power/swap.c
+++ b/kernel/power/swap.c
@@ -712,7 +712,7 @@ static int save_compressed_image(struct swap_map_handle *handle,
goto out_clean;
}
- data = vzalloc(array_size(nr_threads, sizeof(*data)));
+ data = vcalloc(nr_threads, sizeof(*data));
if (!data) {
pr_err("Failed to allocate %s data\n", hib_comp_algo);
ret = -ENOMEM;
@@ -1225,14 +1225,14 @@ static int load_compressed_image(struct swap_map_handle *handle,
nr_threads = num_online_cpus() - 1;
nr_threads = clamp_val(nr_threads, 1, CMP_THREADS);
- page = vmalloc(array_size(CMP_MAX_RD_PAGES, sizeof(*page)));
+ page = vmalloc_array(CMP_MAX_RD_PAGES, sizeof(*page));
if (!page) {
pr_err("Failed to allocate %s page\n", hib_comp_algo);
ret = -ENOMEM;
goto out_clean;
}
- data = vzalloc(array_size(nr_threads, sizeof(*data)));
+ data = vcalloc(nr_threads, sizeof(*data));
if (!data) {
pr_err("Failed to allocate %s data\n", hib_comp_algo);
ret = -ENOMEM;
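vcalloc() and vmalloc_array() perform the same multiply-overflow check that
array_size() provided, but inside the allocator, so call sites no longer spell
out the arithmetic. A short equivalence sketch:

	data = vzalloc(array_size(n, sizeof(*data)));	/* old: zeroed */
	data = vcalloc(n, sizeof(*data));		/* new: zeroed */

	page = vmalloc(array_size(n, sizeof(*page)));	/* old: not zeroed */
	page = vmalloc_array(n, sizeof(*page));		/* new: not zeroed */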
diff --git a/rust/kernel/cpufreq.rs b/rust/kernel/cpufreq.rs
index 86c02e81729e..29bdf7fe80a1 100644
--- a/rust/kernel/cpufreq.rs
+++ b/rust/kernel/cpufreq.rs
@@ -542,7 +542,7 @@ impl Policy {
pub fn cpus(&mut self) -> &mut cpumask::Cpumask {
// SAFETY: The pointer to `cpus` is valid for writing and remains valid for the lifetime of
// the returned reference.
- unsafe { cpumask::CpumaskVar::as_mut_ref(&mut self.as_mut_ref().cpus) }
+ unsafe { cpumask::CpumaskVar::from_raw_mut(&mut self.as_mut_ref().cpus) }
}
/// Sets clock for the [`Policy`].
diff --git a/rust/kernel/cpumask.rs b/rust/kernel/cpumask.rs
index 3fcbff438670..c1d17826ae7b 100644
--- a/rust/kernel/cpumask.rs
+++ b/rust/kernel/cpumask.rs
@@ -212,6 +212,7 @@ impl Cpumask {
/// }
/// assert_eq!(mask2.weight(), count);
/// ```
+#[repr(transparent)]
pub struct CpumaskVar {
#[cfg(CONFIG_CPUMASK_OFFSTACK)]
ptr: NonNull<Cpumask>,
@@ -270,7 +271,7 @@ impl CpumaskVar {
///
/// The caller must ensure that `ptr` is valid for writing and remains valid for the lifetime
/// of the returned reference.
- pub unsafe fn as_mut_ref<'a>(ptr: *mut bindings::cpumask_var_t) -> &'a mut Self {
+ pub unsafe fn from_raw_mut<'a>(ptr: *mut bindings::cpumask_var_t) -> &'a mut Self {
// SAFETY: Guaranteed by the safety requirements of the function.
//
// INVARIANT: The caller ensures that `ptr` is valid for writing and remains valid for the
@@ -284,7 +285,7 @@ impl CpumaskVar {
///
/// The caller must ensure that `ptr` is valid for reading and remains valid for the lifetime
/// of the returned reference.
- pub unsafe fn as_ref<'a>(ptr: *const bindings::cpumask_var_t) -> &'a Self {
+ pub unsafe fn from_raw<'a>(ptr: *const bindings::cpumask_var_t) -> &'a Self {
// SAFETY: Guaranteed by the safety requirements of the function.
//
// INVARIANT: The caller ensures that `ptr` is valid for reading and remains valid for the
diff --git a/rust/kernel/opp.rs b/rust/kernel/opp.rs
index 08126035d2c6..2c763fa9276d 100644
--- a/rust/kernel/opp.rs
+++ b/rust/kernel/opp.rs
@@ -12,11 +12,12 @@ use crate::{
clk::Hertz,
cpumask::{Cpumask, CpumaskVar},
device::Device,
- error::{code::*, from_err_ptr, from_result, to_result, Error, Result, VTABLE_DEFAULT_ERROR},
+ error::{code::*, from_err_ptr, from_result, to_result, Result, VTABLE_DEFAULT_ERROR},
ffi::c_ulong,
prelude::*,
str::CString,
- types::{ARef, AlwaysRefCounted, Opaque},
+ sync::aref::{ARef, AlwaysRefCounted},
+ types::Opaque,
};
#[cfg(CONFIG_CPU_FREQ)]
@@ -162,7 +163,7 @@ impl From<MicroWatt> for c_ulong {
/// use kernel::device::Device;
/// use kernel::error::Result;
/// use kernel::opp::{Data, MicroVolt, Token};
-/// use kernel::types::ARef;
+/// use kernel::sync::aref::ARef;
///
/// fn create_opp(dev: &ARef<Device>, freq: Hertz, volt: MicroVolt, level: u32) -> Result<Token> {
/// let data = Data::new(freq, volt, level, false);
@@ -211,7 +212,7 @@ impl Drop for Token {
/// use kernel::device::Device;
/// use kernel::error::Result;
/// use kernel::opp::{Data, MicroVolt, Token};
-/// use kernel::types::ARef;
+/// use kernel::sync::aref::ARef;
///
/// fn create_opp(dev: &ARef<Device>, freq: Hertz, volt: MicroVolt, level: u32) -> Result<Token> {
/// let data = Data::new(freq, volt, level, false);
@@ -262,7 +263,7 @@ impl Data {
/// use kernel::clk::Hertz;
/// use kernel::error::Result;
/// use kernel::opp::{OPP, SearchType, Table};
-/// use kernel::types::ARef;
+/// use kernel::sync::aref::ARef;
///
/// fn find_opp(table: &Table, freq: Hertz) -> Result<ARef<OPP>> {
/// let opp = table.opp_from_freq(freq, Some(true), None, SearchType::Exact)?;
@@ -335,7 +336,7 @@ impl Drop for ConfigToken {
/// use kernel::error::Result;
/// use kernel::opp::{Config, ConfigOps, ConfigToken};
/// use kernel::str::CString;
-/// use kernel::types::ARef;
+/// use kernel::sync::aref::ARef;
/// use kernel::macros::vtable;
///
/// #[derive(Default)]
@@ -500,11 +501,8 @@ impl<T: ConfigOps + Default> Config<T> {
// requirements. The OPP core guarantees not to access fields of [`Config`] after this call
// and so we don't need to save a copy of them for future use.
let ret = unsafe { bindings::dev_pm_opp_set_config(dev.as_raw(), &mut config) };
- if ret < 0 {
- Err(Error::from_errno(ret))
- } else {
- Ok(ConfigToken(ret))
- }
+
+ to_result(ret).map(|()| ConfigToken(ret))
}
/// Config's clk callback.
@@ -581,7 +579,7 @@ impl<T: ConfigOps + Default> Config<T> {
/// use kernel::device::Device;
/// use kernel::error::Result;
/// use kernel::opp::Table;
-/// use kernel::types::ARef;
+/// use kernel::sync::aref::ARef;
///
/// fn get_table(dev: &ARef<Device>, mask: &mut Cpumask, freq: Hertz) -> Result<Table> {
/// let mut opp_table = Table::from_of_cpumask(dev, mask)?;
@@ -713,11 +711,8 @@ impl Table {
// SAFETY: The requirements are satisfied by the existence of [`Device`] and its safety
// requirements.
let ret = unsafe { bindings::dev_pm_opp_get_opp_count(self.dev.as_raw()) };
- if ret < 0 {
- Err(Error::from_errno(ret))
- } else {
- Ok(ret as u32)
- }
+
+ to_result(ret).map(|()| ret as u32)
}
/// Returns max clock latency (in nanoseconds) of the [`OPP`]s in the [`Table`].
diff --git a/tools/power/cpupower/lib/cpuidle.c b/tools/power/cpupower/lib/cpuidle.c
index 0ecac009273c..f2c1139adf71 100644
--- a/tools/power/cpupower/lib/cpuidle.c
+++ b/tools/power/cpupower/lib/cpuidle.c
@@ -233,6 +233,7 @@ int cpuidle_state_disable(unsigned int cpu,
{
char value[SYSFS_PATH_MAX];
int bytes_written;
+ int len;
if (cpuidle_state_count(cpu) <= idlestate)
return -1;
@@ -241,10 +242,10 @@ int cpuidle_state_disable(unsigned int cpu,
idlestate_value_files[IDLESTATE_DISABLE]))
return -2;
- snprintf(value, SYSFS_PATH_MAX, "%u", disable);
+ len = snprintf(value, SYSFS_PATH_MAX, "%u", disable);
bytes_written = cpuidle_state_write_file(cpu, idlestate, "disable",
- value, sizeof(disable));
+ value, len);
if (bytes_written)
return 0;
return -3;
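The bug being fixed: sizeof(disable) is the size of the unsigned int (typically
4 bytes), not the length of the string snprintf() produced, so the sysfs write
could include the terminating NUL plus uninitialized bytes, or truncate values
longer than four digits. snprintf() returns the number of characters written
(excluding the NUL), which is exactly the byte count the write needs:

	char value[SYSFS_PATH_MAX];
	int len;

	len = snprintf(value, sizeof(value), "%u", disable);
	/* e.g. disable == 1 gives len == 1; write len bytes, not sizeof(int) */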
diff --git a/tools/power/cpupower/lib/cpupower.c b/tools/power/cpupower/lib/cpupower.c
index ce8dfb8e46ab..d7f7ec6f151c 100644
--- a/tools/power/cpupower/lib/cpupower.c
+++ b/tools/power/cpupower/lib/cpupower.c
@@ -56,7 +56,7 @@ unsigned int cpupower_write_sysfs(const char *path, char *buf, size_t buflen)
if (numwritten < 1) {
perror(path);
close(fd);
- return -1;
+ return 0;
}
close(fd);
diff --git a/tools/power/x86/amd_pstate_tracer/amd_pstate_trace.py b/tools/power/x86/amd_pstate_tracer/amd_pstate_trace.py
index feb9f9421c7b..875b086550d1 100755
--- a/tools/power/x86/amd_pstate_tracer/amd_pstate_trace.py
+++ b/tools/power/x86/amd_pstate_tracer/amd_pstate_trace.py
@@ -11,7 +11,7 @@ Prerequisites:
gnuplot 5.0 or higher
gnuplot-py 1.8 or higher
(Most of the distributions have these required packages. They may be called
- gnuplot-py, phython-gnuplot or phython3-gnuplot, gnuplot-nox, ... )
+ gnuplot-py, python-gnuplot or python3-gnuplot, gnuplot-nox, ... )
Kernel config for Linux trace is enabled
diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c
index 47eb2d4d13a5..f2512d78bcbd 100644
--- a/tools/power/x86/turbostat/turbostat.c
+++ b/tools/power/x86/turbostat/turbostat.c
@@ -1890,7 +1890,7 @@ int pmt_telemdir_sort(const struct dirent **a, const struct dirent **b)
sscanf((*a)->d_name, "telem%u", &aidx);
sscanf((*b)->d_name, "telem%u", &bidx);
- return aidx >= bidx;
+ return (aidx > bidx) ? 1 : (aidx < bidx) ? -1 : 0;
}
const struct dirent *pmt_diriter_next(struct pmt_diriter_t *iter)
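A scandir()/qsort()-style comparator must return a negative, zero, or positive
three-way result; "aidx >= bidx" returns only 0 or 1, which is not a consistent
ordering and can mis-sort the telemetry directories. The fixed expression is the
standard overflow-safe three-way compare, as in this self-contained sketch:

	/* Three-way compare without the overflow risk of "a - b". */
	static int cmp_uint(unsigned int a, unsigned int b)
	{
		return (a > b) ? 1 : (a < b) ? -1 : 0;
	}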
diff --git a/tools/power/x86/x86_energy_perf_policy/Makefile b/tools/power/x86/x86_energy_perf_policy/Makefile
index 666b325a62a2..d18284667400 100644
--- a/tools/power/x86/x86_energy_perf_policy/Makefile
+++ b/tools/power/x86/x86_energy_perf_policy/Makefile
@@ -1,8 +1,12 @@
# SPDX-License-Identifier: GPL-2.0
CC = $(CROSS_COMPILE)gcc
-BUILD_OUTPUT := $(CURDIR)
+BUILD_OUTPUT := $(CURDIR)
PREFIX := /usr
DESTDIR :=
+DAY := $(shell date +%Y.%m.%d)
+SNAPSHOT = x86_energy_perf_policy-$(DAY)
+
+
ifeq ("$(origin O)", "command line")
BUILD_OUTPUT := $(O)
@@ -27,3 +31,26 @@ install : x86_energy_perf_policy
install -d $(DESTDIR)$(PREFIX)/share/man/man8
install -m 644 x86_energy_perf_policy.8 $(DESTDIR)$(PREFIX)/share/man/man8
+snapshot: x86_energy_perf_policy
+ @rm -rf $(SNAPSHOT)
+ @mkdir $(SNAPSHOT)
+ @cp x86_energy_perf_policy Makefile x86_energy_perf_policy.c x86_energy_perf_policy.8 $(SNAPSHOT)
+
+ @sed -e 's/^#include <linux\/bits.h>/#include "bits.h"/' -e 's/u64/unsigned long long/' ../../../../arch/x86/include/asm/msr-index.h > $(SNAPSHOT)/msr-index.h
+ @echo '#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))' >> $(SNAPSHOT)/msr-index.h
+ @echo "#define BIT(x) (1 << (x))" > $(SNAPSHOT)/bits.h
+ @echo "#define BIT_ULL(nr) (1ULL << (nr))" >> $(SNAPSHOT)/bits.h
+ @echo "#define GENMASK(h, l) (((~0UL) << (l)) & (~0UL >> (sizeof(long) * 8 - 1 - (h))))" >> $(SNAPSHOT)/bits.h
+ @echo "#define GENMASK_ULL(h, l) (((~0ULL) << (l)) & (~0ULL >> (sizeof(long long) * 8 - 1 - (h))))" >> $(SNAPSHOT)/bits.h
+
+ @echo '#define BUILD_BUG_ON(cond) do { enum { compile_time_check ## __COUNTER__ = 1/(!(cond)) }; } while (0)' > $(SNAPSHOT)/build_bug.h
+ @echo '#define __must_be_array(arr) 0' >> $(SNAPSHOT)/build_bug.h
+
+ @echo PWD=. > $(SNAPSHOT)/Makefile
+ @echo "CFLAGS += -DMSRHEADER='\"msr-index.h\"'" >> $(SNAPSHOT)/Makefile
+ @echo "CFLAGS += -DBUILD_BUG_HEADER='\"build_bug.h\"'" >> $(SNAPSHOT)/Makefile
+ @sed -e's/.*MSRHEADER.*//' Makefile >> $(SNAPSHOT)/Makefile
+
+ @rm -f $(SNAPSHOT).tar.gz
+ tar cvzf $(SNAPSHOT).tar.gz $(SNAPSHOT)
+
diff --git a/tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.8 b/tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.8
index 78c6361898b1..0aa981c18e56 100644
--- a/tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.8
+++ b/tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.8
@@ -2,7 +2,7 @@
.\" Distributed under the GPL, Copyleft 1994.
.TH X86_ENERGY_PERF_POLICY 8
.SH NAME
-x86_energy_perf_policy \- Manage Energy vs. Performance Policy via x86 Model Specific Registers
+x86_energy_perf_policy \- Manage Energy vs. Performance Policy
.SH SYNOPSIS
.B x86_energy_perf_policy
.RB "[ options ] [ scope ] [field \ value]"
@@ -19,9 +19,14 @@ x86_energy_perf_policy \- Manage Energy vs. Performance Policy via x86 Model Spe
.SH DESCRIPTION
\fBx86_energy_perf_policy\fP
displays and updates energy-performance policy settings specific to
-Intel Architecture Processors. Settings are accessed via Model Specific Register (MSR)
-updates, no matter if the Linux cpufreq sub-system is enabled or not.
+Intel Architecture Processors. It summarizes settings available
+in standard Linux interfaces (e.g. cpufreq),
+and also decodes the underlying Model Specific Registers (MSRs).
+While \fBx86_energy_perf_policy\fP can manage energy-performance policy
+using only MSR access, it prefers standard
+Linux kernel interfaces, when they are available.
+.SH BACKGROUND
Policy in MSR_IA32_ENERGY_PERF_BIAS (EPB)
may affect a wide range of hardware decisions,
such as how aggressively the hardware enters and exits CPU idle states (C-states)
@@ -200,7 +205,9 @@ runs only as root.
.SH FILES
.ta
.nf
-/dev/cpu/*/msr
+EPB: /sys/devices/system/cpu/cpu*/power/energy_perf_bias
+EPP: /sys/devices/system/cpu/cpu*/cpufreq/energy_performance_preference
+MSR: /dev/cpu/*/msr
.fi
.SH "SEE ALSO"
.nf
diff --git a/tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c b/tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c
index ebda9c366b2b..884a4c746f32 100644
--- a/tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c
+++ b/tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c
@@ -4,7 +4,7 @@
* policy preference bias on recent X86 processors.
*/
/*
- * Copyright (c) 2010 - 2017 Intel Corporation.
+ * Copyright (c) 2010 - 2025 Intel Corporation.
* Len Brown <len.brown@intel.com>
*/
@@ -62,6 +62,7 @@ unsigned char turbo_update_value;
unsigned char update_hwp_epp;
unsigned char update_hwp_min;
unsigned char update_hwp_max;
+unsigned char hwp_limits_done_via_sysfs;
unsigned char update_hwp_desired;
unsigned char update_hwp_window;
unsigned char update_hwp_use_pkg;
@@ -517,7 +518,7 @@ void for_packages(unsigned long long pkg_set, int (func)(int))
void print_version(void)
{
- printf("x86_energy_perf_policy 17.05.11 (C) Len Brown <len.brown@intel.com>\n");
+ printf("x86_energy_perf_policy 2025.9.19 Len Brown <lenb@kernel.org>\n");
}
void cmdline(int argc, char **argv)
@@ -630,7 +631,7 @@ void cmdline(int argc, char **argv)
*/
FILE *fopen_or_die(const char *path, const char *mode)
{
- FILE *filep = fopen(path, "r");
+ FILE *filep = fopen(path, mode);
if (!filep)
err(1, "%s: open failed", path);
@@ -644,7 +645,7 @@ void err_on_hypervisor(void)
char *buffer;
/* On VMs /proc/cpuinfo contains a "flags" entry for hypervisor */
- cpuinfo = fopen_or_die("/proc/cpuinfo", "ro");
+ cpuinfo = fopen_or_die("/proc/cpuinfo", "r");
buffer = malloc(4096);
if (!buffer) {
@@ -809,7 +810,7 @@ void print_hwp_request_pkg(int pkg, struct msr_hwp_request *h, char *str)
h->hwp_min, h->hwp_max, h->hwp_desired, h->hwp_epp,
h->hwp_window, h->hwp_window & 0x7F, (h->hwp_window >> 7) & 0x7);
}
-void read_hwp_request(int cpu, struct msr_hwp_request *hwp_req, unsigned int msr_offset)
+void read_hwp_request_msr(int cpu, struct msr_hwp_request *hwp_req, unsigned int msr_offset)
{
unsigned long long msr;
@@ -823,7 +824,7 @@ void read_hwp_request(int cpu, struct msr_hwp_request *hwp_req, unsigned int msr
hwp_req->hwp_use_pkg = (((msr) >> 42) & 0x1);
}
-void write_hwp_request(int cpu, struct msr_hwp_request *hwp_req, unsigned int msr_offset)
+void write_hwp_request_msr(int cpu, struct msr_hwp_request *hwp_req, unsigned int msr_offset)
{
unsigned long long msr = 0;
@@ -843,7 +844,7 @@ void write_hwp_request(int cpu, struct msr_hwp_request *hwp_req, unsigned int ms
put_msr(cpu, msr_offset, msr);
}
-static int get_epb(int cpu)
+static int get_epb_sysfs(int cpu)
{
char path[SYSFS_PATH_MAX];
char linebuf[3];
@@ -865,7 +866,7 @@ static int get_epb(int cpu)
return (int)val;
}
-static int set_epb(int cpu, int val)
+static int set_epb_sysfs(int cpu, int val)
{
char path[SYSFS_PATH_MAX];
char linebuf[3];
@@ -895,14 +896,14 @@ int print_cpu_msrs(int cpu)
struct msr_hwp_cap cap;
int epb;
- epb = get_epb(cpu);
+ epb = get_epb_sysfs(cpu);
if (epb >= 0)
printf("cpu%d: EPB %u\n", cpu, (unsigned int) epb);
if (!has_hwp)
return 0;
- read_hwp_request(cpu, &req, MSR_HWP_REQUEST);
+ read_hwp_request_msr(cpu, &req, MSR_HWP_REQUEST);
print_hwp_request(cpu, &req, "");
read_hwp_cap(cpu, &cap, MSR_HWP_CAPABILITIES);
@@ -919,7 +920,7 @@ int print_pkg_msrs(int pkg)
if (!has_hwp)
return 0;
- read_hwp_request(first_cpu_in_pkg[pkg], &req, MSR_HWP_REQUEST_PKG);
+ read_hwp_request_msr(first_cpu_in_pkg[pkg], &req, MSR_HWP_REQUEST_PKG);
print_hwp_request_pkg(pkg, &req, "");
if (has_hwp_notify) {
@@ -951,8 +952,10 @@ int ratio_2_sysfs_khz(int ratio)
}
/*
* If HWP is enabled and cpufreq sysfs attributes are present,
- * then update sysfs, so that it will not become
- * stale when we write to MSRs.
+ * then update via sysfs. The intel_pstate driver may modify (clip)
+ * this request, say, when HWP_CAP is outside of PLATFORM_INFO limits,
+ * and the driver-chosen value takes precedence.
+ *
* (intel_pstate's max_perf_pct and min_perf_pct will follow cpufreq,
* so we don't have to touch that.)
*/
@@ -1007,6 +1010,8 @@ int update_sysfs(int cpu)
if (update_hwp_max)
update_cpufreq_scaling_freq(1, cpu, req_update.hwp_max);
+ hwp_limits_done_via_sysfs = 1;
+
return 0;
}
@@ -1074,21 +1079,21 @@ int check_hwp_request_v_hwp_capabilities(int cpu, struct msr_hwp_request *req, s
return 0;
}
-int update_hwp_request(int cpu)
+int update_hwp_request_msr(int cpu)
{
struct msr_hwp_request req;
struct msr_hwp_cap cap;
int msr_offset = MSR_HWP_REQUEST;
- read_hwp_request(cpu, &req, msr_offset);
+ read_hwp_request_msr(cpu, &req, msr_offset);
if (debug)
print_hwp_request(cpu, &req, "old: ");
- if (update_hwp_min)
+ if (update_hwp_min && !hwp_limits_done_via_sysfs)
req.hwp_min = req_update.hwp_min;
- if (update_hwp_max)
+ if (update_hwp_max && !hwp_limits_done_via_sysfs)
req.hwp_max = req_update.hwp_max;
if (update_hwp_desired)
@@ -1111,15 +1116,15 @@ int update_hwp_request(int cpu)
verify_hwp_req_self_consistency(cpu, &req);
- write_hwp_request(cpu, &req, msr_offset);
+ write_hwp_request_msr(cpu, &req, msr_offset);
if (debug) {
- read_hwp_request(cpu, &req, msr_offset);
+ read_hwp_request_msr(cpu, &req, msr_offset);
print_hwp_request(cpu, &req, "new: ");
}
return 0;
}
-int update_hwp_request_pkg(int pkg)
+int update_hwp_request_pkg_msr(int pkg)
{
struct msr_hwp_request req;
struct msr_hwp_cap cap;
@@ -1127,7 +1132,7 @@ int update_hwp_request_pkg(int pkg)
int msr_offset = MSR_HWP_REQUEST_PKG;
- read_hwp_request(cpu, &req, msr_offset);
+ read_hwp_request_msr(cpu, &req, msr_offset);
if (debug)
print_hwp_request_pkg(pkg, &req, "old: ");
@@ -1155,10 +1160,10 @@ int update_hwp_request_pkg(int pkg)
verify_hwp_req_self_consistency(cpu, &req);
- write_hwp_request(cpu, &req, msr_offset);
+ write_hwp_request_msr(cpu, &req, msr_offset);
if (debug) {
- read_hwp_request(cpu, &req, msr_offset);
+ read_hwp_request_msr(cpu, &req, msr_offset);
print_hwp_request_pkg(pkg, &req, "new: ");
}
return 0;
@@ -1166,30 +1171,39 @@ int update_hwp_request_pkg(int pkg)
int enable_hwp_on_cpu(int cpu)
{
- unsigned long long msr;
+ unsigned long long old_msr, new_msr;
+
+ get_msr(cpu, MSR_PM_ENABLE, &old_msr);
+
+ if (old_msr & 1)
+ return 0; /* already enabled */
- get_msr(cpu, MSR_PM_ENABLE, &msr);
- put_msr(cpu, MSR_PM_ENABLE, 1);
+ new_msr = old_msr | 1;
+ put_msr(cpu, MSR_PM_ENABLE, new_msr);
if (verbose)
- printf("cpu%d: MSR_PM_ENABLE old: %d new: %d\n", cpu, (unsigned int) msr, 1);
+ printf("cpu%d: MSR_PM_ENABLE old: %llX new: %llX\n", cpu, old_msr, new_msr);
return 0;
}
-int update_cpu_msrs(int cpu)
+int update_cpu_epb_sysfs(int cpu)
{
- unsigned long long msr;
int epb;
- if (update_epb) {
- epb = get_epb(cpu);
- set_epb(cpu, new_epb);
+ epb = get_epb_sysfs(cpu);
+ set_epb_sysfs(cpu, new_epb);
- if (verbose)
- printf("cpu%d: ENERGY_PERF_BIAS old: %d new: %d\n",
- cpu, epb, (unsigned int) new_epb);
- }
+ if (verbose)
+ printf("cpu%d: ENERGY_PERF_BIAS old: %d new: %d\n",
+ cpu, epb, (unsigned int) new_epb);
+
+ return 0;
+}
+
+int update_cpu_msrs(int cpu)
+{
+ unsigned long long msr;
if (update_turbo) {
int turbo_is_present_and_disabled;
@@ -1224,7 +1238,7 @@ int update_cpu_msrs(int cpu)
if (!hwp_update_enabled())
return 0;
- update_hwp_request(cpu);
+ update_hwp_request_msr(cpu);
return 0;
}
@@ -1312,6 +1326,17 @@ void for_all_cpus_in_set(size_t set_size, cpu_set_t *cpu_set, int (func)(int))
if (CPU_ISSET_S(cpu_num, set_size, cpu_set))
func(cpu_num);
}
+int for_all_cpus_in_set_and(size_t set_size, cpu_set_t *cpu_set, int (func)(int))
+{
+ int cpu_num;
+ int retval = 1;
+
+ for (cpu_num = 0; cpu_num <= max_cpu_num; ++cpu_num)
+ if (CPU_ISSET_S(cpu_num, set_size, cpu_set))
+ retval &= func(cpu_num);
+
+ return retval;
+}
void init_data_structures(void)
{
@@ -1326,21 +1351,38 @@ void init_data_structures(void)
for_all_proc_cpus(mark_cpu_present);
}
-/* clear has_hwp if it is not enable (or being enabled) */
+int is_hwp_enabled_on_cpu(int cpu_num)
+{
+ unsigned long long msr;
+ int retval;
+
+ /* MSR_PM_ENABLE[1] == 1 if HWP is enabled and MSRs visible */
+ get_msr(cpu_num, MSR_PM_ENABLE, &msr);
+ retval = (msr & 1);
+
+ if (verbose)
+ fprintf(stderr, "cpu%d: %sHWP\n", cpu_num, retval ? "" : "No-");
+
+ return retval;
+}
+/*
+ * verify_hwp_is_enabled()
+ *
+ * Clear has_hwp if the HWP feature is absent, or if any CPU in the selected set does not have HWP enabled.
+ */
void verify_hwp_is_enabled(void)
{
- unsigned long long msr;
+ int retval;
if (!has_hwp) /* set in early_cpuid() */
return;
- /* MSR_PM_ENABLE[1] == 1 if HWP is enabled and MSRs visible */
- get_msr(base_cpu, MSR_PM_ENABLE, &msr);
- if ((msr & 1) == 0) {
+ retval = for_all_cpus_in_set_and(cpu_setsize, cpu_selected_set, is_hwp_enabled_on_cpu);
+
+ if (retval == 0) {
fprintf(stderr, "HWP can be enabled using '--hwp-enable'\n");
has_hwp = 0;
- return;
}
}
@@ -1551,10 +1593,13 @@ int main(int argc, char **argv)
/* update CPU set */
if (cpu_selected_set) {
+ if (update_epb)
+ for_all_cpus_in_set(cpu_setsize, cpu_selected_set, update_cpu_epb_sysfs);
for_all_cpus_in_set(cpu_setsize, cpu_selected_set, update_sysfs);
for_all_cpus_in_set(cpu_setsize, cpu_selected_set, update_cpu_msrs);
+
} else if (pkg_selected_set)
- for_packages(pkg_selected_set, update_hwp_request_pkg);
+ for_packages(pkg_selected_set, update_hwp_request_pkg_msr);
return 0;
}