Diffstat (limited to 'drivers/gpu/drm/amd/pm')
-rw-r--r--  drivers/gpu/drm/amd/pm/amdgpu_dpm.c                          |   5
-rw-r--r--  drivers/gpu/drm/amd/pm/amdgpu_pm.c                           | 641
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c                    |  29
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_12_pmfw.h | 138
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_pmfw.h  | 115
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_ppsmc.h |   2
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h                 |   2
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h                 |   2
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c               |  12
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu13/Makefile                  |   2
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c               |   5
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c         |   6
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_12_ppt.c        | 101
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c         | 238
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.h         |   8
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c         |   1
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c               |   2
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c                       |   3
18 files changed, 772 insertions, 540 deletions
diff --git a/drivers/gpu/drm/amd/pm/amdgpu_dpm.c b/drivers/gpu/drm/amd/pm/amdgpu_dpm.c
index 6a9e26905edf..faae9bf48aa4 100644
--- a/drivers/gpu/drm/amd/pm/amdgpu_dpm.c
+++ b/drivers/gpu/drm/amd/pm/amdgpu_dpm.c
@@ -78,7 +78,7 @@ int amdgpu_dpm_set_powergating_by_smu(struct amdgpu_device *adev,
int ret = 0;
const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
enum ip_power_state pwr_state = gate ? POWER_STATE_OFF : POWER_STATE_ON;
- bool is_vcn = (block_type == AMD_IP_BLOCK_TYPE_UVD || block_type == AMD_IP_BLOCK_TYPE_VCN);
+ bool is_vcn = block_type == AMD_IP_BLOCK_TYPE_VCN;
if (atomic_read(&adev->pm.pwr_state[block_type]) == pwr_state &&
(!is_vcn || adev->vcn.num_vcn_inst == 1)) {
@@ -716,6 +716,9 @@ int amdgpu_dpm_send_rma_reason(struct amdgpu_device *adev)
ret = smu_send_rma_reason(smu);
mutex_unlock(&adev->pm.mutex);
+ if (amdgpu_cper_generate_bp_threshold_record(adev))
+ dev_warn(adev->dev, "failed to generate bad page threshold cper records\n");
+
return ret;
}
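
The hunk above makes the CPER record best-effort: a logging failure is warned about but does not override the return code of the RMA message itself. A hedged sketch of the resulting function, reconstructed from the context lines shown here (not verbatim kernel source):

int amdgpu_dpm_send_rma_reason(struct amdgpu_device *adev)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret;

	mutex_lock(&adev->pm.mutex);
	ret = smu_send_rma_reason(smu);
	mutex_unlock(&adev->pm.mutex);

	/* Best-effort: a CPER failure must not mask the RMA result */
	if (amdgpu_cper_generate_bp_threshold_record(adev))
		dev_warn(adev->dev, "failed to generate bad page threshold cper records\n");

	return ret;
}
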
diff --git a/drivers/gpu/drm/amd/pm/amdgpu_pm.c b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
index e8ae7681bf0a..1d04f1b79ded 100644
--- a/drivers/gpu/drm/amd/pm/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
@@ -98,6 +98,85 @@ const char * const amdgpu_pp_profile_name[] = {
};
/**
+ * amdgpu_pm_dev_state_check - Check if device can be accessed.
+ * @adev: Target device.
+ * @runpm: Check runpm status for suspend state checks.
+ *
+ * Checks the state of @adev for access. Return 0 if the device is
+ * accessible or a negative error code otherwise.
+ */
+static int amdgpu_pm_dev_state_check(struct amdgpu_device *adev, bool runpm)
+{
+ bool runpm_check = runpm ? adev->in_runpm : false;
+
+ if (amdgpu_in_reset(adev))
+ return -EPERM;
+ if (adev->in_suspend && !runpm_check)
+ return -EPERM;
+
+ return 0;
+}
+
+/**
+ * amdgpu_pm_get_access - Check if device can be accessed, resume if needed.
+ * @adev: Target device.
+ *
+ * Checks the state of @adev for access. Uses the runtime PM API to resume if
+ * needed. Return 0 if the device is accessible or a negative error code
+ * otherwise.
+ */
+static int amdgpu_pm_get_access(struct amdgpu_device *adev)
+{
+ int ret;
+
+ ret = amdgpu_pm_dev_state_check(adev, true);
+ if (ret)
+ return ret;
+
+ return pm_runtime_resume_and_get(adev->dev);
+}
+
+/**
+ * amdgpu_pm_get_access_if_active - Check if device is active for access.
+ * @adev: Target device.
+ *
+ * Checks the state of @adev for access. Uses the runtime PM API to determine
+ * whether the device is active and allows access only in that case. Return 0 if the
+ * device is accessible or a negative error code otherwise.
+ */
+static int amdgpu_pm_get_access_if_active(struct amdgpu_device *adev)
+{
+ int ret;
+
+ /* Ignore runpm status. If device is in suspended state, deny access */
+ ret = amdgpu_pm_dev_state_check(adev, false);
+ if (ret)
+ return ret;
+
+ /*
+ * Allow access only if the device is active. If runtime PM is disabled, as
+ * in kernels without CONFIG_PM, access is allowed as well.
+ */
+ ret = pm_runtime_get_if_active(adev->dev);
+ if (!ret)
+ return -EPERM;
+
+ return 0;
+}
+
+/**
+ * amdgpu_pm_put_access - Put to auto suspend mode after a device access.
+ * @adev: Target device.
+ *
+ * Should be paired with amdgpu_pm_get_access*() calls.
+ */
+static inline void amdgpu_pm_put_access(struct amdgpu_device *adev)
+{
+ pm_runtime_mark_last_busy(adev->dev);
+ pm_runtime_put_autosuspend(adev->dev);
+}
+
+/**
* DOC: power_dpm_state
*
* The power_dpm_state file is a legacy interface and is only provided for
@@ -140,18 +219,13 @@ static ssize_t amdgpu_get_power_dpm_state(struct device *dev,
enum amd_pm_state_type pm;
int ret;
- if (amdgpu_in_reset(adev))
- return -EPERM;
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
-
- ret = pm_runtime_get_if_active(ddev->dev);
- if (ret <= 0)
- return ret ?: -EPERM;
+ ret = amdgpu_pm_get_access_if_active(adev);
+ if (ret)
+ return ret;
amdgpu_dpm_get_current_power_state(adev, &pm);
- pm_runtime_put_autosuspend(ddev->dev);
+ amdgpu_pm_put_access(adev);
return sysfs_emit(buf, "%s\n",
(pm == POWER_STATE_TYPE_BATTERY) ? "battery" :
@@ -168,11 +242,6 @@ static ssize_t amdgpu_set_power_dpm_state(struct device *dev,
enum amd_pm_state_type state;
int ret;
- if (amdgpu_in_reset(adev))
- return -EPERM;
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
-
if (strncmp("battery", buf, strlen("battery")) == 0)
state = POWER_STATE_TYPE_BATTERY;
else if (strncmp("balanced", buf, strlen("balanced")) == 0)
@@ -182,14 +251,13 @@ static ssize_t amdgpu_set_power_dpm_state(struct device *dev,
else
return -EINVAL;
- ret = pm_runtime_resume_and_get(ddev->dev);
+ ret = amdgpu_pm_get_access(adev);
if (ret < 0)
return ret;
amdgpu_dpm_set_power_state(adev, state);
- pm_runtime_mark_last_busy(ddev->dev);
- pm_runtime_put_autosuspend(ddev->dev);
+ amdgpu_pm_put_access(adev);
return count;
}
@@ -263,18 +331,13 @@ static ssize_t amdgpu_get_power_dpm_force_performance_level(struct device *dev,
enum amd_dpm_forced_level level = 0xff;
int ret;
- if (amdgpu_in_reset(adev))
- return -EPERM;
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
-
- ret = pm_runtime_get_if_active(ddev->dev);
- if (ret <= 0)
- return ret ?: -EPERM;
+ ret = amdgpu_pm_get_access_if_active(adev);
+ if (ret)
+ return ret;
level = amdgpu_dpm_get_performance_level(adev);
- pm_runtime_put_autosuspend(ddev->dev);
+ amdgpu_pm_put_access(adev);
return sysfs_emit(buf, "%s\n",
(level == AMD_DPM_FORCED_LEVEL_AUTO) ? "auto" :
@@ -299,11 +362,6 @@ static ssize_t amdgpu_set_power_dpm_force_performance_level(struct device *dev,
enum amd_dpm_forced_level level;
int ret = 0;
- if (amdgpu_in_reset(adev))
- return -EPERM;
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
-
if (strncmp("low", buf, strlen("low")) == 0) {
level = AMD_DPM_FORCED_LEVEL_LOW;
} else if (strncmp("high", buf, strlen("high")) == 0) {
@@ -328,14 +386,13 @@ static ssize_t amdgpu_set_power_dpm_force_performance_level(struct device *dev,
return -EINVAL;
}
- ret = pm_runtime_resume_and_get(ddev->dev);
+ ret = amdgpu_pm_get_access(adev);
if (ret < 0)
return ret;
mutex_lock(&adev->pm.stable_pstate_ctx_lock);
if (amdgpu_dpm_force_performance_level(adev, level)) {
- pm_runtime_mark_last_busy(ddev->dev);
- pm_runtime_put_autosuspend(ddev->dev);
+ amdgpu_pm_put_access(adev);
mutex_unlock(&adev->pm.stable_pstate_ctx_lock);
return -EINVAL;
}
@@ -343,8 +400,7 @@ static ssize_t amdgpu_set_power_dpm_force_performance_level(struct device *dev,
adev->pm.stable_pstate_ctx = NULL;
mutex_unlock(&adev->pm.stable_pstate_ctx_lock);
- pm_runtime_mark_last_busy(ddev->dev);
- pm_runtime_put_autosuspend(ddev->dev);
+ amdgpu_pm_put_access(adev);
return count;
}
@@ -359,19 +415,14 @@ static ssize_t amdgpu_get_pp_num_states(struct device *dev,
uint32_t i;
int buf_len, ret;
- if (amdgpu_in_reset(adev))
- return -EPERM;
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
-
- ret = pm_runtime_get_if_active(ddev->dev);
- if (ret <= 0)
- return ret ?: -EPERM;
+ ret = amdgpu_pm_get_access_if_active(adev);
+ if (ret)
+ return ret;
if (amdgpu_dpm_get_pp_num_states(adev, &data))
memset(&data, 0, sizeof(data));
- pm_runtime_put_autosuspend(ddev->dev);
+ amdgpu_pm_put_access(adev);
buf_len = sysfs_emit(buf, "states: %d\n", data.nums);
for (i = 0; i < data.nums; i++)
@@ -394,20 +445,15 @@ static ssize_t amdgpu_get_pp_cur_state(struct device *dev,
enum amd_pm_state_type pm = 0;
int i = 0, ret = 0;
- if (amdgpu_in_reset(adev))
- return -EPERM;
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
-
- ret = pm_runtime_get_if_active(ddev->dev);
- if (ret <= 0)
- return ret ?: -EPERM;
+ ret = amdgpu_pm_get_access_if_active(adev);
+ if (ret)
+ return ret;
amdgpu_dpm_get_current_power_state(adev, &pm);
ret = amdgpu_dpm_get_pp_num_states(adev, &data);
- pm_runtime_put_autosuspend(ddev->dev);
+ amdgpu_pm_put_access(adev);
if (ret)
return ret;
@@ -430,11 +476,6 @@ static ssize_t amdgpu_get_pp_force_state(struct device *dev,
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = drm_to_adev(ddev);
- if (amdgpu_in_reset(adev))
- return -EPERM;
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
-
if (adev->pm.pp_force_state_enabled)
return amdgpu_get_pp_cur_state(dev, attr, buf);
else
@@ -453,11 +494,6 @@ static ssize_t amdgpu_set_pp_force_state(struct device *dev,
unsigned long idx;
int ret;
- if (amdgpu_in_reset(adev))
- return -EPERM;
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
-
adev->pm.pp_force_state_enabled = false;
if (strlen(buf) == 1)
@@ -469,7 +505,7 @@ static ssize_t amdgpu_set_pp_force_state(struct device *dev,
idx = array_index_nospec(idx, ARRAY_SIZE(data.states));
- ret = pm_runtime_resume_and_get(ddev->dev);
+ ret = amdgpu_pm_get_access(adev);
if (ret < 0)
return ret;
@@ -490,14 +526,13 @@ static ssize_t amdgpu_set_pp_force_state(struct device *dev,
adev->pm.pp_force_state_enabled = true;
}
- pm_runtime_mark_last_busy(ddev->dev);
- pm_runtime_put_autosuspend(ddev->dev);
+ amdgpu_pm_put_access(adev);
return count;
err_out:
- pm_runtime_mark_last_busy(ddev->dev);
- pm_runtime_put_autosuspend(ddev->dev);
+ amdgpu_pm_put_access(adev);
+
return ret;
}
@@ -521,18 +556,13 @@ static ssize_t amdgpu_get_pp_table(struct device *dev,
char *table = NULL;
int size, ret;
- if (amdgpu_in_reset(adev))
- return -EPERM;
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
-
- ret = pm_runtime_get_if_active(ddev->dev);
- if (ret <= 0)
- return ret ?: -EPERM;
+ ret = amdgpu_pm_get_access_if_active(adev);
+ if (ret)
+ return ret;
size = amdgpu_dpm_get_pp_table(adev, &table);
- pm_runtime_put_autosuspend(ddev->dev);
+ amdgpu_pm_put_access(adev);
if (size <= 0)
return size;
@@ -554,19 +584,13 @@ static ssize_t amdgpu_set_pp_table(struct device *dev,
struct amdgpu_device *adev = drm_to_adev(ddev);
int ret = 0;
- if (amdgpu_in_reset(adev))
- return -EPERM;
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
-
- ret = pm_runtime_resume_and_get(ddev->dev);
+ ret = amdgpu_pm_get_access(adev);
if (ret < 0)
return ret;
ret = amdgpu_dpm_set_pp_table(adev, buf, count);
- pm_runtime_mark_last_busy(ddev->dev);
- pm_runtime_put_autosuspend(ddev->dev);
+ amdgpu_pm_put_access(adev);
if (ret)
return ret;
@@ -735,11 +759,6 @@ static ssize_t amdgpu_set_pp_od_clk_voltage(struct device *dev,
const char delimiter[3] = {' ', '\n', '\0'};
uint32_t type;
- if (amdgpu_in_reset(adev))
- return -EPERM;
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
-
if (count > 127 || count == 0)
return -EINVAL;
@@ -785,7 +804,7 @@ static ssize_t amdgpu_set_pp_od_clk_voltage(struct device *dev,
tmp_str++;
}
- ret = pm_runtime_resume_and_get(ddev->dev);
+ ret = amdgpu_pm_get_access(adev);
if (ret < 0)
return ret;
@@ -806,14 +825,13 @@ static ssize_t amdgpu_set_pp_od_clk_voltage(struct device *dev,
goto err_out;
}
- pm_runtime_mark_last_busy(ddev->dev);
- pm_runtime_put_autosuspend(ddev->dev);
+ amdgpu_pm_put_access(adev);
return count;
err_out:
- pm_runtime_mark_last_busy(ddev->dev);
- pm_runtime_put_autosuspend(ddev->dev);
+ amdgpu_pm_put_access(adev);
+
return -EINVAL;
}
@@ -835,14 +853,9 @@ static ssize_t amdgpu_get_pp_od_clk_voltage(struct device *dev,
};
uint clk_index;
- if (amdgpu_in_reset(adev))
- return -EPERM;
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
-
- ret = pm_runtime_get_if_active(ddev->dev);
- if (ret <= 0)
- return ret ?: -EPERM;
+ ret = amdgpu_pm_get_access_if_active(adev);
+ if (ret)
+ return ret;
for (clk_index = 0 ; clk_index < 6 ; clk_index++) {
ret = amdgpu_dpm_emit_clock_levels(adev, od_clocks[clk_index], buf, &size);
@@ -861,7 +874,7 @@ static ssize_t amdgpu_get_pp_od_clk_voltage(struct device *dev,
if (size == 0)
size = sysfs_emit(buf, "\n");
- pm_runtime_put_autosuspend(ddev->dev);
+ amdgpu_pm_put_access(adev);
return size;
}
@@ -892,23 +905,17 @@ static ssize_t amdgpu_set_pp_features(struct device *dev,
uint64_t featuremask;
int ret;
- if (amdgpu_in_reset(adev))
- return -EPERM;
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
-
ret = kstrtou64(buf, 0, &featuremask);
if (ret)
return -EINVAL;
- ret = pm_runtime_resume_and_get(ddev->dev);
+ ret = amdgpu_pm_get_access(adev);
if (ret < 0)
return ret;
ret = amdgpu_dpm_set_ppfeature_status(adev, featuremask);
- pm_runtime_mark_last_busy(ddev->dev);
- pm_runtime_put_autosuspend(ddev->dev);
+ amdgpu_pm_put_access(adev);
if (ret)
return -EINVAL;
@@ -925,20 +932,15 @@ static ssize_t amdgpu_get_pp_features(struct device *dev,
ssize_t size;
int ret;
- if (amdgpu_in_reset(adev))
- return -EPERM;
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
-
- ret = pm_runtime_get_if_active(ddev->dev);
- if (ret <= 0)
- return ret ?: -EPERM;
+ ret = amdgpu_pm_get_access_if_active(adev);
+ if (ret)
+ return ret;
size = amdgpu_dpm_get_ppfeature_status(adev, buf);
if (size <= 0)
size = sysfs_emit(buf, "\n");
- pm_runtime_put_autosuspend(ddev->dev);
+ amdgpu_pm_put_access(adev);
return size;
}
@@ -991,14 +993,9 @@ static ssize_t amdgpu_get_pp_dpm_clock(struct device *dev,
int size = 0;
int ret = 0;
- if (amdgpu_in_reset(adev))
- return -EPERM;
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
-
- ret = pm_runtime_get_if_active(ddev->dev);
- if (ret <= 0)
- return ret ?: -EPERM;
+ ret = amdgpu_pm_get_access_if_active(adev);
+ if (ret)
+ return ret;
ret = amdgpu_dpm_emit_clock_levels(adev, type, buf, &size);
if (ret == -ENOENT)
@@ -1007,7 +1004,7 @@ static ssize_t amdgpu_get_pp_dpm_clock(struct device *dev,
if (size == 0)
size = sysfs_emit(buf, "\n");
- pm_runtime_put_autosuspend(ddev->dev);
+ amdgpu_pm_put_access(adev);
return size;
}
@@ -1057,23 +1054,17 @@ static ssize_t amdgpu_set_pp_dpm_clock(struct device *dev,
int ret;
uint32_t mask = 0;
- if (amdgpu_in_reset(adev))
- return -EPERM;
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
-
ret = amdgpu_read_mask(buf, count, &mask);
if (ret)
return ret;
- ret = pm_runtime_resume_and_get(ddev->dev);
+ ret = amdgpu_pm_get_access(adev);
if (ret < 0)
return ret;
ret = amdgpu_dpm_force_clock_level(adev, type, mask);
- pm_runtime_mark_last_busy(ddev->dev);
- pm_runtime_put_autosuspend(ddev->dev);
+ amdgpu_pm_put_access(adev);
if (ret)
return -EINVAL;
@@ -1240,18 +1231,13 @@ static ssize_t amdgpu_get_pp_sclk_od(struct device *dev,
uint32_t value = 0;
int ret;
- if (amdgpu_in_reset(adev))
- return -EPERM;
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
-
- ret = pm_runtime_get_if_active(ddev->dev);
- if (ret <= 0)
- return ret ?: -EPERM;
+ ret = amdgpu_pm_get_access_if_active(adev);
+ if (ret)
+ return ret;
value = amdgpu_dpm_get_sclk_od(adev);
- pm_runtime_put_autosuspend(ddev->dev);
+ amdgpu_pm_put_access(adev);
return sysfs_emit(buf, "%d\n", value);
}
@@ -1266,24 +1252,18 @@ static ssize_t amdgpu_set_pp_sclk_od(struct device *dev,
int ret;
long int value;
- if (amdgpu_in_reset(adev))
- return -EPERM;
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
-
ret = kstrtol(buf, 0, &value);
if (ret)
return -EINVAL;
- ret = pm_runtime_resume_and_get(ddev->dev);
+ ret = amdgpu_pm_get_access(adev);
if (ret < 0)
return ret;
amdgpu_dpm_set_sclk_od(adev, (uint32_t)value);
- pm_runtime_mark_last_busy(ddev->dev);
- pm_runtime_put_autosuspend(ddev->dev);
+ amdgpu_pm_put_access(adev);
return count;
}
@@ -1297,18 +1277,13 @@ static ssize_t amdgpu_get_pp_mclk_od(struct device *dev,
uint32_t value = 0;
int ret;
- if (amdgpu_in_reset(adev))
- return -EPERM;
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
-
- ret = pm_runtime_get_if_active(ddev->dev);
- if (ret <= 0)
- return ret ?: -EPERM;
+ ret = amdgpu_pm_get_access_if_active(adev);
+ if (ret)
+ return ret;
value = amdgpu_dpm_get_mclk_od(adev);
- pm_runtime_put_autosuspend(ddev->dev);
+ amdgpu_pm_put_access(adev);
return sysfs_emit(buf, "%d\n", value);
}
@@ -1323,24 +1298,18 @@ static ssize_t amdgpu_set_pp_mclk_od(struct device *dev,
int ret;
long int value;
- if (amdgpu_in_reset(adev))
- return -EPERM;
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
-
ret = kstrtol(buf, 0, &value);
if (ret)
return -EINVAL;
- ret = pm_runtime_resume_and_get(ddev->dev);
+ ret = amdgpu_pm_get_access(adev);
if (ret < 0)
return ret;
amdgpu_dpm_set_mclk_od(adev, (uint32_t)value);
- pm_runtime_mark_last_busy(ddev->dev);
- pm_runtime_put_autosuspend(ddev->dev);
+ amdgpu_pm_put_access(adev);
return count;
}
@@ -1378,20 +1347,15 @@ static ssize_t amdgpu_get_pp_power_profile_mode(struct device *dev,
ssize_t size;
int ret;
- if (amdgpu_in_reset(adev))
- return -EPERM;
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
-
- ret = pm_runtime_get_if_active(ddev->dev);
- if (ret <= 0)
- return ret ?: -EPERM;
+ ret = amdgpu_pm_get_access_if_active(adev);
+ if (ret)
+ return ret;
size = amdgpu_dpm_get_power_profile_mode(adev, buf);
if (size <= 0)
size = sysfs_emit(buf, "\n");
- pm_runtime_put_autosuspend(ddev->dev);
+ amdgpu_pm_put_access(adev);
return size;
}
@@ -1414,11 +1378,6 @@ static ssize_t amdgpu_set_pp_power_profile_mode(struct device *dev,
long int profile_mode = 0;
const char delimiter[3] = {' ', '\n', '\0'};
- if (amdgpu_in_reset(adev))
- return -EPERM;
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
-
tmp[0] = *(buf);
tmp[1] = '\0';
ret = kstrtol(tmp, 0, &profile_mode);
@@ -1445,14 +1404,13 @@ static ssize_t amdgpu_set_pp_power_profile_mode(struct device *dev,
}
parameter[parameter_size] = profile_mode;
- ret = pm_runtime_resume_and_get(ddev->dev);
+ ret = amdgpu_pm_get_access(adev);
if (ret < 0)
return ret;
ret = amdgpu_dpm_set_power_profile_mode(adev, parameter, parameter_size);
- pm_runtime_mark_last_busy(ddev->dev);
- pm_runtime_put_autosuspend(ddev->dev);
+ amdgpu_pm_put_access(adev);
if (!ret)
return count;
@@ -1466,19 +1424,14 @@ static int amdgpu_hwmon_get_sensor_generic(struct amdgpu_device *adev,
{
int r, size = sizeof(uint32_t);
- if (amdgpu_in_reset(adev))
- return -EPERM;
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
-
- r = pm_runtime_get_if_active(adev->dev);
- if (r <= 0)
- return r ?: -EPERM;
+ r = amdgpu_pm_get_access_if_active(adev);
+ if (r)
+ return r;
/* get the sensor value */
r = amdgpu_dpm_read_sensor(adev, sensor, query, &size);
- pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+ amdgpu_pm_put_access(adev);
return r;
}
@@ -1576,24 +1529,19 @@ static ssize_t amdgpu_get_pcie_bw(struct device *dev,
uint64_t count0 = 0, count1 = 0;
int ret;
- if (amdgpu_in_reset(adev))
- return -EPERM;
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
-
if (adev->flags & AMD_IS_APU)
return -ENODATA;
if (!adev->asic_funcs->get_pcie_usage)
return -ENODATA;
- ret = pm_runtime_get_if_active(ddev->dev);
- if (ret <= 0)
- return ret ?: -EPERM;
+ ret = amdgpu_pm_get_access_if_active(adev);
+ if (ret)
+ return ret;
amdgpu_asic_get_pcie_usage(adev, &count0, &count1);
- pm_runtime_put_autosuspend(ddev->dev);
+ amdgpu_pm_put_access(adev);
return sysfs_emit(buf, "%llu %llu %i\n",
count0, count1, pcie_get_mps(adev->pdev));
@@ -1616,11 +1564,6 @@ static ssize_t amdgpu_get_unique_id(struct device *dev,
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = drm_to_adev(ddev);
- if (amdgpu_in_reset(adev))
- return -EPERM;
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
-
if (adev->unique_id)
return sysfs_emit(buf, "%016llx\n", adev->unique_id);
@@ -1715,9 +1658,9 @@ static ssize_t amdgpu_get_apu_thermal_cap(struct device *dev,
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = drm_to_adev(ddev);
- ret = pm_runtime_get_if_active(ddev->dev);
- if (ret <= 0)
- return ret ?: -EPERM;
+ ret = amdgpu_pm_get_access_if_active(adev);
+ if (ret)
+ return ret;
ret = amdgpu_dpm_get_apu_thermal_limit(adev, &limit);
if (!ret)
@@ -1725,7 +1668,7 @@ static ssize_t amdgpu_get_apu_thermal_cap(struct device *dev,
else
size = sysfs_emit(buf, "failed to get thermal limit\n");
- pm_runtime_put_autosuspend(ddev->dev);
+ amdgpu_pm_put_access(adev);
return size;
}
@@ -1749,20 +1692,18 @@ static ssize_t amdgpu_set_apu_thermal_cap(struct device *dev,
return -EINVAL;
}
- ret = pm_runtime_resume_and_get(ddev->dev);
+ ret = amdgpu_pm_get_access(adev);
if (ret < 0)
return ret;
ret = amdgpu_dpm_set_apu_thermal_limit(adev, value);
if (ret) {
- pm_runtime_mark_last_busy(ddev->dev);
- pm_runtime_put_autosuspend(ddev->dev);
+ amdgpu_pm_put_access(adev);
dev_err(dev, "failed to update thermal limit\n");
return ret;
}
- pm_runtime_mark_last_busy(ddev->dev);
- pm_runtime_put_autosuspend(ddev->dev);
+ amdgpu_pm_put_access(adev);
return count;
}
@@ -1786,18 +1727,13 @@ static ssize_t amdgpu_get_pm_metrics(struct device *dev,
ssize_t size = 0;
int ret;
- if (amdgpu_in_reset(adev))
- return -EPERM;
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
-
- ret = pm_runtime_get_if_active(ddev->dev);
- if (ret <= 0)
- return ret ?: -EPERM;
+ ret = amdgpu_pm_get_access_if_active(adev);
+ if (ret)
+ return ret;
size = amdgpu_dpm_get_pm_metrics(adev, buf, PAGE_SIZE);
- pm_runtime_put_autosuspend(ddev->dev);
+ amdgpu_pm_put_access(adev);
return size;
}
@@ -1824,14 +1760,9 @@ static ssize_t amdgpu_get_gpu_metrics(struct device *dev,
ssize_t size = 0;
int ret;
- if (amdgpu_in_reset(adev))
- return -EPERM;
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
-
- ret = pm_runtime_get_if_active(ddev->dev);
- if (ret <= 0)
- return ret ?: -EPERM;
+ ret = amdgpu_pm_get_access_if_active(adev);
+ if (ret)
+ return ret;
size = amdgpu_dpm_get_gpu_metrics(adev, &gpu_metrics);
if (size <= 0)
@@ -1843,7 +1774,7 @@ static ssize_t amdgpu_get_gpu_metrics(struct device *dev,
memcpy(buf, gpu_metrics, size);
out:
- pm_runtime_put_autosuspend(ddev->dev);
+ amdgpu_pm_put_access(adev);
return size;
}
@@ -1939,19 +1870,14 @@ static ssize_t amdgpu_set_smartshift_bias(struct device *dev,
int r = 0;
int bias = 0;
- if (amdgpu_in_reset(adev))
- return -EPERM;
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
-
- r = pm_runtime_resume_and_get(ddev->dev);
- if (r < 0)
- return r;
-
r = kstrtoint(buf, 10, &bias);
if (r)
goto out;
+ r = amdgpu_pm_get_access(adev);
+ if (r < 0)
+ return r;
+
if (bias > AMDGPU_SMARTSHIFT_MAX_BIAS)
bias = AMDGPU_SMARTSHIFT_MAX_BIAS;
else if (bias < AMDGPU_SMARTSHIFT_MIN_BIAS)
@@ -1963,8 +1889,8 @@ static ssize_t amdgpu_set_smartshift_bias(struct device *dev,
/* TODO: update bias level with SMU message */
out:
- pm_runtime_mark_last_busy(ddev->dev);
- pm_runtime_put_autosuspend(ddev->dev);
+ amdgpu_pm_put_access(adev);
+
return r;
}
@@ -2006,9 +1932,10 @@ static int pp_od_clk_voltage_attr_update(struct amdgpu_device *adev, struct amdg
return 0;
}
- /* Enable pp_od_clk_voltage node for gc 9.4.3 SRIOV/BM support */
+ /* Enable pp_od_clk_voltage node for gc 9.4.3, 9.4.4, 9.5.0 SRIOV/BM support */
if (gc_ver == IP_VERSION(9, 4, 3) ||
- gc_ver == IP_VERSION(9, 4, 4)) {
+ gc_ver == IP_VERSION(9, 4, 4) ||
+ gc_ver == IP_VERSION(9, 5, 0)) {
if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
*states = ATTR_STATE_UNSUPPORTED;
return 0;
@@ -2087,7 +2014,8 @@ static int pp_dpm_clk_default_attr_update(struct amdgpu_device *adev, struct amd
gc_ver == IP_VERSION(11, 0, 2) ||
gc_ver == IP_VERSION(11, 0, 3) ||
gc_ver == IP_VERSION(9, 4, 3) ||
- gc_ver == IP_VERSION(9, 4, 4)))
+ gc_ver == IP_VERSION(9, 4, 4) ||
+ gc_ver == IP_VERSION(9, 5, 0)))
*states = ATTR_STATE_UNSUPPORTED;
} else if (DEVICE_ATTR_IS(pp_dpm_vclk1)) {
if (!((gc_ver == IP_VERSION(10, 3, 1) ||
@@ -2109,7 +2037,8 @@ static int pp_dpm_clk_default_attr_update(struct amdgpu_device *adev, struct amd
gc_ver == IP_VERSION(11, 0, 2) ||
gc_ver == IP_VERSION(11, 0, 3) ||
gc_ver == IP_VERSION(9, 4, 3) ||
- gc_ver == IP_VERSION(9, 4, 4)))
+ gc_ver == IP_VERSION(9, 4, 4) ||
+ gc_ver == IP_VERSION(9, 5, 0)))
*states = ATTR_STATE_UNSUPPORTED;
} else if (DEVICE_ATTR_IS(pp_dpm_dclk1)) {
if (!((gc_ver == IP_VERSION(10, 3, 1) ||
@@ -2120,7 +2049,8 @@ static int pp_dpm_clk_default_attr_update(struct amdgpu_device *adev, struct amd
} else if (DEVICE_ATTR_IS(pp_dpm_pcie)) {
if (gc_ver == IP_VERSION(9, 4, 2) ||
gc_ver == IP_VERSION(9, 4, 3) ||
- gc_ver == IP_VERSION(9, 4, 4))
+ gc_ver == IP_VERSION(9, 4, 4) ||
+ gc_ver == IP_VERSION(9, 5, 0))
*states = ATTR_STATE_UNSUPPORTED;
}
@@ -2218,11 +2148,6 @@ static ssize_t amdgpu_get_pm_policy_attr(struct device *dev,
policy_attr =
container_of(attr, struct amdgpu_pm_policy_attr, dev_attr);
- if (amdgpu_in_reset(adev))
- return -EPERM;
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
-
return amdgpu_dpm_get_pm_policy_info(adev, policy_attr->id, buf);
}
@@ -2239,11 +2164,6 @@ static ssize_t amdgpu_set_pm_policy_attr(struct device *dev,
char *tmp, *param;
long val;
- if (amdgpu_in_reset(adev))
- return -EPERM;
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
-
count = min(count, sizeof(tmp_buf));
memcpy(tmp_buf, buf, count);
tmp_buf[count - 1] = '\0';
@@ -2269,14 +2189,13 @@ static ssize_t amdgpu_set_pm_policy_attr(struct device *dev,
policy_attr =
container_of(attr, struct amdgpu_pm_policy_attr, dev_attr);
- ret = pm_runtime_resume_and_get(ddev->dev);
+ ret = amdgpu_pm_get_access(adev);
if (ret < 0)
return ret;
ret = amdgpu_dpm_set_pm_policy(adev, policy_attr->id, val);
- pm_runtime_mark_last_busy(ddev->dev);
- pm_runtime_put_autosuspend(ddev->dev);
+ amdgpu_pm_put_access(adev);
if (ret)
return ret;
@@ -2416,6 +2335,7 @@ static int default_attr_update(struct amdgpu_device *adev, struct amdgpu_device_
case IP_VERSION(9, 4, 2):
case IP_VERSION(9, 4, 3):
case IP_VERSION(9, 4, 4):
+ case IP_VERSION(9, 5, 0):
case IP_VERSION(10, 3, 0):
case IP_VERSION(11, 0, 0):
case IP_VERSION(11, 0, 1):
@@ -2699,18 +2619,13 @@ static ssize_t amdgpu_hwmon_get_pwm1_enable(struct device *dev,
u32 pwm_mode = 0;
int ret;
- if (amdgpu_in_reset(adev))
- return -EPERM;
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
-
- ret = pm_runtime_get_if_active(adev->dev);
- if (ret <= 0)
- return ret ?: -EPERM;
+ ret = amdgpu_pm_get_access_if_active(adev);
+ if (ret)
+ return ret;
ret = amdgpu_dpm_get_fan_control_mode(adev, &pwm_mode);
- pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+ amdgpu_pm_put_access(adev);
if (ret)
return -EINVAL;
@@ -2728,11 +2643,6 @@ static ssize_t amdgpu_hwmon_set_pwm1_enable(struct device *dev,
u32 pwm_mode;
int value;
- if (amdgpu_in_reset(adev))
- return -EPERM;
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
-
err = kstrtoint(buf, 10, &value);
if (err)
return err;
@@ -2746,14 +2656,13 @@ static ssize_t amdgpu_hwmon_set_pwm1_enable(struct device *dev,
else
return -EINVAL;
- ret = pm_runtime_resume_and_get(adev->dev);
+ ret = amdgpu_pm_get_access(adev);
if (ret < 0)
return ret;
ret = amdgpu_dpm_set_fan_control_mode(adev, pwm_mode);
- pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
- pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+ amdgpu_pm_put_access(adev);
if (ret)
return -EINVAL;
@@ -2784,16 +2693,11 @@ static ssize_t amdgpu_hwmon_set_pwm1(struct device *dev,
u32 value;
u32 pwm_mode;
- if (amdgpu_in_reset(adev))
- return -EPERM;
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
-
err = kstrtou32(buf, 10, &value);
if (err)
return err;
- err = pm_runtime_resume_and_get(adev->dev);
+ err = amdgpu_pm_get_access(adev);
if (err < 0)
return err;
@@ -2810,8 +2714,7 @@ static ssize_t amdgpu_hwmon_set_pwm1(struct device *dev,
err = amdgpu_dpm_set_fan_speed_pwm(adev, value);
out:
- pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
- pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+ amdgpu_pm_put_access(adev);
if (err)
return err;
@@ -2827,18 +2730,13 @@ static ssize_t amdgpu_hwmon_get_pwm1(struct device *dev,
int err;
u32 speed = 0;
- if (amdgpu_in_reset(adev))
- return -EPERM;
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
-
- err = pm_runtime_get_if_active(adev->dev);
- if (err <= 0)
- return err ?: -EPERM;
+ err = amdgpu_pm_get_access_if_active(adev);
+ if (err)
+ return err;
err = amdgpu_dpm_get_fan_speed_pwm(adev, &speed);
- pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+ amdgpu_pm_put_access(adev);
if (err)
return err;
@@ -2854,18 +2752,13 @@ static ssize_t amdgpu_hwmon_get_fan1_input(struct device *dev,
int err;
u32 speed = 0;
- if (amdgpu_in_reset(adev))
- return -EPERM;
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
-
- err = pm_runtime_get_if_active(adev->dev);
- if (err <= 0)
- return err ?: -EPERM;
+ err = amdgpu_pm_get_access_if_active(adev);
+ if (err)
+ return err;
err = amdgpu_dpm_get_fan_speed_rpm(adev, &speed);
- pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+ amdgpu_pm_put_access(adev);
if (err)
return err;
@@ -2915,18 +2808,13 @@ static ssize_t amdgpu_hwmon_get_fan1_target(struct device *dev,
int err;
u32 rpm = 0;
- if (amdgpu_in_reset(adev))
- return -EPERM;
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
-
- err = pm_runtime_get_if_active(adev->dev);
- if (err <= 0)
- return err ?: -EPERM;
+ err = amdgpu_pm_get_access_if_active(adev);
+ if (err)
+ return err;
err = amdgpu_dpm_get_fan_speed_rpm(adev, &rpm);
- pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+ amdgpu_pm_put_access(adev);
if (err)
return err;
@@ -2943,16 +2831,11 @@ static ssize_t amdgpu_hwmon_set_fan1_target(struct device *dev,
u32 value;
u32 pwm_mode;
- if (amdgpu_in_reset(adev))
- return -EPERM;
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
-
err = kstrtou32(buf, 10, &value);
if (err)
return err;
- err = pm_runtime_resume_and_get(adev->dev);
+ err = amdgpu_pm_get_access(adev);
if (err < 0)
return err;
@@ -2968,8 +2851,7 @@ static ssize_t amdgpu_hwmon_set_fan1_target(struct device *dev,
err = amdgpu_dpm_set_fan_speed_rpm(adev, value);
out:
- pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
- pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+ amdgpu_pm_put_access(adev);
if (err)
return err;
@@ -2985,18 +2867,13 @@ static ssize_t amdgpu_hwmon_get_fan1_enable(struct device *dev,
u32 pwm_mode = 0;
int ret;
- if (amdgpu_in_reset(adev))
- return -EPERM;
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
-
- ret = pm_runtime_get_if_active(adev->dev);
- if (ret <= 0)
- return ret ?: -EPERM;
+ ret = amdgpu_pm_get_access_if_active(adev);
+ if (ret)
+ return ret;
ret = amdgpu_dpm_get_fan_control_mode(adev, &pwm_mode);
- pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+ amdgpu_pm_put_access(adev);
if (ret)
return -EINVAL;
@@ -3014,11 +2891,6 @@ static ssize_t amdgpu_hwmon_set_fan1_enable(struct device *dev,
int value;
u32 pwm_mode;
- if (amdgpu_in_reset(adev))
- return -EPERM;
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
-
err = kstrtoint(buf, 10, &value);
if (err)
return err;
@@ -3030,14 +2902,13 @@ static ssize_t amdgpu_hwmon_set_fan1_enable(struct device *dev,
else
return -EINVAL;
- err = pm_runtime_resume_and_get(adev->dev);
+ err = amdgpu_pm_get_access(adev);
if (err < 0)
return err;
err = amdgpu_dpm_set_fan_control_mode(adev, pwm_mode);
- pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
- pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+ amdgpu_pm_put_access(adev);
if (err)
return -EINVAL;
@@ -3152,14 +3023,9 @@ static ssize_t amdgpu_hwmon_show_power_cap_generic(struct device *dev,
ssize_t size;
int r;
- if (amdgpu_in_reset(adev))
- return -EPERM;
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
-
- r = pm_runtime_get_if_active(adev->dev);
- if (r <= 0)
- return r ?: -EPERM;
+ r = amdgpu_pm_get_access_if_active(adev);
+ if (r)
+ return r;
r = amdgpu_dpm_get_power_limit(adev, &limit,
pp_limit_level, power_type);
@@ -3169,7 +3035,7 @@ static ssize_t amdgpu_hwmon_show_power_cap_generic(struct device *dev,
else
size = sysfs_emit(buf, "\n");
- pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+ amdgpu_pm_put_access(adev);
return size;
}
@@ -3230,11 +3096,6 @@ static ssize_t amdgpu_hwmon_set_power_cap(struct device *dev,
int err;
u32 value;
- if (amdgpu_in_reset(adev))
- return -EPERM;
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
-
if (amdgpu_sriov_vf(adev))
return -EINVAL;
@@ -3245,14 +3106,13 @@ static ssize_t amdgpu_hwmon_set_power_cap(struct device *dev,
value = value / 1000000; /* convert to Watt */
value |= limit_type << 24;
- err = pm_runtime_resume_and_get(adev->dev);
+ err = amdgpu_pm_get_access(adev);
if (err < 0)
return err;
err = amdgpu_dpm_set_power_limit(adev, value);
- pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
- pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+ amdgpu_pm_put_access(adev);
if (err)
return err;
@@ -3530,7 +3390,8 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj,
/* Skip crit temp on APU */
if ((((adev->flags & AMD_IS_APU) && (adev->family >= AMDGPU_FAMILY_CZ)) ||
- (gc_ver == IP_VERSION(9, 4, 3) || gc_ver == IP_VERSION(9, 4, 4))) &&
+ (gc_ver == IP_VERSION(9, 4, 3) || gc_ver == IP_VERSION(9, 4, 4) ||
+ gc_ver == IP_VERSION(9, 5, 0))) &&
(attr == &sensor_dev_attr_temp1_crit.dev_attr.attr ||
attr == &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr))
return 0;
@@ -3605,7 +3466,8 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj,
if ((adev->family == AMDGPU_FAMILY_SI || /* not implemented yet */
adev->family == AMDGPU_FAMILY_KV || /* not implemented yet */
(gc_ver == IP_VERSION(9, 4, 3) ||
- gc_ver == IP_VERSION(9, 4, 4))) &&
+ gc_ver == IP_VERSION(9, 4, 4) ||
+ gc_ver == IP_VERSION(9, 5, 0))) &&
(attr == &sensor_dev_attr_in0_input.dev_attr.attr ||
attr == &sensor_dev_attr_in0_label.dev_attr.attr))
return 0;
@@ -3613,7 +3475,8 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj,
/* only APUs other than gc 9,4,3 have vddnb */
if ((!(adev->flags & AMD_IS_APU) ||
(gc_ver == IP_VERSION(9, 4, 3) ||
- gc_ver == IP_VERSION(9, 4, 4))) &&
+ gc_ver == IP_VERSION(9, 4, 4) ||
+ gc_ver == IP_VERSION(9, 5, 0))) &&
(attr == &sensor_dev_attr_in1_input.dev_attr.attr ||
attr == &sensor_dev_attr_in1_label.dev_attr.attr))
return 0;
@@ -3636,7 +3499,8 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj,
/* hotspot temperature for gc 9,4,3*/
if (gc_ver == IP_VERSION(9, 4, 3) ||
- gc_ver == IP_VERSION(9, 4, 4)) {
+ gc_ver == IP_VERSION(9, 4, 4) ||
+ gc_ver == IP_VERSION(9, 5, 0)) {
if (attr == &sensor_dev_attr_temp1_input.dev_attr.attr ||
attr == &sensor_dev_attr_temp1_emergency.dev_attr.attr ||
attr == &sensor_dev_attr_temp1_label.dev_attr.attr)
@@ -3686,20 +3550,15 @@ static int amdgpu_retrieve_od_settings(struct amdgpu_device *adev,
int size = 0;
int ret;
- if (amdgpu_in_reset(adev))
- return -EPERM;
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
-
- ret = pm_runtime_get_if_active(adev->dev);
- if (ret <= 0)
- return ret ?: -EPERM;
+ ret = amdgpu_pm_get_access_if_active(adev);
+ if (ret)
+ return ret;
size = amdgpu_dpm_print_clock_levels(adev, od_type, buf);
if (size == 0)
size = sysfs_emit(buf, "\n");
- pm_runtime_put_autosuspend(adev->dev);
+ amdgpu_pm_put_access(adev);
return size;
}
@@ -3767,11 +3626,6 @@ amdgpu_distribute_custom_od_settings(struct amdgpu_device *adev,
long parameter[64];
int ret;
- if (amdgpu_in_reset(adev))
- return -EPERM;
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
-
ret = parse_input_od_command_lines(in_buf,
count,
&cmd_type,
@@ -3780,7 +3634,7 @@ amdgpu_distribute_custom_od_settings(struct amdgpu_device *adev,
if (ret)
return ret;
- ret = pm_runtime_resume_and_get(adev->dev);
+ ret = amdgpu_pm_get_access(adev);
if (ret < 0)
return ret;
@@ -3799,14 +3653,12 @@ amdgpu_distribute_custom_od_settings(struct amdgpu_device *adev,
goto err_out;
}
- pm_runtime_mark_last_busy(adev->dev);
- pm_runtime_put_autosuspend(adev->dev);
+ amdgpu_pm_put_access(adev);
return count;
err_out:
- pm_runtime_mark_last_busy(adev->dev);
- pm_runtime_put_autosuspend(adev->dev);
+ amdgpu_pm_put_access(adev);
return ret;
}
@@ -4776,16 +4628,10 @@ static void amdgpu_parse_cg_state(struct seq_file *m, u64 flags)
static int amdgpu_debugfs_pm_info_show(struct seq_file *m, void *unused)
{
struct amdgpu_device *adev = (struct amdgpu_device *)m->private;
- struct drm_device *dev = adev_to_drm(adev);
u64 flags = 0;
int r;
- if (amdgpu_in_reset(adev))
- return -EPERM;
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
-
- r = pm_runtime_resume_and_get(dev->dev);
+ r = amdgpu_pm_get_access(adev);
if (r < 0)
return r;
@@ -4802,7 +4648,7 @@ static int amdgpu_debugfs_pm_info_show(struct seq_file *m, void *unused)
seq_printf(m, "\n");
out:
- pm_runtime_put_autosuspend(dev->dev);
+ amdgpu_pm_put_access(adev);
return r;
}
@@ -4822,10 +4668,9 @@ static ssize_t amdgpu_pm_prv_buffer_read(struct file *f, char __user *buf,
void *smu_prv_buf;
int ret = 0;
- if (amdgpu_in_reset(adev))
- return -EPERM;
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
+ ret = amdgpu_pm_dev_state_check(adev, true);
+ if (ret)
+ return ret;
ret = amdgpu_dpm_get_smu_prv_buf_details(adev, &smu_prv_buf, &smu_prv_buf_size);
if (ret)
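
Taken together, the conversions above reduce every sysfs handler in this file to one of two shapes: readers use amdgpu_pm_get_access_if_active() and never resume a suspended device, while writers use amdgpu_pm_get_access() and do. A hedged sketch of the read-side shape, modeled on amdgpu_get_pp_sclk_od (the handler name is hypothetical):

static ssize_t example_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct amdgpu_device *adev = drm_to_adev(dev_get_drvdata(dev));
	uint32_t value;
	int ret;

	/* Readers only take a reference if the device is already active */
	ret = amdgpu_pm_get_access_if_active(adev);
	if (ret)
		return ret;

	value = amdgpu_dpm_get_sclk_od(adev);
	amdgpu_pm_put_access(adev);

	return sysfs_emit(buf, "%d\n", value);
}
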
diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
index 8ca793c222ff..0b32c6cf6924 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
@@ -612,7 +612,8 @@ static int smu_sys_set_pp_table(void *handle,
return -EIO;
}
- if (!smu_table->hardcode_pptable) {
+ if (!smu_table->hardcode_pptable || smu_table->power_play_table_size < size) {
+ kfree(smu_table->hardcode_pptable);
smu_table->hardcode_pptable = kzalloc(size, GFP_KERNEL);
if (!smu_table->hardcode_pptable)
return -ENOMEM;
@@ -693,6 +694,7 @@ static int smu_set_funcs(struct amdgpu_device *adev)
renoir_set_ppt_funcs(smu);
break;
case IP_VERSION(11, 5, 0):
+ case IP_VERSION(11, 5, 2):
vangogh_set_ppt_funcs(smu);
break;
case IP_VERSION(13, 0, 1):
@@ -738,6 +740,7 @@ static int smu_set_funcs(struct amdgpu_device *adev)
case IP_VERSION(14, 0, 0):
case IP_VERSION(14, 0, 1):
case IP_VERSION(14, 0, 4):
+ case IP_VERSION(14, 0, 5):
smu_v14_0_0_set_ppt_funcs(smu);
break;
case IP_VERSION(14, 0, 2):
@@ -1251,26 +1254,11 @@ static void smu_init_xgmi_plpd_mode(struct smu_context *smu)
}
}
-static bool smu_is_workload_profile_available(struct smu_context *smu,
- u32 profile)
-{
- if (profile >= PP_SMC_POWER_PROFILE_COUNT)
- return false;
- return smu->workload_map && smu->workload_map[profile].valid_mapping;
-}
-
static void smu_init_power_profile(struct smu_context *smu)
{
- if (smu->power_profile_mode == PP_SMC_POWER_PROFILE_UNKNOWN) {
- if (smu->is_apu ||
- !smu_is_workload_profile_available(
- smu, PP_SMC_POWER_PROFILE_FULLSCREEN3D))
- smu->power_profile_mode =
- PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
- else
- smu->power_profile_mode =
- PP_SMC_POWER_PROFILE_FULLSCREEN3D;
- }
+ if (smu->power_profile_mode == PP_SMC_POWER_PROFILE_UNKNOWN)
+ smu->power_profile_mode =
+ PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
smu_power_profile_mode_get(smu, smu->power_profile_mode);
}
@@ -1579,6 +1567,7 @@ static int smu_smc_hw_setup(struct smu_context *smu)
case IP_VERSION(11, 0, 7):
case IP_VERSION(11, 0, 11):
case IP_VERSION(11, 5, 0):
+ case IP_VERSION(11, 5, 2):
case IP_VERSION(11, 0, 12):
if (adev->in_suspend && smu_is_dpm_running(smu)) {
dev_info(adev->dev, "dpm has been enabled\n");
@@ -1932,6 +1921,7 @@ static int smu_disable_dpms(struct smu_context *smu)
case IP_VERSION(11, 0, 7):
case IP_VERSION(11, 0, 11):
case IP_VERSION(11, 5, 0):
+ case IP_VERSION(11, 5, 2):
case IP_VERSION(11, 0, 12):
case IP_VERSION(11, 0, 13):
return 0;
@@ -2801,6 +2791,7 @@ int smu_get_power_limit(void *handle,
switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {
case IP_VERSION(13, 0, 2):
case IP_VERSION(13, 0, 6):
+ case IP_VERSION(13, 0, 12):
case IP_VERSION(13, 0, 14):
case IP_VERSION(11, 0, 7):
case IP_VERSION(11, 0, 11):
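
The pp_table hunk above fixes a potential heap overflow: the cached hardcode_pptable is now freed and reallocated whenever the incoming table is larger than the previous one, so the copy that follows cannot write past a stale, smaller buffer. A hedged sketch of the resulting flow (the trailing memcpy is from surrounding context, not shown in the hunk):

if (!smu_table->hardcode_pptable ||
    smu_table->power_play_table_size < size) {
	kfree(smu_table->hardcode_pptable);	/* kfree(NULL) is a no-op */
	smu_table->hardcode_pptable = kzalloc(size, GFP_KERNEL);
	if (!smu_table->hardcode_pptable)
		return -ENOMEM;
}
memcpy(smu_table->hardcode_pptable, buf, size);
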
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_12_pmfw.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_12_pmfw.h
new file mode 100644
index 000000000000..4a1256d29d62
--- /dev/null
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_12_pmfw.h
@@ -0,0 +1,138 @@
+/*
+ * Copyright 2021 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef SMU_13_0_12_PMFW_H
+#define SMU_13_0_12_PMFW_H
+
+#define NUM_VCLK_DPM_LEVELS 4
+#define NUM_DCLK_DPM_LEVELS 4
+#define NUM_SOCCLK_DPM_LEVELS 4
+#define NUM_LCLK_DPM_LEVELS 4
+#define NUM_UCLK_DPM_LEVELS 4
+#define NUM_FCLK_DPM_LEVELS 4
+#define NUM_XGMI_DPM_LEVELS 2
+#define NUM_CXL_BITRATES 4
+#define NUM_PCIE_BITRATES 4
+#define NUM_XGMI_BITRATES 4
+#define NUM_XGMI_WIDTHS 3
+#define NUM_TDP_GROUPS 4
+#define NUM_SOC_P2S_TABLES 6
+#define NUM_GFX_P2S_TABLES 8
+#define NUM_PSM_DIDT_THRESHOLDS 3
+
+typedef enum {
+/*0*/ FEATURE_DATA_CALCULATION = 0,
+/*1*/ FEATURE_DPM_FCLK = 1,
+/*2*/ FEATURE_DPM_GFXCLK = 2,
+/*3*/ FEATURE_DPM_LCLK = 3,
+/*4*/ FEATURE_DPM_SOCCLK = 4,
+/*5*/ FEATURE_DPM_UCLK = 5,
+/*6*/ FEATURE_DPM_VCN = 6,
+/*7*/ FEATURE_DPM_XGMI = 7,
+/*8*/ FEATURE_DS_FCLK = 8,
+/*9*/ FEATURE_DS_GFXCLK = 9,
+/*10*/ FEATURE_DS_LCLK = 10,
+/*11*/ FEATURE_DS_MP0CLK = 11,
+/*12*/ FEATURE_DS_MP1CLK = 12,
+/*13*/ FEATURE_DS_MPIOCLK = 13,
+/*14*/ FEATURE_DS_SOCCLK = 14,
+/*15*/ FEATURE_DS_VCN = 15,
+/*16*/ FEATURE_APCC_DFLL = 16,
+/*17*/ FEATURE_APCC_PLUS = 17,
+/*18*/ FEATURE_PPT = 18,
+/*19*/ FEATURE_TDC = 19,
+/*20*/ FEATURE_THERMAL = 20,
+/*21*/ FEATURE_SOC_PCC = 21,
+/*22*/ FEATURE_PROCHOT = 22,
+/*23*/ FEATURE_FDD_AID_HBM = 23,
+/*24*/ FEATURE_FDD_AID_SOC = 24,
+/*25*/ FEATURE_FDD_XCD_EDC = 25,
+/*26*/ FEATURE_FDD_XCD_XVMIN = 26,
+/*27*/ FEATURE_FW_CTF = 27,
+/*28*/ FEATURE_SMU_CG = 28,
+/*29*/ FEATURE_PSI7 = 29,
+/*30*/ FEATURE_XGMI_PER_LINK_PWR_DOWN = 30,
+/*31*/ FEATURE_SOC_DC_RTC = 31,
+/*32*/ FEATURE_GFX_DC_RTC = 32,
+/*33*/ FEATURE_DVM_MIN_PSM = 33,
+/*34*/ FEATURE_PRC = 34,
+/*35*/ FEATURE_PSM_SQ_THROTTLER = 35,
+/*36*/ FEATURE_PIT = 36,
+/*37*/ FEATURE_DVO = 37,
+/*38*/ FEATURE_XVMINORPSM_CLKSTOP_DS = 38,
+
+/*39*/ NUM_FEATURES = 39
+} FEATURE_LIST_e;
+
+//enum for MPIO PCIe gen speed msgs
+typedef enum {
+ PCIE_LINK_SPEED_INDEX_TABLE_GEN1,
+ PCIE_LINK_SPEED_INDEX_TABLE_GEN2,
+ PCIE_LINK_SPEED_INDEX_TABLE_GEN3,
+ PCIE_LINK_SPEED_INDEX_TABLE_GEN4,
+ PCIE_LINK_SPEED_INDEX_TABLE_GEN4_ESM,
+ PCIE_LINK_SPEED_INDEX_TABLE_GEN5,
+ PCIE_LINK_SPEED_INDEX_TABLE_COUNT
+} PCIE_LINK_SPEED_INDEX_TABLE_e;
+
+typedef enum {
+ GFX_GUARDBAND_OFFSET_0,
+ GFX_GUARDBAND_OFFSET_1,
+ GFX_GUARDBAND_OFFSET_2,
+ GFX_GUARDBAND_OFFSET_3,
+ GFX_GUARDBAND_OFFSET_4,
+ GFX_GUARDBAND_OFFSET_5,
+ GFX_GUARDBAND_OFFSET_6,
+ GFX_GUARDBAND_OFFSET_7,
+ GFX_GUARDBAND_OFFSET_COUNT
+} GFX_GUARDBAND_OFFSET_e;
+
+typedef enum {
+ GFX_DVM_MARGINHI_0,
+ GFX_DVM_MARGINHI_1,
+ GFX_DVM_MARGINHI_2,
+ GFX_DVM_MARGINHI_3,
+ GFX_DVM_MARGINHI_4,
+ GFX_DVM_MARGINHI_5,
+ GFX_DVM_MARGINHI_6,
+ GFX_DVM_MARGINHI_7,
+ GFX_DVM_MARGINLO_0,
+ GFX_DVM_MARGINLO_1,
+ GFX_DVM_MARGINLO_2,
+ GFX_DVM_MARGINLO_3,
+ GFX_DVM_MARGINLO_4,
+ GFX_DVM_MARGINLO_5,
+ GFX_DVM_MARGINLO_6,
+ GFX_DVM_MARGINLO_7,
+ GFX_DVM_MARGIN_COUNT
+} GFX_DVM_MARGIN_e;
+
+#define SMU_VF_METRICS_TABLE_VERSION 0x3
+
+typedef struct __attribute__((packed, aligned(4))) {
+ uint32_t AccumulationCounter;
+ uint32_t InstGfxclk_TargFreq;
+ uint64_t AccGfxclk_TargFreq;
+ uint64_t AccGfxRsmuDpm_Busy;
+} VfMetricsTable_t;
+
+#endif
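
Each Acc* field in VfMetricsTable_t pairs with AccumulationCounter, the number of samples accumulated so far. A hedged illustration of recovering an average from such a pair (the helper name is assumed; the divide-by-counter convention mirrors the other SMU metrics consumers):

static inline uint32_t vf_avg_gfxclk_targ_freq(const VfMetricsTable_t *m)
{
	/* No samples accumulated yet: report 0 rather than divide by zero */
	if (!m->AccumulationCounter)
		return 0;
	return (uint32_t)(m->AccGfxclk_TargFreq / m->AccumulationCounter);
}
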
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_pmfw.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_pmfw.h
index 274b3e1cc4fb..f8ed45857878 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_pmfw.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_pmfw.h
@@ -129,6 +129,7 @@ typedef enum {
#define SMU_METRICS_TABLE_VERSION 0xF
+// Unified metrics table for smu_v13_0_6
typedef struct __attribute__((packed, aligned(4))) {
uint32_t AccumulationCounter;
@@ -241,8 +242,9 @@ typedef struct __attribute__((packed, aligned(4))) {
//Total App Clock Counter
uint64_t GfxclkBelowHostLimitAcc[8];
-} MetricsTableX_t;
+} MetricsTableV0_t;
+// Metrics table for smu_v13_0_6 APUs
typedef struct __attribute__((packed, aligned(4))) {
uint32_t AccumulationCounter;
@@ -333,7 +335,116 @@ typedef struct __attribute__((packed, aligned(4))) {
// VCN/JPEG ACTIVITY
uint32_t VcnBusy[4];
uint32_t JpegBusy[32];
-} MetricsTableA_t;
+} MetricsTableV1_t;
+
+// Metrics table for smu_v13_0_12
+typedef struct __attribute__((packed, aligned(4))) {
+ uint64_t AccumulationCounter;
+
+ //TEMPERATURE
+ uint32_t MaxSocketTemperature;
+ uint32_t MaxVrTemperature;
+ uint32_t MaxHbmTemperature;
+ uint64_t MaxSocketTemperatureAcc;
+ uint64_t MaxVrTemperatureAcc;
+ uint64_t MaxHbmTemperatureAcc;
+
+ //POWER
+ uint32_t SocketPowerLimit;
+ uint32_t MaxSocketPowerLimit;
+ uint32_t SocketPower;
+
+ //ENERGY
+ uint64_t Timestamp;
+ uint64_t SocketEnergyAcc;
+ uint64_t CcdEnergyAcc;
+ uint64_t XcdEnergyAcc;
+ uint64_t AidEnergyAcc;
+ uint64_t HbmEnergyAcc;
+
+ //FREQUENCY
+ uint32_t GfxclkFrequencyLimit;
+ uint32_t FclkFrequency;
+ uint32_t UclkFrequency;
+ uint32_t SocclkFrequency[4];
+ uint32_t VclkFrequency[4];
+ uint32_t DclkFrequency[4];
+ uint32_t LclkFrequency[4];
+ uint64_t GfxclkFrequencyAcc[8];
+
+ //FREQUENCY RANGE
+ uint32_t MaxGfxclkFrequency;
+ uint32_t MinGfxclkFrequency;
+ uint32_t FclkFrequencyTable[4];
+ uint32_t UclkFrequencyTable[4];
+ uint32_t SocclkFrequencyTable[4];
+ uint32_t VclkFrequencyTable[4];
+ uint32_t DclkFrequencyTable[4];
+ uint32_t LclkFrequencyTable[4];
+ uint32_t MaxLclkDpmRange;
+ uint32_t MinLclkDpmRange;
+
+ //XGMI
+ uint32_t XgmiWidth;
+ uint32_t XgmiBitrate;
+ uint64_t XgmiReadBandwidthAcc[8];
+ uint64_t XgmiWriteBandwidthAcc[8];
+
+ //ACTIVITY
+ uint32_t SocketGfxBusy;
+ uint32_t DramBandwidthUtilization;
+ uint64_t SocketC0ResidencyAcc;
+ uint64_t SocketGfxBusyAcc;
+ uint64_t DramBandwidthAcc;
+ uint32_t MaxDramBandwidth;
+ uint64_t DramBandwidthUtilizationAcc;
+ uint64_t PcieBandwidthAcc[4];
+
+ //THROTTLERS
+ uint32_t ProchotResidencyAcc;
+ uint32_t PptResidencyAcc;
+ uint32_t SocketThmResidencyAcc;
+ uint32_t VrThmResidencyAcc;
+ uint32_t HbmThmResidencyAcc;
+ uint32_t GfxLockXCDMak;
+
+ // New Items at end to maintain driver compatibility
+ uint32_t GfxclkFrequency[8];
+
+ //PSNs
+ uint64_t PublicSerialNumber_AID[4];
+ uint64_t PublicSerialNumber_XCD[8];
+
+ //XGMI data transfer size
+ uint64_t XgmiReadDataSizeAcc[8];//in KByte
+ uint64_t XgmiWriteDataSizeAcc[8];//in KByte
+
+ //PCIE BW Data and error count
+ uint32_t PcieBandwidth[4];
+ uint32_t PCIeL0ToRecoveryCountAcc; // The Pcie counter itself is accumulated
+ uint32_t PCIenReplayAAcc; // The Pcie counter itself is accumulated
+ uint32_t PCIenReplayARolloverCountAcc; // The Pcie counter itself is accumulated
+ uint32_t PCIeNAKSentCountAcc; // The Pcie counter itself is accumulated
+ uint32_t PCIeNAKReceivedCountAcc; // The Pcie counter itself is accumulated
+
+ // VCN/JPEG ACTIVITY
+ uint32_t VcnBusy[4];
+ uint32_t JpegBusy[32];
+
+ // PCIE LINK Speed and width
+ uint32_t PCIeLinkSpeed;
+ uint32_t PCIeLinkWidth;
+
+ // PER XCD ACTIVITY
+ uint32_t GfxBusy[8];
+ uint64_t GfxBusyAcc[8];
+
+ //PCIE BW Data and error count
+ uint32_t PCIeOtherEndRecoveryAcc; // The Pcie counter itself is accumulated
+
+ //Total App Clock Counter
+ uint64_t GfxclkBelowHostLimitAcc[8];
+} MetricsTableV2_t;
#define SMU_VF_METRICS_TABLE_VERSION 0x5
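
With the rename from MetricsTableX_t/MetricsTableA_t to version-suffixed names, a consumer can size its metrics buffer per layout. A hedged sketch of such a selection (the function name is assumed; the actual dispatch lives in the ppt code):

static size_t smu_metrics_table_size(struct amdgpu_device *adev)
{
	/* SMU 13.0.12 reports the new V2 layout; other 13.0.6-family
	 * parts keep V0 (dGPU) or V1 (APU). */
	if (amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 12))
		return sizeof(MetricsTableV2_t);
	return (adev->flags & AMD_IS_APU) ? sizeof(MetricsTableV1_t) :
					    sizeof(MetricsTableV0_t);
}
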
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_ppsmc.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_ppsmc.h
index 147bfb12fd75..288b2576432b 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_ppsmc.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_ppsmc.h
@@ -92,7 +92,7 @@
#define PPSMC_MSG_McaBankCeDumpDW 0x3B
#define PPSMC_MSG_SelectPLPDMode 0x40
#define PPSMC_MSG_RmaDueToBadPageThreshold 0x43
-#define PPSMC_MSG_SelectPstatePolicy 0x44
+#define PPSMC_MSG_SetThrottlingPolicy 0x44
#define PPSMC_MSG_ResetSDMA 0x4D
#define PPSMC_Message_Count 0x4E
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h
index e4cd6a0d13da..9ccd5a1986d3 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h
@@ -273,7 +273,7 @@
__SMU_DUMMY_MAP(GetMetricsVersion), \
__SMU_DUMMY_MAP(EnableUCLKShadow), \
__SMU_DUMMY_MAP(RmaDueToBadPageThreshold), \
- __SMU_DUMMY_MAP(SelectPstatePolicy), \
+ __SMU_DUMMY_MAP(SetThrottlingPolicy), \
__SMU_DUMMY_MAP(MALLPowerController), \
__SMU_DUMMY_MAP(MALLPowerState), \
__SMU_DUMMY_MAP(ResetSDMA),
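
The __SMU_DUMMY_MAP rename must be matched by a corresponding MSG_MAP entry in the per-ASIC message table; a hedged one-line sketch of what that entry would look like (the real entry lives in the 13.0.6 ppt code, not in this hunk):

	MSG_MAP(SetThrottlingPolicy, PPSMC_MSG_SetThrottlingPolicy, 0),
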
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h
index 8d4a96e23326..31166974746f 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h
@@ -306,5 +306,7 @@ int smu_v13_0_get_boot_freq_by_index(struct smu_context *smu,
uint32_t *value);
void smu_v13_0_interrupt_work(struct smu_context *smu);
+bool smu_v13_0_12_is_dpm_running(struct smu_context *smu);
+extern const struct cmn2asic_mapping smu_v13_0_12_feature_mask_map[];
#endif
#endif
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
index 189c6a32b6bd..78391d8f35a9 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
@@ -227,6 +227,7 @@ int smu_v11_0_check_fw_version(struct smu_context *smu)
smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_Navy_Flounder;
break;
case IP_VERSION(11, 5, 0):
+ case IP_VERSION(11, 5, 2):
smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_VANGOGH;
break;
case IP_VERSION(11, 0, 12):
@@ -471,10 +472,11 @@ int smu_v11_0_init_power(struct smu_context *smu)
{
struct amdgpu_device *adev = smu->adev;
struct smu_power_context *smu_power = &smu->smu_power;
- size_t size = amdgpu_ip_version(adev, MP1_HWIP, 0) ==
- IP_VERSION(11, 5, 0) ?
- sizeof(struct smu_11_5_power_context) :
- sizeof(struct smu_11_0_power_context);
+ u32 ip_version = amdgpu_ip_version(adev, MP1_HWIP, 0);
+ size_t size = ((ip_version == IP_VERSION(11, 5, 0)) ||
+ (ip_version == IP_VERSION(11, 5, 2))) ?
+ sizeof(struct smu_11_5_power_context) :
+ sizeof(struct smu_11_0_power_context);
smu_power->power_context = kzalloc(size, GFP_KERNEL);
if (!smu_power->power_context)
@@ -731,6 +733,7 @@ int smu_v11_0_init_display_count(struct smu_context *smu, uint32_t count)
*/
if (amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(11, 0, 11) ||
amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(11, 5, 0) ||
+ amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(11, 5, 2) ||
amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(11, 0, 12) ||
amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(11, 0, 13))
return 0;
@@ -1110,6 +1113,7 @@ int smu_v11_0_gfx_off_control(struct smu_context *smu, bool enable)
case IP_VERSION(11, 0, 12):
case IP_VERSION(11, 0, 13):
case IP_VERSION(11, 5, 0):
+ case IP_VERSION(11, 5, 2):
if (!(adev->pm.pp_feature & PP_GFXOFF_MASK))
return 0;
if (enable)
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/Makefile b/drivers/gpu/drm/amd/pm/swsmu/smu13/Makefile
index 7f3493b6c53c..51f1fa9789ab 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/Makefile
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/Makefile
@@ -24,7 +24,7 @@
# It provides the smu management services for the driver.
SMU13_MGR = smu_v13_0.o aldebaran_ppt.o yellow_carp_ppt.o smu_v13_0_0_ppt.o smu_v13_0_4_ppt.o \
- smu_v13_0_5_ppt.o smu_v13_0_7_ppt.o smu_v13_0_6_ppt.o
+ smu_v13_0_5_ppt.o smu_v13_0_7_ppt.o smu_v13_0_6_ppt.o smu_v13_0_12_ppt.o
AMD_SWSMU_SMU13MGR = $(addprefix $(AMD_SWSMU_PATH)/smu13/,$(SMU13_MGR))
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
index fbbdfa54f6a2..0915d6377613 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
@@ -267,10 +267,7 @@ int smu_v13_0_check_fw_version(struct smu_context *smu)
smu_major = (smu_version >> 16) & 0xff;
smu_minor = (smu_version >> 8) & 0xff;
smu_debug = (smu_version >> 0) & 0xff;
- if (smu->is_apu ||
- amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 6) ||
- amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 14))
- adev->pm.fw_version = smu_version;
+ adev->pm.fw_version = smu_version;
/* only for dGPU w/ SMU13*/
if (adev->pm.fw)
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
index 0551a3311217..985355bf78b2 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
@@ -126,7 +126,7 @@ static struct cmn2asic_msg_mapping smu_v13_0_0_message_map[SMU_MSG_MAX_COUNT] =
MSG_MAP(DisableSmuFeaturesHigh, PPSMC_MSG_DisableSmuFeaturesHigh, 1),
MSG_MAP(GetEnabledSmuFeaturesLow, PPSMC_MSG_GetRunningSmuFeaturesLow, 1),
MSG_MAP(GetEnabledSmuFeaturesHigh, PPSMC_MSG_GetRunningSmuFeaturesHigh, 1),
- MSG_MAP(SetWorkloadMask, PPSMC_MSG_SetWorkloadMask, 1),
+ MSG_MAP(SetWorkloadMask, PPSMC_MSG_SetWorkloadMask, 0),
MSG_MAP(SetPptLimit, PPSMC_MSG_SetPptLimit, 0),
MSG_MAP(SetDriverDramAddrHigh, PPSMC_MSG_SetDriverDramAddrHigh, 1),
MSG_MAP(SetDriverDramAddrLow, PPSMC_MSG_SetDriverDramAddrLow, 1),
@@ -140,7 +140,7 @@ static struct cmn2asic_msg_mapping smu_v13_0_0_message_map[SMU_MSG_MAX_COUNT] =
MSG_MAP(ExitBaco, PPSMC_MSG_ExitBaco, 0),
MSG_MAP(SetSoftMinByFreq, PPSMC_MSG_SetSoftMinByFreq, 1),
MSG_MAP(SetSoftMaxByFreq, PPSMC_MSG_SetSoftMaxByFreq, 1),
- MSG_MAP(SetHardMinByFreq, PPSMC_MSG_SetHardMinByFreq, 1),
+ MSG_MAP(SetHardMinByFreq, PPSMC_MSG_SetHardMinByFreq, 0),
MSG_MAP(SetHardMaxByFreq, PPSMC_MSG_SetHardMaxByFreq, 0),
MSG_MAP(GetMinDpmFreq, PPSMC_MSG_GetMinDpmFreq, 1),
MSG_MAP(GetMaxDpmFreq, PPSMC_MSG_GetMaxDpmFreq, 1),
@@ -149,7 +149,7 @@ static struct cmn2asic_msg_mapping smu_v13_0_0_message_map[SMU_MSG_MAX_COUNT] =
MSG_MAP(PowerDownVcn, PPSMC_MSG_PowerDownVcn, 0),
MSG_MAP(PowerUpJpeg, PPSMC_MSG_PowerUpJpeg, 0),
MSG_MAP(PowerDownJpeg, PPSMC_MSG_PowerDownJpeg, 0),
- MSG_MAP(GetDcModeMaxDpmFreq, PPSMC_MSG_GetDcModeMaxDpmFreq, 1),
+ MSG_MAP(GetDcModeMaxDpmFreq, PPSMC_MSG_GetDcModeMaxDpmFreq, 0),
MSG_MAP(OverridePcieParameters, PPSMC_MSG_OverridePcieParameters, 0),
MSG_MAP(DramLogSetDramAddrHigh, PPSMC_MSG_DramLogSetDramAddrHigh, 0),
MSG_MAP(DramLogSetDramAddrLow, PPSMC_MSG_DramLogSetDramAddrLow, 0),
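
The third MSG_MAP field is the valid-in-VF flag: flipping SetWorkloadMask, SetHardMinByFreq and GetDcModeMaxDpmFreq to 0 makes the common layer refuse them under SR-IOV instead of forwarding them to the PMFW. A hedged, simplified sketch of that gate as performed during message-index lookup:

/* Simplified: messages mapped with valid_in_vf == 0 are rejected in VFs */
if (amdgpu_sriov_vf(smu->adev) && !msg_mapping.valid_in_vf)
	return -EACCES;
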
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_12_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_12_ppt.c
new file mode 100644
index 000000000000..86852e738837
--- /dev/null
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_12_ppt.c
@@ -0,0 +1,101 @@
+/*
+ * Copyright 2021 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#define SWSMU_CODE_LAYER_L2
+
+#include <linux/firmware.h>
+#include "amdgpu.h"
+#include "amdgpu_smu.h"
+#include "smu_v13_0_12_pmfw.h"
+#include "smu_v13_0_6_ppt.h"
+#include "smu_v13_0.h"
+#include "amdgpu_xgmi.h"
+#include <linux/pci.h>
+#include "smu_cmn.h"
+
+#undef MP1_Public
+#undef smnMP1_FIRMWARE_FLAGS
+
+/*
+ * DO NOT use these for err/warn/info/debug messages.
+ * Use dev_err, dev_warn, dev_info and dev_dbg instead.
+ * They are more MGPU friendly.
+ */
+#undef pr_err
+#undef pr_warn
+#undef pr_info
+#undef pr_debug
+
+#define SMU_13_0_12_FEA_MAP(smu_feature, smu_13_0_12_feature) \
+ [smu_feature] = { 1, (smu_13_0_12_feature) }
+
+#define FEATURE_MASK(feature) (1ULL << feature)
+#define SMC_DPM_FEATURE \
+ (FEATURE_MASK(FEATURE_DATA_CALCULATION) | \
+ FEATURE_MASK(FEATURE_DPM_GFXCLK) | FEATURE_MASK(FEATURE_DPM_FCLK))
+
+const struct cmn2asic_mapping smu_v13_0_12_feature_mask_map[SMU_FEATURE_COUNT] = {
+ SMU_13_0_12_FEA_MAP(SMU_FEATURE_DATA_CALCULATIONS_BIT, FEATURE_DATA_CALCULATION),
+ SMU_13_0_12_FEA_MAP(SMU_FEATURE_DPM_GFXCLK_BIT, FEATURE_DPM_GFXCLK),
+ SMU_13_0_12_FEA_MAP(SMU_FEATURE_DPM_FCLK_BIT, FEATURE_DPM_FCLK),
+ SMU_13_0_12_FEA_MAP(SMU_FEATURE_DS_GFXCLK_BIT, FEATURE_DS_GFXCLK),
+ SMU_13_0_12_FEA_MAP(SMU_FEATURE_DS_SOCCLK_BIT, FEATURE_DS_SOCCLK),
+ SMU_13_0_12_FEA_MAP(SMU_FEATURE_DS_LCLK_BIT, FEATURE_DS_LCLK),
+ SMU_13_0_12_FEA_MAP(SMU_FEATURE_DS_FCLK_BIT, FEATURE_DS_FCLK),
+ SMU_13_0_12_FEA_MAP(SMU_FEATURE_PPT_BIT, FEATURE_PPT),
+ SMU_13_0_12_FEA_MAP(SMU_FEATURE_TDC_BIT, FEATURE_TDC),
+ SMU_13_0_12_FEA_MAP(SMU_FEATURE_APCC_DFLL_BIT, FEATURE_APCC_DFLL),
+ SMU_13_0_12_FEA_MAP(SMU_FEATURE_MP1_CG_BIT, FEATURE_SMU_CG),
+ SMU_13_0_12_FEA_MAP(SMU_FEATURE_FW_CTF_BIT, FEATURE_FW_CTF),
+ SMU_13_0_12_FEA_MAP(SMU_FEATURE_THERMAL_BIT, FEATURE_THERMAL),
+ SMU_13_0_12_FEA_MAP(SMU_FEATURE_SOC_PCC_BIT, FEATURE_SOC_PCC),
+ SMU_13_0_12_FEA_MAP(SMU_FEATURE_XGMI_PER_LINK_PWR_DWN_BIT, FEATURE_XGMI_PER_LINK_PWR_DOWN),
+};
+
+static int smu_v13_0_12_get_enabled_mask(struct smu_context *smu,
+ uint64_t *feature_mask)
+{
+ int ret;
+
+ ret = smu_cmn_get_enabled_mask(smu, feature_mask);
+
+ if (ret == -EIO) {
+ *feature_mask = 0;
+ ret = 0;
+ }
+
+ return ret;
+}
+
+bool smu_v13_0_12_is_dpm_running(struct smu_context *smu)
+{
+ int ret;
+ uint64_t feature_enabled;
+
+ ret = smu_v13_0_12_get_enabled_mask(smu, &feature_enabled);
+
+ if (ret)
+ return false;
+
+ return !!(feature_enabled & SMC_DPM_FEATURE);
+}
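
In the new file, smu_v13_0_12_get_enabled_mask deliberately swallows -EIO, treating an unreadable feature mask as "no features enabled", and smu_v13_0_12_is_dpm_running then reports DPM as active only if one of the SMC_DPM_FEATURE bits is set. A standalone illustration of that bitmask test, with bit positions assumed for the example rather than taken from the PMFW header:

/* Worked example of the SMC_DPM_FEATURE check (bit values assumed). */
#include <assert.h>
#include <stdint.h>

#define FEATURE_DATA_CALCULATION 0
#define FEATURE_DPM_GFXCLK       1
#define FEATURE_DPM_FCLK         2
#define FEATURE_MASK(feature)    (1ULL << (feature))
#define SMC_DPM_FEATURE \
        (FEATURE_MASK(FEATURE_DATA_CALCULATION) | \
         FEATURE_MASK(FEATURE_DPM_GFXCLK) | FEATURE_MASK(FEATURE_DPM_FCLK))

int main(void)
{
        uint64_t enabled = FEATURE_MASK(FEATURE_DPM_GFXCLK);

        assert(!!(enabled & SMC_DPM_FEATURE)); /* DPM reported running */
        enabled = 0;                           /* e.g. after an -EIO fallback */
        assert(!(enabled & SMC_DPM_FEATURE));  /* DPM reported stopped */
        return 0;
}
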
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
index da7bd9227afe..9f2de69f53b2 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
@@ -105,7 +105,6 @@ MODULE_FIRMWARE("amdgpu/smu_13_0_14.bin");
enum smu_v13_0_6_caps {
SMU_CAP(DPM),
- SMU_CAP(UNI_METRICS),
SMU_CAP(DPM_POLICY),
SMU_CAP(OTHER_END_METRICS),
SMU_CAP(SET_UCLK_MAX),
@@ -193,7 +192,7 @@ static const struct cmn2asic_msg_mapping smu_v13_0_6_message_map[SMU_MSG_MAX_COU
MSG_MAP(McaBankCeDumpDW, PPSMC_MSG_McaBankCeDumpDW, SMU_MSG_RAS_PRI),
MSG_MAP(SelectPLPDMode, PPSMC_MSG_SelectPLPDMode, 0),
MSG_MAP(RmaDueToBadPageThreshold, PPSMC_MSG_RmaDueToBadPageThreshold, 0),
- MSG_MAP(SelectPstatePolicy, PPSMC_MSG_SelectPstatePolicy, 0),
+ MSG_MAP(SetThrottlingPolicy, PPSMC_MSG_SetThrottlingPolicy, 0),
MSG_MAP(ResetSDMA, PPSMC_MSG_ResetSDMA, 0),
};
@@ -272,8 +271,13 @@ struct PPTable_t {
#define SMUQ10_TO_UINT(x) ((x) >> 10)
#define SMUQ10_FRAC(x) ((x) & 0x3ff)
#define SMUQ10_ROUND(x) ((SMUQ10_TO_UINT(x)) + ((SMUQ10_FRAC(x)) >= 0x200))
-#define GET_METRIC_FIELD(field, flag) ((flag) ?\
- (metrics_a->field) : (metrics_x->field))
+#define GET_GPU_METRIC_FIELD(field, version) ((version == METRICS_VERSION_V0) ?\
+ (metrics_v0->field) : (metrics_v2->field))
+#define GET_METRIC_FIELD(field, version) ((version == METRICS_VERSION_V1) ?\
+ (metrics_v1->field) : GET_GPU_METRIC_FIELD(field, version))
+#define METRICS_TABLE_SIZE (max3(sizeof(MetricsTableV0_t),\
+ sizeof(MetricsTableV1_t),\
+ sizeof(MetricsTableV2_t)))
struct smu_v13_0_6_dpm_map {
enum smu_clk_type clk_type;
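
The PMFW metrics fields are unsigned Q10 fixed point (10 fractional bits); SMUQ10_ROUND rounds to nearest by adding one whenever the fraction is at least 0x200, i.e. 0.5. A short self-checking example of the arithmetic:

/* Self-contained check of the Q10 rounding behaviour. */
#include <assert.h>
#include <stdint.h>

#define SMUQ10_TO_UINT(x) ((x) >> 10)
#define SMUQ10_FRAC(x)    ((x) & 0x3ff)
#define SMUQ10_ROUND(x)   ((SMUQ10_TO_UINT(x)) + ((SMUQ10_FRAC(x)) >= 0x200))

int main(void)
{
        uint32_t q = 0x1A33;            /* 6 + 0x233/1024 ~= 6.55 in Q10 */

        assert(SMUQ10_TO_UINT(q) == 6); /* plain truncation */
        assert(SMUQ10_ROUND(q) == 7);   /* rounds up: fraction >= 0.5 */
        return 0;
}
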
@@ -282,6 +286,18 @@ struct smu_v13_0_6_dpm_map {
uint32_t *freq_table;
};
+static inline int smu_v13_0_6_get_metrics_version(struct smu_context *smu)
+{
+ if ((smu->adev->flags & AMD_IS_APU) &&
+ smu->smc_fw_version <= 0x4556900)
+ return METRICS_VERSION_V1;
+ else if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) ==
+ IP_VERSION(13, 0, 12))
+ return METRICS_VERSION_V2;
+
+ return METRICS_VERSION_V0;
+}
+
static inline void smu_v13_0_6_cap_set(struct smu_context *smu,
enum smu_v13_0_6_caps cap)
{
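
metrics_v0, metrics_v1 and metrics_v2 all alias one METRICS_TABLE_SIZE buffer, and GET_METRIC_FIELD picks which struct layout to read through based on the version computed above (V1 for APUs on firmware <= 0x4556900, V2 for MP1 13.0.12, V0 otherwise). A toy model of the same aliased-view pattern; the field layouts below are invented for illustration:

/* Toy model of version-dispatched reads through one shared buffer. */
#include <assert.h>
#include <stdint.h>

struct table_v0 { uint64_t counter; uint32_t power; };
struct table_v1 { uint32_t power; uint64_t counter; };

#define GET_FIELD(field, version) \
        ((version) == 0 ? (v0->field) : (v1->field))

int main(void)
{
        union { struct table_v0 a; struct table_v1 b; } buf = { { 0 } };
        struct table_v0 *v0 = &buf.a; /* both views alias the same bytes */
        struct table_v1 *v1 = &buf.b;

        v1->counter = 42;             /* "firmware" wrote the v1 layout */
        assert(GET_FIELD(counter, 1) == 42);
        return 0;
}
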
@@ -309,7 +325,6 @@ static inline bool smu_v13_0_6_cap_supported(struct smu_context *smu,
static void smu_v13_0_14_init_caps(struct smu_context *smu)
{
enum smu_v13_0_6_caps default_cap_list[] = { SMU_CAP(DPM),
- SMU_CAP(UNI_METRICS),
SMU_CAP(SET_UCLK_MAX),
SMU_CAP(DPM_POLICY),
SMU_CAP(PCIE_METRICS),
@@ -335,12 +350,14 @@ static void smu_v13_0_14_init_caps(struct smu_context *smu)
static void smu_v13_0_12_init_caps(struct smu_context *smu)
{
enum smu_v13_0_6_caps default_cap_list[] = { SMU_CAP(DPM),
- SMU_CAP(UNI_METRICS),
SMU_CAP(PCIE_METRICS),
SMU_CAP(CTF_LIMIT),
SMU_CAP(MCA_DEBUG_MODE),
SMU_CAP(RMA_MSG),
- SMU_CAP(ACA_SYND) };
+ SMU_CAP(ACA_SYND),
+ SMU_CAP(OTHER_END_METRICS),
+ SMU_CAP(HST_LIMIT_METRICS),
+ SMU_CAP(PER_INST_METRICS) };
uint32_t fw_ver = smu->smc_fw_version;
for (int i = 0; i < ARRAY_SIZE(default_cap_list); i++)
@@ -356,7 +373,6 @@ static void smu_v13_0_12_init_caps(struct smu_context *smu)
static void smu_v13_0_6_init_caps(struct smu_context *smu)
{
enum smu_v13_0_6_caps default_cap_list[] = { SMU_CAP(DPM),
- SMU_CAP(UNI_METRICS),
SMU_CAP(SET_UCLK_MAX),
SMU_CAP(DPM_POLICY),
SMU_CAP(PCIE_METRICS),
@@ -382,8 +398,6 @@ static void smu_v13_0_6_init_caps(struct smu_context *smu)
smu_v13_0_6_cap_clear(smu, SMU_CAP(RMA_MSG));
smu_v13_0_6_cap_clear(smu, SMU_CAP(ACA_SYND));
- if (fw_ver <= 0x4556900)
- smu_v13_0_6_cap_clear(smu, SMU_CAP(UNI_METRICS));
if (fw_ver >= 0x04556F00)
smu_v13_0_6_cap_set(smu, SMU_CAP(HST_LIMIT_METRICS));
if (fw_ver >= 0x04556A00)
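
Each init_caps routine starts from a per-IP default list and then adjusts individual bits against firmware-version thresholds, such as HST_LIMIT_METRICS from 0x04556F00 above. A minimal sketch of the pattern; the helpers mirror the driver's cap_set/cap_clear/cap_supported trio, and only the threshold visible in this hunk is real:

/* Sketch of firmware-gated capability bits (a 64-bit bitmap assumed). */
#include <stdbool.h>
#include <stdint.h>

enum caps { CAP_HST_LIMIT_METRICS, CAP_COUNT };

static uint64_t caps_mask;

static void cap_set(enum caps cap)        { caps_mask |= 1ULL << cap; }
static void cap_clear(enum caps cap)      { caps_mask &= ~(1ULL << cap); }
static bool cap_supported(enum caps cap)  { return caps_mask & (1ULL << cap); }

static void init_caps(uint32_t fw_ver)
{
        /* Threshold taken from the hunk above. */
        if (fw_ver >= 0x04556F00)
                cap_set(CAP_HST_LIMIT_METRICS);
        else
                cap_clear(CAP_HST_LIMIT_METRICS);
}

int main(void)
{
        init_caps(0x04556F00);
        return cap_supported(CAP_HST_LIMIT_METRICS) ? 0 : 1;
}
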
@@ -450,8 +464,9 @@ static int smu_v13_0_6_init_microcode(struct smu_context *smu)
int var = (adev->pdev->device & 0xF);
char ucode_prefix[15];
- /* No need to load P2S tables in IOV mode */
- if (amdgpu_sriov_vf(adev))
+ /* No need to load P2S tables in IOV mode or for smu v13.0.12 */
+ if (amdgpu_sriov_vf(adev) ||
+ (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 12)))
return 0;
if (!(adev->flags & AMD_IS_APU)) {
@@ -514,7 +529,7 @@ static int smu_v13_0_6_tables_init(struct smu_context *smu)
PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
SMU_TABLE_INIT(tables, SMU_TABLE_SMU_METRICS,
- max(sizeof(MetricsTableX_t), sizeof(MetricsTableA_t)),
+ METRICS_TABLE_SIZE,
PAGE_SIZE,
AMDGPU_GEM_DOMAIN_VRAM | AMDGPU_GEM_DOMAIN_GTT);
@@ -522,8 +537,7 @@ static int smu_v13_0_6_tables_init(struct smu_context *smu)
PAGE_SIZE,
AMDGPU_GEM_DOMAIN_VRAM | AMDGPU_GEM_DOMAIN_GTT);
- smu_table->metrics_table = kzalloc(max(sizeof(MetricsTableX_t),
- sizeof(MetricsTableA_t)), GFP_KERNEL);
+ smu_table->metrics_table = kzalloc(METRICS_TABLE_SIZE, GFP_KERNEL);
if (!smu_table->metrics_table)
return -ENOMEM;
smu_table->metrics_time = 0;
@@ -570,7 +584,7 @@ static int smu_v13_0_6_select_policy_soc_pstate(struct smu_context *smu,
return -EINVAL;
}
- ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SelectPstatePolicy,
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetThrottlingPolicy,
param, NULL);
if (ret)
@@ -740,10 +754,8 @@ static ssize_t smu_v13_0_6_get_pm_metrics(struct smu_context *smu,
memset(&pm_metrics->common_header, 0,
sizeof(pm_metrics->common_header));
- if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 6))
- pm_metrics->common_header.mp1_ip_discovery_version = IP_VERSION(13, 0, 6);
- if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 14))
- pm_metrics->common_header.mp1_ip_discovery_version = IP_VERSION(13, 0, 14);
+ pm_metrics->common_header.mp1_ip_discovery_version =
+ amdgpu_ip_version(smu->adev, MP1_HWIP, 0);
pm_metrics->common_header.pmfw_version = pmfw_version;
pm_metrics->common_header.pmmetrics_version = table_version;
pm_metrics->common_header.structure_size =
@@ -755,11 +767,12 @@ static ssize_t smu_v13_0_6_get_pm_metrics(struct smu_context *smu,
static int smu_v13_0_6_setup_driver_pptable(struct smu_context *smu)
{
struct smu_table_context *smu_table = &smu->smu_table;
- MetricsTableX_t *metrics_x = (MetricsTableX_t *)smu_table->metrics_table;
- MetricsTableA_t *metrics_a = (MetricsTableA_t *)smu_table->metrics_table;
+ MetricsTableV0_t *metrics_v0 = (MetricsTableV0_t *)smu_table->metrics_table;
+ MetricsTableV1_t *metrics_v1 = (MetricsTableV1_t *)smu_table->metrics_table;
+ MetricsTableV2_t *metrics_v2 = (MetricsTableV2_t *)smu_table->metrics_table;
struct PPTable_t *pptable =
(struct PPTable_t *)smu_table->driver_pptable;
- bool flag = !smu_v13_0_6_cap_supported(smu, SMU_CAP(UNI_METRICS));
+ int version = smu_v13_0_6_get_metrics_version(smu);
int ret, i, retry = 100;
uint32_t table_version;
@@ -771,7 +784,7 @@ static int smu_v13_0_6_setup_driver_pptable(struct smu_context *smu)
return ret;
/* Ensure that metrics have been updated */
- if (GET_METRIC_FIELD(AccumulationCounter, flag))
+ if (GET_METRIC_FIELD(AccumulationCounter, version))
break;
usleep_range(1000, 1100);
@@ -788,29 +801,30 @@ static int smu_v13_0_6_setup_driver_pptable(struct smu_context *smu)
table_version;
pptable->MaxSocketPowerLimit =
- SMUQ10_ROUND(GET_METRIC_FIELD(MaxSocketPowerLimit, flag));
+ SMUQ10_ROUND(GET_METRIC_FIELD(MaxSocketPowerLimit, version));
pptable->MaxGfxclkFrequency =
- SMUQ10_ROUND(GET_METRIC_FIELD(MaxGfxclkFrequency, flag));
+ SMUQ10_ROUND(GET_METRIC_FIELD(MaxGfxclkFrequency, version));
pptable->MinGfxclkFrequency =
- SMUQ10_ROUND(GET_METRIC_FIELD(MinGfxclkFrequency, flag));
+ SMUQ10_ROUND(GET_METRIC_FIELD(MinGfxclkFrequency, version));
for (i = 0; i < 4; ++i) {
pptable->FclkFrequencyTable[i] =
- SMUQ10_ROUND(GET_METRIC_FIELD(FclkFrequencyTable, flag)[i]);
+ SMUQ10_ROUND(GET_METRIC_FIELD(FclkFrequencyTable, version)[i]);
pptable->UclkFrequencyTable[i] =
- SMUQ10_ROUND(GET_METRIC_FIELD(UclkFrequencyTable, flag)[i]);
+ SMUQ10_ROUND(GET_METRIC_FIELD(UclkFrequencyTable, version)[i]);
pptable->SocclkFrequencyTable[i] = SMUQ10_ROUND(
- GET_METRIC_FIELD(SocclkFrequencyTable, flag)[i]);
+ GET_METRIC_FIELD(SocclkFrequencyTable, version)[i]);
pptable->VclkFrequencyTable[i] =
- SMUQ10_ROUND(GET_METRIC_FIELD(VclkFrequencyTable, flag)[i]);
+ SMUQ10_ROUND(GET_METRIC_FIELD(VclkFrequencyTable, version)[i]);
pptable->DclkFrequencyTable[i] =
- SMUQ10_ROUND(GET_METRIC_FIELD(DclkFrequencyTable, flag)[i]);
+ SMUQ10_ROUND(GET_METRIC_FIELD(DclkFrequencyTable, version)[i]);
pptable->LclkFrequencyTable[i] =
- SMUQ10_ROUND(GET_METRIC_FIELD(LclkFrequencyTable, flag)[i]);
+ SMUQ10_ROUND(GET_METRIC_FIELD(LclkFrequencyTable, version)[i]);
}
/* use AID0 serial number by default */
- pptable->PublicSerialNumber_AID = GET_METRIC_FIELD(PublicSerialNumber_AID, flag)[0];
+ pptable->PublicSerialNumber_AID =
+ GET_METRIC_FIELD(PublicSerialNumber_AID, version)[0];
pptable->Init = true;
}
@@ -1130,9 +1144,10 @@ static int smu_v13_0_6_get_smu_metrics_data(struct smu_context *smu,
uint32_t *value)
{
struct smu_table_context *smu_table = &smu->smu_table;
- MetricsTableX_t *metrics_x = (MetricsTableX_t *)smu_table->metrics_table;
- MetricsTableA_t *metrics_a = (MetricsTableA_t *)smu_table->metrics_table;
- bool flag = !smu_v13_0_6_cap_supported(smu, SMU_CAP(UNI_METRICS));
+ MetricsTableV0_t *metrics_v0 = (MetricsTableV0_t *)smu_table->metrics_table;
+ MetricsTableV1_t *metrics_v1 = (MetricsTableV1_t *)smu_table->metrics_table;
+ MetricsTableV2_t *metrics_v2 = (MetricsTableV2_t *)smu_table->metrics_table;
+ int version = smu_v13_0_6_get_metrics_version(smu);
struct amdgpu_device *adev = smu->adev;
int ret = 0;
int xcc_id;
@@ -1147,50 +1162,50 @@ static int smu_v13_0_6_get_smu_metrics_data(struct smu_context *smu,
case METRICS_AVERAGE_GFXCLK:
if (smu_v13_0_6_cap_supported(smu, SMU_CAP(DPM))) {
xcc_id = GET_INST(GC, 0);
- *value = SMUQ10_ROUND(GET_METRIC_FIELD(GfxclkFrequency, flag)[xcc_id]);
+ *value = SMUQ10_ROUND(GET_METRIC_FIELD(GfxclkFrequency, version)[xcc_id]);
} else {
*value = 0;
}
break;
case METRICS_CURR_SOCCLK:
case METRICS_AVERAGE_SOCCLK:
- *value = SMUQ10_ROUND(GET_METRIC_FIELD(SocclkFrequency, flag)[0]);
+ *value = SMUQ10_ROUND(GET_METRIC_FIELD(SocclkFrequency, version)[0]);
break;
case METRICS_CURR_UCLK:
case METRICS_AVERAGE_UCLK:
- *value = SMUQ10_ROUND(GET_METRIC_FIELD(UclkFrequency, flag));
+ *value = SMUQ10_ROUND(GET_METRIC_FIELD(UclkFrequency, version));
break;
case METRICS_CURR_VCLK:
- *value = SMUQ10_ROUND(GET_METRIC_FIELD(VclkFrequency, flag)[0]);
+ *value = SMUQ10_ROUND(GET_METRIC_FIELD(VclkFrequency, version)[0]);
break;
case METRICS_CURR_DCLK:
- *value = SMUQ10_ROUND(GET_METRIC_FIELD(DclkFrequency, flag)[0]);
+ *value = SMUQ10_ROUND(GET_METRIC_FIELD(DclkFrequency, version)[0]);
break;
case METRICS_CURR_FCLK:
- *value = SMUQ10_ROUND(GET_METRIC_FIELD(FclkFrequency, flag));
+ *value = SMUQ10_ROUND(GET_METRIC_FIELD(FclkFrequency, version));
break;
case METRICS_AVERAGE_GFXACTIVITY:
- *value = SMUQ10_ROUND(GET_METRIC_FIELD(SocketGfxBusy, flag));
+ *value = SMUQ10_ROUND(GET_METRIC_FIELD(SocketGfxBusy, version));
break;
case METRICS_AVERAGE_MEMACTIVITY:
- *value = SMUQ10_ROUND(GET_METRIC_FIELD(DramBandwidthUtilization, flag));
+ *value = SMUQ10_ROUND(GET_METRIC_FIELD(DramBandwidthUtilization, version));
break;
case METRICS_CURR_SOCKETPOWER:
- *value = SMUQ10_ROUND(GET_METRIC_FIELD(SocketPower, flag)) << 8;
+ *value = SMUQ10_ROUND(GET_METRIC_FIELD(SocketPower, version)) << 8;
break;
case METRICS_TEMPERATURE_HOTSPOT:
- *value = SMUQ10_ROUND(GET_METRIC_FIELD(MaxSocketTemperature, flag)) *
+ *value = SMUQ10_ROUND(GET_METRIC_FIELD(MaxSocketTemperature, version)) *
SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
break;
case METRICS_TEMPERATURE_MEM:
- *value = SMUQ10_ROUND(GET_METRIC_FIELD(MaxHbmTemperature, flag)) *
+ *value = SMUQ10_ROUND(GET_METRIC_FIELD(MaxHbmTemperature, version)) *
SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
break;
/* This is the max of all VRs and not just SOC VR.
* No need to define another data type for it.
*/
case METRICS_TEMPERATURE_VRSOC:
- *value = SMUQ10_ROUND(GET_METRIC_FIELD(MaxVrTemperature, flag)) *
+ *value = SMUQ10_ROUND(GET_METRIC_FIELD(MaxVrTemperature, version)) *
SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
break;
default:
@@ -1354,6 +1369,9 @@ static int smu_v13_0_6_print_clk_levels(struct smu_context *smu,
break;
case SMU_OD_MCLK:
+ if (!smu_v13_0_6_cap_supported(smu, SMU_CAP(SET_UCLK_MAX)))
+ return 0;
+
size += sysfs_emit_at(buf, size, "%s:\n", "OD_MCLK");
size += sysfs_emit_at(buf, size, "0: %uMhz\n1: %uMhz\n",
pstate_table->uclk_pstate.curr.min,
@@ -2195,6 +2213,9 @@ static bool smu_v13_0_6_is_dpm_running(struct smu_context *smu)
int ret;
uint64_t feature_enabled;
+ if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 12))
+ return smu_v13_0_12_is_dpm_running(smu);
+
ret = smu_v13_0_6_get_enabled_mask(smu, &feature_enabled);
if (ret)
@@ -2478,82 +2499,89 @@ static ssize_t smu_v13_0_6_get_gpu_metrics(struct smu_context *smu, void **table
struct smu_table_context *smu_table = &smu->smu_table;
struct gpu_metrics_v1_7 *gpu_metrics =
(struct gpu_metrics_v1_7 *)smu_table->gpu_metrics_table;
- bool flag = !smu_v13_0_6_cap_supported(smu, SMU_CAP(UNI_METRICS));
+ int version = smu_v13_0_6_get_metrics_version(smu);
int ret = 0, xcc_id, inst, i, j, k, idx;
struct amdgpu_device *adev = smu->adev;
- MetricsTableX_t *metrics_x;
- MetricsTableA_t *metrics_a;
+ MetricsTableV0_t *metrics_v0;
+ MetricsTableV1_t *metrics_v1;
+ MetricsTableV2_t *metrics_v2;
struct amdgpu_xcp *xcp;
u16 link_width_level;
+ u8 num_jpeg_rings;
u32 inst_mask;
bool per_inst;
- metrics_x = kzalloc(max(sizeof(MetricsTableX_t), sizeof(MetricsTableA_t)), GFP_KERNEL);
- ret = smu_v13_0_6_get_metrics_table(smu, metrics_x, true);
+ metrics_v0 = kzalloc(METRICS_TABLE_SIZE, GFP_KERNEL);
+ ret = smu_v13_0_6_get_metrics_table(smu, metrics_v0, true);
if (ret) {
- kfree(metrics_x);
+ kfree(metrics_v0);
return ret;
}
- metrics_a = (MetricsTableA_t *)metrics_x;
+ metrics_v1 = (MetricsTableV1_t *)metrics_v0;
+ metrics_v2 = (MetricsTableV2_t *)metrics_v0;
smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 7);
gpu_metrics->temperature_hotspot =
- SMUQ10_ROUND(GET_METRIC_FIELD(MaxSocketTemperature, flag));
+ SMUQ10_ROUND(GET_METRIC_FIELD(MaxSocketTemperature, version));
/* Individual HBM stack temperature is not reported */
gpu_metrics->temperature_mem =
- SMUQ10_ROUND(GET_METRIC_FIELD(MaxHbmTemperature, flag));
+ SMUQ10_ROUND(GET_METRIC_FIELD(MaxHbmTemperature, version));
/* Reports max temperature of all voltage rails */
gpu_metrics->temperature_vrsoc =
- SMUQ10_ROUND(GET_METRIC_FIELD(MaxVrTemperature, flag));
+ SMUQ10_ROUND(GET_METRIC_FIELD(MaxVrTemperature, version));
gpu_metrics->average_gfx_activity =
- SMUQ10_ROUND(GET_METRIC_FIELD(SocketGfxBusy, flag));
+ SMUQ10_ROUND(GET_METRIC_FIELD(SocketGfxBusy, version));
gpu_metrics->average_umc_activity =
- SMUQ10_ROUND(GET_METRIC_FIELD(DramBandwidthUtilization, flag));
+ SMUQ10_ROUND(GET_METRIC_FIELD(DramBandwidthUtilization, version));
gpu_metrics->mem_max_bandwidth =
- SMUQ10_ROUND(GET_METRIC_FIELD(MaxDramBandwidth, flag));
+ SMUQ10_ROUND(GET_METRIC_FIELD(MaxDramBandwidth, version));
gpu_metrics->curr_socket_power =
- SMUQ10_ROUND(GET_METRIC_FIELD(SocketPower, flag));
+ SMUQ10_ROUND(GET_METRIC_FIELD(SocketPower, version));
/* Energy counter reported in 15.259uJ (2^-16) units */
- gpu_metrics->energy_accumulator = GET_METRIC_FIELD(SocketEnergyAcc, flag);
+ gpu_metrics->energy_accumulator = GET_METRIC_FIELD(SocketEnergyAcc, version);
for (i = 0; i < MAX_GFX_CLKS; i++) {
xcc_id = GET_INST(GC, i);
if (xcc_id >= 0)
gpu_metrics->current_gfxclk[i] =
- SMUQ10_ROUND(GET_METRIC_FIELD(GfxclkFrequency, flag)[xcc_id]);
+ SMUQ10_ROUND(GET_METRIC_FIELD(GfxclkFrequency, version)[xcc_id]);
if (i < MAX_CLKS) {
gpu_metrics->current_socclk[i] =
- SMUQ10_ROUND(GET_METRIC_FIELD(SocclkFrequency, flag)[i]);
+ SMUQ10_ROUND(GET_METRIC_FIELD(SocclkFrequency, version)[i]);
inst = GET_INST(VCN, i);
if (inst >= 0) {
gpu_metrics->current_vclk0[i] =
- SMUQ10_ROUND(GET_METRIC_FIELD(VclkFrequency, flag)[inst]);
+ SMUQ10_ROUND(GET_METRIC_FIELD(VclkFrequency,
+ version)[inst]);
gpu_metrics->current_dclk0[i] =
- SMUQ10_ROUND(GET_METRIC_FIELD(DclkFrequency, flag)[inst]);
+ SMUQ10_ROUND(GET_METRIC_FIELD(DclkFrequency,
+ version)[inst]);
}
}
}
- gpu_metrics->current_uclk = SMUQ10_ROUND(GET_METRIC_FIELD(UclkFrequency, flag));
+ gpu_metrics->current_uclk = SMUQ10_ROUND(GET_METRIC_FIELD(UclkFrequency, version));
/* Total accumulated cycle counter */
- gpu_metrics->accumulation_counter = GET_METRIC_FIELD(AccumulationCounter, flag);
+ gpu_metrics->accumulation_counter = GET_METRIC_FIELD(AccumulationCounter, version);
/* Accumulated throttler residencies */
- gpu_metrics->prochot_residency_acc = GET_METRIC_FIELD(ProchotResidencyAcc, flag);
- gpu_metrics->ppt_residency_acc = GET_METRIC_FIELD(PptResidencyAcc, flag);
- gpu_metrics->socket_thm_residency_acc = GET_METRIC_FIELD(SocketThmResidencyAcc, flag);
- gpu_metrics->vr_thm_residency_acc = GET_METRIC_FIELD(VrThmResidencyAcc, flag);
- gpu_metrics->hbm_thm_residency_acc = GET_METRIC_FIELD(HbmThmResidencyAcc, flag);
+ gpu_metrics->prochot_residency_acc = GET_METRIC_FIELD(ProchotResidencyAcc, version);
+ gpu_metrics->ppt_residency_acc = GET_METRIC_FIELD(PptResidencyAcc, version);
+ gpu_metrics->socket_thm_residency_acc = GET_METRIC_FIELD(SocketThmResidencyAcc, version);
+ gpu_metrics->vr_thm_residency_acc = GET_METRIC_FIELD(VrThmResidencyAcc, version);
+ gpu_metrics->hbm_thm_residency_acc =
+ GET_METRIC_FIELD(HbmThmResidencyAcc, version);
/* Clock Lock Status. Each bit corresponds to one GFXCLK instance */
- gpu_metrics->gfxclk_lock_status = GET_METRIC_FIELD(GfxLockXCDMak, flag) >> GET_INST(GC, 0);
+ gpu_metrics->gfxclk_lock_status = GET_METRIC_FIELD(GfxLockXCDMak,
+ version) >> GET_INST(GC, 0);
if (!(adev->flags & AMD_IS_APU)) {
/* Check smu version, PCIE link speed and width will be reported from pmfw metric
@@ -2561,9 +2589,9 @@ static ssize_t smu_v13_0_6_get_gpu_metrics(struct smu_context *smu, void **table
* for pf from registers
*/
if (smu_v13_0_6_cap_supported(smu, SMU_CAP(PCIE_METRICS))) {
- gpu_metrics->pcie_link_width = metrics_x->PCIeLinkWidth;
+ gpu_metrics->pcie_link_width = GET_GPU_METRIC_FIELD(PCIeLinkWidth, version);
gpu_metrics->pcie_link_speed =
- pcie_gen_to_speed(metrics_x->PCIeLinkSpeed);
+ pcie_gen_to_speed(GET_GPU_METRIC_FIELD(PCIeLinkSpeed, version));
} else if (!amdgpu_sriov_vf(adev)) {
link_width_level = smu_v13_0_6_get_current_pcie_link_width_level(smu);
if (link_width_level > MAX_LINK_WIDTH)
@@ -2576,37 +2604,37 @@ static ssize_t smu_v13_0_6_get_gpu_metrics(struct smu_context *smu, void **table
}
gpu_metrics->pcie_bandwidth_acc =
- SMUQ10_ROUND(metrics_x->PcieBandwidthAcc[0]);
+ SMUQ10_ROUND(GET_GPU_METRIC_FIELD(PcieBandwidthAcc, version)[0]);
gpu_metrics->pcie_bandwidth_inst =
- SMUQ10_ROUND(metrics_x->PcieBandwidth[0]);
+ SMUQ10_ROUND(GET_GPU_METRIC_FIELD(PcieBandwidth, version)[0]);
gpu_metrics->pcie_l0_to_recov_count_acc =
- metrics_x->PCIeL0ToRecoveryCountAcc;
+ GET_GPU_METRIC_FIELD(PCIeL0ToRecoveryCountAcc, version);
gpu_metrics->pcie_replay_count_acc =
- metrics_x->PCIenReplayAAcc;
+ GET_GPU_METRIC_FIELD(PCIenReplayAAcc, version);
gpu_metrics->pcie_replay_rover_count_acc =
- metrics_x->PCIenReplayARolloverCountAcc;
+ GET_GPU_METRIC_FIELD(PCIenReplayARolloverCountAcc, version);
gpu_metrics->pcie_nak_sent_count_acc =
- metrics_x->PCIeNAKSentCountAcc;
+ GET_GPU_METRIC_FIELD(PCIeNAKSentCountAcc, version);
gpu_metrics->pcie_nak_rcvd_count_acc =
- metrics_x->PCIeNAKReceivedCountAcc;
+ GET_GPU_METRIC_FIELD(PCIeNAKReceivedCountAcc, version);
if (smu_v13_0_6_cap_supported(smu, SMU_CAP(OTHER_END_METRICS)))
gpu_metrics->pcie_lc_perf_other_end_recovery =
- metrics_x->PCIeOtherEndRecoveryAcc;
+ GET_GPU_METRIC_FIELD(PCIeOtherEndRecoveryAcc, version);
}
gpu_metrics->system_clock_counter = ktime_get_boottime_ns();
gpu_metrics->gfx_activity_acc =
- SMUQ10_ROUND(GET_METRIC_FIELD(SocketGfxBusyAcc, flag));
+ SMUQ10_ROUND(GET_METRIC_FIELD(SocketGfxBusyAcc, version));
gpu_metrics->mem_activity_acc =
- SMUQ10_ROUND(GET_METRIC_FIELD(DramBandwidthUtilizationAcc, flag));
+ SMUQ10_ROUND(GET_METRIC_FIELD(DramBandwidthUtilizationAcc, version));
for (i = 0; i < NUM_XGMI_LINKS; i++) {
gpu_metrics->xgmi_read_data_acc[i] =
- SMUQ10_ROUND(GET_METRIC_FIELD(XgmiReadDataSizeAcc, flag)[i]);
+ SMUQ10_ROUND(GET_METRIC_FIELD(XgmiReadDataSizeAcc, version)[i]);
gpu_metrics->xgmi_write_data_acc[i] =
- SMUQ10_ROUND(GET_METRIC_FIELD(XgmiWriteDataSizeAcc, flag)[i]);
+ SMUQ10_ROUND(GET_METRIC_FIELD(XgmiWriteDataSizeAcc, version)[i]);
ret = amdgpu_get_xgmi_link_status(adev, i);
if (ret >= 0)
gpu_metrics->xgmi_link_status[i] = ret;
@@ -2616,6 +2644,7 @@ static ssize_t smu_v13_0_6_get_gpu_metrics(struct smu_context *smu, void **table
per_inst = smu_v13_0_6_cap_supported(smu, SMU_CAP(PER_INST_METRICS));
+ num_jpeg_rings = AMDGPU_MAX_JPEG_RINGS_4_0_3;
for_each_xcp(adev->xcp_mgr, xcp, i) {
amdgpu_xcp_get_inst_details(xcp, AMDGPU_XCP_VCN, &inst_mask);
idx = 0;
@@ -2623,14 +2652,14 @@ static ssize_t smu_v13_0_6_get_gpu_metrics(struct smu_context *smu, void **table
/* Both JPEG and VCN have the same instances */
inst = GET_INST(VCN, k);
- for (j = 0; j < adev->jpeg.num_jpeg_rings; ++j) {
+ for (j = 0; j < num_jpeg_rings; ++j) {
gpu_metrics->xcp_stats[i].jpeg_busy
- [(idx * adev->jpeg.num_jpeg_rings) + j] =
- SMUQ10_ROUND(GET_METRIC_FIELD(JpegBusy, flag)
- [(inst * adev->jpeg.num_jpeg_rings) + j]);
+ [(idx * num_jpeg_rings) + j] =
+ SMUQ10_ROUND(GET_METRIC_FIELD(JpegBusy, version)
+ [(inst * num_jpeg_rings) + j]);
}
gpu_metrics->xcp_stats[i].vcn_busy[idx] =
- SMUQ10_ROUND(GET_METRIC_FIELD(VcnBusy, flag)[inst]);
+ SMUQ10_ROUND(GET_METRIC_FIELD(VcnBusy, version)[inst]);
idx++;
}
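
xcp_stats[i].jpeg_busy is a flattened [instance][ring] matrix indexed as (idx * num_jpeg_rings) + j; pinning num_jpeg_rings to the v4.0.3 constant keeps that stride matched to the PMFW table layout even if the driver's runtime ring count differs. A tiny self-checking illustration of the flattened indexing (sizes invented for the example):

/* Flattened [inst][ring] indexing, as used for jpeg_busy above. */
#include <assert.h>

#define NUM_INSTANCES  4 /* assumed, illustration only */
#define NUM_JPEG_RINGS 2 /* assumed, illustration only */

int main(void)
{
        int busy[NUM_INSTANCES * NUM_JPEG_RINGS] = { 0 };
        int inst = 3, ring = 1;

        busy[(inst * NUM_JPEG_RINGS) + ring] = 100;
        assert(busy[7] == 100); /* row 3, column 1 of a 4x2 matrix */
        return 0;
}
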
@@ -2641,27 +2670,29 @@ static ssize_t smu_v13_0_6_get_gpu_metrics(struct smu_context *smu, void **table
for_each_inst(k, inst_mask) {
inst = GET_INST(GC, k);
gpu_metrics->xcp_stats[i].gfx_busy_inst[idx] =
- SMUQ10_ROUND(metrics_x->GfxBusy[inst]);
+ SMUQ10_ROUND(GET_GPU_METRIC_FIELD(GfxBusy, version)[inst]);
gpu_metrics->xcp_stats[i].gfx_busy_acc[idx] =
- SMUQ10_ROUND(metrics_x->GfxBusyAcc[inst]);
+ SMUQ10_ROUND(GET_GPU_METRIC_FIELD(GfxBusyAcc,
+ version)[inst]);
if (smu_v13_0_6_cap_supported(
smu, SMU_CAP(HST_LIMIT_METRICS)))
gpu_metrics->xcp_stats[i].gfx_below_host_limit_acc[idx] =
- SMUQ10_ROUND(metrics_x->GfxclkBelowHostLimitAcc
+ SMUQ10_ROUND(GET_GPU_METRIC_FIELD
+ (GfxclkBelowHostLimitAcc, version)
[inst]);
idx++;
}
}
}
- gpu_metrics->xgmi_link_width = SMUQ10_ROUND(GET_METRIC_FIELD(XgmiWidth, flag));
- gpu_metrics->xgmi_link_speed = SMUQ10_ROUND(GET_METRIC_FIELD(XgmiBitrate, flag));
+ gpu_metrics->xgmi_link_width = SMUQ10_ROUND(GET_METRIC_FIELD(XgmiWidth, version));
+ gpu_metrics->xgmi_link_speed = SMUQ10_ROUND(GET_METRIC_FIELD(XgmiBitrate, version));
- gpu_metrics->firmware_timestamp = GET_METRIC_FIELD(Timestamp, flag);
+ gpu_metrics->firmware_timestamp = GET_METRIC_FIELD(Timestamp, version);
*table = (void *)gpu_metrics;
- kfree(metrics_x);
+ kfree(metrics_v0);
return sizeof(*gpu_metrics);
}
@@ -3566,7 +3597,8 @@ void smu_v13_0_6_set_ppt_funcs(struct smu_context *smu)
smu->ppt_funcs = &smu_v13_0_6_ppt_funcs;
smu->message_map = smu_v13_0_6_message_map;
smu->clock_map = smu_v13_0_6_clk_map;
- smu->feature_map = smu_v13_0_6_feature_mask_map;
+ smu->feature_map = (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 12)) ?
+ smu_v13_0_12_feature_mask_map : smu_v13_0_6_feature_mask_map;
smu->table_map = smu_v13_0_6_table_map;
smu->smc_driver_if_version = SMU13_0_6_DRIVER_IF_VERSION;
smu->smc_fw_caps |= SMU_FW_CAP_RAS_PRI;
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.h b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.h
index f0fa42a645c0..717fe669882e 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.h
@@ -27,6 +27,14 @@
#define SMU_13_0_6_UMD_PSTATE_SOCCLK_LEVEL 0x4
#define SMU_13_0_6_UMD_PSTATE_MCLK_LEVEL 0x2
+typedef enum {
+/*0*/ METRICS_VERSION_V0 = 0,
+/*1*/ METRICS_VERSION_V1 = 1,
+/*2*/ METRICS_VERSION_V2 = 2,
+
+/*3*/ NUM_METRICS = 3
+} METRICS_LIST_e;
+
extern void smu_v13_0_6_set_ppt_funcs(struct smu_context *smu);
#endif
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
index 55ef18517b0f..19f47811f6db 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
@@ -2811,5 +2811,4 @@ void smu_v13_0_7_set_ppt_funcs(struct smu_context *smu)
smu->workload_map = smu_v13_0_7_workload_map;
smu->smc_driver_if_version = SMU13_0_7_DRIVER_IF_VERSION;
smu_v13_0_set_smu_mailbox_registers(smu);
- smu->power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
}
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c
index 9b2f4fe1578b..adbb6332376e 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c
@@ -245,6 +245,7 @@ int smu_v14_0_check_fw_version(struct smu_context *smu)
switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {
case IP_VERSION(14, 0, 0):
case IP_VERSION(14, 0, 4):
+ case IP_VERSION(14, 0, 5):
smu->smc_driver_if_version = SMU14_DRIVER_IF_VERSION_SMU_V14_0_0;
break;
case IP_VERSION(14, 0, 1):
@@ -769,6 +770,7 @@ int smu_v14_0_gfx_off_control(struct smu_context *smu, bool enable)
case IP_VERSION(14, 0, 2):
case IP_VERSION(14, 0, 3):
case IP_VERSION(14, 0, 4):
+ case IP_VERSION(14, 0, 5):
if (!(adev->pm.pp_feature & PP_GFXOFF_MASK))
return 0;
if (enable)
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
index 9f55207ea9bc..d834d134ad2b 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
@@ -459,8 +459,7 @@ int smu_cmn_send_smc_msg_with_param(struct smu_context *smu,
}
if (read_arg) {
smu_cmn_read_arg(smu, read_arg);
- dev_dbg(adev->dev, "smu send message: %s(%d) param: 0x%08x, resp: 0x%08x,\
- readval: 0x%08x\n",
+ dev_dbg(adev->dev, "smu send message: %s(%d) param: 0x%08x, resp: 0x%08x, readval: 0x%08x\n",
smu_get_message_name(smu, msg), index, param, reg, *read_arg);
} else {
dev_dbg(adev->dev, "smu send message: %s(%d) param: 0x%08x, resp: 0x%08x\n",
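
The smu_cmn.c hunk above fixes a classic C pitfall: a backslash-newline inside a string literal splices the lines but keeps the continuation line's leading whitespace inside the string, so the old dev_dbg format carried a run of tabs before "readval". A small standalone demonstration (using spaces rather than tabs):

/* Demonstration of the string-continuation pitfall fixed above. */
#include <stdio.h>

int main(void)
{
        const char *bad = "resp: 0x%08x,\
                readval: 0x%08x"; /* the splice keeps this line's leading whitespace */
        const char *good = "resp: 0x%08x, "
                           "readval: 0x%08x"; /* adjacent literals concatenate cleanly */

        printf("bad : [%s]\n", bad);  /* note the embedded run of spaces */
        printf("good: [%s]\n", good);
        return 0;
}
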