Diffstat (limited to 'arch/x86/kernel/cpu/resctrl/pseudo_lock.c')
 -rw-r--r--  arch/x86/kernel/cpu/resctrl/pseudo_lock.c | 41
 1 file changed, 24 insertions(+), 17 deletions(-)
diff --git a/arch/x86/kernel/cpu/resctrl/pseudo_lock.c b/arch/x86/kernel/cpu/resctrl/pseudo_lock.c
index 01fa7890b43f..1190c48a16b2 100644
--- a/arch/x86/kernel/cpu/resctrl/pseudo_lock.c
+++ b/arch/x86/kernel/cpu/resctrl/pseudo_lock.c
@@ -25,6 +25,7 @@
#include <asm/cpu_device_id.h>
#include <asm/resctrl.h>
#include <asm/perf_event.h>
+#include <asm/msr.h>
#include "../../events/perf_event.h" /* For X86_CONFIG() */
#include "internal.h"
@@ -52,7 +53,8 @@ static char *pseudo_lock_devnode(const struct device *dev, umode_t *mode)
rdtgrp = dev_get_drvdata(dev);
if (mode)
*mode = 0600;
- return kasprintf(GFP_KERNEL, "pseudo_lock/%s", rdtgrp->kn->name);
+ guard(mutex)(&rdtgroup_mutex);
+ return kasprintf(GFP_KERNEL, "pseudo_lock/%s", rdt_kn_name(rdtgrp->kn));
}
static const struct class pseudo_lock_class = {
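
The guard(mutex)() helper used above comes from <linux/cleanup.h>: it acquires the lock and releases it automatically when the enclosing scope ends, so the kasprintf() call runs under rdtgroup_mutex with no explicit unlock on the return path. A minimal sketch of the pattern, with my_lock and my_shared_name as illustrative stand-ins for this patch's rdtgroup_mutex and kernfs node name:

    #include <linux/cleanup.h>
    #include <linux/mutex.h>
    #include <linux/slab.h>
    #include <linux/string.h>

    static DEFINE_MUTEX(my_lock);
    static const char *my_shared_name = "example";

    static char *copy_name(void)
    {
            guard(mutex)(&my_lock);  /* dropped automatically at every return */
            return kasprintf(GFP_KERNEL, "pseudo_lock/%s", my_shared_name);
    }
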
@@ -480,8 +482,8 @@ int resctrl_arch_pseudo_lock_fn(void *_plr)
* the buffer and evict pseudo-locked memory read earlier from the
* cache.
*/
- saved_msr = __rdmsr(MSR_MISC_FEATURE_CONTROL);
- __wrmsr(MSR_MISC_FEATURE_CONTROL, prefetch_disable_bits, 0x0);
+ saved_msr = native_rdmsrq(MSR_MISC_FEATURE_CONTROL);
+ native_wrmsrq(MSR_MISC_FEATURE_CONTROL, prefetch_disable_bits);
closid_p = this_cpu_read(pqr_state.cur_closid);
rmid_p = this_cpu_read(pqr_state.cur_rmid);
mem_r = plr->kmem;
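
The two helpers above are part of the x86 MSR interface rename: the old __rdmsr()/__wrmsr() pair handled the value as two 32-bit halves, while the 'q' (quadword) variants take or return a single u64, and the native_ prefix denotes the raw instruction without paravirt indirection. A before/after sketch of the same save-and-disable step:

    u64 saved_msr;

    /* old: write split into low/high halves, high half explicitly zero */
    saved_msr = __rdmsr(MSR_MISC_FEATURE_CONTROL);
    __wrmsr(MSR_MISC_FEATURE_CONTROL, prefetch_disable_bits, 0x0);

    /* new: one u64 in, one u64 out, no manual splitting */
    saved_msr = native_rdmsrq(MSR_MISC_FEATURE_CONTROL);
    native_wrmsrq(MSR_MISC_FEATURE_CONTROL, prefetch_disable_bits);
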
@@ -493,7 +495,7 @@ int resctrl_arch_pseudo_lock_fn(void *_plr)
* pseudo-locked followed by reading of kernel memory to load it
* into the cache.
*/
- __wrmsr(MSR_IA32_PQR_ASSOC, rmid_p, plr->closid);
+ native_wrmsr(MSR_IA32_PQR_ASSOC, rmid_p, plr->closid);
/*
* Cache was flushed earlier. Now access kernel memory to read it
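
MSR_IA32_PQR_ASSOC deliberately keeps the two-argument native_wrmsr() form: the register packs the RMID into its low 32 bits and the CLOSID into its high 32 bits, so the low/high signature mirrors the layout directly. The equivalent single-u64 write would be (a sketch only, not what the patch does):

    native_wrmsrq(MSR_IA32_PQR_ASSOC, ((u64)plr->closid << 32) | rmid_p);
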
@@ -530,10 +532,10 @@ int resctrl_arch_pseudo_lock_fn(void *_plr)
* Critical section end: restore closid with capacity bitmask that
* does not overlap with pseudo-locked region.
*/
- __wrmsr(MSR_IA32_PQR_ASSOC, rmid_p, closid_p);
+ native_wrmsr(MSR_IA32_PQR_ASSOC, rmid_p, closid_p);
/* Re-enable the hardware prefetcher(s) */
- wrmsrl(MSR_MISC_FEATURE_CONTROL, saved_msr);
+ wrmsrq(MSR_MISC_FEATURE_CONTROL, saved_msr);
local_irq_enable();
plr->thread_done = 1;
@@ -903,7 +905,7 @@ int resctrl_arch_measure_cycles_lat_fn(void *_plr)
* Disable hardware prefetchers.
*/
rdmsr(MSR_MISC_FEATURE_CONTROL, saved_low, saved_high);
- wrmsr(MSR_MISC_FEATURE_CONTROL, prefetch_disable_bits, 0x0);
+ wrmsrq(MSR_MISC_FEATURE_CONTROL, prefetch_disable_bits);
mem_r = READ_ONCE(plr->kmem);
/*
* Dummy execute of the time measurement to load the needed
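
This hunk mixes generations on purpose: the read keeps the legacy rdmsr() macro, which returns the value through two 32-bit out-parameters, while the write moves to the single-argument wrmsrq(). Assuming the function later restores the saved halves with the matching two-argument wrmsr(), the full save/disable/restore pattern looks roughly like:

    u32 saved_low, saved_high;

    rdmsr(MSR_MISC_FEATURE_CONTROL, saved_low, saved_high);   /* split read */
    wrmsrq(MSR_MISC_FEATURE_CONTROL, prefetch_disable_bits);  /* u64 write  */
    /* ... timed measurement runs with prefetchers disabled ... */
    wrmsr(MSR_MISC_FEATURE_CONTROL, saved_low, saved_high);   /* restore    */
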
@@ -999,7 +1001,7 @@ static int measure_residency_fn(struct perf_event_attr *miss_attr,
* Disable hardware prefetchers.
*/
rdmsr(MSR_MISC_FEATURE_CONTROL, saved_low, saved_high);
- wrmsr(MSR_MISC_FEATURE_CONTROL, prefetch_disable_bits, 0x0);
+ wrmsrq(MSR_MISC_FEATURE_CONTROL, prefetch_disable_bits);
/* Initialize rest of local variables */
/*
@@ -1017,8 +1019,8 @@ static int measure_residency_fn(struct perf_event_attr *miss_attr,
* used in L1 cache, second to capture accurate value that does not
* include cache misses incurred because of instruction loads.
*/
- rdpmcl(hit_pmcnum, hits_before);
- rdpmcl(miss_pmcnum, miss_before);
+ hits_before = rdpmc(hit_pmcnum);
+ miss_before = rdpmc(miss_pmcnum);
/*
* From SDM: Performing back-to-back fast reads are not guaranteed
* to be monotonic.
@@ -1026,8 +1028,8 @@ static int measure_residency_fn(struct perf_event_attr *miss_attr,
* before proceeding.
*/
rmb();
- rdpmcl(hit_pmcnum, hits_before);
- rdpmcl(miss_pmcnum, miss_before);
+ hits_before = rdpmc(hit_pmcnum);
+ miss_before = rdpmc(miss_pmcnum);
/*
* Use LFENCE to ensure all previous instructions are retired
* before proceeding.
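
The counter reads switch from the out-parameter macro rdpmcl(counter, var) to rdpmc(), which returns the 64-bit count directly. Note that the reads are issued twice by design: the first pair only pulls the rdpmc code path into the L1 caches, and the second pair, taken after the rmb() (LFENCE on x86), provides the actual baseline. The pattern in the new style:

    u64 hits_before, miss_before;

    /* first pair: warms the code path; values are overwritten below */
    hits_before = rdpmc(hit_pmcnum);
    miss_before = rdpmc(miss_pmcnum);
    rmb();                    /* wait for prior instructions to retire */
    /* second pair: the baseline actually used by the measurement */
    hits_before = rdpmc(hit_pmcnum);
    miss_before = rdpmc(miss_pmcnum);
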
@@ -1049,8 +1051,8 @@ static int measure_residency_fn(struct perf_event_attr *miss_attr,
* before proceeding.
*/
rmb();
- rdpmcl(hit_pmcnum, hits_after);
- rdpmcl(miss_pmcnum, miss_after);
+ hits_after = rdpmc(hit_pmcnum);
+ miss_after = rdpmc(miss_pmcnum);
/*
* Use LFENCE to ensure all previous instructions are retired
* before proceeding.
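
With the before/after snapshots captured, the residency result is just the difference of the two pairs. A sketch of the final step (the real function hands the counts on for reporting rather than naming them like this):

    u64 l2_hits = hits_after - hits_before;
    u64 l2_miss = miss_after - miss_before;
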
@@ -1298,6 +1300,7 @@ int rdtgroup_pseudo_lock_create(struct rdtgroup *rdtgrp)
struct task_struct *thread;
unsigned int new_minor;
struct device *dev;
+ char *kn_name __free(kfree) = NULL;
int ret;
ret = pseudo_lock_region_alloc(plr);
@@ -1309,6 +1312,11 @@ int rdtgroup_pseudo_lock_create(struct rdtgroup *rdtgrp)
ret = -EINVAL;
goto out_region;
}
+ kn_name = kstrdup(rdt_kn_name(rdtgrp->kn), GFP_KERNEL);
+ if (!kn_name) {
+ ret = -ENOMEM;
+ goto out_cstates;
+ }
plr->thread_done = 0;
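
The kstrdup() takes a stable copy of the kernfs node name while it is still safe to read, and the __free(kfree) annotation on the declaration frees that copy automatically on every exit path, error gotos included, so no explicit kfree() is needed. The bare shape of the pattern, with use_copied_name as an illustrative function:

    #include <linux/cleanup.h>
    #include <linux/slab.h>
    #include <linux/string.h>

    static int use_copied_name(const char *src)
    {
            char *name __free(kfree) = kstrdup(src, GFP_KERNEL);

            if (!name)
                    return -ENOMEM;  /* kfree(NULL) is a safe no-op */

            /* ... use name; kfree(name) runs automatically on return ... */
            return 0;
    }
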
@@ -1353,8 +1361,7 @@ int rdtgroup_pseudo_lock_create(struct rdtgroup *rdtgrp)
mutex_unlock(&rdtgroup_mutex);
if (!IS_ERR_OR_NULL(debugfs_resctrl)) {
- plr->debugfs_dir = debugfs_create_dir(rdtgrp->kn->name,
- debugfs_resctrl);
+ plr->debugfs_dir = debugfs_create_dir(kn_name, debugfs_resctrl);
if (!IS_ERR_OR_NULL(plr->debugfs_dir))
debugfs_create_file("pseudo_lock_measure", 0200,
plr->debugfs_dir, rdtgrp,
@@ -1363,7 +1370,7 @@ int rdtgroup_pseudo_lock_create(struct rdtgroup *rdtgrp)
dev = device_create(&pseudo_lock_class, NULL,
MKDEV(pseudo_lock_major, new_minor),
- rdtgrp, "%s", rdtgrp->kn->name);
+ rdtgrp, "%s", kn_name);
mutex_lock(&rdtgroup_mutex);