| author | Linus Torvalds <torvalds@linux-foundation.org> | 2025-12-02 11:55:58 -0800 |
|---|---|---|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2025-12-02 11:55:58 -0800 |
| commit | 2ae20d651091c71ef182d28cbf10ce6f8be79c99 (patch) | |
| tree | 29099e23cd97968f4b3a2c144b4e5f20a93924dd /fs/resctrl/rdtgroup.c | |
| parent | 2a47c26e55a2bc085a2349ed1d4e065ee298155f (diff) | |
| parent | ac7de456a37f9b126eb53b89c2bb27d625dc5fd9 (diff) | |
Merge tag 'x86_cache_for_v6.19_rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull x86 resource control updates from Borislav Petkov:
- Add support for AMD's Smart Data Cache Injection feature which allows
for direct insertion of data from I/O devices into the L3 cache, thus
bypassing DRAM and saving its bandwidth; the resctrl side of the
feature allows the size of the L3 used for data injection to be
controlled
- Add Intel Clearwater Forest to the list of CPUs which support
Sub-NUMA clustering
- Other fixes and cleanups
* tag 'x86_cache_for_v6.19_rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
fs/resctrl: Update bit_usage to reflect io_alloc
fs/resctrl: Introduce interface to modify io_alloc capacity bitmasks
fs/resctrl: Modify struct rdt_parse_data to pass mode and CLOSID
fs/resctrl: Introduce interface to display io_alloc CBMs
fs/resctrl: Add user interface to enable/disable io_alloc feature
fs/resctrl: Introduce interface to display "io_alloc" support
x86,fs/resctrl: Implement "io_alloc" enable/disable handlers
x86,fs/resctrl: Detect io_alloc feature
x86/resctrl: Add SDCIAE feature in the command line options
x86/cpufeatures: Add support for L3 Smart Data Cache Injection Allocation Enforcement
fs/resctrl: Consider sparse masks when initializing new group's allocation
x86/resctrl: Support Sub-NUMA Cluster (SNC) mode on Clearwater Forest
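As a rough illustration of the control surface added by the "io_alloc" series above, here is a minimal, hypothetical user-space sketch that enables L3 io_alloc and reads back its capacity bitmask. The /sys/fs/resctrl/info/L3/io_alloc and io_alloc_cbm paths and the "1" written to enable the feature are assumptions based on common resctrl conventions, not something confirmed by this merge.

```c
/*
 * Hypothetical sketch only: the paths under /sys/fs/resctrl/info/L3/ and the
 * "1" enable value are assumptions, not taken from this merge.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static int write_str(const char *path, const char *val)
{
	int fd = open(path, O_WRONLY);

	if (fd < 0)
		return -1;
	if (write(fd, val, strlen(val)) < 0) {
		close(fd);
		return -1;
	}
	return close(fd);
}

int main(void)
{
	char buf[256];
	ssize_t n;
	int fd;

	/* Ask the kernel to enable SDCI allocation enforcement on L3. */
	if (write_str("/sys/fs/resctrl/info/L3/io_alloc", "1"))
		perror("enable io_alloc");

	/* Read back the capacity bitmask reserved for I/O traffic. */
	fd = open("/sys/fs/resctrl/info/L3/io_alloc_cbm", O_RDONLY);
	if (fd < 0) {
		perror("open io_alloc_cbm");
		return 1;
	}
	n = read(fd, buf, sizeof(buf) - 1);
	if (n > 0) {
		buf[n] = '\0';
		printf("io_alloc_cbm: %s", buf);
	}
	close(fd);
	return 0;
}
```

On hardware without the capability these files are not registered at all (io_alloc_init() in the diff below only sets their fflags when r->cache.io_alloc_capable is set), so the open() calls above would simply fail.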
Diffstat (limited to 'fs/resctrl/rdtgroup.c')
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | fs/resctrl/rdtgroup.c | 82 |

1 file changed, 75 insertions, 7 deletions
```diff
diff --git a/fs/resctrl/rdtgroup.c b/fs/resctrl/rdtgroup.c
index 0320360cd7a6..8e39dfda56bc 100644
--- a/fs/resctrl/rdtgroup.c
+++ b/fs/resctrl/rdtgroup.c
@@ -226,6 +226,11 @@ bool closid_allocated(unsigned int closid)
 	return !test_bit(closid, closid_free_map);
 }
 
+bool closid_alloc_fixed(u32 closid)
+{
+	return __test_and_clear_bit(closid, closid_free_map);
+}
+
 /**
  * rdtgroup_mode_by_closid - Return mode of resource group with closid
  * @closid: closid if the resource group
@@ -1057,15 +1062,17 @@ static int rdt_bit_usage_show(struct kernfs_open_file *of,
 	cpus_read_lock();
 	mutex_lock(&rdtgroup_mutex);
-	hw_shareable = r->cache.shareable_bits;
 	list_for_each_entry(dom, &r->ctrl_domains, hdr.list) {
 		if (sep)
 			seq_putc(seq, ';');
+		hw_shareable = r->cache.shareable_bits;
 		sw_shareable = 0;
 		exclusive = 0;
 		seq_printf(seq, "%d=", dom->hdr.id);
 		for (i = 0; i < closids_supported(); i++) {
-			if (!closid_allocated(i))
+			if (!closid_allocated(i) ||
+			    (resctrl_arch_get_io_alloc_enabled(r) &&
+			     i == resctrl_io_alloc_closid(r)))
 				continue;
 			ctrl_val = resctrl_arch_get_config(r, dom, i,
 							   s->conf_type);
 			mode = rdtgroup_mode_by_closid(i);
@@ -1093,6 +1100,21 @@ static int rdt_bit_usage_show(struct kernfs_open_file *of,
 				break;
 			}
 		}
+
+		/*
+		 * When the "io_alloc" feature is enabled, a portion of the cache
+		 * is configured for shared use between hardware and software.
+		 * Also, when CDP is enabled the CBMs of CDP_CODE and CDP_DATA
+		 * resources are kept in sync. So, the CBMs for "io_alloc" can
+		 * be accessed through either resource.
+		 */
+		if (resctrl_arch_get_io_alloc_enabled(r)) {
+			ctrl_val = resctrl_arch_get_config(r, dom,
+							   resctrl_io_alloc_closid(r),
+							   s->conf_type);
+			hw_shareable |= ctrl_val;
+		}
+
 		for (i = r->cache.cbm_len - 1; i >= 0; i--) {
 			pseudo_locked = dom->plr ? dom->plr->cbm : 0;
 			hwb = test_bit(i, &hw_shareable);
@@ -1247,7 +1269,7 @@ static int rdtgroup_mode_show(struct kernfs_open_file *of,
 	return 0;
 }
 
-static enum resctrl_conf_type resctrl_peer_type(enum resctrl_conf_type my_type)
+enum resctrl_conf_type resctrl_peer_type(enum resctrl_conf_type my_type)
 {
 	switch (my_type) {
 	case CDP_CODE:
@@ -1838,6 +1860,18 @@ void resctrl_bmec_files_show(struct rdt_resource *r, struct kernfs_node *l3_mon_
 	kernfs_put(mon_kn);
 }
 
+const char *rdtgroup_name_by_closid(u32 closid)
+{
+	struct rdtgroup *rdtgrp;
+
+	list_for_each_entry(rdtgrp, &rdt_all_groups, rdtgroup_list) {
+		if (rdtgrp->closid == closid)
+			return rdt_kn_name(rdtgrp->kn);
+	}
+
+	return NULL;
+}
+
 /* rdtgroup information files for one cache resource. */
 static struct rftype res_common_files[] = {
 	{
@@ -1948,6 +1982,20 @@ static struct rftype res_common_files[] = {
 		.seq_show	= rdt_thread_throttle_mode_show,
 	},
 	{
+		.name		= "io_alloc",
+		.mode		= 0644,
+		.kf_ops		= &rdtgroup_kf_single_ops,
+		.seq_show	= resctrl_io_alloc_show,
+		.write		= resctrl_io_alloc_write,
+	},
+	{
+		.name		= "io_alloc_cbm",
+		.mode		= 0644,
+		.kf_ops		= &rdtgroup_kf_single_ops,
+		.seq_show	= resctrl_io_alloc_cbm_show,
+		.write		= resctrl_io_alloc_cbm_write,
+	},
+	{
 		.name		= "max_threshold_occupancy",
 		.mode		= 0644,
 		.kf_ops		= &rdtgroup_kf_single_ops,
@@ -2138,6 +2186,23 @@ static void thread_throttle_mode_init(void)
 				  RFTYPE_CTRL_INFO | RFTYPE_RES_MB);
 }
 
+/*
+ * The resctrl file "io_alloc" is added using L3 resource. However, it results
+ * in this file being visible for *all* cache resources (eg. L2 cache),
+ * whether it supports "io_alloc" or not.
+ */
+static void io_alloc_init(void)
+{
+	struct rdt_resource *r = resctrl_arch_get_resource(RDT_RESOURCE_L3);
+
+	if (r->cache.io_alloc_capable) {
+		resctrl_file_fflags_init("io_alloc", RFTYPE_CTRL_INFO |
+						     RFTYPE_RES_CACHE);
+		resctrl_file_fflags_init("io_alloc_cbm",
+					 RFTYPE_CTRL_INFO | RFTYPE_RES_CACHE);
+	}
+}
+
 void resctrl_file_fflags_init(const char *config, unsigned long fflags)
 {
 	struct rftype *rft;
@@ -3383,11 +3448,12 @@ static u32 cbm_ensure_valid(u32 _val, struct rdt_resource *r)
 {
 	unsigned int cbm_len = r->cache.cbm_len;
 	unsigned long first_bit, zero_bit;
-	unsigned long val = _val;
+	unsigned long val;
 
-	if (!val)
-		return 0;
+	if (!_val || r->cache.arch_has_sparse_bitmasks)
+		return _val;
 
+	val = _val;
 	first_bit = find_first_bit(&val, cbm_len);
 	zero_bit = find_next_zero_bit(&val, cbm_len, first_bit);
 
@@ -3480,7 +3546,7 @@ static int __init_one_rdt_domain(struct rdt_ctrl_domain *d, struct resctrl_schem
  * If there are no more shareable bits available on any domain then
  * the entire allocation will fail.
 */
-static int rdtgroup_init_cat(struct resctrl_schema *s, u32 closid)
+int rdtgroup_init_cat(struct resctrl_schema *s, u32 closid)
 {
 	struct rdt_ctrl_domain *d;
 	int ret;
@@ -4408,6 +4474,8 @@ int resctrl_init(void)
 
 	thread_throttle_mode_init();
 
+	io_alloc_init();
+
 	ret = resctrl_mon_resource_init();
 	if (ret)
 		return ret;
```
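To make the rdt_bit_usage_show() change above easier to follow, here is a standalone sketch (not kernel code) of the per-bit classification once the io_alloc CLOSID's CBM has been folded into hw_shareable. The 20-bit CBM length and all sample bitmasks are invented for illustration, and the pseudo-locked ('P') case is omitted.

```c
/* Illustration only: sample masks and CBM length are made up. */
#include <stdio.h>

#define CBM_LEN 20

int main(void)
{
	unsigned long hw_shareable = 0x00003;	/* r->cache.shareable_bits         */
	unsigned long io_alloc_cbm = 0x000ff;	/* CBM of the io_alloc CLOSID      */
	unsigned long sw_shareable = 0x00f00;	/* bits used by "shareable" groups */
	unsigned long exclusive    = 0xf0000;	/* bits used by "exclusive" groups */
	int i;

	/* io_alloc capacity is shared between hardware (I/O) and software. */
	hw_shareable |= io_alloc_cbm;

	for (i = CBM_LEN - 1; i >= 0; i--) {
		int hwb = !!(hw_shareable & (1UL << i));
		int swb = !!(sw_shareable & (1UL << i));

		if (hwb && swb)
			putchar('X');		/* hardware and software share it */
		else if (hwb)
			putchar('H');		/* hardware only                  */
		else if (swb)
			putchar('S');		/* software only                  */
		else if (exclusive & (1UL << i))
			putchar('E');		/* owned by an exclusive group    */
		else
			putchar('0');		/* unused                         */
	}
	putchar('\n');	/* prints EEEE0000SSSSHHHHHHHH for the masks above */
	return 0;
}
```

Relative to the old code, hw_shareable is now recomputed per domain and grows by the io_alloc CLOSID's CBM, and that CLOSID is skipped in the regular group walk, so capacity claimed for I/O is reported as 'H' or 'X' instead of being attributed to a software group.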