Merge tag 'x86_cache_for_v6.1_rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 cache resource control updates from Borislav Petkov:

 - More work by James Morse to disentangle the resctrl filesystem
   generic code from the architectural one, with the end goal of
   plugging ARM's MPAM implementation into it too so that the user
   interface remains the same (see the interface sketch below)

 - Properly restore the MSR_MISC_FEATURE_CONTROL value instead of
   blindly overwriting it to 0
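
The shape of that split shows in the interfaces the series settles on: the
filesystem-generic resctrl code calls resctrl_arch_*() helpers for anything
hardware-specific, and the architecture notifies the filesystem through
per-domain online/offline callbacks. A rough sketch of those prototypes
(simplified from the headers this merge touches, not a verbatim copy):

    /* Filesystem -> architecture: read one RMID's event counter, in bytes. */
    int resctrl_arch_rmid_read(struct rdt_resource *r, struct rdt_domain *d,
                               u32 rmid, enum resctrl_event_id eventid, u64 *val);

    /* Architecture -> filesystem: a monitoring/control domain came or went. */
    int resctrl_online_domain(struct rdt_resource *r, struct rdt_domain *d);
    void resctrl_offline_domain(struct rdt_resource *r, struct rdt_domain *d);

MPAM would supply its own resctrl_arch_*() implementations behind the same
prototypes, which is what keeps the user interface unchanged.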

* tag 'x86_cache_for_v6.1_rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (22 commits)
  x86/resctrl: Make resctrl_arch_rmid_read() return values in bytes
  x86/resctrl: Add resctrl_rmid_realloc_limit to abstract x86's boot_cpu_data
  x86/resctrl: Rename and change the units of resctrl_cqm_threshold
  x86/resctrl: Move get_corrected_mbm_count() into resctrl_arch_rmid_read()
  x86/resctrl: Move mbm_overflow_count() into resctrl_arch_rmid_read()
  x86/resctrl: Pass the required parameters into resctrl_arch_rmid_read()
  x86/resctrl: Abstract __rmid_read()
  x86/resctrl: Allow per-rmid arch private storage to be reset
  x86/resctrl: Add per-rmid arch private storage for overflow and chunks
  x86/resctrl: Calculate bandwidth from the previous __mon_event_count() chunks
  x86/resctrl: Allow update_mba_bw() to update controls directly
  x86/resctrl: Remove architecture copy of mbps_val
  x86/resctrl: Switch over to the resctrl mbps_val list
  x86/resctrl: Create mba_sc configuration in the rdt_domain
  x86/resctrl: Abstract and use supports_mba_mbps()
  x86/resctrl: Remove set_mba_sc()s control array re-initialisation
  x86/resctrl: Add domain offline callback for resctrl work
  x86/resctrl: Group struct rdt_hw_domain cleanup
  x86/resctrl: Add domain online callback for resctrl work
  x86/resctrl: Merge mon_capable and mon_enabled
  ...
torvalds committed Oct 4, 2022
2 parents b5f0b11 + f7b1843 commit 193e226
Showing 8 changed files with 523 additions and 265 deletions.
9 changes: 9 additions & 0 deletions arch/x86/include/asm/resctrl.h
@@ -81,6 +81,15 @@ static void __resctrl_sched_in(void)
}
}

static inline unsigned int resctrl_arch_round_mon_val(unsigned int val)
{
unsigned int scale = boot_cpu_data.x86_cache_occ_scale;

/* h/w works in units of "boot_cpu_data.x86_cache_occ_scale" */
val /= scale;
return val * scale;
}

static inline void resctrl_sched_in(void)
{
if (static_branch_likely(&rdt_enable_key))
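
As an aside on the new resctrl_arch_round_mon_val() helper above: the occupancy
and MBM counters tick in units of boot_cpu_data.x86_cache_occ_scale, so the
helper just rounds a byte value down to that granularity. A worked example with
an assumed scale (real parts report their own value via CPUID):

    /* Assume x86_cache_occ_scale == 65536, i.e. 64 KiB per counter unit. */
    unsigned int val = 100000;
    val /= 65536;   /* -> 1 unit */
    val *= 65536;   /* -> 65536 bytes: 100000 rounds down to one h/w unit */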
117 changes: 38 additions & 79 deletions arch/x86/kernel/cpu/resctrl/core.c
@@ -147,7 +147,6 @@ static inline void cache_alloc_hsw_probe(void)
r->cache.shareable_bits = 0xc0000;
r->cache.min_cbm_bits = 2;
r->alloc_capable = true;
r->alloc_enabled = true;

rdt_alloc_capable = true;
}
@@ -211,7 +210,6 @@ static bool __get_mem_config_intel(struct rdt_resource *r)
thread_throttle_mode_init();

r->alloc_capable = true;
r->alloc_enabled = true;

return true;
}
@@ -242,7 +240,6 @@ static bool __rdt_get_mem_config_amd(struct rdt_resource *r)
r->data_width = 4;

r->alloc_capable = true;
r->alloc_enabled = true;

return true;
}
@@ -261,7 +258,6 @@ static void rdt_get_cache_alloc_cfg(int idx, struct rdt_resource *r)
r->cache.shareable_bits = ebx & r->default_ctrl;
r->data_width = (r->cache.cbm_len + 3) / 4;
r->alloc_capable = true;
r->alloc_enabled = true;
}

static void rdt_get_cdp_config(int level)
@@ -300,7 +296,7 @@ mba_wrmsr_amd(struct rdt_domain *d, struct msr_param *m, struct rdt_resource *r)
* that can be written to QOS_MSRs.
* There are currently no SKUs which support non linear delay values.
*/
u32 delay_bw_map(unsigned long bw, struct rdt_resource *r)
static u32 delay_bw_map(unsigned long bw, struct rdt_resource *r)
{
if (r->membw.delay_linear)
return MAX_MBA_BW - bw;
@@ -401,7 +397,7 @@ struct rdt_domain *rdt_find_domain(struct rdt_resource *r, int id,
return NULL;
}

void setup_default_ctrlval(struct rdt_resource *r, u32 *dc, u32 *dm)
static void setup_default_ctrlval(struct rdt_resource *r, u32 *dc)
{
struct rdt_hw_resource *hw_res = resctrl_to_arch_res(r);
int i;
@@ -410,76 +406,65 @@ void setup_default_ctrlval(struct rdt_resource *r, u32 *dc, u32 *dm)
* Initialize the Control MSRs to having no control.
* For Cache Allocation: Set all bits in cbm
* For Memory Allocation: Set b/w requested to 100%
* and the bandwidth in MBps to U32_MAX
*/
for (i = 0; i < hw_res->num_closid; i++, dc++, dm++) {
for (i = 0; i < hw_res->num_closid; i++, dc++)
*dc = r->default_ctrl;
*dm = MBA_MAX_MBPS;
}
}

static void domain_free(struct rdt_hw_domain *hw_dom)
{
kfree(hw_dom->arch_mbm_total);
kfree(hw_dom->arch_mbm_local);
kfree(hw_dom->ctrl_val);
kfree(hw_dom);
}

static int domain_setup_ctrlval(struct rdt_resource *r, struct rdt_domain *d)
{
struct rdt_hw_resource *hw_res = resctrl_to_arch_res(r);
struct rdt_hw_domain *hw_dom = resctrl_to_arch_dom(d);
struct msr_param m;
u32 *dc, *dm;
u32 *dc;

dc = kmalloc_array(hw_res->num_closid, sizeof(*hw_dom->ctrl_val),
GFP_KERNEL);
if (!dc)
return -ENOMEM;

dm = kmalloc_array(hw_res->num_closid, sizeof(*hw_dom->mbps_val),
GFP_KERNEL);
if (!dm) {
kfree(dc);
return -ENOMEM;
}

hw_dom->ctrl_val = dc;
hw_dom->mbps_val = dm;
setup_default_ctrlval(r, dc, dm);
setup_default_ctrlval(r, dc);

m.low = 0;
m.high = hw_res->num_closid;
hw_res->msr_update(d, &m, r);
return 0;
}

static int domain_setup_mon_state(struct rdt_resource *r, struct rdt_domain *d)
/**
* arch_domain_mbm_alloc() - Allocate arch private storage for the MBM counters
* @num_rmid: The size of the MBM counter array
* @hw_dom: The domain that owns the allocated arrays
*/
static int arch_domain_mbm_alloc(u32 num_rmid, struct rdt_hw_domain *hw_dom)
{
size_t tsize;

if (is_llc_occupancy_enabled()) {
d->rmid_busy_llc = bitmap_zalloc(r->num_rmid, GFP_KERNEL);
if (!d->rmid_busy_llc)
return -ENOMEM;
INIT_DELAYED_WORK(&d->cqm_limbo, cqm_handle_limbo);
}
if (is_mbm_total_enabled()) {
tsize = sizeof(*d->mbm_total);
d->mbm_total = kcalloc(r->num_rmid, tsize, GFP_KERNEL);
if (!d->mbm_total) {
bitmap_free(d->rmid_busy_llc);
tsize = sizeof(*hw_dom->arch_mbm_total);
hw_dom->arch_mbm_total = kcalloc(num_rmid, tsize, GFP_KERNEL);
if (!hw_dom->arch_mbm_total)
return -ENOMEM;
}
}
if (is_mbm_local_enabled()) {
tsize = sizeof(*d->mbm_local);
d->mbm_local = kcalloc(r->num_rmid, tsize, GFP_KERNEL);
if (!d->mbm_local) {
bitmap_free(d->rmid_busy_llc);
kfree(d->mbm_total);
tsize = sizeof(*hw_dom->arch_mbm_local);
hw_dom->arch_mbm_local = kcalloc(num_rmid, tsize, GFP_KERNEL);
if (!hw_dom->arch_mbm_local) {
kfree(hw_dom->arch_mbm_total);
hw_dom->arch_mbm_total = NULL;
return -ENOMEM;
}
}

if (is_mbm_enabled()) {
INIT_DELAYED_WORK(&d->mbm_over, mbm_handle_overflow);
mbm_setup_overflow_handler(d, MBM_OVERFLOW_INTERVAL);
}

return 0;
}
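
The arch_mbm_total/arch_mbm_local arrays allocated above are the new per-RMID,
architecture-private counter state ("Add per-rmid arch private storage for
overflow and chunks" in the list above). Their element type is defined on the
architecture side (in resctrl's internal.h, among the diffs not shown here),
presumably along the lines of:

    /* Simplified sketch of the per-RMID state kept on the x86 side. */
    struct arch_mbm_state {
            u64 chunks;     /* running count of h/w counter chunks seen */
            u64 prev_msr;   /* previous raw MSR value, for overflow detection */
    };

resctrl_arch_rmid_read() folds overflow correction into this state before handing
the filesystem a value in bytes, which is what lets mbm_overflow_count() and
get_corrected_mbm_count() move out of the generic code.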

@@ -502,6 +487,7 @@ static void domain_add_cpu(int cpu, struct rdt_resource *r)
struct list_head *add_pos = NULL;
struct rdt_hw_domain *hw_dom;
struct rdt_domain *d;
int err;

d = rdt_find_domain(r, id, &add_pos);
if (IS_ERR(d)) {
@@ -527,25 +513,22 @@ static void domain_add_cpu(int cpu, struct rdt_resource *r)
rdt_domain_reconfigure_cdp(r);

if (r->alloc_capable && domain_setup_ctrlval(r, d)) {
kfree(hw_dom);
domain_free(hw_dom);
return;
}

if (r->mon_capable && domain_setup_mon_state(r, d)) {
kfree(hw_dom->ctrl_val);
kfree(hw_dom->mbps_val);
kfree(hw_dom);
if (r->mon_capable && arch_domain_mbm_alloc(r->num_rmid, hw_dom)) {
domain_free(hw_dom);
return;
}

list_add_tail(&d->list, add_pos);

/*
* If resctrl is mounted, add
* per domain monitor data directories.
*/
if (static_branch_unlikely(&rdt_mon_enable_key))
mkdir_mondata_subdir_allrdtgrp(r, d);
err = resctrl_online_domain(r, d);
if (err) {
list_del(&d->list);
domain_free(hw_dom);
}
}

static void domain_remove_cpu(int cpu, struct rdt_resource *r)
@@ -563,41 +546,17 @@ static void domain_remove_cpu(int cpu, struct rdt_resource *r)

cpumask_clear_cpu(cpu, &d->cpu_mask);
if (cpumask_empty(&d->cpu_mask)) {
/*
* If resctrl is mounted, remove all the
* per domain monitor data directories.
*/
if (static_branch_unlikely(&rdt_mon_enable_key))
rmdir_mondata_subdir_allrdtgrp(r, d->id);
resctrl_offline_domain(r, d);
list_del(&d->list);
if (r->mon_capable && is_mbm_enabled())
cancel_delayed_work(&d->mbm_over);
if (is_llc_occupancy_enabled() && has_busy_rmid(r, d)) {
/*
* When a package is going down, forcefully
* decrement rmid->ebusy. There is no way to know
* that the L3 was flushed and hence may lead to
* incorrect counts in rare scenarios, but leaving
* the RMID as busy creates RMID leaks if the
* package never comes back.
*/
__check_limbo(d, true);
cancel_delayed_work(&d->cqm_limbo);
}

/*
* rdt_domain "d" is going to be freed below, so clear
* its pointer from pseudo_lock_region struct.
*/
if (d->plr)
d->plr->d = NULL;
domain_free(hw_dom);

kfree(hw_dom->ctrl_val);
kfree(hw_dom->mbps_val);
bitmap_free(d->rmid_busy_llc);
kfree(d->mbm_total);
kfree(d->mbm_local);
kfree(hw_dom);
return;
}

(Diffs for the remaining six changed files did not load and are not shown.)
