Skip to content

Commit

Permalink
workqueue: Remove module param disable_numa and sysfs knobs pool_ids and numa

Browse files Browse the repository at this point in the history

Unbound workqueue CPU affinity is going to receive an overhaul and the NUMA-specific
knobs won't make sense anymore. Remove them. Also, the pool_ids
knob was used for debugging and not really meaningful given that there is no
visibility into the pools associated with those IDs. Remove it too. A future
patch will improve overall visibility.

Signed-off-by: Tejun Heo <[email protected]>
  • Loading branch information
htejun committed Aug 8, 2023
1 parent 797e834 commit fcecfa8
Show file tree
Hide file tree
Showing 2 changed files with 0 additions and 82 deletions.
9 changes: 0 additions & 9 deletions Documentation/admin-guide/kernel-parameters.txt
Original file line number Diff line number Diff line change
Expand Up @@ -6992,15 +6992,6 @@
threshold repeatedly. They are likely good
candidates for using WQ_UNBOUND workqueues instead.

workqueue.disable_numa
By default, all work items queued to unbound
workqueues are affine to the NUMA nodes they're
issued on, which results in better behavior in
general. If NUMA affinity needs to be disabled for
whatever reason, this option can be used. Note
that this also can be controlled per-workqueue for
workqueues visible under /sys/bus/workqueue/.

workqueue.power_efficient
Per-cpu workqueues are generally preferred because
they show better performance thanks to cache
Expand Down
73 changes: 0 additions & 73 deletions kernel/workqueue.c
Original file line number Diff line number Diff line change
Expand Up @@ -340,9 +340,6 @@ static cpumask_var_t *wq_numa_possible_cpumask;
/*
 * Threshold (in usecs) for the CPU-intensive detection; runtime-tunable
 * via the cpu_intensive_thresh_us module param (0644).  Defaults to
 * ULONG_MAX — presumably disabling the check until set; see
 * kernel-parameters.txt for the documented semantics.
 */
static unsigned long wq_cpu_intensive_thresh_us = ULONG_MAX;
module_param_named(cpu_intensive_thresh_us, wq_cpu_intensive_thresh_us, ulong, 0644);

/*
 * Boot-time-only knob (0444 — read-only at runtime) that disables NUMA
 * affinity for unbound workqueues; consumed by wq_numa_init().
 */
static bool wq_disable_numa;
module_param_named(disable_numa, wq_disable_numa, bool, 0444);

/* see the comment above the definition of WQ_POWER_EFFICIENT */
static bool wq_power_efficient = IS_ENABLED(CONFIG_WQ_POWER_EFFICIENT_DEFAULT);
module_param_named(power_efficient, wq_power_efficient, bool, 0444);
Expand Down Expand Up @@ -5794,10 +5791,8 @@ int workqueue_set_unbound_cpumask(cpumask_var_t cpumask)
*
* Unbound workqueues have the following extra attributes.
*
* pool_ids RO int : the associated pool IDs for each node
* nice RW int : nice value of the workers
* cpumask RW mask : bitmask of allowed CPUs for the workers
* numa RW bool : whether enable NUMA affinity
*/
struct wq_device {
struct workqueue_struct *wq;
Expand Down Expand Up @@ -5850,28 +5845,6 @@ static struct attribute *wq_sysfs_attrs[] = {
};
ATTRIBUTE_GROUPS(wq_sysfs);

/*
 * sysfs ->show handler for the unbound-workqueue "pool_ids" attribute:
 * emits one "node:pool_id" pair per NUMA node, space-separated, followed
 * by a newline.  Returns the number of bytes written into @buf.
 *
 * Lock ordering: cpus_read_lock() is taken before rcu_read_lock(); RCU
 * protects the unbound_pwq_by_node() lookup and the pool it points at
 * while the node list is walked.
 */
static ssize_t wq_pool_ids_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct workqueue_struct *wq = dev_to_wq(dev);
	const char *delim = "";	/* "" before the first pair, " " afterwards */
	int node, written = 0;

	cpus_read_lock();
	rcu_read_lock();
	for_each_node(node) {
		/* scnprintf() bounds every write to the remaining space,
		 * so the sysfs PAGE_SIZE buffer can never overflow. */
		written += scnprintf(buf + written, PAGE_SIZE - written,
				     "%s%d:%d", delim, node,
				     unbound_pwq_by_node(wq, node)->pool->id);
		delim = " ";
	}
	written += scnprintf(buf + written, PAGE_SIZE - written, "\n");
	rcu_read_unlock();
	cpus_read_unlock();

	return written;
}

static ssize_t wq_nice_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
Expand Down Expand Up @@ -5962,50 +5935,9 @@ static ssize_t wq_cpumask_store(struct device *dev,
return ret ?: count;
}

/*
 * sysfs ->show handler for the unbound-workqueue "numa" attribute.
 * Prints "1\n" when NUMA affinity is enabled (->no_numa clear) and
 * "0\n" otherwise.  wq->mutex serializes the read of ->unbound_attrs.
 * Returns the number of bytes written into @buf.
 */
static ssize_t wq_numa_show(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	struct workqueue_struct *wq = dev_to_wq(dev);
	int len;

	mutex_lock(&wq->mutex);
	len = scnprintf(buf, PAGE_SIZE, "%d\n",
			wq->unbound_attrs->no_numa ? 0 : 1);
	mutex_unlock(&wq->mutex);

	return len;
}

/*
 * sysfs ->store handler for the unbound-workqueue "numa" attribute.
 * Parses a decimal integer from @buf: non-zero enables NUMA affinity,
 * zero disables it (note the inversion into ->no_numa), then applies the
 * updated attrs to the workqueue.
 *
 * Returns @count on success, -ENOMEM if the attrs copy could not be
 * allocated, -EINVAL on unparsable input, or the error from
 * apply_workqueue_attrs_locked().
 */
static ssize_t wq_numa_store(struct device *dev, struct device_attribute *attr,
			     const char *buf, size_t count)
{
	struct workqueue_struct *wq = dev_to_wq(dev);
	struct workqueue_attrs *attrs;
	int v, ret = -ENOMEM;

	apply_wqattrs_lock();

	/* mutable copy of the wq's current unbound attrs; NULL on OOM */
	attrs = wq_sysfs_prep_attrs(wq);
	if (!attrs)
		goto out_unlock;

	ret = -EINVAL;
	if (sscanf(buf, "%d", &v) == 1) {
		/* user-visible polarity is "numa enabled"; field is the negation */
		attrs->no_numa = !v;
		ret = apply_workqueue_attrs_locked(wq, attrs);
	}

out_unlock:
	apply_wqattrs_unlock();
	/* NOTE(review): reached with attrs == NULL on the OOM path —
	 * presumably free_workqueue_attrs(NULL) is a no-op; verify. */
	free_workqueue_attrs(attrs);
	return ret ?: count;
}

/*
 * Extra device attributes registered only for unbound workqueues, visible
 * under /sys/bus/workqueue/.  pool_ids is read-only (0444); nice, cpumask
 * and numa are read-write (0644).  The array is __ATTR_NULL-terminated.
 */
static struct device_attribute wq_sysfs_unbound_attrs[] = {
	__ATTR(pool_ids, 0444, wq_pool_ids_show, NULL),
	__ATTR(nice, 0644, wq_nice_show, wq_nice_store),
	__ATTR(cpumask, 0644, wq_cpumask_show, wq_cpumask_store),
	__ATTR(numa, 0644, wq_numa_show, wq_numa_store),
	__ATTR_NULL,
};

Expand Down Expand Up @@ -6379,11 +6311,6 @@ static void __init wq_numa_init(void)
if (num_possible_nodes() <= 1)
return;

if (wq_disable_numa) {
pr_info("workqueue: NUMA affinity support disabled\n");
return;
}

for_each_possible_cpu(cpu) {
if (WARN_ON(cpu_to_node(cpu) == NUMA_NO_NODE)) {
pr_warn("workqueue: NUMA node mapping not available for cpu%d, disabling NUMA support\n", cpu);
Expand Down

0 comments on commit fcecfa8

Please sign in to comment.