Skip to content

Commit

Permalink
drivers/perf: xgene: Add CPU hotplug support
Browse files Browse the repository at this point in the history
If the CPU assigned to the xgene PMU is taken offline, then subsequent
perf invocations on the PMU will fail:

  # echo 0 > /sys/devices/system/cpu/cpu0/online
  # perf stat -a -e l3c0/cycle-count/,l3c0/write/ sleep 1
    Error:
    The sys_perf_event_open() syscall returned with 19 (No such device) for event (l3c0/cycle-count/).
    /bin/dmesg may provide additional information.
    No CONFIG_PERF_EVENTS=y kernel support configured?

This patch implements a hotplug notifier in the xgene PMU driver so that
the PMU context is migrated to another online CPU should its assigned
CPU disappear.

Acked-by: Mark Rutland <[email protected]>
Signed-off-by: Hoan Tran <[email protected]>
[will: Made naming of new cpuhp_state enum entry consistent]
Signed-off-by: Will Deacon <[email protected]>
  • Loading branch information
Hoan Tran authored and wildea01 committed Nov 21, 2018
1 parent 472dc9f commit cbb72a3
Show file tree
Hide file tree
Showing 2 changed files with 74 additions and 7 deletions.
80 changes: 73 additions & 7 deletions drivers/perf/xgene_pmu.c
Original file line number Diff line number Diff line change
Expand Up @@ -21,6 +21,7 @@

#include <linux/acpi.h>
#include <linux/clk.h>
#include <linux/cpuhotplug.h>
#include <linux/cpumask.h>
#include <linux/interrupt.h>
#include <linux/io.h>
Expand Down Expand Up @@ -130,12 +131,14 @@ struct xgene_pmu_ops {

struct xgene_pmu {
struct device *dev;
struct hlist_node node;
int version;
void __iomem *pcppmu_csr;
u32 mcb_active_mask;
u32 mc_active_mask;
u32 l3c_active_mask;
cpumask_t cpu;
int irq;
raw_spinlock_t lock;
const struct xgene_pmu_ops *ops;
struct list_head l3cpmus;
Expand Down Expand Up @@ -1806,6 +1809,53 @@ static const struct acpi_device_id xgene_pmu_acpi_match[] = {
MODULE_DEVICE_TABLE(acpi, xgene_pmu_acpi_match);
#endif

/*
 * CPU hotplug "online" callback: if this PMU instance has no owning CPU
 * yet, adopt the newly-onlined CPU as the reader CPU and steer the
 * overflow interrupt to it.  Always returns 0.
 */
static int xgene_pmu_online_cpu(unsigned int cpu, struct hlist_node *node)
{
	struct xgene_pmu *pmu = hlist_entry_safe(node, struct xgene_pmu, node);

	/* First online CPU becomes the designated reader for this PMU */
	if (cpumask_empty(&pmu->cpu))
		cpumask_set_cpu(cpu, &pmu->cpu);

	/* Overflow interrupt must be serviced on the same CPU */
	WARN_ON(irq_set_affinity(pmu->irq, &pmu->cpu));

	return 0;
}

/*
 * CPU hotplug "offline" callback: if the CPU going down is this PMU's
 * designated reader, migrate all perf contexts of every sub-PMU (MC, MCB,
 * L3C, IOB) to another online CPU and move the overflow interrupt there.
 * Always returns 0.
 */
static int xgene_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node)
{
struct xgene_pmu *xgene_pmu = hlist_entry_safe(node, struct xgene_pmu,
node);
struct xgene_pmu_dev_ctx *ctx;
unsigned int target;

/* Nothing to do unless the departing CPU owns this PMU */
if (!cpumask_test_and_clear_cpu(cpu, &xgene_pmu->cpu))
return 0;
/* Pick any other online CPU; if none remain, leave the PMU ownerless */
target = cpumask_any_but(cpu_online_mask, cpu);
if (target >= nr_cpu_ids)
return 0;

/* Migrate the perf contexts of every child PMU to the new owner */
list_for_each_entry(ctx, &xgene_pmu->mcpmus, next) {
perf_pmu_migrate_context(&ctx->pmu_dev->pmu, cpu, target);
}
list_for_each_entry(ctx, &xgene_pmu->mcbpmus, next) {
perf_pmu_migrate_context(&ctx->pmu_dev->pmu, cpu, target);
}
list_for_each_entry(ctx, &xgene_pmu->l3cpmus, next) {
perf_pmu_migrate_context(&ctx->pmu_dev->pmu, cpu, target);
}
list_for_each_entry(ctx, &xgene_pmu->iobpmus, next) {
perf_pmu_migrate_context(&ctx->pmu_dev->pmu, cpu, target);
}

/* Record the new owner before re-affining the interrupt */
cpumask_set_cpu(target, &xgene_pmu->cpu);
/* Overflow interrupt also should use the same CPU */
WARN_ON(irq_set_affinity(xgene_pmu->irq, &xgene_pmu->cpu));

return 0;
}

static int xgene_pmu_probe(struct platform_device *pdev)
{
const struct xgene_pmu_data *dev_data;
Expand All @@ -1815,6 +1865,14 @@ static int xgene_pmu_probe(struct platform_device *pdev)
int irq, rc;
int version;

/* Install a hook to update the reader CPU in case it goes offline */
rc = cpuhp_setup_state_multi(CPUHP_AP_PERF_ARM_APM_XGENE_ONLINE,
"CPUHP_AP_PERF_ARM_APM_XGENE_ONLINE",
xgene_pmu_online_cpu,
xgene_pmu_offline_cpu);
if (rc)
return rc;

xgene_pmu = devm_kzalloc(&pdev->dev, sizeof(*xgene_pmu), GFP_KERNEL);
if (!xgene_pmu)
return -ENOMEM;
Expand Down Expand Up @@ -1865,6 +1923,7 @@ static int xgene_pmu_probe(struct platform_device *pdev)
dev_err(&pdev->dev, "No IRQ resource\n");
return -EINVAL;
}

rc = devm_request_irq(&pdev->dev, irq, xgene_pmu_isr,
IRQF_NOBALANCING | IRQF_NO_THREAD,
dev_name(&pdev->dev), xgene_pmu);
Expand All @@ -1873,6 +1932,8 @@ static int xgene_pmu_probe(struct platform_device *pdev)
return rc;
}

xgene_pmu->irq = irq;

raw_spin_lock_init(&xgene_pmu->lock);

/* Check for active MCBs and MCUs */
Expand All @@ -1883,27 +1944,30 @@ static int xgene_pmu_probe(struct platform_device *pdev)
xgene_pmu->mc_active_mask = 0x1;
}

/* Pick one core to use for cpumask attributes */
cpumask_set_cpu(smp_processor_id(), &xgene_pmu->cpu);

/* Make sure that the overflow interrupt is handled by this CPU */
rc = irq_set_affinity(irq, &xgene_pmu->cpu);
/* Add this instance to the list used by the hotplug callback */
rc = cpuhp_state_add_instance(CPUHP_AP_PERF_ARM_APM_XGENE_ONLINE,
&xgene_pmu->node);
if (rc) {
dev_err(&pdev->dev, "Failed to set interrupt affinity!\n");
dev_err(&pdev->dev, "Error %d registering hotplug", rc);
return rc;
}

/* Walk through the tree for all PMU perf devices */
rc = xgene_pmu_probe_pmu_dev(xgene_pmu, pdev);
if (rc) {
dev_err(&pdev->dev, "No PMU perf devices found!\n");
return rc;
goto out_unregister;
}

/* Enable interrupt */
xgene_pmu->ops->unmask_int(xgene_pmu);

return 0;

out_unregister:
cpuhp_state_remove_instance(CPUHP_AP_PERF_ARM_APM_XGENE_ONLINE,
&xgene_pmu->node);
return rc;
}

static void
Expand All @@ -1924,6 +1988,8 @@ static int xgene_pmu_remove(struct platform_device *pdev)
xgene_pmu_dev_cleanup(xgene_pmu, &xgene_pmu->iobpmus);
xgene_pmu_dev_cleanup(xgene_pmu, &xgene_pmu->mcbpmus);
xgene_pmu_dev_cleanup(xgene_pmu, &xgene_pmu->mcpmus);
cpuhp_state_remove_instance(CPUHP_AP_PERF_ARM_APM_XGENE_ONLINE,
&xgene_pmu->node);

return 0;
}
Expand Down
1 change: 1 addition & 0 deletions include/linux/cpuhotplug.h
Original file line number Diff line number Diff line change
Expand Up @@ -164,6 +164,7 @@ enum cpuhp_state {
CPUHP_AP_PERF_ARM_L2X0_ONLINE,
CPUHP_AP_PERF_ARM_QCOM_L2_ONLINE,
CPUHP_AP_PERF_ARM_QCOM_L3_ONLINE,
CPUHP_AP_PERF_ARM_APM_XGENE_ONLINE,
CPUHP_AP_PERF_POWERPC_NEST_IMC_ONLINE,
CPUHP_AP_PERF_POWERPC_CORE_IMC_ONLINE,
CPUHP_AP_PERF_POWERPC_THREAD_IMC_ONLINE,
Expand Down

0 comments on commit cbb72a3

Please sign in to comment.