Commit

Merge branch 'for-next/perf' of git://git.kernel.org/pub/scm/linux/kernel/git/will/linux into for-next/core

wildea01 committed May 3, 2019
2 parents 24cf262 + 9bcb929 commit b33f908
Showing 9 changed files with 1,018 additions and 47 deletions.
1 change: 1 addition & 0 deletions Documentation/arm64/silicon-errata.txt
@@ -78,6 +78,7 @@ stable kernels.
| Hisilicon | Hip0{5,6,7} | #161010101 | HISILICON_ERRATUM_161010101 |
| Hisilicon | Hip0{6,7} | #161010701 | N/A |
| Hisilicon | Hip07 | #161600802 | HISILICON_ERRATUM_161600802 |
| Hisilicon | Hip08 SMMU PMCG | #162001800 | N/A |
| | | | |
| Qualcomm Tech. | Kryo/Falkor v1 | E1003 | QCOM_FALKOR_ERRATUM_1003 |
| Qualcomm Tech. | Falkor v1 | E1009 | QCOM_FALKOR_ERRATUM_1009 |
4 changes: 2 additions & 2 deletions arch/arm64/kernel/perf_event.c
@@ -431,7 +431,7 @@ static inline u64 armv8pmu_read_hw_counter(struct perf_event *event)
return val;
}

static inline u64 armv8pmu_read_counter(struct perf_event *event)
static u64 armv8pmu_read_counter(struct perf_event *event)
{
struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
struct hw_perf_event *hwc = &event->hw;
@@ -468,7 +468,7 @@ static inline void armv8pmu_write_hw_counter(struct perf_event *event,
}
}

static inline void armv8pmu_write_counter(struct perf_event *event, u64 value)
static void armv8pmu_write_counter(struct perf_event *event, u64 value)
{
struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
struct hw_perf_event *hwc = &event->hw;
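
The two hunks above drop the inline keyword from armv8pmu_read_counter() and armv8pmu_write_counter(). Both helpers are installed as arm_pmu callbacks, so their addresses are taken and an out-of-line definition is needed anyway, which makes the inline hint pointless. A minimal sketch of that calling pattern follows; the init function name is invented and only the two relevant callback fields are shown:

#include <linux/perf/arm_pmu.h>

/*
 * Illustrative only: a cut-down version of how the armv8 PMU code wires
 * these helpers into struct arm_pmu. Everything except the two callbacks
 * is simplified away.
 */
static int example_armv8_pmu_init(struct arm_pmu *cpu_pmu)
{
	/*
	 * Storing the helpers' addresses is what forces them out of line;
	 * a plain static definition is the honest signature for that.
	 */
	cpu_pmu->read_counter	= armv8pmu_read_counter;
	cpu_pmu->write_counter	= armv8pmu_write_counter;
	return 0;
}
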
131 changes: 107 additions & 24 deletions drivers/acpi/arm64/iort.c
@@ -356,7 +356,8 @@ static struct acpi_iort_node *iort_node_get_id(struct acpi_iort_node *node,
if (map->flags & ACPI_IORT_ID_SINGLE_MAPPING) {
if (node->type == ACPI_IORT_NODE_NAMED_COMPONENT ||
node->type == ACPI_IORT_NODE_PCI_ROOT_COMPLEX ||
node->type == ACPI_IORT_NODE_SMMU_V3) {
node->type == ACPI_IORT_NODE_SMMU_V3 ||
node->type == ACPI_IORT_NODE_PMCG) {
*id_out = map->output_base;
return parent;
}
@@ -394,6 +395,8 @@ static int iort_get_id_mapping_index(struct acpi_iort_node *node)
}

return smmu->id_mapping_index;
case ACPI_IORT_NODE_PMCG:
return 0;
default:
return -EINVAL;
}
@@ -1218,14 +1221,23 @@ static void __init arm_smmu_v3_init_resources(struct resource *res,
}
}

static bool __init arm_smmu_v3_is_coherent(struct acpi_iort_node *node)
static void __init arm_smmu_v3_dma_configure(struct device *dev,
struct acpi_iort_node *node)
{
struct acpi_iort_smmu_v3 *smmu;
enum dev_dma_attr attr;

/* Retrieve SMMUv3 specific data */
smmu = (struct acpi_iort_smmu_v3 *)node->node_data;

return smmu->flags & ACPI_IORT_SMMU_V3_COHACC_OVERRIDE;
attr = (smmu->flags & ACPI_IORT_SMMU_V3_COHACC_OVERRIDE) ?
DEV_DMA_COHERENT : DEV_DMA_NON_COHERENT;

/* We expect the dma masks to be equivalent for all SMMUv3 set-ups */
dev->dma_mask = &dev->coherent_dma_mask;

/* Configure DMA for the page table walker */
acpi_dma_configure(dev, attr);
}

#if defined(CONFIG_ACPI_NUMA)
@@ -1307,40 +1319,113 @@ static void __init arm_smmu_init_resources(struct resource *res,
}
}

static bool __init arm_smmu_is_coherent(struct acpi_iort_node *node)
static void __init arm_smmu_dma_configure(struct device *dev,
struct acpi_iort_node *node)
{
struct acpi_iort_smmu *smmu;
enum dev_dma_attr attr;

/* Retrieve SMMU specific data */
smmu = (struct acpi_iort_smmu *)node->node_data;

return smmu->flags & ACPI_IORT_SMMU_COHERENT_WALK;
attr = (smmu->flags & ACPI_IORT_SMMU_COHERENT_WALK) ?
DEV_DMA_COHERENT : DEV_DMA_NON_COHERENT;

/* We expect the dma masks to be equivalent for SMMU set-ups */
dev->dma_mask = &dev->coherent_dma_mask;

/* Configure DMA for the page table walker */
acpi_dma_configure(dev, attr);
}

static int __init arm_smmu_v3_pmcg_count_resources(struct acpi_iort_node *node)
{
struct acpi_iort_pmcg *pmcg;

/* Retrieve PMCG specific data */
pmcg = (struct acpi_iort_pmcg *)node->node_data;

/*
* There are always 2 memory resources.
* If the overflow_gsiv is present then add that for a total of 3.
*/
return pmcg->overflow_gsiv ? 3 : 2;
}

static void __init arm_smmu_v3_pmcg_init_resources(struct resource *res,
struct acpi_iort_node *node)
{
struct acpi_iort_pmcg *pmcg;

/* Retrieve PMCG specific data */
pmcg = (struct acpi_iort_pmcg *)node->node_data;

res[0].start = pmcg->page0_base_address;
res[0].end = pmcg->page0_base_address + SZ_4K - 1;
res[0].flags = IORESOURCE_MEM;
res[1].start = pmcg->page1_base_address;
res[1].end = pmcg->page1_base_address + SZ_4K - 1;
res[1].flags = IORESOURCE_MEM;

if (pmcg->overflow_gsiv)
acpi_iort_register_irq(pmcg->overflow_gsiv, "overflow",
ACPI_EDGE_SENSITIVE, &res[2]);
}

static struct acpi_platform_list pmcg_plat_info[] __initdata = {
/* HiSilicon Hip08 Platform */
{"HISI ", "HIP08 ", 0, ACPI_SIG_IORT, greater_than_or_equal,
"Erratum #162001800", IORT_SMMU_V3_PMCG_HISI_HIP08},
{ }
};

static int __init arm_smmu_v3_pmcg_add_platdata(struct platform_device *pdev)
{
u32 model;
int idx;

idx = acpi_match_platform_list(pmcg_plat_info);
if (idx >= 0)
model = pmcg_plat_info[idx].data;
else
model = IORT_SMMU_V3_PMCG_GENERIC;

return platform_device_add_data(pdev, &model, sizeof(model));
}

struct iort_dev_config {
const char *name;
int (*dev_init)(struct acpi_iort_node *node);
bool (*dev_is_coherent)(struct acpi_iort_node *node);
void (*dev_dma_configure)(struct device *dev,
struct acpi_iort_node *node);
int (*dev_count_resources)(struct acpi_iort_node *node);
void (*dev_init_resources)(struct resource *res,
struct acpi_iort_node *node);
int (*dev_set_proximity)(struct device *dev,
struct acpi_iort_node *node);
int (*dev_add_platdata)(struct platform_device *pdev);
};

static const struct iort_dev_config iort_arm_smmu_v3_cfg __initconst = {
.name = "arm-smmu-v3",
.dev_is_coherent = arm_smmu_v3_is_coherent,
.dev_dma_configure = arm_smmu_v3_dma_configure,
.dev_count_resources = arm_smmu_v3_count_resources,
.dev_init_resources = arm_smmu_v3_init_resources,
.dev_set_proximity = arm_smmu_v3_set_proximity,
};

static const struct iort_dev_config iort_arm_smmu_cfg __initconst = {
.name = "arm-smmu",
.dev_is_coherent = arm_smmu_is_coherent,
.dev_dma_configure = arm_smmu_dma_configure,
.dev_count_resources = arm_smmu_count_resources,
.dev_init_resources = arm_smmu_init_resources
.dev_init_resources = arm_smmu_init_resources,
};

static const struct iort_dev_config iort_arm_smmu_v3_pmcg_cfg __initconst = {
.name = "arm-smmu-v3-pmcg",
.dev_count_resources = arm_smmu_v3_pmcg_count_resources,
.dev_init_resources = arm_smmu_v3_pmcg_init_resources,
.dev_add_platdata = arm_smmu_v3_pmcg_add_platdata,
};

static __init const struct iort_dev_config *iort_get_dev_cfg(
@@ -1351,6 +1436,8 @@ static __init const struct iort_dev_config *iort_get_dev_cfg(
return &iort_arm_smmu_v3_cfg;
case ACPI_IORT_NODE_SMMU:
return &iort_arm_smmu_cfg;
case ACPI_IORT_NODE_PMCG:
return &iort_arm_smmu_v3_pmcg_cfg;
default:
return NULL;
}
@@ -1368,7 +1455,6 @@ static int __init iort_add_platform_device(struct acpi_iort_node *node,
struct fwnode_handle *fwnode;
struct platform_device *pdev;
struct resource *r;
enum dev_dma_attr attr;
int ret, count;

pdev = platform_device_alloc(ops->name, PLATFORM_DEVID_AUTO);
@@ -1402,19 +1488,19 @@ static int __init iort_add_platform_device(struct acpi_iort_node *node,
goto dev_put;

/*
* Add a copy of IORT node pointer to platform_data to
* be used to retrieve IORT data information.
* Platform devices based on PMCG nodes use platform_data to
* pass the hardware model info to the driver. For others, add
* a copy of IORT node pointer to platform_data to be used to
* retrieve IORT data information.
*/
ret = platform_device_add_data(pdev, &node, sizeof(node));
if (ops->dev_add_platdata)
ret = ops->dev_add_platdata(pdev);
else
ret = platform_device_add_data(pdev, &node, sizeof(node));

if (ret)
goto dev_put;

/*
* We expect the dma masks to be equivalent for
* all SMMUs set-ups
*/
pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;

fwnode = iort_get_fwnode(node);

if (!fwnode) {
Expand All @@ -1424,11 +1510,8 @@ static int __init iort_add_platform_device(struct acpi_iort_node *node,

pdev->dev.fwnode = fwnode;

attr = ops->dev_is_coherent && ops->dev_is_coherent(node) ?
DEV_DMA_COHERENT : DEV_DMA_NON_COHERENT;

/* Configure DMA for the page table walker */
acpi_dma_configure(&pdev->dev, attr);
if (ops->dev_dma_configure)
ops->dev_dma_configure(&pdev->dev, node);

iort_set_device_domain(&pdev->dev, node);

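
The PMCG support added above only creates and describes the platform device: arm_smmu_v3_pmcg_count_resources() and arm_smmu_v3_pmcg_init_resources() expose two 4K register pages plus an optional overflow interrupt, and arm_smmu_v3_pmcg_add_platdata() hands over a u32 model identifier instead of the usual IORT node pointer. The consuming driver (drivers/perf/arm_smmuv3_pmu.c, whose diff is not included on this page) picks these up in its probe routine. What follows is a minimal sketch of that hand-off, not the real driver: the probe function name is invented and the header providing the IORT_SMMU_V3_PMCG_* constants is an assumption.

#include <linux/platform_device.h>
#include <linux/acpi_iort.h>	/* assumed home of the IORT_SMMU_V3_PMCG_* IDs */
#include <linux/err.h>
#include <linux/io.h>

/* Hypothetical probe for a device created by iort_add_platform_device(). */
static int example_pmcg_probe(struct platform_device *pdev)
{
	void __iomem *page0, *page1;
	struct resource *res;
	u32 model;

	/*
	 * arm_smmu_v3_pmcg_add_platdata() packed a u32 model into
	 * platform_data: either the generic value or an erratum-specific one.
	 */
	model = *(u32 *)dev_get_platdata(&pdev->dev);
	if (model == IORT_SMMU_V3_PMCG_HISI_HIP08)
		dev_info(&pdev->dev, "applying Hip08 #162001800 workaround\n");

	/*
	 * Resources 0 and 1 are the PMCG page 0 / page 1 register frames
	 * laid out by arm_smmu_v3_pmcg_init_resources().
	 */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	page0 = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(page0))
		return PTR_ERR(page0);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	page1 = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(page1))
		return PTR_ERR(page1);

	/*
	 * Resource 2 (the overflow interrupt) exists only when the IORT node
	 * supplied an overflow GSIV, so treat it as optional.
	 */
	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	if (res)
		dev_info(&pdev->dev, "overflow IRQ %d\n", (int)res->start);

	return 0;
}
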
9 changes: 9 additions & 0 deletions drivers/perf/Kconfig
@@ -52,6 +52,15 @@ config ARM_PMU_ACPI
depends on ARM_PMU && ACPI
def_bool y

config ARM_SMMU_V3_PMU
tristate "ARM SMMUv3 Performance Monitors Extension"
depends on ARM64 && ACPI && ARM_SMMU_V3
help
Provides support for the ARM SMMUv3 Performance Monitor Counter
Groups (PMCG), which provide monitoring of transactions passing
through the SMMU and allow the resulting information to be filtered
based on the Stream ID of the corresponding master.

config ARM_DSU_PMU
tristate "ARM DynamIQ Shared Unit (DSU) PMU"
depends on ARM64
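
The ARM_SMMU_V3_PMU entry above describes an uncore PMU: the counters sit in the SMMU rather than in any CPU, and events can be qualified by the Stream ID of the master being monitored. From userspace such a PMU is driven through the normal perf_event_open() interface, with the dynamic PMU type number read from sysfs. The program below is a rough, self-contained illustration under stated assumptions: the PMU instance name and the event encoding are placeholders (the real names are exported by the driver under /sys/bus/event_source/devices/), and any Stream ID filtering would go through the PMU's sysfs-described format fields, which are outside this diff.

#define _GNU_SOURCE
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>

/* Read a dynamic PMU's type number from sysfs (standard perf interface). */
static int read_pmu_type(const char *pmu)
{
	char path[256];
	int type = -1;
	FILE *f;

	snprintf(path, sizeof(path),
		 "/sys/bus/event_source/devices/%s/type", pmu);
	f = fopen(path, "r");
	if (!f)
		return -1;
	if (fscanf(f, "%d", &type) != 1)
		type = -1;
	fclose(f);
	return type;
}

int main(void)
{
	struct perf_event_attr attr = { 0 };
	long long count = 0;
	int type, fd;

	/*
	 * Placeholder instance name: list /sys/bus/event_source/devices/
	 * on the target to find the real one.
	 */
	type = read_pmu_type("smmuv3_pmcg_example");
	if (type < 0)
		return 1;

	attr.size = sizeof(attr);
	attr.type = type;
	attr.config = 0x1;	/* placeholder event ID from the PMU's events/ dir */
	attr.disabled = 1;

	/* Uncore PMUs count system-wide: pid = -1, pick one CPU. */
	fd = syscall(__NR_perf_event_open, &attr, -1, 0, -1, 0);
	if (fd < 0)
		return 1;

	ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
	sleep(1);
	ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);
	if (read(fd, &count, sizeof(count)) == (long)sizeof(count))
		printf("events counted: %lld\n", count);
	return 0;
}
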
1 change: 1 addition & 0 deletions drivers/perf/Makefile
@@ -4,6 +4,7 @@ obj-$(CONFIG_ARM_CCN) += arm-ccn.o
obj-$(CONFIG_ARM_DSU_PMU) += arm_dsu_pmu.o
obj-$(CONFIG_ARM_PMU) += arm_pmu.o arm_pmu_platform.o
obj-$(CONFIG_ARM_PMU_ACPI) += arm_pmu_acpi.o
obj-$(CONFIG_ARM_SMMU_V3_PMU) += arm_smmuv3_pmu.o
obj-$(CONFIG_HISI_PMU) += hisilicon/
obj-$(CONFIG_QCOM_L2_PMU) += qcom_l2_pmu.o
obj-$(CONFIG_QCOM_L3_PMU) += qcom_l3_pmu.o
21 changes: 12 additions & 9 deletions drivers/perf/arm-cci.c
@@ -1684,21 +1684,24 @@ static int cci_pmu_probe(struct platform_device *pdev)
raw_spin_lock_init(&cci_pmu->hw_events.pmu_lock);
mutex_init(&cci_pmu->reserve_mutex);
atomic_set(&cci_pmu->active_events, 0);
cci_pmu->cpu = get_cpu();

ret = cci_pmu_init(cci_pmu, pdev);
if (ret) {
put_cpu();
return ret;
}

cci_pmu->cpu = raw_smp_processor_id();
g_cci_pmu = cci_pmu;
cpuhp_setup_state_nocalls(CPUHP_AP_PERF_ARM_CCI_ONLINE,
"perf/arm/cci:online", NULL,
cci_pmu_offline_cpu);
put_cpu();
g_cci_pmu = cci_pmu;

ret = cci_pmu_init(cci_pmu, pdev);
if (ret)
goto error_pmu_init;

pr_info("ARM %s PMU driver probed", cci_pmu->model->name);
return 0;

error_pmu_init:
cpuhp_remove_state(CPUHP_AP_PERF_ARM_CCI_ONLINE);
g_cci_pmu = NULL;
return ret;
}

static int cci_pmu_remove(struct platform_device *pdev)
25 changes: 13 additions & 12 deletions drivers/perf/arm-ccn.c
@@ -167,7 +167,7 @@ struct arm_ccn_dt {

struct hrtimer hrtimer;

cpumask_t cpu;
unsigned int cpu;
struct hlist_node node;

struct pmu pmu;
@@ -559,7 +559,7 @@ static ssize_t arm_ccn_pmu_cpumask_show(struct device *dev,
{
struct arm_ccn *ccn = pmu_to_arm_ccn(dev_get_drvdata(dev));

return cpumap_print_to_pagebuf(true, buf, &ccn->dt.cpu);
return cpumap_print_to_pagebuf(true, buf, cpumask_of(ccn->dt.cpu));
}

static struct device_attribute arm_ccn_pmu_cpumask_attr =
@@ -759,7 +759,7 @@ static int arm_ccn_pmu_event_init(struct perf_event *event)
* mitigate this, we enforce CPU assignment to one, selected
* processor (the one described in the "cpumask" attribute).
*/
event->cpu = cpumask_first(&ccn->dt.cpu);
event->cpu = ccn->dt.cpu;

node_xp = CCN_CONFIG_NODE(event->attr.config);
type = CCN_CONFIG_TYPE(event->attr.config);
@@ -1215,15 +1215,15 @@ static int arm_ccn_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node)
struct arm_ccn *ccn = container_of(dt, struct arm_ccn, dt);
unsigned int target;

if (!cpumask_test_and_clear_cpu(cpu, &dt->cpu))
if (cpu != dt->cpu)
return 0;
target = cpumask_any_but(cpu_online_mask, cpu);
if (target >= nr_cpu_ids)
return 0;
perf_pmu_migrate_context(&dt->pmu, cpu, target);
cpumask_set_cpu(target, &dt->cpu);
dt->cpu = target;
if (ccn->irq)
WARN_ON(irq_set_affinity_hint(ccn->irq, &dt->cpu) != 0);
WARN_ON(irq_set_affinity_hint(ccn->irq, cpumask_of(dt->cpu)));
return 0;
}

@@ -1299,29 +1299,30 @@ static int arm_ccn_pmu_init(struct arm_ccn *ccn)
}

/* Pick one CPU which we will use to collect data from CCN... */
cpumask_set_cpu(get_cpu(), &ccn->dt.cpu);
ccn->dt.cpu = raw_smp_processor_id();

/* Also make sure that the overflow interrupt is handled by this CPU */
if (ccn->irq) {
err = irq_set_affinity_hint(ccn->irq, &ccn->dt.cpu);
err = irq_set_affinity_hint(ccn->irq, cpumask_of(ccn->dt.cpu));
if (err) {
dev_err(ccn->dev, "Failed to set interrupt affinity!\n");
goto error_set_affinity;
}
}

cpuhp_state_add_instance_nocalls(CPUHP_AP_PERF_ARM_CCN_ONLINE,
&ccn->dt.node);

err = perf_pmu_register(&ccn->dt.pmu, name, -1);
if (err)
goto error_pmu_register;

cpuhp_state_add_instance_nocalls(CPUHP_AP_PERF_ARM_CCN_ONLINE,
&ccn->dt.node);
put_cpu();
return 0;

error_pmu_register:
cpuhp_state_remove_instance_nocalls(CPUHP_AP_PERF_ARM_CCN_ONLINE,
&ccn->dt.node);
error_set_affinity:
put_cpu();
error_choose_name:
ida_simple_remove(&arm_ccn_pmu_ida, ccn->dt.id);
for (i = 0; i < ccn->num_xps; i++)