| field | value | date |
|---|---|---|
| author | Catalin Marinas <catalin.marinas@arm.com> | 2017-04-12 10:41:13 +0100 |
| committer | Catalin Marinas <catalin.marinas@arm.com> | 2017-04-12 10:41:50 +0100 |
| commit | 494bc3cd3dd02e259d5db9372754e993e4a21902 (patch) | |
| tree | b3feaba8a9d1e4f068870288c3a87a6cfa9c51c8 | |
| parent | d91750f12c79101028cb93dc35eed6989fae4405 (diff) | |
| parent | f00fa5f4163b40c3ec8590d9a7bd845c19bf8d16 (diff) | |
| download | linux-494bc3cd3dd02e259d5db9372754e993e4a21902.tar.gz, linux-494bc3cd3dd02e259d5db9372754e993e4a21902.tar.bz2, linux-494bc3cd3dd02e259d5db9372754e993e4a21902.zip | |
Merge branch 'will/for-next/perf' into for-next/core
* will/for-next/perf:
arm64: pmuv3: use arm_pmu ACPI framework
arm64: pmuv3: handle !PMUv3 when probing
drivers/perf: arm_pmu: add ACPI framework
arm64: add function to get a cpu's MADT GICC table
drivers/perf: arm_pmu: split out platform device probe logic
drivers/perf: arm_pmu: move irq request/free into probe
drivers/perf: arm_pmu: split cpu-local irq request/free
drivers/perf: arm_pmu: rename irq request/free functions
drivers/perf: arm_pmu: handle no platform_device
drivers/perf: arm_pmu: simplify cpu_pmu_request_irqs()
drivers/perf: arm_pmu: factor out pmu registration
drivers/perf: arm_pmu: fold init into alloc
drivers/perf: arm_pmu: define armpmu_init_fn
drivers/perf: arm_pmu: remove pointless PMU disabling
perf: qcom: Add L3 cache PMU driver
drivers/perf: arm_pmu: split irq request from enable
drivers/perf: arm_pmu: manage interrupts per-cpu
drivers/perf: arm_pmu: rework per-cpu allocation
MAINTAINERS: Add file patterns for perf device tree bindings
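
Taken together, the series splits the arm_pmu core into platform (DT) and ACPI probing paths and moves interrupt bookkeeping into per-CPU state. A condensed sketch of the resulting arm64 entry point, lifted from the arch/arm64/kernel/perf_event.c hunk in the diff below (comments added; not a standalone driver, it relies on the surrounding file for armv8_pmu_driver and armv8_pmuv3_init):

```c
/*
 * Condensed from the perf_event.c hunk below: instead of routing ACPI
 * systems through a runtime pmu_probe_info table, the boot method is
 * inspected once at initcall time and the matching probe path is chosen.
 */
static int __init armv8_pmu_driver_init(void)
{
	if (acpi_disabled)
		/* DT boot: probe PMUs from the device tree as before. */
		return platform_driver_register(&armv8_pmu_driver);
	else
		/* ACPI boot: probe PMUs from the MADT GICC tables. */
		return arm_pmu_acpi_probe(armv8_pmuv3_init);
}
device_initcall(armv8_pmu_driver_init)
```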
| mode | file | changes |
|---|---|---|
| -rw-r--r-- | Documentation/perf/qcom_l3_pmu.txt | 25 |
| -rw-r--r-- | MAINTAINERS | 1 |
| -rw-r--r-- | arch/arm64/include/asm/acpi.h | 2 |
| -rw-r--r-- | arch/arm64/kernel/perf_event.c | 113 |
| -rw-r--r-- | arch/arm64/kernel/smp.c | 10 |
| -rw-r--r-- | drivers/perf/Kconfig | 14 |
| -rw-r--r-- | drivers/perf/Makefile | 4 |
| -rw-r--r-- | drivers/perf/arm_pmu.c | 530 |
| -rw-r--r-- | drivers/perf/arm_pmu_acpi.c | 256 |
| -rw-r--r-- | drivers/perf/arm_pmu_platform.c | 235 |
| -rw-r--r-- | drivers/perf/qcom_l3_pmu.c | 849 |
| -rw-r--r-- | include/linux/cpuhotplug.h | 2 |
| -rw-r--r-- | include/linux/perf/arm_pmu.h | 29 |
13 files changed, 1660 insertions, 410 deletions
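
One consumer-side note before the diff body: the new qcom_l3_pmu.txt added below documents an uncore PMU, so its events must be opened system-wide (pid == -1) on a CPU taken from the PMU's "cpumask" attribute. A hedged userspace sketch of that usage — the PMU name l3cache_0_0 comes from that documentation, while the attr.config value is a placeholder, since the real "event" and "lc" bit positions must be parsed from the PMU's sysfs format directory:

```c
#include <linux/perf_event.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <unistd.h>

/* Read a small integer attribute such as /sys/devices/l3cache_0_0/type. */
static long read_sysfs_long(const char *path)
{
	long v = -1;
	FILE *f = fopen(path, "r");

	if (f) {
		if (fscanf(f, "%ld", &v) != 1)
			v = -1;
		fclose(f);
	}
	return v;
}

int main(void)
{
	/* The perf core assigns dynamic (uncore) PMUs a type id at runtime. */
	long type = read_sysfs_long("/sys/devices/l3cache_0_0/type");
	struct perf_event_attr attr;
	uint64_t count;
	int fd;

	if (type < 0) {
		fprintf(stderr, "l3cache_0_0 PMU not present\n");
		return 1;
	}

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = (uint32_t)type;
	/*
	 * PLACEHOLDER: encode the event number and the "lc" (long counter)
	 * flag according to the sysfs "format" attributes; do not hard-code.
	 */
	attr.config = 0;

	/* Uncore PMU: pid == -1, cpu taken from the PMU's "cpumask" file. */
	fd = syscall(__NR_perf_event_open, &attr, -1, /* cpu */ 0, -1, 0);
	if (fd < 0) {
		perror("perf_event_open");
		return 1;
	}

	ioctl(fd, PERF_EVENT_IOC_RESET, 0);
	ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
	sleep(1);
	ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);

	if (read(fd, &count, sizeof(count)) == sizeof(count))
		printf("count: %llu\n", (unsigned long long)count);
	close(fd);
	return 0;
}
```

The documented command `perf stat -e l3cache_0_0/read-miss,lc/` performs the same reads without any custom code.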
diff --git a/Documentation/perf/qcom_l3_pmu.txt b/Documentation/perf/qcom_l3_pmu.txt
new file mode 100644
index 000000000000..96b3a9444a0d
--- /dev/null
+++ b/Documentation/perf/qcom_l3_pmu.txt
@@ -0,0 +1,25 @@
+Qualcomm Datacenter Technologies L3 Cache Performance Monitoring Unit (PMU)
+===========================================================================
+
+This driver supports the L3 cache PMUs found in Qualcomm Datacenter Technologies
+Centriq SoCs. The L3 cache on these SOCs is composed of multiple slices, shared
+by all cores within a socket. Each slice is exposed as a separate uncore perf
+PMU with device name l3cache_<socket>_<instance>. User space is responsible
+for aggregating across slices.
+
+The driver provides a description of its available events and configuration
+options in sysfs, see /sys/devices/l3cache*. Given that these are uncore PMUs
+the driver also exposes a "cpumask" sysfs attribute which contains a mask
+consisting of one CPU per socket which will be used to handle all the PMU
+events on that socket.
+
+The hardware implements 32bit event counters and has a flat 8bit event space
+exposed via the "event" format attribute. In addition to the 32bit physical
+counters the driver supports virtual 64bit hardware counters by using hardware
+counter chaining. This feature is exposed via the "lc" (long counter) format
+flag. E.g.:
+
+  perf stat -e l3cache_0_0/read-miss,lc/
+
+Given that these are uncore PMUs the driver does not support sampling, therefore
+"perf record" will not work. Per-task perf sessions are not supported.
diff --git a/MAINTAINERS b/MAINTAINERS
index c776906f67a9..e64ce8811b8c 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -976,6 +976,7 @@ F:	arch/arm*/include/asm/perf_event.h
 F:	drivers/perf/*
 F:	include/linux/perf/arm_pmu.h
 F:	Documentation/devicetree/bindings/arm/pmu.txt
+F:	Documentation/devicetree/bindings/perf/
 
 ARM PORT
 M:	Russell King <linux@armlinux.org.uk>
diff --git a/arch/arm64/include/asm/acpi.h b/arch/arm64/include/asm/acpi.h
index c1976c0adca7..0e99978da3f0 100644
--- a/arch/arm64/include/asm/acpi.h
+++ b/arch/arm64/include/asm/acpi.h
@@ -85,6 +85,8 @@ static inline bool acpi_has_cpu_in_madt(void)
 	return true;
 }
 
+struct acpi_madt_generic_interrupt *acpi_cpu_get_madt_gicc(int cpu);
+
 static inline void arch_fix_phys_package_id(int num, u32 slot) { }
 void __init acpi_init_cpus(void);
diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c
index 57ae9d9ed9bb..98c749394c4b 100644
--- a/arch/arm64/kernel/perf_event.c
+++ b/arch/arm64/kernel/perf_event.c
@@ -957,11 +957,26 @@ static int armv8_vulcan_map_event(struct perf_event *event)
 				ARMV8_PMU_EVTYPE_EVENT);
 }
 
+struct armv8pmu_probe_info {
+	struct arm_pmu *pmu;
+	bool present;
+};
+
 static void __armv8pmu_probe_pmu(void *info)
 {
-	struct arm_pmu *cpu_pmu = info;
+	struct armv8pmu_probe_info *probe = info;
+	struct arm_pmu *cpu_pmu = probe->pmu;
+	u64 dfr0, pmuver;
 	u32 pmceid[2];
 
+	dfr0 = read_sysreg(id_aa64dfr0_el1);
+	pmuver = cpuid_feature_extract_unsigned_field(dfr0,
+			ID_AA64DFR0_PMUVER_SHIFT);
+	if (pmuver != 1)
+		return;
+
+	probe->present = true;
+
 	/* Read the nb of CNTx counters supported from PMNC */
 	cpu_pmu->num_events = (armv8pmu_pmcr_read() >> ARMV8_PMU_PMCR_N_SHIFT)
 		& ARMV8_PMU_PMCR_N_MASK;
@@ -979,13 +994,27 @@ static void __armv8pmu_probe_pmu(void *info)
 
 static int armv8pmu_probe_pmu(struct arm_pmu *cpu_pmu)
 {
-	return smp_call_function_any(&cpu_pmu->supported_cpus,
+	struct armv8pmu_probe_info probe = {
+		.pmu = cpu_pmu,
+		.present = false,
+	};
+	int ret;
+
+	ret = smp_call_function_any(&cpu_pmu->supported_cpus,
 				    __armv8pmu_probe_pmu,
-				    cpu_pmu, 1);
+				    &probe, 1);
+	if (ret)
+		return ret;
+
+	return probe.present ? 0 : -ENODEV;
 }
 
-static void armv8_pmu_init(struct arm_pmu *cpu_pmu)
+static int armv8_pmu_init(struct arm_pmu *cpu_pmu)
 {
+	int ret = armv8pmu_probe_pmu(cpu_pmu);
+	if (ret)
+		return ret;
+
 	cpu_pmu->handle_irq		= armv8pmu_handle_irq,
 	cpu_pmu->enable			= armv8pmu_enable_event,
 	cpu_pmu->disable		= armv8pmu_disable_event,
@@ -997,78 +1026,104 @@ static void armv8_pmu_init(struct arm_pmu *cpu_pmu)
 	cpu_pmu->reset			= armv8pmu_reset,
 	cpu_pmu->max_period		= (1LLU << 32) - 1,
 	cpu_pmu->set_event_filter	= armv8pmu_set_event_filter;
+
+	return 0;
 }
 
 static int armv8_pmuv3_init(struct arm_pmu *cpu_pmu)
 {
-	armv8_pmu_init(cpu_pmu);
+	int ret = armv8_pmu_init(cpu_pmu);
+	if (ret)
+		return ret;
+
 	cpu_pmu->name			= "armv8_pmuv3";
 	cpu_pmu->map_event		= armv8_pmuv3_map_event;
 	cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] =
 		&armv8_pmuv3_events_attr_group;
 	cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] =
 		&armv8_pmuv3_format_attr_group;
-	return armv8pmu_probe_pmu(cpu_pmu);
+
+	return 0;
 }
 
 static int armv8_a53_pmu_init(struct arm_pmu *cpu_pmu)
 {
-	armv8_pmu_init(cpu_pmu);
+	int ret = armv8_pmu_init(cpu_pmu);
+	if (ret)
+		return ret;
+
 	cpu_pmu->name			= "armv8_cortex_a53";
 	cpu_pmu->map_event		= armv8_a53_map_event;
 	cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] =
 		&armv8_pmuv3_events_attr_group;
 	cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] =
 		&armv8_pmuv3_format_attr_group;
-	return armv8pmu_probe_pmu(cpu_pmu);
+
+	return 0;
 }
 
 static int armv8_a57_pmu_init(struct arm_pmu *cpu_pmu)
 {
-	armv8_pmu_init(cpu_pmu);
+	int ret = armv8_pmu_init(cpu_pmu);
+	if (ret)
+		return ret;
+
 	cpu_pmu->name			= "armv8_cortex_a57";
 	cpu_pmu->map_event		= armv8_a57_map_event;
 	cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] =
 		&armv8_pmuv3_events_attr_group;
 	cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] =
 		&armv8_pmuv3_format_attr_group;
-	return armv8pmu_probe_pmu(cpu_pmu);
+
+	return 0;
 }
 
 static int armv8_a72_pmu_init(struct arm_pmu *cpu_pmu)
 {
-	armv8_pmu_init(cpu_pmu);
+	int ret = armv8_pmu_init(cpu_pmu);
+	if (ret)
+		return ret;
+
 	cpu_pmu->name			= "armv8_cortex_a72";
 	cpu_pmu->map_event		= armv8_a57_map_event;
 	cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] =
 		&armv8_pmuv3_events_attr_group;
 	cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] =
 		&armv8_pmuv3_format_attr_group;
-	return armv8pmu_probe_pmu(cpu_pmu);
+
+	return 0;
 }
 
 static int armv8_thunder_pmu_init(struct arm_pmu *cpu_pmu)
 {
-	armv8_pmu_init(cpu_pmu);
+	int ret = armv8_pmu_init(cpu_pmu);
+	if (ret)
+		return ret;
+
 	cpu_pmu->name			= "armv8_cavium_thunder";
 	cpu_pmu->map_event		= armv8_thunder_map_event;
 	cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] =
 		&armv8_pmuv3_events_attr_group;
 	cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] =
 		&armv8_pmuv3_format_attr_group;
-	return armv8pmu_probe_pmu(cpu_pmu);
+
+	return 0;
 }
 
 static int armv8_vulcan_pmu_init(struct arm_pmu *cpu_pmu)
 {
-	armv8_pmu_init(cpu_pmu);
+	int ret = armv8_pmu_init(cpu_pmu);
+	if (ret)
+		return ret;
+
 	cpu_pmu->name			= "armv8_brcm_vulcan";
 	cpu_pmu->map_event		= armv8_vulcan_map_event;
 	cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] =
 		&armv8_pmuv3_events_attr_group;
 	cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] =
 		&armv8_pmuv3_format_attr_group;
-	return armv8pmu_probe_pmu(cpu_pmu);
+
+	return 0;
 }
 
 static const struct of_device_id armv8_pmu_of_device_ids[] = {
@@ -1081,24 +1136,9 @@ static const struct of_device_id armv8_pmu_of_device_ids[] = {
 	{},
 };
 
-/*
- * Non DT systems have their micro/arch events probed at run-time.
- * A fairly complete list of generic events are provided and ones that
- * aren't supported by the current PMU are disabled.
- */
-static const struct pmu_probe_info armv8_pmu_probe_table[] = {
-	PMU_PROBE(0, 0, armv8_pmuv3_init), /* enable all defined counters */
-	{ /* sentinel value */ }
-};
-
 static int armv8_pmu_device_probe(struct platform_device *pdev)
 {
-	if (acpi_disabled)
-		return arm_pmu_device_probe(pdev, armv8_pmu_of_device_ids,
-					    NULL);
-
-	return arm_pmu_device_probe(pdev, armv8_pmu_of_device_ids,
-				    armv8_pmu_probe_table);
+	return arm_pmu_device_probe(pdev, armv8_pmu_of_device_ids, NULL);
 }
 
 static struct platform_driver armv8_pmu_driver = {
@@ -1109,4 +1149,11 @@ static struct platform_driver armv8_pmu_driver = {
 	.probe		= armv8_pmu_device_probe,
 };
 
-builtin_platform_driver(armv8_pmu_driver);
+static int __init armv8_pmu_driver_init(void)
+{
+	if (acpi_disabled)
+		return platform_driver_register(&armv8_pmu_driver);
+	else
+		return arm_pmu_acpi_probe(armv8_pmuv3_init);
+}
+device_initcall(armv8_pmu_driver_init)
diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
index ffee4e454ac5..596990039b43 100644
--- a/arch/arm64/kernel/smp.c
+++ b/arch/arm64/kernel/smp.c
@@ -521,6 +521,13 @@ static bool bootcpu_valid __initdata;
 static unsigned int cpu_count = 1;
 
 #ifdef CONFIG_ACPI
+static struct acpi_madt_generic_interrupt cpu_madt_gicc[NR_CPUS];
+
+struct acpi_madt_generic_interrupt *acpi_cpu_get_madt_gicc(int cpu)
+{
+	return &cpu_madt_gicc[cpu];
+}
+
 /*
  * acpi_map_gic_cpu_interface - parse processor MADT entry
  *
@@ -555,6 +562,7 @@ acpi_map_gic_cpu_interface(struct acpi_madt_generic_interrupt *processor)
 			return;
 		}
 		bootcpu_valid = true;
+		cpu_madt_gicc[0] = *processor;
 		early_map_cpu_to_node(0, acpi_numa_get_nid(0, hwid));
 		return;
 	}
@@ -565,6 +573,8 @@ acpi_map_gic_cpu_interface(struct acpi_madt_generic_interrupt *processor)
 	/* map the logical cpu id to cpu MPIDR */
 	cpu_logical_map(cpu_count) = hwid;
 
+	cpu_madt_gicc[cpu_count] = *processor;
+
 	/*
 	 * Set-up the ACPI parking protocol cpu entries
 	 * while initializing the cpu_logical_map to
diff --git a/drivers/perf/Kconfig b/drivers/perf/Kconfig
index 93651907874f..aa587edaf9ea 100644
--- a/drivers/perf/Kconfig
+++ b/drivers/perf/Kconfig
@@ -12,6 +12,10 @@ config ARM_PMU
 	  Say y if you want to use CPU performance monitors on ARM-based
 	  systems.
 
+config ARM_PMU_ACPI
+	depends on ARM_PMU && ACPI
+	def_bool y
+
 config QCOM_L2_PMU
 	bool "Qualcomm Technologies L2-cache PMU"
 	depends on ARCH_QCOM && ARM64 && PERF_EVENTS && ACPI
@@ -21,6 +25,16 @@ config QCOM_L2_PMU
 	  Adds the L2 cache PMU into the perf events subsystem for
 	  monitoring L2 cache events.
 
+config QCOM_L3_PMU
+	bool "Qualcomm Technologies L3-cache PMU"
+	depends on ARCH_QCOM && ARM64 && PERF_EVENTS && ACPI
+	select QCOM_IRQ_COMBINER
+	help
+	  Provides support for the L3 cache performance monitor unit (PMU)
+	  in Qualcomm Technologies processors.
+	  Adds the L3 cache PMU into the perf events subsystem for
+	  monitoring L3 cache events.
+
 config XGENE_PMU
 	depends on PERF_EVENTS && ARCH_XGENE
 	bool "APM X-Gene SoC PMU"
diff --git a/drivers/perf/Makefile b/drivers/perf/Makefile
index ef24833c94a8..6420bd4394d5 100644
--- a/drivers/perf/Makefile
+++ b/drivers/perf/Makefile
@@ -1,3 +1,5 @@
-obj-$(CONFIG_ARM_PMU) += arm_pmu.o
+obj-$(CONFIG_ARM_PMU) += arm_pmu.o arm_pmu_platform.o
+obj-$(CONFIG_ARM_PMU_ACPI) += arm_pmu_acpi.o
 obj-$(CONFIG_QCOM_L2_PMU)	+= qcom_l2_pmu.o
+obj-$(CONFIG_QCOM_L3_PMU) += qcom_l3_pmu.o
 obj-$(CONFIG_XGENE_PMU) += xgene_pmu.o
diff --git a/drivers/perf/arm_pmu.c b/drivers/perf/arm_pmu.c
index 9612b84bc3e0..dc459eb1246b 100644
--- a/drivers/perf/arm_pmu.c
+++ b/drivers/perf/arm_pmu.c
@@ -16,7 +16,6 @@
 #include <linux/cpu_pm.h>
 #include <linux/export.h>
 #include <linux/kernel.h>
-#include <linux/of_device.h>
 #include <linux/perf/arm_pmu.h>
 #include <linux/platform_device.h>
 #include <linux/slab.h>
@@ -25,7 +24,6 @@
 #include <linux/irq.h>
 #include <linux/irqdesc.h>
 
-#include <asm/cputype.h>
 #include <asm/irq_regs.h>
 
 static int
@@ -235,20 +233,15 @@ armpmu_add(struct perf_event *event, int flags)
 	struct pmu_hw_events *hw_events = this_cpu_ptr(armpmu->hw_events);
 	struct hw_perf_event *hwc = &event->hw;
 	int idx;
-	int err = 0;
 
 	/* An event following a process won't be stopped earlier */
 	if (!cpumask_test_cpu(smp_processor_id(), &armpmu->supported_cpus))
 		return -ENOENT;
 
-	perf_pmu_disable(event->pmu);
-
 	/* If we don't have a space for the counter then finish early. */
 	idx = armpmu->get_event_idx(hw_events, event);
-	if (idx < 0) {
-		err = idx;
-		goto out;
-	}
+	if (idx < 0)
+		return idx;
 
 	/*
 	 * If there is an event in the counter we are going to use then make
@@ -265,9 +258,7 @@ armpmu_add(struct perf_event *event, int flags)
 	/* Propagate our changes to the userspace mapping. */
 	perf_event_update_userpage(event);
 
-out:
-	perf_pmu_enable(event->pmu);
-	return err;
+	return 0;
 }
 
 static int
@@ -323,10 +314,16 @@ validate_group(struct perf_event *event)
 	return 0;
 }
 
+static struct arm_pmu_platdata *armpmu_get_platdata(struct arm_pmu *armpmu)
+{
+	struct platform_device *pdev = armpmu->plat_device;
+
+	return pdev ? dev_get_platdata(&pdev->dev) : NULL;
+}
+
 static irqreturn_t armpmu_dispatch_irq(int irq, void *dev)
 {
 	struct arm_pmu *armpmu;
-	struct platform_device *plat_device;
 	struct arm_pmu_platdata *plat;
 	int ret;
 	u64 start_clock, finish_clock;
@@ -338,8 +335,8 @@ static irqreturn_t armpmu_dispatch_irq(int irq, void *dev)
 	 * dereference.
 	 */
 	armpmu = *(void **)dev;
-	plat_device = armpmu->plat_device;
-	plat = dev_get_platdata(&plat_device->dev);
+
+	plat = armpmu_get_platdata(armpmu);
 
 	start_clock = sched_clock();
 	if (plat && plat->handle_irq)
@@ -352,37 +349,6 @@ static irqreturn_t armpmu_dispatch_irq(int irq, void *dev)
 	return ret;
 }
 
-static void
-armpmu_release_hardware(struct arm_pmu *armpmu)
-{
-	armpmu->free_irq(armpmu);
-}
-
-static int
-armpmu_reserve_hardware(struct arm_pmu *armpmu)
-{
-	int err = armpmu->request_irq(armpmu, armpmu_dispatch_irq);
-	if (err) {
-		armpmu_release_hardware(armpmu);
-		return err;
-	}
-
-	return 0;
-}
-
-static void
-hw_perf_event_destroy(struct perf_event *event)
-{
-	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
-	atomic_t *active_events = &armpmu->active_events;
-	struct mutex *pmu_reserve_mutex = &armpmu->reserve_mutex;
-
-	if (atomic_dec_and_mutex_lock(active_events, pmu_reserve_mutex)) {
-		armpmu_release_hardware(armpmu);
-		mutex_unlock(pmu_reserve_mutex);
-	}
-}
-
 static int
 event_requires_mode_exclusion(struct perf_event_attr *attr)
 {
@@ -455,8 +421,6 @@ __hw_perf_event_init(struct perf_event *event)
 static int armpmu_event_init(struct perf_event *event)
 {
 	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
-	int err = 0;
-	atomic_t *active_events = &armpmu->active_events;
 
 	/*
 	 * Reject CPU-affine events for CPUs that are of a different class to
@@ -476,26 +440,7 @@ static int armpmu_event_init(struct perf_event *event)
 	if (armpmu->map_event(event) == -ENOENT)
 		return -ENOENT;
 
-	event->destroy = hw_perf_event_destroy;
-
-	if (!atomic_inc_not_zero(active_events)) {
-		mutex_lock(&armpmu->reserve_mutex);
-		if (atomic_read(active_events) == 0)
-			err = armpmu_reserve_hardware(armpmu);
-
-		if (!err)
-			atomic_inc(active_events);
-		mutex_unlock(&armpmu->reserve_mutex);
-	}
-
-	if (err)
-		return err;
-
-	err = __hw_perf_event_init(event);
-	if (err)
-		hw_perf_event_destroy(event);
-
-	return err;
+	return __hw_perf_event_init(event);
 }
 
 static void armpmu_enable(struct pmu *pmu)
@@ -553,27 +498,6 @@ static struct attribute_group armpmu_common_attr_group = {
 	.attrs = armpmu_common_attrs,
 };
 
-static void armpmu_init(struct arm_pmu *armpmu)
-{
-	atomic_set(&armpmu->active_events, 0);
-	mutex_init(&armpmu->reserve_mutex);
-
-	armpmu->pmu = (struct pmu) {
-		.pmu_enable	= armpmu_enable,
-		.pmu_disable	= armpmu_disable,
-		.event_init	= armpmu_event_init,
-		.add		= armpmu_add,
-		.del		= armpmu_del,
-		.start		= armpmu_start,
-		.stop		= armpmu_stop,
-		.read		= armpmu_read,
-		.filter_match	= armpmu_filter_match,
-		.attr_groups	= armpmu->attr_groups,
-	};
-	armpmu->attr_groups[ARMPMU_ATTR_GROUP_COMMON] =
-		&armpmu_common_attr_group;
-}
-
 /* Set at runtime when we know what CPU type we are. */
 static struct arm_pmu *__oprofile_cpu_pmu;
 
@@ -601,113 +525,85 @@ int perf_num_counters(void)
 }
 EXPORT_SYMBOL_GPL(perf_num_counters);
 
-static void cpu_pmu_enable_percpu_irq(void *data)
+void armpmu_free_irq(struct arm_pmu *armpmu, int cpu)
 {
-	int irq = *(int *)data;
+	struct pmu_hw_events __percpu *hw_events = armpmu->hw_events;
+	int irq = per_cpu(hw_events->irq, cpu);
 
-	enable_percpu_irq(irq, IRQ_TYPE_NONE);
-}
+	if (!cpumask_test_and_clear_cpu(cpu, &armpmu->active_irqs))
+		return;
 
-static void cpu_pmu_disable_percpu_irq(void *data)
-{
-	int irq = *(int *)data;
+	if (irq_is_percpu(irq)) {
+		free_percpu_irq(irq, &hw_events->percpu_pmu);
+		cpumask_clear(&armpmu->active_irqs);
+		return;
+	}
 
-	disable_percpu_irq(irq);
+	free_irq(irq, per_cpu_ptr(&hw_events->percpu_pmu, cpu));
 }
 
-static void cpu_pmu_free_irq(struct arm_pmu *cpu_pmu)
+void armpmu_free_irqs(struct arm_pmu *armpmu)
 {
-	int i, irq, irqs;
-	struct platform_device *pmu_device = cpu_pmu->plat_device;
-	struct pmu_hw_events __percpu *hw_events = cpu_pmu->hw_events;
-
-	irqs = min(pmu_device->num_resources, num_possible_cpus());
-
-	irq = platform_get_irq(pmu_device, 0);
-	if (irq > 0 && irq_is_percpu(irq)) {
-		on_each_cpu_mask(&cpu_pmu->supported_cpus,
-				 cpu_pmu_disable_percpu_irq, &irq, 1);
-		free_percpu_irq(irq, &hw_events->percpu_pmu);
-	} else {
-		for (i = 0; i < irqs; ++i) {
-			int cpu = i;
-
-			if (cpu_pmu->irq_affinity)
-				cpu = cpu_pmu->irq_affinity[i];
+	int cpu;
 
-			if (!cpumask_test_and_clear_cpu(cpu, &cpu_pmu->active_irqs))
-				continue;
-			irq = platform_get_irq(pmu_device, i);
-			if (irq > 0)
-				free_irq(irq, per_cpu_ptr(&hw_events->percpu_pmu, cpu));
-		}
-	}
+	for_each_cpu(cpu, &armpmu->supported_cpus)
+		armpmu_free_irq(armpmu, cpu);
 }
 
-static int cpu_pmu_request_irq(struct arm_pmu *cpu_pmu, irq_handler_t handler)
+int armpmu_request_irq(struct arm_pmu *armpmu, int cpu)
 {
-	int i, err, irq, irqs;
-	struct platform_device *pmu_device = cpu_pmu->plat_device;
-	struct pmu_hw_events __percpu *hw_events = cpu_pmu->hw_events;
-
-	if (!pmu_device)
-		return -ENODEV;
-
-	irqs = min(pmu_device->num_resources, num_possible_cpus());
-	if (irqs < 1) {
-		pr_warn_once("perf/ARM: No irqs for PMU defined, sampling events not supported\n");
+	int err = 0;
+	struct pmu_hw_events __percpu *hw_events = armpmu->hw_events;
+	const irq_handler_t handler = armpmu_dispatch_irq;
+	int irq = per_cpu(hw_events->irq, cpu);
+	if (!irq)
 		return 0;
-	}
 
-	irq = platform_get_irq(pmu_device, 0);
-	if (irq > 0 && irq_is_percpu(irq)) {
+	if (irq_is_percpu(irq) && cpumask_empty(&armpmu->active_irqs)) {
 		err = request_percpu_irq(irq, handler, "arm-pmu",
 					 &hw_events->percpu_pmu);
-		if (err) {
-			pr_err("unable to request IRQ%d for ARM PMU counters\n",
-				irq);
-			return err;
-		}
+	} else if (irq_is_percpu(irq)) {
+		int other_cpu = cpumask_first(&armpmu->active_irqs);
+		int other_irq = per_cpu(hw_events->irq, other_cpu);
 
-		on_each_cpu_mask(&cpu_pmu->supported_cpus,
-				 cpu_pmu_enable_percpu_irq, &irq, 1);
+		if (irq != other_irq) {
+			pr_warn("mismatched PPIs detected.\n");
+			err = -EINVAL;
+		}
 	} else {
-		for (i = 0; i < irqs; ++i) {
-			int cpu = i;
+		err = request_irq(irq, handler,
+				  IRQF_NOBALANCING | IRQF_NO_THREAD, "arm-pmu",
				  per_cpu_ptr(&hw_events->percpu_pmu, cpu));
+	}
 
-			err = 0;
-			irq = platform_get_irq(pmu_device, i);
-			if (irq < 0)
-				continue;
+	if (err) {
+		pr_err("unable to request IRQ%d for ARM PMU counters\n",
+		       irq);
+		return err;
+	}
 
-			if (cpu_pmu->irq_affinity)
-				cpu = cpu_pmu->irq_affinity[i];
+	cpumask_set_cpu(cpu, &armpmu->active_irqs);
 
-			/*
-			 * If we have a single PMU interrupt that we can't shift,
-			 * assume that we're running on a uniprocessor machine and
-			 * continue. Otherwise, continue without this interrupt.
-			 */
-			if (irq_set_affinity(irq, cpumask_of(cpu)) && irqs > 1) {
-				pr_warn("unable to set irq affinity (irq=%d, cpu=%u)\n",
-					irq, cpu);
-				continue;
-			}
-
-			err = request_irq(irq, handler,
-					  IRQF_NOBALANCING | IRQF_NO_THREAD, "arm-pmu",
-					  per_cpu_ptr(&hw_events->percpu_pmu, cpu));
-			if (err) {
-				pr_err("unable to request IRQ%d for ARM PMU counters\n",
-					irq);
-				return err;
-			}
-
-			cpumask_set_cpu(cpu, &cpu_pmu->active_irqs);
-		}
-	}
+	return 0;
+}
 
-	return 0;
+int armpmu_request_irqs(struct arm_pmu *armpmu)
+{
+	int cpu, err;
+
+	for_each_cpu(cpu, &armpmu->supported_cpus) {
+		err = armpmu_request_irq(armpmu, cpu);
+		if (err)
+			break;
 	}
 
-	return 0;
+	return err;
+}
+
+static int armpmu_get_cpu_irq(struct arm_pmu *pmu, int cpu)
+{
+	struct pmu_hw_events __percpu *hw_events = pmu->hw_events;
+	return per_cpu(hw_events->irq, cpu);
 }
 
 /*
@@ -719,11 +615,42 @@ static int cpu_pmu_request_irq(struct arm_pmu *cpu_pmu, irq_handler_t handler)
 static int arm_perf_starting_cpu(unsigned int cpu, struct hlist_node *node)
 {
 	struct arm_pmu *pmu = hlist_entry_safe(node, struct arm_pmu, node);
+	int irq;
 
 	if (!cpumask_test_cpu(cpu, &pmu->supported_cpus))
 		return 0;
 	if (pmu->reset)
 		pmu->reset(pmu);
+
+	irq = armpmu_get_cpu_irq(pmu, cpu);
+	if (irq) {
+		if (irq_is_percpu(irq)) {
+			enable_percpu_irq(irq, IRQ_TYPE_NONE);
+			return 0;
+		}
+
+		if (irq_force_affinity(irq, cpumask_of(cpu)) &&
+		    num_possible_cpus() > 1) {
+			pr_warn("unable to set irq affinity (irq=%d, cpu=%u)\n",
+				irq, cpu);
+		}
+	}
+
+	return 0;
+}
+
+static int arm_perf_teardown_cpu(unsigned int cpu, struct hlist_node *node)
+{
+	struct arm_pmu *pmu = hlist_entry_safe(node, struct arm_pmu, node);
+	int irq;
+
+	if (!cpumask_test_cpu(cpu, &pmu->supported_cpus))
+		return 0;
+
+	irq = armpmu_get_cpu_irq(pmu, cpu);
+	if (irq && irq_is_percpu(irq))
+		disable_percpu_irq(irq);
+
 	return 0;
 }
 
@@ -828,56 +755,22 @@ static inline void cpu_pm_pmu_unregister(struct arm_pmu *cpu_pmu) { }
 static int cpu_pmu_init(struct arm_pmu *cpu_pmu)
 {
 	int err;
-	int cpu;
-	struct pmu_hw_events __percpu *cpu_hw_events;
-
-	cpu_hw_events = alloc_percpu(struct pmu_hw_events);
-	if (!cpu_hw_events)
-		return -ENOMEM;
 
-	err = cpuhp_state_add_instance_nocalls(CPUHP_AP_PERF_ARM_STARTING,
-					       &cpu_pmu->node);
+	err = cpuhp_state_add_instance(CPUHP_AP_PERF_ARM_STARTING,
+				       &cpu_pmu->node);
 	if (err)
-		goto out_free;
+		goto out;
 
 	err = cpu_pm_pmu_register(cpu_pmu);
 	if (err)
 		goto out_unregister;
 
-	for_each_possible_cpu(cpu) {
-		struct pmu_hw_events *events = per_cpu_ptr(cpu_hw_events, cpu);
-		raw_spin_lock_init(&events->pmu_lock);
-		events->percpu_pmu = cpu_pmu;
-	}
-
-	cpu_pmu->hw_events	= cpu_hw_events;
-	cpu_pmu->request_irq	= cpu_pmu_request_irq;
-	cpu_pmu->free_irq	= cpu_pmu_free_irq;
-
-	/* Ensure the PMU has sane values out of reset. */
-	if (cpu_pmu->reset)
-		on_each_cpu_mask(&cpu_pmu->supported_cpus, cpu_pmu->reset,
-			 cpu_pmu, 1);
-
-	/* If no interrupts available, set the corresponding capability flag */
-	if (!platform_get_irq(cpu_pmu->plat_device, 0))
-		cpu_pmu->pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT;
-
-	/*
-	 * This is a CPU PMU potentially in a heterogeneous configuration (e.g.
-	 * big.LITTLE). This is not an uncore PMU, and we have taken ctx
-	 * sharing into account (e.g. with our pmu::filter_match callback and
-	 * pmu::event_init group validation).
-	 */
-	cpu_pmu->pmu.capabilities |= PERF_PMU_CAP_HETEROGENEOUS_CPUS;
-
 	return 0;
 
 out_unregister:
 	cpuhp_state_remove_instance_nocalls(CPUHP_AP_PERF_ARM_STARTING,
 					    &cpu_pmu->node);
-out_free:
-	free_percpu(cpu_hw_events);
+out:
 	return err;
 }
 
@@ -886,177 +779,78 @@ static void cpu_pmu_destroy(struct arm_pmu *cpu_pmu)
 	cpu_pm_pmu_unregister(cpu_pmu);
 	cpuhp_state_remove_instance_nocalls(CPUHP_AP_PERF_ARM_STARTING,
 					    &cpu_pmu->node);
-	free_percpu(cpu_pmu->hw_events);
 }
 
-/*
- * CPU PMU identification and probing.
- */
-static int probe_current_pmu(struct arm_pmu *pmu,
-			     const struct pmu_probe_info *info)
+struct arm_pmu *armpmu_alloc(void)
 {
-	int cpu = get_cpu();
-	unsigned int cpuid = read_cpuid_id();
-	int ret = -ENODEV;
-
-	pr_info("probing PMU on CPU %d\n", cpu);
+	struct arm_pmu *pmu;
+	int cpu;
 
-	for (; info->init != NULL; info++) {
-		if ((cpuid & info->mask) != info->cpuid)
-			continue;
-		ret = info->init(pmu);
-		break;
+	pmu = kzalloc(sizeof(*pmu), GFP_KERNEL);
+	if (!pmu) {
+		pr_info("failed to allocate PMU device!\n");
+		goto out;
 	}
 
-	put_cpu();
-	return ret;
-}
-
-static int of_pmu_irq_cfg(struct arm_pmu *pmu)
-{
-	int *irqs, i = 0;
-	bool using_spi = false;
-	struct platform_device *pdev = pmu->plat_device;
-
-	irqs = kcalloc(pdev->num_resources, sizeof(*irqs), GFP_KERNEL);
-	if (!irqs)
-		return -ENOMEM;
-
-	do {
-		struct device_node *dn;
-		int cpu, irq;
-
-		/* See if we have an affinity entry */
-		dn = of_parse_phandle(pdev->dev.of_node, "interrupt-affinity", i);
-		if (!dn)
-			break;
-
-		/* Check the IRQ type and prohibit a mix of PPIs and SPIs */
-		irq = platform_get_irq(pdev, i);
-		if (irq > 0) {
-			bool spi = !irq_is_percpu(irq);
-
-			if (i > 0 && spi != using_spi) {
-				pr_err("PPI/SPI IRQ type mismatch for %s!\n",
-					dn->name);
-				of_node_put(dn);
-				kfree(irqs);
-				return -EINVAL;
-			}
-
-			using_spi = spi;
-		}
-
-		/* Now look up the logical CPU number */
-		for_each_possible_cpu(cpu) {
-			struct device_node *cpu_dn;
-
-			cpu_dn = of_cpu_device_node_get(cpu);
-			of_node_put(cpu_dn);
-
-			if (dn == cpu_dn)
-				break;
-		}
+	pmu->hw_events = alloc_percpu(struct pmu_hw_events);
+	if (!pmu->hw_events) {
+		pr_info("failed to allocate per-cpu PMU data.\n");
+		goto out_free_pmu;
+	}
 
-		if (cpu >= nr_cpu_ids) {
-			pr_warn("Failed to find logical CPU for %s\n",
-				dn->name);
-			of_node_put(dn);
-			cpumask_setall(&pmu->supported_cpus);
-			break;
-		}
-		of_node_put(dn);
+	pmu->pmu = (struct pmu) {
+		.pmu_enable	= armpmu_enable,
+		.pmu_disable	= armpmu_disable,
+		.event_init	= armpmu_event_init,
+		.add		= armpmu_add,
+		.del		= armpmu_del,
+		.start		= armpmu_start,
+		.stop		= armpmu_stop,
+		.read		= armpmu_read,
+		.filter_match	= armpmu_filter_match,
+		.attr_groups	= pmu->attr_groups,
+		/*
+		 * This is a CPU PMU potentially in a heterogeneous
+		 * configuration (e.g. big.LITTLE). This is not an uncore PMU,
+		 * and we have taken ctx sharing into account (e.g. with our
+		 * pmu::filter_match callback and pmu::event_init group
+		 * validation).
+		 */
+		.capabilities	= PERF_PMU_CAP_HETEROGENEOUS_CPUS,
+	};
 
-		/* For SPIs, we need to track the affinity per IRQ */
-		if (using_spi) {
-			if (i >= pdev->num_resources)
-				break;
+	pmu->attr_groups[ARMPMU_ATTR_GROUP_COMMON] =
+		&armpmu_common_attr_group;
 
-			irqs[i] = cpu;
-		}
+	for_each_possible_cpu(cpu) {
+		struct pmu_hw_events *events;
 
-		/* Keep track of the CPUs containing this PMU type */
-		cpumask_set_cpu(cpu, &pmu->supported_cpus);
-		i++;
-	} while (1);
-
-	/* If we didn't manage to parse anything, try the interrupt affinity */
-	if (cpumask_weight(&pmu->supported_cpus) == 0) {
-		int irq = platform_get_irq(pdev, 0);
-
-		if (irq > 0 && irq_is_percpu(irq)) {
-			/* If using PPIs, check the affinity of the partition */
-			int ret;
-
-			ret = irq_get_percpu_devid_partition(irq, &pmu->supported_cpus);
-			if (ret) {
-				kfree(irqs);
-				return ret;
-			}
-		} else {
-			/* Otherwise default to all CPUs */
-			cpumask_setall(&pmu->supported_cpus);
-		}
+		events = per_cpu_ptr(pmu->hw_events, cpu);
+		raw_spin_lock_init(&events->pmu_lock);
+		events->percpu_pmu = pmu;
 	}
 
-	/* If we matched up the IRQ affinities, use them to route the SPIs */
-	if (using_spi && i == pdev->num_resources)
-		pmu->irq_affinity = irqs;
-	else
-		kfree(irqs);
+	return pmu;
 
-	return 0;
+out_free_pmu:
+	kfree(pmu);
+out:
+	return NULL;
 }
 
-int arm_pmu_device_probe(struct platform_device *pdev,
-			 const struct of_device_id *of_table,
-			 const struct pmu_probe_info *probe_table)
