-rw-r--r--  arch/arm/common/mcpm_entry.c           281
-rw-r--r--  arch/arm/include/asm/mcpm.h             73
-rw-r--r--  arch/arm/include/asm/perf_event.h        7
-rw-r--r--  arch/arm/include/asm/pmu.h              19
-rw-r--r--  arch/arm/kernel/Makefile                 4
-rw-r--r--  arch/arm/kernel/perf_event.c           408
-rw-r--r--  arch/arm/kernel/perf_event_cpu.c       421
-rw-r--r--  arch/arm/kernel/perf_event_v6.c         49
-rw-r--r--  arch/arm/kernel/perf_event_v7.c        129
-rw-r--r--  arch/arm/kernel/perf_event_xscale.c     32
-rw-r--r--  arch/arm/mach-exynos/suspend.c           8
-rw-r--r--  arch/arm/mach-hisi/platmcpm.c          133
-rw-r--r--  drivers/cpuidle/cpuidle-big_little.c     8
-rw-r--r--  include/linux/perf_event.h               5
-rw-r--r--  kernel/events/core.c                     8
15 files changed, 730 insertions, 855 deletions
diff --git a/arch/arm/common/mcpm_entry.c b/arch/arm/common/mcpm_entry.c
index 5f8a52ac7edf..a923524d1040 100644
--- a/arch/arm/common/mcpm_entry.c
+++ b/arch/arm/common/mcpm_entry.c
@@ -20,6 +20,126 @@
#include <asm/cputype.h>
#include <asm/suspend.h>
+/*
+ * The public API for this code is documented in arch/arm/include/asm/mcpm.h.
+ * For a comprehensive description of the main algorithm used here, please
+ * see Documentation/arm/cluster-pm-race-avoidance.txt.
+ */
+
+struct sync_struct mcpm_sync;
+
+/*
+ * __mcpm_cpu_going_down: Indicates that the cpu is being torn down.
+ * This must be called at the point of committing to teardown of a CPU.
+ * The CPU cache (SCTRL.C bit) is expected to still be active.
+ */
+static void __mcpm_cpu_going_down(unsigned int cpu, unsigned int cluster)
+{
+ mcpm_sync.clusters[cluster].cpus[cpu].cpu = CPU_GOING_DOWN;
+ sync_cache_w(&mcpm_sync.clusters[cluster].cpus[cpu].cpu);
+}
+
+/*
+ * __mcpm_cpu_down: Indicates that cpu teardown is complete and that the
+ * cluster can be torn down without disrupting this CPU.
+ * To avoid deadlocks, this must be called before a CPU is powered down.
+ * The CPU cache (SCTRL.C bit) is expected to be off.
+ * However L2 cache might or might not be active.
+ */
+static void __mcpm_cpu_down(unsigned int cpu, unsigned int cluster)
+{
+ dmb();
+ mcpm_sync.clusters[cluster].cpus[cpu].cpu = CPU_DOWN;
+ sync_cache_w(&mcpm_sync.clusters[cluster].cpus[cpu].cpu);
+ sev();
+}
+
+/*
+ * __mcpm_outbound_leave_critical: Leave the cluster teardown critical section.
+ * @state: the final state of the cluster:
+ * CLUSTER_UP: no destructive teardown was done and the cluster has been
+ * restored to the previous state (CPU cache still active); or
+ * CLUSTER_DOWN: the cluster has been torn-down, ready for power-off
+ * (CPU cache disabled, L2 cache either enabled or disabled).
+ */
+static void __mcpm_outbound_leave_critical(unsigned int cluster, int state)
+{
+ dmb();
+ mcpm_sync.clusters[cluster].cluster = state;
+ sync_cache_w(&mcpm_sync.clusters[cluster].cluster);
+ sev();
+}
+
+/*
+ * __mcpm_outbound_enter_critical: Enter the cluster teardown critical section.
+ * This function should be called by the last man, after local CPU teardown
+ * is complete. CPU cache expected to be active.
+ *
+ * Returns:
+ * false: the critical section was not entered because an inbound CPU was
+ * observed, or the cluster is already being set up;
+ * true: the critical section was entered: it is now safe to tear down the
+ * cluster.
+ */
+static bool __mcpm_outbound_enter_critical(unsigned int cpu, unsigned int cluster)
+{
+ unsigned int i;
+ struct mcpm_sync_struct *c = &mcpm_sync.clusters[cluster];
+
+ /* Warn inbound CPUs that the cluster is being torn down: */
+ c->cluster = CLUSTER_GOING_DOWN;
+ sync_cache_w(&c->cluster);
+
+ /* Back out if the inbound cluster is already in the critical region: */
+ sync_cache_r(&c->inbound);
+ if (c->inbound == INBOUND_COMING_UP)
+ goto abort;
+
+ /*
+ * Wait for all CPUs to get out of the GOING_DOWN state, so that local
+ * teardown is complete on each CPU before tearing down the cluster.
+ *
+ * If any CPU has been woken up again from the DOWN state, then we
+ * shouldn't be taking the cluster down at all: abort in that case.
+ */
+ sync_cache_r(&c->cpus);
+ for (i = 0; i < MAX_CPUS_PER_CLUSTER; i++) {
+ int cpustate;
+
+ if (i == cpu)
+ continue;
+
+ while (1) {
+ cpustate = c->cpus[i].cpu;
+ if (cpustate != CPU_GOING_DOWN)
+ break;
+
+ wfe();
+ sync_cache_r(&c->cpus[i].cpu);
+ }
+
+ switch (cpustate) {
+ case CPU_DOWN:
+ continue;
+
+ default:
+ goto abort;
+ }
+ }
+
+ return true;
+
+abort:
+ __mcpm_outbound_leave_critical(cluster, CLUSTER_UP);
+ return false;
+}
+
+static int __mcpm_cluster_state(unsigned int cluster)
+{
+ sync_cache_r(&mcpm_sync.clusters[cluster].cluster);
+ return mcpm_sync.clusters[cluster].cluster;
+}
+
extern unsigned long mcpm_entry_vectors[MAX_NR_CLUSTERS][MAX_CPUS_PER_CLUSTER];
void mcpm_set_entry_vector(unsigned cpu, unsigned cluster, void *ptr)
@@ -78,16 +198,11 @@ int mcpm_cpu_power_up(unsigned int cpu, unsigned int cluster)
bool cpu_is_down, cluster_is_down;
int ret = 0;
+ pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
if (!platform_ops)
return -EUNATCH; /* try not to shadow power_up errors */
might_sleep();
- /* backward compatibility callback */
- if (platform_ops->power_up)
- return platform_ops->power_up(cpu, cluster);
-
- pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
-
/*
* Since this is called with IRQs enabled, and no arch_spin_lock_irq
* variant exists, we need to disable IRQs manually here.
@@ -128,29 +243,17 @@ void mcpm_cpu_power_down(void)
bool cpu_going_down, last_man;
phys_reset_t phys_reset;
+ mpidr = read_cpuid_mpidr();
+ cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
+ cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
+ pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
if (WARN_ON_ONCE(!platform_ops))
return;
BUG_ON(!irqs_disabled());
- /*
- * Do this before calling into the power_down method,
- * as it might not always be safe to do afterwards.
- */
setup_mm_for_reboot();
- /* backward compatibility callback */
- if (platform_ops->power_down) {
- platform_ops->power_down();
- goto not_dead;
- }
-
- mpidr = read_cpuid_mpidr();
- cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
- cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
- pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
-
__mcpm_cpu_going_down(cpu, cluster);
-
arch_spin_lock(&mcpm_lock);
BUG_ON(__mcpm_cluster_state(cluster) != CLUSTER_UP);
@@ -187,7 +290,6 @@ void mcpm_cpu_power_down(void)
if (cpu_going_down)
wfi();
-not_dead:
/*
* It is possible for a power_up request to happen concurrently
* with a power_down request for the same CPU. In this case the
@@ -219,22 +321,11 @@ int mcpm_wait_for_cpu_powerdown(unsigned int cpu, unsigned int cluster)
return ret;
}
-void mcpm_cpu_suspend(u64 expected_residency)
+void mcpm_cpu_suspend(void)
{
if (WARN_ON_ONCE(!platform_ops))
return;
- /* backward compatibility callback */
- if (platform_ops->suspend) {
- phys_reset_t phys_reset;
- BUG_ON(!irqs_disabled());
- setup_mm_for_reboot();
- platform_ops->suspend(expected_residency);
- phys_reset = (phys_reset_t)(unsigned long)virt_to_phys(cpu_reset);
- phys_reset(virt_to_phys(mcpm_entry_point));
- BUG();
- }
-
/* Some platforms might have to enable special resume modes, etc. */
if (platform_ops->cpu_suspend_prepare) {
unsigned int mpidr = read_cpuid_mpidr();
@@ -256,12 +347,6 @@ int mcpm_cpu_powered_up(void)
if (!platform_ops)
return -EUNATCH;
- /* backward compatibility callback */
- if (platform_ops->powered_up) {
- platform_ops->powered_up();
- return 0;
- }
-
mpidr = read_cpuid_mpidr();
cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
@@ -334,120 +419,6 @@ int __init mcpm_loopback(void (*cache_disable)(void))
#endif
-struct sync_struct mcpm_sync;
-
-/*
- * __mcpm_cpu_going_down: Indicates that the cpu is being torn down.
- * This must be called at the point of committing to teardown of a CPU.
- * The CPU cache (SCTRL.C bit) is expected to still be active.
- */
-void __mcpm_cpu_going_down(unsigned int cpu, unsigned int cluster)
-{
- mcpm_sync.clusters[cluster].cpus[cpu].cpu = CPU_GOING_DOWN;
- sync_cache_w(&mcpm_sync.clusters[cluster].cpus[cpu].cpu);
-}
-
-/*
- * __mcpm_cpu_down: Indicates that cpu teardown is complete and that the
- * cluster can be torn down without disrupting this CPU.
- * To avoid deadlocks, this must be called before a CPU is powered down.
- * The CPU cache (SCTRL.C bit) is expected to be off.
- * However L2 cache might or might not be active.
- */
-void __mcpm_cpu_down(unsigned int cpu, unsigned int cluster)
-{
- dmb();
- mcpm_sync.clusters[cluster].cpus[cpu].cpu = CPU_DOWN;
- sync_cache_w(&mcpm_sync.clusters[cluster].cpus[cpu].cpu);
- sev();
-}
-
-/*
- * __mcpm_outbound_leave_critical: Leave the cluster teardown critical section.
- * @state: the final state of the cluster:
- * CLUSTER_UP: no destructive teardown was done and the cluster has been
- * restored to the previous state (CPU cache still active); or
- * CLUSTER_DOWN: the cluster has been torn-down, ready for power-off
- * (CPU cache disabled, L2 cache either enabled or disabled).
- */
-void __mcpm_outbound_leave_critical(unsigned int cluster, int state)
-{
- dmb();
- mcpm_sync.clusters[cluster].cluster = state;
- sync_cache_w(&mcpm_sync.clusters[cluster].cluster);
- sev();
-}
-
-/*
- * __mcpm_outbound_enter_critical: Enter the cluster teardown critical section.
- * This function should be called by the last man, after local CPU teardown
- * is complete. CPU cache expected to be active.
- *
- * Returns:
- * false: the critical section was not entered because an inbound CPU was
- * observed, or the cluster is already being set up;
- * true: the critical section was entered: it is now safe to tear down the
- * cluster.
- */
-bool __mcpm_outbound_enter_critical(unsigned int cpu, unsigned int cluster)
-{
- unsigned int i;
- struct mcpm_sync_struct *c = &mcpm_sync.clusters[cluster];
-
- /* Warn inbound CPUs that the cluster is being torn down: */
- c->cluster = CLUSTER_GOING_DOWN;
- sync_cache_w(&c->cluster);
-
- /* Back out if the inbound cluster is already in the critical region: */
- sync_cache_r(&c->inbound);
- if (c->inbound == INBOUND_COMING_UP)
- goto abort;
-
- /*
- * Wait for all CPUs to get out of the GOING_DOWN state, so that local
- * teardown is complete on each CPU before tearing down the cluster.
- *
- * If any CPU has been woken up again from the DOWN state, then we
- * shouldn't be taking the cluster down at all: abort in that case.
- */
- sync_cache_r(&c->cpus);
- for (i = 0; i < MAX_CPUS_PER_CLUSTER; i++) {
- int cpustate;
-
- if (i == cpu)
- continue;
-
- while (1) {
- cpustate = c->cpus[i].cpu;
- if (cpustate != CPU_GOING_DOWN)
- break;
-
- wfe();
- sync_cache_r(&c->cpus[i].cpu);
- }
-
- switch (cpustate) {
- case CPU_DOWN:
- continue;
-
- default:
- goto abort;
- }
- }
-
- return true;
-
-abort:
- __mcpm_outbound_leave_critical(cluster, CLUSTER_UP);
- return false;
-}
-
-int __mcpm_cluster_state(unsigned int cluster)
-{
- sync_cache_r(&mcpm_sync.clusters[cluster].cluster);
- return mcpm_sync.clusters[cluster].cluster;
-}
-
extern unsigned long mcpm_power_up_setup_phys;
int __init mcpm_sync_init(
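With the backward-compatibility callbacks removed and the low-level synchronisation helpers made static, every MCPM backend now has to use the fine-grained callback interface and let the core code above drive the CPU/cluster state machine. The sketch below is illustrative only and not part of this patch: the foo_pm_* names are made up and most callbacks are omitted, but the rough shape of a backend after this change is:

#include <linux/init.h>
#include <asm/mcpm.h>

/* Switch the CPU power rail on; called from mcpm_cpu_power_up(). */
static int foo_pm_cpu_powerup(unsigned int cpu, unsigned int cluster)
{
	/* program the platform power controller here */
	return 0;
}

/* Arm the power controller to cut the rail once this CPU executes WFI. */
static void foo_pm_cpu_powerdown_prepare(unsigned int cpu, unsigned int cluster)
{
}

/* Clean and disable the local CPU cache (SCTLR.C) before power-down. */
static void foo_pm_cpu_cache_disable(void)
{
}

static const struct mcpm_platform_ops foo_pm_power_ops = {
	.cpu_powerup		= foo_pm_cpu_powerup,
	.cpu_powerdown_prepare	= foo_pm_cpu_powerdown_prepare,
	.cpu_cache_disable	= foo_pm_cpu_cache_disable,
	/* cluster_powerup, cluster_cache_disable, cpu_is_up, ... */
};

static int __init foo_pm_mcpm_init(void)
{
	return mcpm_platform_register(&foo_pm_power_ops);
}
early_initcall(foo_pm_mcpm_init);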
diff --git a/arch/arm/include/asm/mcpm.h b/arch/arm/include/asm/mcpm.h
index 50b378f59e08..acd4983d9b1f 100644
--- a/arch/arm/include/asm/mcpm.h
+++ b/arch/arm/include/asm/mcpm.h
@@ -137,17 +137,12 @@ int mcpm_wait_for_cpu_powerdown(unsigned int cpu, unsigned int cluster);
/**
* mcpm_cpu_suspend - bring the calling CPU in a suspended state
*
- * @expected_residency: duration in microseconds the CPU is expected
- * to remain suspended, or 0 if unknown/infinity.
- *
- * The calling CPU is suspended. The expected residency argument is used
- * as a hint by the platform specific backend to implement the appropriate
- * sleep state level according to the knowledge it has on wake-up latency
- * for the given hardware.
+ * The calling CPU is suspended. This is similar to mcpm_cpu_power_down()
+ * except for possible extra platform specific configuration steps to allow
+ * an asynchronous wake-up e.g. with a pending interrupt.
*
* If this CPU is found to be the "last man standing" in the cluster
- * then the cluster may be prepared for power-down too, if the expected
- * residency makes it worthwhile.
+ * then the cluster may be prepared for power-down too.
*
* This must be called with interrupts disabled.
*
@@ -157,7 +152,7 @@ int mcpm_wait_for_cpu_powerdown(unsigned int cpu, unsigned int cluster);
* This will return if mcpm_platform_register() has not been called
* previously in which case the caller should take appropriate action.
*/
-void mcpm_cpu_suspend(u64 expected_residency);
+void mcpm_cpu_suspend(void);
/**
* mcpm_cpu_powered_up - housekeeping work after a CPU has been powered up
@@ -234,12 +229,6 @@ struct mcpm_platform_ops {
void (*cpu_is_up)(unsigned int cpu, unsigned int cluster);
void (*cluster_is_up)(unsigned int cluster);
int (*wait_for_powerdown)(unsigned int cpu, unsigned int cluster);
-
- /* deprecated callbacks */
- int (*power_up)(unsigned int cpu, unsigned int cluster);
- void (*power_down)(void);
- void (*suspend)(u64);
- void (*powered_up)(void);
};
/**
@@ -251,35 +240,6 @@ struct mcpm_platform_ops {
*/
int __init mcpm_platform_register(const struct mcpm_platform_ops *ops);
-/* Synchronisation structures for coordinating safe cluster setup/teardown: */
-
-/*
- * When modifying this structure, make sure you update the MCPM_SYNC_ defines
- * to match.
- */
-struct mcpm_sync_struct {
- /* individual CPU states */
- struct {
- s8 cpu __aligned(__CACHE_WRITEBACK_GRANULE);
- } cpus[MAX_CPUS_PER_CLUSTER];
-
- /* cluster state */
- s8 cluster __aligned(__CACHE_WRITEBACK_GRANULE);
-
- /* inbound-side state */
- s8 inbound __aligned(__CACHE_WRITEBACK_GRANULE);
-};
-
-struct sync_struct {
- struct mcpm_sync_struct clusters[MAX_NR_CLUSTERS];
-};
-
-void __mcpm_cpu_going_down(unsigned int cpu, unsigned int cluster);
-void __mcpm_cpu_down(unsigned int cpu, unsigned int cluster);
-void __mcpm_outbound_leave_critical(unsigned int cluster, int state);
-bool __mcpm_outbound_enter_critical(unsigned int this_cpu, unsigned int cluster);
-int __mcpm_cluster_state(unsigned int cluster);
-
/**
* mcpm_sync_init - Initialize the cluster synchronization support
*
@@ -318,6 +278,29 @@ int __init mcpm_loopback(void (*cache_disable)(void));
void __init mcpm_smp_set_ops(void);
+/*
+ * Synchronisation structures for coordinating safe cluster setup/teardown.
+ * This is private to the MCPM core code and shared between C and assembly.
+ * When modifying this structure, make sure you update the MCPM_SYNC_ defines
+ * to match.
+ */
+struct mcpm_sync_struct {
+ /* individual CPU states */
+ struct {
+ s8 cpu __aligned(__CACHE_WRITEBACK_GRANULE);
+ } cpus[MAX_CPUS_PER_CLUSTER];
+
+ /* cluster state */
+ s8 cluster __aligned(__CACHE_WRITEBACK_GRANULE);
+
+ /* inbound-side state */
+ s8 inbound __aligned(__CACHE_WRITEBACK_GRANULE);
+};
+
+struct sync_struct {
+ struct mcpm_sync_struct clusters[MAX_NR_CLUSTERS];
+};
+
#else
/*
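mcpm_cpu_suspend() has lost its expected_residency argument, so callers simply invoke it with interrupts disabled and leave the choice of sleep depth to the platform backend. Below is a hedged sketch of how a suspend finisher, along the lines of the big.LITTLE cpuidle driver that is also updated by this series, would call it after the change; the foo_ name is a placeholder:

#include <asm/cputype.h>
#include <asm/mcpm.h>
#include <asm/suspend.h>

static int foo_powerdown_finisher(unsigned long arg)
{
	unsigned int mpidr = read_cpuid_mpidr();
	unsigned int cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
	unsigned int cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);

	mcpm_set_entry_vector(cpu, cluster, cpu_resume);
	mcpm_cpu_suspend();	/* previously: mcpm_cpu_suspend(residency) */

	/* A non-zero return tells cpu_suspend() the power-down did not happen. */
	return 1;
}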
diff --git a/arch/arm/include/asm/perf_event.h b/arch/arm/include/asm/perf_event.h
index d9cf138fd7d4..4f9dec489931 100644
--- a/arch/arm/include/asm/perf_event.h
+++ b/arch/arm/include/asm/perf_event.h
@@ -19,4 +19,11 @@ extern unsigned long perf_misc_flags(struct pt_regs *regs);
#define perf_misc_flags(regs) perf_misc_flags(regs)
#endif
+#define perf_arch_fetch_caller_regs(regs, __ip) { \
+ (regs)->ARM_pc = (__ip); \
+ (regs)->ARM_fp = (unsigned long) __builtin_frame_address(0); \
+ (regs)->ARM_sp = current_stack_pointer; \
+ (regs)->ARM_cpsr = SVC_MODE; \
+}
+
#endif /* __ARM_PERF_EVENT_H__ */
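The new perf_arch_fetch_caller_regs() fills in a minimal pt_regs snapshot (pc, fp, sp, cpsr) for the current kernel context. The generic perf code uses it when an event fires without a real exception frame (software events, tracepoints), so unwinding starts at the caller rather than inside the perf core. For context only, not part of this patch: the generic wrapper in <linux/perf_event.h> expands roughly to

static inline void perf_fetch_caller_regs(struct pt_regs *regs)
{
	perf_arch_fetch_caller_regs(regs, CALLER_ADDR0);
}

and falls back to an empty stub on architectures that do not provide the macro.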
diff --git a/arch/arm/include/asm/pmu.h b/arch/arm/include/asm/pmu.h
index 675e4ab79f68..3fc87dfd77e6 100644
--- a/arch/arm/include/asm/pmu.h
+++ b/arch/arm/include/asm/pmu.h
@@ -24,22 +24,10 @@
* interrupt and passed the address of the low level handler,
* and can be used to implement any platform specific handling
* before or after calling it.
- * @runtime_resume: an optional handler which will be called by the
- * runtime PM framework following a call to pm_runtime_get().
- * Note that if pm_runtime_get() is called more than once in
- * succession this handler will only be called once.
- * @runtime_suspend: an optional handler which will be called by the
- * runtime PM framework following a call to pm_runtime_put().
- * Note that if pm_runtime_get() is called more than once in
- * succession this handler will only be called following the
- * final call to pm_runtime_put() that actually disables the
- * hardware.
*/
struct arm_pmu_platdata {
irqreturn_t (*handle_irq)(int irq, void *dev,
irq_handler_t pmu_handler);
- int (*runtime_resume)(struct device *dev);
- int (*runtime_suspend)(struct device *dev);
};
#ifdef CONFIG_HW_PERF_EVENTS
@@ -92,6 +80,7 @@ struct pmu_hw_events {
struct arm_pmu {
struct pmu pmu;
cpumask_t active_irqs;
+ cpumask_t supported_cpus;
int *irq_affinity;
char *name;
irqreturn_t (*handle_irq)(int irq_num, void *dev);
@@ -122,8 +111,6 @@ struct arm_pmu {
#define to_arm_pmu(p) (container_of(p, struct arm_pmu, pmu))
-extern const struct dev_pm_ops armpmu_dev_pm_ops;
-
int armpmu_register(struct arm_pmu *armpmu, int type);
u64 armpmu_event_update(struct perf_event *event);
@@ -158,6 +145,10 @@ struct pmu_probe_info {
#define XSCALE_PMU_PROBE(_version, _fn) \
PMU_PROBE(ARM_CPU_IMP_INTEL << 24 | _version, ARM_PMU_XSCALE_MASK, _fn)
+int arm_pmu_device_probe(struct platform_device *pdev,
+ const struct of_device_id *of_table,
+ const struct pmu_probe_info *probe_table);
+
#endif /* CONFIG_HW_PERF_EVENTS */
#endif /* __ARM_PMU_H__ */
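arm_pmu_device_probe() becomes the shared probe entry point, so an individual PMU back-end only has to supply a DT match table (compatible string mapped to an init function) plus an optional MIDR-based probe table for non-DT systems. A minimal sketch of such a back-end follows; it is illustrative only, with all foo_* identifiers made up (the real v6/v7/XScale drivers provide their own tables and init functions):

#include <linux/of.h>
#include <linux/platform_device.h>
#include <asm/pmu.h>

/* Hypothetical init function: fills in the struct arm_pmu callbacks. */
static int foo_pmu_init(struct arm_pmu *pmu)
{
	/* set pmu->handle_irq, ->enable, ->map_event, ->num_events, ... */
	pmu->name = "foo_pmu";
	return 0;
}

static const struct of_device_id foo_pmu_of_device_ids[] = {
	{ .compatible = "foo,bar-pmu", .data = foo_pmu_init },
	{ /* sentinel */ }
};

static const struct pmu_probe_info foo_pmu_probe_table[] = {
	/* optional MIDR-based fallback entries go here */
	{ /* sentinel */ }
};

static int foo_pmu_device_probe(struct platform_device *pdev)
{
	return arm_pmu_device_probe(pdev, foo_pmu_of_device_ids,
				    foo_pmu_probe_table);
}

static struct platform_driver foo_pmu_driver = {
	.driver = {
		.name		= "foo-pmu",
		.of_match_table	= foo_pmu_of_device_ids,
	},
	.probe = foo_pmu_device_probe,
};

static int __init register_foo_pmu_driver(void)
{
	return platform_driver_register(&foo_pmu_driver);
}
device_initcall(register_foo_pmu_driver);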
diff --git a/arch/arm/kernel/Makefile b/arch/arm/kernel/Makefile
index 32c0990d1968..e69f7a19735d 100644
--- a/arch/arm/kernel/Makefile
+++ b/arch/arm/kernel/Makefile
@@ -71,7 +71,9 @@ obj-$(CONFIG_CPU_PJ4) += pj4-cp0.o
obj-$(CONFIG_CPU_PJ4B) += pj4-cp0.o
obj-$(CONFIG_IWMMXT) += iwmmxt.o
obj-$(CONFIG_PERF_EVENTS) += perf_regs.o perf_callchain.o
-obj-$(CONFIG_HW_PERF_EVENTS) += perf_event.o perf_event_cpu.o
+obj-$(CONFIG_HW_PERF_EVENTS) += perf_event.o \
+ perf_event_xscale.o perf_event_v6.o \
+ perf_event_v7.o
CFLAGS_pj4-cp0.o := -marm
AFLAGS_iwmmxt.o := -Wa,-mcpu=iwmmxt
obj-$(CONFIG_ARM_CPU_TOPOLOGY) += topology.o
diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
index 4a86a0133ac3..357f57ea83f4 100644
--- a/arch/arm/kernel/perf_event.c
+++ b/arch/arm/kernel/perf_event.c
@@ -11,12 +11,18 @@
*/
#define pr_fmt(fmt) "hw perfevents: " fmt
+#include <linux/bitmap.h>
+#include <linux/cpumask.h>
+#include <linux/export.h>
#include <linux/kernel.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
-#include <linux/pm_runtime.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
#include <linux/irq.h>
#include <linux/irqdesc.h>
+#include <asm/cputype.h>
#include <asm/irq_regs.h>
#include <asm/pmu.h>
@@ -229,6 +235,10 @@ armpmu_add(struct perf_event *event, int flags)
int idx;
int err = 0;
+ /* An event following a process won't be stopped earlier */
+ if (!cpumask_test_cpu(smp_processor_id(), &armpmu->supported_cpus))
+ return -ENOENT;
+
perf_pmu_disable(event->pmu);
/* If we don't have a space for the counter then finish early. */
@@ -344,20 +354,12 @@ static void
armpmu_release_hardware(struct arm_pmu *armpmu)
{
armpmu->free_irq(armpmu);
- pm_runtime_put_sync(&armpmu->plat_device->dev);
}
static int
armpmu_reserve_hardware(struct arm_pmu *armpmu)
{
- int err;
- struct platform_device *pmu_device = armpmu->plat_device;
-
- if (!pmu_device)
- return -ENODEV;
-
- pm_runtime_get_sync(&pmu_device->dev);
- err = armpmu->request_irq(armpmu, armpmu_dispatch_irq);
+ int err = armpmu->request_irq(armpmu, armpmu_dispatch_irq);
if (err) {
armpmu_release_hardware(armpmu);
return err;
@@ -454,6 +456,17 @@ static int armpmu_event_init(struct perf_event *event)
int err = 0;
atomic_t *active_events = &armpmu->active_events;
+ /*
+ * Reject CPU-affine events for CPUs that are of a different class to
+ * that which this PMU handles. Process-following events (where
+ * event->cpu == -1) can be migrated between CPUs, and thus we have to
+ * reject them later (in armpmu_add) if they're scheduled on a
+ * different class of CPU.
+ */
+ if (event->cpu != -1 &&
+ !cpumask_test_cpu(event->cpu, &armpmu->supported_cpus))
+ return -ENOENT;
+
/* does not support taken branch sampling */
if (has_branch_stack(event))
return -EOPNOTSUPP;
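As the new comment in armpmu_event_init() above explains, a CPU-affine event for a CPU outside supported_cpus is rejected with -ENOENT, which the perf core treats as "not my event" and passes on to the next registered PMU; on a big.LITTLE system each cluster's arm_pmu therefore picks up exactly the CPUs it supports, while task-following events are filtered later in armpmu_add(). For context only (not part of the patch), the user-visible behaviour remains the plain perf_event_open() flow, e.g. counting cycles on one CPU:

/* Minimal user-space sketch: count CPU cycles on CPU 0 for one second. */
#include <linux/perf_event.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
	struct perf_event_attr attr;
	uint64_t count;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_HARDWARE;
	attr.config = PERF_COUNT_HW_CPU_CYCLES;

	/* pid = -1, cpu = 0: count everything running on CPU 0 */
	fd = syscall(__NR_perf_event_open, &attr, -1, 0, -1, 0);
	if (fd < 0) {
		perror("perf_event_open");
		return 1;
	}

	sleep(1);
	if (read(fd, &count, sizeof(count)) == sizeof(count))
		printf("cycles on cpu0: %llu\n", (unsigned long long)count);
	close(fd);
	return 0;
}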
@@ -489,6 +502,10 @@ static void armpmu_enable(struct pmu *pmu)
struct pmu_hw_events *hw_events = this_cpu_ptr(armpmu->hw_events);
int enabled = bitmap_weight(hw_events->used_mask, armpmu->num_events);
+ /* For task-bound events we may be called on other CPUs */
+ if (!cpumask_test_cpu(smp_processor_id(), &armpmu->supported_cpus))
+ return;
+
if (enabled)
armpmu->start(armpmu);
}
@@ -496,34 +513,25 @@ static void armpmu_enable(struct pmu *pmu)
static void armpmu_disable(struct pmu *pmu)
{
struct arm_pmu *armpmu = to_arm_pmu(pmu);
- armpmu->stop(armpmu);
-}
-
-#ifdef CONFIG_PM
-static int armpmu_runtime_resume(struct device *dev)
-{
- struct arm_pmu_platdata *plat = dev_get_platdata(dev);
- if (plat && plat->runtime_resume)
- return plat->runtime_resume(dev);
+ /* For task-bound events we may be called on other CPUs */
+ if (!cpumask_test_cpu(smp_processor_id(), &armpmu->supported_cpus))
+ return;
- return 0;
+ armpmu->stop(armpmu);
}
-static int armpmu_runtime_suspend(struct device *dev)
+/*
+ * In heterogeneous systems, events are specific to a particular
+ * microarchitecture, and aren't suitable for another. Thus, only match CPUs of
+ * the same microarchitecture.
+ */
+static int armpmu_filter_match(struct perf_event *event)
{
- struct arm_pmu_platdata *plat = dev_get_platdata(dev);
-
- if (plat && plat->runtime_suspend)
- return plat->runtime_suspend(dev);
-
- return 0;
+ struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
+ unsigned int cpu = smp_processor_id();
+ return cpumask_test_cpu(cpu, &armpmu->supported_cpus);
}
-#endif
-
-const struct dev_pm_ops armpmu_dev_pm_ops = {
- SET_RUNTIME_PM_OPS(armpmu_runtime_suspend, armpmu_runtime_resume, NULL)
-};
static void armpmu_init(struct arm_pmu *armpmu)
{
@@ -539,15 +547,349 @@ static void armpmu_init(struct arm_pmu *armpmu)
.start = armpmu_start,
.stop = armpmu_stop,
.read = armpmu_read,
+ .filter_match = armpmu_filter_match,
};
}
int armpmu_register(struct arm_pmu *armpmu, int type)
{
armpmu_init(armpmu);
- pm_runtime_enable(&armpmu->plat_device->dev);
pr_info("enabled with %s PMU driver, %d counters available\n",
armpmu->name, armpmu->num_events);
return perf_pmu_register(&armpmu->pmu, armpmu->name, type);
}
+/* Set at runtime when we know what CPU type we are. */
+static struct arm_pmu *__oprofile_cpu_pmu;
+
+/*
+ * Despite the names, these two functions are CPU-specific and are used
+ * by the OProfile/perf code.
+ */
+const char *perf_pmu_name(void)
+{
+ if (!__oprofile_cpu_pmu)
+ return NULL;
+
+ return __oprofile_cpu_pmu->name;
+}
+EXPORT_SYMBOL_GPL(perf_pmu_name);
+
+int perf_num_counters(void)
+{
+ int max_events = 0;
+
+ if (__oprofile_cpu_pmu != NULL)
+ max_events = __oprofile_cpu_pmu->num_events;
+
+ return max_events;
+}
+EXPORT_SYMBOL_GPL(perf_num_counters);
+
+static void cpu_pmu_enable_percpu_irq(void *data)
+{
+ int irq = *(int *)data;
+
+ enable_percpu_irq(irq, IRQ_TYPE_NONE);
+}
+
+static void cpu_pmu_disable_percpu_irq(void *data)
+{
+ int irq = *(int *)data;
+
+ disable_percpu_irq(irq);
+}
+
+static void cpu_pmu_free_irq(struct arm_pmu *cpu_pmu)
+{
+ int i, irq, irqs;
+ struct platform_device *pmu_device = cpu_pmu->plat_device;
+ struct pmu_hw_events __percpu *hw_events = cpu_pmu->hw_events;
+
+ irqs = min(pmu_device->num_resources, num_possible_cpus());
+
+ irq = platform_get_irq(pmu_device, 0);
+ if (irq >= 0 && irq_is_percpu(irq)) {
+ on_each_cpu(cpu_pmu_disable_percpu_irq, &irq, 1);
+ free_percpu_irq(irq, &hw_events->percpu_pmu);
+ } else {
+ for (i = 0; i < irqs; ++i) {
+ int cpu = i;
+
+ if (cpu_pmu->irq_affinity)
+ cpu = cpu_pmu->irq_affinity[i];
+
+ if (!cpumask_test_and_clear_cpu(cpu, &cpu_pmu->active_irqs))
+ continue;
+ irq = platform_get_irq(pmu_device, i);
+ if (irq >= 0)
+ free_irq(irq, per_cpu_ptr(&hw_events->percpu_pmu, cpu));
+ }
+ }
+}
+
+static int cpu_pmu_request_irq(struct arm_pmu *cpu_pmu, irq_handler_t handler)
+{
+ int i, err, irq, irqs;
+ struct platform_device *pmu_device = cpu_pmu->plat_device;
+ struct pmu_hw_events __percpu *hw_events = cpu_pmu->hw_events;
+
+ if (!pmu_device)
+ return -ENODEV;
+
+ irqs = min(pmu_device->num_resources, num_possible_cpus());
+ if (irqs < 1) {
+ pr_warn_once("perf/ARM: No irqs for PMU defined, sampling events not supported\n");
+ return 0;
+ }
+
+ irq = platform_get_irq(pmu_device, 0);
+ if (irq >= 0 && irq_is_percpu(irq)) {
+ err = request_percpu_irq(irq, handler, "arm-pmu",
+ &hw_events->percpu_pmu);
+ if (err) {
+ pr_err("unable to request IRQ%d for ARM PMU counters\n",
+ irq);
+ return err;
+ }
+ on_each_cpu(cpu_pmu_enable_percpu_irq, &irq, 1);
+ } else {
+ for (i = 0; i < irqs; ++i) {
+ int cpu = i;
+
+ err = 0;
+ irq = platform_get_irq(pmu_device, i);
+ if (irq < 0)
+ continue;
+
+ if (cpu_pmu->irq_affinity)
+ cpu = cpu_pmu->irq_affinity[i];
+
+ /*
+ * If we have a single PMU interrupt that we can't shift,
+ * assume that we're running on a uniprocessor machine and
+ * continue. Otherwise, continue without this interrupt.
+ */
+ if (irq_set_affinity(irq, cpumask_of(cpu)) && irqs > 1) {
+ pr_warn("unable to set irq affinity (irq=%d, cpu=%u)\n",
+ irq, cpu);
+ continue;
+ }
+
+ err = request_irq(irq, handler,
+ IRQF_NOBALANCING | IRQF_NO_THREAD, "arm-pmu",
+ per_cpu_ptr(&hw_events->percpu_pmu, cpu));
+ if (err) {
+ pr_err("unable to request IRQ%d for ARM PMU counters\n",
+ irq);
+ return err;
+ }
+
+ cpumask_set_cpu(cpu, &cpu_pmu->active_irqs);
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * PMU hardware loses all context when a CPU goes offline.
+ * When a CPU is hotplugged back in, since some hardware registers are
+ * UNKNOWN at reset, the PMU must be explicitly reset to avoid reading
+ * junk values out of them.
+ */
+static int cpu_pmu_notify(struct notifier_block *b, unsigned long action,
+ void *hcpu)
+{
+ int cpu = (unsigned long)hcpu;
+ struct arm_pmu *pmu = container_of(b, struct arm_pmu, hotplug_nb);
+
+ if ((action & ~CPU_TASKS_FROZEN) != CPU_STARTING)
+ return NOTIFY_DONE;
+
+ if (!cpumask_test_cpu(cpu, &pmu->supported_cpus))
+ return NOTIFY_DONE;
+
+ if (pmu->reset)
+ pmu->reset(pmu);
+ else
+ return NOTIFY_DONE;
+
+ return NOTIFY_OK;
+}
+
+static int cpu_pmu_init(struct arm_pmu *cpu_pmu)
+{
+ int err;
+ int cpu;
+ struct pmu_hw_events __percpu *cpu_hw_events;
+
+ cpu_hw_events = alloc_percpu(struct pmu_hw_events);
+ if (!cpu_hw_events)
+ return -ENOMEM;
+
+ cpu_pmu->hotplug_nb.notifier_call = cpu_pmu_notify;
+ err = register_cpu_notifier(&cpu_pmu->hotplug_nb);
+ if (err)
+ goto out_hw_events;
+
+ for_each_possible_cpu(cpu) {
+ struct pmu_hw_events *events = per_cpu_ptr(cpu_hw_events, cpu);
+ raw_spin_lock_init(&events->pmu_lock);
+ events->percpu_pmu = cpu_pmu;
+ }
+
+ cpu_pmu->hw_events = cpu_hw_events;
+ cpu_pmu->request_irq = cpu_pmu_request_irq;
+ cpu_pmu->free_irq = cpu_pmu_free_irq;
+
+ /* Ensure the PMU has sane values out of reset. */
+ if (cpu_pmu->reset)
+ on_each_cpu_mask(&cpu_pmu->supported_cpus, cpu_pmu->reset,
+ cpu_pmu, 1);
+
+ /* If no interrupts available, set the corresponding capability flag */
+ if (!platform_get_irq(cpu_pmu->plat_device, 0))
+ cpu_pmu->pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT;
+
+ return 0;
+
+out_hw_events:
+ free_percpu(cpu_hw_events);
+ return err;
+}
+
+static void cpu_pmu_destroy(struct arm_pmu *cpu_pmu)
+{
+ unregister_cpu_notifier(&cpu_pmu->hotplug_nb);
+ free_percpu(cpu_pmu->hw_events);
+}
+
+/*
+ * CPU PMU identification and probing.
+ */
+static int probe_current_pmu(struct arm_pmu *pmu,
+ const struct pmu_probe_info *info)
+{
+ int cpu = get_cpu();
+ unsigned int cpuid = read_cpuid_id();
+ int ret = -ENODEV;
+
+ pr_info("probing PMU on CPU %d\n", cpu);
+
+ for (; info->init != NULL; info++) {
+ if ((cpuid & info->mask) != info->cpuid)
+ continue;
+ ret = info->init(pmu);
+ break;
+ }
+
+ put_cpu();
+ return ret;
+}
+
+static int of_pmu_irq_cfg(struct arm_pmu *pmu)
+{
+ int i, irq, *irqs;
+ struct platform_device *pdev = pmu->plat_device;
+
+ /* Don't bother with PPIs; they're already affine */
+ irq = platform_get_irq(pdev, 0);
+ if (irq >= 0 && irq_is_percpu(irq))
+ return 0;
+
+ irqs = kcalloc(pdev->num_resources, sizeof(*irqs), GFP_KERNEL);
+ if (!irqs)
+ return -ENOMEM;
+
+ for (i = 0; i < pdev->num_resources; ++i) {
+ struct device_node *dn;
+ int cpu;
+
+ dn = of_parse_phandle(pdev->dev.of_node, "interrupt-affinity",
+ i);
+ if (!dn) {
+ pr_warn("Failed to parse %s/interrupt-affinity[%d]\n",
+ of_node_full_name(pdev->dev.of_node), i);
+ break;
+ }
+
+ for_each_possible_cpu(cpu)
+ if (arch_find_n_match_cpu_physical_id(dn, cpu, NULL))
+ break;
+
+ of_node_put(dn);
+ if (cpu >= nr_cpu_ids) {
+ pr_warn("Failed to find logical CPU for %s\n",
+ dn->name);
+ break;
+ }
+
+ irqs[i] = cpu;
+ cpumask_set_cpu(cpu, &pmu->supported_cpus);
+ }
+
+ if (i == pdev->num_resources) {
+ pmu->irq_affinity = irqs;
+ } else {
+ kfree(irqs);
+ cpumask_setall(&pmu->supported_cpus);
+ }
+
+ return 0;
+}
+
+int arm_pmu_device_probe(struct platform_device *pdev,
+ const struct of_device_id *of_table,
+ const struct pmu_probe_info *probe_table)
+{
+ const struct of_device_id *of_id;
+ const int (*init_fn)(struct arm_pmu *);