author     Linus Torvalds <torvalds@linux-foundation.org>  2024-09-16 06:55:07 +0200
committer  Linus Torvalds <torvalds@linux-foundation.org>  2024-09-16 06:55:07 +0200
commit     114143a595895c03fbefccfd8346fc51fb4908ed
tree       161f753ad69a2aabe7cd7ac7cc493c3678dbd760 /drivers/perf
parent     8617d7d6298f54dfef4038281863270b5864fe83
parent     75078ba2b38a38d94017bd334f71aaed205e30a4
Merge tag 'arm64-upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux
Pull arm64 updates from Will Deacon:
 "The highlights are support for Arm's "Permission Overlay Extension"
  using memory protection keys, support for running as a protected guest
  on Android as well as perf support for a bunch of new interconnect
  PMUs.

  Summary:

  ACPI:
   - Enable PMCG erratum workaround for HiSilicon HIP10 and 11 platforms.
   - Ensure arm64-specific IORT header is covered by MAINTAINERS.

  CPU Errata:
   - Enable workaround for hardware access/dirty issue on Ampere-1A cores.

  Memory management:
   - Define PHYSMEM_END to fix a crash in the amdgpu driver.
   - Avoid tripping over invalid kernel mappings on the kexec() path.
   - Userspace support for the Permission Overlay Extension (POE) using
     protection keys.

  Perf and PMUs:
   - Add support for the "fixed instruction counter" extension in the
     CPU PMU architecture.
   - Extend and fix the event encodings for Apple's M1 CPU PMU.
   - Allow LSM hooks to decide on SPE permissions for physical profiling.
   - Add support for the CMN S3 and NI-700 PMUs.

  Confidential Computing:
   - Add support for booting an arm64 kernel as a protected guest under
     Android's "Protected KVM" (pKVM) hypervisor.

  Selftests:
   - Fix vector length issues in the SVE/SME sigreturn tests
   - Fix build warning in the ptrace tests.

  Timers:
   - Add support for PR_{G,S}ET_TSC so that 'rr' can deal with
     non-determinism arising from the architected counter.

  Miscellaneous:
   - Rework our IPI-based CPU stopping code to try NMIs if regular IPIs
     don't succeed.
   - Minor fixes and cleanups"

* tag 'arm64-upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux: (94 commits)
  perf: arm-ni: Fix an NULL vs IS_ERR() bug
  arm64: hibernate: Fix warning for cast from restricted gfp_t
  arm64: esr: Define ESR_ELx_EC_* constants as UL
  arm64: pkeys: remove redundant WARN
  perf: arm_pmuv3: Use BR_RETIRED for HW branch event if enabled
  MAINTAINERS: List Arm interconnect PMUs as supported
  perf: Add driver for Arm NI-700 interconnect PMU
  dt-bindings/perf: Add Arm NI-700 PMU
  perf/arm-cmn: Improve format attr printing
  perf/arm-cmn: Clean up unnecessary NUMA_NO_NODE check
  arm64/mm: use lm_alias() with addresses passed to memblock_free()
  mm: arm64: document why pte is not advanced in contpte_ptep_set_access_flags()
  arm64: Expose the end of the linear map in PHYSMEM_END
  arm64: trans_pgd: mark PTEs entries as valid to avoid dead kexec()
  arm64/mm: Delete __init region from memblock.reserved
  perf/arm-cmn: Support CMN S3
  dt-bindings: perf: arm-cmn: Add CMN S3
  perf/arm-cmn: Refactor DTC PMU register access
  perf/arm-cmn: Make cycle counts less surprising
  perf/arm-cmn: Improve build-time assertion
  ...
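The PR_{G,S}ET_TSC support mentioned above is driven from userspace through prctl(); a minimal sketch of that usage follows, using the standard <sys/prctl.h> constants (program structure and error handling are illustrative, not taken from this merge):

#include <stdio.h>
#include <sys/prctl.h>

int main(void)
{
	int mode;

	/* Trap counter reads so a tracer like rr can intercept and replay them */
	if (prctl(PR_SET_TSC, PR_TSC_SIGSEGV, 0, 0, 0))
		perror("PR_SET_TSC");

	/* Read the current mode back */
	if (!prctl(PR_GET_TSC, &mode, 0, 0, 0))
		printf("counter access: %s\n",
		       mode == PR_TSC_ENABLE ? "direct" : "trapped");

	return 0;
}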
Diffstat (limited to 'drivers/perf')
-rw-r--r--  drivers/perf/Kconfig                     |    7
-rw-r--r--  drivers/perf/Makefile                    |    1
-rw-r--r--  drivers/perf/alibaba_uncore_drw_pmu.c    |    2
-rw-r--r--  drivers/perf/apple_m1_cpu_pmu.c          |  182
-rw-r--r--  drivers/perf/arm-cmn.c                   |  318
-rw-r--r--  drivers/perf/arm-ni.c                    |  781
-rw-r--r--  drivers/perf/arm_pmu.c                   |   11
-rw-r--r--  drivers/perf/arm_pmu_platform.c          |    2
-rw-r--r--  drivers/perf/arm_pmuv3.c                 |  148
-rw-r--r--  drivers/perf/arm_spe_pmu.c               |    9
-rw-r--r--  drivers/perf/arm_v6_pmu.c                |    6
-rw-r--r--  drivers/perf/arm_v7_pmu.c                |   77
-rw-r--r--  drivers/perf/arm_xscale_pmu.c            |   12
-rw-r--r--  drivers/perf/dwc_pcie_pmu.c              |   22
-rw-r--r--  drivers/perf/hisilicon/hisi_pcie_pmu.c   |   34
15 files changed, 1243 insertions, 369 deletions
diff --git a/drivers/perf/Kconfig b/drivers/perf/Kconfig
index aa9530b4064f..bab8ba64162f 100644
--- a/drivers/perf/Kconfig
+++ b/drivers/perf/Kconfig
@@ -48,6 +48,13 @@ config ARM_CMN
Support for PMU events monitoring on the Arm CMN-600 Coherent Mesh
Network interconnect.
+config ARM_NI
+ tristate "Arm NI-700 PMU support"
+ depends on ARM64 || COMPILE_TEST
+ help
+ Support for PMU events monitoring on the Arm NI-700 Network-on-Chip
+ interconnect and family.
+
config ARM_PMU
depends on ARM || ARM64
bool "ARM PMU framework"
diff --git a/drivers/perf/Makefile b/drivers/perf/Makefile
index d43df81d52f7..8268f38e42c5 100644
--- a/drivers/perf/Makefile
+++ b/drivers/perf/Makefile
@@ -3,6 +3,7 @@ obj-$(CONFIG_ARM_CCI_PMU) += arm-cci.o
obj-$(CONFIG_ARM_CCN) += arm-ccn.o
obj-$(CONFIG_ARM_CMN) += arm-cmn.o
obj-$(CONFIG_ARM_DSU_PMU) += arm_dsu_pmu.o
+obj-$(CONFIG_ARM_NI) += arm-ni.o
obj-$(CONFIG_ARM_PMU) += arm_pmu.o arm_pmu_platform.o
obj-$(CONFIG_ARM_PMU_ACPI) += arm_pmu_acpi.o
obj-$(CONFIG_ARM_PMUV3) += arm_pmuv3.o
diff --git a/drivers/perf/alibaba_uncore_drw_pmu.c b/drivers/perf/alibaba_uncore_drw_pmu.c
index 38a2947ae813..c6ff1bc7d336 100644
--- a/drivers/perf/alibaba_uncore_drw_pmu.c
+++ b/drivers/perf/alibaba_uncore_drw_pmu.c
@@ -400,7 +400,7 @@ static irqreturn_t ali_drw_pmu_isr(int irq_num, void *data)
}
/* clear common counter intr status */
- clr_status = FIELD_PREP(ALI_DRW_PMCOM_CNT_OV_INTR_MASK, 1);
+ clr_status = FIELD_PREP(ALI_DRW_PMCOM_CNT_OV_INTR_MASK, status);
writel(clr_status,
drw_pmu->cfg_base + ALI_DRW_PMU_OV_INTR_CLR);
}
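For context on the fix above: FIELD_PREP() from <linux/bitfield.h> shifts a value into position under a mask, so writing back the raw overflow status acknowledges every overflowed common counter rather than just bit 0. A minimal sketch, with a hypothetical field position standing in for ALI_DRW_PMCOM_CNT_OV_INTR_MASK:

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/types.h>

/* Hypothetical layout: common-counter overflow bits live in [23:8] */
#define EXAMPLE_OV_INTR_MASK	GENMASK(23, 8)

static u32 example_clr_status(u32 status)
{
	/* status has one bit per overflowed counter; place them all in the field */
	return FIELD_PREP(EXAMPLE_OV_INTR_MASK, status);
}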
diff --git a/drivers/perf/apple_m1_cpu_pmu.c b/drivers/perf/apple_m1_cpu_pmu.c
index f322e5ca1114..1d4d01e1275e 100644
--- a/drivers/perf/apple_m1_cpu_pmu.c
+++ b/drivers/perf/apple_m1_cpu_pmu.c
@@ -47,46 +47,79 @@
* implementations, we'll have to introduce per cpu-type tables.
*/
enum m1_pmu_events {
- M1_PMU_PERFCTR_UNKNOWN_01 = 0x01,
- M1_PMU_PERFCTR_CPU_CYCLES = 0x02,
- M1_PMU_PERFCTR_INSTRUCTIONS = 0x8c,
- M1_PMU_PERFCTR_UNKNOWN_8d = 0x8d,
- M1_PMU_PERFCTR_UNKNOWN_8e = 0x8e,
- M1_PMU_PERFCTR_UNKNOWN_8f = 0x8f,
- M1_PMU_PERFCTR_UNKNOWN_90 = 0x90,
- M1_PMU_PERFCTR_UNKNOWN_93 = 0x93,
- M1_PMU_PERFCTR_UNKNOWN_94 = 0x94,
- M1_PMU_PERFCTR_UNKNOWN_95 = 0x95,
- M1_PMU_PERFCTR_UNKNOWN_96 = 0x96,
- M1_PMU_PERFCTR_UNKNOWN_97 = 0x97,
- M1_PMU_PERFCTR_UNKNOWN_98 = 0x98,
- M1_PMU_PERFCTR_UNKNOWN_99 = 0x99,
- M1_PMU_PERFCTR_UNKNOWN_9a = 0x9a,
- M1_PMU_PERFCTR_UNKNOWN_9b = 0x9b,
- M1_PMU_PERFCTR_UNKNOWN_9c = 0x9c,
- M1_PMU_PERFCTR_UNKNOWN_9f = 0x9f,
- M1_PMU_PERFCTR_UNKNOWN_bf = 0xbf,
- M1_PMU_PERFCTR_UNKNOWN_c0 = 0xc0,
- M1_PMU_PERFCTR_UNKNOWN_c1 = 0xc1,
- M1_PMU_PERFCTR_UNKNOWN_c4 = 0xc4,
- M1_PMU_PERFCTR_UNKNOWN_c5 = 0xc5,
- M1_PMU_PERFCTR_UNKNOWN_c6 = 0xc6,
- M1_PMU_PERFCTR_UNKNOWN_c8 = 0xc8,
- M1_PMU_PERFCTR_UNKNOWN_ca = 0xca,
- M1_PMU_PERFCTR_UNKNOWN_cb = 0xcb,
- M1_PMU_PERFCTR_UNKNOWN_f5 = 0xf5,
- M1_PMU_PERFCTR_UNKNOWN_f6 = 0xf6,
- M1_PMU_PERFCTR_UNKNOWN_f7 = 0xf7,
- M1_PMU_PERFCTR_UNKNOWN_f8 = 0xf8,
- M1_PMU_PERFCTR_UNKNOWN_fd = 0xfd,
- M1_PMU_PERFCTR_LAST = M1_PMU_CFG_EVENT,
+ M1_PMU_PERFCTR_RETIRE_UOP = 0x1,
+ M1_PMU_PERFCTR_CORE_ACTIVE_CYCLE = 0x2,
+ M1_PMU_PERFCTR_L1I_TLB_FILL = 0x4,
+ M1_PMU_PERFCTR_L1D_TLB_FILL = 0x5,
+ M1_PMU_PERFCTR_MMU_TABLE_WALK_INSTRUCTION = 0x7,
+ M1_PMU_PERFCTR_MMU_TABLE_WALK_DATA = 0x8,
+ M1_PMU_PERFCTR_L2_TLB_MISS_INSTRUCTION = 0xa,
+ M1_PMU_PERFCTR_L2_TLB_MISS_DATA = 0xb,
+ M1_PMU_PERFCTR_MMU_VIRTUAL_MEMORY_FAULT_NONSPEC = 0xd,
+ M1_PMU_PERFCTR_SCHEDULE_UOP = 0x52,
+ M1_PMU_PERFCTR_INTERRUPT_PENDING = 0x6c,
+ M1_PMU_PERFCTR_MAP_STALL_DISPATCH = 0x70,
+ M1_PMU_PERFCTR_MAP_REWIND = 0x75,
+ M1_PMU_PERFCTR_MAP_STALL = 0x76,
+ M1_PMU_PERFCTR_MAP_INT_UOP = 0x7c,
+ M1_PMU_PERFCTR_MAP_LDST_UOP = 0x7d,
+ M1_PMU_PERFCTR_MAP_SIMD_UOP = 0x7e,
+ M1_PMU_PERFCTR_FLUSH_RESTART_OTHER_NONSPEC = 0x84,
+ M1_PMU_PERFCTR_INST_ALL = 0x8c,
+ M1_PMU_PERFCTR_INST_BRANCH = 0x8d,
+ M1_PMU_PERFCTR_INST_BRANCH_CALL = 0x8e,
+ M1_PMU_PERFCTR_INST_BRANCH_RET = 0x8f,
+ M1_PMU_PERFCTR_INST_BRANCH_TAKEN = 0x90,
+ M1_PMU_PERFCTR_INST_BRANCH_INDIR = 0x93,
+ M1_PMU_PERFCTR_INST_BRANCH_COND = 0x94,
+ M1_PMU_PERFCTR_INST_INT_LD = 0x95,
+ M1_PMU_PERFCTR_INST_INT_ST = 0x96,
+ M1_PMU_PERFCTR_INST_INT_ALU = 0x97,
+ M1_PMU_PERFCTR_INST_SIMD_LD = 0x98,
+ M1_PMU_PERFCTR_INST_SIMD_ST = 0x99,
+ M1_PMU_PERFCTR_INST_SIMD_ALU = 0x9a,
+ M1_PMU_PERFCTR_INST_LDST = 0x9b,
+ M1_PMU_PERFCTR_INST_BARRIER = 0x9c,
+ M1_PMU_PERFCTR_UNKNOWN_9f = 0x9f,
+ M1_PMU_PERFCTR_L1D_TLB_ACCESS = 0xa0,
+ M1_PMU_PERFCTR_L1D_TLB_MISS = 0xa1,
+ M1_PMU_PERFCTR_L1D_CACHE_MISS_ST = 0xa2,
+ M1_PMU_PERFCTR_L1D_CACHE_MISS_LD = 0xa3,
+ M1_PMU_PERFCTR_LD_UNIT_UOP = 0xa6,
+ M1_PMU_PERFCTR_ST_UNIT_UOP = 0xa7,
+ M1_PMU_PERFCTR_L1D_CACHE_WRITEBACK = 0xa8,
+ M1_PMU_PERFCTR_LDST_X64_UOP = 0xb1,
+ M1_PMU_PERFCTR_LDST_XPG_UOP = 0xb2,
+ M1_PMU_PERFCTR_ATOMIC_OR_EXCLUSIVE_SUCC = 0xb3,
+ M1_PMU_PERFCTR_ATOMIC_OR_EXCLUSIVE_FAIL = 0xb4,
+ M1_PMU_PERFCTR_L1D_CACHE_MISS_LD_NONSPEC = 0xbf,
+ M1_PMU_PERFCTR_L1D_CACHE_MISS_ST_NONSPEC = 0xc0,
+ M1_PMU_PERFCTR_L1D_TLB_MISS_NONSPEC = 0xc1,
+ M1_PMU_PERFCTR_ST_MEMORY_ORDER_VIOLATION_NONSPEC = 0xc4,
+ M1_PMU_PERFCTR_BRANCH_COND_MISPRED_NONSPEC = 0xc5,
+ M1_PMU_PERFCTR_BRANCH_INDIR_MISPRED_NONSPEC = 0xc6,
+ M1_PMU_PERFCTR_BRANCH_RET_INDIR_MISPRED_NONSPEC = 0xc8,
+ M1_PMU_PERFCTR_BRANCH_CALL_INDIR_MISPRED_NONSPEC = 0xca,
+ M1_PMU_PERFCTR_BRANCH_MISPRED_NONSPEC = 0xcb,
+ M1_PMU_PERFCTR_L1I_TLB_MISS_DEMAND = 0xd4,
+ M1_PMU_PERFCTR_MAP_DISPATCH_BUBBLE = 0xd6,
+ M1_PMU_PERFCTR_L1I_CACHE_MISS_DEMAND = 0xdb,
+ M1_PMU_PERFCTR_FETCH_RESTART = 0xde,
+ M1_PMU_PERFCTR_ST_NT_UOP = 0xe5,
+ M1_PMU_PERFCTR_LD_NT_UOP = 0xe6,
+ M1_PMU_PERFCTR_UNKNOWN_f5 = 0xf5,
+ M1_PMU_PERFCTR_UNKNOWN_f6 = 0xf6,
+ M1_PMU_PERFCTR_UNKNOWN_f7 = 0xf7,
+ M1_PMU_PERFCTR_UNKNOWN_f8 = 0xf8,
+ M1_PMU_PERFCTR_UNKNOWN_fd = 0xfd,
+ M1_PMU_PERFCTR_LAST = M1_PMU_CFG_EVENT,
/*
* From this point onwards, these are not actual HW events,
* but attributes that get stored in hw->config_base.
*/
- M1_PMU_CFG_COUNT_USER = BIT(8),
- M1_PMU_CFG_COUNT_KERNEL = BIT(9),
+ M1_PMU_CFG_COUNT_USER = BIT(8),
+ M1_PMU_CFG_COUNT_KERNEL = BIT(9),
};
/*
@@ -96,46 +129,45 @@ enum m1_pmu_events {
* counters had strange affinities.
*/
static const u16 m1_pmu_event_affinity[M1_PMU_PERFCTR_LAST + 1] = {
- [0 ... M1_PMU_PERFCTR_LAST] = ANY_BUT_0_1,
- [M1_PMU_PERFCTR_UNKNOWN_01] = BIT(7),
- [M1_PMU_PERFCTR_CPU_CYCLES] = ANY_BUT_0_1 | BIT(0),
- [M1_PMU_PERFCTR_INSTRUCTIONS] = BIT(7) | BIT(1),
- [M1_PMU_PERFCTR_UNKNOWN_8d] = ONLY_5_6_7,
- [M1_PMU_PERFCTR_UNKNOWN_8e] = ONLY_5_6_7,
- [M1_PMU_PERFCTR_UNKNOWN_8f] = ONLY_5_6_7,
- [M1_PMU_PERFCTR_UNKNOWN_90] = ONLY_5_6_7,
- [M1_PMU_PERFCTR_UNKNOWN_93] = ONLY_5_6_7,
- [M1_PMU_PERFCTR_UNKNOWN_94] = ONLY_5_6_7,
- [M1_PMU_PERFCTR_UNKNOWN_95] = ONLY_5_6_7,
- [M1_PMU_PERFCTR_UNKNOWN_96] = ONLY_5_6_7,
- [M1_PMU_PERFCTR_UNKNOWN_97] = BIT(7),
- [M1_PMU_PERFCTR_UNKNOWN_98] = ONLY_5_6_7,
- [M1_PMU_PERFCTR_UNKNOWN_99] = ONLY_5_6_7,
- [M1_PMU_PERFCTR_UNKNOWN_9a] = BIT(7),
- [M1_PMU_PERFCTR_UNKNOWN_9b] = ONLY_5_6_7,
- [M1_PMU_PERFCTR_UNKNOWN_9c] = ONLY_5_6_7,
- [M1_PMU_PERFCTR_UNKNOWN_9f] = BIT(7),
- [M1_PMU_PERFCTR_UNKNOWN_bf] = ONLY_5_6_7,
- [M1_PMU_PERFCTR_UNKNOWN_c0] = ONLY_5_6_7,
- [M1_PMU_PERFCTR_UNKNOWN_c1] = ONLY_5_6_7,
- [M1_PMU_PERFCTR_UNKNOWN_c4] = ONLY_5_6_7,
- [M1_PMU_PERFCTR_UNKNOWN_c5] = ONLY_5_6_7,
- [M1_PMU_PERFCTR_UNKNOWN_c6] = ONLY_5_6_7,
- [M1_PMU_PERFCTR_UNKNOWN_c8] = ONLY_5_6_7,
- [M1_PMU_PERFCTR_UNKNOWN_ca] = ONLY_5_6_7,
- [M1_PMU_PERFCTR_UNKNOWN_cb] = ONLY_5_6_7,
- [M1_PMU_PERFCTR_UNKNOWN_f5] = ONLY_2_4_6,
- [M1_PMU_PERFCTR_UNKNOWN_f6] = ONLY_2_4_6,
- [M1_PMU_PERFCTR_UNKNOWN_f7] = ONLY_2_4_6,
- [M1_PMU_PERFCTR_UNKNOWN_f8] = ONLY_2_TO_7,
- [M1_PMU_PERFCTR_UNKNOWN_fd] = ONLY_2_4_6,
+ [0 ... M1_PMU_PERFCTR_LAST] = ANY_BUT_0_1,
+ [M1_PMU_PERFCTR_RETIRE_UOP] = BIT(7),
+ [M1_PMU_PERFCTR_CORE_ACTIVE_CYCLE] = ANY_BUT_0_1 | BIT(0),
+ [M1_PMU_PERFCTR_INST_ALL] = BIT(7) | BIT(1),
+ [M1_PMU_PERFCTR_INST_BRANCH] = ONLY_5_6_7,
+ [M1_PMU_PERFCTR_INST_BRANCH_CALL] = ONLY_5_6_7,
+ [M1_PMU_PERFCTR_INST_BRANCH_RET] = ONLY_5_6_7,
+ [M1_PMU_PERFCTR_INST_BRANCH_TAKEN] = ONLY_5_6_7,
+ [M1_PMU_PERFCTR_INST_BRANCH_INDIR] = ONLY_5_6_7,
+ [M1_PMU_PERFCTR_INST_BRANCH_COND] = ONLY_5_6_7,
+ [M1_PMU_PERFCTR_INST_INT_LD] = ONLY_5_6_7,
+ [M1_PMU_PERFCTR_INST_INT_ST] = BIT(7),
+ [M1_PMU_PERFCTR_INST_INT_ALU] = BIT(7),
+ [M1_PMU_PERFCTR_INST_SIMD_LD] = ONLY_5_6_7,
+ [M1_PMU_PERFCTR_INST_SIMD_ST] = ONLY_5_6_7,
+ [M1_PMU_PERFCTR_INST_SIMD_ALU] = BIT(7),
+ [M1_PMU_PERFCTR_INST_LDST] = BIT(7),
+ [M1_PMU_PERFCTR_INST_BARRIER] = ONLY_5_6_7,
+ [M1_PMU_PERFCTR_UNKNOWN_9f] = BIT(7),
+ [M1_PMU_PERFCTR_L1D_CACHE_MISS_LD_NONSPEC] = ONLY_5_6_7,
+ [M1_PMU_PERFCTR_L1D_CACHE_MISS_ST_NONSPEC] = ONLY_5_6_7,
+ [M1_PMU_PERFCTR_L1D_TLB_MISS_NONSPEC] = ONLY_5_6_7,
+ [M1_PMU_PERFCTR_ST_MEMORY_ORDER_VIOLATION_NONSPEC] = ONLY_5_6_7,
+ [M1_PMU_PERFCTR_BRANCH_COND_MISPRED_NONSPEC] = ONLY_5_6_7,
+ [M1_PMU_PERFCTR_BRANCH_INDIR_MISPRED_NONSPEC] = ONLY_5_6_7,
+ [M1_PMU_PERFCTR_BRANCH_RET_INDIR_MISPRED_NONSPEC] = ONLY_5_6_7,
+ [M1_PMU_PERFCTR_BRANCH_CALL_INDIR_MISPRED_NONSPEC] = ONLY_5_6_7,
+ [M1_PMU_PERFCTR_BRANCH_MISPRED_NONSPEC] = ONLY_5_6_7,
+ [M1_PMU_PERFCTR_UNKNOWN_f5] = ONLY_2_4_6,
+ [M1_PMU_PERFCTR_UNKNOWN_f6] = ONLY_2_4_6,
+ [M1_PMU_PERFCTR_UNKNOWN_f7] = ONLY_2_4_6,
+ [M1_PMU_PERFCTR_UNKNOWN_f8] = ONLY_2_TO_7,
+ [M1_PMU_PERFCTR_UNKNOWN_fd] = ONLY_2_4_6,
};
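The affinity masks above constrain which counter index each event may use; a minimal sketch of how such a mask might be consulted when choosing a free counter (names and return convention are illustrative, not the driver's actual get_event_idx callback):

#include <linux/bits.h>
#include <linux/errno.h>
#include <linux/types.h>

/* Pick the lowest free counter that the event's affinity mask allows */
static int example_pick_counter(unsigned long used_mask, u16 affinity,
				unsigned int nr_counters)
{
	unsigned int idx;

	for (idx = 0; idx < nr_counters; idx++)
		if ((affinity & BIT(idx)) && !(used_mask & BIT(idx)))
			return idx;

	return -EAGAIN;
}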
static const unsigned m1_pmu_perf_map[PERF_COUNT_HW_MAX] = {
PERF_MAP_ALL_UNSUPPORTED,
- [PERF_COUNT_HW_CPU_CYCLES] = M1_PMU_PERFCTR_CPU_CYCLES,
- [PERF_COUNT_HW_INSTRUCTIONS] = M1_PMU_PERFCTR_INSTRUCTIONS,
- /* No idea about the rest yet */
+ [PERF_COUNT_HW_CPU_CYCLES] = M1_PMU_PERFCTR_CORE_ACTIVE_CYCLE,
+ [PERF_COUNT_HW_INSTRUCTIONS] = M1_PMU_PERFCTR_INST_ALL,
};
/* sysfs definitions */
@@ -154,8 +186,8 @@ static ssize_t m1_pmu_events_sysfs_show(struct device *dev,
PMU_EVENT_ATTR_ID(name, m1_pmu_events_sysfs_show, config)
static struct attribute *m1_pmu_event_attrs[] = {
- M1_PMU_EVENT_ATTR(cycles, M1_PMU_PERFCTR_CPU_CYCLES),
- M1_PMU_EVENT_ATTR(instructions, M1_PMU_PERFCTR_INSTRUCTIONS),
+ M1_PMU_EVENT_ATTR(cycles, M1_PMU_PERFCTR_CORE_ACTIVE_CYCLE),
+ M1_PMU_EVENT_ATTR(instructions, M1_PMU_PERFCTR_INST_ALL),
NULL,
};
@@ -400,7 +432,7 @@ static irqreturn_t m1_pmu_handle_irq(struct arm_pmu *cpu_pmu)
regs = get_irq_regs();
- for (idx = 0; idx < cpu_pmu->num_events; idx++) {
+ for_each_set_bit(idx, cpu_pmu->cntr_mask, M1_PMU_NR_COUNTERS) {
struct perf_event *event = cpuc->events[idx];
struct perf_sample_data data;
@@ -560,7 +592,7 @@ static int m1_pmu_init(struct arm_pmu *cpu_pmu, u32 flags)
cpu_pmu->reset = m1_pmu_reset;
cpu_pmu->set_event_filter = m1_pmu_set_event_filter;
- cpu_pmu->num_events = M1_PMU_NR_COUNTERS;
+ bitmap_set(cpu_pmu->cntr_mask, 0, M1_PMU_NR_COUNTERS);
cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] = &m1_pmu_events_attr_group;
cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] = &m1_pmu_format_attr_group;
return 0;
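The cntr_mask conversion above replaces a plain counter count with a bitmap of implemented counter indices, which also accommodates fixed or sparse counters. A minimal sketch of the idiom, using illustrative names rather than the arm_pmu structures:

#include <linux/bitmap.h>
#include <linux/printk.h>

#define EXAMPLE_NR_COUNTERS	10

static void example_walk_counters(void)
{
	DECLARE_BITMAP(cntr_mask, EXAMPLE_NR_COUNTERS) = { 0 };
	unsigned int idx;

	/* Declare counters 0..9 as implemented... */
	bitmap_set(cntr_mask, 0, EXAMPLE_NR_COUNTERS);

	/* ...and visit only the implemented ones */
	for_each_set_bit(idx, cntr_mask, EXAMPLE_NR_COUNTERS)
		pr_debug("counter %u is implemented\n", idx);
}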
diff --git a/drivers/perf/arm-cmn.c b/drivers/perf/arm-cmn.c
index c932d9d355cf..397a46410f7c 100644
--- a/drivers/perf/arm-cmn.c
+++ b/drivers/perf/arm-cmn.c
@@ -24,14 +24,6 @@
#define CMN_NI_NODE_ID GENMASK_ULL(31, 16)
#define CMN_NI_LOGICAL_ID GENMASK_ULL(47, 32)
-#define CMN_NODEID_DEVID(reg) ((reg) & 3)
-#define CMN_NODEID_EXT_DEVID(reg) ((reg) & 1)
-#define CMN_NODEID_PID(reg) (((reg) >> 2) & 1)
-#define CMN_NODEID_EXT_PID(reg) (((reg) >> 1) & 3)
-#define CMN_NODEID_1x1_PID(reg) (((reg) >> 2) & 7)
-#define CMN_NODEID_X(reg, bits) ((reg) >> (3 + (bits)))
-#define CMN_NODEID_Y(reg, bits) (((reg) >> 3) & ((1U << (bits)) - 1))
-
#define CMN_CHILD_INFO 0x0080
#define CMN_CI_CHILD_COUNT GENMASK_ULL(15, 0)
#define CMN_CI_CHILD_PTR_OFFSET GENMASK_ULL(31, 16)
@@ -43,6 +35,9 @@
#define CMN_MAX_XPS (CMN_MAX_DIMENSION * CMN_MAX_DIMENSION)
#define CMN_MAX_DTMS (CMN_MAX_XPS + (CMN_MAX_DIMENSION - 1) * 4)
+/* Currently XPs are the node type we can have most of; others top out at 128 */
+#define CMN_MAX_NODES_PER_EVENT CMN_MAX_XPS
+
/* The CFG node has various info besides the discovery tree */
#define CMN_CFGM_PERIPH_ID_01 0x0008
#define CMN_CFGM_PID0_PART_0 GENMASK_ULL(7, 0)
@@ -50,24 +45,28 @@
#define CMN_CFGM_PERIPH_ID_23 0x0010
#define CMN_CFGM_PID2_REVISION GENMASK_ULL(7, 4)
-#define CMN_CFGM_INFO_GLOBAL 0x900
+#define CMN_CFGM_INFO_GLOBAL 0x0900
#define CMN_INFO_MULTIPLE_DTM_EN BIT_ULL(63)
#define CMN_INFO_RSP_VC_NUM GENMASK_ULL(53, 52)
#define CMN_INFO_DAT_VC_NUM GENMASK_ULL(51, 50)
+#define CMN_INFO_DEVICE_ISO_ENABLE BIT_ULL(44)
-#define CMN_CFGM_INFO_GLOBAL_1 0x908
+#define CMN_CFGM_INFO_GLOBAL_1 0x0908
#define CMN_INFO_SNP_VC_NUM GENMASK_ULL(3, 2)
#define CMN_INFO_REQ_VC_NUM GENMASK_ULL(1, 0)
/* XPs also have some local topology info which has uses too */
#define CMN_MXP__CONNECT_INFO(p) (0x0008 + 8 * (p))
-#define CMN__CONNECT_INFO_DEVICE_TYPE GENMASK_ULL(4, 0)
+#define CMN__CONNECT_INFO_DEVICE_TYPE GENMASK_ULL(5, 0)
#define CMN_MAX_PORTS 6
#define CI700_CONNECT_INFO_P2_5_OFFSET 0x10
/* PMU registers occupy the 3rd 4KB page of each node's region */
#define CMN_PMU_OFFSET 0x2000
+/* ...except when they don't :( */
+#define CMN_S3_DTM_OFFSET 0xa000
+#define CMN_S3_PMU_OFFSET 0xd900
/* For most nodes, this is all there is */
#define CMN_PMU_EVENT_SEL 0x000
@@ -78,7 +77,8 @@
/* Technically this is 4 bits wide on DNs, but we only use 2 there anyway */
#define CMN__PMU_OCCUP1_ID GENMASK_ULL(34, 32)
-/* HN-Ps are weird... */
+/* Some types are designed to coexist with another device in the same node */
+#define CMN_CCLA_PMU_EVENT_SEL 0x008
#define CMN_HNP_PMU_EVENT_SEL 0x008
/* DTMs live in the PMU space of XP registers */
@@ -123,27 +123,28 @@
/* The DTC node is where the magic happens */
#define CMN_DT_DTC_CTL 0x0a00
#define CMN_DT_DTC_CTL_DT_EN BIT(0)
+#define CMN_DT_DTC_CTL_CG_DISABLE BIT(10)
/* DTC counters are paired in 64-bit registers on a 16-byte stride. Yuck */
#define _CMN_DT_CNT_REG(n) ((((n) / 2) * 4 + (n) % 2) * 4)
-#define CMN_DT_PMEVCNT(n) (CMN_PMU_OFFSET + _CMN_DT_CNT_REG(n))
-#define CMN_DT_PMCCNTR (CMN_PMU_OFFSET + 0x40)
+#define CMN_DT_PMEVCNT(dtc, n) ((dtc)->pmu_base + _CMN_DT_CNT_REG(n))
+#define CMN_DT_PMCCNTR(dtc) ((dtc)->pmu_base + 0x40)
-#define CMN_DT_PMEVCNTSR(n) (CMN_PMU_OFFSET + 0x50 + _CMN_DT_CNT_REG(n))
-#define CMN_DT_PMCCNTRSR (CMN_PMU_OFFSET + 0x90)
+#define CMN_DT_PMEVCNTSR(dtc, n) ((dtc)->pmu_base + 0x50 + _CMN_DT_CNT_REG(n))
+#define CMN_DT_PMCCNTRSR(dtc) ((dtc)->pmu_base + 0x90)
-#define CMN_DT_PMCR (CMN_PMU_OFFSET + 0x100)
+#define CMN_DT_PMCR(dtc) ((dtc)->pmu_base + 0x100)
#define CMN_DT_PMCR_PMU_EN BIT(0)
#define CMN_DT_PMCR_CNTR_RST BIT(5)
#define CMN_DT_PMCR_OVFL_INTR_EN BIT(6)
-#define CMN_DT_PMOVSR (CMN_PMU_OFFSET + 0x118)
-#define CMN_DT_PMOVSR_CLR (CMN_PMU_OFFSET + 0x120)
+#define CMN_DT_PMOVSR(dtc) ((dtc)->pmu_base + 0x118)
+#define CMN_DT_PMOVSR_CLR(dtc) ((dtc)->pmu_base + 0x120)
-#define CMN_DT_PMSSR (CMN_PMU_OFFSET + 0x128)
+#define CMN_DT_PMSSR(dtc) ((dtc)->pmu_base + 0x128)
#define CMN_DT_PMSSR_SS_STATUS(n) BIT(n)
-#define CMN_DT_PMSRR (CMN_PMU_OFFSET + 0x130)
+#define CMN_DT_PMSRR(dtc) ((dtc)->pmu_base + 0x130)
#define CMN_DT_PMSRR_SS_REQ BIT(0)
#define CMN_DT_NUM_COUNTERS 8
@@ -198,10 +199,11 @@ enum cmn_model {
CMN650 = 2,
CMN700 = 4,
CI700 = 8,
+ CMNS3 = 16,
/* ...and then we can use bitmap tricks for commonality */
CMN_ANY = -1,
NOT_CMN600 = -2,
- CMN_650ON = CMN650 | CMN700,
+ CMN_650ON = CMN650 | CMN700 | CMNS3,
};
/* Actual part numbers and revision IDs defined by the hardware */
@@ -210,6 +212,7 @@ enum cmn_part {
PART_CMN650 = 0x436,
PART_CMN700 = 0x43c,
PART_CI700 = 0x43a,
+ PART_CMN_S3 = 0x43e,
};
/* CMN-600 r0px shouldn't exist in silicon, thankfully */
@@ -261,6 +264,7 @@ enum cmn_node_type {
CMN_TYPE_HNS = 0x200,
CMN_TYPE_HNS_MPAM_S,
CMN_TYPE_HNS_MPAM_NS,
+ CMN_TYPE_APB = 0x1000,
/* Not a real node type */
CMN_TYPE_WP = 0x7770
};
@@ -280,8 +284,11 @@ struct arm_cmn_node {
u16 id, logid;
enum cmn_node_type type;
+ /* XP properties really, but replicated to children for convenience */
u8 dtm;
s8 dtc;
+ u8 portid_bits:4;
+ u8 deviceid_bits:4;
/* DN/HN-F/CXHA */
struct {
u8 val : 4;
@@ -307,8 +314,9 @@ struct arm_cmn_dtm {
struct arm_cmn_dtc {
void __iomem *base;
+ void __iomem *pmu_base;
int irq;
- int irq_friend;
+ s8 irq_friend;
bool cc_active;
struct perf_event *counters[CMN_DT_NUM_COUNTERS];
@@ -357,49 +365,33 @@ struct arm_cmn {
static int arm_cmn_hp_state;
struct arm_cmn_nodeid {
- u8 x;
- u8 y;
u8 port;
u8 dev;
};
static int arm_cmn_xyidbits(const struct arm_cmn *cmn)
{
- return fls((cmn->mesh_x - 1) | (cmn->mesh_y - 1) | 2);
+ return fls((cmn->mesh_x - 1) | (cmn->mesh_y - 1));
}
-static struct arm_cmn_nodeid arm_cmn_nid(const struct arm_cmn *cmn, u16 id)
+static struct arm_cmn_nodeid arm_cmn_nid(const struct arm_cmn_node *dn)
{
struct arm_cmn_nodeid nid;
- if (cmn->num_xps == 1) {
- nid.x = 0;
- nid.y = 0;
- nid.port = CMN_NODEID_1x1_PID(id);
- nid.dev = CMN_NODEID_DEVID(id);
- } else {
- int bits = arm_cmn_xyidbits(cmn);
-
- nid.x = CMN_NODEID_X(id, bits);
- nid.y = CMN_NODEID_Y(id, bits);
- if (cmn->ports_used & 0xc) {
- nid.port = CMN_NODEID_EXT_PID(id);
- nid.dev = CMN_NODEID_EXT_DEVID(id);
- } else {
- nid.port = CMN_NODEID_PID(id);
- nid.dev = CMN_NODEID_DEVID(id);
- }
- }
+ nid.dev = dn->id & ((1U << dn->deviceid_bits) - 1);
+ nid.port = (dn->id >> dn->deviceid_bits) & ((1U << dn->portid_bits) - 1);
return nid;
}
static struct arm_cmn_node *arm_cmn_node_to_xp(const struct arm_cmn *cmn,
const struct arm_cmn_node *dn)
{
- struct arm_cmn_nodeid nid = arm_cmn_nid(cmn, dn->id);
- int xp_idx = cmn->mesh_x * nid.y + nid.x;
+ int id = dn->id >> (dn->portid_bits + dn->deviceid_bits);
+ int bits = arm_cmn_xyidbits(cmn);
+ int x = id >> bits;
+ int y = id & ((1U << bits) - 1);
- return cmn->xps + xp_idx;
+ return cmn->xps + cmn->mesh_x * y + x;
}
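A worked decode of the node ID layout the rewrite above relies on: id = [x][y][port][device], where the port/device widths come from the owning XP and the x/y width from the mesh dimensions (the widths here are plain parameters for illustration):

#include <linux/printk.h>
#include <linux/types.h>

static void example_decode_nodeid(u16 id, unsigned int xybits,
				  unsigned int portid_bits,
				  unsigned int deviceid_bits)
{
	unsigned int dev  = id & ((1U << deviceid_bits) - 1);
	unsigned int port = (id >> deviceid_bits) & ((1U << portid_bits) - 1);
	unsigned int xy   = id >> (deviceid_bits + portid_bits);
	unsigned int x    = xy >> xybits;
	unsigned int y    = xy & ((1U << xybits) - 1);

	pr_debug("node 0x%x -> x=%u y=%u port=%u dev=%u\n", id, x, y, port, dev);
}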
static struct arm_cmn_node *arm_cmn_node(const struct arm_cmn *cmn,
enum cmn_node_type type)
@@ -423,15 +415,27 @@ static enum cmn_model arm_cmn_model(const struct arm_cmn *cmn)
return CMN700;
case PART_CI700:
return CI700;
+ case PART_CMN_S3:
+ return CMNS3;
default:
return 0;
};
}
+static int arm_cmn_pmu_offset(const struct arm_cmn *cmn, const struct arm_cmn_node *dn)
+{
+ if (cmn->part == PART_CMN_S3) {
+ if (dn->type == CMN_TYPE_XP)
+ return CMN_S3_DTM_OFFSET;
+ return CMN_S3_PMU_OFFSET;
+ }
+ return CMN_PMU_OFFSET;
+}
+
static u32 arm_cmn_device_connect_info(const struct arm_cmn *cmn,
const struct arm_cmn_node *xp, int port)
{
- int offset = CMN_MXP__CONNECT_INFO(port);
+ int offset = CMN_MXP__CONNECT_INFO(port) - arm_cmn_pmu_offset(cmn, xp);
if (port >= 2) {
if (cmn->part == PART_CMN600 || cmn->part == PART_CMN650)
@@ -444,7 +448,7 @@ static u32 arm_cmn_device_connect_info(const struct arm_cmn *cmn,
offset += CI700_CONNECT_INFO_P2_5_OFFSET;
}
- return readl_relaxed(xp->pmu_base - CMN_PMU_OFFSET + offset);
+ return readl_relaxed(xp->pmu_base + offset);
}
static struct dentry *arm_cmn_debugfs;
@@ -478,20 +482,25 @@ static const char *arm_cmn_device_type(u8 type)
case 0x17: return "RN-F_C_E|";
case 0x18: return " RN-F_E |";
case 0x19: return "RN-F_E_E|";
+ case 0x1a: return " HN-S |";
+ case 0x1b: return " LCN |";
case 0x1c: return " MTSX |";
case 0x1d: return " HN-V |";
case 0x1e: return " CCG |";
+ case 0x20: return " RN-F_F |";
+ case 0x21: return "RN-F_F_E|";
+ case 0x22: return " SN-F_F |";
default: return " ???? |";
}
}
-static void arm_cmn_show_logid(struct seq_file *s, int x, int y, int p, int d)
+static void arm_cmn_show_logid(struct seq_file *s, const struct arm_cmn_node *xp, int p, int d)
{
struct arm_cmn *cmn = s->private;
struct arm_cmn_node *dn;
+ u16 id = xp->id | d | (p << xp->deviceid_bits);
for (dn = cmn->dns; dn->type; dn++) {
- struct arm_cmn_nodeid nid = arm_cmn_nid(cmn, dn->id);
int pad = dn->logid < 10;
if (dn->type == CMN_TYPE_XP)
@@ -500,7 +509,7 @@ static void arm_cmn_show_logid(struct seq_file *s, int x, int y, int p, int d)
if (dn->type < CMN_TYPE_HNI)
continue;
- if (nid.x != x || nid.y != y || nid.port != p || nid.dev != d)
+ if (dn->id != id)
continue;
seq_printf(s, " %*c#%-*d |", pad + 1, ' ', 3 - pad, dn->logid);
@@ -521,6 +530,7 @@ static int arm_cmn_map_show(struct seq_file *s, void *data)
y = cmn->mesh_y;
while (y--) {
int xp_base = cmn->mesh_x * y;
+ struct arm_cmn_node *xp = cmn->xps + xp_base;
u8 port[CMN_MAX_PORTS][CMN_MAX_DIMENSION];
for (x = 0; x < cmn->mesh_x; x++)
@@ -528,16 +538,14 @@ static int arm_cmn_map_show(struct seq_file *s, void *data)
seq_printf(s, "\n%-2d |", y);
for (x = 0; x < cmn->mesh_x; x++) {
- struct arm_cmn_node *xp = cmn->xps + xp_base + x;
-
for (p = 0; p < CMN_MAX_PORTS; p++)
- port[p][x] = arm_cmn_device_connect_info(cmn, xp, p);
+ port[p][x] = arm_cmn_device_connect_info(cmn, xp + x, p);
seq_printf(s, " XP #%-3d|", xp_base + x);
}
seq_puts(s, "\n |");
for (x = 0; x < cmn->mesh_x; x++) {
- s8 dtc = cmn->xps[xp_base + x].dtc;
+ s8 dtc = xp[x].dtc;
if (dtc < 0)
seq_puts(s, " DTC ?? |");
@@ -554,10 +562,10 @@ static int arm_cmn_map_show(struct seq_file *s, void *data)
seq_puts(s, arm_cmn_device_type(port[p][x]));
seq_puts(s, "\n 0|");
for (x = 0; x < cmn->mesh_x; x++)
- arm_cmn_show_logid(s, x, y, p, 0);
+ arm_cmn_show_logid(s, xp + x, p, 0);
seq_puts(s, "\n 1|");
for (x = 0; x < cmn->mesh_x; x++)
- arm_cmn_show_logid(s, x, y, p, 1);
+ arm_cmn_show_logid(s, xp + x, p, 1);
}
seq_puts(s, "\n-----+");
}
@@ -585,7 +593,7 @@ static void arm_cmn_debugfs_init(struct arm_cmn *cmn, int id) {}
struct arm_cmn_hw_event {
struct arm_cmn_node *dn;
- u64 dtm_idx[4];
+ u64 dtm_idx[DIV_ROUND_UP(CMN_MAX_NODES_PER_EVENT * 2, 64)];
s8 dtc_idx[CMN_MAX_DTCS];
u8 num_dns;
u8 dtm_offset;
@@ -599,6 +607,7 @@ struct arm_cmn_hw_event {
bool wide_sel;
enum cmn_filter_select filter_sel;
};
+static_assert(sizeof(struct arm_cmn_hw_event) <= offsetof(struct hw_perf_event, target));
#define for_each_hw_dn(hw, dn, i) \
for (i = 0, dn = hw->dn; i < hw->num_dns; i++, dn++)
@@ -609,7 +618,6 @@ struct arm_cmn_hw_event {
static struct arm_cmn_hw_event *to_cmn_hw(struct perf_event *event)
{
- BUILD_BUG_ON(sizeof(struct arm_cmn_hw_event) > offsetof(struct hw_perf_event, target));
return (struct arm_cmn_hw_event *)&event->hw;
}
@@ -790,8 +798,8 @@ static umode_t arm_cmn_event_attr_is_visible(struct kobject *kobj,
CMN_EVENT_ATTR(CMN_ANY, cxha_##_name, CMN_TYPE_CXHA, _event)
#define CMN_EVENT_CCRA(_name, _event) \
CMN_EVENT_ATTR(CMN_ANY, ccra_##_name, CMN_TYPE_CCRA, _event)
-#define CMN_EVENT_CCHA(_name, _event) \
- CMN_EVENT_ATTR(CMN_ANY, ccha_##_name, CMN_TYPE_CCHA, _event)
+#define CMN_EVENT_CCHA(_model, _name, _event) \
+ CMN_EVENT_ATTR(_model, ccha_##_name, CMN_TYPE_CCHA, _event)
#define CMN_EVENT_CCLA(_name, _event) \
CMN_EVENT_ATTR(CMN_ANY, ccla_##_name, CMN_TYPE_CCLA, _event)
#define CMN_EVENT_CCLA_RNI(_name, _event) \
@@ -1149,42 +1157,43 @@ static struct attribute *arm_cmn_event_attrs[] = {
CMN_EVENT_CCRA(wdb_alloc, 0x59),
CMN_EVENT_CCRA(ssb_alloc, 0x5a),
- CMN_EVENT_CCHA(rddatbyp, 0x61),
- CMN_EVENT_CCHA(chirsp_up_stall, 0x62),
- CMN_EVENT_CCHA(chidat_up_stall, 0x63),
- CMN_EVENT_CCHA(snppcrd_link0_stall, 0x64),
- CMN_EVENT_CCHA(snppcrd_link1_stall, 0x65),
- CMN_EVENT_CCHA(snppcrd_link2_stall, 0x66),
- CMN_EVENT_CCHA(reqtrk_occ, 0x67),
- CMN_EVENT_CCHA(rdb_occ, 0x68),
- CMN_EVENT_CCHA(rdbyp_occ, 0x69),
- CMN_EVENT_CCHA(wdb_occ, 0x6a),
- CMN_EVENT_CCHA(snptrk_occ, 0x6b),
- CMN_EVENT_CCHA(sdb_occ, 0x6c),
- CMN_EVENT_CCHA(snphaz_occ, 0x6d),
- CMN_EVENT_CCHA(reqtrk_alloc, 0x6e),
- CMN_EVENT_CCHA(rdb_alloc, 0x6f),
- CMN_EVENT_CCHA(rdbyp_alloc, 0x70),
- CMN_EVENT_CCHA(wdb_alloc, 0x71),
- CMN_EVENT_CCHA(snptrk_alloc, 0x72),
- CMN_EVENT_CCHA(sdb_alloc, 0x73),
- CMN_EVENT_CCHA(snphaz_alloc, 0x74),
- CMN_EVENT_CCHA(pb_rhu_req_occ, 0x75),
- CMN_EVENT_CCHA(pb_rhu_req_alloc, 0x76),
- CMN_EVENT_CCHA(pb_rhu_pcie_req_occ, 0x77),
- CMN_EVENT_CCHA(pb_rhu_pcie_req_alloc, 0x78),
- CMN_EVENT_CCHA(pb_pcie_wr_req_occ, 0x79),
- CMN_EVENT_CCHA(pb_pcie_wr_req_alloc, 0x7a),
- CMN_EVENT_CCHA(pb_pcie_reg_req_occ, 0x7b),
- CMN_EVENT_CCHA(pb_pcie_reg_req_alloc, 0x7c),
- CMN_EVENT_CCHA(pb_pcie_rsvd_req_occ, 0x7d),
- CMN_EVENT_CCHA(pb_pcie_rsvd_req_alloc, 0x7e),
- CMN_EVENT_CCHA(pb_rhu_dat_occ, 0x7f),
- CMN_EVENT_CCHA(pb_rhu_dat_alloc, 0x80),
- CMN_EVENT_CCHA(pb_rhu_pcie_dat_occ, 0x81),
- CMN_EVENT_CCHA(pb_rhu_pcie_dat_alloc, 0x82),
- CMN_EVENT_CCHA(pb_pcie_wr_dat_occ, 0x83),
- CMN_EVENT_CCHA(pb_pcie_wr_dat_alloc, 0x84),
+ CMN_EVENT_CCHA(CMN_ANY, rddatbyp, 0x61),
+ CMN_EVENT_CCHA(CMN_ANY, chirsp_up_stall, 0x62),
+ CMN_EVENT_CCHA(CMN_ANY, chidat_up_stall, 0x63),
+ CMN_EVENT_CCHA(CMN_ANY, snppcrd_link0_stall, 0x64),
+ CMN_EVENT_CCHA(CMN_ANY, snppcrd_link1_stall, 0x65),
+ CMN_EVENT_CCHA(CMN_ANY, snppcrd_link2_stall, 0x66),
+ CMN_EVENT_CCHA(CMN_ANY, reqtrk_occ, 0x67),
+ CMN_EVENT_CCHA(CMN_ANY, rdb_occ, 0x68),
+ CMN_EVENT_CCHA(CMN_ANY, rdbyp_occ, 0x69),
+ CMN_EVENT_CCHA(CMN_ANY, wdb_occ, 0x6a),
+ CMN_EVENT_CCHA(CMN_ANY, snptrk_occ, 0x6b),
+ CMN_EVENT_CCHA(CMN_ANY, sdb_occ, 0x6c),
+ CMN_EVENT_CCHA(CMN_ANY, snphaz_occ, 0x6d),
+ CMN_EVENT_CCHA(CMN_ANY, reqtrk_alloc, 0x6e),
+ CMN_EVENT_CCHA(CMN_ANY, rdb_alloc, 0x6f),
+ CMN_EVENT_CCHA(CMN_ANY, rdbyp_alloc, 0x70),
+ CMN_EVENT_CCHA(CMN_ANY, wdb_alloc, 0x71),
+ CMN_EVENT_CCHA(CMN_ANY, snptrk_alloc, 0x72),
+ CMN_EVENT_CCHA(CMN_ANY, sdb_alloc, 0x73),
+ CMN_EVENT_CCHA(CMN_ANY, snphaz_alloc, 0x74),
+ CMN_EVENT_CCHA(CMN_ANY, pb_rhu_req_occ, 0x75),
+ CMN_EVENT_CCHA(CMN_ANY, pb_rhu_req_alloc, 0x76),
+ CMN_EVENT_CCHA(CMN_ANY, pb_rhu_pcie_req_occ, 0x77),
+ CMN_EVENT_CCHA(CMN_ANY, pb_rhu_pcie_req_alloc, 0x78),
+ CMN_EVENT_CCHA(CMN_ANY, pb_pcie_wr_req_occ, 0x79),
+ CMN_EVENT_CCHA(CMN_ANY, pb_pcie_wr_req_alloc, 0x7a),
+ CMN_EVENT_CCHA(CMN_ANY, pb_pcie_reg_req_occ, 0x7b),
+ CMN_EVENT_CCHA(CMN_ANY, pb_pcie_reg_req_alloc, 0x7c),
+ CMN_EVENT_CCHA(CMN_ANY, pb_pcie_rsvd_req_occ, 0x7d),
+ CMN_EVENT_CCHA(CMN_ANY, pb_pcie_rsvd_req_alloc, 0x7e),
+ CMN_EVENT_CCHA(CMN_ANY, pb_rhu_dat_occ, 0x7f),
+ CMN_EVENT_CCHA(CMN_ANY, pb_rhu_dat_alloc, 0x80),
+ CMN_EVENT_CCHA(CMN_ANY, pb_rhu_pcie_dat_occ, 0x81),
+ CMN_EVENT_CCHA(CMN_ANY, pb_rhu_pcie_dat_alloc, 0x82),
+ CMN_EVENT_CCHA(CMN_ANY, pb_pcie_wr_dat_occ, 0x83),
+ CMN_EVENT_CCHA(CMN_ANY, pb_pcie_wr_dat_alloc, 0x84),
+ CMN_EVENT_CCHA(CMNS3, chirsp1_up_stall, 0x85),
CMN_EVENT_CCLA(rx_cxs, 0x21),
CMN_EVENT_CCLA(tx_cxs, 0x22),
@@ -1271,15 +1280,11 @@ static ssize_t arm_cmn_format_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct arm_cmn_format_attr *fmt = container_of(attr, typeof(*fmt), attr);
- int lo = __ffs(fmt->field), hi = __fls(fmt->field);
-
- if (lo == hi)
- return sysfs_emit(buf, "config:%d\n", lo);
if (!fmt->config)
- return sysfs_emit(buf, "config:%d-%d\n", lo, hi);
+ return sysfs_emit(buf, "config:%*pbl\n", 64, &fmt->field);
- return sysfs_emit(buf, "config%d:%d-%d\n", fmt->config, lo, hi);
+ return sysfs_emit(buf, "config%d:%*pbl\n", fmt->config, 64, &fmt->field);
}
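The "%*pbl" specifier used above is the kernel's bitmap-list printf extension, which renders set bits as ranges. A self-contained sketch with a hypothetical 16-bit field, which would emit "config:0-15":

#include <linux/bits.h>
#include <linux/sysfs.h>
#include <linux/types.h>

static ssize_t example_show_field(char *buf)
{
	/* Hypothetical field occupying config bits [15:0] */
	u64 field = GENMASK_ULL(15, 0);

	/* Field width 64 covers the whole u64 treated as a bitmap */
	return sysfs_emit(buf, "config:%*pbl\n", 64, &field);
}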
#define _CMN_FORMAT_ATTR(_name, _cfg, _fld) \
@@ -1415,7 +1420,7 @@ static u32 arm_cmn_wp_config(struct perf_event *event, int wp_idx)
static void arm_cmn_set_state(struct arm_cmn *cmn, u32 state)
{
if (!cmn->state)
- writel_relaxed(0, cmn->dtc[0].base + CMN_DT_PMCR);
+ writel_relaxed(0, CMN_DT_PMCR(&cmn->dtc[0]));
cmn->state |= state;
}
@@ -1424,7 +1429,7 @@ static void arm_cmn_clear_state(struct arm_cmn *cmn, u32 state)
cmn->state &= ~state;
if (!cmn->state)
writel_relaxed(CMN_DT_PMCR_PMU_EN | CMN_DT_PMCR_OVFL_INTR_EN,
- cmn->dtc[0].base + CMN_DT_PMCR);
+ CMN_DT_PMCR(&cmn->dtc[0]));
}
static void arm_cmn_pmu_enable(struct pmu *pmu)
@@ -1459,18 +1464,19 @@ static u64 arm_cmn_read_dtm(struct arm_cmn *cmn, struct arm_cmn_hw_event *hw,
static u64 arm_cmn_read_cc(struct arm_cmn_dtc *dtc)
{
- u64 val = readq_relaxed(dtc->base + CMN_DT_PMCCNTR);
+ void __iomem *pmccntr = CMN_DT_PMCCNTR(dtc);
+ u64 val = readq_relaxed(pmccntr);
- writeq_relaxed(CMN_CC_INIT, dtc->base + CMN_DT_PMCCNTR);
+ writeq_relaxed(CMN_CC_INIT, pmccntr);
return (val - CMN_CC_INIT) & ((CMN_CC_INIT << 1) - 1);
}
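A worked example of the read-and-rearm arithmetic above, scaled down to a hypothetical 8-bit counter: starting the counter at the half-range value lets a masked subtraction recover the elapsed count even if the hardware value has wrapped once since the last read.

/* Half of a hypothetical 8-bit counter range */
#define EXAMPLE_CC_INIT	0x80U

static unsigned int example_cc_delta(unsigned int hw_val)
{
	/* e.g. hw_val == 0x05 after wrapping past 0xff -> returns 0x85 */
	return (hw_val - EXAMPLE_CC_INIT) & ((EXAMPLE_CC_INIT << 1) - 1);
}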
static u32 arm_cmn_read_counter(struct arm_cmn_dtc *dtc, int idx)
{
- u32 val, pmevcnt = CMN_DT_PMEVCNT(idx);
+ void __iomem *pmevcnt = CMN_DT_PMEVCNT(dtc, idx);
+ u32 val = readl_relaxed(pmevcnt);
- val = readl_relaxed(dtc->base + pmevcnt);
- writel_relaxed(CMN_COUNTER_INIT, dtc->base + pmevcnt);
+ writel_relaxed(CMN_COUNTER_INIT, pmevcnt);
return val - CMN_COUNTER_INIT;
}
@@ -1481,7 +1487,7 @@ static void arm_cmn_init_counter(struct perf_event *event)
u64 count;
for_each_hw_dtc_idx(hw, i, idx) {
- writel_relaxed(CMN_COUNTER_INIT, cmn->dtc[i].base + CMN_DT_PMEVCNT(idx));
+ writel_relaxed(CMN_COUNTER_INIT, CMN_DT_PMEVCNT(&cmn->dtc[i], idx));
cmn->dtc[i].counters[idx] = event;
}
@@ -1564,9 +1570,12 @@ s