Diffstat (limited to 'drivers/hwtracing/coresight/coresight-etm4x.c')
-rw-r--r-- | drivers/hwtracing/coresight/coresight-etm4x.c | 2702
1 file changed, 2702 insertions, 0 deletions
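
The patch below adds, among other things, a set of sysfs attributes (reset, mode, event, addr_idx, and friends, all parsed as hexadecimal via kstrtoul) for configuring an ETMv4 trace source. As a rough illustration of how such attributes are typically exercised from userspace, here is a minimal sketch. The device name "etm0", the /sys/bus/coresight/devices path, and the enable_source attribute (provided by the CoreSight framework rather than by this file) are assumptions for illustration only, not part of this patch.

	/* Illustrative userspace sketch -- not part of the patch below. */
	#include <stdio.h>

	/* Write a string value to a (hypothetical) CoreSight ETM sysfs attribute. */
	static int cs_write(const char *attr, const char *val)
	{
		char path[256];
		FILE *f;

		snprintf(path, sizeof(path),
			 "/sys/bus/coresight/devices/etm0/%s", attr);
		f = fopen(path, "w");
		if (!f)
			return -1;
		fputs(val, f);
		return fclose(f);
	}

	int main(void)
	{
		cs_write("reset", "1");		/* return to a known configuration */
		cs_write("mode", "0");		/* hex, as parsed by kstrtoul(buf, 16, ...) */
		cs_write("enable_source", "1");	/* framework attribute, assumed here */
		return 0;
	}

A sink further down the trace path would normally have to be enabled as well before the source produces usable trace data; that is outside the scope of this file.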
diff --git a/drivers/hwtracing/coresight/coresight-etm4x.c b/drivers/hwtracing/coresight/coresight-etm4x.c
new file mode 100644
index 000000000000..1312e993c501
--- /dev/null
+++ b/drivers/hwtracing/coresight/coresight-etm4x.c
@@ -0,0 +1,2702 @@
+/* Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/moduleparam.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/io.h>
+#include <linux/err.h>
+#include <linux/fs.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/smp.h>
+#include <linux/sysfs.h>
+#include <linux/stat.h>
+#include <linux/clk.h>
+#include <linux/cpu.h>
+#include <linux/coresight.h>
+#include <linux/pm_wakeup.h>
+#include <linux/amba/bus.h>
+#include <linux/seq_file.h>
+#include <linux/uaccess.h>
+#include <linux/pm_runtime.h>
+#include <asm/sections.h>
+
+#include "coresight-etm4x.h"
+
+static int boot_enable;
+module_param_named(boot_enable, boot_enable, int, S_IRUGO);
+
+/* The number of ETMv4 currently registered */
+static int etm4_count;
+static struct etmv4_drvdata *etmdrvdata[NR_CPUS];
+
+static void etm4_os_unlock(void *info)
+{
+	struct etmv4_drvdata *drvdata = (struct etmv4_drvdata *)info;
+
+	/* Writing any value to ETMOSLAR unlocks the trace registers */
+	writel_relaxed(0x0, drvdata->base + TRCOSLAR);
+	isb();
+}
+
+static bool etm4_arch_supported(u8 arch)
+{
+	switch (arch) {
+	case ETM_ARCH_V4:
+		break;
+	default:
+		return false;
+	}
+	return true;
+}
+
+static int etm4_trace_id(struct coresight_device *csdev)
+{
+	struct etmv4_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
+	unsigned long flags;
+	int trace_id = -1;
+
+	if (!drvdata->enable)
+		return drvdata->trcid;
+
+	pm_runtime_get_sync(drvdata->dev);
+	spin_lock_irqsave(&drvdata->spinlock, flags);
+
+	CS_UNLOCK(drvdata->base);
+	trace_id = readl_relaxed(drvdata->base + TRCTRACEIDR);
+	trace_id &= ETM_TRACEID_MASK;
+	CS_LOCK(drvdata->base);
+
+	spin_unlock_irqrestore(&drvdata->spinlock, flags);
+	pm_runtime_put(drvdata->dev);
+
+	return trace_id;
+}
+
+static void etm4_enable_hw(void *info)
+{
+	int i;
+	struct etmv4_drvdata *drvdata = info;
+
+	CS_UNLOCK(drvdata->base);
+
+	etm4_os_unlock(drvdata);
+
+	/* Disable the trace unit before programming trace registers */
+	writel_relaxed(0, drvdata->base + TRCPRGCTLR);
+
+	/* wait for TRCSTATR.IDLE to go up */
+	if (coresight_timeout(drvdata->base, TRCSTATR, TRCSTATR_IDLE_BIT, 1))
+		dev_err(drvdata->dev,
+			"timeout observed when probing at offset %#x\n",
+			TRCSTATR);
+
+	writel_relaxed(drvdata->pe_sel, drvdata->base + TRCPROCSELR);
+	writel_relaxed(drvdata->cfg, drvdata->base + TRCCONFIGR);
+	/* nothing specific implemented */
+	writel_relaxed(0x0, drvdata->base + TRCAUXCTLR);
+	writel_relaxed(drvdata->eventctrl0, drvdata->base + TRCEVENTCTL0R);
+	writel_relaxed(drvdata->eventctrl1, drvdata->base + TRCEVENTCTL1R);
+	writel_relaxed(drvdata->stall_ctrl, drvdata->base + TRCSTALLCTLR);
+	writel_relaxed(drvdata->ts_ctrl, drvdata->base + TRCTSCTLR);
+	writel_relaxed(drvdata->syncfreq, drvdata->base + TRCSYNCPR);
+	writel_relaxed(drvdata->ccctlr, drvdata->base + TRCCCCTLR);
+	writel_relaxed(drvdata->bb_ctrl, drvdata->base + TRCBBCTLR);
+	writel_relaxed(drvdata->trcid, drvdata->base + TRCTRACEIDR);
+	writel_relaxed(drvdata->vinst_ctrl, drvdata->base + TRCVICTLR);
+	writel_relaxed(drvdata->viiectlr, drvdata->base + TRCVIIECTLR);
+	writel_relaxed(drvdata->vissctlr,
+		       drvdata->base + TRCVISSCTLR);
+	writel_relaxed(drvdata->vipcssctlr,
+		       drvdata->base + TRCVIPCSSCTLR);
+	for (i = 0; i < drvdata->nrseqstate - 1; i++)
+		writel_relaxed(drvdata->seq_ctrl[i],
+			       drvdata->base + TRCSEQEVRn(i));
+	writel_relaxed(drvdata->seq_rst, drvdata->base + TRCSEQRSTEVR);
+	writel_relaxed(drvdata->seq_state, drvdata->base + TRCSEQSTR);
+	writel_relaxed(drvdata->ext_inp, drvdata->base + TRCEXTINSELR);
+	for (i = 0; i < drvdata->nr_cntr; i++) {
+		writel_relaxed(drvdata->cntrldvr[i],
+			       drvdata->base + TRCCNTRLDVRn(i));
+		writel_relaxed(drvdata->cntr_ctrl[i],
+			       drvdata->base + TRCCNTCTLRn(i));
+		writel_relaxed(drvdata->cntr_val[i],
+			       drvdata->base + TRCCNTVRn(i));
+	}
+	for (i = 0; i < drvdata->nr_resource; i++)
+		writel_relaxed(drvdata->res_ctrl[i],
+			       drvdata->base + TRCRSCTLRn(i));
+
+	for (i = 0; i < drvdata->nr_ss_cmp; i++) {
+		writel_relaxed(drvdata->ss_ctrl[i],
+			       drvdata->base + TRCSSCCRn(i));
+		writel_relaxed(drvdata->ss_status[i],
+			       drvdata->base + TRCSSCSRn(i));
+		writel_relaxed(drvdata->ss_pe_cmp[i],
+			       drvdata->base + TRCSSPCICRn(i));
+	}
+	for (i = 0; i < drvdata->nr_addr_cmp; i++) {
+		writeq_relaxed(drvdata->addr_val[i],
+			       drvdata->base + TRCACVRn(i));
+		writeq_relaxed(drvdata->addr_acc[i],
+			       drvdata->base + TRCACATRn(i));
+	}
+	for (i = 0; i < drvdata->numcidc; i++)
+		writeq_relaxed(drvdata->ctxid_val[i],
+			       drvdata->base + TRCCIDCVRn(i));
+	writel_relaxed(drvdata->ctxid_mask0, drvdata->base + TRCCIDCCTLR0);
+	writel_relaxed(drvdata->ctxid_mask1, drvdata->base + TRCCIDCCTLR1);
+
+	for (i = 0; i < drvdata->numvmidc; i++)
+		writeq_relaxed(drvdata->vmid_val[i],
+			       drvdata->base + TRCVMIDCVRn(i));
+	writel_relaxed(drvdata->vmid_mask0, drvdata->base + TRCVMIDCCTLR0);
+	writel_relaxed(drvdata->vmid_mask1, drvdata->base + TRCVMIDCCTLR1);
+
+	/* Enable the trace unit */
+	writel_relaxed(1, drvdata->base + TRCPRGCTLR);
+
+	/* wait for TRCSTATR.IDLE to go back down to '0' */
+	if (coresight_timeout(drvdata->base, TRCSTATR, TRCSTATR_IDLE_BIT, 0))
+		dev_err(drvdata->dev,
+			"timeout observed when probing at offset %#x\n",
+			TRCSTATR);
+
+	CS_LOCK(drvdata->base);
+
+	dev_dbg(drvdata->dev, "cpu: %d enable smp call done\n", drvdata->cpu);
+}
+
+static int etm4_enable(struct coresight_device *csdev)
+{
+	struct etmv4_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
+	int ret;
+
+	pm_runtime_get_sync(drvdata->dev);
+	spin_lock(&drvdata->spinlock);
+
+	/*
+	 * Executing etm4_enable_hw on the cpu whose ETM is being enabled
+	 * ensures that register writes occur when cpu is powered.
+	 */
+	ret = smp_call_function_single(drvdata->cpu,
+				       etm4_enable_hw, drvdata, 1);
+	if (ret)
+		goto err;
+	drvdata->enable = true;
+	drvdata->sticky_enable = true;
+
+	spin_unlock(&drvdata->spinlock);
+
+	dev_info(drvdata->dev, "ETM tracing enabled\n");
+	return 0;
+err:
+	spin_unlock(&drvdata->spinlock);
+	pm_runtime_put(drvdata->dev);
+	return ret;
+}
+
+static void etm4_disable_hw(void *info)
+{
+	u32 control;
+	struct etmv4_drvdata *drvdata = info;
+
+	CS_UNLOCK(drvdata->base);
+
+	control = readl_relaxed(drvdata->base + TRCPRGCTLR);
+
+	/* EN, bit[0] Trace unit enable bit */
+	control &= ~0x1;
+
+	/* make sure everything completes before disabling */
+	mb();
+	isb();
+	writel_relaxed(control, drvdata->base + TRCPRGCTLR);
+
+	CS_LOCK(drvdata->base);
+
+	dev_dbg(drvdata->dev, "cpu: %d disable smp call done\n", drvdata->cpu);
+}
+
+static void etm4_disable(struct coresight_device *csdev)
+{
+	struct etmv4_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
+
+	/*
+	 * Taking hotplug lock here protects from clocks getting disabled
+	 * with tracing being left on (crash scenario) if user disable occurs
+	 * after cpu online mask indicates the cpu is offline but before the
+	 * DYING hotplug callback is serviced by the ETM driver.
+	 */
+	get_online_cpus();
+	spin_lock(&drvdata->spinlock);
+
+	/*
+	 * Executing etm4_disable_hw on the cpu whose ETM is being disabled
+	 * ensures that register writes occur when cpu is powered.
+	 */
+	smp_call_function_single(drvdata->cpu, etm4_disable_hw, drvdata, 1);
+	drvdata->enable = false;
+
+	spin_unlock(&drvdata->spinlock);
+	put_online_cpus();
+
+	pm_runtime_put(drvdata->dev);
+
+	dev_info(drvdata->dev, "ETM tracing disabled\n");
+}
+
+static const struct coresight_ops_source etm4_source_ops = {
+	.trace_id	= etm4_trace_id,
+	.enable		= etm4_enable,
+	.disable	= etm4_disable,
+};
+
+static const struct coresight_ops etm4_cs_ops = {
+	.source_ops	= &etm4_source_ops,
+};
+
+static int etm4_set_mode_exclude(struct etmv4_drvdata *drvdata, bool exclude)
+{
+	u8 idx = drvdata->addr_idx;
+
+	/*
+	 * TRCACATRn.TYPE bit[1:0]: type of comparison
+	 * the trace unit performs
+	 */
+	if (BMVAL(drvdata->addr_acc[idx], 0, 1) == ETM_INSTR_ADDR) {
+		if (idx % 2 != 0)
+			return -EINVAL;
+
+		/*
+		 * We are performing instruction address comparison. Set the
+		 * relevant bit of ViewInst Include/Exclude Control register
+		 * for corresponding address comparator pair.
+		 */
+		if (drvdata->addr_type[idx] != ETM_ADDR_TYPE_RANGE ||
+		    drvdata->addr_type[idx + 1] != ETM_ADDR_TYPE_RANGE)
+			return -EINVAL;
+
+		if (exclude == true) {
+			/*
+			 * Set exclude bit and unset the include bit
+			 * corresponding to comparator pair
+			 */
+			drvdata->viiectlr |= BIT(idx / 2 + 16);
+			drvdata->viiectlr &= ~BIT(idx / 2);
+		} else {
+			/*
+			 * Set include bit and unset exclude bit
+			 * corresponding to comparator pair
+			 */
+			drvdata->viiectlr |= BIT(idx / 2);
+			drvdata->viiectlr &= ~BIT(idx / 2 + 16);
+		}
+	}
+	return 0;
+}
+
+static ssize_t nr_pe_cmp_show(struct device *dev,
+			      struct device_attribute *attr,
+			      char *buf)
+{
+	unsigned long val;
+	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+	val = drvdata->nr_pe_cmp;
+	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+}
+static DEVICE_ATTR_RO(nr_pe_cmp);
+
+static ssize_t nr_addr_cmp_show(struct device *dev,
+				struct device_attribute *attr,
+				char *buf)
+{
+	unsigned long val;
+	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+	val = drvdata->nr_addr_cmp;
+	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+}
+static DEVICE_ATTR_RO(nr_addr_cmp);
+
+static ssize_t nr_cntr_show(struct device *dev,
+			    struct device_attribute *attr,
+			    char *buf)
+{
+	unsigned long val;
+	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+	val = drvdata->nr_cntr;
+	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+}
+static DEVICE_ATTR_RO(nr_cntr);
+
+static ssize_t nr_ext_inp_show(struct device *dev,
+			       struct device_attribute *attr,
+			       char *buf)
+{
+	unsigned long val;
+	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+	val = drvdata->nr_ext_inp;
+	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+}
+static DEVICE_ATTR_RO(nr_ext_inp);
+
+static ssize_t numcidc_show(struct device *dev,
+			    struct device_attribute *attr,
+			    char *buf)
+{
+	unsigned long val;
+	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+	val = drvdata->numcidc;
+	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+}
+static DEVICE_ATTR_RO(numcidc);
+
+static ssize_t numvmidc_show(struct device *dev,
+			     struct device_attribute *attr,
+			     char *buf)
+{
+	unsigned long val;
+	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+	val = drvdata->numvmidc;
+	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+}
+static DEVICE_ATTR_RO(numvmidc);
+
+static ssize_t nrseqstate_show(struct device *dev,
+			       struct device_attribute *attr,
+			       char *buf)
+{
+	unsigned long val;
+	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+	val = drvdata->nrseqstate;
+	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+}
+static DEVICE_ATTR_RO(nrseqstate);
+
+static ssize_t nr_resource_show(struct device *dev,
+				struct device_attribute *attr,
+				char *buf)
+{
+	unsigned long val;
+	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+	val = drvdata->nr_resource;
+	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+}
+static DEVICE_ATTR_RO(nr_resource);
+
+static ssize_t nr_ss_cmp_show(struct device *dev,
+			      struct device_attribute *attr,
+			      char *buf)
+{
+	unsigned long val;
+	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+	val = drvdata->nr_ss_cmp;
+	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+}
+static DEVICE_ATTR_RO(nr_ss_cmp);
+
+static ssize_t reset_store(struct device *dev,
+			   struct device_attribute *attr,
+			   const char *buf, size_t size)
+{
+	int i;
+	unsigned long val;
+	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+	if (kstrtoul(buf, 16, &val))
+		return -EINVAL;
+
+	spin_lock(&drvdata->spinlock);
+	if (val)
+		drvdata->mode = 0x0;
+
+	/* Disable data tracing: do not trace load and store data transfers */
+	drvdata->mode &= ~(ETM_MODE_LOAD | ETM_MODE_STORE);
+	drvdata->cfg &= ~(BIT(1) | BIT(2));
+
+	/* Disable data value and data address tracing */
+	drvdata->mode &= ~(ETM_MODE_DATA_TRACE_ADDR |
+			   ETM_MODE_DATA_TRACE_VAL);
+	drvdata->cfg &= ~(BIT(16) | BIT(17));
+
+	/* Disable all events tracing */
+	drvdata->eventctrl0 = 0x0;
+	drvdata->eventctrl1 = 0x0;
+
+	/* Disable timestamp event */
+	drvdata->ts_ctrl = 0x0;
+
+	/* Disable stalling */
+	drvdata->stall_ctrl = 0x0;
+
+	/* Reset trace synchronization period to 2^8 = 256 bytes */
+	if (drvdata->syncpr == false)
+		drvdata->syncfreq = 0x8;
+
+	/*
+	 * Enable ViewInst to trace everything with start-stop logic in
+	 * started state. ARM recommends start-stop logic is set before
+	 * each trace run.
+	 */
+	drvdata->vinst_ctrl |= BIT(0);
+	if (drvdata->nr_addr_cmp == true) {
+		drvdata->mode |= ETM_MODE_VIEWINST_STARTSTOP;
+		/* SSSTATUS, bit[9] */
+		drvdata->vinst_ctrl |= BIT(9);
+	}
+
+	/* No address range filtering for ViewInst */
+	drvdata->viiectlr = 0x0;
+
+	/* No start-stop filtering for ViewInst */
+	drvdata->vissctlr = 0x0;
+
+	/* Disable seq events */
+	for (i = 0; i < drvdata->nrseqstate-1; i++)
+		drvdata->seq_ctrl[i] = 0x0;
+	drvdata->seq_rst = 0x0;
+	drvdata->seq_state = 0x0;
+
+	/* Disable external input events */
+	drvdata->ext_inp = 0x0;
+
+	drvdata->cntr_idx = 0x0;
+	for (i = 0; i < drvdata->nr_cntr; i++) {
+		drvdata->cntrldvr[i] = 0x0;
+		drvdata->cntr_ctrl[i] = 0x0;
+		drvdata->cntr_val[i] = 0x0;
+	}
+
+	drvdata->res_idx = 0x0;
+	for (i = 0; i < drvdata->nr_resource; i++)
+		drvdata->res_ctrl[i] = 0x0;
+
+	for (i = 0; i < drvdata->nr_ss_cmp; i++) {
+		drvdata->ss_ctrl[i] = 0x0;
+		drvdata->ss_pe_cmp[i] = 0x0;
+	}
+
+	drvdata->addr_idx = 0x0;
+	for (i = 0; i < drvdata->nr_addr_cmp * 2; i++) {
+		drvdata->addr_val[i] = 0x0;
+		drvdata->addr_acc[i] = 0x0;
+		drvdata->addr_type[i] = ETM_ADDR_TYPE_NONE;
+	}
+
+	drvdata->ctxid_idx = 0x0;
+	for (i = 0; i < drvdata->numcidc; i++)
+		drvdata->ctxid_val[i] = 0x0;
+	drvdata->ctxid_mask0 = 0x0;
+	drvdata->ctxid_mask1 = 0x0;
+
+	drvdata->vmid_idx = 0x0;
+	for (i = 0; i < drvdata->numvmidc; i++)
+		drvdata->vmid_val[i] = 0x0;
+	drvdata->vmid_mask0 = 0x0;
+	drvdata->vmid_mask1 = 0x0;
+
+	drvdata->trcid = drvdata->cpu + 1;
+	spin_unlock(&drvdata->spinlock);
+	return size;
+}
+static DEVICE_ATTR_WO(reset);
+
+static ssize_t mode_show(struct device *dev,
+			 struct device_attribute *attr,
+			 char *buf)
+{
+	unsigned long val;
+	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+	val = drvdata->mode;
+	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+}
+
+static ssize_t mode_store(struct device *dev,
+			  struct device_attribute *attr,
+			  const char *buf, size_t size)
+{
+	unsigned long val, mode;
+	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+	if (kstrtoul(buf, 16, &val))
+		return -EINVAL;
+
+	spin_lock(&drvdata->spinlock);
+	drvdata->mode = val & ETMv4_MODE_ALL;
+
+	if (drvdata->mode & ETM_MODE_EXCLUDE)
+		etm4_set_mode_exclude(drvdata, true);
+	else
+		etm4_set_mode_exclude(drvdata, false);
+
+	if (drvdata->instrp0 == true) {
+		/* start by clearing instruction P0 field */
+		drvdata->cfg &= ~(BIT(1) | BIT(2));
+		if (drvdata->mode & ETM_MODE_LOAD)
+			/* 0b01 Trace load instructions as P0 instructions */
+			drvdata->cfg |= BIT(1);
+		if (drvdata->mode & ETM_MODE_STORE)
+			/* 0b10 Trace store instructions as P0 instructions */
+			drvdata->cfg |= BIT(2);
+		if (drvdata->mode & ETM_MODE_LOAD_STORE)
+			/*
+			 * 0b11 Trace load and store instructions
+			 * as P0 instructions
+			 */
+			drvdata->cfg |= BIT(1) | BIT(2);
+	}
+
+	/* bit[3], Branch broadcast mode */
+	if ((drvdata->mode & ETM_MODE_BB) && (drvdata->trcbb == true))
+		drvdata->cfg |= BIT(3);
+	else
+		drvdata->cfg &= ~BIT(3);
+
+	/* bit[4], Cycle counting instruction trace bit */
+	if ((drvdata->mode & ETMv4_MODE_CYCACC) &&
+	    (drvdata->trccci == true))
+		drvdata->cfg |= BIT(4);
+	else
+		drvdata->cfg &= ~BIT(4);
+
+	/* bit[6], Context ID tracing bit */
+	if ((drvdata->mode & ETMv4_MODE_CTXID) && (drvdata->ctxid_size))
+		drvdata->cfg |= BIT(6);
+	else
+		drvdata->cfg &= ~BIT(6);
+
+	if ((drvdata->mode & ETM_MODE_VMID) && (drvdata->vmid_size))
+		drvdata->cfg |= BIT(7);
+	else
+		drvdata->cfg &= ~BIT(7);
+
+	/* bits[10:8], Conditional instruction tracing bit */
+	mode = ETM_MODE_COND(drvdata->mode);
+	if (drvdata->trccond == true) {
+		drvdata->cfg &= ~(BIT(8) | BIT(9) | BIT(10));
+		drvdata->cfg |= mode << 8;
+	}
+
+	/* bit[11], Global timestamp tracing bit */
+	if ((drvdata->mode & ETMv4_MODE_TIMESTAMP) && (drvdata->ts_size))
+		drvdata->cfg |= BIT(11);
+	else
+		drvdata->cfg &= ~BIT(11);
+
+	/* bit[12], Return stack enable bit */
+	if ((drvdata->mode & ETM_MODE_RETURNSTACK) &&
+	    (drvdata->retstack == true))
+		drvdata->cfg |= BIT(12);
+	else
+		drvdata->cfg &= ~BIT(12);
+
+	/* bits[14:13], Q element enable field */
+	mode = ETM_MODE_QELEM(drvdata->mode);
+	/* start by clearing QE bits */
+	drvdata->cfg &= ~(BIT(13) | BIT(14));
+	/* if supported, Q elements with instruction counts are enabled */
+	if ((mode & BIT(0)) && (drvdata->q_support & BIT(0)))
+		drvdata->cfg |= BIT(13);
+	/*
+	 * if supported, Q elements with and without instruction
+	 * counts are enabled
+	 */
+	if ((mode & BIT(1)) && (drvdata->q_support & BIT(1)))
+		drvdata->cfg |= BIT(14);
+
+	/* bit[11], AMBA Trace Bus (ATB) trigger enable bit */
+	if ((drvdata->mode & ETM_MODE_ATB_TRIGGER) &&
+	    (drvdata->atbtrig == true))
+		drvdata->eventctrl1 |= BIT(11);
+	else
+		drvdata->eventctrl1 &= ~BIT(11);
+
+	/* bit[12], Low-power state behavior override bit */
+	if ((drvdata->mode & ETM_MODE_LPOVERRIDE) &&
+	    (drvdata->lpoverride == true))
+		drvdata->eventctrl1 |= BIT(12);
+	else
+		drvdata->eventctrl1 &= ~BIT(12);
+
+	/* bit[8], Instruction stall bit */
+	if (drvdata->mode & ETM_MODE_ISTALL_EN)
+		drvdata->stall_ctrl |= BIT(8);
+	else
+		drvdata->stall_ctrl &= ~BIT(8);
+
+	/* bit[10], Prioritize instruction trace bit */
+	if (drvdata->mode & ETM_MODE_INSTPRIO)
+		drvdata->stall_ctrl |= BIT(10);
+	else
+		drvdata->stall_ctrl &= ~BIT(10);
+
+	/* bit[13], Trace overflow prevention bit */
+	if ((drvdata->mode & ETM_MODE_NOOVERFLOW) &&
+	    (drvdata->nooverflow == true))
+		drvdata->stall_ctrl |= BIT(13);
+	else
+		drvdata->stall_ctrl &= ~BIT(13);
+
+	/* bit[9] Start/stop logic control bit */
+	if (drvdata->mode & ETM_MODE_VIEWINST_STARTSTOP)
+		drvdata->vinst_ctrl |= BIT(9);
+	else
+		drvdata->vinst_ctrl &= ~BIT(9);
+
+	/* bit[10], Whether a trace unit must trace a Reset exception */
+	if (drvdata->mode & ETM_MODE_TRACE_RESET)
+		drvdata->vinst_ctrl |= BIT(10);
+	else
+		drvdata->vinst_ctrl &= ~BIT(10);
+
+	/* bit[11], Whether a trace unit must trace a system error exception */
+	if ((drvdata->mode & ETM_MODE_TRACE_ERR) &&
+	    (drvdata->trc_error == true))
+		drvdata->vinst_ctrl |= BIT(11);
+	else
+		drvdata->vinst_ctrl &= ~BIT(11);
+
+	spin_unlock(&drvdata->spinlock);
+	return size;
+}
+static DEVICE_ATTR_RW(mode);
+
+static ssize_t pe_show(struct device *dev,
+		       struct device_attribute *attr,
+		       char *buf)
+{
+	unsigned long val;
+	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+	val = drvdata->pe_sel;
+	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+}
+
+static ssize_t pe_store(struct device *dev,
+			struct device_attribute *attr,
+			const char *buf, size_t size)
+{
+	unsigned long val;
+	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+	if (kstrtoul(buf, 16, &val))
+		return -EINVAL;
+
+	spin_lock(&drvdata->spinlock);
+	if (val > drvdata->nr_pe) {
+		spin_unlock(&drvdata->spinlock);
+		return -EINVAL;
+	}
+
+	drvdata->pe_sel = val;
+	spin_unlock(&drvdata->spinlock);
+	return size;
+}
+static DEVICE_ATTR_RW(pe);
+
+static ssize_t event_show(struct device *dev,
+			  struct device_attribute *attr,
+			  char *buf)
+{
+	unsigned long val;
+	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+	val = drvdata->eventctrl0;
+	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+}
+
+static ssize_t event_store(struct device *dev,
+			   struct device_attribute *attr,
+			   const char *buf, size_t size)
+{
+	unsigned long val;
+	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+	if (kstrtoul(buf, 16, &val))
+		return -EINVAL;
+
+	spin_lock(&drvdata->spinlock);
+	switch (drvdata->nr_event) {
+	case 0x0:
+		/* EVENT0, bits[7:0] */
+		drvdata->eventctrl0 = val & 0xFF;
+		break;
+	case 0x1:
+		/* EVENT1, bits[15:8] */
+		drvdata->eventctrl0 = val & 0xFFFF;
+		break;
+	case 0x2:
+		/* EVENT2, bits[23:16] */
+		drvdata->eventctrl0 = val & 0xFFFFFF;
+		break;
+	case 0x3:
+		/* EVENT3, bits[31:24] */
+		drvdata->eventctrl0 = val;
+		break;
+	default:
+		break;
+	}
+	spin_unlock(&drvdata->spinlock);
+	return size;
+}
+static DEVICE_ATTR_RW(event);
+
+static ssize_t event_instren_show(struct device *dev,
+				  struct device_attribute *attr,
+				  char *buf)
+{
+	unsigned long val;
+	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+	val = BMVAL(drvdata->eventctrl1, 0, 3);
+	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+}
+
+static ssize_t event_instren_store(struct device *dev,
+				   struct device_attribute *attr,
+				   const char *buf, size_t size)
+{
+	unsigned long val;
+	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+	if (kstrtoul(buf, 16, &val))
+		return -EINVAL;
+
+	spin_lock(&drvdata->spinlock);
+	/* start by clearing all instruction event enable bits */
+	drvdata->eventctrl1 &= ~(BIT(0) | BIT(1) | BIT(2) | BIT(3));
+	switch (drvdata->nr_event) {
+	case 0x0:
+		/* generate Event element for event 1 */
+		drvdata->eventctrl1 |= val & BIT(1);
+		break;
+	case 0x1:
+		/* generate Event element for event 1 and 2 */
+		drvdata->eventctrl1 |= val & (BIT(0) | BIT(1));
+		break;
+	case 0x2:
+		/* generate Event element for event 1, 2 and 3 */
+		drvdata->eventctrl1 |= val & (BIT(0) | BIT(1) | BIT(2));
+		break;
+	case 0x3:
+		/* generate Event element for all 4 events */
+		drvdata->eventctrl1 |= val & 0xF;
+		break;
+	default:
+		break;
+	}
+	spin_unlock(&drvdata->spinlock);
+	return size;
+}
+static DEVICE_ATTR_RW(event_instren);
+
+static ssize_t event_ts_show(struct device *dev,
+			     struct device_attribute *attr,
+			     char *buf)
+{
+	unsigned long val;
+	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+	val = drvdata->ts_ctrl;
+	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+}
+
+static ssize_t event_ts_store(struct device *dev,
+			      struct device_attribute *attr,
+			      const char *buf, size_t size)
+{
+	unsigned long val;
+	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+	if (kstrtoul(buf, 16, &val))
+		return -EINVAL;
+	if (!drvdata->ts_size)
+		return -EINVAL;
+
+	drvdata->ts_ctrl = val & ETMv4_EVENT_MASK;
+	return size;
+}
+static DEVICE_ATTR_RW(event_ts);
+
+static ssize_t syncfreq_show(struct device *dev,
+			     struct device_attribute *attr,
+			     char *buf)
+{
+	unsigned long val;
+	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+	val = drvdata->syncfreq;
+	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+}
+
+static ssize_t syncfreq_store(struct device *dev,
+			      struct device_attribute *attr,
+			      const char *buf, size_t size)
+{
+	unsigned long val;
+	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+	if (kstrtoul(buf, 16, &val))
+		return -EINVAL;
+	if (drvdata->syncpr == true)
+		return -EINVAL;
+
+	drvdata->syncfreq = val & ETMv4_SYNC_MASK;
+	return size;
+}
+static DEVICE_ATTR_RW(syncfreq);
+
+static ssize_t cyc_threshold_show(struct device *dev,
+				  struct device_attribute *attr,
+				  char *buf)
+{
+	unsigned long val;
+	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+	val = drvdata->ccctlr;
+	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+}
+
+static ssize_t cyc_threshold_store(struct device *dev,
+				   struct device_attribute *attr,
+				   const char *buf, size_t size)
+{
+	unsigned long val;
+	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+	if (kstrtoul(buf, 16, &val))
+		return -EINVAL;
+	if (val < drvdata->ccitmin)
+		return -EINVAL;
+
+	drvdata->ccctlr = val & ETM_CYC_THRESHOLD_MASK;
+	return size;
+}
+static DEVICE_ATTR_RW(cyc_threshold);
+
+static ssize_t bb_ctrl_show(struct device *dev,
+			    struct device_attribute *attr,
+			    char *buf)
+{
+	unsigned long val;
+	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+	val = drvdata->bb_ctrl;
+	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+}
+
+static ssize_t bb_ctrl_store(struct device *dev,
+			     struct device_attribute *attr,
+			     const char *buf, size_t size)
+{
+	unsigned long val;
+	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+	if (kstrtoul(buf, 16, &val))
+		return -EINVAL;
+	if (drvdata->trcbb == false)
+		return -EINVAL;
+	if (!drvdata->nr_addr_cmp)
+		return -EINVAL;
+	/*
+	 * Bit[7:0] selects which address range comparator is used for
+	 * branch broadcast control.
+	 */
+	if (BMVAL(val, 0, 7) > drvdata->nr_addr_cmp)
+		return -EINVAL;
+
+	drvdata->bb_ctrl = val;
+	return size;
+}
+static DEVICE_ATTR_RW(bb_ctrl);
+
+static ssize_t event_vinst_show(struct device *dev,
+				struct device_attribute *attr,
+				char *buf)
+{
+	unsigned long val;
+	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+	val = drvdata->vinst_ctrl & ETMv4_EVENT_MASK;
+	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+}
+
+static ssize_t event_vinst_store(struct device *dev,
+				 struct device_attribute *attr,
+				 const char *buf, size_t size)
+{
+	unsigned long val;
+	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+	if (kstrtoul(buf, 16, &val))
+		return -EINVAL;
+
+	spin_lock(&drvdata->spinlock);
+	val &= ETMv4_EVENT_MASK;
+	drvdata->vinst_ctrl &= ~ETMv4_EVENT_MASK;
+	drvdata->vinst_ctrl |= val;
+	spin_unlock(&drvdata->spinlock);
+	return size;
+}
+static DEVICE_ATTR_RW(event_vinst);
+
+static ssize_t s_exlevel_vinst_show(struct device *dev,
+				    struct device_attribute *attr,
+				    char *buf)
+{
+	unsigned long val;
+	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+	val = BMVAL(drvdata->vinst_ctrl, 16, 19);
+	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+}
+
+static ssize_t s_exlevel_vinst_store(struct device *dev,
+				     struct device_attribute *attr,
+				     const char *buf, size_t size)
+{
+	unsigned long val;
+	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+	if (kstrtoul(buf, 16, &val))
+		return -EINVAL;
+
+	spin_lock(&drvdata->spinlock);
+	/* clear all EXLEVEL_S bits (bit[18] is never implemented) */
+	drvdata->vinst_ctrl &= ~(BIT(16) | BIT(17) | BIT(19));
+	/* enable instruction tracing for corresponding exception level */
+	val &= drvdata->s_ex_level;
+	drvdata->vinst_ctrl |= (val << 16);
+	spin_unlock(&drvdata->spinlock);
+	return size;
+}
+static DEVICE_ATTR_RW(s_exlevel_vinst);
+
+static ssize_t ns_exlevel_vinst_show(struct device *dev,
+				     struct device_attribute *attr,
+				     char *buf)
+{
+	unsigned long val;
+	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+	/* EXLEVEL_NS, bits[23:20] */
+	val = BMVAL(drvdata->vinst_ctrl, 20, 23);
+	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+}
+
+static ssize_t ns_exlevel_vinst_store(struct device *dev,
+				      struct device_attribute *attr,
+				      const char *buf, size_t size)
+{
+	unsigned long val;
+	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+	if (kstrtoul(buf, 16, &val))
+		return -EINVAL;
+
+	spin_lock(&drvdata->spinlock);
+	/* clear EXLEVEL_NS bits (bit[23] is never implemented) */
+	drvdata->vinst_ctrl &= ~(BIT(20) | BIT(21) | BIT(22));
+	/* enable instruction tracing for corresponding exception level */
+	val &= drvdata->ns_ex_level;
+	drvdata->vinst_ctrl |= (val << 20);
+	spin_unlock(&drvdata->spinlock);
+	return size;
+}
+static DEVICE_ATTR_RW(ns_exlevel_vinst);
+
+static ssize_t addr_idx_show(struct device *dev,
+			     struct device_attribute *attr,
+			     char *buf)
+{
+	unsigned long val;
+	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+	val = drvdata->addr_idx;
+	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+}
+
+static ssize_t addr_idx_store(struct device *dev,
+			      struct device_attribute *attr,
+			      const char *buf, size_t size)
+{
+	unsigned long val;
+	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+	if (kstrtoul(buf, 16, &val))
+		return -EINVAL;
+	if (val >= drvdata->nr_addr_cmp * 2)
+		return -EINVAL;
+
+	/*
+	 * Use spinlock to ensure index doesn't change while it gets
+	 * dereferenced multiple times within a spinlock block elsewhere.
+	 */
+	spin_lock(&drvdata->spinlock);
+	drvdata->addr_idx = val;
+	spin_unlock(&drvdata->spinlock);
+	return size;
+}
+static DEVICE_ATTR_RW(addr_idx);
+
+static ssize_t addr_instdatatype_show(struct device *dev,
+				      struct device_attribute *attr,
+				      char *buf)
+{
+	ssize_t len;
+	u8 val, idx;
+	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+	spin_lock(&drvdata->spinlock);
+	idx = drvdata->addr_idx;
+	val = BMVAL(drvdata->addr_acc[idx], 0, 1);
+	len = scnprintf(buf, PAGE_SIZE, "%s\n",
+			val == ETM_INSTR_ADDR ? "instr" :
+			(val == ETM_DATA_LOAD_ADDR ? "data_load" :
+			(val == ETM_DATA_STORE_ADDR ? "data_store" :
+			"data_load_store")));
+	spin_unlock(&drvdata->spinlock);
+	return len;
+}
+
+static ssize_t addr_instdatatype_store(struct device *dev,
+				       struct device_attribute *attr,
+				       const char *buf, size_t size)
+{
+	u8 idx;
+	char str[20] = "";
+	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+	if (strlen(buf) >= 20)
+		return -EINVAL;
+	if (sscanf(buf, "%s", str) != 1)
+		return -EINVAL;
+
+	spin_lock(&drvdata->spinlock);
+	idx = drvdata->addr_idx;
+	if (!strcmp(str, "instr"))
+		/* TYPE, bits[1:0] */
+		drvdata->addr_acc[idx] &= ~(BIT(0) | BIT(1));
+
+	spin_unlock(&drvdata->spinlock);
+	return size;
+}
+static DEVICE_ATTR_RW(addr_instdatatype);
+
+static ssize_t addr_single_show(struct device *dev,
+				struct device_attribute *attr,
+				char *buf)
+{
+	u8 idx;
+	unsigned long val;
+	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+	idx = drvdata->addr_idx;
+	spin_lock(&drvdata->spinlock);
+	if (!(drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
+	      drvdata->addr_type[idx] == ETM_ADDR_TYPE_SINGLE)) {
+		spin_unlock(&drvdata->spinlock);
+		return -EPERM;
+	}
+	val = (unsigned long)drvdata->addr_val[idx];
+	spin_unlock(&drvdata->spinlock);
+	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+}
+
+static ssize_t addr_single_store(struct device *dev,
+				 struct device_attribute *attr,
+				 const char *buf, size_t size)
+{
+	u8 idx;
+	unsigned long val;
+	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);