author	Kan Liang <kan.liang@linux.intel.com>	2023-01-31 15:37:39 +0800
committer	Joerg Roedel <jroedel@suse.de>	2023-02-03 11:06:08 +0100
commit	4a0d4265659b1078db3432cb80ceaf26ad921704
tree	1234262c8a2137cac85c35f39beac9281a3e660b /drivers/iommu/intel/perfmon.c
parent	46284c6ceb5e4dfddcb00dafb7c2f3c1437fdca4
iommu/vt-d: Add IOMMU perfmon overflow handler support
When a counter is enabled and an event occurrence causes its value to
increment and roll over to or past zero, this is termed a counter
overflow. The overflow can trigger an interrupt, and the IOMMU perfmon
needs to handle it properly.
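The handler folds each overflowed counter into its event via
iommu_pmu_event_update(), which is called in the diff below but defined
elsewhere in the series. For background, here is a minimal sketch of the
wraparound-safe accumulation such an update typically performs (the
helper name and shape are illustrative, assuming a free-running counter
of cntr_width bits):

```c
#include <linux/bits.h>
#include <linux/types.h>

/*
 * Illustrative sketch, not the driver code: compute how far a
 * free-running hardware counter of 'cntr_width' bits advanced
 * between two reads. Because the subtraction is taken modulo the
 * counter width, a single roll-over past zero is still counted.
 */
static u64 counter_delta(u64 prev, u64 now, unsigned int cntr_width)
{
	return (now - prev) & GENMASK_ULL(cntr_width - 1, 0);
}
```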
New hardware IRQs are allocated, one per IOMMU device, for perfmon. The
IRQ IDs are placed after the SVM range.
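A sketch of the resulting hardware IRQ ID layout follows.
IOMMU_IRQ_ID_OFFSET_PERF is the name used in the diff below; the PRQ
(SVM page request) offset name is an assumption based on the rest of
the series, so treat the exact definitions as illustrative:

```c
/*
 * Sketch of the hardware IRQ ID space (illustrative): IDs
 * [0, DMAR_UNITS_SUPPORTED) serve fault reporting, the next block
 * serves SVM page requests (PRQ), and the block after that, added
 * by this patch, serves perfmon overflow interrupts.
 */
#define IOMMU_IRQ_ID_OFFSET_PRQ		(DMAR_UNITS_SUPPORTED)
#define IOMMU_IRQ_ID_OFFSET_PERF	(2 * DMAR_UNITS_SUPPORTED)
```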
In the overflow handler, the counter is not frozen. It is very unlikely
that the same counter overflows again while the handler runs, but other
counters may overflow at the same time. The handler therefore re-reads
the overflow register at the end and checks whether there is more to
handle, as the sketch below and iommu_pmu_counter_overflow() in the
diff illustrate.
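In sketch form, the drain pattern looks like this (register helpers are
hypothetical; writing the status value back acknowledges the handled
bits, which the diff's dmar_writeq() of the status value suggests is
write-1-to-clear behavior):

```c
/*
 * Illustrative drain loop (not the driver code), built on two
 * hypothetical register helpers. Re-reading the overflow register
 * after acknowledging means a counter that overflows while the
 * handler runs is still picked up before the handler returns.
 */
static void drain_overflow(struct iommu_pmu *iommu_pmu)
{
	u64 status;

	while ((status = read_overflow_reg(iommu_pmu))) {
		handle_overflowed_counters(iommu_pmu, status);
		/* Write the handled bits back to acknowledge them (W1C). */
		write_overflow_reg(iommu_pmu, status);
	}
}
```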
Signed-off-by: Kan Liang <kan.liang@linux.intel.com>
Link: https://lore.kernel.org/r/20230128200428.1459118-7-kan.liang@linux.intel.com
Signed-off-by: Lu Baolu <baolu.lu@linux.intel.com>
Signed-off-by: Joerg Roedel <jroedel@suse.de>
Diffstat (limited to 'drivers/iommu/intel/perfmon.c')
-rw-r--r--	drivers/iommu/intel/perfmon.c	82
1 file changed, 82 insertions, 0 deletions
```diff
diff --git a/drivers/iommu/intel/perfmon.c b/drivers/iommu/intel/perfmon.c
index 322d362b85e4..e17d9743a0d8 100644
--- a/drivers/iommu/intel/perfmon.c
+++ b/drivers/iommu/intel/perfmon.c
@@ -505,6 +505,49 @@ static void iommu_pmu_disable(struct pmu *pmu)
 	ecmd_submit_sync(iommu, DMA_ECMD_FREEZE, 0, 0);
 }
 
+static void iommu_pmu_counter_overflow(struct iommu_pmu *iommu_pmu)
+{
+	struct perf_event *event;
+	u64 status;
+	int i;
+
+	/*
+	 * Two counters may be overflowed very close. Always check
+	 * whether there are more to handle.
+	 */
+	while ((status = dmar_readq(iommu_pmu->overflow))) {
+		for_each_set_bit(i, (unsigned long *)&status, iommu_pmu->num_cntr) {
+			/*
+			 * Find the assigned event of the counter.
+			 * Accumulate the value into the event->count.
+			 */
+			event = iommu_pmu->event_list[i];
+			if (!event) {
+				pr_warn_once("Cannot find the assigned event for counter %d\n", i);
+				continue;
+			}
+			iommu_pmu_event_update(event);
+		}
+
+		dmar_writeq(iommu_pmu->overflow, status);
+	}
+}
+
+static irqreturn_t iommu_pmu_irq_handler(int irq, void *dev_id)
+{
+	struct intel_iommu *iommu = dev_id;
+
+	if (!dmar_readl(iommu->reg + DMAR_PERFINTRSTS_REG))
+		return IRQ_NONE;
+
+	iommu_pmu_counter_overflow(iommu->pmu);
+
+	/* Clear the status bit */
+	dmar_writel(iommu->reg + DMAR_PERFINTRSTS_REG, DMA_PERFINTRSTS_PIS);
+
+	return IRQ_HANDLED;
+}
+
 static int __iommu_pmu_register(struct intel_iommu *iommu)
 {
 	struct iommu_pmu *iommu_pmu = iommu->pmu;
@@ -698,6 +741,38 @@ void free_iommu_pmu(struct intel_iommu *iommu)
 	iommu->pmu = NULL;
 }
 
+static int iommu_pmu_set_interrupt(struct intel_iommu *iommu)
+{
+	struct iommu_pmu *iommu_pmu = iommu->pmu;
+	int irq, ret;
+
+	irq = dmar_alloc_hwirq(IOMMU_IRQ_ID_OFFSET_PERF + iommu->seq_id, iommu->node, iommu);
+	if (irq <= 0)
+		return -EINVAL;
+
+	snprintf(iommu_pmu->irq_name, sizeof(iommu_pmu->irq_name), "dmar%d-perf", iommu->seq_id);
+
+	iommu->perf_irq = irq;
+	ret = request_threaded_irq(irq, NULL, iommu_pmu_irq_handler,
+				   IRQF_ONESHOT, iommu_pmu->irq_name, iommu);
+	if (ret) {
+		dmar_free_hwirq(irq);
+		iommu->perf_irq = 0;
+		return ret;
+	}
+	return 0;
+}
+
+static void iommu_pmu_unset_interrupt(struct intel_iommu *iommu)
+{
+	if (!iommu->perf_irq)
+		return;
+
+	free_irq(iommu->perf_irq, iommu);
+	dmar_free_hwirq(iommu->perf_irq);
+	iommu->perf_irq = 0;
+}
+
 static int iommu_pmu_cpu_online(unsigned int cpu)
 {
 	if (cpumask_empty(&iommu_pmu_cpu_mask))
@@ -774,8 +849,14 @@ void iommu_pmu_register(struct intel_iommu *iommu)
 	if (iommu_pmu_cpuhp_setup(iommu_pmu))
 		goto unregister;
 
+	/* Set interrupt for overflow */
+	if (iommu_pmu_set_interrupt(iommu))
+		goto cpuhp_free;
+
 	return;
 
+cpuhp_free:
+	iommu_pmu_cpuhp_free(iommu_pmu);
 unregister:
 	perf_pmu_unregister(&iommu_pmu->pmu);
 err:
@@ -790,6 +871,7 @@ void iommu_pmu_unregister(struct intel_iommu *iommu)
 	if (!iommu_pmu)
 		return;
 
+	iommu_pmu_unset_interrupt(iommu);
 	iommu_pmu_cpuhp_free(iommu_pmu);
 	perf_pmu_unregister(&iommu_pmu->pmu);
 }
```
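One design choice worth noting in iommu_pmu_set_interrupt(): the primary
handler passed to request_threaded_irq() is NULL, so overflow processing
runs in a kernel thread, and IRQF_ONESHOT keeps the line masked until
that thread returns. A minimal standalone sketch of the same pattern
(device and handler names hypothetical):

```c
#include <linux/interrupt.h>

struct my_dev;	/* hypothetical device context */

/* Runs in process context; slow register work is safe here. */
static irqreturn_t my_overflow_thread_fn(int irq, void *dev_id)
{
	/* struct my_dev *dev = dev_id; ... handle the overflow ... */
	return IRQ_HANDLED;
}

/*
 * With a NULL primary handler the IRQ core installs its default one,
 * and IRQF_ONESHOT keeps the interrupt masked until the thread
 * function returns, so the handler cannot race with itself.
 */
static int my_dev_setup_irq(int irq, struct my_dev *dev)
{
	return request_threaded_irq(irq, NULL, my_overflow_thread_fn,
				    IRQF_ONESHOT, "my-dev-perf", dev);
}
```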
