author	Qing Wang <wangqing7171@gmail.com>	2025-04-05 22:16:35 +0800
committer	Greg Kroah-Hartman <gregkh@linuxfoundation.org>	2025-06-19 15:28:02 +0200
commit	6ce08b3ba52c1f92325fb9f487021ae1c2f779e9 (patch)
tree	a13c7120435daddfba2f739b18088e2b86f8108b /kernel/events/core.c
parent	eec0dd17db02ae1f8e431b60da3b29e7b7dfaee9 (diff)
perf/core: Fix broken throttling when max_samples_per_tick=1
[ Upstream commit f51972e6f8b9a737b2b3eb588069acb538fa72de ]

According to the throttling mechanism, the number of PMU interrupts in
one tick must not exceed max_samples_per_tick. But the mechanism is
ineffective when max_samples_per_tick=1, because the throttling check is
skipped on the first interrupt of a tick and only performed once the
second interrupt arrives.

Within a single tick the excess is small, but over a larger time scale
it adds up. When max_samples_per_tick = 1:

Allowed-interrupts-per-second  max-samples-per-second  default-HZ  ARCH
200                            100                     100         X86
500                            250                     250         ARM64
...

Clearly, the PMU interrupt rate far exceeds what the user expects.

Fixes: e050e3f0a71b ("perf: Fix broken interrupt rate throttling")
Signed-off-by: Qing Wang <wangqing7171@gmail.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lkml.kernel.org/r/20250405141635.243786-3-wangqing7171@gmail.com
Signed-off-by: Sasha Levin <sashal@kernel.org>
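To make the off-by-one concrete, here is a minimal user-space sketch,
not kernel code: the tick/seq bookkeeping is simplified, and the names
account_old, account_new, hz and irqs_offered are invented for
illustration. It counts how many samples each variant of the check lets
through per simulated second:

	#include <stdio.h>

	#define MAX_INTERRUPTS (~0u)

	/*
	 * Old check: lives inside the else branch and uses '>', so the
	 * first interrupt of each tick is never tested against the limit.
	 */
	static int account_old(unsigned int *seq, unsigned int tick,
			       unsigned int *interrupts, unsigned int max)
	{
		if (*seq != tick) {		/* first interrupt in a new tick */
			*seq = tick;
			*interrupts = 1;	/* reset; no throttle check here */
		} else {
			(*interrupts)++;
			if (*interrupts > max) {
				*interrupts = MAX_INTERRUPTS;
				return 1;	/* throttled */
			}
		}
		return 0;
	}

	/* New check: runs on every interrupt and uses '>='. */
	static int account_new(unsigned int *seq, unsigned int tick,
			       unsigned int *interrupts, unsigned int max)
	{
		if (*seq != tick) {
			*seq = tick;
			*interrupts = 1;
		} else {
			(*interrupts)++;
		}
		if (*interrupts >= max) {
			*interrupts = MAX_INTERRUPTS;
			return 1;
		}
		return 0;
	}

	int main(void)
	{
		const unsigned int hz = 100, max = 1, irqs_offered = 10;
		unsigned int seq_o = ~0u, n_o = 0, taken_o = 0;
		unsigned int seq_n = ~0u, n_n = 0, taken_n = 0;

		/*
		 * One simulated second: each tick offers plenty of interrupts;
		 * once throttled, the event takes no more until the next tick,
		 * mirroring the event being stopped by the caller.
		 */
		for (unsigned int tick = 0; tick < hz; tick++) {
			int t = 0;
			for (unsigned int i = 0; i < irqs_offered && !t; i++) {
				taken_o++;	/* this sample was delivered */
				t = account_old(&seq_o, tick, &n_o, max);
			}
			t = 0;
			for (unsigned int i = 0; i < irqs_offered && !t; i++) {
				taken_n++;
				t = account_new(&seq_n, tick, &n_n, max);
			}
		}
		printf("max_samples_per_tick=%u, HZ=%u\n", max, hz);
		printf("old check: %u samples/sec\n", taken_o);
		printf("new check: %u samples/sec\n", taken_n);
		return 0;
	}

With the defaults above it prints 200 samples/sec for the old check and
100 for the new one, matching the X86 row of the table.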
Diffstat (limited to 'kernel/events/core.c')
-rw-r--r--	kernel/events/core.c	16
1 file changed, 8 insertions(+), 8 deletions(-)
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 5dd6424e62fa..6460f79280ed 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -9553,14 +9553,14 @@ __perf_event_account_interrupt(struct perf_event *event, int throttle)
hwc->interrupts = 1;
} else {
hwc->interrupts++;
- if (unlikely(throttle &&
- hwc->interrupts > max_samples_per_tick)) {
- __this_cpu_inc(perf_throttled_count);
- tick_dep_set_cpu(smp_processor_id(), TICK_DEP_BIT_PERF_EVENTS);
- hwc->interrupts = MAX_INTERRUPTS;
- perf_log_throttle(event, 0);
- ret = 1;
- }
+ }
+
+ if (unlikely(throttle && hwc->interrupts >= max_samples_per_tick)) {
+ __this_cpu_inc(perf_throttled_count);
+ tick_dep_set_cpu(smp_processor_id(), TICK_DEP_BIT_PERF_EVENTS);
+ hwc->interrupts = MAX_INTERRUPTS;
+ perf_log_throttle(event, 0);
+ ret = 1;
}
if (event->attr.freq) {
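For background on how the degenerate value arises: max_samples_per_tick
is not set directly by the user but derived from the
kernel.perf_event_max_sample_rate sysctl. A sketch of that relationship,
paraphrased from the sysctl handler elsewhere in kernel/events/core.c
(an assumption about surrounding code, not part of this hunk):

	/* Recomputed whenever kernel.perf_event_max_sample_rate changes. */
	max_samples_per_tick = DIV_ROUND_UP(sysctl_perf_event_sample_rate, HZ);

	/*
	 * e.g. a sample rate of 100 on a HZ=100 kernel gives
	 * DIV_ROUND_UP(100, 100) == 1, the case this patch fixes.
	 */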