author | Joshua Hahn <joshua.hahn6@gmail.com> | 2024-10-02 11:47:16 -0700
---|---|---
committer | Greg Kroah-Hartman <gregkh@linuxfoundation.org> | 2025-04-10 14:39:35 +0200
commit | 39bc1484eb0663c84b2ec72c2960b2e01ef99545 (patch) |
tree | 96e83117b0620da14267f4efc335c46d3b0d6605 |
parent | ef79f2dec7a9df7f935530410727bb6b0ec82496 (diff) |
download | linux-39bc1484eb0663c84b2ec72c2960b2e01ef99545.tar.gz linux-39bc1484eb0663c84b2ec72c2960b2e01ef99545.tar.bz2 linux-39bc1484eb0663c84b2ec72c2960b2e01ef99545.zip |
cgroup/rstat: Tracking cgroup-level niced CPU time
[ Upstream commit aefa398d93d5db7c555be78a605ff015357f127d ]
Cgroup-level CPU statistics currently include time spent on
user/system processes, but do not include niced CPU time (despite it
already being tracked). This patch exposes niced CPU time to
userspace, giving users a better understanding of their hardware
limits and facilitating more informed workload distribution.
A new field 'ntime' is added to struct cgroup_base_stat rather than
to struct task_cputime to minimize the footprint.
Signed-off-by: Joshua Hahn <joshua.hahnjy@gmail.com>
Signed-off-by: Tejun Heo <tj@kernel.org>
Stable-dep-of: c4af66a95aa3 ("cgroup/rstat: Fix forceidle time in cpu.stat")
Signed-off-by: Sasha Levin <sashal@kernel.org>
-rw-r--r-- | include/linux/cgroup-defs.h | 1 |
-rw-r--r-- | kernel/cgroup/rstat.c | 19 |
2 files changed, 15 insertions, 5 deletions
diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h
index a32eebcd23da..38b2af336e4a 100644
--- a/include/linux/cgroup-defs.h
+++ b/include/linux/cgroup-defs.h
@@ -324,6 +324,7 @@ struct cgroup_base_stat {
 #ifdef CONFIG_SCHED_CORE
 	u64 forceidle_sum;
 #endif
+	u64 ntime;
 };
 
 /*
diff --git a/kernel/cgroup/rstat.c b/kernel/cgroup/rstat.c
index ce295b73c0a3..aac91466279f 100644
--- a/kernel/cgroup/rstat.c
+++ b/kernel/cgroup/rstat.c
@@ -444,6 +444,7 @@ static void cgroup_base_stat_add(struct cgroup_base_stat *dst_bstat,
 #ifdef CONFIG_SCHED_CORE
 	dst_bstat->forceidle_sum += src_bstat->forceidle_sum;
 #endif
+	dst_bstat->ntime += src_bstat->ntime;
 }
 
 static void cgroup_base_stat_sub(struct cgroup_base_stat *dst_bstat,
@@ -455,6 +456,7 @@ static void cgroup_base_stat_sub(struct cgroup_base_stat *dst_bstat,
 #ifdef CONFIG_SCHED_CORE
 	dst_bstat->forceidle_sum -= src_bstat->forceidle_sum;
 #endif
+	dst_bstat->ntime -= src_bstat->ntime;
 }
 
 static void cgroup_base_stat_flush(struct cgroup *cgrp, int cpu)
@@ -534,8 +536,10 @@ void __cgroup_account_cputime_field(struct cgroup *cgrp,
 	rstatc = cgroup_base_stat_cputime_account_begin(cgrp, &flags);
 
 	switch (index) {
-	case CPUTIME_USER:
 	case CPUTIME_NICE:
+		rstatc->bstat.ntime += delta_exec;
+		fallthrough;
+	case CPUTIME_USER:
 		rstatc->bstat.cputime.utime += delta_exec;
 		break;
 	case CPUTIME_SYSTEM:
@@ -590,6 +594,7 @@ static void root_cgroup_cputime(struct cgroup_base_stat *bstat)
 #ifdef CONFIG_SCHED_CORE
 		bstat->forceidle_sum += cpustat[CPUTIME_FORCEIDLE];
 #endif
+		bstat->ntime += cpustat[CPUTIME_NICE];
 	}
 }
 
@@ -607,13 +612,14 @@ static void cgroup_force_idle_show(struct seq_file *seq, struct cgroup_base_stat
 void cgroup_base_stat_cputime_show(struct seq_file *seq)
 {
 	struct cgroup *cgrp = seq_css(seq)->cgroup;
-	u64 usage, utime, stime;
+	u64 usage, utime, stime, ntime;
 
 	if (cgroup_parent(cgrp)) {
 		cgroup_rstat_flush_hold(cgrp);
 		usage = cgrp->bstat.cputime.sum_exec_runtime;
 		cputime_adjust(&cgrp->bstat.cputime, &cgrp->prev_cputime,
 			       &utime, &stime);
+		ntime = cgrp->bstat.ntime;
 		cgroup_rstat_flush_release(cgrp);
 	} else {
 		/* cgrp->bstat of root is not actually used, reuse it */
@@ -621,16 +627,19 @@ void cgroup_base_stat_cputime_show(struct seq_file *seq)
 		usage = cgrp->bstat.cputime.sum_exec_runtime;
 		utime = cgrp->bstat.cputime.utime;
 		stime = cgrp->bstat.cputime.stime;
+		ntime = cgrp->bstat.ntime;
 	}
 
 	do_div(usage, NSEC_PER_USEC);
 	do_div(utime, NSEC_PER_USEC);
 	do_div(stime, NSEC_PER_USEC);
+	do_div(ntime, NSEC_PER_USEC);
 
 	seq_printf(seq, "usage_usec %llu\n"
-			"user_usec %llu\n"
-			"system_usec %llu\n",
-			usage, utime, stime);
+			"user_usec %llu\n"
+			"system_usec %llu\n"
+			"nice_usec %llu\n",
+			usage, utime, stime, ntime);
 
 	cgroup_force_idle_show(seq, &cgrp->bstat);
 }
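As context (not part of the patch): after this change, a cgroup v2 cpu.stat file gains a nice_usec line alongside usage_usec, user_usec, and system_usec. The sketch below is an illustrative userspace reader of that field; the mount point /sys/fs/cgroup and the cgroup name "example" are assumptions, not anything defined by the patch.

```c
/*
 * Illustrative sketch only: read nice_usec from a cgroup v2 cpu.stat.
 * Assumes cgroup2 is mounted at /sys/fs/cgroup and a cgroup named
 * "example" exists; adjust the path for a real system.
 */
#include <stdio.h>
#include <string.h>

int main(void)
{
	const char *path = "/sys/fs/cgroup/example/cpu.stat";
	FILE *f = fopen(path, "r");
	char key[64];
	unsigned long long val;

	if (!f) {
		perror("fopen");
		return 1;
	}

	/* cpu.stat is "key value" per line; nice_usec is new with this patch. */
	while (fscanf(f, "%63s %llu", key, &val) == 2) {
		if (!strcmp(key, "nice_usec"))
			printf("nice_usec: %llu\n", val);
	}

	fclose(f);
	return 0;
}
```

Note the fallthrough added in __cgroup_account_cputime_field() above: CPUTIME_NICE deltas are added to ntime and then still accumulated into utime, so nice_usec reports a subset of user_usec rather than a separate bucket.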