| author | Mel Gorman <mgorman@techsingularity.net> | 2023-10-10 09:31:40 +0100 |
|---|---|---|
| committer | Greg Kroah-Hartman <gregkh@linuxfoundation.org> | 2024-10-04 16:29:21 +0200 |
| commit | 6654e54ae7e7cb2ceb3da85e72ebb7487996a9d9 | |
| tree | edf5f96cc4d9c19a6ad9d4734921f340c5787da4 /kernel | |
| parent | 707e9a6c880f82d17a684278b8842b08f885c203 | |
sched/numa: Trace decisions related to skipping VMAs
[ Upstream commit ed2da8b725b932b1e2b2f4835bb664d47ed03031 ]
NUMA balancing skips or scans VMAs for a variety of reasons. In preparation
for completing scans of VMAs regardless of PID access, trace the reasons
why a VMA was skipped. In a later patch, the tracing will be used to track
if a VMA was forcibly scanned.
Signed-off-by: Mel Gorman <mgorman@techsingularity.net>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Link: https://lore.kernel.org/r/20231010083143.19593-4-mgorman@techsingularity.net
Stable-dep-of: f22cde4371f3 ("sched/numa: Fix the vma scan starving issue")
Signed-off-by: Sasha Levin <sashal@kernel.org>
Diffstat (limited to 'kernel')
| -rw-r--r-- | kernel/sched/fair.c | 17 |
1 file changed, 13 insertions(+), 4 deletions(-)
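The five NUMAB_SKIP_* reason codes used in the diff below come from a companion hunk that falls outside this kernel/-filtered diffstat (upstream, the reason codes are declared in a header, likely include/linux/sched/numa_balancing.h). A sketch of the reason enum, inferred purely from the values the diff references:

```c
/*
 * Sketch only: reconstructed from the reason codes the diff below passes to
 * trace_sched_skip_vma_numa(). The authoritative definition is in a header
 * outside the kernel/-filtered view shown on this page.
 */
enum numa_vmaskip_reason {
	NUMAB_SKIP_UNSUITABLE,		/* !migratable, hugetlb, VM_MIXEDMAP... */
	NUMAB_SKIP_SHARED_RO,		/* shared read-only file mapping */
	NUMAB_SKIP_INACCESSIBLE,	/* no PROT_READ/WRITE/EXEC access */
	NUMAB_SKIP_SCAN_DELAY,		/* new VMA, scan deliberately delayed */
	NUMAB_SKIP_PID_INACTIVE,	/* task has not accessed this VMA */
};
```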
```diff
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index d07fb0a0da80..7c5f6e94b3cd 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -3285,6 +3285,7 @@ static void task_numa_work(struct callback_head *work)
 	do {
 		if (!vma_migratable(vma) || !vma_policy_mof(vma) ||
 			is_vm_hugetlb_page(vma) || (vma->vm_flags & VM_MIXEDMAP)) {
+			trace_sched_skip_vma_numa(mm, vma, NUMAB_SKIP_UNSUITABLE);
 			continue;
 		}
 
@@ -3295,15 +3296,19 @@ static void task_numa_work(struct callback_head *work)
 		 * as migrating the pages will be of marginal benefit.
 		 */
 		if (!vma->vm_mm ||
-		    (vma->vm_file && (vma->vm_flags & (VM_READ|VM_WRITE)) == (VM_READ)))
+		    (vma->vm_file && (vma->vm_flags & (VM_READ|VM_WRITE)) == (VM_READ))) {
+			trace_sched_skip_vma_numa(mm, vma, NUMAB_SKIP_SHARED_RO);
 			continue;
+		}
 
 		/*
 		 * Skip inaccessible VMAs to avoid any confusion between
 		 * PROT_NONE and NUMA hinting ptes
 		 */
-		if (!vma_is_accessible(vma))
+		if (!vma_is_accessible(vma)) {
+			trace_sched_skip_vma_numa(mm, vma, NUMAB_SKIP_INACCESSIBLE);
 			continue;
+		}
 
 		/* Initialise new per-VMA NUMAB state. */
 		if (!vma->numab_state) {
@@ -3325,12 +3330,16 @@ static void task_numa_work(struct callback_head *work)
 		 * delay the scan for new VMAs.
 		 */
 		if (mm->numa_scan_seq && time_before(jiffies,
-						vma->numab_state->next_scan))
+						vma->numab_state->next_scan)) {
+			trace_sched_skip_vma_numa(mm, vma, NUMAB_SKIP_SCAN_DELAY);
 			continue;
+		}
 
 		/* Do not scan the VMA if task has not accessed */
-		if (!vma_is_accessed(vma))
+		if (!vma_is_accessed(vma)) {
+			trace_sched_skip_vma_numa(mm, vma, NUMAB_SKIP_PID_INACTIVE);
 			continue;
+		}
 
 		/*
 		 * RESET access PIDs regularly for old VMAs. Resetting after checking
```
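The tracepoint declaration itself also lives outside this filtered view (upstream it would sit in include/trace/events/sched.h). A minimal sketch of what the sched_skip_vma_numa event could look like, assuming the usual TRACE_EVENT() pattern; the TP_PROTO arguments match the call sites above, but the recorded fields and format string here are illustrative assumptions, not copied from the commit:

```c
/*
 * Illustrative sketch: the real declaration is in include/trace/events/sched.h
 * and is not part of this kernel/-filtered diff. Only the (mm, vma, reason)
 * prototype is confirmed by the call sites in the hunks above.
 */
TRACE_EVENT(sched_skip_vma_numa,

	TP_PROTO(struct mm_struct *mm, struct vm_area_struct *vma,
		 enum numa_vmaskip_reason reason),

	TP_ARGS(mm, vma, reason),

	TP_STRUCT__entry(
		__field(unsigned long, numa_scan_offset)	/* scan cursor in mm */
		__field(unsigned long, vm_start)		/* VMA start address */
		__field(unsigned long, vm_end)			/* VMA end address */
		__field(enum numa_vmaskip_reason, reason)	/* why it was skipped */
	),

	TP_fast_assign(
		__entry->numa_scan_offset = mm->numa_scan_offset;
		__entry->vm_start = vma->vm_start;
		__entry->vm_end = vma->vm_end;
		__entry->reason = reason;
	),

	TP_printk("numa_scan_offset=%lx vm_start=%lx vm_end=%lx reason=%d",
		  __entry->numa_scan_offset, __entry->vm_start,
		  __entry->vm_end, __entry->reason)
);
```

Once compiled in, an event declared this way appears under events/sched/ in tracefs and can be toggled at runtime like any other scheduler tracepoint, which is what lets the later patch mentioned in the changelog observe which VMAs end up forcibly scanned.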
