author     Zi Yan <ziy@nvidia.com>  2025-07-18 14:37:20 -0400
committer  Andrew Morton <akpm@linux-foundation.org>  2025-07-24 19:12:39 -0700
commit     fde47708f9bc7f7babe4f48284f19d92faa06891 (patch)
tree       cf983b845340c7fed7a9fcb942787f44f97184a5 /mm/huge_memory.c
parent     a3871560ffc5755e561b75e257d2b15b19395608 (diff)
mm/huge_memory: refactor after-split (page) cache code
Smatch/Coverity checkers report NULL mapping referencing issues[1][2][3]
every time the code is modified, because they cannot tell that mapping
cannot be NULL when a folio is in the page cache at this point in the
code.  Refactor the code to make this explicit.

Remove "end = -1" for anonymous folios, since after the refactoring, end
is no longer used by the anonymous folio handling code.

No functional change is intended.

Link: https://lkml.kernel.org/r/20250718023000.4044406-7-ziy@nvidia.com
Link: https://lore.kernel.org/linux-mm/2afe3d59-aca5-40f7-82a3-a6d976fb0f4f@stanley.mountain/ [1]
Link: https://lore.kernel.org/oe-kbuild/64b54034-f311-4e7d-b935-c16775dbb642@suswa.mountain/ [2]
Link: https://lore.kernel.org/linux-mm/20250716145804.4836-1-antonio@mandelbit.com/ [3]
Link: https://lkml.kernel.org/r/20250718183720.4054515-7-ziy@nvidia.com
Signed-off-by: Zi Yan <ziy@nvidia.com>
Suggested-by: David Hildenbrand <david@redhat.com>
Acked-by: David Hildenbrand <david@redhat.com>
Reviewed-by: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
Cc: Balbir Singh <balbirs@nvidia.com>
Cc: Baolin Wang <baolin.wang@linux.alibaba.com>
Cc: Barry Song <baohua@kernel.org>
Cc: Dan Carpenter <dan.carpenter@linaro.org>
Cc: Dev Jain <dev.jain@arm.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Kirill A. Shutemov <k.shutemov@gmail.com>
Cc: Liam Howlett <liam.howlett@oracle.com>
Cc: Nico Pache <npache@redhat.com>
Cc: Matthew Brost <matthew.brost@intel.com>
Cc: Ryan Roberts <ryan.roberts@arm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
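The shape of the fix is worth spelling out.  In a chain of "else if"
branches, a checker must prove that the earlier conditions exclude a
NULL mapping before the dereference; early-continue guards eliminate
each case outright.  The following minimal userspace C sketch mirrors
that control flow; every type and name in it is an illustrative
stand-in, not the kernel API:

	#include <stddef.h>
	#include <stdio.h>

	/* Illustrative stand-ins, not the kernel's types. */
	struct mapping {
		const char *name;
	};

	struct item {
		struct mapping *mapping;	/* NULL for "anonymous" items */
		int in_swap_cache;
		long index;
	};

	/*
	 * Refactored shape: dismiss each case with an early continue.
	 * Once the two anonymous cases are gone, every remaining path
	 * dereferences a mapping that is provably non-NULL, which a
	 * checker can verify without cross-branch reasoning.
	 */
	static void process(struct item *items, int n, long end)
	{
		int i;

		for (i = 0; i < n; i++) {
			struct item *it = &items[i];

			if (it->in_swap_cache)
				continue;	/* anonymous, in swap cache */

			if (!it->mapping)
				continue;	/* anonymous, no swap cache */

			if (it->index < end) {
				printf("keep %s index %ld\n",
				       it->mapping->name, it->index);
				continue;
			}

			/* beyond "end": mapping cannot be NULL here */
			printf("drop %s index %ld\n",
			       it->mapping->name, it->index);
		}
	}

	int main(void)
	{
		struct mapping m = { "file" };
		struct item items[] = {
			{ NULL, 1, 0 },	/* swap cache case */
			{ NULL, 0, 0 },	/* anonymous case */
			{ &m,   0, 1 },	/* in range: kept */
			{ &m,   0, 9 },	/* beyond end: dropped */
		};

		process(items, 4, 5);
		return 0;
	}

In the guard style, every path that reaches the final dereference has
already ruled out both anonymous cases, which is exactly the invariant
the patch below makes explicit for Smatch and Coverity.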
Diffstat (limited to 'mm/huge_memory.c')
-rw-r--r--  mm/huge_memory.c | 44 ++++++++++++++++++++++++++++----------------
1 file changed, 28 insertions(+), 16 deletions(-)
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 19e69704fcff..9c38a95e9f09 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -3640,7 +3640,6 @@ static int __folio_split(struct folio *folio, unsigned int new_order,
 			ret = -EBUSY;
 			goto out;
 		}
-		end = -1;
 		mapping = NULL;
 		anon_vma_lock_write(anon_vma);
 	} else {
@@ -3793,6 +3792,8 @@ static int __folio_split(struct folio *folio, unsigned int new_order,
 	 */
 	for (new_folio = folio_next(folio); new_folio != end_folio;
 	     new_folio = next) {
+		unsigned long nr_pages = folio_nr_pages(new_folio);
+
 		next = folio_next(new_folio);
 
 		expected_refs = folio_expected_ref_count(new_folio) + 1;
@@ -3800,25 +3801,36 @@ static int __folio_split(struct folio *folio, unsigned int new_order,
 
 		lru_add_split_folio(folio, new_folio, lruvec, list);
 
-		/* Some pages can be beyond EOF: drop them from cache */
-		if (new_folio->index >= end) {
-			if (shmem_mapping(mapping))
-				nr_shmem_dropped += folio_nr_pages(new_folio);
-			else if (folio_test_clear_dirty(new_folio))
-				folio_account_cleaned(
-					new_folio,
-					inode_to_wb(mapping->host));
-			__filemap_remove_folio(new_folio, NULL);
-			folio_put_refs(new_folio,
-				       folio_nr_pages(new_folio));
-		} else if (mapping) {
-			__xa_store(&mapping->i_pages, new_folio->index,
-				   new_folio, 0);
-		} else if (swap_cache) {
+		/*
+		 * Anonymous folio with swap cache.
+		 * NOTE: shmem in swap cache is not supported yet.
+		 */
+		if (swap_cache) {
 			__xa_store(&swap_cache->i_pages,
 				   swap_cache_index(new_folio->swap),
 				   new_folio, 0);
+			continue;
+		}
+
+		/* Anonymous folio without swap cache */
+		if (!mapping)
+			continue;
+
+		/* Add the new folio to the page cache. */
+		if (new_folio->index < end) {
+			__xa_store(&mapping->i_pages, new_folio->index,
+				   new_folio, 0);
+			continue;
 		}
+
+		/* Drop folio beyond EOF: ->index >= end */
+		if (shmem_mapping(mapping))
+			nr_shmem_dropped += nr_pages;
+		else if (folio_test_clear_dirty(new_folio))
+			folio_account_cleaned(
+				new_folio, inode_to_wb(mapping->host));
+		__filemap_remove_folio(new_folio, NULL);
+		folio_put_refs(new_folio, nr_pages);
 	}
 
 	/*
 	 * Unfreeze @folio only after all page cache entries, which