From a8cd9d4ce35eaeb603c3ae7633bb120de5970b3c Mon Sep 17 00:00:00 2001
From: Shakeel Butt
Date: Thu, 24 Oct 2024 18:23:00 -0700
Subject: memcg-v1: no need for memcg locking for dirty tracking

During the era of memcg charge migration, the kernel had to make sure
that dirty stat updates did not race with the charge migration.
Otherwise it might update the dirty stats of the wrong memcg. Now that
memcg charge migration is gone, there is no more race for dirty stat
updates and the previous locking can be removed.

Link: https://lkml.kernel.org/r/20241025012304.2473312-4-shakeel.butt@linux.dev
Signed-off-by: Shakeel Butt
Acked-by: Michal Hocko
Reviewed-by: Roman Gushchin
Acked-by: Johannes Weiner
Cc: Hugh Dickins
Cc: Muchun Song
Cc: Yosry Ahmed
Signed-off-by: Andrew Morton
---
 fs/buffer.c | 5 -----
 1 file changed, 5 deletions(-)

diff --git a/fs/buffer.c b/fs/buffer.c
index 1fc9a50def0b..88e765b0699f 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -736,15 +736,12 @@ bool block_dirty_folio(struct address_space *mapping, struct folio *folio)
 	 * Lock out page's memcg migration to keep PageDirty
 	 * synchronized with per-memcg dirty page counters.
 	 */
-	folio_memcg_lock(folio);
 	newly_dirty = !folio_test_set_dirty(folio);
 	spin_unlock(&mapping->i_private_lock);
 
 	if (newly_dirty)
 		__folio_mark_dirty(folio, mapping, 1);
 
-	folio_memcg_unlock(folio);
-
 	if (newly_dirty)
 		__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
 
@@ -1194,13 +1191,11 @@ void mark_buffer_dirty(struct buffer_head *bh)
 		struct folio *folio = bh->b_folio;
 		struct address_space *mapping = NULL;
 
-		folio_memcg_lock(folio);
 		if (!folio_test_set_dirty(folio)) {
 			mapping = folio->mapping;
 			if (mapping)
 				__folio_mark_dirty(folio, mapping, 0);
 		}
-		folio_memcg_unlock(folio);
 		if (mapping)
 			__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
 	}
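
[Editor's note] For reference, this is roughly what the tail of block_dirty_folio()
looks like once the patch is applied. It is a minimal sketch reconstructed only from
the hunk context above (the surrounding buffer-head loop and declarations are elided),
not a verbatim copy of the upstream file; the added comment is the editor's, not the
kernel's:

	/*
	 * With memcg charge migration gone, the folio's memcg cannot change
	 * underneath us, so the dirty-flag test-and-set and the per-memcg
	 * dirty stat update in __folio_mark_dirty() no longer need to be
	 * bracketed by folio_memcg_lock()/folio_memcg_unlock().
	 */
	newly_dirty = !folio_test_set_dirty(folio);
	spin_unlock(&mapping->i_private_lock);

	if (newly_dirty)
		__folio_mark_dirty(folio, mapping, 1);

	if (newly_dirty)
		__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);

The same reasoning applies to mark_buffer_dirty() in the second hunk: the remaining
mapping->i_private_lock and the folio dirty bit are enough to keep PageDirty and the
dirty counters consistent once charge migration can no longer move the folio to a
different memcg mid-update.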