| field | value | date |
|---|---|---|
| author | Andreas Gruenbacher <agruenba@redhat.com> | 2024-04-12 21:58:15 +0200 |
| committer | Greg Kroah-Hartman <gregkh@linuxfoundation.org> | 2024-12-09 10:32:38 +0100 |
| commit | d7f0c4c95652a67122c3f0f2f30d669741f3a140 (patch) | |
| tree | 5c563dcd3293c94456445d3c950b461b139dd2bf /fs/gfs2 | |
| parent | 6e92dd3ac2468363bfe1813880d27f86fa1fccb5 (diff) | |
gfs2: Remove and replace gfs2_glock_queue_work
[ Upstream commit 1e86044402c45b70a9b31beeaefb5cc732a7470c ]
There are no more callers of gfs2_glock_queue_work() left, so remove
that helper. With that, we can now rename __gfs2_glock_queue_work()
back to gfs2_glock_queue_work() to get rid of some unnecessary clutter.
Signed-off-by: Andreas Gruenbacher <agruenba@redhat.com>
Signed-off-by: Sasha Levin <sashal@kernel.org>
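For context, here is a minimal sketch of the call-site pattern that appears throughout the diff below; it is illustrative only and not part of the patch, and the wrapper name `example_queue_glock_work()` is hypothetical. After the rename, `gfs2_glock_queue_work()` keeps the contract the old `__gfs2_glock_queue_work()` had: it must be called with `gl->gl_lockref.lock` held, and the caller passes one glock reference on to the glock work queue.

```c
/*
 * Illustrative sketch (not part of the patch): typical call-site pattern
 * for the renamed helper, as seen in gfs2_glock_nq(), gfs2_glock_complete(),
 * thaw_glock(), etc. in the diff below.
 */
static void example_queue_glock_work(struct gfs2_glock *gl)
{
	spin_lock(&gl->gl_lockref.lock);
	gl->gl_lockref.count++;		/* reference handed to the work queue */
	gfs2_glock_queue_work(gl, 0);	/* run glock_work_func() without delay */
	spin_unlock(&gl->gl_lockref.lock);
}
```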
Diffstat (limited to 'fs/gfs2')
| mode | path | lines changed |
|---|---|---|
| -rw-r--r-- | fs/gfs2/glock.c | 35 |

1 file changed, 15 insertions(+), 20 deletions(-)
```diff
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index f38d8558f4c1..2c0908a30210 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -274,7 +274,7 @@ static void gfs2_glock_remove_from_lru(struct gfs2_glock *gl)
  * Enqueue the glock on the work queue. Passes one glock reference on to the
  * work queue.
  */
-static void __gfs2_glock_queue_work(struct gfs2_glock *gl, unsigned long delay) {
+static void gfs2_glock_queue_work(struct gfs2_glock *gl, unsigned long delay) {
 	if (!queue_delayed_work(glock_workqueue, &gl->gl_work, delay)) {
 		/*
 		 * We are holding the lockref spinlock, and the work was still
@@ -287,12 +287,6 @@ static void __gfs2_glock_queue_work(struct gfs2_glock *gl, unsigned long delay)
 	}
 }
 
-static void gfs2_glock_queue_work(struct gfs2_glock *gl, unsigned long delay) {
-	spin_lock(&gl->gl_lockref.lock);
-	__gfs2_glock_queue_work(gl, delay);
-	spin_unlock(&gl->gl_lockref.lock);
-}
-
 static void __gfs2_glock_put(struct gfs2_glock *gl)
 {
 	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
@@ -337,7 +331,8 @@ void gfs2_glock_put_async(struct gfs2_glock *gl)
 	if (lockref_put_or_lock(&gl->gl_lockref))
 		return;
 
-	__gfs2_glock_queue_work(gl, 0);
+	GLOCK_BUG_ON(gl, gl->gl_lockref.count != 1);
+	gfs2_glock_queue_work(gl, 0);
 	spin_unlock(&gl->gl_lockref.lock);
 }
 
@@ -814,7 +809,7 @@ skip_inval:
 		 */
 		clear_bit(GLF_LOCK, &gl->gl_flags);
 		clear_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags);
-		__gfs2_glock_queue_work(gl, GL_GLOCK_DFT_HOLD);
+		gfs2_glock_queue_work(gl, GL_GLOCK_DFT_HOLD);
 		return;
 	} else {
 		clear_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags);
@@ -844,7 +839,7 @@ skip_inval:
 
 	/* Complete the operation now. */
 	finish_xmote(gl, target);
-	__gfs2_glock_queue_work(gl, 0);
+	gfs2_glock_queue_work(gl, 0);
 }
 
 /**
@@ -891,7 +886,7 @@ out_sched:
 	clear_bit(GLF_LOCK, &gl->gl_flags);
 	smp_mb__after_atomic();
 	gl->gl_lockref.count++;
-	__gfs2_glock_queue_work(gl, 0);
+	gfs2_glock_queue_work(gl, 0);
 	return;
 
 out_unlock:
@@ -1124,12 +1119,12 @@ static void glock_work_func(struct work_struct *work)
 		drop_refs--;
 		if (gl->gl_name.ln_type != LM_TYPE_INODE)
 			delay = 0;
-		__gfs2_glock_queue_work(gl, delay);
+		gfs2_glock_queue_work(gl, delay);
 	}
 
 	/*
 	 * Drop the remaining glock references manually here. (Mind that
-	 * __gfs2_glock_queue_work depends on the lockref spinlock begin held
+	 * gfs2_glock_queue_work depends on the lockref spinlock begin held
 	 * here as well.)
 	 */
 	gl->gl_lockref.count -= drop_refs;
@@ -1616,7 +1611,7 @@ int gfs2_glock_nq(struct gfs2_holder *gh)
 		     test_and_clear_bit(GLF_FROZEN, &gl->gl_flags))) {
 		set_bit(GLF_REPLY_PENDING, &gl->gl_flags);
 		gl->gl_lockref.count++;
-		__gfs2_glock_queue_work(gl, 0);
+		gfs2_glock_queue_work(gl, 0);
 	}
 	run_queue(gl, 1);
 	spin_unlock(&gl->gl_lockref.lock);
@@ -1681,7 +1676,7 @@ static void __gfs2_glock_dq(struct gfs2_holder *gh)
 		    !test_bit(GLF_DEMOTE, &gl->gl_flags) &&
 		    gl->gl_name.ln_type == LM_TYPE_INODE)
 			delay = gl->gl_hold_time;
-		__gfs2_glock_queue_work(gl, delay);
+		gfs2_glock_queue_work(gl, delay);
 	}
 }
 
@@ -1905,7 +1900,7 @@ void gfs2_glock_cb(struct gfs2_glock *gl, unsigned int state)
 			delay = gl->gl_hold_time;
 	}
 	handle_callback(gl, state, delay, true);
-	__gfs2_glock_queue_work(gl, delay);
+	gfs2_glock_queue_work(gl, delay);
 	spin_unlock(&gl->gl_lockref.lock);
 }
 
@@ -1965,7 +1960,7 @@ void gfs2_glock_complete(struct gfs2_glock *gl, int ret)
 
 	gl->gl_lockref.count++;
 	set_bit(GLF_REPLY_PENDING, &gl->gl_flags);
-	__gfs2_glock_queue_work(gl, 0);
+	gfs2_glock_queue_work(gl, 0);
 	spin_unlock(&gl->gl_lockref.lock);
 }
 
@@ -2025,7 +2020,7 @@ add_back_to_lru:
 		gl->gl_lockref.count++;
 		if (demote_ok(gl))
 			handle_callback(gl, LM_ST_UNLOCKED, 0, false);
-		__gfs2_glock_queue_work(gl, 0);
+		gfs2_glock_queue_work(gl, 0);
 		spin_unlock(&gl->gl_lockref.lock);
 		cond_resched_lock(&lru_lock);
 	}
@@ -2163,7 +2158,7 @@ static void thaw_glock(struct gfs2_glock *gl)
 
 	spin_lock(&gl->gl_lockref.lock);
 	set_bit(GLF_REPLY_PENDING, &gl->gl_flags);
-	__gfs2_glock_queue_work(gl, 0);
+	gfs2_glock_queue_work(gl, 0);
 	spin_unlock(&gl->gl_lockref.lock);
 }
 
@@ -2182,7 +2177,7 @@ static void clear_glock(struct gfs2_glock *gl)
 		gl->gl_lockref.count++;
 		if (gl->gl_state != LM_ST_UNLOCKED)
 			handle_callback(gl, LM_ST_UNLOCKED, 0, false);
-		__gfs2_glock_queue_work(gl, 0);
+		gfs2_glock_queue_work(gl, 0);
 	}
 	spin_unlock(&gl->gl_lockref.lock);
 }
```
