author     Trond Myklebust <trond.myklebust@hammerspace.com>    2024-11-04 21:09:21 -0500
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>      2024-12-05 14:03:05 +0100
commit     8f95ffb8f86e79e3d44566b8c5a1a925ea639f8c (patch)
tree       c040513aa61e418bbc676a1357bcce859d019848
parent     c5b75986dbae69f58cab94f05d2d28a24f3b2c34 (diff)
Revert "nfs: don't reuse partially completed requests in nfs_lock_and_join_requests"
[ Upstream commit 66f9dac9077c9c063552e465212abeb8f97d28a7 ]
This reverts commit b571cfcb9dcac187c6d967987792d37cb0688610.
This patch appears to assume that if one request is complete, then the
others will complete too before unlocking. That is not a valid
assumption, since other requests could hit a non-fatal error or a short
write that would cause them not to complete.
Reported-by: Igor Raits <igor@gooddata.com>
Link: https://bugzilla.kernel.org/show_bug.cgi?id=219508
Fixes: b571cfcb9dca ("nfs: don't reuse partially completed requests in nfs_lock_and_join_requests")
Signed-off-by: Trond Myklebust <trond.myklebust@hammerspace.com>
Signed-off-by: Sasha Levin <sashal@kernel.org>
-rw-r--r--  fs/nfs/write.c | 49 +++++++++++++++++++++++++++++--------------------
1 file changed, 29 insertions(+), 20 deletions(-)
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index ead2dc55952d..82ae2b85d393 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -144,6 +144,31 @@ static void nfs_io_completion_put(struct nfs_io_completion *ioc)
 	kref_put(&ioc->refcount, nfs_io_completion_release);
 }

+static void
+nfs_page_set_inode_ref(struct nfs_page *req, struct inode *inode)
+{
+	if (!test_and_set_bit(PG_INODE_REF, &req->wb_flags)) {
+		kref_get(&req->wb_kref);
+		atomic_long_inc(&NFS_I(inode)->nrequests);
+	}
+}
+
+static int
+nfs_cancel_remove_inode(struct nfs_page *req, struct inode *inode)
+{
+	int ret;
+
+	if (!test_bit(PG_REMOVE, &req->wb_flags))
+		return 0;
+	ret = nfs_page_group_lock(req);
+	if (ret)
+		return ret;
+	if (test_and_clear_bit(PG_REMOVE, &req->wb_flags))
+		nfs_page_set_inode_ref(req, inode);
+	nfs_page_group_unlock(req);
+	return 0;
+}
+
 /**
  * nfs_folio_find_head_request - find head request associated with a folio
  * @folio: pointer to folio
@@ -540,7 +565,6 @@ static struct nfs_page *nfs_lock_and_join_requests(struct folio *folio)
 	struct inode *inode = folio->mapping->host;
 	struct nfs_page *head, *subreq;
 	struct nfs_commit_info cinfo;
-	bool removed;
 	int ret;

 	/*
@@ -565,18 +589,18 @@ retry:
 		goto retry;
 	}

-	ret = nfs_page_group_lock(head);
+	ret = nfs_cancel_remove_inode(head, inode);
 	if (ret < 0)
 		goto out_unlock;

-	removed = test_bit(PG_REMOVE, &head->wb_flags);
+	ret = nfs_page_group_lock(head);
+	if (ret < 0)
+		goto out_unlock;

 	/* lock each request in the page group */
 	for (subreq = head->wb_this_page;
 	     subreq != head;
 	     subreq = subreq->wb_this_page) {
-		if (test_bit(PG_REMOVE, &subreq->wb_flags))
-			removed = true;
 		ret = nfs_page_group_lock_subreq(head, subreq);
 		if (ret < 0)
 			goto out_unlock;
@@ -584,21 +608,6 @@ retry:

 	nfs_page_group_unlock(head);

-	/*
-	 * If PG_REMOVE is set on any request, I/O on that request has
-	 * completed, but some requests were still under I/O at the time
-	 * we locked the head request.
-	 *
-	 * In that case the above wait for all requests means that all I/O
-	 * has now finished, and we can restart from a clean slate. Let the
-	 * old requests go away and start from scratch instead.
-	 */
-	if (removed) {
-		nfs_unroll_locks(head, head);
-		nfs_unlock_and_release_request(head);
-		goto retry;
-	}
-
 	nfs_init_cinfo_from_inode(&cinfo, inode);
 	nfs_join_page_group(head, &cinfo, inode);
 	return head;
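The helpers restored above depend on an atomic test-and-set of a flag bit: whichever path sets PG_INODE_REF first is the one that takes the extra kref and bumps the per-inode request count, so the helper can be called repeatedly without double-counting. Below is a minimal userspace C sketch of that claim-once idiom using C11 atomics; it is not kernel code, and the names (struct request, REQ_INODE_REF, take_inode_ref, struct inode_ctx) are illustrative stand-ins for nfs_page_set_inode_ref() and PG_INODE_REF, not actual kernel API.

/* Userspace sketch of the claim-once reference idiom; all names illustrative. */
#include <stdatomic.h>
#include <stdio.h>

#define REQ_INODE_REF (1u << 0)        /* stands in for PG_INODE_REF */

struct request {
        atomic_uint flags;             /* stands in for req->wb_flags */
        atomic_uint refcount;          /* stands in for req->wb_kref */
};

struct inode_ctx {
        atomic_long nrequests;         /* stands in for NFS_I(inode)->nrequests */
};

/* Take the per-inode reference at most once, however often this is called. */
static void take_inode_ref(struct request *req, struct inode_ctx *inode)
{
        unsigned int old = atomic_fetch_or(&req->flags, REQ_INODE_REF);

        if (!(old & REQ_INODE_REF)) {  /* this caller set the bit first */
                atomic_fetch_add(&req->refcount, 1);
                atomic_fetch_add(&inode->nrequests, 1);
        }
}

int main(void)
{
        struct request req = { .flags = 0, .refcount = 1 };
        struct inode_ctx inode = { .nrequests = 0 };

        take_inode_ref(&req, &inode);
        take_inode_ref(&req, &inode); /* no-op: the bit is already set */

        printf("refcount=%u nrequests=%ld\n",
               atomic_load(&req.refcount), atomic_load(&inode.nrequests));
        return 0;
}

In the patch, test_and_set_bit() plays the role of atomic_fetch_or() here: the returned old bit value tells the caller whether it won the race and therefore owns the job of taking the references.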