| author | Christian Brauner <brauner@kernel.org> | 2024-09-03 20:28:01 +0200 |
|---|---|---|
| committer | Christian Brauner <brauner@kernel.org> | 2024-09-12 12:20:42 +0200 |
| commit | 3956e7284c41629eb8f1e7104f1e73332bd1ce97 (patch) | |
| tree | 83d12530a4f1c151e7a7f843ff702d6c0db5885c | |
| parent | 4356ab331c8f0dbed0f683abde345cd5503db1e4 (diff) | |
| parent | 4aa571d67e81b5b213abf9b4daa5523beb0e58e8 (diff) | |
| download | linux-3956e7284c41629eb8f1e7104f1e73332bd1ce97.tar.gz linux-3956e7284c41629eb8f1e7104f1e73332bd1ce97.tar.bz2 linux-3956e7284c41629eb8f1e7104f1e73332bd1ce97.zip | |
Merge branch 'netfs-writeback' of ssh://gitolite.kernel.org/pub/scm/linux/kernel/git/dhowells/linux-fs into vfs.netfs
Merge patch series "netfs: Read/write improvements" from David Howells
<dhowells@redhat.com>.
* 'netfs-writeback' of ssh://gitolite.kernel.org/pub/scm/linux/kernel/git/dhowells/linux-fs: (25 commits)
cifs: Don't support ITER_XARRAY
cifs: Switch crypto buffer to use a folio_queue rather than an xarray
cifs: Use iterate_and_advance*() routines directly for hashing
netfs: Cancel dirty folios that have no storage destination
cachefiles, netfs: Fix write to partial block at EOF
netfs: Remove fs/netfs/io.c
netfs: Speed up buffered reading
afs: Make read subreqs async
netfs: Simplify the writeback code
netfs: Provide an iterator-reset function
netfs: Use new folio_queue data type and iterator instead of xarray iter
cifs: Provide the capability to extract from ITER_FOLIOQ to RDMA SGEs
iov_iter: Provide copy_folio_from_iter()
mm: Define struct folio_queue and ITER_FOLIOQ to handle a sequence of folios
netfs: Use bh-disabling spinlocks for rreq->lock
netfs: Set the request work function upon allocation
netfs: Remove NETFS_COPY_TO_CACHE
netfs: Reserve netfs_sreq_source 0 as unset/unknown
netfs: Move max_len/max_nr_segs from netfs_io_subrequest to netfs_io_stream
netfs, cifs: Move CIFS_INO_MODIFIED_ATTR to netfs_inode
...
Signed-off-by: Christian Brauner <brauner@kernel.org>
41 files changed, 3519 insertions, 1983 deletions
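For orientation only (this sketch is not part of the merge): the series replaces xarray-based I/O buffers with struct folio_queue, a segmented list of folio slots that an ITER_FOLIOQ iterator can walk directly, as seen in the buffered_read.c changes below where per-slot orders are recorded in folioq->orders[i]. The userspace model below is a simplified stand-in with made-up names and sizes, not the kernel's actual definitions; it only illustrates the idea of walking a chained queue of fixed-size slot arrays without a pagecache lookup.

```c
/*
 * Illustrative userspace model of a folio_queue-like structure.
 * Names, field layout and SLOTS_PER_SEGMENT are assumptions for the
 * example, not the kernel API.
 */
#include <stdio.h>
#include <stdlib.h>

#define SLOTS_PER_SEGMENT 8	/* assumed small, fixed segment size */

struct buf { size_t size; };	/* stand-in for a folio */

struct buf_queue_segment {
	struct buf *slots[SLOTS_PER_SEGMENT];	/* occupied slots come first */
	unsigned int nr;			/* number of occupied slots */
	struct buf_queue_segment *next;		/* next segment in the chain */
};

/* Walk every buffer in every segment, in order, as an iterator would. */
static size_t total_bytes(const struct buf_queue_segment *seg)
{
	size_t total = 0;

	for (; seg; seg = seg->next)
		for (unsigned int i = 0; i < seg->nr; i++)
			total += seg->slots[i]->size;
	return total;
}

int main(void)
{
	struct buf a = { 4096 }, b = { 16384 };
	struct buf_queue_segment seg = { .slots = { &a, &b }, .nr = 2 };

	printf("%zu bytes queued\n", total_bytes(&seg));
	return 0;
}
```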
diff --git a/fs/9p/vfs_addr.c b/fs/9p/vfs_addr.c
index 24fdc74caeba..819c75233235 100644
--- a/fs/9p/vfs_addr.c
+++ b/fs/9p/vfs_addr.c
@@ -68,17 +68,22 @@ static void v9fs_issue_read(struct netfs_io_subrequest *subreq)
 {
 	struct netfs_io_request *rreq = subreq->rreq;
 	struct p9_fid *fid = rreq->netfs_priv;
+	unsigned long long pos = subreq->start + subreq->transferred;
 	int total, err;
 
-	total = p9_client_read(fid, subreq->start + subreq->transferred,
-			       &subreq->io_iter, &err);
+	total = p9_client_read(fid, pos, &subreq->io_iter, &err);
 
 	/* if we just extended the file size, any portion not in
 	 * cache won't be on server and is zeroes */
 	if (subreq->rreq->origin != NETFS_DIO_READ)
 		__set_bit(NETFS_SREQ_CLEAR_TAIL, &subreq->flags);
+	if (pos + total >= i_size_read(rreq->inode))
+		__set_bit(NETFS_SREQ_HIT_EOF, &subreq->flags);
 
-	netfs_subreq_terminated(subreq, err ?: total, false);
+	if (!err)
+		subreq->transferred += total;
+
+	netfs_read_subreq_terminated(subreq, err, false);
 }
 
 /**
diff --git a/fs/afs/file.c b/fs/afs/file.c
index ec1be0091fdb..492d857a3fa0 100644
--- a/fs/afs/file.c
+++ b/fs/afs/file.c
@@ -16,6 +16,7 @@
 #include <linux/mm.h>
 #include <linux/swap.h>
 #include <linux/netfs.h>
+#include <trace/events/netfs.h>
 #include "internal.h"
 
 static int afs_file_mmap(struct file *file, struct vm_area_struct *vma);
@@ -242,9 +243,10 @@ static void afs_fetch_data_notify(struct afs_operation *op)
 
 	req->error = error;
 	if (subreq) {
-		if (subreq->rreq->origin != NETFS_DIO_READ)
-			__set_bit(NETFS_SREQ_CLEAR_TAIL, &subreq->flags);
-		netfs_subreq_terminated(subreq, error ?: req->actual_len, false);
+		subreq->rreq->i_size = req->file_size;
+		if (req->pos + req->actual_len >= req->file_size)
+			__set_bit(NETFS_SREQ_HIT_EOF, &subreq->flags);
+		netfs_read_subreq_terminated(subreq, error, false);
 		req->subreq = NULL;
 	} else if (req->done) {
 		req->done(req);
@@ -262,6 +264,12 @@ static void afs_fetch_data_success(struct afs_operation *op)
 	afs_fetch_data_notify(op);
 }
 
+static void afs_fetch_data_aborted(struct afs_operation *op)
+{
+	afs_check_for_remote_deletion(op);
+	afs_fetch_data_notify(op);
+}
+
 static void afs_fetch_data_put(struct afs_operation *op)
 {
 	op->fetch.req->error = afs_op_error(op);
@@ -272,7 +280,7 @@ static const struct afs_operation_ops afs_fetch_data_operation = {
 	.issue_afs_rpc	= afs_fs_fetch_data,
 	.issue_yfs_rpc	= yfs_fs_fetch_data,
 	.success	= afs_fetch_data_success,
-	.aborted	= afs_check_for_remote_deletion,
+	.aborted	= afs_fetch_data_aborted,
 	.failed		= afs_fetch_data_notify,
 	.put		= afs_fetch_data_put,
 };
@@ -294,7 +302,7 @@ int afs_fetch_data(struct afs_vnode *vnode, struct afs_read *req)
 	op = afs_alloc_operation(req->key, vnode->volume);
 	if (IS_ERR(op)) {
 		if (req->subreq)
-			netfs_subreq_terminated(req->subreq, PTR_ERR(op), false);
+			netfs_read_subreq_terminated(req->subreq, PTR_ERR(op), false);
 		return PTR_ERR(op);
 	}
 
@@ -305,14 +313,15 @@ int afs_fetch_data(struct afs_vnode *vnode, struct afs_read *req)
 	return afs_do_sync_operation(op);
 }
 
-static void afs_issue_read(struct netfs_io_subrequest *subreq)
+static void afs_read_worker(struct work_struct *work)
 {
+	struct netfs_io_subrequest *subreq = container_of(work, struct netfs_io_subrequest, work);
 	struct afs_vnode *vnode = AFS_FS_I(subreq->rreq->inode);
 	struct afs_read *fsreq;
 
 	fsreq = afs_alloc_read(GFP_NOFS);
 	if (!fsreq)
-		return netfs_subreq_terminated(subreq, -ENOMEM, false);
+		return netfs_read_subreq_terminated(subreq, -ENOMEM, false);
 
 	fsreq->subreq	= subreq;
 	fsreq->pos	= subreq->start + subreq->transferred;
@@ -321,10 +330,17 @@ static void afs_issue_read(struct netfs_io_subrequest *subreq)
 	fsreq->vnode	= vnode;
 	fsreq->iter	= &subreq->io_iter;
 
+	trace_netfs_sreq(subreq, netfs_sreq_trace_submit);
 	afs_fetch_data(fsreq->vnode, fsreq);
 	afs_put_read(fsreq);
 }
 
+static void afs_issue_read(struct netfs_io_subrequest *subreq)
+{
+	INIT_WORK(&subreq->work, afs_read_worker);
+	queue_work(system_long_wq, &subreq->work);
+}
+
 static int afs_symlink_read_folio(struct file *file, struct folio *folio)
 {
 	struct afs_vnode *vnode = AFS_FS_I(folio->mapping->host);
diff --git a/fs/afs/fsclient.c b/fs/afs/fsclient.c
index 79cd30775b7a..098fa034a1cc 100644
--- a/fs/afs/fsclient.c
+++ b/fs/afs/fsclient.c
@@ -304,6 +304,7 @@ static int afs_deliver_fs_fetch_data(struct afs_call *call)
 	struct afs_vnode_param *vp = &op->file[0];
 	struct afs_read *req = op->fetch.req;
 	const __be32 *bp;
+	size_t count_before;
 	int ret;
 
 	_enter("{%u,%zu,%zu/%llu}",
@@ -345,10 +346,14 @@ static int afs_deliver_fs_fetch_data(struct afs_call *call)
 
 		/* extract the returned data */
 	case 2:
-		_debug("extract data %zu/%llu",
-		       iov_iter_count(call->iter), req->actual_len);
+		count_before = call->iov_len;
+		_debug("extract data %zu/%llu", count_before, req->actual_len);
 
 		ret = afs_extract_data(call, true);
+		if (req->subreq) {
+			req->subreq->transferred += count_before - call->iov_len;
+			netfs_read_subreq_progress(req->subreq, false);
+		}
 		if (ret < 0)
 			return ret;
diff --git a/fs/afs/write.c b/fs/afs/write.c
index e959640694c2..34107b55f834 100644
--- a/fs/afs/write.c
+++ b/fs/afs/write.c
@@ -89,10 +89,12 @@ static const struct afs_operation_ops afs_store_data_operation = {
  */
 void afs_prepare_write(struct netfs_io_subrequest *subreq)
 {
+	struct netfs_io_stream *stream = &subreq->rreq->io_streams[subreq->stream_nr];
+
 	//if (test_bit(NETFS_SREQ_RETRYING, &subreq->flags))
 	//	subreq->max_len = 512 * 1024;
 	//else
-		subreq->max_len = 256 * 1024 * 1024;
+		stream->sreq_max_len = 256 * 1024 * 1024;
 }
 
 /*
diff --git a/fs/afs/yfsclient.c b/fs/afs/yfsclient.c
index f521e66d3bf6..024227aba4cd 100644
--- a/fs/afs/yfsclient.c
+++ b/fs/afs/yfsclient.c
@@ -355,6 +355,7 @@ static int yfs_deliver_fs_fetch_data64(struct afs_call *call)
 	struct afs_vnode_param *vp = &op->file[0];
 	struct afs_read *req = op->fetch.req;
 	const __be32 *bp;
+	size_t count_before;
 	int ret;
 
 	_enter("{%u,%zu, %zu/%llu}",
@@ -391,10 +392,14 @@ static int yfs_deliver_fs_fetch_data64(struct afs_call *call)
 
 		/* extract the returned data */
 	case 2:
-		_debug("extract data %zu/%llu",
-		       iov_iter_count(call->iter), req->actual_len);
+		count_before = call->iov_len;
+		_debug("extract data %zu/%llu", count_before, req->actual_len);
 
 		ret = afs_extract_data(call, true);
+		if (req->subreq) {
+			req->subreq->transferred += count_before - call->iov_len;
+			netfs_read_subreq_progress(req->subreq, false);
+		}
 		if (ret < 0)
 			return ret;
diff --git a/fs/cachefiles/io.c b/fs/cachefiles/io.c
index a91acd03ee12..6a821a959b59 100644
--- a/fs/cachefiles/io.c
+++ b/fs/cachefiles/io.c
@@ -627,11 +627,12 @@ static void cachefiles_prepare_write_subreq(struct netfs_io_subrequest *subreq)
 {
 	struct netfs_io_request *wreq = subreq->rreq;
 	struct netfs_cache_resources *cres = &wreq->cache_resources;
+	struct netfs_io_stream *stream = &wreq->io_streams[subreq->stream_nr];
 
 	_enter("W=%x[%x] %llx", wreq->debug_id, subreq->debug_index, subreq->start);
 
-	subreq->max_len = MAX_RW_COUNT;
-	subreq->max_nr_segs = BIO_MAX_VECS;
+	stream->sreq_max_len = MAX_RW_COUNT;
+	stream->sreq_max_segs = BIO_MAX_VECS;
 
 	if (!cachefiles_cres_file(cres)) {
 		if (!fscache_wait_for_operation(cres, FSCACHE_WANT_WRITE))
@@ -647,6 +648,7 @@ static void cachefiles_issue_write(struct netfs_io_subrequest *subreq)
 	struct netfs_cache_resources *cres = &wreq->cache_resources;
 	struct cachefiles_object *object = cachefiles_cres_object(cres);
 	struct cachefiles_cache *cache = object->volume->cache;
+	struct netfs_io_stream *stream = &wreq->io_streams[subreq->stream_nr];
 	const struct cred *saved_cred;
 	size_t off, pre, post, len = subreq->len;
 	loff_t start = subreq->start;
@@ -660,6 +662,7 @@ static void cachefiles_issue_write(struct netfs_io_subrequest *subreq)
 	if (off) {
 		pre = CACHEFILES_DIO_BLOCK_SIZE - off;
 		if (pre >= len) {
+			fscache_count_dio_misfit();
 			netfs_write_subrequest_terminated(subreq, len, false);
 			return;
 		}
@@ -670,10 +673,22 @@ static void cachefiles_issue_write(struct netfs_io_subrequest *subreq)
 	}
 
 	/* We also need to end on the cache granularity boundary */
+	if (start + len == wreq->i_size) {
+		size_t part = len % CACHEFILES_DIO_BLOCK_SIZE;
+		size_t need = CACHEFILES_DIO_BLOCK_SIZE - part;
+
+		if (part && stream->submit_extendable_to >= need) {
+			len += need;
+			subreq->len += need;
+			subreq->io_iter.count += need;
+		}
+	}
+
 	post = len & (CACHEFILES_DIO_BLOCK_SIZE - 1);
 	if (post) {
 		len -= post;
 		if (len == 0) {
+			fscache_count_dio_misfit();
 			netfs_write_subrequest_terminated(subreq, post, false);
 			return;
 		}
diff --git a/fs/cachefiles/xattr.c b/fs/cachefiles/xattr.c
index 4dd8a993c60a..7c6f260a3be5 100644
--- a/fs/cachefiles/xattr.c
+++ b/fs/cachefiles/xattr.c
@@ -64,9 +64,15 @@ int cachefiles_set_object_xattr(struct cachefiles_object *object)
 	memcpy(buf->data, fscache_get_aux(object->cookie), len);
 
 	ret = cachefiles_inject_write_error();
-	if (ret == 0)
-		ret = vfs_setxattr(&nop_mnt_idmap, dentry, cachefiles_xattr_cache,
-				   buf, sizeof(struct cachefiles_xattr) + len, 0);
+	if (ret == 0) {
+		ret = mnt_want_write_file(file);
+		if (ret == 0) {
+			ret = vfs_setxattr(&nop_mnt_idmap, dentry,
+					   cachefiles_xattr_cache, buf,
+					   sizeof(struct cachefiles_xattr) + len, 0);
+			mnt_drop_write_file(file);
+		}
+	}
 	if (ret < 0) {
 		trace_cachefiles_vfs_error(object, file_inode(file), ret,
 					   cachefiles_trace_setxattr_error);
@@ -151,8 +157,14 @@ int cachefiles_remove_object_xattr(struct cachefiles_cache *cache,
 	int ret;
 
 	ret = cachefiles_inject_remove_error();
-	if (ret == 0)
-		ret = vfs_removexattr(&nop_mnt_idmap, dentry, cachefiles_xattr_cache);
+	if (ret == 0) {
+		ret = mnt_want_write(cache->mnt);
+		if (ret == 0) {
+			ret = vfs_removexattr(&nop_mnt_idmap, dentry,
+					      cachefiles_xattr_cache);
+			mnt_drop_write(cache->mnt);
+		}
+	}
 	if (ret < 0) {
 		trace_cachefiles_vfs_error(object, d_inode(dentry), ret,
 					   cachefiles_trace_remxattr_error);
@@ -208,9 +220,15 @@ bool cachefiles_set_volume_xattr(struct cachefiles_volume *volume)
 	memcpy(buf->data, p, volume->vcookie->coherency_len);
 
 	ret = cachefiles_inject_write_error();
-	if (ret == 0)
-		ret = vfs_setxattr(&nop_mnt_idmap, dentry, cachefiles_xattr_cache,
-				   buf, len, 0);
+	if (ret == 0) {
+		ret = mnt_want_write(volume->cache->mnt);
+		if (ret == 0) {
+			ret = vfs_setxattr(&nop_mnt_idmap, dentry,
+					   cachefiles_xattr_cache,
+					   buf, len, 0);
+			mnt_drop_write(volume->cache->mnt);
+		}
+	}
 	if (ret < 0) {
 		trace_cachefiles_vfs_error(NULL, d_inode(dentry), ret,
 					   cachefiles_trace_setxattr_error);
diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c
index c4744a02db75..c500c1fd6b9f 100644
--- a/fs/ceph/addr.c
+++ b/fs/ceph/addr.c
@@ -13,6 +13,7 @@
 #include <linux/iversion.h>
 #include <linux/ktime.h>
 #include <linux/netfs.h>
+#include <trace/events/netfs.h>
 
 #include "super.h"
 #include "mds_client.h"
@@ -205,21 +206,6 @@ static void ceph_netfs_expand_readahead(struct netfs_io_request *rreq)
 	}
 }
 
-static bool ceph_netfs_clamp_length(struct netfs_io_subrequest *subreq)
-{
-	struct inode *inode = subreq->rreq->inode;
-	struct ceph_fs_client *fsc = ceph_inode_to_fs_client(inode);
-	struct ceph_inode_info *ci = ceph_inode(inode);
-	u64 objno, objoff;
-	u32 xlen;
-
-	/* Truncate the extent at the end of the current block */
-	ceph_calc_file_object_mapping(&ci->i_layout, subreq->start, subreq->len,
-				      &objno, &objoff, &xlen);
-	subreq->len = min(xlen, fsc->mount_options->rsize);
-	return true;
-}
-
 static void finish_netfs_read(struct ceph_osd_request *req)
 {
 	struct inode *inode = req->r_inode;
@@ -264,7 +250,12 @@ static void finish_netfs_read(struct ceph_osd_request *req)
 				 calc_pages_for(osd_data->alignment,
						osd_data->length), false);
 	}
-	netfs_subreq_terminated(subreq, err, false);
+	if (err > 0) {
+		subreq->transferred = err;
+		err = 0;
+	}
+	trace_netfs_sreq(subreq, netfs_sreq_trace_io_progress);
+	netfs_read_subreq_terminated(subreq, err, false);
 	iput(req->r_inode);
 	ceph_dec_osd_stopping_blocker(fsc->mdsc);
 }
@@ -278,7 +269,6 @@ static bool ceph_netfs_issue_op_inline(struct netfs_io_subrequest *subreq)
 	struct ceph_mds_request *req;
 	struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(inode->i_sb);
 	struct ceph_inode_info *ci = ceph_inode(inode);
-	struct iov_iter iter;
 	ssize_t err = 0;
 	size_t len;
 	int mode;
@@ -301,6 +291,7 @@ static bool ceph_netfs_issue_op_inline(struct netfs_io_subrequest *subreq)
 	req->r_args.getattr.mask = cpu_to_le32(CEPH_STAT_CAP_INLINE_DATA);
 	req->r_num_caps = 2;
 
+	trace_netfs_sreq(subreq, netfs_sreq_trace_submit);
 	err = ceph_mdsc_do_request(mdsc, NULL, req);
 	if (err < 0)
 		goto out;
@@ -314,17 +305,36 @@ static bool ceph_netfs_issue_op_inline(struct netfs_io_subrequest *subreq)
 	}
 
 	len = min_t(size_t, iinfo->inline_len - subreq->start, subreq->len);
-	iov_iter_xarray(&iter, ITER_DEST, &rreq->mapping->i_pages, subreq->start, len);
-	err = copy_to_iter(iinfo->inline_data + subreq->start, len, &iter);
-	if (err == 0)
+	err = copy_to_iter(iinfo->inline_data + subreq->start, len, &subreq->io_iter);
+	if (err == 0) {
 		err = -EFAULT;
+	} else {
+		subreq->transferred += err;
+		err = 0;
+	}
 
 	ceph_mdsc_put_request(req);
 out:
-	netfs_subreq_terminated(subreq, err, false);
+	netfs_read_subreq_terminated(subreq, err, false);
 	return true;
 }
 
+static int ceph_netfs_prepare_read(struct netfs_io_subrequest *subreq)
+{
+	struct netfs_io_request *rreq = subreq->rreq;
+	struct inode *inode = rreq->inode;
+	struct ceph_inode_info *ci = ceph_inode(inode);
+	struct ceph_fs_client *fsc = ceph_inode_to_fs_client(inode);
+	u64 objno, objoff;
+	u32 xlen;
+
+	/* Truncate the extent at the end of the current block */
+	ceph_calc_file_object_mapping(&ci->i_layout, subreq->start, subreq->len,
+				      &objno, &objoff, &xlen);
+	rreq->io_streams[0].sreq_max_len = umin(xlen, fsc->mount_options->rsize);
+	return 0;
+}
+
 static void ceph_netfs_issue_read(struct netfs_io_subrequest *subreq)
 {
 	struct netfs_io_request *rreq = subreq->rreq;
@@ -334,9 +344,8 @@ static void ceph_netfs_issue_read(struct netfs_io_subrequest *subreq)
 	struct ceph_client *cl = fsc->client;
 	struct ceph_osd_request *req = NULL;
 	struct ceph_vino vino = ceph_vino(inode);
-	struct iov_iter iter;
-	int err = 0;
-	u64 len = subreq->len;
+	int err;
+	u64 len;
 	bool sparse = IS_ENCRYPTED(inode) || ceph_test_mount_opt(fsc, SPARSEREAD);
 	u64 off = subreq->start;
 	int extent_cnt;
@@ -349,6 +358,12 @@ static void ceph_netfs_issue_read(struct netfs_io_subrequest *subreq)
 	if (ceph_has_inline_data(ci) && ceph_netfs_issue_op_inline(subreq))
 		return;
 
+	// TODO: This rounding here is slightly dodgy.  It *should* work, for
+	// now, as the cache only deals in blocks that are a multiple of
+	// PAGE_SIZE and fscrypt blocks are at most PAGE_SIZE.  What needs to
+	// happen is for the fscrypt driving to be moved into netfslib and the
+	// data in the cache also to be stored encrypted.
+	len = subreq->len;
 	ceph_fscrypt_adjust_off_and_len(inode, &off, &len);
 
 	req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout, vino,
@@ -371,8 +386,6 @@ static void ceph_netfs_issue_read(struct netfs_io_subrequest *subreq)
 	doutc(cl, "%llx.%llx pos=%llu orig_len=%zu len=%llu\n",
 	      ceph_vinop(inode), subreq->start, subreq->len, len);
 
-	iov_iter_xarray(&iter, ITER_DEST, &rreq->mapping->i_pages, subreq->start, len);
-
 	/*
 	 * FIXME: For now, use CEPH_OSD_DATA_TYPE_PAGES instead of _ITER for
 	 * encrypted inodes. We'd need infrastructure that handles an iov_iter
@@ -384,7 +397,7 @@ static void ceph_netfs_issue_read(struct netfs_io_subrequest *subreq)
 		struct page **pages;
 		size_t page_off;
 
-		err = iov_iter_get_pages_alloc2(&iter, &pages, len, &page_off);
+		err = iov_iter_get_pages_alloc2(&subreq->io_iter, &pages, len, &page_off);
 		if (err < 0) {
 			doutc(cl, "%llx.%llx failed to allocate pages, %d\n",
 			      ceph_vinop(inode), err);
@@ -399,7 +412,7 @@ static void ceph_netfs_issue_read(struct netfs_io_subrequest *subreq)
 		osd_req_op_extent_osd_data_pages(req, 0, pages, len, 0, false,
 						 false);
 	} else {
-		osd_req_op_extent_osd_iter(req, 0, &iter);
+		osd_req_op_extent_osd_iter(req, 0, &subreq->io_iter);
 	}
 	if (!ceph_inc_osd_stopping_blocker(fsc->mdsc)) {
 		err = -EIO;
@@ -410,17 +423,19 @@ static void ceph_netfs_issue_read(struct netfs_io_subrequest *subreq)
 	req->r_inode = inode;
 	ihold(inode);
 
+	trace_netfs_sreq(subreq, netfs_sreq_trace_submit);
 	ceph_osdc_start_request(req->r_osdc, req);
 out:
 	ceph_osdc_put_request(req);
 	if (err)
-		netfs_subreq_terminated(subreq, err, false);
+		netfs_read_subreq_terminated(subreq, err, false);
 	doutc(cl, "%llx.%llx result %d\n", ceph_vinop(inode), err);
 }
 
 static int ceph_init_request(struct netfs_io_request *rreq, struct file *file)
 {
 	struct inode *inode = rreq->inode;
+	struct ceph_fs_client *fsc = ceph_inode_to_fs_client(inode);
 	struct ceph_client *cl = ceph_inode_to_client(inode);
 	int got = 0, want = CEPH_CAP_FILE_CACHE;
 	struct ceph_netfs_request_data *priv;
@@ -472,6 +487,7 @@ static int ceph_init_request(struct netfs_io_request *rreq, struct file *file)
 
 	priv->caps = got;
 	rreq->netfs_priv = priv;
+	rreq->io_streams[0].sreq_max_len = fsc->mount_options->rsize;
 
 out:
 	if (ret < 0)
@@ -496,9 +512,9 @@ static void ceph_netfs_free_request(struct netfs_io_request *rreq)
 const struct netfs_request_ops ceph_netfs_ops = {
 	.init_request		= ceph_init_request,
 	.free_request		= ceph_netfs_free_request,
+	.prepare_read		= ceph_netfs_prepare_read,
 	.issue_read		= ceph_netfs_issue_read,
 	.expand_readahead	= ceph_netfs_expand_readahead,
-	.clamp_length		= ceph_netfs_clamp_length,
 	.check_write_begin	= ceph_netfs_check_write_begin,
 };
diff --git a/fs/netfs/Makefile b/fs/netfs/Makefile
index 8e6781e0b10b..d08b0bfb6756 100644
--- a/fs/netfs/Makefile
+++ b/fs/netfs/Makefile
@@ -5,12 +5,14 @@ netfs-y := \
 	buffered_write.o \
 	direct_read.o \
 	direct_write.o \
-	io.o \
 	iterator.o \
 	locking.o \
 	main.o \
 	misc.o \
 	objects.o \
+	read_collect.o \
+	read_pgpriv2.o \
+	read_retry.o \
 	write_collect.o \
 	write_issue.o
diff --git a/fs/netfs/buffered_read.c b/fs/netfs/buffered_read.c
index 27c750d39476..c40e226053cc 100644
--- a/fs/netfs/buffered_read.c
+++ b/fs/netfs/buffered_read.c
@@ -9,266 +9,388 @@
 #include <linux/task_io_accounting_ops.h>
 #include "internal.h"
 
-/*
- * [DEPRECATED] Unlock the folios in a read operation for when the filesystem
- * is using PG_private_2 and direct writing to the cache from here rather than
- * marking the page for writeback.
- *
- * Note that we don't touch folio->private in this code.
- */
-static void netfs_rreq_unlock_folios_pgpriv2(struct netfs_io_request *rreq,
-					     size_t *account)
+static void netfs_cache_expand_readahead(struct netfs_io_request *rreq,
+					 unsigned long long *_start,
+					 unsigned long long *_len,
+					 unsigned long long i_size)
 {
-	struct netfs_io_subrequest *subreq;
-	struct folio *folio;
-	pgoff_t start_page = rreq->start / PAGE_SIZE;
-	pgoff_t last_page = ((rreq->start + rreq->len) / PAGE_SIZE) - 1;
-	bool subreq_failed = false;
+	struct netfs_cache_resources *cres = &rreq->cache_resources;
 
-	XA_STATE(xas, &rreq->mapping->i_pages, start_page);
+	if (cres->ops && cres->ops->expand_readahead)
+		cres->ops->expand_readahead(cres, _start, _len, i_size);
+}
 
-	/* Walk through the pagecache and the I/O request lists simultaneously.
-	 * We may have a mixture of cached and uncached sections and we only
-	 * really want to write out the uncached sections.  This is slightly
-	 * complicated by the possibility that we might have huge pages with a
-	 * mixture inside.
+static void netfs_rreq_expand(struct netfs_io_request *rreq,
+			      struct readahead_control *ractl)
+{
+	/* Give the cache a chance to change the request parameters.  The
+	 * resultant request must contain the original region.
 	 */
-	subreq = list_first_entry(&rreq->subrequests,
-				  struct netfs_io_subrequest, rreq_link);
-	subreq_failed = (subreq->error < 0);
+	netfs_cache_expand_readahead(rreq, &rreq->start, &rreq->len, rreq->i_size);
 
-	trace_netfs_rreq(rreq, netfs_rreq_trace_unlock_pgpriv2);
+	/* Give the netfs a chance to change the request parameters.  The
+	 * resultant request must contain the original region.
+	 */
+	if (rreq->netfs_ops->expand_readahead)
+		rreq->netfs_ops->expand_readahead(rreq);
 
-	rcu_read_lock();
-	xas_for_each(&xas, folio, last_page) {
-		loff_t pg_end;
-		bool pg_failed = false;
-		bool folio_started = false;
+	/* Expand the request if the cache wants it to start earlier.  Note
+	 * that the expansion may get further extended if the VM wishes to
+	 * insert THPs and the preferred start and/or end wind up in the middle
+	 * of THPs.
+	 *
+	 * If this is the case, however, the THP size should be an integer
+	 * multiple of the cache granule size, so we get a whole number of
+	 * granules to deal with.
+	 */
+	if (rreq->start != readahead_pos(ractl) ||
+	    rreq->len != readahead_length(ractl)) {
+		readahead_expand(ractl, rreq->start, rreq->len);
+		rreq->start = readahead_pos(ractl);
+		rreq->len = readahead_length(ractl);
 
-		if (xas_retry(&xas, folio))
-			continue;
+		trace_netfs_read(rreq, readahead_pos(ractl), readahead_length(ractl),
+				 netfs_read_trace_expanded);
+	}
+}
 
-		pg_end = folio_pos(folio) + folio_size(folio) - 1;
+/*
+ * Begin an operation, and fetch the stored zero point value from the cookie if
+ * available.
+ */
+static int netfs_begin_cache_read(struct netfs_io_request *rreq, struct netfs_inode *ctx)
+{
+	return fscache_begin_read_operation(&rreq->cache_resources, netfs_i_cookie(ctx));
+}
 
-		for (;;) {
-			loff_t sreq_end;
+/*
+ * Decant the list of folios to read into a rolling buffer.
+ */
+static size_t netfs_load_buffer_from_ra(struct netfs_io_request *rreq,
+					struct folio_queue *folioq)
+{
+	unsigned int order, nr;
+	size_t size = 0;
+
+	nr = __readahead_batch(rreq->ractl, (struct page **)folioq->vec.folios,
+			       ARRAY_SIZE(folioq->vec.folios));
+	folioq->vec.nr = nr;
+	for (int i = 0; i < nr; i++) {
+		struct folio *folio = folioq_folio(folioq, i);
+
+		trace_netfs_folio(folio, netfs_folio_trace_read);
+		order = folio_order(folio);
+		folioq->orders[i] = order;
+		size += PAGE_SIZE << order;
+	}
 
-			if (!subreq) {
-				pg_failed = true;
-				break;
-			}
+	for (int i = nr; i < folioq_nr_slots(folioq); i++)
+		folioq_clear(folioq, i);
 
-			if (!folio_started &&
-			    test_bit(NETFS_SREQ_COPY_TO_CACHE, &subreq->flags) &&
-			    fscache_operation_valid(&rreq->cache_resources)) {
-				trace_netfs_folio(folio, netfs_fol
