author	Linus Torvalds <torvalds@linux-foundation.org>	2021-04-27 13:27:39 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2021-04-27 13:27:39 -0700
commit	fafe1e39ed213221c0bce6b0b31669334368dc97 (patch)
tree	154ccf523bb48002bca29c220f6351e55c8a619f
parent	820c4bae40cb56466cfed6409e00d0f5165a990c (diff)
parent	3003bbd0697b659944237f3459489cb596ba196c (diff)
Merge tag 'afs-netfs-lib-20210426' of git://git.kernel.org/pub/scm/linux/kernel/git/dhowells/linux-fs
Pull AFS updates from David Howells:
 "Use the new netfs lib.

  Begin the process of overhauling the use of the fscache API by AFS
  and the introduction of support for features such as Transparent
  Huge Pages (THPs).

   - Add some support for THPs, including using core VM helper
     functions to find details of pages.

   - Use the ITER_XARRAY I/O iterator to mediate access to the
     pagecache as this handles THPs and doesn't require allocation of
     large bvec arrays.

   - Delegate address_space read/pre-write I/O methods for AFS to the
     netfs helper library. A method is provided to the library that
     allows it to issue a read against the server.

     This includes a change in use for PG_fscache (it now indicates a
     DIO write in progress from the marked page), so a number of waits
     need to be deployed for it.

   - Split the core AFS writeback function to make it easier to modify
     in future patches to handle writing to the cache. [This might
     feasibly make more sense moved out into my fscache-iter branch].

  I've tested these with "xfstests -g quick" against an AFS volume
  (xfstests needs patching to make it work). With this, AFS without a
  cache passes all expected xfstests; with a cache, there's an extra
  failure, but that's also there before these patches. Fixing that
  probably requires a greater overhaul (as can be found on my
  fscache-iter branch, but that's for a later time).

  Thanks should go to Marc Dionne and Jeff Altman of AuriStor for
  exercising the patches in their test farm also"

Link: https://lore.kernel.org/lkml/3785063.1619482429@warthog.procyon.org.uk/

* tag 'afs-netfs-lib-20210426' of git://git.kernel.org/pub/scm/linux/kernel/git/dhowells/linux-fs:
  afs: Use the netfs_write_begin() helper
  afs: Use new netfs lib read helper API
  afs: Use the fs operation ops to handle FetchData completion
  afs: Prepare for use of THPs
  afs: Extract writeback extension into its own function
  afs: Wait on PG_fscache before modifying/releasing a page
  afs: Use ITER_XARRAY for writing
  afs: Set up the iov_iter before calling afs_extract_data()
  afs: Log remote unmarshalling errors
  afs: Don't truncate iter during data fetch
  afs: Move key to afs_read struct
  afs: Print the operation debug_id when logging an unexpected data version
  afs: Pass page into dirty region helpers to provide THP size
  afs: Disable use of the fscache I/O routines
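For orientation before the diff: the heart of the change is that AFS no longer drives page reads itself but registers a table of callbacks with the netfs helper library, which then calls back into AFS to issue reads against the server. The following is a condensed sketch of that wiring, drawn from the fs/afs/file.c changes below (declarations of the callback functions omitted):

    /* Operations the netfs read helpers call back into AFS with. */
    const struct netfs_read_request_ops afs_req_ops = {
            .init_rreq              = afs_init_rreq,
            .is_cache_enabled       = afs_is_cache_enabled,
            .begin_cache_operation  = afs_begin_cache_operation,
            .check_write_begin      = afs_check_write_begin,
            .issue_op               = afs_req_issue_op,  /* read from the server */
            .cleanup                = afs_priv_cleanup,
    };

    static int afs_readpage(struct file *file, struct page *page)
    {
            if (!file)      /* symlink bodies are read without a struct file */
                    return afs_symlink_readpage(page);

            return netfs_readpage(file, page, &afs_req_ops, NULL);
    }

    static void afs_readahead(struct readahead_control *ractl)
    {
            netfs_readahead(ractl, &afs_req_ops, NULL);
    }

With this, ->readpage and ->readahead become thin shims and the old ->readpages path (afs_readpages and its helpers) is deleted outright.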
-rw-r--r--	fs/afs/Kconfig	1
-rw-r--r--	fs/afs/dir.c	225
-rw-r--r--	fs/afs/file.c	483
-rw-r--r--	fs/afs/fs_operation.c	4
-rw-r--r--	fs/afs/fsclient.c	108
-rw-r--r--	fs/afs/inode.c	7
-rw-r--r--	fs/afs/internal.h	59
-rw-r--r--	fs/afs/rxrpc.c	150
-rw-r--r--	fs/afs/write.c	657
-rw-r--r--	fs/afs/yfsclient.c	82
-rw-r--r--	include/net/af_rxrpc.h	2
-rw-r--r--	include/trace/events/afs.h	74
-rw-r--r--	net/rxrpc/recvmsg.c	9
13 files changed, 805 insertions, 1056 deletions
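The ITER_XARRAY bullet in the commit message translates, throughout the diff, into one recurring call: instead of collecting page pointers into a (possibly large) bvec array, an iterator is pointed straight at the inode's pagecache xarray. A minimal sketch of the pattern as it appears in afs_read_dir() and afs_req_issue_op() below:

    struct iov_iter iter;

    /* Describe `len` bytes of the mapping's pagecache, starting at byte
     * offset `pos`, as the destination of a READ. The pages stay in the
     * xarray, so no bvec allocation is needed, and compound pages (THPs)
     * are handled by the iterator rather than by the filesystem.
     */
    iov_iter_xarray(&iter, READ, &inode->i_mapping->i_pages, pos, len);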
diff --git a/fs/afs/Kconfig b/fs/afs/Kconfig
index 1ad211d72b3b..fc8ba9142f2f 100644
--- a/fs/afs/Kconfig
+++ b/fs/afs/Kconfig
@@ -4,6 +4,7 @@ config AFS_FS
depends on INET
select AF_RXRPC
select DNS_RESOLVER
+ select NETFS_SUPPORT
help
If you say Y here, you will get an experimental Andrew File System
driver. It currently only supports unsecured read-only AFS access.
diff --git a/fs/afs/dir.c b/fs/afs/dir.c
index 17548c1faf02..117df15e5367 100644
--- a/fs/afs/dir.c
+++ b/fs/afs/dir.c
@@ -103,6 +103,35 @@ struct afs_lookup_cookie {
};
/*
+ * Drop the refs that we're holding on the pages we were reading into. We've
+ * got refs on the first nr_pages pages.
+ */
+static void afs_dir_read_cleanup(struct afs_read *req)
+{
+ struct address_space *mapping = req->vnode->vfs_inode.i_mapping;
+ struct page *page;
+ pgoff_t last = req->nr_pages - 1;
+
+ XA_STATE(xas, &mapping->i_pages, 0);
+
+ if (unlikely(!req->nr_pages))
+ return;
+
+ rcu_read_lock();
+ xas_for_each(&xas, page, last) {
+ if (xas_retry(&xas, page))
+ continue;
+ BUG_ON(xa_is_value(page));
+ BUG_ON(PageCompound(page));
+ ASSERTCMP(page->mapping, ==, mapping);
+
+ put_page(page);
+ }
+
+ rcu_read_unlock();
+}
+
+/*
* check that a directory page is valid
*/
static bool afs_dir_check_page(struct afs_vnode *dvnode, struct page *page,
@@ -127,7 +156,7 @@ static bool afs_dir_check_page(struct afs_vnode *dvnode, struct page *page,
qty /= sizeof(union afs_xdr_dir_block);
/* check them */
- dbuf = kmap(page);
+ dbuf = kmap_atomic(page);
for (tmp = 0; tmp < qty; tmp++) {
if (dbuf->blocks[tmp].hdr.magic != AFS_DIR_MAGIC) {
printk("kAFS: %s(%lx): bad magic %d/%d is %04hx\n",
@@ -146,7 +175,7 @@ static bool afs_dir_check_page(struct afs_vnode *dvnode, struct page *page,
((u8 *)&dbuf->blocks[tmp])[AFS_DIR_BLOCK_SIZE - 1] = 0;
}
- kunmap(page);
+ kunmap_atomic(dbuf);
checked:
afs_stat_v(dvnode, n_read_dir);
@@ -157,35 +186,74 @@ error:
}
/*
- * Check the contents of a directory that we've just read.
+ * Dump the contents of a directory.
*/
-static bool afs_dir_check_pages(struct afs_vnode *dvnode, struct afs_read *req)
+static void afs_dir_dump(struct afs_vnode *dvnode, struct afs_read *req)
{
struct afs_xdr_dir_page *dbuf;
- unsigned int i, j, qty = PAGE_SIZE / sizeof(union afs_xdr_dir_block);
+ struct address_space *mapping = dvnode->vfs_inode.i_mapping;
+ struct page *page;
+ unsigned int i, qty = PAGE_SIZE / sizeof(union afs_xdr_dir_block);
+ pgoff_t last = req->nr_pages - 1;
- for (i = 0; i < req->nr_pages; i++)
- if (!afs_dir_check_page(dvnode, req->pages[i], req->actual_len))
- goto bad;
- return true;
+ XA_STATE(xas, &mapping->i_pages, 0);
-bad:
- pr_warn("DIR %llx:%llx f=%llx l=%llx al=%llx r=%llx\n",
+ pr_warn("DIR %llx:%llx f=%llx l=%llx al=%llx\n",
dvnode->fid.vid, dvnode->fid.vnode,
- req->file_size, req->len, req->actual_len, req->remain);
- pr_warn("DIR %llx %x %x %x\n",
- req->pos, req->index, req->nr_pages, req->offset);
+ req->file_size, req->len, req->actual_len);
+ pr_warn("DIR %llx %x %zx %zx\n",
+ req->pos, req->nr_pages,
+ req->iter->iov_offset, iov_iter_count(req->iter));
- for (i = 0; i < req->nr_pages; i++) {
- dbuf = kmap(req->pages[i]);
- for (j = 0; j < qty; j++) {
- union afs_xdr_dir_block *block = &dbuf->blocks[j];
+ xas_for_each(&xas, page, last) {
+ if (xas_retry(&xas, page))
+ continue;
+
+ BUG_ON(PageCompound(page));
+ BUG_ON(page->mapping != mapping);
+
+ dbuf = kmap_atomic(page);
+ for (i = 0; i < qty; i++) {
+ union afs_xdr_dir_block *block = &dbuf->blocks[i];
- pr_warn("[%02x] %32phN\n", i * qty + j, block);
+ pr_warn("[%02lx] %32phN\n", page->index * qty + i, block);
}
- kunmap(req->pages[i]);
+ kunmap_atomic(dbuf);
}
- return false;
+}
+
+/*
+ * Check all the pages in a directory. All the pages are held pinned.
+ */
+static int afs_dir_check(struct afs_vnode *dvnode, struct afs_read *req)
+{
+ struct address_space *mapping = dvnode->vfs_inode.i_mapping;
+ struct page *page;
+ pgoff_t last = req->nr_pages - 1;
+ int ret = 0;
+
+ XA_STATE(xas, &mapping->i_pages, 0);
+
+ if (unlikely(!req->nr_pages))
+ return 0;
+
+ rcu_read_lock();
+ xas_for_each(&xas, page, last) {
+ if (xas_retry(&xas, page))
+ continue;
+
+ BUG_ON(PageCompound(page));
+ BUG_ON(page->mapping != mapping);
+
+ if (!afs_dir_check_page(dvnode, page, req->file_size)) {
+ afs_dir_dump(dvnode, req);
+ ret = -EIO;
+ break;
+ }
+ }
+
+ rcu_read_unlock();
+ return ret;
}
/*
@@ -214,57 +282,57 @@ static struct afs_read *afs_read_dir(struct afs_vnode *dvnode, struct key *key)
{
struct afs_read *req;
loff_t i_size;
- int nr_pages, nr_inline, i, n;
- int ret = -ENOMEM;
+ int nr_pages, i, n;
+ int ret;
+
+ _enter("");
-retry:
+ req = kzalloc(sizeof(*req), GFP_KERNEL);
+ if (!req)
+ return ERR_PTR(-ENOMEM);
+
+ refcount_set(&req->usage, 1);
+ req->vnode = dvnode;
+ req->key = key_get(key);
+ req->cleanup = afs_dir_read_cleanup;
+
+expand:
i_size = i_size_read(&dvnode->vfs_inode);
- if (i_size < 2048)
- return ERR_PTR(afs_bad(dvnode, afs_file_error_dir_small));
+ if (i_size < 2048) {
+ ret = afs_bad(dvnode, afs_file_error_dir_small);
+ goto error;
+ }
if (i_size > 2048 * 1024) {
trace_afs_file_error(dvnode, -EFBIG, afs_file_error_dir_big);
- return ERR_PTR(-EFBIG);
+ ret = -EFBIG;
+ goto error;
}
_enter("%llu", i_size);
- /* Get a request record to hold the page list. We want to hold it
- * inline if we can, but we don't want to make an order 1 allocation.
- */
nr_pages = (i_size + PAGE_SIZE - 1) / PAGE_SIZE;
- nr_inline = nr_pages;
- if (nr_inline > (PAGE_SIZE - sizeof(*req)) / sizeof(struct page *))
- nr_inline = 0;
- req = kzalloc(struct_size(req, array, nr_inline), GFP_KERNEL);
- if (!req)
- return ERR_PTR(-ENOMEM);
-
- refcount_set(&req->usage, 1);
- req->nr_pages = nr_pages;
req->actual_len = i_size; /* May change */
req->len = nr_pages * PAGE_SIZE; /* We can ask for more than there is */
req->data_version = dvnode->status.data_version; /* May change */
- if (nr_inline > 0) {
- req->pages = req->array;
- } else {
- req->pages = kcalloc(nr_pages, sizeof(struct page *),
- GFP_KERNEL);
- if (!req->pages)
- goto error;
- }
+ iov_iter_xarray(&req->def_iter, READ, &dvnode->vfs_inode.i_mapping->i_pages,
+ 0, i_size);
+ req->iter = &req->def_iter;
- /* Get a list of all the pages that hold or will hold the directory
- * content. We need to fill in any gaps that we might find where the
- * memory reclaimer has been at work. If there are any gaps, we will
+ /* Fill in any gaps that we might find where the memory reclaimer has
+ * been at work and pin all the pages. If there are any gaps, we will
* need to reread the entire directory contents.
*/
- i = 0;
- do {
+ i = req->nr_pages;
+ while (i < nr_pages) {
+ struct page *pages[8], *page;
+
n = find_get_pages_contig(dvnode->vfs_inode.i_mapping, i,
- req->nr_pages - i,
- req->pages + i);
- _debug("find %u at %u/%u", n, i, req->nr_pages);
+ min_t(unsigned int, nr_pages - i,
+ ARRAY_SIZE(pages)),
+ pages);
+ _debug("find %u at %u/%u", n, i, nr_pages);
+
if (n == 0) {
gfp_t gfp = dvnode->vfs_inode.i_mapping->gfp_mask;
@@ -272,22 +340,24 @@ retry:
afs_stat_v(dvnode, n_inval);
ret = -ENOMEM;
- req->pages[i] = __page_cache_alloc(gfp);
- if (!req->pages[i])
+ page = __page_cache_alloc(gfp);
+ if (!page)
goto error;
- ret = add_to_page_cache_lru(req->pages[i],
+ ret = add_to_page_cache_lru(page,
dvnode->vfs_inode.i_mapping,
i, gfp);
if (ret < 0)
goto error;
- attach_page_private(req->pages[i], (void *)1);
- unlock_page(req->pages[i]);
+ attach_page_private(page, (void *)1);
+ unlock_page(page);
+ req->nr_pages++;
i++;
} else {
+ req->nr_pages += n;
i += n;
}
- } while (i < req->nr_pages);
+ }
/* If we're going to reload, we need to lock all the pages to prevent
* races.
@@ -305,18 +375,23 @@ retry:
if (!test_bit(AFS_VNODE_DIR_VALID, &dvnode->flags)) {
trace_afs_reload_dir(dvnode);
- ret = afs_fetch_data(dvnode, key, req);
+ ret = afs_fetch_data(dvnode, req);
if (ret < 0)
goto error_unlock;
task_io_account_read(PAGE_SIZE * req->nr_pages);
- if (req->len < req->file_size)
- goto content_has_grown;
+ if (req->len < req->file_size) {
+ /* The content has grown, so we need to expand the
+ * buffer.
+ */
+ up_write(&dvnode->validate_lock);
+ goto expand;
+ }
/* Validate the data we just read. */
- ret = -EIO;
- if (!afs_dir_check_pages(dvnode, req))
+ ret = afs_dir_check(dvnode, req);
+ if (ret < 0)
goto error_unlock;
// TODO: Trim excess pages
@@ -334,11 +409,6 @@ error:
afs_put_read(req);
_leave(" = %d", ret);
return ERR_PTR(ret);
-
-content_has_grown:
- up_write(&dvnode->validate_lock);
- afs_put_read(req);
- goto retry;
}
/*
@@ -448,6 +518,7 @@ static int afs_dir_iterate(struct inode *dir, struct dir_context *ctx,
struct afs_read *req;
struct page *page;
unsigned blkoff, limit;
+ void __rcu **slot;
int ret;
_enter("{%lu},%u,,", dir->i_ino, (unsigned)ctx->pos);
@@ -472,9 +543,15 @@ static int afs_dir_iterate(struct inode *dir, struct dir_context *ctx,
blkoff = ctx->pos & ~(sizeof(union afs_xdr_dir_block) - 1);
/* Fetch the appropriate page from the directory and re-add it
- * to the LRU.
+ * to the LRU. We have all the pages pinned with an extra ref.
*/
- page = req->pages[blkoff / PAGE_SIZE];
+ rcu_read_lock();
+ page = NULL;
+ slot = radix_tree_lookup_slot(&dvnode->vfs_inode.i_mapping->i_pages,
+ blkoff / PAGE_SIZE);
+ if (slot)
+ page = radix_tree_deref_slot(slot);
+ rcu_read_unlock();
if (!page) {
ret = afs_bad(dvnode, afs_file_error_dir_missing_page);
break;
@@ -2006,6 +2083,6 @@ static void afs_dir_invalidatepage(struct page *page, unsigned int offset,
afs_stat_v(dvnode, n_inval);
/* we clean up only if the entire page is being invalidated */
- if (offset == 0 && length == PAGE_SIZE)
+ if (offset == 0 && length == thp_size(page))
detach_page_private(page);
}
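The dir.c changes above repeatedly swap the old req->pages[] array for RCU-safe walks over the mapping's xarray; the skeleton shared by afs_dir_read_cleanup(), afs_dir_dump() and afs_dir_check() is roughly:

    XA_STATE(xas, &mapping->i_pages, 0);    /* iterate from index 0 */
    struct page *page;

    rcu_read_lock();
    xas_for_each(&xas, page, last) {        /* up to and including `last` */
            if (xas_retry(&xas, page))      /* raced with a concurrent change */
                    continue;
            /* ... inspect or release the pinned page ... */
    }
    rcu_read_unlock();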
diff --git a/fs/afs/file.c b/fs/afs/file.c
index 960b64268623..db035ae2a134 100644
--- a/fs/afs/file.c
+++ b/fs/afs/file.c
@@ -14,6 +14,7 @@
#include <linux/gfp.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/mm.h>
+#include <linux/netfs.h>
#include "internal.h"
static int afs_file_mmap(struct file *file, struct vm_area_struct *vma);
@@ -22,8 +23,7 @@ static void afs_invalidatepage(struct page *page, unsigned int offset,
unsigned int length);
static int afs_releasepage(struct page *page, gfp_t gfp_flags);
-static int afs_readpages(struct file *filp, struct address_space *mapping,
- struct list_head *pages, unsigned nr_pages);
+static void afs_readahead(struct readahead_control *ractl);
const struct file_operations afs_file_operations = {
.open = afs_open,
@@ -47,7 +47,7 @@ const struct inode_operations afs_file_inode_operations = {
const struct address_space_operations afs_fs_aops = {
.readpage = afs_readpage,
- .readpages = afs_readpages,
+ .readahead = afs_readahead,
.set_page_dirty = afs_set_page_dirty,
.launder_page = afs_launder_page,
.releasepage = afs_releasepage,
@@ -184,41 +184,50 @@ int afs_release(struct inode *inode, struct file *file)
}
/*
+ * Allocate a new read record.
+ */
+struct afs_read *afs_alloc_read(gfp_t gfp)
+{
+ struct afs_read *req;
+
+ req = kzalloc(sizeof(struct afs_read), gfp);
+ if (req)
+ refcount_set(&req->usage, 1);
+
+ return req;
+}
+
+/*
* Dispose of a ref to a read record.
*/
void afs_put_read(struct afs_read *req)
{
- int i;
-
if (refcount_dec_and_test(&req->usage)) {
- if (req->pages) {
- for (i = 0; i < req->nr_pages; i++)
- if (req->pages[i])
- put_page(req->pages[i]);
- if (req->pages != req->array)
- kfree(req->pages);
- }
+ if (req->cleanup)
+ req->cleanup(req);
+ key_put(req->key);
kfree(req);
}
}
-#ifdef CONFIG_AFS_FSCACHE
-/*
- * deal with notification that a page was read from the cache
- */
-static void afs_file_readpage_read_complete(struct page *page,
- void *data,
- int error)
+static void afs_fetch_data_notify(struct afs_operation *op)
{
- _enter("%p,%p,%d", page, data, error);
-
- /* if the read completes with an error, we just unlock the page and let
- * the VM reissue the readpage */
- if (!error)
- SetPageUptodate(page);
- unlock_page(page);
+ struct afs_read *req = op->fetch.req;
+ struct netfs_read_subrequest *subreq = req->subreq;
+ int error = op->error;
+
+ if (error == -ECONNABORTED)
+ error = afs_abort_to_error(op->ac.abort_code);
+ req->error = error;
+
+ if (subreq) {
+ __set_bit(NETFS_SREQ_CLEAR_TAIL, &subreq->flags);
+ netfs_subreq_terminated(subreq, error ?: req->actual_len, false);
+ req->subreq = NULL;
+ } else if (req->done) {
+ req->done(req);
+ }
}
-#endif
static void afs_fetch_data_success(struct afs_operation *op)
{
@@ -228,10 +237,12 @@ static void afs_fetch_data_success(struct afs_operation *op)
afs_vnode_commit_status(op, &op->file[0]);
afs_stat_v(vnode, n_fetches);
atomic_long_add(op->fetch.req->actual_len, &op->net->n_fetch_bytes);
+ afs_fetch_data_notify(op);
}
static void afs_fetch_data_put(struct afs_operation *op)
{
+ op->fetch.req->error = op->error;
afs_put_read(op->fetch.req);
}
@@ -240,13 +251,14 @@ static const struct afs_operation_ops afs_fetch_data_operation = {
.issue_yfs_rpc = yfs_fs_fetch_data,
.success = afs_fetch_data_success,
.aborted = afs_check_for_remote_deletion,
+ .failed = afs_fetch_data_notify,
.put = afs_fetch_data_put,
};
/*
* Fetch file data from the volume.
*/
-int afs_fetch_data(struct afs_vnode *vnode, struct key *key, struct afs_read *req)
+int afs_fetch_data(struct afs_vnode *vnode, struct afs_read *req)
{
struct afs_operation *op;
@@ -255,11 +267,14 @@ int afs_fetch_data(struct afs_vnode *vnode, struct key *key, struct afs_read *req)
vnode->fid.vid,
vnode->fid.vnode,
vnode->fid.unique,
- key_serial(key));
+ key_serial(req->key));
- op = afs_alloc_operation(key, vnode->volume);
- if (IS_ERR(op))
+ op = afs_alloc_operation(req->key, vnode->volume);
+ if (IS_ERR(op)) {
+ if (req->subreq)
+ netfs_subreq_terminated(req->subreq, PTR_ERR(op), false);
return PTR_ERR(op);
+ }
afs_op_set_vnode(op, 0, vnode);
@@ -268,336 +283,103 @@ int afs_fetch_data(struct afs_vnode *vnode, struct key *key, struct afs_read *req)
return afs_do_sync_operation(op);
}
-/*
- * read page from file, directory or symlink, given a key to use
- */
-int afs_page_filler(void *data, struct page *page)
+static void afs_req_issue_op(struct netfs_read_subrequest *subreq)
{
- struct inode *inode = page->mapping->host;
- struct afs_vnode *vnode = AFS_FS_I(inode);
- struct afs_read *req;
- struct key *key = data;
- int ret;
-
- _enter("{%x},{%lu},{%lu}", key_serial(key), inode->i_ino, page->index);
+ struct afs_vnode *vnode = AFS_FS_I(subreq->rreq->inode);
+ struct afs_read *fsreq;
- BUG_ON(!PageLocked(page));
+ fsreq = afs_alloc_read(GFP_NOFS);
+ if (!fsreq)
+ return netfs_subreq_terminated(subreq, -ENOMEM, false);
- ret = -ESTALE;
- if (test_bit(AFS_VNODE_DELETED, &vnode->flags))
- goto error;
+ fsreq->subreq = subreq;
+ fsreq->pos = subreq->start + subreq->transferred;
+ fsreq->len = subreq->len - subreq->transferred;
+ fsreq->key = subreq->rreq->netfs_priv;
+ fsreq->vnode = vnode;
+ fsreq->iter = &fsreq->def_iter;
- /* is it cached? */
-#ifdef CONFIG_AFS_FSCACHE
- ret = fscache_read_or_alloc_page(vnode->cache,
- page,
- afs_file_readpage_read_complete,
- NULL,
- GFP_KERNEL);
-#else
- ret = -ENOBUFS;
-#endif
- switch (ret) {
- /* read BIO submitted (page in cache) */
- case 0:
- break;
-
- /* page not yet cached */
- case -ENODATA:
- _debug("cache said ENODATA");
- goto go_on;
-
- /* page will not be cached */
- case -ENOBUFS:
- _debug("cache said ENOBUFS");
-
- fallthrough;
- default:
- go_on:
- req = kzalloc(struct_size(req, array, 1), GFP_KERNEL);
- if (!req)
- goto enomem;
-
- /* We request a full page. If the page is a partial one at the
- * end of the file, the server will return a short read and the
- * unmarshalling code will clear the unfilled space.
- */
- refcount_set(&req->usage, 1);
- req->pos = (loff_t)page->index << PAGE_SHIFT;
- req->len = PAGE_SIZE;
- req->nr_pages = 1;
- req->pages = req->array;
- req->pages[0] = page;
- get_page(page);
-
- /* read the contents of the file from the server into the
- * page */
- ret = afs_fetch_data(vnode, key, req);
- afs_put_read(req);
-
- if (ret < 0) {
- if (ret == -ENOENT) {
- _debug("got NOENT from server"
- " - marking file deleted and stale");
- set_bit(AFS_VNODE_DELETED, &vnode->flags);
- ret = -ESTALE;
- }
+ iov_iter_xarray(&fsreq->def_iter, READ,
+ &fsreq->vnode->vfs_inode.i_mapping->i_pages,
+ fsreq->pos, fsreq->len);
-#ifdef CONFIG_AFS_FSCACHE
- fscache_uncache_page(vnode->cache, page);
-#endif
- BUG_ON(PageFsCache(page));
-
- if (ret == -EINTR ||
- ret == -ENOMEM ||
- ret == -ERESTARTSYS ||
- ret == -EAGAIN)
- goto error;
- goto io_error;
- }
+ afs_fetch_data(fsreq->vnode, fsreq);
+}
- SetPageUptodate(page);
+static int afs_symlink_readpage(struct page *page)
+{
+ struct afs_vnode *vnode = AFS_FS_I(page->mapping->host);
+ struct afs_read *fsreq;
+ int ret;
- /* send the page to the cache */
-#ifdef CONFIG_AFS_FSCACHE
- if (PageFsCache(page) &&
- fscache_write_page(vnode->cache, page, vnode->status.size,
- GFP_KERNEL) != 0) {
- fscache_uncache_page(vnode->cache, page);
- BUG_ON(PageFsCache(page));
- }
-#endif
- unlock_page(page);
- }
+ fsreq = afs_alloc_read(GFP_NOFS);
+ if (!fsreq)
+ return -ENOMEM;
- _leave(" = 0");
- return 0;
+ fsreq->pos = page->index * PAGE_SIZE;
+ fsreq->len = PAGE_SIZE;
+ fsreq->vnode = vnode;
+ fsreq->iter = &fsreq->def_iter;
+ iov_iter_xarray(&fsreq->def_iter, READ, &page->mapping->i_pages,
+ fsreq->pos, fsreq->len);
-io_error:
- SetPageError(page);
- goto error;
-enomem:
- ret = -ENOMEM;
-error:
- unlock_page(page);
- _leave(" = %d", ret);
+ ret = afs_fetch_data(fsreq->vnode, fsreq);
+ page_endio(page, false, ret);
return ret;
}
-/*
- * read page from file, directory or symlink, given a file to nominate the key
- * to be used
- */
-static int afs_readpage(struct file *file, struct page *page)
+static void afs_init_rreq(struct netfs_read_request *rreq, struct file *file)
{
- struct key *key;
- int ret;
-
- if (file) {
- key = afs_file_key(file);
- ASSERT(key != NULL);
- ret = afs_page_filler(key, page);
- } else {
- struct inode *inode = page->mapping->host;
- key = afs_request_key(AFS_FS_S(inode->i_sb)->cell);
- if (IS_ERR(key)) {
- ret = PTR_ERR(key);
- } else {
- ret = afs_page_filler(key, page);
- key_put(key);
- }
- }
- return ret;
+ rreq->netfs_priv = key_get(afs_file_key(file));
}
-/*
- * Make pages available as they're filled.
- */
-static void afs_readpages_page_done(struct afs_read *req)
+static bool afs_is_cache_enabled(struct inode *inode)
{
-#ifdef CONFIG_AFS_FSCACHE
- struct afs_vnode *vnode = req->vnode;
-#endif
- struct page *page = req->pages[req->index];
+ struct fscache_cookie *cookie = afs_vnode_cache(AFS_FS_I(inode));
- req->pages[req->index] = NULL;
- SetPageUptodate(page);
-
- /* send the page to the cache */
-#ifdef CONFIG_AFS_FSCACHE
- if (PageFsCache(page) &&
- fscache_write_page(vnode->cache, page, vnode->status.size,
- GFP_KERNEL) != 0) {
- fscache_uncache_page(vnode->cache, page);
- BUG_ON(PageFsCache(page));
- }
-#endif
- unlock_page(page);
- put_page(page);
+ return fscache_cookie_enabled(cookie) && !hlist_empty(&cookie->backing_objects);
}
-/*
- * Read a contiguous set of pages.
- */
-static int afs_readpages_one(struct file *file, struct address_space *mapping,
- struct list_head *pages)
+static int afs_begin_cache_operation(struct netfs_read_request *rreq)
{
- struct afs_vnode *vnode = AFS_FS_I(mapping->host);
- struct afs_read *req;
- struct list_head *p;
- struct page *first, *page;
- struct key *key = afs_file_key(file);
- pgoff_t index;
- int ret, n, i;
-
- /* Count the number of contiguous pages at the front of the list. Note
- * that the list goes prev-wards rather than next-wards.
- */
- first = lru_to_page(pages);
- index = first->index + 1;
- n = 1;
- for (p = first->lru.prev; p != pages; p = p->prev) {
- page = list_entry(p, struct page, lru);
- if (page->index != index)
- break;
- index++;
- n++;
- }
-
- req = kzalloc(struct_size(req, array, n), GFP_NOFS);
- if (!req)
- return -ENOMEM;
-
- refcount_set(&req->usage, 1);
- req->vnode = vnode;
- req->page_done = afs_readpages_page_done;
- req->pos = first->index;
- req->pos <<= PAGE_SHIFT;
- req->pages = req->array;
-
- /* Transfer the pages to the request. We add them in until one fails
- * to add to the LRU and then we stop (as that'll make a hole in the
- * contiguous run.
- *
- * Note that it's possible for the file size to change whilst we're
- * doing this, but we rely on the server returning less than we asked
- * for if the file shrank. We also rely on this to deal with a partial
- * page at the end of the file.
- */
- do {
- page = lru_to_page(pages);
- list_del(&page->lru);
- index = page->index;
- if (add_to_page_cache_lru(page, mapping, index,
- readahead_gfp_mask(mapping))) {
-#ifdef CONFIG_AFS_FSCACHE
- fscache_uncache_page(vnode->cache, page);
-#endif
- put_page(page);
- break;
- }
-
- req->pages[req->nr_pages++] = page;
- req->len += PAGE_SIZE;
- } while (req->nr_pages < n);
+ struct afs_vnode *vnode = AFS_FS_I(rreq->inode);
- if (req->nr_pages == 0) {
- kfree(req);
- return 0;
- }
-
- ret = afs_fetch_data(vnode, key, req);
- if (ret < 0)
- goto error;
-
- task_io_account_read(PAGE_SIZE * req->nr_pages);
- afs_put_read(req);
- return 0;
-
-error:
- if (ret == -ENOENT) {
- _debug("got NOENT from server"
- " - marking file deleted and stale");
- set_bit(AFS_VNODE_DELETED, &vnode->flags);
- ret = -ESTALE;
- }
-
- for (i = 0; i < req->nr_pages; i++) {
- page = req->pages[i];
- if (page) {
-#ifdef CONFIG_AFS_FSCACHE
- fscache_uncache_page(vnode->cache, page);
-#endif
- SetPageError(page);
- unlock_page(page);
- }
- }
-
- afs_put_read(req);
- return ret;
+ return fscache_begin_read_operation(rreq, afs_vnode_cache(vnode));
}
-/*
- * read a set of pages
- */
-static int afs_readpages(struct file *file, struct address_space *mapping,
- struct list_head *pages, unsigned nr_pages)
+static int afs_check_write_begin(struct file *file, loff_t pos, unsigned len,
+ struct page *page, void **_fsdata)
{
- struct key *key = afs_file_key(file);
- struct afs_vnode *vnode;
- int ret = 0;
-
- _enter("{%d},{%lu},,%d",
- key_serial(key), mapping->host->i_ino, nr_pages);
+ struct afs_vnode *vnode = AFS_FS_I(file_inode(file));
- ASSERT(key != NULL);
+ return test_bit(AFS_VNODE_DELETED, &vnode->flags) ? -ESTALE : 0;
+}
- vnode = AFS_FS_I(mapping->host);
- if (test_bit(AFS_VNODE_DELETED, &vnode->flags)) {
- _leave(" = -ESTALE");
- return -ESTALE;
- }
+static void afs_priv_cleanup(struct address_space *mapping, void *netfs_priv)
+{
+ key_put(netfs_priv);
+}
- /* attempt to read as many of the pages as possible */
-#ifdef CONFIG_AFS_FSCACHE
- ret = fscache_read_or_alloc_pages(vnode->cache,
- mapping,
- pages,
- &nr_pages,
- afs_file_readpage_read_complete,
- NULL,
- mapping_gfp_mask(mapping));
-#else
- ret = -ENOBUFS;
-#endif
+const struct netfs_read_request_ops afs_req_ops = {
+ .init_rreq = afs_init_rreq,
+ .is_cache_enabled = afs_is_cache_enabled,
+ .begin_cache_operation = afs_begin_cache_operation,
+ .check_write_begin = afs_check_write_begin,
+ .issue_op = afs_req_issue_op,
+ .cleanup = afs_priv_cleanup,
+};
- switch (ret) {
- /* all pages are being read from the cache */
- case 0:
- BUG_ON(!list_empty(pages));
- BUG_ON(nr_pages != 0);
- _leave(" = 0 [reading all]");
- return 0;
-
- /* there were pages that couldn't be read from the cache */
- case -ENODATA:
- case -ENOBUFS:
- break;
-
- /* other error */
- default:
- _leave(" = %d", ret);
- return ret;
- }
+static int afs_readpage(struct file *file, struct page *page)
+{
+ if (!file)
+ return afs_symlink_readpage(page);
- while (!list_empty(pages)) {
- ret = afs_readpages_one(file, mapping, pages);
- if (ret < 0)
- break;
- }
+ return netfs_readpage(file, page, &afs_req_ops, NULL);
+}
- _leave(" = %d [netting]", ret);
- return ret;
+static void afs_readahead(struct readahead_control *ractl)
+{
+ netfs_readahead(ractl, &afs_req_ops, NULL);
}
/*
@@ -625,8 +407,8 @@ static void afs_invalidate_dirty(struct page *page, unsigned int offset,
return;
/* We may need to shorten the dirty region */
- f = afs_page_dirty_from(priv);
- t = afs_page_dirty_to(priv);
+ f = afs_page_dirty_from(page, priv);
+ t = afs_page_dirty_to(page, priv);
if (t <= offset || f >= end)
return; /* Doesn't overlap */
@@ -644,17 +426,17 @@ static void afs_invalidate_dirty(struct page *page, unsigned int offset,
if (f == t)
goto undirty;
- priv = afs_page_dirty(f, t);
+ priv = afs_page_dirty(page, f, t);
set_page_private(page, priv);
- trace_afs_page_dirty(vnode, tracepoint_string("trunc"), page->index, priv);
+ trace_afs_page_dirty(vnode, tracepoint_string("trunc"), page);
return;
undirty:
- trace_afs_page_dirty(vnode, tracepoint_string("undirty"), page->index, priv);
+ trace_afs_page_dirty(vnode, tracepoint_string("undirty"), page);
clear_page_dirty_for_io(page);
full_invalidate:
- priv = (unsigned long)detach_page_private(page);
- trace_afs_page_dirty(vnode, tracepoint_string("inval"), page->index, priv);
+ trace_afs_page_dirty(vnode, tracepoint_string("inval"), page);
+ detach_page_private(page);
}
/*
@@ -669,20 +451,10 @@ static void afs_invalidatepage(struct page *page, unsigned int offset,
BUG_ON(!PageLocked(page));
-#ifdef CONFIG_AFS_FSCACHE
- /* we clean up only if the entire page is being invalidated */
- if (offset == 0 && length == PAGE_SIZE) {
- if (PageFsCache(page)) {
- struct afs_vnode *vnode = AFS_FS_I(page->mapping->host);
- fscache_wait_on_page_write(vnode->cache, page);
- fscache_uncache_page(vnode->cache, page);
- }
- }
-#endif
-
if (PagePrivate(page))
afs_invalidate_dirty(page, offset, length);
+ wait_on_page_fscache(page);
_leave("");
}
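The commit message's PG_fscache note materialises here: because the bit now means "a write from this page to the cache is in progress", any path about to modify or release the page must drain that write first. The pattern, as used in afs_invalidatepage() above, is simply:

    /* PG_fscache now marks a DIO write to the cache in progress from
     * this page; wait for it to clear before the page can be modified
     * or released.
     */
    wait_on_page_fscache(page);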
@@ -693,7 +465,6 @@ static void afs_invalidatepage(struct page *page, unsigned int offset,
static int afs_releasepage(struct page *page, gfp_t gfp_flags)
{
struct afs_vnode *vnode = AFS_FS_I(page->mapping->host);
- unsigned long priv;
_enter("{{%llx:%llu}[%lu],%lx},%x",
vnode->fid.vid, vnode->fid.vnode, page->index, page->flags,
@@ -702,16 +473,16 @@ static int afs_releasepage(struct page *page, gfp_t gfp_flags)
/* deny if page is being written to the cache and the caller hasn't
* elected to wait */
#ifdef CONFIG_AFS_FSCACHE
- if (!fscache_maybe_release_page(vnode->cache, page, gfp_