// SPDX-License-Identifier: GPL-2.0-only
/*
* linux/fs/nfs/pagelist.c
*
* A set of helper functions for managing NFS read and write requests.
* The main purpose of these routines is to provide support for the
* coalescing of several requests into a single RPC call.
*
* Copyright 2000, 2001 (c) Trond Myklebust <trond.myklebust@fys.uio.no>
*
*/
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/sched.h>
#include <linux/sunrpc/clnt.h>
#include <linux/nfs.h>
#include <linux/nfs3.h>
#include <linux/nfs4.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_page.h>
#include <linux/nfs_mount.h>
#include <linux/export.h>
#include <linux/filelock.h>
#include "internal.h"
#include "pnfs.h"
#include "nfstrace.h"
#include "fscache.h"
#define NFSDBG_FACILITY NFSDBG_PAGECACHE
static struct kmem_cache *nfs_page_cachep;
static const struct rpc_call_ops nfs_pgio_common_ops;
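
/*
 * struct nfs_page_iter_page - iterator over the pages backing an nfs_page
 * @req: the request being walked
 * @count: number of bytes of the request consumed so far
 *
 * Lets callers hand out one struct page at a time when turning a request
 * into the page list for an RPC call.
 */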
struct nfs_page_iter_page {
const struct nfs_page *req;
size_t count;
};
static void nfs_page_iter_page_init(struct nfs_page_iter_page *i,
const struct nfs_page *req)
{
i->req = req;
i->count = 0;
}
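
/* Advance the iterator by @sz bytes, clamping to the end of the request. */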
static void nfs_page_iter_page_advance(struct nfs_page_iter_page *i, size_t sz)
{
const struct nfs_page *req = i->req;
size_t tmp = i->count + sz;
i->count = (tmp < req->wb_bytes) ? tmp : req->wb_bytes;
}
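
/*
 * Return the page containing the next unconsumed byte of the request and
 * advance past the bytes of the request that fall within that page.
 * Returns NULL once the whole request has been consumed.
 */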
static struct page *nfs_page_iter_page_get(struct nfs_page_iter_page *i)
{
const struct nfs_page *req = i->req;
struct page *page;
if (i->count != req->wb_bytes) {
size_t base = i->count + req->wb_pgbase;
size_t len = PAGE_SIZE - offset_in_page(base);
page = nfs_page_to_page(req, base);
nfs_page_iter_page_advance(i, len);
return page;
}
return NULL;
}
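
/*
 * Return the mirror at index @idx, asking the layout driver via the
 * ->pg_get_mirror() op when one is provided; otherwise fall back to the
 * single default mirror in pg_mirrors[0].
 */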
static struct nfs_pgio_mirror *
nfs_pgio_get_mirror(struct nfs_pageio_descriptor *desc, u32 idx)
{
if (desc->pg_ops->pg_get_mirror)
return desc->pg_ops->pg_get_mirror(desc, idx);
return &desc->pg_mirrors[0];
}
struct nfs_pgio_mirror *
nfs_pgio_current_mirror(struct nfs_pageio_descriptor *desc)
{
return nfs_pgio_get_mirror(desc, desc->pg_mirror_idx);
}
EXPORT_SYMBOL_GPL(nfs_pgio_current_mirror);
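
/*
 * Switch the current mirror to @idx through the layout driver's
 * ->pg_set_mirror() op. Without such an op the descriptor has only the
 * default mirror, so the existing index is returned unchanged.
 */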
static u32
nfs_pgio_set_current_mirror(struct nfs_pageio_descriptor *desc, u32 idx)
{
if (desc->pg_ops->pg_set_mirror)
return desc->pg_ops->pg_set_mirror(desc, idx);
return desc->pg_mirror_idx;
}
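
/**
 * nfs_pgheader_init - initialise an I/O header from a pageio descriptor
 * @desc: pageio descriptor whose current mirror supplies the requests
 * @hdr: the I/O header to initialise
 * @release: callback invoked when the header is released
 *
 * Copies the inode, credential, starting offset, byte count and completion
 * hooks for the current mirror into @hdr.
 */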
void nfs_pgheader_init(struct nfs_pageio_descriptor *desc,
struct nfs_pgio_header *hdr,
void (*release)(struct nfs_pgio_header *hdr))
{
struct nfs_pgio_mirror *mirror = nfs_pgio_current_mirror(desc);
hdr->req = nfs_list_entry(mirror->pg_list.next);
hdr->inode = desc->pg_inode;
hdr->cred = nfs_req_openctx(hdr->req)->cred;
hdr->io_start = req_offset(hdr->req);
hdr->good_bytes = mirror->pg_count;
hdr->io_completion = desc->pg_io_completion;
hdr->dreq = desc->pg_dreq;
nfs_netfs_set_pgio_header(hdr, desc);
hdr->release = release;
hdr->completion_ops = desc->pg_completion_ops;
if (hdr->completion_ops->init_hdr)
hdr->completion_ops->init_hdr(hdr);
hdr->pgio_mirror_idx = desc->pg_mirror_idx;
}
EXPORT_SYMBOL_GPL(nfs_pgheader_init);
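
/**
 * nfs_set_pgio_error - record an I/O error in a pageio header
 * @hdr: header describing the I/O
 * @error: the error code to record
 * @pos: file offset at which the error occurred
 *
 * Trims @hdr->good_bytes back to the bytes that completed before @pos,
 * clears the EOF flag, and remembers only the first error reported for
 * this header.
 */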
void nfs_set_pgio_error(struct nfs_pgio_header *hdr, int error, loff_t pos)
{
unsigned int new = pos - hdr->io_start;
trace_nfs_pgio_error(hdr, error, pos);
if (hdr->good_bytes > new) {
hdr->good_bytes = new;
clear_bit(NFS_IOHDR_EOF, &hdr->flags);
if (!test_and_set_bit(NFS_IOHDR_ERROR, &hdr->flags))
hdr->error = error;
}
}
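
/* Allocate a zeroed nfs_page from the slab cache; returns NULL on failure. */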
static inline struct nfs_page *nfs_page_alloc(void)
{
struct nfs_page *p =
kmem_cache_zalloc(nfs_page_cachep, nfs_io_gfp_mask());
if (p)
INIT_LIST_HEAD(&p->wb_list);
return p;
}
static inline void
nfs_page_free(struct nfs_page *p)
{
kmem_cache_free(nfs_page_cachep, p);
}
/**
* nfs_iocounter_wait - wait for i/o to complete
 * @l_ctx: nfs_lock_context with io_count to use
 *
 * Returns -ERESTARTSYS if interrupted by a fatal signal.
 * Otherwise returns 0 once the io_count hits 0.
*/
int
nfs_iocounter_wait(struct nfs_lock_context *l_ctx)
{
return wait_var_event_killable(&l_ctx->io_count,
!atomic_read(&l_ctx->io_count));
}
/**
 * nfs_async_iocounter_wait - wait on an rpc_waitqueue for I/O to complete
 * @task: the rpc_task that should wait
 * @l_ctx: nfs_lock_context with io_count to check
*
* Returns true if there is outstanding I/O to wait on and the
* task has been put to sleep.
*/
bool
nfs_async_iocounter_wait(struct rpc_task *task, struct nfs_lock_context *l_ctx)
{
struct inode *inode = d_inode(l_ctx->open_context->dentry);
bool ret = false;
if (atomic_read(&l_ctx->io_count) > 0) {
rpc_sleep_on(&NFS_SERVER(inode)->uoc_rpcwaitq, task, NULL);
ret = true;
}
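	/*
	 * Re-check after queueing the task: if the I/O count already dropped
	 * to zero, wake the task so it is not left sleeping on the queue.
	 */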
if (atomic_read(&l_ctx->io_count) == 0) {
rpc_wake_up_queued_task(&NFS_SERVER(inode)->uoc_rpcwaitq, task);
ret = false;
}
return ret;
}
EXPORT_SYMBOL_GPL(nfs_async_iocounter_wait);
/*
 * nfs_page_group_lock_head - lock and return the head request of a page group
 * @req: any member of the page group
 *
 * Takes the page lock on the group head, waiting for it to become free if
 * necessary. On success the head is returned, with an extra reference taken
 * whenever it is not @req itself; on failure an ERR_PTR() is returned.
 */
struct nfs_page *
nfs_page_group_lock_head(struct nfs_page *req)
{
struct nfs_page *head = req->wb_head;
while (!nfs_lock_request(head)) {
int ret = nfs_wait_on_request(head);
if (ret < 0)
return ERR_PTR(ret);
}
if (head != req)
kref_get(&head->wb_kref);
return head;
}
/*
* nfs_unroll_lo