/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_PAGEMAP_H
#define _LINUX_PAGEMAP_H
/*
* Copyright 1995 Linus Torvalds
*/
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/highmem.h>
#include <linux/compiler.h>
#include <linux/uaccess.h>
#include <linux/gfp.h>
#include <linux/bitops.h>
#include <linux/hardirq.h> /* for in_interrupt() */
#include <linux/hugetlb_inline.h>
struct folio_batch;
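/*
 * mapping_empty - check whether an address_space currently caches
 * nothing at all, neither pages nor shadow entries.
 */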
static inline bool mapping_empty(struct address_space *mapping)
{
return xa_empty(&mapping->i_pages);
}
/*
* mapping_shrinkable - test if page cache state allows inode reclaim
* @mapping: the page cache mapping
*
* This checks the mapping's cache state for the purpose of inode
* reclaim and LRU management.
*
* The caller is expected to hold the i_lock, but is not required to
* hold the i_pages lock, which usually protects cache state. That's
* because the i_lock and the list_lru lock that protect the inode and
* its LRU state don't nest inside the irq-safe i_pages lock.
*
* Cache deletions are performed under the i_lock, which ensures that
* when an inode goes empty, it will reliably get queued on the LRU.
*
* Cache additions do not acquire the i_lock and may race with this
* check, in which case we'll report the inode as shrinkable when it
* has cache pages. This is okay: the shrinker also checks the
* refcount and the referenced bit, which will be elevated or set in
* the process of adding new cache pages to an inode.
*/
static inline bool mapping_shrinkable(struct address_space *mapping)
{
void *head;
/*
* On highmem systems, there could be lowmem pressure from the
* inodes before there is highmem pressure from the page
* cache. Make inodes shrinkable regardless of cache state.
*/
if (IS_ENABLED(CONFIG_HIGHMEM))
return true;
/* Cache completely empty? Shrink away. */
head = rcu_access_pointer(mapping->i_pages.xa_head);
if (!head)
return true;
/*
* The xarray stores single offset-0 entries directly in the
* head pointer, which allows non-resident page cache entries
* to escape the shadow shrinker's list of xarray nodes. The
* inode shrinker needs to pick them up under memory pressure.
*/
if (!xa_is_node(head) && xa_is_value(head))
return true;
return false;
}
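/*
 * Illustrative sketch (not part of this header's API): a reclaim path
 * would typically sample mapping_shrinkable() under the i_lock, per the
 * locking rules documented above. The helper name below is hypothetical.
 */
#if 0
static bool example_inode_cache_shrinkable(struct inode *inode)
{
	bool shrinkable;

	/* i_lock serializes against cache deletions emptying the mapping */
	spin_lock(&inode->i_lock);
	shrinkable = mapping_shrinkable(inode->i_mapping);
	spin_unlock(&inode->i_lock);

	return shrinkable;
}
#endif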
/*
* Bits in mapping->flags.
*/
enum mapping_flags {
AS_EIO = 0, /* IO error on async write */
AS_ENOSPC = 1, /* ENOSPC on async write */
AS_MM_ALL_LOCKS = 2, /* under mm_take_all_locks() */
AS_UNEVICTABLE = 3, /* e.g., ramdisk, SHM_LOCK */
AS_EXITING = 4, /* final truncate in progress */
/* writeback related tags are not used */
AS_NO_WRITEBACK_TAGS = 5,
AS_LARGE_FOLIO_SUPPORT = 6,
};
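/*
 * The flag bits above are operated on with the usual atomic bitops
 * against mapping->flags. Illustrative sketch, mirroring the pattern of
 * the accessors defined below:
 */
#if 0
static inline void example_mapping_set_exiting(struct address_space *mapping)
{
	set_bit(AS_EXITING, &mapping->flags);
}

static inline bool example_mapping_exiting(struct address_space *mapping)
{
	return test_bit(AS_EXITING, &mapping->flags);
}
#endif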
/**
* mapping_set_error - record a writeback error in the address_space
* @mapping: the mapping in which an error should be set
* @error: the error to set in the mapping
*
* When writeback fails in some way, we must record that error so that
* userspace can be informed when fsync and the like are called. We endeavor
* to report errors on any file that was open at the time of the error. Some
* internal callers also need to know when writeback errors have occurred.
*
* When a writeback error occurs, most filesystems will want to call
* mapping_set_error to record the error in the mapping so that it can be
* reported when the application calls fsync(2).
*/
static inline void mapping_set_error(struct address_space *mapping, int error)
{
if (likely(!error))
return;
/* Record in wb_err for checkers using errseq_t based tracking */
__filemap_set_wb_err(mapping, error);
/* Record it in superblock */
if (mapping->host)
errseq_set(&mapping->host->i_sb->s_wb_err, error);
/* Record it in flags for now, for legacy callers */
if (error == -ENOSPC)
set_bit(AS_ENOSPC, &mapping->flags);
else
set_bit(AS_EIO, &mapping->flags);
}
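/*
 * Illustrative sketch (hypothetical filesystem code, not an API in this
 * header): a writeback completion handler would typically record the IO
 * status in the mapping before ending writeback on the folio:
 */
#if 0
static void example_fs_end_writeback(struct folio *folio, int err)
{
	if (err)
		mapping_set_error(folio->mapping, err);
	folio_end_writeback(folio);
}
#endif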
static inline void mapping_set_unevictable(struct address_space *mapping)
{
set_bit(AS_UNEVICTABLE, &mapping->flags);
}
static inline void mapping_clear_unevictable(struct address_space *mapping)