/* SPDX-License-Identifier: GPL-2.0-or-later */
/* internal.h: mm/ internal definitions
*
* Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
*/
#ifndef __MM_INTERNAL_H
#define __MM_INTERNAL_H
#include <linux/fs.h>
#include <linux/khugepaged.h>
#include <linux/mm.h>
#include <linux/mm_inline.h>
#include <linux/pagemap.h>
#include <linux/pagewalk.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/swap_cgroup.h>
#include <linux/tracepoint-defs.h>
/* Internal core VMA manipulation functions. */
#include "vma.h"
struct folio_batch;
/*
* Maintains state across a page table move. The operation assumes both source
* and destination VMAs already exist and are specified by the user.
*
* Partial moves are permitted, but the old and new ranges must both reside
* within a VMA.
*
 * The mmap lock must be held in write mode, and VMA write locks must be held
 * on any VMA that is visible.
*
* Use the PAGETABLE_MOVE() macro to initialise this struct.
*
* The old_addr and new_addr fields are updated as the page table move is
* executed.
*
 * NOTE: The page table move is carried out by reading from [old_addr, old_end),
 * and old_addr may be updated for better page table alignment, so len_in
 * represents the length of the range being copied, as specified by the user.
*/
struct pagetable_move_control {
struct vm_area_struct *old; /* Source VMA. */
struct vm_area_struct *new; /* Destination VMA. */
unsigned long old_addr; /* Address from which the move begins. */
unsigned long old_end; /* Exclusive address at which old range ends. */
unsigned long new_addr; /* Address to move page tables to. */
unsigned long len_in; /* Bytes to remap specified by user. */
bool need_rmap_locks; /* Do rmap locks need to be taken? */
bool for_stack; /* Is this an early temp stack being moved? */
};
#define PAGETABLE_MOVE(name, old_, new_, old_addr_, new_addr_, len_) \
struct pagetable_move_control name = { \
.old = old_, \
.new = new_, \
.old_addr = old_addr_, \
.old_end = (old_addr_) + (len_), \
.new_addr = new_addr_, \
.len_in = len_, \
}
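/*
 * Usage sketch (illustrative): an mremap-style caller builds the control
 * structure on the stack and hands it to the page table mover, e.g. assuming
 * the move_page_tables() helper declared elsewhere in mm/:
 *
 *	PAGETABLE_MOVE(pmc, vma, new_vma, old_addr, new_addr, len);
 *	moved_len = move_page_tables(&pmc);
 */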
/*
* The set of flags that only affect watermark checking and reclaim
* behaviour. This is used by the MM to obey the caller constraints
* about IO, FS and watermark checking while ignoring placement
* hints such as HIGHMEM usage.
*/
#define GFP_RECLAIM_MASK (__GFP_RECLAIM|__GFP_HIGH|__GFP_IO|__GFP_FS|\
__GFP_NOWARN|__GFP_RETRY_MAYFAIL|__GFP_NOFAIL|\
__GFP_NORETRY|__GFP_MEMALLOC|__GFP_NOMEMALLOC|\
__GFP_NOLOCKDEP)
/* The GFP flags allowed during early boot */
#define GFP_BOOT_MASK (__GFP_BITS_MASK & ~(__GFP_RECLAIM|__GFP_IO|__GFP_FS))
/* Control allocation cpuset and node placement constraints */
#define GFP_CONSTRAINT_MASK (__GFP_HARDWALL|__GFP_THISNODE)
/* Do not use these with a slab allocator */
#define GFP_SLAB_BUG_MASK (__GFP_DMA32|__GFP_HIGHMEM|~__GFP_BITS_MASK)
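/*
 * Illustrative sketch of how these masks are typically applied (hypothetical
 * local variables): a sub-allocator passes down only the reclaim and placement
 * constraints it received from its caller, while flags in GFP_SLAB_BUG_MASK
 * are rejected outright:
 *
 *	gfp_t gfp = flags & (GFP_RECLAIM_MASK | GFP_CONSTRAINT_MASK);
 *
 *	if (unlikely(flags & GFP_SLAB_BUG_MASK))
 *		... warn and strip the offending flags ...
 */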
/*
 * Unlike WARN_ON_ONCE(), no warning is issued when the caller specifies
 * __GFP_NOWARN.
*/
#define WARN_ON_ONCE_GFP(cond, gfp) ({ \
static bool __section(".data..once") __warned; \
int __ret_warn_once = !!(cond); \
\
if (unlikely(!(gfp & __GFP_NOWARN) && __ret_warn_once && !__warned)) { \
__warned = true; \
WARN_ON(1); \
} \
unlikely(__ret_warn_once); \
})
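/*
 * Usage sketch (illustrative): warn once about an impossible request unless
 * the caller opted out with __GFP_NOWARN, e.g.:
 *
 *	if (WARN_ON_ONCE_GFP(order > MAX_PAGE_ORDER, gfp))
 *		return NULL;
 */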
void page_writeback_init(void);
/*
* If a 16GB hugetlb folio were mapped by PTEs of all of its 4kB pages,
* its nr_pages_mapped would be 0x400000: choose the ENTIRELY_MAPPED bit
* above that range, instead of 2*(PMD_SIZE/PAGE_SIZE). Hugetlb currently
* leaves nr_pages_mapped at 0, but avoid surprise if it participates later.
*/
#define ENTIRELY_MAPPED 0x800000
#define FOLIO_PAGES_MAPPED (ENTIRELY_MAPPED - 1)
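/*
 * Worked example for the bound above: 16GB / 4kB = 4M pages = 0x400000, so
 * 0x800000 (ENTIRELY_MAPPED) sits above any per-page mapcount total a folio
 * can accumulate.
 */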
/*
* Flags passed to __show_mem() and show_free_areas() to suppress output in
* various contexts.
*/
#define SHOW_MEM_FILTER_NODES (0x0001u) /* disallowed nodes */
/*
* How many individual pages have an elevated _mapcount. Excludes
* the folio's entire_mapcount.
*
* Don't use this function outside of debugging code.
*/
static inline int folio_nr_pages_mapped(const struct folio *folio)
{
if (IS_ENABLED(CONFIG_NO_PAGE_MAPCOUNT))
return -1;
return atomic_read(&folio->_nr_pages_mapped) & FOLIO_PAGES_MAPPED;
}
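/*
 * Debug-only usage sketch (illustrative):
 *
 *	pr_debug("folio pfn %lx: %d of %ld pages PTE-mapped\n",
 *		 folio_pfn(folio), folio_nr_pages_mapped(folio),
 *		 folio_nr_pages(folio));
 */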
/*
* Retrieve the first entry of a folio based on a provided entry within the
* folio. We cannot rely on folio->swap as there is no guarantee that it has
 * been initialized. Used for calling arch_swap_restore().
*/
static inline swp_entry_t folio_swap(swp_entry_t entry,
const struct folio *folio)
{
swp_entry_t swap = {
		/* Swap slots of a large folio are naturally aligned, so round down. */
		.val = ALIGN_DOWN(entry.val, folio_nr_pages(folio)),
	};

	return swap;
}
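/*
 * Usage sketch (illustrative): a swap-in path can restore arch-specific
 * metadata for the whole folio, e.g.:
 *
 *	arch_swap_restore(folio_swap(entry, folio), folio);
 */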