// SPDX-License-Identifier: GPL-2.0-only
/*
* This kernel test validates architecture page table helpers and
* accessors and helps in verifying their continued compliance with
* expected generic MM semantics.
*
* Copyright (C) 2019 ARM Ltd.
*
* Author: Anshuman Khandual <anshuman.khandual@arm.com>
*/
#define pr_fmt(fmt) "debug_vm_pgtable: [%-25s]: " fmt, __func__
#include <linux/gfp.h>
#include <linux/highmem.h>
#include <linux/hugetlb.h>
#include <linux/kernel.h>
#include <linux/kconfig.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/mm_types.h>
#include <linux/module.h>
#include <linux/pfn_t.h>
#include <linux/printk.h>
#include <linux/pgtable.h>
#include <linux/random.h>
#include <linux/spinlock.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/start_kernel.h>
#include <linux/sched/mm.h>
#include <linux/io.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
/*
* Please refer to Documentation/vm/arch_pgtable_helpers.rst for the
* semantics expectations that are being validated here. All future
* changes here or in the documentation need to be kept in sync.
*/
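
/*
 * Basic vm_flags for the dummy vma these tests are expected to run
 * against (the vma itself is constructed later in this file).
 */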
#define VMFLAGS (VM_READ|VM_WRITE|VM_EXEC)
/*
* On the s390 platform, the lower 4 bits are used to identify a given
* page table entry type. But these bits might affect the ability to
* clear entries with pxx_clear() because of how dynamic page table
* folding works on s390. So while loading up the entries, do not change
* the lower 4 bits. This does not affect any other platform. Also avoid
* bit 62 on ppc64, which is used to mark a pte entry.
*/
#define S390_SKIP_MASK GENMASK(3, 0)
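/*
 * GENMASK(62, 62) would be undefined on 32-bit builds where bit 62
 * does not exist, hence the __BITS_PER_LONG guard below.
 */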
#if __BITS_PER_LONG == 64
#define PPC64_SKIP_MASK GENMASK(62, 62)
#else
#define PPC64_SKIP_MASK 0x0
#endif
#define ARCH_SKIP_MASK (S390_SKIP_MASK | PPC64_SKIP_MASK)
#define RANDOM_ORVALUE (GENMASK(BITS_PER_LONG - 1, 0) & ~ARCH_SKIP_MASK)
#define RANDOM_NZVALUE GENMASK(7, 0)
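
/*
 * RANDOM_ORVALUE and RANDOM_NZVALUE are meant to pre-load entries with
 * non-zero bit patterns before the pxx_clear() style tests verify that
 * clearing really empties the entry. A minimal sketch of that intent
 * (not a verbatim copy of those tests):
 *
 *	pte = __pte(pte_val(pte) | RANDOM_ORVALUE);
 *	set_pte_at(mm, vaddr, ptep, pte);
 *	pte_clear(mm, vaddr, ptep);
 *	WARN_ON(!pte_none(ptep_get(ptep)));
 */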
static void __init pte_basic_tests(unsigned long pfn, pgprot_t prot)
{
pte_t pte = pfn_pte(pfn, prot);
pr_debug("Validating PTE basic\n");
WARN_ON(!pte_same(pte, pte));
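	/*
	 * Each helper pair below must round trip: an attribute set right
	 * after being cleared must read back as set, and an attribute
	 * cleared right after being set must read back as clear.
	 */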
WARN_ON(!pte_young(pte_mkyoung(pte_mkold(pte))));
WARN_ON(!pte_dirty(pte_mkdirty(pte_mkclean(pte))));
WARN_ON(!pte_write(pte_mkwrite(pte_wrprotect(pte))));
WARN_ON(pte_young(pte_mkold(pte_mkyoung(pte))));
WARN_ON(pte_dirty(pte_mkclean(pte_mkdirty(pte))));
WARN_ON(pte_write(pte_wrprotect(pte_mkwrite(pte))));
}
static void __init pte_advanced_tests(struct mm_struct *mm,
struct vm_area_struct *vma, pte_t *ptep,
unsigned long pfn, unsigned long vaddr,
pgprot_t prot)
{
pte_t pte = pfn_pte(pfn, prot);
/*
* Architectures optimize set_pte_at() by avoiding TLB flushes.
* This requires that set_pte_at() never be used to update an
* existing pte entry. Clear the pte before calling set_pte_at().
*/
pr_debug("Validating PTE advanced\n");
pte = pfn_pte(pfn, prot);
set_pte_at(mm, vaddr, ptep, pte);
ptep_set_wrprotect(mm, vaddr, ptep);
pte = ptep_get(ptep);
WARN_ON(pte_write(pte));
ptep_get_and_clear(mm, vaddr, ptep);
pte = ptep_get(ptep);
WARN_ON(!pte_none(pte));
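	/*
	 * ptep_set_access_flags() must be able to upgrade a clean,
	 * write protected entry into a dirty, writable one.
	 */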
pte = pfn_pte(pfn, prot);
pte = pte_wrprotect(pte);
pte = pte_mkclean(pte);
set_pte_at(mm, vaddr, ptep, pte);
pte = pte_mkwrite(pte);
pte = pte_mkdirty(pte);
ptep_set_access_flags(vma, vaddr, ptep, pte, 1);
pte = ptep_get(ptep);
WARN_ON(!(pte_write(pte) && pte_dirty(pte)));
ptep_get_and_clear_full(mm, vaddr, ptep, 1);
pte = ptep_get(ptep);
WARN_ON(!pte_none(pte));
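	/* ptep_test_and_clear_young() must clear the accessed (young) bit */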
pte = pfn_pte(pfn, prot);
pte = pte_mkyoung(pte);
set_pte_at(mm, vaddr, ptep, pte);
ptep_test_and_clear_young(vma, vaddr, ptep);
pte = ptep_get(ptep);
WARN_ON(pte_young(pte));
}
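
/*
 * A rough sketch of how the advanced tests are expected to be driven.
 * The real setup lives in debug_vm_pgtable() further down in this file;
 * the exact allocation and locking details are elided here:
 *
 *	mm = mm_alloc();
 *	vma = vm_area_alloc(mm);
 *	ptep = pte_alloc_map(mm, pmdp, vaddr);
 *	pte_advanced_tests(mm, vma, ptep, pfn, vaddr, prot);
 *	pte_unmap(ptep);
 */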
static void __init pte_savedwrite_tests(unsigned long pfn, pgprot_t prot)
{
pte_t pte = pfn_pte(pfn, prot);
if (!IS_ENABLED(CONFIG_NUMA_BALANCING))
return;
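	/*
	 * Saved write is how NUMA balancing (e.g. on ppc64) remembers that
	 * an entry was writable while the entry itself is temporarily made
	 * inaccessible, so these helpers must round trip just like the
	 * regular write bit.
	 */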
pr_debug("Validating PTE saved write\n");
WARN_ON(!pte_savedwrite(pte_mk_savedwrite(pte_clear_savedwrite(pte))));
WARN_ON(pte_savedwrite(pte_clear_savedwrite(pte_mk_savedwrite(pte))));
}
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static void __init pmd_basic_tests(unsigned long pfn, pgprot_t prot)
{
pmd_t pmd = pfn_pmd(pfn, prot);
if (!has_transparent_hugepage())
return;
pr_debug("Validating PMD basic\n");
WARN_ON(!pmd_same(pmd, pmd));
WARN_ON(!pmd_young(pmd_mkyoung(pmd_mkold(pmd))));
WARN_ON(!pmd_dirty(pmd_mkdirty(pmd_mkclean(pmd))));
WARN_ON(!pmd_write(pmd_mkwrite(pmd_wrprotect(pmd))));
WARN_ON(pmd_young(pmd_mkold(pmd_mkyoung(pmd))));
WARN_ON(pmd_dirty(pmd_mkclean(pmd_mkdirty(pmd))));
WARN_ON(pmd_write(pmd_wrprotect(pmd_mkwrite(pmd))));
/*
* A huge page does not point to a next level page table
* entry. Hence this must qualify as pmd_bad().
*/
WARN_ON(!pmd_bad(pmd_mkhuge(pmd)));
}
static void __init pmd_advanced_tests(struct mm_struct *mm,
struct vm_area_struct *vma, pmd_t *pmdp,
unsigned long pfn, unsigned long vaddr,
pgprot_t prot, pgtable_t pgtable)
{
pmd_t pmd = pfn_pmd(pfn, prot);
if (!has_transparent_hugepage())
return;
pr_debug("Validating PMD advanced\n");
/* Align the address wrt HPAGE_PMD_SIZE */
	vaddr = (vaddr & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE;