// SPDX-License-Identifier: GPL-2.0
/*
 * Device Memory Migration functionality.
 *
 * Originally written by Jérôme Glisse.
 */
#include <linux/export.h>
#include <linux/memremap.h>
#include <linux/migrate.h>
#include <linux/mm.h>
#include <linux/mm_inline.h>
#include <linux/mmu_notifier.h>
#include <linux/oom.h>
#include <linux/pagewalk.h>
#include <linux/rmap.h>
#include <linux/swapops.h>
#include <asm/tlbflush.h>
#include "internal.h"
static int migrate_vma_collect_skip(unsigned long start,
				    unsigned long end,
				    struct mm_walk *walk)
{
	struct migrate_vma *migrate = walk->private;
	unsigned long addr;

	for (addr = start; addr < end; addr += PAGE_SIZE) {
		migrate->dst[migrate->npages] = 0;
		migrate->src[migrate->npages++] = 0;
	}

	return 0;
}

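/*
 * Unpopulated ranges of an anonymous VMA are still migration candidates:
 * there is no source page to copy, but the destination can be backed by
 * a fresh device page, so each entry is marked MIGRATE_PFN_MIGRATE.
 */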
static int migrate_vma_collect_hole(unsigned long start,
				    unsigned long end,
				    __always_unused int depth,
				    struct mm_walk *walk)
{
	struct migrate_vma *migrate = walk->private;
	unsigned long addr;

	/* Only allow populating anonymous memory. */
	if (!vma_is_anonymous(walk->vma))
		return migrate_vma_collect_skip(start, end, walk);

	for (addr = start; addr < end; addr += PAGE_SIZE) {
		migrate->src[migrate->npages] = MIGRATE_PFN_MIGRATE;
		migrate->dst[migrate->npages] = 0;
		migrate->npages++;
		migrate->cpages++;
	}

	return 0;
}

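/*
 * Walk one PMD worth of the range, recording each PTE that may be
 * migrated as a migrate_pfn() entry in migrate->src. Transparent huge
 * pages are split first, since this path only handles base pages.
 */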
static int migrate_vma_collect_pmd(pmd_t *pmdp,
				   unsigned long start,
				   unsigned long end,
				   struct mm_walk *walk)
{
	struct migrate_vma *migrate = walk->private;
	struct vm_area_struct *vma = walk->vma;
	struct mm_struct *mm = vma->vm_mm;
	unsigned long addr = start, unmapped = 0;
	spinlock_t *ptl;
	pte_t *ptep;

again:
	if (pmd_none(*pmdp))
		return migrate_vma_collect_hole(start, end, -1, walk);

	if (pmd_trans_huge(*pmdp)) {
		struct page *page;

		ptl = pmd_lock(mm, pmdp);
		if (unlikely(!pmd_trans_huge(*pmdp))) {
			spin_unlock(ptl);
			goto again;
		}

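		/*
		 * No THP support here yet: the huge zero page just has its
		 * PMD split, while a real THP is split to base pages under
		 * the page lock before the range can be collected.
		 */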
		page = pmd_page(*pmdp);
		if (is_huge_zero_page(page)) {
			spin_unlock(ptl);
			split_huge_pmd(vma, pmdp, addr);
			if (pmd_trans_unstable(pmdp))
				return migrate_vma_collect_skip(start, end,
								walk);
		} else {
			int ret;

			get_page(page);
			spin_unlock(ptl);
			if (unlikely(!trylock_page(page)))
				return migrate_vma_collect_skip(start, end,
								walk);
			ret = split_huge_page(page);
			unlock_page(page);
			put_page(page);
			if (ret)
				return migrate_vma_collect_skip(start, end,
								walk);
			if (pmd_none(*pmdp))
				return migrate_vma_collect_hole(start, end, -1,
								walk);
		}
	}

	if (unlikely(pmd_bad(*pmdp)))
		return migrate_vma_collect_skip(start, end, walk);

	ptep = pte_offset_map_lock(mm, pmdp, addr, &ptl);
	arch_enter_lazy_mmu_mode();

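	/*
	 * Scan the PTEs with the page table lock held, encoding each
	 * migration candidate as a migrate_pfn() entry in migrate->src.
	 */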
	for (; addr < end; addr += PAGE_SIZE, ptep++) {
		unsigned long mpfn = 0, pfn;
		struct page *page;
		swp_entry_t entry;
		pte_t pte;

		pte = *ptep;

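		/*
		 * An empty PTE in an anonymous VMA is still a candidate:
		 * there is no source page to copy, so it is marked
		 * MIGRATE_PFN_MIGRATE with no source pfn.
		 */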
		if (pte_none(pte)) {
			if (vma_is_anonymous(vma)) {
				mpfn = MIGRATE_PFN_MIGRATE;
				migrate->cpages++;
			}
			goto next;
		}

		if (!pte_present(pte)) {
			/*
			 * We only care about the special page table entries
			 * for unaddressable device-private pages. Other
			 * special swap entries are not migratable, and
			 * regular swapped-out pages are ignored.
			 */
			entry = pte_to_swp_entry(pte);
			if (!is_device_private_entry(entry))
				goto next;

			page = pfn_swap_entry_to_page(entry);
			if (!(migrate->flags &
				MIGRATE_VMA_SELECT_DEVICE_PRIVATE) ||
			    page->pgmap->owner != migrate->pgmap_owner)
				goto next;

			mpfn = migrate_pfn(page_to_pfn(page)) |
					MIGRATE_PFN_MIGRATE;
			if (is_writable_device_private_entry(entry))
				mpfn |= MIGRATE_PFN_WRITE;
		} else {
			pfn = pte_pfn(pte);
			if (is_zero_pfn(pfn) &&
			    (migrate->flags & MIGRATE_VMA_SELECT_SYSTEM)) {
				mpfn = MIGRATE_PFN_MIGRATE;
				migrate->cpages++;
				goto next;
			}
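			/*
			 * Only pages matching the caller's selection flags
			 * (and, for device pages, its pgmap_owner) may be
			 * collected; anything else is left in place.
			 */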
			page = vm_normal_page(migrate->vma, addr, pte);
			if (page && !is_zone_device_page(page) &&
			    !(migrate->flags & MIGRATE_VMA_SELECT_SYSTEM))
				goto next;
			else if (page && is_device_coherent_page(page) &&
			    (!(migrate->flags & MIGRATE_VMA_SELECT_DEVICE_COHERENT) ||
			     page->pgmap->owner != migrate->pgmap_owner))
				goto next;
			mpfn = migrate_pfn(pfn) | MIGRATE_PFN_MIGRATE;
			mpfn |= pte_write(pte) ? MIGRATE_PFN_WRITE : 0;
		}

		/* FIXME support THP */
		if (!page || !page->mapping || PageTransCompound(page)) {
			mpfn = 0;
			goto next;
		}

		/*
		 * Getting a reference on the page pins it, which blocks any
		 * kind of migration; as a side effect it "freezes" the pte.
		 *
		 * For non-device pages we drop this reference after isolating
		 * the page from the lru (device pages are never on the lru
		 * and thus cannot be dropped from it).
		 */
		get_page(page);

		/*
		 * We rely on trylock_page() to avoid dead