// SPDX-License-Identifier: GPL-2.0 OR MIT
/*
* Copyright 2020 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Christian König
*/
/* Pooling of allocated pages is necessary because changing the caching
* attributes on x86 of the linear mapping requires a costly cross CPU TLB
* invalidate for those addresses.
*
 * In addition to that, allocations from the DMA coherent API are pooled as
 * well, because they are rather slow compared to alloc_pages+map.
*/
#include <linux/module.h>
#include <linux/dma-mapping.h>
#include <linux/debugfs.h>
#include <linux/highmem.h>
#include <linux/sched/mm.h>
#ifdef CONFIG_X86
#include <asm/set_memory.h>
#endif
#include <drm/ttm/ttm_backup.h>
#include <drm/ttm/ttm_pool.h>
#include <drm/ttm/ttm_tt.h>
#include <drm/ttm/ttm_bo.h>
#include "ttm_module.h"
#ifdef CONFIG_FAULT_INJECTION
#include <linux/fault-inject.h>
/* Fault attribute allowing injected failures in the backup path for testing. */
static DECLARE_FAULT_ATTR(backup_fault_inject);
#else
/* Without CONFIG_FAULT_INJECTION, never simulate a failure. */
#define should_fail(...) false
#endif
/**
 * struct ttm_pool_dma - Helper object for coherent DMA mappings
 *
 * @addr: original DMA address returned for the mapping
 * @vaddr: original vaddr returned for the mapping, with the allocation
 * order stored in the lower bits
 */
struct ttm_pool_dma {
	dma_addr_t addr;
	unsigned long vaddr;
};
/**
 * struct ttm_pool_alloc_state - Current state of the tt page allocation process
 * @pages: Pointer to the next tt page pointer to populate.
 * @caching_divide: Pointer to the first page pointer whose page has a staged but
 * not committed caching transition from write-back to @tt_caching.
 * @dma_addr: Pointer to the next tt dma_address entry to populate if any.
 * @remaining_pages: Remaining pages to populate.
 * @tt_caching: The requested cpu-caching for the pages allocated.
 */
struct ttm_pool_alloc_state {
	struct page **pages;
	struct page **caching_divide;
	dma_addr_t *dma_addr;
	pgoff_t remaining_pages;
	enum ttm_caching tt_caching;
};
/**
 * struct ttm_pool_tt_restore - State representing restore from backup
 * @pool: The pool used for page allocation while restoring.
 * @snapshot_alloc: A snapshot of the most recent struct ttm_pool_alloc_state.
 * @alloced_page: Pointer to the page most recently allocated from a pool or system.
 * @first_dma: The dma address corresponding to @alloced_page if dma_mapping
 * is requested.
 * @alloced_pages: The number of allocated pages present in the struct ttm_tt
 * page vector from this restore session.
 * @restored_pages: The number of 4K pages restored for @alloced_page (which
 * is typically a multi-order page).
 * @page_caching: The struct ttm_tt requested caching
 * @order: The order of @alloced_page.
 *
 * Recovery from backup might fail when we've recovered less than the
 * full ttm_tt. In order not to lose any data (yet), keep information
 * around that allows us to restart a failed ttm backup recovery.
 */
struct ttm_pool_tt_restore {
	struct ttm_pool *pool;
	struct ttm_pool_alloc_state snapshot_alloc;
	struct page *alloced_page;
	dma_addr_t first_dma;
	pgoff_t alloced_pages;
	pgoff_t restored_pages;
	enum ttm_caching page_caching;
	unsigned int order;
};
/*
 * Module parameter bounding the number of pages kept in the WC/UC/DMA
 * pools; writable at runtime (0644).
 */
static unsigned long page_pool_size;
MODULE_PARM_DESC(page_pool_size, "Number of pages in the WC/UC/DMA pool");
module_param(page_pool_size, ulong, 0644);
/* Global count of pages currently held by the pools. */
static atomic_long_t allocated_pages;
/* Global pools for write-combined pages, one per allocation order. */
static struct ttm_pool_type global_write_combined[NR_PAGE_ORDERS];
static struct ttm_pool_type global_uncached[NR_