// SPDX-License-Identifier: GPL-2.0-only OR MIT
/* Copyright (c) 2023 Imagination Technologies Ltd. */
#include "pvr_vm.h"
#include "pvr_device.h"
#include "pvr_drv.h"
#include "pvr_gem.h"
#include "pvr_mmu.h"
#include "pvr_rogue_fwif.h"
#include "pvr_rogue_heap_config.h"
#include <drm/drm_exec.h>
#include <drm/drm_gem.h>
#include <drm/drm_gpuvm.h>
#include <linux/bug.h>
#include <linux/container_of.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/gfp_types.h>
#include <linux/kref.h>
#include <linux/mutex.h>
#include <linux/stddef.h>
/**
* DOC: Memory context
*
* This is the "top level" datatype in the VM code. It's exposed in the public
* API as an opaque handle.
*/
/**
 * struct pvr_vm_context - Context type used to represent a single VM.
 *
 * Reference-counted via @ref_count; see pvr_vm_context_get(). Exposed to
 * the public API only as an opaque handle.
 */
struct pvr_vm_context {
	/**
	 * @pvr_dev: The PowerVR device to which this context is bound.
	 * This binding is immutable for the life of the context.
	 */
	struct pvr_device *pvr_dev;

	/** @mmu_ctx: The context for binding to physical memory. */
	struct pvr_mmu_context *mmu_ctx;

	/** @gpuvm_mgr: GPUVM object associated with this context. */
	struct drm_gpuvm gpuvm_mgr;

	/** @lock: Global lock on this VM. */
	struct mutex lock;

	/**
	 * @fw_mem_ctx_obj: Firmware object representing firmware memory
	 * context.
	 */
	struct pvr_fw_object *fw_mem_ctx_obj;

	/** @ref_count: Reference count of object. Drives object lifetime. */
	struct kref ref_count;

	/**
	 * @dummy_gem: GEM object to enable VM reservation. All private BOs
	 * should use the @dummy_gem.resv and not their own _resv field.
	 * Exposed to BOs via pvr_vm_get_dma_resv().
	 */
	struct drm_gem_object dummy_gem;
};
/**
 * to_pvr_vm_context() - Obtain the VM context embedding a given GPUVM object.
 * @gpuvm: Pointer to the embedded &struct drm_gpuvm.
 *
 * Return: The enclosing &struct pvr_vm_context.
 */
static inline
struct pvr_vm_context *to_pvr_vm_context(struct drm_gpuvm *gpuvm)
{
	struct pvr_vm_context *vm_ctx;

	vm_ctx = container_of(gpuvm, struct pvr_vm_context, gpuvm_mgr);

	return vm_ctx;
}
/**
 * pvr_vm_context_get() - Acquire a reference on a VM context.
 * @vm_ctx: Target VM context, or %NULL.
 *
 * NULL-safe: a %NULL argument is passed through without taking a reference.
 *
 * Return: @vm_ctx, for caller convenience.
 */
struct pvr_vm_context *pvr_vm_context_get(struct pvr_vm_context *vm_ctx)
{
	if (!vm_ctx)
		return NULL;

	kref_get(&vm_ctx->ref_count);

	return vm_ctx;
}
/**
 * pvr_vm_get_page_table_root_addr() - Get the DMA address of the root of the
 * page table structure behind a VM context.
 * @vm_ctx: Target VM context.
 *
 * Return: DMA address of the root page table, as reported by the MMU layer.
 */
dma_addr_t pvr_vm_get_page_table_root_addr(struct pvr_vm_context *vm_ctx)
{
	dma_addr_t root_addr;

	root_addr = pvr_mmu_get_root_table_dma_addr(vm_ctx->mmu_ctx);

	return root_addr;
}
/**
 * pvr_vm_get_dma_resv() - Expose the dma_resv owned by the VM context.
 * @vm_ctx: Target VM context.
 *
 * This is used to allow private BOs to share a dma_resv for faster fence
 * updates.
 *
 * Return: The dma_resv pointer, taken from the context's dummy GEM object.
 */
struct dma_resv *pvr_vm_get_dma_resv(struct pvr_vm_context *vm_ctx)
{
	struct dma_resv *resv;

	resv = vm_ctx->dummy_gem.resv;

	return resv;
}
/**
* DOC: Memory mappings
*/
/**
 * struct pvr_vm_gpuva - Wrapper type representing a single VM mapping.
 *
 * Embeds &struct drm_gpuva so the driver can extend mappings later without
 * changing the GPUVM core interface.
 */
struct pvr_vm_gpuva {
	/** @base: The wrapped drm_gpuva object. */
	struct drm_gpuva base;
};

/* Recover the wrapper from an embedded &struct drm_gpuva pointer. */
#define to_pvr_vm_gpuva(va) container_of_const(va, struct pvr_vm_gpuva, base)
/**
 * enum pvr_vm_bind_type - Type of a VM bind operation.
 * @PVR_VM_BIND_TYPE_MAP: Create a device-virtual mapping.
 * @PVR_VM_BIND_TYPE_UNMAP: Remove an existing device-virtual mapping.
 */
enum pvr_vm_bind_type {
	PVR_VM_BIND_TYPE_MAP,
	PVR_VM_BIND_TYPE_UNMAP,
};
/**
 * struct pvr_vm_bind_op - Context of a map/unmap operation.
 *
 * Holds everything a single bind operation needs, including preallocated
 * objects (@gpuvm_bo, @new_va, @prev_va, @next_va) so the GPUVM callbacks
 * do not have to allocate.
 */
struct pvr_vm_bind_op {
	/** @type: Map or unmap. */
	enum pvr_vm_bind_type type;

	/**
	 * @pvr_obj: Object associated with mapping (map only).
	 * NOTE(review): presumably %NULL for unmap ops — confirm at init site.
	 */
	struct pvr_gem_object *pvr_obj;

	/**
	 * @vm_ctx: VM context where the mapping will be created or destroyed.
	 */
	struct pvr_vm_context *vm_ctx;

	/** @mmu_op_ctx: MMU op context. */
	struct pvr_mmu_op_context *mmu_op_ctx;

	/** @gpuvm_bo: Prealloced wrapped BO for attaching to the gpuvm. */
	struct drm_gpuvm_bo *gpuvm_bo;

	/**
	 * @new_va: Prealloced VA mapping object (init in callback).
	 * Used when creating a mapping.
	 */
	struct pvr_vm_gpuva *new_va;

	/**
	 * @prev_va: Prealloced VA mapping object (init in callback).
	 * Used when a mapping or unmapping operation overlaps an existing
	 * mapping and splits away the beginning into a new mapping.
	 */
	struct pvr_vm_gpuva *prev_va;

	/**
	 * @next_va: Prealloced VA mapping object (init in callback).
	 * Used when a mapping or unmapping operation overlaps an existing
	 * mapping and splits away the end into a new mapping.
	 */
	struct pvr_vm_gpuva *next_va;

	/** @offset: Offset into @pvr_obj to begin mapping from. */
	u64 offset;

	/** @device_addr: Device-virtual address at the start of the mapping. */
	u64 device_addr;

	/** @size: Size of the desired mapping. */
	u64 size;
};
/**
 * pvr_vm_bind_op_exec() - Execute a single bind op.
 * @bind_op: Bind op context.
 *
 * Dispatches @bind_op to the GPUVM split/merge state machine according to
 * its &pvr_vm_bind_op.type.
 *
 * Returns:
 *  * 0 on success,
 *  * Any error code returned by drm_gpuva_sm_map(), drm_gpuva_sm_unmap(), or
 *    a callback function.
 */
static int pvr_vm_bind_op_exec(struct pvr_vm_bind_op *bind_op)
{
	struct drm_gpuvm *gpuvm = &bind_op->vm_ctx->gpuvm_mgr;

	if (bind_op->type == PVR_VM_BIND_TYPE_MAP)
		return drm_gpuvm_sm_map(gpuvm, bind_op,
					bind_op->device_addr,
					bind_op->size,
					gem_from_pvr_gem(bind_op->pvr_obj),
					bind_op->offset);

	if (bind_op->type == PVR_VM_BIND_TYPE_UNMAP)
		return drm_gpuvm_sm_unmap(gpuvm, bind_op,
					  bind_op->device_addr,
					  bind_op->size);

	/*
	 * This shouldn't happen unless something went wrong
	 * in drm_sched.
	 */
	WARN_ON(1);
	return -EINVAL;
}
static void pvr_vm_bind_op_fini(struct pvr_vm_bind_op *bind_op)
{
drm_gpuvm_bo_put(bind_op->gpuvm_bo);
kfree(bind_op->new_va);
kfree(bind_op->prev_va);