// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2016 Red Hat
* Author: Rob Clark <robdclark@gmail.com>
 */

#include "drm/drm_file.h"
#include "drm/msm_drm.h"
#include "linux/file.h"
#include "linux/sync_file.h"
#include "msm_drv.h"
#include "msm_gem.h"
#include "msm_gpu.h"
#include "msm_mmu.h"
#include "msm_syncobj.h"
#define vm_dbg(fmt, ...) pr_debug("%s:%d: "fmt"\n", __func__, __LINE__, ##__VA_ARGS__)
static uint vm_log_shift = 0;
MODULE_PARM_DESC(vm_log_shift, "Length of VM op log");
module_param_named(vm_log_shift, vm_log_shift, uint, 0600);
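
/*
 * Note: vm_log_shift is a power-of-two exponent rather than a length: the
 * op log ring buffer holds 1 << vm_log_shift entries (eg. a value of 5
 * would give 32 entries).  The log itself is allocated when the VM is
 * created (not shown in this excerpt); both vm_log() and
 * msm_gem_vm_unusable() below simply bail when vm->log is NULL.
 */
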
/**
* struct msm_vm_map_op - create new pgtable mapping
*/
struct msm_vm_map_op {
/** @iova: start address for mapping */
uint64_t iova;
/** @range: size of the region to map */
uint64_t range;
/** @offset: offset into @sgt to map */
uint64_t offset;
/** @sgt: pages to map, or NULL for a PRR mapping */
struct sg_table *sgt;
/** @prot: the mapping protection flags */
int prot;
/**
	 * @queue_id: The id of the submitqueue the operation is performed
	 * on, or zero for ops triggered outside of a submitqueue
	 * (ie. process cleanup)
*/
int queue_id;
};

/**
* struct msm_vm_unmap_op - unmap a range of pages from pgtable
*/
struct msm_vm_unmap_op {
/** @iova: start address for unmap */
uint64_t iova;
/** @range: size of region to unmap */
uint64_t range;
/** @reason: The reason for the unmap */
const char *reason;
/**
* @queue_id: The id of the submitqueue the operation is performed
* on, or zero for (in particular) UNMAP ops triggered outside of
* a submitqueue (ie. process cleanup)
*/
int queue_id;
};

/**
 * struct msm_vm_op - A MAP or UNMAP operation
*/
struct msm_vm_op {
/** @op: The operation type */
enum {
MSM_VM_OP_MAP = 1,
MSM_VM_OP_UNMAP,
} op;
union {
		/** @map: Parameters used if op == MSM_VM_OP_MAP */
struct msm_vm_map_op map;
		/** @unmap: Parameters used if op == MSM_VM_OP_UNMAP */
struct msm_vm_unmap_op unmap;
};
/** @node: list head in msm_vm_bind_job::vm_ops */
struct list_head node;
/**
* @obj: backing object for pages to be mapped/unmapped
*
* Async unmap ops, in particular, must hold a reference to the
* original GEM object backing the mapping that will be unmapped.
	 * But the same can be required in the map path, for example if
	 * there is no corresponding unmap op (such as at process exit).
*
* This ensures that the pages backing the mapping are not freed
* before the mapping is torn down.
*/
struct drm_gem_object *obj;
};
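
/*
 * Note on @obj lifetime (the exact call sites are outside this excerpt, so
 * treat this as an assumption rather than a guarantee): the op is expected
 * to take its own reference, eg. with drm_gem_object_get(obj), when it is
 * queued on the vm_ops list, and to drop it with drm_gem_object_put(obj)
 * once the pgtable update has been applied.
 */
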
/**
* struct msm_vm_bind_job - Tracking for a VM_BIND ioctl
*
* A table of userspace requested VM updates (MSM_VM_BIND_OP_UNMAP/MAP/MAP_NULL)
* gets applied to the vm, generating a list of VM ops (MSM_VM_OP_MAP/UNMAP)
* which are applied to the pgtables asynchronously. For example a userspace
* requested MSM_VM_BIND_OP_MAP could end up generating both an MSM_VM_OP_UNMAP
* to unmap an existing mapping, and a MSM_VM_OP_MAP to apply the new mapping.
*/
struct msm_vm_bind_job {
/** @base: base class for drm_sched jobs */
struct drm_sched_job base;
/** @vm: The VM being operated on */
struct drm_gpuvm *vm;
	/** @fence: The fence that is signaled when the job completes */
struct dma_fence *fence;
/** @queue: The queue that the job runs on */
struct msm_gpu_submitqueue *queue;
/** @prealloc: Tracking for pre-allocated MMU pgtable pages */
struct msm_mmu_prealloc prealloc;
/** @vm_ops: a list of struct msm_vm_op */
struct list_head vm_ops;
/** @bos_pinned: are the GEM objects being bound pinned? */
bool bos_pinned;
/** @nr_ops: the number of userspace requested ops */
unsigned int nr_ops;
/**
* @ops: the userspace requested ops
*
* The userspace requested ops are copied/parsed and validated
* before we start applying the updates to try to do as much up-
* front error checking as possible, to avoid the VM being in an
* undefined state due to partially executed VM_BIND.
*
* This table also serves to hold a reference to the backing GEM
* objects.
*/
struct msm_vm_bind_op {
uint32_t op;
uint32_t flags;
union {
struct drm_gem_object *obj;
uint32_t handle;
};
uint64_t obj_offset;
uint64_t iova;
uint64_t range;
} ops[];
};
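
/*
 * Illustrative sketch of the expansion described in the msm_vm_bind_job
 * kerneldoc above (hypothetical values, the actual split/merge decisions
 * are made by the drm_gpuvm helpers): a single userspace MSM_VM_BIND_OP_MAP
 * request that overlaps an existing mapping may be turned into two
 * pgtable-level ops on the vm_ops list, roughly:
 *
 *	MSM_VM_OP_UNMAP { .iova = <overlapping va>, .range = <overlap size> }
 *	MSM_VM_OP_MAP   { .iova = <requested va>, .range = <requested size>,
 *			  .sgt = <pages of ops[i].obj>, .prot = <prot flags> }
 */
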
#define job_foreach_bo(obj, _job) \
for (unsigned i = 0; i < (_job)->nr_ops; i++) \
if ((obj = (_job)->ops[i].obj))
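
/*
 * Example usage of job_foreach_bo() (sketch only, the loop body and the
 * "job" variable are placeholders): iterate the GEM objects referenced by
 * a bind job's op table, skipping ops with no backing object (eg. UNMAP or
 * MAP_NULL ops, where ops[i].obj is NULL):
 *
 *	struct drm_gem_object *obj;
 *
 *	job_foreach_bo (obj, job) {
 *		... operate on obj ...
 *	}
 */
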
static inline struct msm_vm_bind_job *to_msm_vm_bind_job(struct drm_sched_job *job)
{
return container_of(job, struct msm_vm_bind_job, base);
}

static void
msm_gem_vm_free(struct drm_gpuvm *gpuvm)
{
struct msm_gem_vm *vm = container_of(gpuvm, struct msm_gem_vm, base);
drm_mm_takedown(&vm->mm);
if (vm->mmu)
vm->mmu->funcs->destroy(vm->mmu);
dma_fence_put(vm->last_fence);
put_pid(vm->pid);
kfree(vm->log);
kfree(vm);
}

/**
* msm_gem_vm_unusable() - Mark a VM as unusable
* @gpuvm: the VM to mark unusable
*/
void
msm_gem_vm_unusable(struct drm_gpuvm *gpuvm)
{
struct msm_gem_vm *vm = to_msm_vm(gpuvm);
uint32_t vm_log_len = (1 << vm->log_shift);
uint32_t vm_log_mask = vm_log_len - 1;
uint32_t nr_vm_logs;
int first;
vm->unusable = true;
/* Bail if no log, or empty log: */
if (!vm->log || !vm->log[0].op)
return;
mutex_lock(&vm->mmu_lock);
/*
* log_idx is the next entry to overwrite, meaning it is the oldest, or
* first, entry (other than the special case handled below where the
* log hasn't wrapped around yet)
*/
first = vm->log_idx;
if (!vm->log[first].op) {
/*
* If the next log entry has not been written yet, then only
* entries 0 to idx-1 are valid (ie. we haven't wrapped around
* yet)
*/
		nr_vm_logs = first;
first = 0;
} else {
nr_vm_logs = vm_log_len;
}
pr_err("vm-log:\n");
for (int i = 0; i < nr_vm_logs; i++) {
int idx = (i + first) & vm_log_mask;
struct msm_gem_vm_log_entry *e = &vm->log[idx];
pr_err(" - %s:%d: 0x%016llx-0x%016llx\n",
e->op, e->queue_id, e->iova,
e->iova + e->range);
}
mutex_unlock(&vm->mmu_lock);
}
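
/*
 * For reference, the dump emitted above looks roughly like the following
 * (addresses, op names and queue ids are made-up examples):
 *
 *	vm-log:
 *	 - map:3: 0x0000000100000000-0x0000000100004000
 *	 - unmap:0: 0x0000000100000000-0x0000000100004000
 *
 * ie. "<op>:<queue_id>: <iova>-<iova + range>", oldest entry first, with
 * queue_id 0 indicating an op that did not come from a submitqueue.
 */
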
static void
vm_log(struct msm_gem_vm *vm, const char *op, uint64_t iova, uint64_t range, int queue_id)
{
int idx;
if (!vm->managed)
lockdep_assert_held(&vm->mmu_lock);
vm_dbg("%s:%p:%d: %016llx %016llx", op, vm, queue_id, iova, iova + range);
if (!vm->log)
return;
idx = vm->log_idx;
vm->log[idx].op = op;
vm->log[idx].iova = iova;
vm->log[idx].range = range;
vm->log[idx].queue_id = queue_id;
vm->log_idx = (vm->log_idx + 1) & ((1 << vm->log_shift) - 1);
}
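
/*
 * Worked example of the wrap-around above (illustrative only): with
 * vm->log_shift == 2 the log holds 1 << 2 == 4 entries, so log_idx
 * advances 0, 1, 2, 3, 0, 1, ...  Once the log has wrapped, the entry at
 * log_idx is always the oldest one, which is what msm_gem_vm_unusable()
 * relies on when it starts its dump at vm->log_idx.
 */
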
static void
vm_unmap_op(struct msm_gem_vm *vm, const s