// SPDX-License-Identifier: GPL-2.0 or MIT
/* Copyright 2018 Marty E. Plummer <hanetzer@startmail.com> */
/* Copyright 2019 Linaro, Ltd., Rob Herring <robh@kernel.org> */
/* Copyright 2019 Collabora ltd. */
#include <linux/list.h>
#include <linux/module.h>
#include <linux/of_platform.h>
#include <linux/pagemap.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <drm/drm_auth.h>
#include <drm/drm_debugfs.h>
#include <drm/drm_drv.h>
#include <drm/drm_exec.h>
#include <drm/drm_ioctl.h>
#include <drm/drm_syncobj.h>
#include <drm/drm_utils.h>
#include <drm/gpu_scheduler.h>
#include <drm/panthor_drm.h>
#include "panthor_device.h"
#include "panthor_fw.h"
#include "panthor_gem.h"
#include "panthor_gpu.h"
#include "panthor_heap.h"
#include "panthor_mmu.h"
#include "panthor_regs.h"
#include "panthor_sched.h"
/**
* DOC: user <-> kernel object copy helpers.
*/
/**
 * panthor_set_uobj() - Copy kernel object to user object.
 * @usr_ptr: User pointer.
 * @usr_size: Size of the user object.
 * @min_size: Minimum size for this object.
 * @kern_size: Size of the kernel object.
 * @in: Address of the kernel object to copy.
 *
 * Helper automating kernel -> user object copies.
 *
 * Don't use this function directly, use PANTHOR_UOBJ_SET() instead.
 *
 * Return: 0 on success, a negative error code otherwise.
 */
static int
panthor_set_uobj(u64 usr_ptr, u32 usr_size, u32 min_size, u32 kern_size, const void *in)
{
	void __user *out = u64_to_user_ptr(usr_ptr);
	u32 copy_len = min_t(u32, usr_size, kern_size);

	/* Reject user objects smaller than the minimum object size. */
	if (usr_size < min_size)
		return -EINVAL;

	if (copy_to_user(out, in, copy_len))
		return -EFAULT;

	/* If the user object is bigger than the kernel one, zero-fill the
	 * trailing bytes so userspace doesn't see stale data.
	 */
	if (usr_size > kern_size &&
	    clear_user(u64_to_user_ptr(usr_ptr + kern_size), usr_size - kern_size))
		return -EFAULT;

	return 0;
}
/**
 * panthor_get_uobj_array() - Copy a user object array into a kernel accessible object array.
 * @in: The object array to copy.
 * @min_stride: Minimum array stride.
 * @obj_size: Kernel object size.
 *
 * Helper automating user -> kernel object copies.
 *
 * Don't use this function directly, use PANTHOR_UOBJ_GET_ARRAY() instead.
 *
 * Return: newly allocated object array or an ERR_PTR on error.
 */
static void *
panthor_get_uobj_array(const struct drm_panthor_obj_array *in, u32 min_stride,
		       u32 obj_size)
{
	void *kern_array;
	int ret;

	/* An empty array needs no allocation at all. */
	if (!in->count)
		return NULL;

	/* User stride must be at least the minimum object size, otherwise it
	 * might lack useful information.
	 */
	if (in->stride < min_stride)
		return ERR_PTR(-EINVAL);

	/* kvmalloc_array() checks count * obj_size for overflow. */
	kern_array = kvmalloc_array(in->count, obj_size, GFP_KERNEL);
	if (!kern_array)
		return ERR_PTR(-ENOMEM);

	if (obj_size == in->stride) {
		/* Fast path when user/kernel have the same uAPI header version:
		 * the whole array can be copied in one go.
		 */
		ret = copy_from_user(kern_array, u64_to_user_ptr(in->array),
				     (unsigned long)obj_size * in->count) ?
		      -EFAULT : 0;
	} else {
		void __user *usr_elem = u64_to_user_ptr(in->array);
		void *kern_elem = kern_array;
		u32 i;

		/* Strides differ: copy element by element, letting
		 * copy_struct_from_user() handle size reconciliation.
		 */
		ret = 0;
		for (i = 0; i < in->count; i++) {
			ret = copy_struct_from_user(kern_elem, obj_size,
						    usr_elem, in->stride);
			if (ret)
				break;

			kern_elem += obj_size;
			usr_elem += in->stride;
		}
	}

	if (ret) {
		kvfree(kern_array);
		return ERR_PTR(ret);
	}

	return kern_array;
}
/**
 * PANTHOR_UOBJ_MIN_SIZE_INTERNAL() - Get the minimum user object size
 * @_typename: Object type.
 * @_last_mandatory_field: Last mandatory field.
 *
 * Get the minimum user object size based on the last mandatory field name,
 * A.K.A, the name of the last field of the structure at the time this
 * structure was added to the uAPI. This evaluates to the offset of that
 * field plus its size, i.e. everything up to and including it.
 *
 * Don't use directly, use PANTHOR_UOBJ_DECL() instead.
 */
#define PANTHOR_UOBJ_MIN_SIZE_INTERNAL(_typename, _last_mandatory_field) \
	(offsetof(_typename, _last_mandatory_field) + \
	 sizeof(((_typename *)NULL)->_last_mandatory_field))
/**
 * PANTHOR_UOBJ_DECL() - Declare a new uAPI object that is subject to
 * evolutions.
 * @_typename: Object type.
 * @_last_mandatory_field: Last mandatory field.
 *
 * Expands to a `type : min_size` association suitable for a _Generic()
 * selection. Should be used to extend the PANTHOR_UOBJ_MIN_SIZE() list.
 */
#define PANTHOR_UOBJ_DECL(_typename, _last_mandatory_field) \
	_typename : PANTHOR_UOBJ_MIN_SIZE_INTERNAL(_typename, _last_mandatory_field)
/**
 * PANTHOR_UOBJ_MIN_SIZE() - Get the minimum size of a given uAPI object
 * @_obj_name: Object to get the minimum size of.
 *
 * Resolved at compile time through a _Generic() selection on the object's
 * type; passing an object of a type not listed below is a build error.
 *
 * Don't use this macro directly, it's automatically called by
 * PANTHOR_UOBJ_{SET,GET_ARRAY}().
 */
#define PANTHOR_UOBJ_MIN_SIZE(_obj_name) \
	_Generic(_obj_name, \
		 PANTHOR_UOBJ_DECL(struct drm_panthor_gpu_info, tiler_present), \
		 PANTHOR_UOBJ_DECL(struct drm_panthor_csif_info, pad), \
		 PANTHOR_UOBJ_DECL(struct drm_panthor_sync_op, timeline_value), \
		 PANTHOR_UOBJ_DECL(struct drm_panthor_queue_submit, syncs), \
		 PANTHOR_UOBJ_DECL(struct drm_panthor_queue_create, ringbuf_size), \
		 PANTHOR_UOBJ_DECL(struct drm_panthor_vm_bind_op, syncs))
/**
 * PANTHOR_UOBJ_SET() - Copy a kernel object to a user object.
 * @_dest_usr_ptr: User pointer to copy to.
 * @_usr_size: Size of the user object.
 * @_src_obj: Kernel object to copy (not a pointer).
 *
 * Wrapper around panthor_set_uobj() that derives the minimum and kernel
 * sizes from @_src_obj's type at compile time.
 *
 * Return: 0 on success, a negative error code otherwise.
 */
#define PANTHOR_UOBJ_SET(_dest_usr_ptr, _usr_size, _src_obj) \
	panthor_set_uobj(_dest_usr_ptr, _usr_size, \
			 PANTHOR_UOBJ_MIN_SIZE(_src_obj), \
			 sizeof(_src_obj), &(_src_obj))
/**
 * PANTHOR_UOBJ_GET_ARRAY() - Copy a user object array to a kernel accessible
 * object array.
 * @_dest_array: Local variable that will hold the newly allocated kernel
 * object array. Left untouched on failure; on success the caller owns the
 * array and must release it with kvfree().
 * @_uobj_array: The drm_panthor_obj_array object describing the user object
 * array.
 *
 * Wrapper around panthor_get_uobj_array() that derives the minimum and
 * kernel element sizes from @_dest_array's element type at compile time.
 *
 * Return: 0 on success, a negative error code otherwise.
 */
#define PANTHOR_UOBJ_GET_ARRAY(_dest_array, _uobj_array) \
	({ \
		typeof(_dest_array) _tmp; \
		_tmp = panthor_get_uobj_array(_uobj_array, \
					      PANTHOR_UOBJ_MIN_SIZE((_dest_array)[0]), \
					      sizeof((_dest_array)[0])); \
		if (!IS_ERR(_tmp)) \
			_dest_array = _tmp; \
		PTR_ERR_OR_ZERO(_tmp); \
	})
/**
 * struct panthor_sync_signal - Represent a synchronization object point to attach
 * our job fence to.
 *
 * This structure is here to keep track of fences that are currently bound to
 * a specific syncobj point.
 *
 * At the beginning of a job submission, the fence
 * is retrieved from the syncobj itself, and can be NULL if no fence was attached
 * to this point.
 *
 * At the end, it points to the fence of the last job that had a
 * %DRM_PANTHOR_SYNC_OP_SIGNAL on this syncobj.
 *
 * With jobs being submitted in batches, the fence might change several times during
 * the process, allowing one job to wait on a job that's part of the same submission
 * but appears earlier in the drm_panthor_group_submit::queue_submits array.
 */
struct panthor_sync_signal {
	/** @node: list_head to track signal ops within a submit operation. */
	struct list_head node;

	/** @handle: The syncobj handle. */
	u32 handle;

	/**
	 * @point: The syncobj point.
	 *
	 * Zero for regular syncobjs, and non-zero for timeline syncobjs.
	 */
	u64 point;

	/**
	 * @syncobj: The sync object pointed by @handle.
	 */
	struct drm_syncobj *syncobj;

	/**
	 * @chain: Chain object used to link the new fence to an existing
	 * timeline syncobj.
	 *
	 * NULL for regular syncobjs, non-NULL for timeline syncobjs.
	 */
	struct dma_fence_chain *chain;

	/**
	 * @fence: The fence to assign to the syncobj or syncobj-point.
	 */
	struct dma_fence *fence;
};
/**
* struct panthor_job_ctx - Job context
*/
struct panthor_job_ctx {
/** @job: The job that is about to be submitted to drm_sched. */
struct drm_sched_j
|