/*
* Copyright 2008 Jerome Glisse.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* Authors:
* Jerome Glisse <glisse@freedesktop.org>
*/
#include <linux/list_sort.h>
#include <drm/drmP.h>
#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"
int amdgpu_cs_get_ring(struct amdgpu_device *adev, u32 ip_type,
                       u32 ip_instance, u32 ring,
                       struct amdgpu_ring **out_ring)
{
        /* Right now all IPs have only one instance - multiple rings. */
        if (ip_instance != 0) {
                DRM_ERROR("invalid ip instance: %d\n", ip_instance);
                return -EINVAL;
        }

        switch (ip_type) {
        default:
                DRM_ERROR("unknown ip type: %d\n", ip_type);
                return -EINVAL;
        case AMDGPU_HW_IP_GFX:
                if (ring < adev->gfx.num_gfx_rings) {
                        *out_ring = &adev->gfx.gfx_ring[ring];
                } else {
                        DRM_ERROR("only %d gfx rings are supported now\n",
                                  adev->gfx.num_gfx_rings);
                        return -EINVAL;
                }
                break;
        case AMDGPU_HW_IP_COMPUTE:
                if (ring < adev->gfx.num_compute_rings) {
                        *out_ring = &adev->gfx.compute_ring[ring];
                } else {
                        DRM_ERROR("only %d compute rings are supported now\n",
                                  adev->gfx.num_compute_rings);
                        return -EINVAL;
                }
                break;
        case AMDGPU_HW_IP_DMA:
                if (ring < adev->sdma.num_instances) {
                        *out_ring = &adev->sdma.instance[ring].ring;
                } else {
                        DRM_ERROR("only %d SDMA rings are supported\n",
                                  adev->sdma.num_instances);
                        return -EINVAL;
                }
                break;
        case AMDGPU_HW_IP_UVD:
                *out_ring = &adev->uvd.ring;
                break;
        case AMDGPU_HW_IP_VCE:
                if (ring < 2) {
                        *out_ring = &adev->vce.ring[ring];
                } else {
                        DRM_ERROR("only two VCE rings are supported\n");
                        return -EINVAL;
                }
                break;
        }
        return 0;
}
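
/*
 * Grab a reference to the BO named by a user fence chunk and remember the
 * offset at which the fence value will be written. Userptr BOs are
 * rejected, and the BO is staged in p->uf_entry so it can be reserved
 * along with the other buffers in the submission.
 */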
static int amdgpu_cs_user_fence_chunk(struct amdgpu_cs_parser *p,
                                      struct drm_amdgpu_cs_chunk_fence *fence_data)
{
        struct drm_gem_object *gobj;
        uint32_t handle;

        handle = fence_data->handle;
        gobj = drm_gem_object_lookup(p->adev->ddev, p->filp, handle);
        if (gobj == NULL)
                return -EINVAL;

        p->uf.bo = amdgpu_bo_ref(gem_to_amdgpu_bo(gobj));
        p->uf.offset = fence_data->offset;

        /* user fences may not live in userptr BOs */
        if (amdgpu_ttm_tt_get_usermm(p->uf.bo->tbo.ttm)) {
                drm_gem_object_unreference_unlocked(gobj);
                return -EINVAL;
        }

        p->uf_entry.robj = amdgpu_bo_ref(p->uf.bo);
        p->uf_entry.priority = 0;
        p->uf_entry.tv.bo = &p->uf_entry.robj->tbo;
        p->uf_entry.tv.shared = true;

        drm_gem_object_unreference_unlocked(gobj);
        return 0;
}
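
/*
 * First stage of the CS ioctl: look up the submission context and copy the
 * user's array of chunk descriptors into kernel memory for parsing.
 */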
int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
{
        union drm_amdgpu_cs *cs = data;
        uint64_t __user *chunk_array_user;
        uint64_t *chunk_array;
        struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
        unsigned size;
        int i;
        int ret;

        if (cs->in.num_chunks == 0)
                return 0;

        chunk_array = kmalloc_array(cs->in.num_chunks, sizeof(uint64_t),
                                    GFP_KERNEL);
        if (!chunk_array)
                return -ENOMEM;

        p->ctx = amdgpu_ctx_get(fpriv, cs->in.ctx_id);
        if (!p->ctx) {
                ret = -EINVAL;
                goto free_chunk;
        }

        /* get chunks */
        chunk_array_user = (uint64_t __user *)(unsigned long)(cs->in.chunks);
        if (copy_from_user(chunk_array, chunk_array_user,
                           sizeof(uint64_t)*cs->in.num_chunks)) {
                ret = -EFAULT;
                goto put_ctx;
        }

        p->nchunks = cs->in.num_chunks;
        p->chunks = kmalloc_array(p->nchunks, sizeof(struct amdgpu_cs_chunk),
                                  GFP_KERNEL);
        if (!p->chunks) {
                ret = -ENOMEM;
                goto put_ctx;
        }

        for (i = 0; i < p->nchunks; i++) {
                struct drm_amdgpu_cs_chunk __user *chunk_ptr = NULL;
                struct drm_amdgpu_cs_chunk user_chunk;
                uint32_t __user *cdata;

                chunk_ptr = (void __user *)(unsigned long)chunk_array[i];
                if (copy_from_user(&user_chunk, chunk_ptr,
                                   sizeof(struct drm_amdgpu_cs_chunk))) {
                        ret = -EFAULT;
                        /* no kdata allocated for this chunk yet, skip it
                         * when unwinding */
                        i--;
                        goto free_partial_kdata;
                }
                p->chunks[i].chunk_id = user_chunk.chunk_id;
                p->chunks[i].length_dw = user_chunk.length_dw;
                size = p->chunks[i].length_dw;