author | Francois Dugast <francois.dugast@intel.com> | 2023-07-31 17:30:02 +0200 |
---|---|---|
committer | Rodrigo Vivi <rodrigo.vivi@intel.com> | 2023-12-21 11:39:20 -0500 |
commit | 9b9529ce379a08e68d65231497dd6bad94281902 (patch) | |
tree | 68f61ba16bef69a0d607545e66dcc155e0418e15 | |
parent | c22a4ed0c325cd29d7baf07d4cf2c127550b8859 (diff) | |
drm/xe: Rename engine to exec_queue
"Engine" was inappropriately used to refer to execution queues, and it
also created confusion with hardware engines. Where it applies, the
exec_queue variable name is changed to q, and comments are updated
accordingly.
Link: https://gitlab.freedesktop.org/drm/xe/kernel/-/issues/162
Signed-off-by: Francois Dugast <francois.dugast@intel.com>
Reviewed-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
46 files changed, 1679 insertions, 1680 deletions
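The rename is purely mechanical: the xe_engine type and its xe_engine_* helpers become xe_exec_queue and xe_exec_queue_*, and local variables move from e/eng to q. As a minimal sketch of the pattern (condensed from the xe_file_close() hunk below, not a complete function):

```c
/* Before: "engine" named both the submission context and hardware engines. */
struct xe_engine *e;

xa_for_each(&xef->engine.xa, idx, e) {
	xe_engine_kill(e);
	xe_engine_put(e);
}

/* After: "exec_queue" names the submission context, while xe_hw_engine
 * keeps naming the hardware engine, removing the ambiguity. */
struct xe_exec_queue *q;

xa_for_each(&xef->exec_queue.xa, idx, q) {
	xe_exec_queue_kill(q);
	xe_exec_queue_put(q);
}
```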
diff --git a/drivers/gpu/drm/xe/tests/xe_migrate.c b/drivers/gpu/drm/xe/tests/xe_migrate.c
index 9e9b228fe315..5c8d5e78d9bc 100644
--- a/drivers/gpu/drm/xe/tests/xe_migrate.c
+++ b/drivers/gpu/drm/xe/tests/xe_migrate.c
@@ -38,7 +38,7 @@ static int run_sanity_job(struct xe_migrate *m, struct xe_device *xe,
 			  struct kunit *test)
 {
 	u64 batch_base = xe_migrate_batch_base(m, xe->info.supports_usm);
-	struct xe_sched_job *job = xe_bb_create_migration_job(m->eng, bb,
+	struct xe_sched_job *job = xe_bb_create_migration_job(m->q, bb,
 							      batch_base,
 							      second_idx);
 	struct dma_fence *fence;
@@ -215,7 +215,7 @@ static void test_pt_update(struct xe_migrate *m, struct xe_bo *pt,
 	xe_map_memset(xe, &pt->vmap, 0, (u8)expected, pt->size);
 
 	then = ktime_get();
-	fence = xe_migrate_update_pgtables(m, NULL, NULL, m->eng, &update, 1,
+	fence = xe_migrate_update_pgtables(m, NULL, NULL, m->q, &update, 1,
 					   NULL, 0, &pt_update);
 	now = ktime_get();
 	if (sanity_fence_failed(xe, fence, "Migration pagetable update", test))
@@ -257,7 +257,7 @@ static void xe_migrate_sanity_test(struct xe_migrate *m, struct kunit *test)
 		return;
 	}
 
-	big = xe_bo_create_pin_map(xe, tile, m->eng->vm, SZ_4M,
+	big = xe_bo_create_pin_map(xe, tile, m->q->vm, SZ_4M,
 				   ttm_bo_type_kernel,
 				   XE_BO_CREATE_VRAM_IF_DGFX(tile) |
 				   XE_BO_CREATE_PINNED_BIT);
@@ -266,7 +266,7 @@ static void xe_migrate_sanity_test(struct xe_migrate *m, struct kunit *test)
 		goto vunmap;
 	}
 
-	pt = xe_bo_create_pin_map(xe, tile, m->eng->vm, XE_PAGE_SIZE,
+	pt = xe_bo_create_pin_map(xe, tile, m->q->vm, XE_PAGE_SIZE,
 				  ttm_bo_type_kernel,
 				  XE_BO_CREATE_VRAM_IF_DGFX(tile) |
 				  XE_BO_CREATE_PINNED_BIT);
@@ -276,7 +276,7 @@ static void xe_migrate_sanity_test(struct xe_migrate *m, struct kunit *test)
 		goto free_big;
 	}
 
-	tiny = xe_bo_create_pin_map(xe, tile, m->eng->vm,
+	tiny = xe_bo_create_pin_map(xe, tile, m->q->vm,
 				    2 * SZ_4K,
 				    ttm_bo_type_kernel,
 				    XE_BO_CREATE_VRAM_IF_DGFX(tile) |
@@ -295,14 +295,14 @@ static void xe_migrate_sanity_test(struct xe_migrate *m, struct kunit *test)
 	}
 
 	kunit_info(test, "Starting tests, top level PT addr: %lx, special pagetable base addr: %lx\n",
-		   (unsigned long)xe_bo_main_addr(m->eng->vm->pt_root[id]->bo, XE_PAGE_SIZE),
+		   (unsigned long)xe_bo_main_addr(m->q->vm->pt_root[id]->bo, XE_PAGE_SIZE),
 		   (unsigned long)xe_bo_main_addr(m->pt_bo, XE_PAGE_SIZE));
 
 	/* First part of the test, are we updating our pagetable bo with a new entry? */
 	xe_map_wr(xe, &bo->vmap, XE_PAGE_SIZE * (NUM_KERNEL_PDE - 1), u64,
 		  0xdeaddeadbeefbeef);
 	expected = xe_pte_encode(pt, 0, XE_CACHE_WB, 0);
-	if (m->eng->vm->flags & XE_VM_FLAG_64K)
+	if (m->q->vm->flags & XE_VM_FLAG_64K)
 		expected |= XE_PTE_PS64;
 	if (xe_bo_is_vram(pt))
 		xe_res_first(pt->ttm.resource, 0, pt->size, &src_it);
@@ -399,11 +399,11 @@ static int migrate_test_run_device(struct xe_device *xe)
 		struct ww_acquire_ctx ww;
 
 		kunit_info(test, "Testing tile id %d.\n", id);
-		xe_vm_lock(m->eng->vm, &ww, 0, true);
+		xe_vm_lock(m->q->vm, &ww, 0, true);
 		xe_device_mem_access_get(xe);
 		xe_migrate_sanity_test(m, test);
 		xe_device_mem_access_put(xe);
-		xe_vm_unlock(m->eng->vm, &ww);
+		xe_vm_unlock(m->q->vm, &ww);
 	}
 
 	return 0;
diff --git a/drivers/gpu/drm/xe/xe_bb.c b/drivers/gpu/drm/xe/xe_bb.c
index b15a7cb7db4c..38f4ce83a207 100644
--- a/drivers/gpu/drm/xe/xe_bb.c
+++ b/drivers/gpu/drm/xe/xe_bb.c
@@ -7,7 +7,7 @@
 
 #include "regs/xe_gpu_commands.h"
 #include "xe_device.h"
-#include "xe_engine_types.h"
+#include "xe_exec_queue_types.h"
 #include "xe_gt.h"
 #include "xe_hw_fence.h"
 #include "xe_sa.h"
@@ -60,30 +60,30 @@ err:
 }
 
 static struct xe_sched_job *
-__xe_bb_create_job(struct xe_engine *kernel_eng, struct xe_bb *bb, u64 *addr)
+__xe_bb_create_job(struct xe_exec_queue *q, struct xe_bb *bb, u64 *addr)
 {
 	u32 size = drm_suballoc_size(bb->bo);
 
 	bb->cs[bb->len++] = MI_BATCH_BUFFER_END;
 
-	WARN_ON(bb->len * 4 + bb_prefetch(kernel_eng->gt) > size);
+	WARN_ON(bb->len * 4 + bb_prefetch(q->gt) > size);
 
 	xe_sa_bo_flush_write(bb->bo);
 
-	return xe_sched_job_create(kernel_eng, addr);
+	return xe_sched_job_create(q, addr);
 }
 
-struct xe_sched_job *xe_bb_create_wa_job(struct xe_engine *wa_eng,
+struct xe_sched_job *xe_bb_create_wa_job(struct xe_exec_queue *q,
 					 struct xe_bb *bb, u64 batch_base_ofs)
 {
 	u64 addr = batch_base_ofs + drm_suballoc_soffset(bb->bo);
 
-	XE_WARN_ON(!(wa_eng->vm->flags & XE_VM_FLAG_MIGRATION));
+	XE_WARN_ON(!(q->vm->flags & XE_VM_FLAG_MIGRATION));
 
-	return __xe_bb_create_job(wa_eng, bb, &addr);
+	return __xe_bb_create_job(q, bb, &addr);
 }
 
-struct xe_sched_job *xe_bb_create_migration_job(struct xe_engine *kernel_eng,
+struct xe_sched_job *xe_bb_create_migration_job(struct xe_exec_queue *q,
 						struct xe_bb *bb,
 						u64 batch_base_ofs,
 						u32 second_idx)
@@ -95,18 +95,18 @@ struct xe_sched_job *xe_bb_create_migration_job(struct xe_engine *kernel_eng,
 	};
 
 	XE_WARN_ON(second_idx > bb->len);
-	XE_WARN_ON(!(kernel_eng->vm->flags & XE_VM_FLAG_MIGRATION));
+	XE_WARN_ON(!(q->vm->flags & XE_VM_FLAG_MIGRATION));
 
-	return __xe_bb_create_job(kernel_eng, bb, addr);
+	return __xe_bb_create_job(q, bb, addr);
 }
 
-struct xe_sched_job *xe_bb_create_job(struct xe_engine *kernel_eng,
+struct xe_sched_job *xe_bb_create_job(struct xe_exec_queue *q,
 				      struct xe_bb *bb)
 {
 	u64 addr = xe_sa_bo_gpu_addr(bb->bo);
 
-	XE_WARN_ON(kernel_eng->vm && kernel_eng->vm->flags & XE_VM_FLAG_MIGRATION);
-	return __xe_bb_create_job(kernel_eng, bb, &addr);
+	XE_WARN_ON(q->vm && q->vm->flags & XE_VM_FLAG_MIGRATION);
+	return __xe_bb_create_job(q, bb, &addr);
 }
 
 void xe_bb_free(struct xe_bb *bb, struct dma_fence *fence)
diff --git a/drivers/gpu/drm/xe/xe_bb.h b/drivers/gpu/drm/xe/xe_bb.h
index 0cc9260c9634..c5ae0770bab5 100644
--- a/drivers/gpu/drm/xe/xe_bb.h
+++ b/drivers/gpu/drm/xe/xe_bb.h
@@ -11,16 +11,16 @@
 struct dma_fence;
 
 struct xe_gt;
-struct xe_engine;
+struct xe_exec_queue;
 struct xe_sched_job;
 
 struct xe_bb *xe_bb_new(struct xe_gt *gt, u32 size, bool usm);
-struct xe_sched_job *xe_bb_create_job(struct xe_engine *kernel_eng,
+struct xe_sched_job *xe_bb_create_job(struct xe_exec_queue *q,
 				      struct xe_bb *bb);
-struct xe_sched_job *xe_bb_create_migration_job(struct xe_engine *kernel_eng,
+struct xe_sched_job *xe_bb_create_migration_job(struct xe_exec_queue *q,
 						struct xe_bb *bb, u64 batch_ofs,
 						u32 second_idx);
-struct xe_sched_job *xe_bb_create_wa_job(struct xe_engine *wa_eng,
+struct xe_sched_job *xe_bb_create_wa_job(struct xe_exec_queue *q,
 					 struct xe_bb *bb, u64 batch_ofs);
 void xe_bb_free(struct xe_bb *bb, struct dma_fence *fence);
diff --git a/drivers/gpu/drm/xe/xe_devcoredump.c b/drivers/gpu/drm/xe/xe_devcoredump.c
index 61ff97ea7659..68abc0b195be 100644
--- a/drivers/gpu/drm/xe/xe_devcoredump.c
+++ b/drivers/gpu/drm/xe/xe_devcoredump.c
@@ -53,9 +53,9 @@ static struct xe_device *coredump_to_xe(const struct xe_devcoredump *coredump)
 	return container_of(coredump, struct xe_device, devcoredump);
 }
 
-static struct xe_guc *engine_to_guc(struct xe_engine *e)
+static struct xe_guc *exec_queue_to_guc(struct xe_exec_queue *q)
 {
-	return &e->gt->uc.guc;
+	return &q->gt->uc.guc;
 }
 
 static ssize_t xe_devcoredump_read(char *buffer, loff_t offset,
@@ -91,7 +91,7 @@ static ssize_t xe_devcoredump_read(char *buffer, loff_t offset,
 	drm_printf(&p, "\n**** GuC CT ****\n");
 	xe_guc_ct_snapshot_print(coredump->snapshot.ct, &p);
 
-	xe_guc_engine_snapshot_print(coredump->snapshot.ge, &p);
+	xe_guc_exec_queue_snapshot_print(coredump->snapshot.ge, &p);
 
 	drm_printf(&p, "\n**** HW Engines ****\n");
 	for (i = 0; i < XE_NUM_HW_ENGINES; i++)
@@ -112,7 +112,7 @@ static void xe_devcoredump_free(void *data)
 		return;
 
 	xe_guc_ct_snapshot_free(coredump->snapshot.ct);
-	xe_guc_engine_snapshot_free(coredump->snapshot.ge);
+	xe_guc_exec_queue_snapshot_free(coredump->snapshot.ge);
 	for (i = 0; i < XE_NUM_HW_ENGINES; i++)
 		if (coredump->snapshot.hwe[i])
 			xe_hw_engine_snapshot_free(coredump->snapshot.hwe[i]);
@@ -123,14 +123,14 @@ static void xe_devcoredump_free(void *data)
 }
 
 static void devcoredump_snapshot(struct xe_devcoredump *coredump,
-				 struct xe_engine *e)
+				 struct xe_exec_queue *q)
 {
 	struct xe_devcoredump_snapshot *ss = &coredump->snapshot;
-	struct xe_guc *guc = engine_to_guc(e);
+	struct xe_guc *guc = exec_queue_to_guc(q);
 	struct xe_hw_engine *hwe;
 	enum xe_hw_engine_id id;
-	u32 adj_logical_mask = e->logical_mask;
-	u32 width_mask = (0x1 << e->width) - 1;
+	u32 adj_logical_mask = q->logical_mask;
+	u32 width_mask = (0x1 << q->width) - 1;
 	int i;
 	bool cookie;
 
@@ -138,22 +138,22 @@ static void devcoredump_snapshot(struct xe_devcoredump *coredump,
 	ss->boot_time = ktime_get_boottime();
 
 	cookie = dma_fence_begin_signalling();
-	for (i = 0; e->width > 1 && i < XE_HW_ENGINE_MAX_INSTANCE;) {
+	for (i = 0; q->width > 1 && i < XE_HW_ENGINE_MAX_INSTANCE;) {
 		if (adj_logical_mask & BIT(i)) {
 			adj_logical_mask |= width_mask << i;
-			i += e->width;
+			i += q->width;
 		} else {
 			++i;
 		}
 	}
 
-	xe_force_wake_get(gt_to_fw(e->gt), XE_FORCEWAKE_ALL);
+	xe_force_wake_get(gt_to_fw(q->gt), XE_FORCEWAKE_ALL);
 
 	coredump->snapshot.ct = xe_guc_ct_snapshot_capture(&guc->ct, true);
-	coredump->snapshot.ge = xe_guc_engine_snapshot_capture(e);
+	coredump->snapshot.ge = xe_guc_exec_queue_snapshot_capture(q);
 
-	for_each_hw_engine(hwe, e->gt, id) {
-		if (hwe->class != e->hwe->class ||
+	for_each_hw_engine(hwe, q->gt, id) {
+		if (hwe->class != q->hwe->class ||
 		    !(BIT(hwe->logical_instance) & adj_logical_mask)) {
 			coredump->snapshot.hwe[id] = NULL;
 			continue;
@@ -161,21 +161,21 @@ static void devcoredump_snapshot(struct xe_devcoredump *coredump,
 			xe_hw_engine_snapshot_capture(hwe);
 	}
 
-	xe_force_wake_put(gt_to_fw(e->gt), XE_FORCEWAKE_ALL);
+	xe_force_wake_put(gt_to_fw(q->gt), XE_FORCEWAKE_ALL);
 	dma_fence_end_signalling(cookie);
 }
 
 /**
  * xe_devcoredump - Take the required snapshots and initialize coredump device.
- * @e: The faulty xe_engine, where the issue was detected.
+ * @q: The faulty xe_exec_queue, where the issue was detected.
  *
  * This function should be called at the crash time within the serialized
  * gt_reset. It is skipped if we still have the core dump device available
  * with the information of the 'first' snapshot.
  */
-void xe_devcoredump(struct xe_engine *e)
+void xe_devcoredump(struct xe_exec_queue *q)
 {
-	struct xe_device *xe = gt_to_xe(e->gt);
+	struct xe_device *xe = gt_to_xe(q->gt);
 	struct xe_devcoredump *coredump = &xe->devcoredump;
 
 	if (coredump->captured) {
@@ -184,7 +184,7 @@ void xe_devcoredump(struct xe_engine *e)
 	}
 
 	coredump->captured = true;
-	devcoredump_snapshot(coredump, e);
+	devcoredump_snapshot(coredump, q);
 
 	drm_info(&xe->drm, "Xe device coredump has been created\n");
 	drm_info(&xe->drm, "Check your /sys/class/drm/card%d/device/devcoredump/data\n",
diff --git a/drivers/gpu/drm/xe/xe_devcoredump.h b/drivers/gpu/drm/xe/xe_devcoredump.h
index 854882129227..6ac218a5c194 100644
--- a/drivers/gpu/drm/xe/xe_devcoredump.h
+++ b/drivers/gpu/drm/xe/xe_devcoredump.h
@@ -7,12 +7,12 @@
 #define _XE_DEVCOREDUMP_H_
 
 struct xe_device;
-struct xe_engine;
+struct xe_exec_queue;
 
 #ifdef CONFIG_DEV_COREDUMP
-void xe_devcoredump(struct xe_engine *e);
+void xe_devcoredump(struct xe_exec_queue *q);
 #else
-static inline void xe_devcoredump(struct xe_engine *e)
+static inline void xe_devcoredump(struct xe_exec_queue *q)
 {
 }
 #endif
diff --git a/drivers/gpu/drm/xe/xe_devcoredump_types.h b/drivers/gpu/drm/xe/xe_devcoredump_types.h
index c0d711eb6ab3..7fdad9c3d3dd 100644
--- a/drivers/gpu/drm/xe/xe_devcoredump_types.h
+++ b/drivers/gpu/drm/xe/xe_devcoredump_types.h
@@ -30,7 +30,7 @@ struct xe_devcoredump_snapshot {
 	/** @ct: GuC CT snapshot */
 	struct xe_guc_ct_snapshot *ct;
 	/** @ge: Guc Engine snapshot */
-	struct xe_guc_submit_engine_snapshot *ge;
+	struct xe_guc_submit_exec_queue_snapshot *ge;
 	/** @hwe: HW Engine snapshot array */
 	struct xe_hw_engine_snapshot *hwe[XE_NUM_HW_ENGINES];
 };
diff --git a/drivers/gpu/drm/xe/xe_device.c b/drivers/gpu/drm/xe/xe_device.c
index a8ab86379ed6..df1953759c67 100644
--- a/drivers/gpu/drm/xe/xe_device.c
+++ b/drivers/gpu/drm/xe/xe_device.c
@@ -53,33 +53,33 @@ static int xe_file_open(struct drm_device *dev, struct drm_file *file)
 	mutex_init(&xef->vm.lock);
 	xa_init_flags(&xef->vm.xa, XA_FLAGS_ALLOC1);
 
-	mutex_init(&xef->engine.lock);
-	xa_init_flags(&xef->engine.xa, XA_FLAGS_ALLOC1);
+	mutex_init(&xef->exec_queue.lock);
+	xa_init_flags(&xef->exec_queue.xa, XA_FLAGS_ALLOC1);
 
 	file->driver_priv = xef;
 	return 0;
 }
 
-static void device_kill_persistent_engines(struct xe_device *xe,
-					   struct xe_file *xef);
+static void device_kill_persistent_exec_queues(struct xe_device *xe,
+					       struct xe_file *xef);
 
 static void xe_file_close(struct drm_device *dev, struct drm_file *file)
 {
 	struct xe_device *xe = to_xe_device(dev);
 	struct xe_file *xef = file->driver_priv;
 	struct xe_vm *vm;
-	struct xe_engine *e;
+	struct xe_exec_queue *q;
 	unsigned long idx;
 
-	mutex_lock(&xef->engine.lock);
-	xa_for_each(&xef->engine.xa, idx, e) {
-		xe_engine_kill(e);
-		xe_engine_put(e);
+	mutex_lock(&xef->exec_queue.lock);
+	xa_for_each(&xef->exec_queue.xa, idx, q) {
+		xe_exec_queue_kill(q);
+		xe_exec_queue_put(q);
 	}
-	mutex_unlock(&xef->engine.lock);
-	xa_destroy(&xef->engine.xa);
-	mutex_destroy(&xef->engine.lock);
-	device_kill_persistent_engines(xe, xef);
+	mutex_unlock(&xef->exec_queue.lock);
+	xa_destroy(&xef->exec_queue.xa);
+	mutex_destroy(&xef->exec_queue.lock);
+	device_kill_persistent_exec_queues(xe, xef);
 
 	mutex_lock(&xef->vm.lock);
 	xa_for_each(&xef->vm.xa, idx, vm)
@@ -99,15 +99,15 @@ static const struct drm_ioctl_desc xe_ioctls[] = {
 	DRM_IOCTL_DEF_DRV(XE_VM_CREATE, xe_vm_create_ioctl, DRM_RENDER_ALLOW),
 	DRM_IOCTL_DEF_DRV(XE_VM_DESTROY, xe_vm_destroy_ioctl, DRM_RENDER_ALLOW),
 	DRM_IOCTL_DEF_DRV(XE_VM_BIND, xe_vm_bind_ioctl, DRM_RENDER_ALLOW),
-	DRM_IOCTL_DEF_DRV(XE_ENGINE_CREATE, xe_engine_create_ioctl,
+	DRM_IOCTL_DEF_DRV(XE_EXEC_QUEUE_CREATE, xe_exec_queue_create_ioctl,
 			  DRM_RENDER_ALLOW),
-	DRM_IOCTL_DEF_DRV(XE_ENGINE_GET_PROPERTY, xe_engine_get_property_ioctl,
+	DRM_IOCTL_DEF_DRV(XE_EXEC_QUEUE_GET_PROPERTY, xe_exec_queue_get_property_ioctl,
 			  DRM_RENDER_ALLOW),
-	DRM_IOCTL_DEF_DRV(XE_ENGINE_DESTROY, xe_engine_destroy_ioctl,
+	DRM_IOCTL_DEF_DRV(XE_EXEC_QUEUE_DESTROY, xe_exec_queue_destroy_ioctl,
 			  DRM_RENDER_ALLOW),
 	DRM_IOCTL_DEF_DRV(XE_EXEC, xe_exec_ioctl, DRM_RENDER_ALLOW),
 	DRM_IOCTL_DEF_DRV(XE_MMIO, xe_mmio_ioctl, DRM_RENDER_ALLOW),
-	DRM_IOCTL_DEF_DRV(XE_ENGINE_SET_PROPERTY, xe_engine_set_property_ioctl,
+	DRM_IOCTL_DEF_DRV(XE_EXEC_QUEUE_SET_PROPERTY, xe_exec_queue_set_property_ioctl,
 			  DRM_RENDER_ALLOW),
 	DRM_IOCTL_DEF_DRV(XE_WAIT_USER_FENCE, xe_wait_user_fence_ioctl,
 			  DRM_RENDER_ALLOW),
@@ -324,33 +324,33 @@ void xe_device_shutdown(struct xe_device *xe)
 {
 }
 
-void xe_device_add_persistent_engines(struct xe_device *xe, struct xe_engine *e)
+void xe_device_add_persistent_exec_queues(struct xe_device *xe, struct xe_exec_queue *q)
 {
 	mutex_lock(&xe->persistent_engines.lock);
-	list_add_tail(&e->persistent.link, &xe->persistent_engines.list);
+	list_add_tail(&q->persistent.link, &xe->persistent_engines.list);
 	mutex_unlock(&xe->persistent_engines.lock);
 }
 
-void xe_device_remove_persistent_engines(struct xe_device *xe,
-					 struct xe_engine *e)
+void xe_device_remove_persistent_exec_queues(struct xe_device *xe,
+					     struct xe_exec_queue *q)
 {
 	mutex_lock(&xe->persistent_engines.lock);
-	if (!list_empty(&e->persistent.link))
-		list_del(&e->persistent.link);
+	if (!list_empty(&q->persistent.link))
+		list_del(&q->persistent.link);
 	mutex_unlock(&xe->persistent_engines.lock);
 }
 
-static void device_kill_persistent_engines(struct xe_device *xe,
-					   struct xe_file *xef)
+static void device_kill_persistent_exec_queues(struct xe_device *xe,
+					       struct xe_file *xef)
 {
-	struct xe_engine *e, *next;
+	struct xe_exec_queue *q, *next;
 
 	mutex_lock(&xe->persistent_engines.lock);
-	list_for_each_entry_safe(e, next, &xe->persistent_engines.list,
+	list_for_each_entry_safe(q, next, &xe->persistent_engines.list,
 				 persistent.link)
-		if (e->persistent.xef == xef) {
-			xe_engine_kill(e);
-			list_del_init(&e->persistent.link);
+		if (q->persistent.xef == xef) {
+			xe_exec_queue_kill(q);
+			list_del_init(&q->persistent.link);
 		}
 	mutex_unlock(&xe->persistent_engines.lock);
 }
diff --git a/drivers/gpu/drm/xe/xe_device.h b/drivers/gpu/drm/xe/xe_device.h
index 61a5cf1f7300..71582094834c 100644
--- a/drivers/gpu/drm/xe/xe_device.h
+++ b/drivers/gpu/drm/xe/xe_device.h
@@ -6,7 +6,7 @@
 #ifndef _XE_DEVICE_H_
 #define _XE_DEVICE_H_
 
-struct xe_engine;
+struct xe_exec_queue;
 struct xe_file;
 
 #include <drm/drm_util.h>
@@ -41,9 +41,9 @@ int xe_device_probe(struct xe_device *xe);
 void xe_device_remove(struct xe_device *xe);
 void xe_device_shutdown(struct xe_device *xe);
 
-void xe_device_add_persistent_engines(struct xe_device *xe, struct xe_engine *e);
-void xe_device_remove_persistent_engines(struct xe_device *xe,
-					 struct xe_engine *e);
+void xe_device_add_persistent_exec_queues(struct xe_device *xe, struct xe_exec_queue *q);
+void xe_device_remove_persistent_exec_queues(struct xe_device *xe,
+					     struct xe_exec_queue *q);
 
 void xe_device_wmb(struct xe_device *xe);
diff --git a/drivers/gpu/drm/xe/xe_device_types.h b/drivers/gpu/drm/xe/xe_device_types.h
index c521ffaf3871..128e0a953692 100644
--- a/drivers/gpu/drm/xe/xe_device_types.h
+++ b/drivers/gpu/drm/xe/xe_device_types.h
@@ -377,13 +377,13 @@ struct xe_file {
 		struct mutex lock;
 	} vm;
 
-	/** @engine: Submission engine state for file */
+	/** @exec_queue: Submission exec queue state for file */
 	struct {
 		/** @xe: xarray to store engines */
 		struct xarray xa;
 		/** @lock: protects file engine state */
 		struct mutex lock;
-	} engine;
+	} exec_queue;
 };
 
 #endif
diff --git a/drivers/gpu/drm/xe/xe_engine_types.h b/drivers/gpu/drm/xe/xe_engine_types.h
deleted file mode 100644
index f1d531735f6d..000000000000
--- a/drivers/gpu/drm/xe/xe_engine_types.h
+++ /dev/null
@@ -1,209 +0,0 @@
-/* SPDX-License-Identifier: MIT */
-/*
- * Copyright © 2022 Intel Corporation
- */
-
-#ifndef _XE_ENGINE_TYPES_H_
-#define _XE_ENGINE_TYPES_H_
-
-#include <linux/kref.h>
-
-#include <drm/gpu_scheduler.h>
-
-#include "xe_gpu_scheduler_types.h"
-#include "xe_hw_engine_types.h"
-#include "xe_hw_fence_types.h"
-#include "xe_lrc_types.h"
-
-struct xe_execlist_engine;
-struct xe_gt;
-struct xe_guc_engine;
-struct xe_hw_engine;
-struct xe_vm;
-
-enum xe_engine_priority {
-	XE_ENGINE_PRIORITY_UNSET = -2, /* For execlist usage only */
-	XE_ENGINE_PRIORITY_LOW = 0,
-	XE_ENGINE_PRIORITY_NORMAL,
-	XE_ENGINE_PRIORITY_HIGH,
-	XE_ENGINE_PRIORITY_KERNEL,
-
-	XE_ENGINE_PRIORITY_COUNT
-};
-
-/**
- * struct xe_engine - Submission engine
- *
- * Contains all state necessary for submissions. Can either be a user object or
- * a kernel object.
- */
-struct xe_engine {
-	/** @gt: graphics tile this engine can submit to */
-	struct xe_gt *gt;
-	/**
-	 * @hwe: A hardware of the same class. May (physical engine) or may not
-	 * (virtual engine) be where jobs actual engine up running. Should never
-	 * really be used for submissions.
-	 */
-	struct xe_hw_engine *hwe;
-	/** @refcount: ref count of this engine */
-	struct kref refcount;
-	/** @vm: VM (address space) for this engine */
-	struct xe_vm *vm;
-	/** @class: class of this engine */
-	enum xe_engine_class class;
-	/** @priority: priority of this exec queue */
-	enum xe_engine_priority priority;
-	/**
-	 * @logical_mask: logical mask of where job submitted to engine can run
-	 */
-	u32 logical_mask;
-	/** @name: name of this engine */
-	char name[MAX_FENCE_NAME_LEN];
-	/** @width: width (number BB submitted per exec) of this engine */
-	u16 width;
-	/** @fence_irq: fence IRQ used to signal job completion */
-	struct xe_hw_fence_irq *fence_irq;
-
-#define ENGINE_FLAG_BANNED		BIT(0)
-#define ENGINE_FLAG_KERNEL		BIT(1)
-#define ENGINE_FLAG_PERSISTENT		BIT(2)
-#define ENGINE_FLAG_COMPUTE_MODE	BIT(3)
-/* Caller needs to hold rpm ref when creating engine with ENGINE_FLAG_VM */
-#define ENGINE_FLAG_VM			BIT(4)
-#define ENGINE_FLAG_BIND_ENGINE_CHILD	BIT(5)
-#define ENGINE_FLAG_WA			BIT(6)
-
-	/**
-	 * @flags: flags for this engine, should statically setup aside from ban
-	 * bit
-	 */
-	unsigned long flags;
-
-	union {
-		/** @multi_gt_list: list head for VM bind engines if multi-GT */
-		struct list_head multi_gt_list;
-		/** @multi_gt_link: link for VM bind engines if multi-GT */
-		struct list_head multi_gt_link;
-	};
-
-	union {
-		/** @execlist: execlist backend specific state for engine */
-		struct xe_execlist_engine *execlist;
-		/** @guc: GuC backend specific state for engine */
-		struct xe_guc_engine *guc;
-	};
-
-	/**
-	 * @persistent: persistent engine state
-	 */
-	struct {
-		/** @xef: file which this engine belongs to */
-		struct xe_file *xef;
-		/** @link: link in list of persistent engines */
-		struct list_head link;
-	} persistent;
-
-	union {
-		/**
-		 * @parallel: parallel submission state
-		 */
-		struct {
-			/** @composite_fence_ctx: context composite fence */
-			u64 composite_fence_ctx;
-			/** @composite_fence_seqno: seqno for composite fence */
-			u32 composite_fence_seqno;
-		} parallel;
-		/**
-		 * @bind: bind submission state
-		 */
-		struct {
-			/** @fence_ctx: context bind fence */
-			u64 fence_ctx;
-			/** @fence_seqno: seqno for bind fence */
-			u32 fence_seqno;
-		} bind;
-	};
-
-	/** @sched_props: scheduling properties */
-	struct {
-		/** @timeslice_us: timeslice period in micro-seconds */
-		u32 timeslice_us;
-		/** @preempt_timeout_us: preemption timeout in micro-seconds */
-		u32 preempt_timeout_us;
-	} sched_props;
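The listing is truncated above. In the full commit, the deleted xe_engine_types.h content reappears as xe_exec_queue_types.h with each identifier renamed on the same pattern; for example, the priority enum becomes the following (a sketch reconstructed from the rename rule, not quoted from the truncated hunk):

```c
enum xe_exec_queue_priority {
	XE_EXEC_QUEUE_PRIORITY_UNSET = -2, /* For execlist usage only */
	XE_EXEC_QUEUE_PRIORITY_LOW = 0,
	XE_EXEC_QUEUE_PRIORITY_NORMAL,
	XE_EXEC_QUEUE_PRIORITY_HIGH,
	XE_EXEC_QUEUE_PRIORITY_KERNEL,

	XE_EXEC_QUEUE_PRIORITY_COUNT
};
```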