From 97041ed37718dc9ba30aa23ca74093dc93ac89fb Mon Sep 17 00:00:00 2001 From: Bas Nieuwenhuizen Date: Thu, 13 Apr 2023 16:22:51 +0200 Subject: drm/amdgpu: Increase GFX6 graphics ring size. MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit To ensure it supports 192 IBs per submission, so we can keep a simplified IB limit in the follow up patch without having to look at IP or GPU version. Reviewed-by: Christian König Signed-off-by: Bas Nieuwenhuizen Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd/amdgpu') diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c index c41219e23151..d9ce4d1c50e4 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c @@ -3073,7 +3073,7 @@ static int gfx_v6_0_sw_init(void *handle) ring = &adev->gfx.gfx_ring[i]; ring->ring_obj = NULL; sprintf(ring->name, "gfx"); - r = amdgpu_ring_init(adev, ring, 1024, + r = amdgpu_ring_init(adev, ring, 2048, &adev->gfx.eop_irq, AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP, AMDGPU_RING_PRIO_DEFAULT, NULL); -- cgit v1.2.3 From c30ddcece3a0a86853862a7d92678a79525ca1fb Mon Sep 17 00:00:00 2001 From: Bas Nieuwenhuizen Date: Thu, 13 Apr 2023 16:22:52 +0200 Subject: drm/amdgpu: Add a max ibs per submission limit. MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit And ensure each ring supports that many submissions. This makes sure that we don't get surprises after the submission has been scheduled where the ring allocation actually gets rejected. My calculations on the existing limits: COMPUTE v10: 128 COMPUTE v11: 128 COMPUTE v6: 157 COMPUTE v7: 133 COMPUTE v8: 130 COMPUTE v9: 125 GFX v10: 208 GFX v11: 213 GFX v6: 154 (doubling this in the previous patch) GFX v7: 226 GFX v8: 213 GFX v9: 208 GFX v9 (SW): 208 SDMA CIK: 87 SDMA SI: 97 SDMA v2.4: 74 SDMA v3.0: 74 SDMA v4.0: 72 SDMA v5.0: 51 SDMA v6.0: 52 UVD ENC v6.0: 98 UVD ENC v7.0: 92 UVD v3.1: 124 UVD v4.2: 124 UVD v5.0: 83 UVD v6.0 (VM): 55 UVD v7.0: 51 VCE v2.0: 126 VCE v3.0 (VM): 98 VCE v4.0: 93 VCN DEC v1.0: 49 VCN DEC v2.0: 51 VCN DEC v3.0: 51 VCN ENC v1.0: 58 VCN ENC v2.0: 93 VCN ENC v3.0: 93 VCN ENC v4.0: 93 VCN JPEG v1.0: 17 VCN JPEG v2.0: 16 VCN JPEG v2.5: 17 VCN JPEG v3.0: 17 VCN JPEG v4.0: 17 Link: https://gitlab.freedesktop.org/drm/amd/-/issues/2498 Reviewed-by: Christian König Signed-off-by: Bas Nieuwenhuizen Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 3 +++ drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c | 29 +++++++++++++++++++++++++++++ drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h | 1 + 3 files changed, 33 insertions(+) (limited to 'drivers/gpu/drm/amd/amdgpu') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c index 08eced097bd8..d8b3c9198d33 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c @@ -112,6 +112,9 @@ static int amdgpu_cs_p1_ib(struct amdgpu_cs_parser *p, if (r < 0) return r; + if (num_ibs[r] >= amdgpu_ring_max_ibs(chunk_ib->ip_type)) + return -EINVAL; + ++(num_ibs[r]); p->gang_leader_idx = r; return 0; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c index dc474b809604..f676c236b657 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c @@ -49,6 +49,26 @@ * them until the pointers are equal again. 
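 * (Editor's sketch, not from the patches: the sizing rule added in the next
 * hunk requires a ring to hold emit_frame_size + amdgpu_ring_max_ibs(type) *
 * emit_ib_size dwords per submission. Backing numbers out of the table
 * above, GFX6 emits 6 dwords per IB with roughly 100 dwords of frame
 * overhead, so 192 IBs need about 192 * 6 + 100 = 1252 dwords, more than
 * the old 1024, which is why the first patch doubles the ring to 2048.)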
*/ +/** + * amdgpu_ring_max_ibs - Return max IBs that fit in a single submission. + * + * @type: ring type for which to return the limit. + */ +unsigned int amdgpu_ring_max_ibs(enum amdgpu_ring_type type) +{ + switch (type) { + case AMDGPU_RING_TYPE_GFX: + /* Need to keep at least 192 on GFX7+ for old radv. */ + return 192; + case AMDGPU_RING_TYPE_COMPUTE: + return 125; + case AMDGPU_RING_TYPE_VCN_JPEG: + return 16; + default: + return 49; + } +} + /** * amdgpu_ring_alloc - allocate space on the ring buffer * @@ -182,6 +202,7 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring, int sched_hw_submission = amdgpu_sched_hw_submission; u32 *num_sched; u32 hw_ip; + unsigned int max_ibs_dw; /* Set the hw submission limit higher for KIQ because * it's used for a number of gfx/compute tasks by both @@ -290,6 +311,14 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring, return r; } + max_ibs_dw = ring->funcs->emit_frame_size + + amdgpu_ring_max_ibs(ring->funcs->type) * ring->funcs->emit_ib_size; + max_ibs_dw = (max_ibs_dw + ring->funcs->align_mask) & ~ring->funcs->align_mask; + + if (WARN_ON(max_ibs_dw > max_dw)) { + max_dw = max_ibs_dw; + } + ring->ring_size = roundup_pow_of_two(max_dw * 4 * sched_hw_submission); ring->buf_mask = (ring->ring_size / 4) - 1; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h index d8749444b689..8eca6532ed19 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h @@ -319,6 +319,7 @@ struct amdgpu_ring { #define amdgpu_ring_patch_cond_exec(r,o) (r)->funcs->patch_cond_exec((r),(o)) #define amdgpu_ring_preempt_ib(r) (r)->funcs->preempt_ib(r) +unsigned int amdgpu_ring_max_ibs(enum amdgpu_ring_type type); int amdgpu_ring_alloc(struct amdgpu_ring *ring, unsigned ndw); void amdgpu_ring_ib_begin(struct amdgpu_ring *ring); void amdgpu_ring_ib_end(struct amdgpu_ring *ring); -- cgit v1.2.3 From 4f18b9a6711adbc7c76993c734a94ee3f5c61791 Mon Sep 17 00:00:00 2001 From: Bas Nieuwenhuizen Date: Thu, 13 Apr 2023 16:22:53 +0200 Subject: drm/amdgpu: Add support for querying the max ibs in a submission. (v3) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This info would be used by radv to figure out when we need to split a submission into multiple submissions. radv currently has a limit of 192 which seems to work for most gfx submissions, but is way too high for e.g. compute or sdma. Userspace is available at https://gitlab.freedesktop.org/bnieuwenhuizen/mesa/-/commits/ib-rejection-v3 v3: Completely rewrote based on suggestion of making it a separate query. Link: https://gitlab.freedesktop.org/drm/amd/-/issues/2498 Reviewed-by: Christian König Signed-off-by: Bas Nieuwenhuizen Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c | 9 +++++++++ 1 file changed, 9 insertions(+) (limited to 'drivers/gpu/drm/amd/amdgpu') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c index 0efb38539d70..1a2e342af1c0 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c @@ -1140,6 +1140,15 @@ int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) kfree(caps); return r; } + case AMDGPU_INFO_MAX_IBS: { + uint32_t max_ibs[AMDGPU_HW_IP_NUM]; + + for (i = 0; i < AMDGPU_HW_IP_NUM; ++i) + max_ibs[i] = amdgpu_ring_max_ibs(i); + + return copy_to_user(out, max_ibs, + min((size_t)size, sizeof(max_ibs))) ? 
-EFAULT : 0; + } default: DRM_DEBUG_KMS("Invalid request %d\n", info->query); return -EINVAL; -- cgit v1.2.3 From 1fa8d710573f02ae9118bc5f53e7ede09d6920da Mon Sep 17 00:00:00 2001 From: Alan Liu Date: Fri, 14 Apr 2023 18:39:52 +0800 Subject: drm/amdgpu: Fix desktop freeze after gpu-reset MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit [Why] After gpu-reset, sometimes the driver fails to enable vblank irq, causing flip_done to time out and the desktop to freeze. During gpu-reset, we disable and enable vblank irq in dm_suspend() and dm_resume(). Later on, in amdgpu_irq_gpu_reset_resume_helper(), we check the irqs' refcount and decide to enable or disable the irqs again. However, we have two sets of APIs for controlling vblank irq: one is dm_vblank_get/put() and the other is amdgpu_irq_get/put(). Each API has its own refcount and flag to store the state of vblank irq, and they are not synchronized. In drm we use the first API to control vblank irq, but in amdgpu_irq_gpu_reset_resume_helper() we use the second set of APIs. The failure happens when vblank irq was enabled by dm_vblank_get() before gpu-reset, leaving vblank->enabled true. However, during gpu-reset, in amdgpu_irq_gpu_reset_resume_helper() the vblank irq's state checked from amdgpu_irq_update() is DISABLED, so it finally disables vblank irq again. After gpu-reset, if there is a cursor plane commit, the driver will try to enable vblank irq by calling drm_vblank_enable(), but since vblank->enabled is still true, it fails to turn on vblank irq; flip_done can't be completed in the vblank irq handler and the desktop becomes frozen. [How] Combine the two vblank control APIs by letting drm's API ultimately call amdgpu_irq's API, so the irq's refcount and state of both APIs can be synchronized. Also add a check to prevent the refcount from going below 0 in amdgpu_irq_put(). v2: - Add warning in amdgpu_irq_enable() if the irq is already disabled. - Call dc_interrupt_set() in dm_set_vblank() to avoid refcount change if it is in gpu-reset. v3: - Improve commit message and code comments.
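An illustrative sketch of the guard described above (editor's illustration, not part of the patch; guarded_put() is a hypothetical name, and the hunk below shows the real check in amdgpu_irq_put()):

	static int guarded_put(struct amdgpu_device *adev,
			       struct amdgpu_irq_src *src, unsigned int type)
	{
		/* A put() on an already-disabled source means the get/put
		 * bookkeeping is unbalanced; warn and bail out rather than
		 * let the per-type refcount underflow. */
		if (WARN_ON(!amdgpu_irq_enabled(adev, src, type)))
			return -EINVAL;

		/* Only the last put() actually disables the interrupt. */
		if (atomic_dec_and_test(&src->enabled_types[type]))
			return amdgpu_irq_update(adev, src, type);

		return 0;
	}

An unbalanced put from drm's vblank path now fails loudly instead of driving negative the refcount that amdgpu_irq_gpu_reset_resume_helper() inspects.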
Signed-off-by: Alan Liu Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c | 3 +++ 1 file changed, 3 insertions(+) (limited to 'drivers/gpu/drm/amd/amdgpu') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c index d0a1cc88832c..fafebec5b7b6 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c @@ -596,6 +596,9 @@ int amdgpu_irq_put(struct amdgpu_device *adev, struct amdgpu_irq_src *src, if (!src->enabled_types || !src->funcs->set) return -EINVAL; + if (WARN_ON(!amdgpu_irq_enabled(adev, src, type))) + return -EINVAL; + if (atomic_dec_and_test(&src->enabled_types[type])) return amdgpu_irq_update(adev, src, type); -- cgit v1.2.3 From 277bd3371f11400d5b02df54f057569be4b10cea Mon Sep 17 00:00:00 2001 From: Le Ma Date: Tue, 24 May 2022 10:51:43 +0800 Subject: drm/amdgpu: convert gfx.kiq to array type (v3) v1: more kiq instances are available in the SOC (Le) v2: squash commits to avoid breaking the build (Le) v3: make the conversion for gfx/mec v11_0 (Hawking) Signed-off-by: Le Ma Reviewed-by: Hawking Zhang Signed-off-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c | 6 ++-- .../gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c | 6 ++-- drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v11.c | 6 ++-- drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c | 6 ++-- drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c | 34 +++++++++++----------- drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h | 2 +- drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c | 2 +- drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c | 32 ++++++++++---------- drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c | 26 ++++++++--------- drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | 16 +++++----- drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 24 +++++++-------- drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c | 12 ++++---- drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c | 12 ++++---- drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 12 ++++---- drivers/gpu/drm/amd/amdgpu/mes_v10_1.c | 22 +++++++------- drivers/gpu/drm/amd/amdgpu/mes_v11_0.c | 26 ++++++++--------- 16 files changed, 122 insertions(+), 122 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c index 9378fc79e9ea..f599e1e74fcc 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c @@ -288,7 +288,7 @@ static int kgd_hiq_mqd_load(struct amdgpu_device *adev, void *mqd, uint32_t pipe_id, uint32_t queue_id, uint32_t doorbell_off) { - struct amdgpu_ring *kiq_ring = &adev->gfx.kiq.ring; + struct amdgpu_ring *kiq_ring = &adev->gfx.kiq[0].ring; struct v10_compute_mqd *m; uint32_t mec, pipe; int r; @@ -303,7 +303,7 @@ static int kgd_hiq_mqd_load(struct amdgpu_device *adev, void *mqd, pr_debug("kfd: set HIQ, mec:%d, pipe:%d, queue:%d.\n", mec, pipe, queue_id); - spin_lock(&adev->gfx.kiq.ring_lock); + spin_lock(&adev->gfx.kiq[0].ring_lock); r = amdgpu_ring_alloc(kiq_ring, 7); if (r) { pr_err("Failed to alloc KIQ (%d).\n", r); @@ -330,7 +330,7 @@ static int kgd_hiq_mqd_load(struct amdgpu_device *adev, void *mqd, amdgpu_ring_commit(kiq_ring); out_unlock: - spin_unlock(&adev->gfx.kiq.ring_lock); + spin_unlock(&adev->gfx.kiq[0].ring_lock); release_queue(adev); return r; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c index ba21ec6b35e0..5c4152ae44da 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c @@ -275,7 +275,7 @@ static int hiq_mqd_load_v10_3(struct amdgpu_device *adev, void *mqd, uint32_t pipe_id, uint32_t queue_id, uint32_t doorbell_off) { - struct amdgpu_ring *kiq_ring = &adev->gfx.kiq.ring; + struct amdgpu_ring *kiq_ring = &adev->gfx.kiq[0].ring; struct v10_compute_mqd *m; uint32_t mec, pipe; int r; @@ -290,7 +290,7 @@ static int hiq_mqd_load_v10_3(struct amdgpu_device *adev, void *mqd, pr_debug("kfd: set HIQ, mec:%d, pipe:%d, queue:%d.\n", mec, pipe, queue_id); - spin_lock(&adev->gfx.kiq.ring_lock); + spin_lock(&adev->gfx.kiq[0].ring_lock); r = amdgpu_ring_alloc(kiq_ring, 7); if (r) { pr_err("Failed to alloc KIQ (%d).\n", r); @@ -317,7 +317,7 @@ static int hiq_mqd_load_v10_3(struct amdgpu_device *adev, void *mqd, amdgpu_ring_commit(kiq_ring); out_unlock: - spin_unlock(&adev->gfx.kiq.ring_lock); + spin_unlock(&adev->gfx.kiq[0].ring_lock); release_queue(adev); return r; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v11.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v11.c index 7e80caa05060..5cdb7289d35b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v11.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v11.c @@ -260,7 +260,7 @@ static int hiq_mqd_load_v11(struct amdgpu_device *adev, void *mqd, uint32_t pipe_id, uint32_t queue_id, uint32_t doorbell_off) { - struct amdgpu_ring *kiq_ring = &adev->gfx.kiq.ring; + struct amdgpu_ring *kiq_ring = &adev->gfx.kiq[0].ring; struct v11_compute_mqd *m; uint32_t mec, pipe; int r; @@ -275,7 +275,7 @@ static int hiq_mqd_load_v11(struct amdgpu_device *adev, void *mqd, pr_debug("kfd: set HIQ, mec:%d, pipe:%d, queue:%d.\n", mec, pipe, queue_id); - spin_lock(&adev->gfx.kiq.ring_lock); + spin_lock(&adev->gfx.kiq[0].ring_lock); r = amdgpu_ring_alloc(kiq_ring, 7); if (r) { pr_err("Failed to alloc KIQ (%d).\n", r); @@ -302,7 +302,7 @@ static int hiq_mqd_load_v11(struct amdgpu_device *adev, void *mqd, amdgpu_ring_commit(kiq_ring); out_unlock: - spin_unlock(&adev->gfx.kiq.ring_lock); + spin_unlock(&adev->gfx.kiq[0].ring_lock); release_queue(adev); return r; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c index e92b93557c13..bc944ae4fd5b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c @@ -300,7 +300,7 @@ int kgd_gfx_v9_hiq_mqd_load(struct amdgpu_device *adev, void *mqd, uint32_t pipe_id, uint32_t queue_id, uint32_t doorbell_off) { - struct amdgpu_ring *kiq_ring = &adev->gfx.kiq.ring; + struct amdgpu_ring *kiq_ring = &adev->gfx.kiq[0].ring; struct v9_mqd *m; uint32_t mec, pipe; int r; @@ -315,7 +315,7 @@ int kgd_gfx_v9_hiq_mqd_load(struct amdgpu_device *adev, void *mqd, pr_debug("kfd: set HIQ, mec:%d, pipe:%d, queue:%d.\n", mec, pipe, queue_id); - spin_lock(&adev->gfx.kiq.ring_lock); + spin_lock(&adev->gfx.kiq[0].ring_lock); r = amdgpu_ring_alloc(kiq_ring, 7); if (r) { pr_err("Failed to alloc KIQ (%d).\n", r); @@ -342,7 +342,7 @@ int kgd_gfx_v9_hiq_mqd_load(struct amdgpu_device *adev, void *mqd, amdgpu_ring_commit(kiq_ring); out_unlock: - spin_unlock(&adev->gfx.kiq.ring_lock); + spin_unlock(&adev->gfx.kiq[0].ring_lock); release_queue(adev); return r; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c index 9d3a0542c996..9b6071df1fa7 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c 
@@ -296,7 +296,7 @@ int amdgpu_gfx_kiq_init_ring(struct amdgpu_device *adev, struct amdgpu_ring *ring, struct amdgpu_irq_src *irq) { - struct amdgpu_kiq *kiq = &adev->gfx.kiq; + struct amdgpu_kiq *kiq = &adev->gfx.kiq[0]; int r = 0; spin_lock_init(&kiq->ring_lock); @@ -329,7 +329,7 @@ void amdgpu_gfx_kiq_free_ring(struct amdgpu_ring *ring) void amdgpu_gfx_kiq_fini(struct amdgpu_device *adev) { - struct amdgpu_kiq *kiq = &adev->gfx.kiq; + struct amdgpu_kiq *kiq = &adev->gfx.kiq[0]; amdgpu_bo_free_kernel(&kiq->eop_obj, &kiq->eop_gpu_addr, NULL); } @@ -339,7 +339,7 @@ int amdgpu_gfx_kiq_init(struct amdgpu_device *adev, { int r; u32 *hpd; - struct amdgpu_kiq *kiq = &adev->gfx.kiq; + struct amdgpu_kiq *kiq = &adev->gfx.kiq[0]; r = amdgpu_bo_create_kernel(adev, hpd_size, PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT, &kiq->eop_obj, @@ -368,7 +368,7 @@ int amdgpu_gfx_mqd_sw_init(struct amdgpu_device *adev, int r, i; /* create MQD for KIQ */ - ring = &adev->gfx.kiq.ring; + ring = &adev->gfx.kiq[0].ring; if (!adev->enable_mes_kiq && !ring->mqd_obj) { /* originaly the KIQ MQD is put in GTT domain, but for SRIOV VRAM domain is a must * otherwise hypervisor trigger SAVE_VF fail after driver unloaded which mean MQD @@ -458,7 +458,7 @@ void amdgpu_gfx_mqd_sw_fini(struct amdgpu_device *adev) &ring->mqd_ptr); } - ring = &adev->gfx.kiq.ring; + ring = &adev->gfx.kiq[0].ring; kfree(adev->gfx.mec.mqd_backup[AMDGPU_MAX_COMPUTE_RINGS]); amdgpu_bo_free_kernel(&ring->mqd_obj, &ring->mqd_gpu_addr, @@ -467,17 +467,17 @@ void amdgpu_gfx_mqd_sw_fini(struct amdgpu_device *adev) int amdgpu_gfx_disable_kcq(struct amdgpu_device *adev) { - struct amdgpu_kiq *kiq = &adev->gfx.kiq; + struct amdgpu_kiq *kiq = &adev->gfx.kiq[0]; struct amdgpu_ring *kiq_ring = &kiq->ring; int i, r = 0; if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues) return -EINVAL; - spin_lock(&adev->gfx.kiq.ring_lock); + spin_lock(&adev->gfx.kiq[0].ring_lock); if (amdgpu_ring_alloc(kiq_ring, kiq->pmf->unmap_queues_size * adev->gfx.num_compute_rings)) { - spin_unlock(&adev->gfx.kiq.ring_lock); + spin_unlock(&adev->gfx.kiq[0].ring_lock); return -ENOMEM; } @@ -485,9 +485,9 @@ int amdgpu_gfx_disable_kcq(struct amdgpu_device *adev) kiq->pmf->kiq_unmap_queues(kiq_ring, &adev->gfx.compute_ring[i], RESET_QUEUES, 0, 0); - if (adev->gfx.kiq.ring.sched.ready && !adev->job_hang) + if (adev->gfx.kiq[0].ring.sched.ready && !adev->job_hang) r = amdgpu_ring_test_helper(kiq_ring); - spin_unlock(&adev->gfx.kiq.ring_lock); + spin_unlock(&adev->gfx.kiq[0].ring_lock); return r; } @@ -507,8 +507,8 @@ int amdgpu_queue_mask_bit_to_set_resource_bit(struct amdgpu_device *adev, int amdgpu_gfx_enable_kcq(struct amdgpu_device *adev) { - struct amdgpu_kiq *kiq = &adev->gfx.kiq; - struct amdgpu_ring *kiq_ring = &adev->gfx.kiq.ring; + struct amdgpu_kiq *kiq = &adev->gfx.kiq[0]; + struct amdgpu_ring *kiq_ring = &adev->gfx.kiq[0].ring; uint64_t queue_mask = 0; int r, i; @@ -532,13 +532,13 @@ int amdgpu_gfx_enable_kcq(struct amdgpu_device *adev) DRM_INFO("kiq ring mec %d pipe %d q %d\n", kiq_ring->me, kiq_ring->pipe, kiq_ring->queue); - spin_lock(&adev->gfx.kiq.ring_lock); + spin_lock(&adev->gfx.kiq[0].ring_lock); r = amdgpu_ring_alloc(kiq_ring, kiq->pmf->map_queues_size * adev->gfx.num_compute_rings + kiq->pmf->set_resources_size); if (r) { DRM_ERROR("Failed to lock KIQ (%d).\n", r); - spin_unlock(&adev->gfx.kiq.ring_lock); + spin_unlock(&adev->gfx.kiq[0].ring_lock); return r; } @@ -550,7 +550,7 @@ int amdgpu_gfx_enable_kcq(struct amdgpu_device *adev) kiq->pmf->kiq_map_queues(kiq_ring, 
&adev->gfx.compute_ring[i]); r = amdgpu_ring_test_helper(kiq_ring); - spin_unlock(&adev->gfx.kiq.ring_lock); + spin_unlock(&adev->gfx.kiq[0].ring_lock); if (r) DRM_ERROR("KCQ enable failed\n"); @@ -788,7 +788,7 @@ uint32_t amdgpu_kiq_rreg(struct amdgpu_device *adev, uint32_t reg) signed long r, cnt = 0; unsigned long flags; uint32_t seq, reg_val_offs = 0, value = 0; - struct amdgpu_kiq *kiq = &adev->gfx.kiq; + struct amdgpu_kiq *kiq = &adev->gfx.kiq[0]; struct amdgpu_ring *ring = &kiq->ring; if (amdgpu_device_skip_hw_access(adev)) @@ -856,7 +856,7 @@ void amdgpu_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v) signed long r, cnt = 0; unsigned long flags; uint32_t seq; - struct amdgpu_kiq *kiq = &adev->gfx.kiq; + struct amdgpu_kiq *kiq = &adev->gfx.kiq[0]; struct amdgpu_ring *ring = &kiq->ring; BUG_ON(!ring->funcs->emit_wreg); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h index bfabea76d166..c742b4a36979 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h @@ -296,7 +296,7 @@ struct amdgpu_gfx { struct amdgpu_ce ce; struct amdgpu_me me; struct amdgpu_mec mec; - struct amdgpu_kiq kiq; + struct amdgpu_kiq kiq[AMDGPU_MAX_GC_INSTANCES]; struct amdgpu_imu imu; bool rs64_enable; /* firmware format */ const struct firmware *me_fw; /* ME firmware */ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c index f2e2cbaa7fde..9dd474262c29 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c @@ -74,7 +74,7 @@ void amdgpu_virt_kiq_reg_write_reg_wait(struct amdgpu_device *adev, uint32_t reg0, uint32_t reg1, uint32_t ref, uint32_t mask) { - struct amdgpu_kiq *kiq = &adev->gfx.kiq; + struct amdgpu_kiq *kiq = &adev->gfx.kiq[0]; struct amdgpu_ring *ring = &kiq->ring; signed long r, cnt = 0; unsigned long flags; diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c index f5b5ce1051a2..d4e7de8fd9da 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c @@ -3568,7 +3568,7 @@ static void gfx10_kiq_unmap_queues(struct amdgpu_ring *kiq_ring, struct amdgpu_device *adev = kiq_ring->adev; uint32_t eng_sel = ring->funcs->type == AMDGPU_RING_TYPE_GFX ? 
4 : 0; - if (adev->enable_mes && !adev->gfx.kiq.ring.sched.ready) { + if (adev->enable_mes && !adev->gfx.kiq[0].ring.sched.ready) { amdgpu_mes_unmap_legacy_queue(adev, ring, action, gpu_addr, seq); return; } @@ -3636,7 +3636,7 @@ static const struct kiq_pm4_funcs gfx_v10_0_kiq_pm4_funcs = { static void gfx_v10_0_set_kiq_pm4_funcs(struct amdgpu_device *adev) { - adev->gfx.kiq.pmf = &gfx_v10_0_kiq_pm4_funcs; + adev->gfx.kiq[0].pmf = &gfx_v10_0_kiq_pm4_funcs; } static void gfx_v10_0_init_spm_golden_registers(struct amdgpu_device *adev) @@ -4550,7 +4550,7 @@ static int gfx_v10_0_sw_init(void *handle) /* KIQ event */ r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, GFX_10_1__SRCID__CP_IB2_INTERRUPT_PKT, - &adev->gfx.kiq.irq); + &adev->gfx.kiq[0].irq); if (r) return r; @@ -4635,7 +4635,7 @@ static int gfx_v10_0_sw_init(void *handle) return r; } - kiq = &adev->gfx.kiq; + kiq = &adev->gfx.kiq[0]; r = amdgpu_gfx_kiq_init_ring(adev, &kiq->ring, &kiq->irq); if (r) return r; @@ -4693,7 +4693,7 @@ static int gfx_v10_0_sw_fini(void *handle) amdgpu_gfx_mqd_sw_fini(adev); if (!adev->enable_mes_kiq) { - amdgpu_gfx_kiq_free_ring(&adev->gfx.kiq.ring); + amdgpu_gfx_kiq_free_ring(&adev->gfx.kiq[0].ring); amdgpu_gfx_kiq_fini(adev); } @@ -6214,7 +6214,7 @@ static void gfx_v10_0_cp_compute_enable(struct amdgpu_device *adev, bool enable) CP_MEC_CNTL__MEC_ME2_HALT_MASK)); break; } - adev->gfx.kiq.ring.sched.ready = false; + adev->gfx.kiq[0].ring.sched.ready = false; } udelay(50); } @@ -6524,8 +6524,8 @@ static int gfx_v10_0_gfx_init_queue(struct amdgpu_ring *ring) #ifndef BRING_UP_DEBUG static int gfx_v10_0_kiq_enable_kgq(struct amdgpu_device *adev) { - struct amdgpu_kiq *kiq = &adev->gfx.kiq; - struct amdgpu_ring *kiq_ring = &adev->gfx.kiq.ring; + struct amdgpu_kiq *kiq = &adev->gfx.kiq[0]; + struct amdgpu_ring *kiq_ring = &adev->gfx.kiq[0].ring; int r, i; if (!kiq->pmf || !kiq->pmf->kiq_map_queues) @@ -6885,7 +6885,7 @@ static int gfx_v10_0_kiq_resume(struct amdgpu_device *adev) struct amdgpu_ring *ring; int r; - ring = &adev->gfx.kiq.ring; + ring = &adev->gfx.kiq[0].ring; r = amdgpu_bo_reserve(ring->mqd_obj, false); if (unlikely(r != 0)) @@ -7243,7 +7243,7 @@ static int gfx_v10_0_hw_init(void *handle) #ifndef BRING_UP_DEBUG static int gfx_v10_0_kiq_disable_kgq(struct amdgpu_device *adev) { - struct amdgpu_kiq *kiq = &adev->gfx.kiq; + struct amdgpu_kiq *kiq = &adev->gfx.kiq[0]; struct amdgpu_ring *kiq_ring = &kiq->ring; int i; @@ -8640,7 +8640,7 @@ static int gfx_v10_0_ring_preempt_ib(struct amdgpu_ring *ring) { int i, r = 0; struct amdgpu_device *adev = ring->adev; - struct amdgpu_kiq *kiq = &adev->gfx.kiq; + struct amdgpu_kiq *kiq = &adev->gfx.kiq[0]; struct amdgpu_ring *kiq_ring = &kiq->ring; unsigned long flags; @@ -9148,7 +9148,7 @@ static int gfx_v10_0_kiq_set_interrupt_state(struct amdgpu_device *adev, enum amdgpu_interrupt_state state) { uint32_t tmp, target; - struct amdgpu_ring *ring = &(adev->gfx.kiq.ring); + struct amdgpu_ring *ring = &(adev->gfx.kiq[0].ring); if (ring->me == 1) target = SOC15_REG_OFFSET(GC, 0, mmCP_ME1_PIPE0_INT_CNTL); @@ -9192,7 +9192,7 @@ static int gfx_v10_0_kiq_irq(struct amdgpu_device *adev, struct amdgpu_iv_entry *entry) { u8 me_id, pipe_id, queue_id; - struct amdgpu_ring *ring = &(adev->gfx.kiq.ring); + struct amdgpu_ring *ring = &(adev->gfx.kiq[0].ring); me_id = (entry->ring_id & 0x0c) >> 2; pipe_id = (entry->ring_id & 0x03) >> 0; @@ -9369,7 +9369,7 @@ static void gfx_v10_0_set_ring_funcs(struct amdgpu_device *adev) { int i; - adev->gfx.kiq.ring.funcs = 
&gfx_v10_0_ring_funcs_kiq; + adev->gfx.kiq[0].ring.funcs = &gfx_v10_0_ring_funcs_kiq; for (i = 0; i < adev->gfx.num_gfx_rings; i++) adev->gfx.gfx_ring[i].funcs = &gfx_v10_0_ring_funcs_gfx; @@ -9403,8 +9403,8 @@ static void gfx_v10_0_set_irq_funcs(struct amdgpu_device *adev) adev->gfx.eop_irq.num_types = AMDGPU_CP_IRQ_LAST; adev->gfx.eop_irq.funcs = &gfx_v10_0_eop_irq_funcs; - adev->gfx.kiq.irq.num_types = AMDGPU_CP_KIQ_IRQ_LAST; - adev->gfx.kiq.irq.funcs = &gfx_v10_0_kiq_irq_funcs; + adev->gfx.kiq[0].irq.num_types = AMDGPU_CP_KIQ_IRQ_LAST; + adev->gfx.kiq[0].irq.funcs = &gfx_v10_0_kiq_irq_funcs; adev->gfx.priv_reg_irq.num_types = 1; adev->gfx.priv_reg_irq.funcs = &gfx_v10_0_priv_reg_irq_funcs; diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c index a9da0486467a..6a5435255e6d 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c @@ -192,7 +192,7 @@ static void gfx11_kiq_unmap_queues(struct amdgpu_ring *kiq_ring, struct amdgpu_device *adev = kiq_ring->adev; uint32_t eng_sel = ring->funcs->type == AMDGPU_RING_TYPE_GFX ? 4 : 0; - if (adev->enable_mes && !adev->gfx.kiq.ring.sched.ready) { + if (adev->enable_mes && !adev->gfx.kiq[0].ring.sched.ready) { amdgpu_mes_unmap_legacy_queue(adev, ring, action, gpu_addr, seq); return; } @@ -260,7 +260,7 @@ static const struct kiq_pm4_funcs gfx_v11_0_kiq_pm4_funcs = { static void gfx_v11_0_set_kiq_pm4_funcs(struct amdgpu_device *adev) { - adev->gfx.kiq.pmf = &gfx_v11_0_kiq_pm4_funcs; + adev->gfx.kiq[0].pmf = &gfx_v11_0_kiq_pm4_funcs; } static void gfx_v11_0_init_golden_registers(struct amdgpu_device *adev) @@ -1395,7 +1395,7 @@ static int gfx_v11_0_sw_init(void *handle) return r; } - kiq = &adev->gfx.kiq; + kiq = &adev->gfx.kiq[0]; r = amdgpu_gfx_kiq_init_ring(adev, &kiq->ring, &kiq->irq); if (r) return r; @@ -1466,7 +1466,7 @@ static int gfx_v11_0_sw_fini(void *handle) amdgpu_gfx_mqd_sw_fini(adev); if (!adev->enable_mes_kiq) { - amdgpu_gfx_kiq_free_ring(&adev->gfx.kiq.ring); + amdgpu_gfx_kiq_free_ring(&adev->gfx.kiq[0].ring); amdgpu_gfx_kiq_fini(adev); } @@ -3337,7 +3337,7 @@ static void gfx_v11_0_cp_compute_enable(struct amdgpu_device *adev, bool enable) WREG32_SOC15(GC, 0, regCP_MEC_CNTL, data); } - adev->gfx.kiq.ring.sched.ready = enable; + adev->gfx.kiq[0].ring.sched.ready = enable; udelay(50); } @@ -3732,8 +3732,8 @@ static int gfx_v11_0_gfx_init_queue(struct amdgpu_ring *ring) #ifndef BRING_UP_DEBUG static int gfx_v11_0_kiq_enable_kgq(struct amdgpu_device *adev) { - struct amdgpu_kiq *kiq = &adev->gfx.kiq; - struct amdgpu_ring *kiq_ring = &adev->gfx.kiq.ring; + struct amdgpu_kiq *kiq = &adev->gfx.kiq[0]; + struct amdgpu_ring *kiq_ring = &adev->gfx.kiq[0].ring; int r, i; if (!kiq->pmf || !kiq->pmf->kiq_map_queues) @@ -4108,7 +4108,7 @@ static int gfx_v11_0_kiq_resume(struct amdgpu_device *adev) struct amdgpu_ring *ring; int r; - ring = &adev->gfx.kiq.ring; + ring = &adev->gfx.kiq[0].ring; r = amdgpu_bo_reserve(ring->mqd_obj, false); if (unlikely(r != 0)) @@ -4417,7 +4417,7 @@ static int gfx_v11_0_hw_init(void *handle) #ifndef BRING_UP_DEBUG static int gfx_v11_0_kiq_disable_kgq(struct amdgpu_device *adev) { - struct amdgpu_kiq *kiq = &adev->gfx.kiq; + struct amdgpu_kiq *kiq = &adev->gfx.kiq[0]; struct amdgpu_ring *kiq_ring = &kiq->ring; int i, r = 0; @@ -4432,7 +4432,7 @@ static int gfx_v11_0_kiq_disable_kgq(struct amdgpu_device *adev) kiq->pmf->kiq_unmap_queues(kiq_ring, &adev->gfx.gfx_ring[i], PREEMPT_QUEUES, 0, 0); - if (adev->gfx.kiq.ring.sched.ready) + if 
(adev->gfx.kiq[0].ring.sched.ready) r = amdgpu_ring_test_helper(kiq_ring); return r; @@ -5622,7 +5622,7 @@ static int gfx_v11_0_ring_preempt_ib(struct amdgpu_ring *ring) { int i, r = 0; struct amdgpu_device *adev = ring->adev; - struct amdgpu_kiq *kiq = &adev->gfx.kiq; + struct amdgpu_kiq *kiq = &adev->gfx.kiq[0]; struct amdgpu_ring *kiq_ring = &kiq->ring; unsigned long flags; @@ -6120,7 +6120,7 @@ static int gfx_v11_0_kiq_set_interrupt_state(struct amdgpu_device *adev, enum amdgpu_interrupt_state state) { uint32_t tmp, target; - struct amdgpu_ring *ring = &(adev->gfx.kiq.ring); + struct amdgpu_ring *ring = &(adev->gfx.kiq[0].ring); target = SOC15_REG_OFFSET(GC, 0, regCP_ME1_PIPE0_INT_CNTL); target += ring->pipe; @@ -6317,7 +6317,7 @@ static void gfx_v11_0_set_ring_funcs(struct amdgpu_device *adev) { int i; - adev->gfx.kiq.ring.funcs = &gfx_v11_0_ring_funcs_kiq; + adev->gfx.kiq[0].ring.funcs = &gfx_v11_0_ring_funcs_kiq; for (i = 0; i < adev->gfx.num_gfx_rings; i++) adev->gfx.gfx_ring[i].funcs = &gfx_v11_0_ring_funcs_gfx; diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c index b1f2684d854a..ed04bad8543d 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c @@ -2021,7 +2021,7 @@ static int gfx_v8_0_sw_init(void *handle) return r; } - kiq = &adev->gfx.kiq; + kiq = &adev->gfx.kiq[0]; r = amdgpu_gfx_kiq_init_ring(adev, &kiq->ring, &kiq->irq); if (r) return r; @@ -2051,7 +2051,7 @@ static int gfx_v8_0_sw_fini(void *handle) amdgpu_ring_fini(&adev->gfx.compute_ring[i]); amdgpu_gfx_mqd_sw_fini(adev); - amdgpu_gfx_kiq_free_ring(&adev->gfx.kiq.ring); + amdgpu_gfx_kiq_free_ring(&adev->gfx.kiq[0].ring); amdgpu_gfx_kiq_fini(adev); gfx_v8_0_mec_fini(adev); @@ -4292,7 +4292,7 @@ static void gfx_v8_0_cp_compute_enable(struct amdgpu_device *adev, bool enable) WREG32(mmCP_MEC_CNTL, 0); } else { WREG32(mmCP_MEC_CNTL, (CP_MEC_CNTL__MEC_ME1_HALT_MASK | CP_MEC_CNTL__MEC_ME2_HALT_MASK)); - adev->gfx.kiq.ring.sched.ready = false; + adev->gfx.kiq[0].ring.sched.ready = false; } udelay(50); } @@ -4314,7 +4314,7 @@ static void gfx_v8_0_kiq_setting(struct amdgpu_ring *ring) static int gfx_v8_0_kiq_kcq_enable(struct amdgpu_device *adev) { - struct amdgpu_ring *kiq_ring = &adev->gfx.kiq.ring; + struct amdgpu_ring *kiq_ring = &adev->gfx.kiq[0].ring; uint64_t queue_mask = 0; int r, i; @@ -4678,7 +4678,7 @@ static int gfx_v8_0_kiq_resume(struct amdgpu_device *adev) struct amdgpu_ring *ring; int r; - ring = &adev->gfx.kiq.ring; + ring = &adev->gfx.kiq[0].ring; r = amdgpu_bo_reserve(ring->mqd_obj, false); if (unlikely(r != 0)) @@ -4741,7 +4741,7 @@ static int gfx_v8_0_cp_test_all_rings(struct amdgpu_device *adev) if (r) return r; - ring = &adev->gfx.kiq.ring; + ring = &adev->gfx.kiq[0].ring; r = amdgpu_ring_test_helper(ring); if (r) return r; @@ -4808,7 +4808,7 @@ static int gfx_v8_0_hw_init(void *handle) static int gfx_v8_0_kcq_disable(struct amdgpu_device *adev) { int r, i; - struct amdgpu_ring *kiq_ring = &adev->gfx.kiq.ring; + struct amdgpu_ring *kiq_ring = &adev->gfx.kiq[0].ring; r = amdgpu_ring_alloc(kiq_ring, 6 * adev->gfx.num_compute_rings); if (r) @@ -7001,7 +7001,7 @@ static void gfx_v8_0_set_ring_funcs(struct amdgpu_device *adev) { int i; - adev->gfx.kiq.ring.funcs = &gfx_v8_0_ring_funcs_kiq; + adev->gfx.kiq[0].ring.funcs = &gfx_v8_0_ring_funcs_kiq; for (i = 0; i < adev->gfx.num_gfx_rings; i++) adev->gfx.gfx_ring[i].funcs = &gfx_v8_0_ring_funcs_gfx; diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 
b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c index adbcd8127c82..adf86bc7ed36 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c @@ -898,7 +898,7 @@ static const struct kiq_pm4_funcs gfx_v9_0_kiq_pm4_funcs = { static void gfx_v9_0_set_kiq_pm4_funcs(struct amdgpu_device *adev) { - adev->gfx.kiq.pmf = &gfx_v9_0_kiq_pm4_funcs; + adev->gfx.kiq[0].pmf = &gfx_v9_0_kiq_pm4_funcs; } static void gfx_v9_0_init_golden_registers(struct amdgpu_device *adev) @@ -2174,7 +2174,7 @@ static int gfx_v9_0_sw_init(void *handle) return r; } - kiq = &adev->gfx.kiq; + kiq = &adev->gfx.kiq[0]; r = amdgpu_gfx_kiq_init_ring(adev, &kiq->ring, &kiq->irq); if (r) return r; @@ -2216,7 +2216,7 @@ static int gfx_v9_0_sw_fini(void *handle) amdgpu_ring_fini(&adev->gfx.compute_ring[i]); amdgpu_gfx_mqd_sw_fini(adev); - amdgpu_gfx_kiq_free_ring(&adev->gfx.kiq.ring); + amdgpu_gfx_kiq_free_ring(&adev->gfx.kiq[0].ring); amdgpu_gfx_kiq_fini(adev); gfx_v9_0_mec_fini(adev); @@ -3155,7 +3155,7 @@ static void gfx_v9_0_cp_compute_enable(struct amdgpu_device *adev, bool enable) } else { WREG32_SOC15_RLC(GC, 0, mmCP_MEC_CNTL, (CP_MEC_CNTL__MEC_ME1_HALT_MASK | CP_MEC_CNTL__MEC_ME2_HALT_MASK)); - adev->gfx.kiq.ring.sched.ready = false; + adev->gfx.kiq[0].ring.sched.ready = false; } udelay(50); } @@ -3610,7 +3610,7 @@ static int gfx_v9_0_kiq_resume(struct amdgpu_device *adev) struct amdgpu_ring *ring; int r; - ring = &adev->gfx.kiq.ring; + ring = &adev->gfx.kiq[0].ring; r = amdgpu_bo_reserve(ring->mqd_obj, false); if (unlikely(r != 0)) @@ -3789,10 +3789,10 @@ static int gfx_v9_0_hw_fini(void *handle) */ if (!amdgpu_in_reset(adev) && !adev->in_suspend) { mutex_lock(&adev->srbm_mutex); - soc15_grbm_select(adev, adev->gfx.kiq.ring.me, - adev->gfx.kiq.ring.pipe, - adev->gfx.kiq.ring.queue, 0); - gfx_v9_0_kiq_fini_register(&adev->gfx.kiq.ring); + soc15_grbm_select(adev, adev->gfx.kiq[0].ring.me, + adev->gfx.kiq[0].ring.pipe, + adev->gfx.kiq[0].ring.queue, 0); + gfx_v9_0_kiq_fini_register(&adev->gfx.kiq[0].ring); soc15_grbm_select(adev, 0, 0, 0, 0); mutex_unlock(&adev->srbm_mutex); } @@ -3913,7 +3913,7 @@ static uint64_t gfx_v9_0_kiq_read_clock(struct amdgpu_device *adev) unsigned long flags; uint32_t seq, reg_val_offs = 0; uint64_t value = 0; - struct amdgpu_kiq *kiq = &adev->gfx.kiq; + struct amdgpu_kiq *kiq = &adev->gfx.kiq[0]; struct amdgpu_ring *ring = &kiq->ring; BUG_ON(!ring->funcs->emit_rreg); @@ -5385,7 +5385,7 @@ static int gfx_v9_0_ring_preempt_ib(struct amdgpu_ring *ring) { int i, r = 0; struct amdgpu_device *adev = ring->adev; - struct amdgpu_kiq *kiq = &adev->gfx.kiq; + struct amdgpu_kiq *kiq = &adev->gfx.kiq[0]; struct amdgpu_ring *kiq_ring = &kiq->ring; unsigned long flags; @@ -6964,7 +6964,7 @@ static void gfx_v9_0_set_ring_funcs(struct amdgpu_device *adev) { int i; - adev->gfx.kiq.ring.funcs = &gfx_v9_0_ring_funcs_kiq; + adev->gfx.kiq[0].ring.funcs = &gfx_v9_0_ring_funcs_kiq; for (i = 0; i < adev->gfx.num_gfx_rings; i++) adev->gfx.gfx_ring[i].funcs = &gfx_v9_0_ring_funcs_gfx; diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c index 7d6f4a68f416..23d4081eca00 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c @@ -343,7 +343,7 @@ static void gmc_v10_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid, /* For SRIOV run time, driver shouldn't access the register through MMIO * Directly use kiq to do the vm invalidation instead */ - if (adev->gfx.kiq.ring.sched.ready && !adev->enable_mes && + if 
(adev->gfx.kiq[0].ring.sched.ready && !adev->enable_mes && (amdgpu_sriov_runtime(adev) || !amdgpu_sriov_vf(adev)) && down_read_trylock(&adev->reset_domain->sem)) { struct amdgpu_vmhub *hub = &adev->vmhub[vmhub]; @@ -428,11 +428,11 @@ static int gmc_v10_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev, uint16_t queried_pasid; bool ret; u32 usec_timeout = amdgpu_sriov_vf(adev) ? SRIOV_USEC_TIMEOUT : adev->usec_timeout; - struct amdgpu_ring *ring = &adev->gfx.kiq.ring; - struct amdgpu_kiq *kiq = &adev->gfx.kiq; + struct amdgpu_ring *ring = &adev->gfx.kiq[0].ring; + struct amdgpu_kiq *kiq = &adev->gfx.kiq[0]; if (amdgpu_emu_mode == 0 && ring->sched.ready) { - spin_lock(&adev->gfx.kiq.ring_lock); + spin_lock(&adev->gfx.kiq[0].ring_lock); /* 2 dwords flush + 8 dwords fence */ amdgpu_ring_alloc(ring, kiq->pmf->invalidate_tlbs_size + 8); kiq->pmf->kiq_invalidate_tlbs(ring, @@ -440,12 +440,12 @@ static int gmc_v10_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev, r = amdgpu_fence_emit_polling(ring, &seq, MAX_KIQ_REG_WAIT); if (r) { amdgpu_ring_undo(ring); - spin_unlock(&adev->gfx.kiq.ring_lock); + spin_unlock(&adev->gfx.kiq[0].ring_lock); return -ETIME; } amdgpu_ring_commit(ring); - spin_unlock(&adev->gfx.kiq.ring_lock); + spin_unlock(&adev->gfx.kiq[0].ring_lock); r = amdgpu_fence_wait_polling(ring, seq, usec_timeout); if (r < 1) { dev_err(adev->dev, "wait for kiq fence error: %ld.\n", r); diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c index d809f2ed5600..3828ca95899f 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c @@ -291,7 +291,7 @@ static void gmc_v11_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid, /* For SRIOV run time, driver shouldn't access the register through MMIO * Directly use kiq to do the vm invalidation instead */ - if ((adev->gfx.kiq.ring.sched.ready || adev->mes.ring.sched.ready) && + if ((adev->gfx.kiq[0].ring.sched.ready || adev->mes.ring.sched.ready) && (amdgpu_sriov_runtime(adev) || !amdgpu_sriov_vf(adev))) { struct amdgpu_vmhub *hub = &adev->vmhub[vmhub]; const unsigned eng = 17; @@ -329,11 +329,11 @@ static int gmc_v11_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev, uint32_t seq; uint16_t queried_pasid; bool ret; - struct amdgpu_ring *ring = &adev->gfx.kiq.ring; - struct amdgpu_kiq *kiq = &adev->gfx.kiq; + struct amdgpu_ring *ring = &adev->gfx.kiq[0].ring; + struct amdgpu_kiq *kiq = &adev->gfx.kiq[0]; if (amdgpu_emu_mode == 0 && ring->sched.ready) { - spin_lock(&adev->gfx.kiq.ring_lock); + spin_lock(&adev->gfx.kiq[0].ring_lock); /* 2 dwords flush + 8 dwords fence */ amdgpu_ring_alloc(ring, kiq->pmf->invalidate_tlbs_size + 8); kiq->pmf->kiq_invalidate_tlbs(ring, @@ -341,12 +341,12 @@ static int gmc_v11_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev, r = amdgpu_fence_emit_polling(ring, &seq, MAX_KIQ_REG_WAIT); if (r) { amdgpu_ring_undo(ring); - spin_unlock(&adev->gfx.kiq.ring_lock); + spin_unlock(&adev->gfx.kiq[0].ring_lock); return -ETIME; } amdgpu_ring_commit(ring); - spin_unlock(&adev->gfx.kiq.ring_lock); + spin_unlock(&adev->gfx.kiq[0].ring_lock); r = amdgpu_fence_wait_polling(ring, seq, adev->usec_timeout); if (r < 1) { dev_err(adev->dev, "wait for kiq fence error: %ld.\n", r); diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c index 64ab1a306dfe..290804a06e05 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c @@ -824,7 +824,7 @@ static void gmc_v9_0_flush_gpu_tlb(struct 
amdgpu_device *adev, uint32_t vmid, /* This is necessary for a HW workaround under SRIOV as well * as GFXOFF under bare metal */ - if (adev->gfx.kiq.ring.sched.ready && + if (adev->gfx.kiq[0].ring.sched.ready && (amdgpu_sriov_runtime(adev) || !amdgpu_sriov_vf(adev)) && down_read_trylock(&adev->reset_domain->sem)) { uint32_t req = hub->vm_inv_eng0_req + hub->eng_distance * eng; @@ -934,8 +934,8 @@ static int gmc_v9_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev, uint16_t queried_pasid; bool ret; u32 usec_timeout = amdgpu_sriov_vf(adev) ? SRIOV_USEC_TIMEOUT : adev->usec_timeout; - struct amdgpu_ring *ring = &adev->gfx.kiq.ring; - struct amdgpu_kiq *kiq = &adev->gfx.kiq; + struct amdgpu_ring *ring = &adev->gfx.kiq[0].ring; + struct amdgpu_kiq *kiq = &adev->gfx.kiq[0]; if (amdgpu_in_reset(adev)) return -EIO; @@ -955,7 +955,7 @@ static int gmc_v9_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev, if (vega20_xgmi_wa) ndw += kiq->pmf->invalidate_tlbs_size; - spin_lock(&adev->gfx.kiq.ring_lock); + spin_lock(&adev->gfx.kiq[0].ring_lock); /* 2 dwords flush + 8 dwords fence */ amdgpu_ring_alloc(ring, ndw); if (vega20_xgmi_wa) @@ -966,13 +966,13 @@ static int gmc_v9_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev, r = amdgpu_fence_emit_polling(ring, &seq, MAX_KIQ_REG_WAIT); if (r) { amdgpu_ring_undo(ring); - spin_unlock(&adev->gfx.kiq.ring_lock); + spin_unlock(&adev->gfx.kiq[0].ring_lock); up_read(&adev->reset_domain->sem); return -ETIME; } amdgpu_ring_commit(ring); - spin_unlock(&adev->gfx.kiq.ring_lock); + spin_unlock(&adev->gfx.kiq[0].ring_lock); r = amdgpu_fence_wait_polling(ring, seq, usec_timeout); if (r < 1) { dev_err(adev->dev, "wait for kiq fence error: %ld.\n", r); diff --git a/drivers/gpu/drm/amd/amdgpu/mes_v10_1.c b/drivers/gpu/drm/amd/amdgpu/mes_v10_1.c index 2e2062636d5f..0599f8a6813e 100644 --- a/drivers/gpu/drm/amd/amdgpu/mes_v10_1.c +++ b/drivers/gpu/drm/amd/amdgpu/mes_v10_1.c @@ -797,8 +797,8 @@ static void mes_v10_1_queue_init_register(struct amdgpu_ring *ring) static int mes_v10_1_kiq_enable_queue(struct amdgpu_device *adev) { - struct amdgpu_kiq *kiq = &adev->gfx.kiq; - struct amdgpu_ring *kiq_ring = &adev->gfx.kiq.ring; + struct amdgpu_kiq *kiq = &adev->gfx.kiq[0]; + struct amdgpu_ring *kiq_ring = &adev->gfx.kiq[0].ring; int r; if (!kiq->pmf || !kiq->pmf->kiq_map_queues) @@ -863,9 +863,9 @@ static int mes_v10_1_kiq_ring_init(struct amdgpu_device *adev) { struct amdgpu_ring *ring; - spin_lock_init(&adev->gfx.kiq.ring_lock); + spin_lock_init(&adev->gfx.kiq[0].ring_lock); - ring = &adev->gfx.kiq.ring; + ring = &adev->gfx.kiq[0].ring; ring->me = 3; ring->pipe = 1; @@ -891,7 +891,7 @@ static int mes_v10_1_mqd_sw_init(struct amdgpu_device *adev, struct amdgpu_ring *ring; if (pipe == AMDGPU_MES_KIQ_PIPE) - ring = &adev->gfx.kiq.ring; + ring = &adev->gfx.kiq[0].ring; else if (pipe == AMDGPU_MES_SCHED_PIPE) ring = &adev->mes.ring; else @@ -974,15 +974,15 @@ static int mes_v10_1_sw_fini(void *handle) amdgpu_ucode_release(&adev->mes.fw[pipe]); } - amdgpu_bo_free_kernel(&adev->gfx.kiq.ring.mqd_obj, - &adev->gfx.kiq.ring.mqd_gpu_addr, - &adev->gfx.kiq.ring.mqd_ptr); + amdgpu_bo_free_kernel(&adev->gfx.kiq[0].ring.mqd_obj, + &adev->gfx.kiq[0].ring.mqd_gpu_addr, + &adev->gfx.kiq[0].ring.mqd_ptr); amdgpu_bo_free_kernel(&adev->mes.ring.mqd_obj, &adev->mes.ring.mqd_gpu_addr, &adev->mes.ring.mqd_ptr); - amdgpu_ring_fini(&adev->gfx.kiq.ring); + amdgpu_ring_fini(&adev->gfx.kiq[0].ring); amdgpu_ring_fini(&adev->mes.ring); amdgpu_mes_fini(adev); @@ -1038,7 +1038,7 @@ static int 
mes_v10_1_kiq_hw_init(struct amdgpu_device *adev) mes_v10_1_enable(adev, true); - mes_v10_1_kiq_setting(&adev->gfx.kiq.ring); + mes_v10_1_kiq_setting(&adev->gfx.kiq[0].ring); r = mes_v10_1_queue_init(adev); if (r) @@ -1090,7 +1090,7 @@ static int mes_v10_1_hw_init(void *handle) * MES uses KIQ ring exclusively so driver cannot access KIQ ring * with MES enabled. */ - adev->gfx.kiq.ring.sched.ready = false; + adev->gfx.kiq[0].ring.sched.ready = false; adev->mes.ring.sched.ready = true; return 0; diff --git a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c index 45280f047180..e853bcb892fc 100644 --- a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c @@ -864,8 +864,8 @@ static void mes_v11_0_queue_init_register(struct amdgpu_ring *ring) static int mes_v11_0_kiq_enable_queue(struct amdgpu_device *adev) { - struct amdgpu_kiq *kiq = &adev->gfx.kiq; - struct amdgpu_ring *kiq_ring = &adev->gfx.kiq.ring; + struct amdgpu_kiq *kiq = &adev->gfx.kiq[0]; + struct amdgpu_ring *kiq_ring = &adev->gfx.kiq[0].ring; int r; if (!kiq->pmf || !kiq->pmf->kiq_map_queues) @@ -894,7 +894,7 @@ static int mes_v11_0_queue_init(struct amdgpu_device *adev, int r; if (pipe == AMDGPU_MES_KIQ_PIPE) - ring = &adev->gfx.kiq.ring; + ring = &adev->gfx.kiq[0].ring; else if (pipe == AMDGPU_MES_SCHED_PIPE) ring = &adev->mes.ring; else @@ -961,9 +961,9 @@ static int mes_v11_0_kiq_ring_init(struct amdgpu_device *adev) { struct amdgpu_ring *ring; - spin_lock_init(&adev->gfx.kiq.ring_lock); + spin_lock_init(&adev->gfx.kiq[0].ring_lock); - ring = &adev->gfx.kiq.ring; + ring = &adev->gfx.kiq[0].ring; ring->me = 3; ring->pipe = 1; @@ -989,7 +989,7 @@ static int mes_v11_0_mqd_sw_init(struct amdgpu_device *adev, struct amdgpu_ring *ring; if (pipe == AMDGPU_MES_KIQ_PIPE) - ring = &adev->gfx.kiq.ring; + ring = &adev->gfx.kiq[0].ring; else if (pipe == AMDGPU_MES_SCHED_PIPE) ring = &adev->mes.ring; else @@ -1074,15 +1074,15 @@ static int mes_v11_0_sw_fini(void *handle) amdgpu_ucode_release(&adev->mes.fw[pipe]); } - amdgpu_bo_free_kernel(&adev->gfx.kiq.ring.mqd_obj, - &adev->gfx.kiq.ring.mqd_gpu_addr, - &adev->gfx.kiq.ring.mqd_ptr); + amdgpu_bo_free_kernel(&adev->gfx.kiq[0].ring.mqd_obj, + &adev->gfx.kiq[0].ring.mqd_gpu_addr, + &adev->gfx.kiq[0].ring.mqd_ptr); amdgpu_bo_free_kernel(&adev->mes.ring.mqd_obj, &adev->mes.ring.mqd_gpu_addr, &adev->mes.ring.mqd_ptr); - amdgpu_ring_fini(&adev->gfx.kiq.ring); + amdgpu_ring_fini(&adev->gfx.kiq[0].ring); amdgpu_ring_fini(&adev->mes.ring); if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) { @@ -1175,7 +1175,7 @@ static int mes_v11_0_kiq_hw_init(struct amdgpu_device *adev) mes_v11_0_enable(adev, true); - mes_v11_0_kiq_setting(&adev->gfx.kiq.ring); + mes_v11_0_kiq_setting(&adev->gfx.kiq[0].ring); r = mes_v11_0_queue_init(adev, AMDGPU_MES_KIQ_PIPE); if (r) @@ -1196,7 +1196,7 @@ static int mes_v11_0_kiq_hw_fini(struct amdgpu_device *adev) } if (amdgpu_sriov_vf(adev)) { - mes_v11_0_kiq_dequeue(&adev->gfx.kiq.ring); + mes_v11_0_kiq_dequeue(&adev->gfx.kiq[0].ring); mes_v11_0_kiq_clear(adev); } @@ -1244,7 +1244,7 @@ static int mes_v11_0_hw_init(void *handle) * MES uses KIQ ring exclusively so driver cannot access KIQ ring * with MES enabled. 
*/ - adev->gfx.kiq.ring.sched.ready = false; + adev->gfx.kiq[0].ring.sched.ready = false; adev->mes.ring.sched.ready = true; return 0; -- cgit v1.2.3 From be697aa3a78ef83a6b8d49e1f0671a002e502cd0 Mon Sep 17 00:00:00 2001 From: Le Ma Date: Wed, 27 Jul 2022 14:35:49 +0800 Subject: drm/amdgpu: move queue_bitmap to an independent structure (v3) Allocate an independent queue_bitmap for each XCD, so that the old bitmap policy can continue to be used with clear logic. Use mec_bitmap[0] as the default for all non-GC 9.4.3 IPs. v2: squash commits to avoid breaking the build v3: unify naming style Signed-off-by: Le Ma Reviewed-by: Hawking Zhang Signed-off-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c | 2 +- drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c | 2 +- drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c | 41 ++++++++++++++--------- drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h | 7 ++-- drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c | 6 ++-- drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c | 6 ++-- drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c | 5 +-- drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | 7 ++-- drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 5 +-- 9 files changed, 48 insertions(+), 33 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c index 0385f7f69278..fed8bb9a721f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c @@ -162,7 +162,7 @@ void amdgpu_amdkfd_device_init(struct amdgpu_device *adev) * clear */ bitmap_complement(gpu_resources.cp_queue_bitmap, - adev->gfx.mec.queue_bitmap, + adev->gfx.mec_bitmap[0].queue_bitmap, KGD_MAX_QUEUES); /* According to linux/bitmap.h we shouldn't use bitmap_clear if diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c index bc944ae4fd5b..03875b971ba6 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c @@ -778,7 +778,7 @@ void kgd_gfx_v9_get_cu_occupancy(struct amdgpu_device *adev, int pasid, * Iterate through the shader engines and arrays of the device * to get number of waves in flight */ - bitmap_complement(cp_queue_bitmap, adev->gfx.mec.queue_bitmap, + bitmap_complement(cp_queue_bitmap, adev->gfx.mec_bitmap[0].queue_bitmap, KGD_MAX_QUEUES); max_queue_cnt = adev->gfx.mec.num_pipe_per_mec * adev->gfx.mec.num_queue_per_pipe; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c index 9b6071df1fa7..b300b1784210 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c @@ -63,10 +63,10 @@ void amdgpu_queue_mask_bit_to_mec_queue(struct amdgpu_device *adev, int bit, } bool amdgpu_gfx_is_mec_queue_enabled(struct amdgpu_device *adev, - int mec, int pipe, int queue) + int xcc_id, int mec, int pipe, int queue) { return test_bit(amdgpu_gfx_mec_queue_to_bit(adev, mec, pipe, queue), - adev->gfx.mec.queue_bitmap); + adev->gfx.mec_bitmap[xcc_id].queue_bitmap); } int amdgpu_gfx_me_queue_to_bit(struct amdgpu_device *adev, @@ -204,29 +204,38 @@ bool amdgpu_gfx_is_high_priority_compute_queue(struct amdgpu_device *adev, void amdgpu_gfx_compute_queue_acquire(struct amdgpu_device *adev) { - int i, queue, pipe; + int i, j, queue, pipe; bool multipipe_policy = amdgpu_gfx_is_compute_multipipe_capable(adev); int max_queues_per_mec = min(adev->gfx.mec.num_pipe_per_mec * adev->gfx.mec.num_queue_per_pipe,
adev->gfx.num_compute_rings); + int num_xcd = (adev->gfx.num_xcd > 1) ? adev->gfx.num_xcd : 1; if (multipipe_policy) { - /* policy: make queues evenly cross all pipes on MEC1 only */ - for (i = 0; i < max_queues_per_mec; i++) { - pipe = i % adev->gfx.mec.num_pipe_per_mec; - queue = (i / adev->gfx.mec.num_pipe_per_mec) % - adev->gfx.mec.num_queue_per_pipe; - - set_bit(pipe * adev->gfx.mec.num_queue_per_pipe + queue, - adev->gfx.mec.queue_bitmap); + /* policy: make queues evenly cross all pipes on MEC1 only + * for multiple xcc, just use the original policy for simplicity */ + for (j = 0; j < num_xcd; j++) { + for (i = 0; i < max_queues_per_mec; i++) { + pipe = i % adev->gfx.mec.num_pipe_per_mec; + queue = (i / adev->gfx.mec.num_pipe_per_mec) % + adev->gfx.mec.num_queue_per_pipe; + + set_bit(pipe * adev->gfx.mec.num_queue_per_pipe + queue, + adev->gfx.mec_bitmap[j].queue_bitmap); + } } } else { /* policy: amdgpu owns all queues in the given pipe */ - for (i = 0; i < max_queues_per_mec; ++i) - set_bit(i, adev->gfx.mec.queue_bitmap); + for (j = 0; j < num_xcd; j++) { + for (i = 0; i < max_queues_per_mec; ++i) + set_bit(i, adev->gfx.mec_bitmap[j].queue_bitmap); + } } - dev_dbg(adev->dev, "mec queue bitmap weight=%d\n", bitmap_weight(adev->gfx.mec.queue_bitmap, AMDGPU_MAX_COMPUTE_QUEUES)); + for (j = 0; j < num_xcd; j++) { + dev_dbg(adev->dev, "mec queue bitmap weight=%d\n", + bitmap_weight(adev->gfx.mec_bitmap[j].queue_bitmap, AMDGPU_MAX_COMPUTE_QUEUES)); + } } void amdgpu_gfx_graphics_queue_acquire(struct amdgpu_device *adev) @@ -268,7 +277,7 @@ static int amdgpu_gfx_kiq_acquire(struct amdgpu_device *adev, * adev->gfx.mec.num_queue_per_pipe; while (--queue_bit >= 0) { - if (test_bit(queue_bit, adev->gfx.mec.queue_bitmap)) + if (test_bit(queue_bit, adev->gfx.mec_bitmap[0].queue_bitmap)) continue; amdgpu_queue_mask_bit_to_mec_queue(adev, queue_bit, &mec, &pipe, &queue); @@ -516,7 +525,7 @@ int amdgpu_gfx_enable_kcq(struct amdgpu_device *adev) return -EINVAL; for (i = 0; i < AMDGPU_MAX_COMPUTE_QUEUES; ++i) { - if (!test_bit(i, adev->gfx.mec.queue_bitmap)) + if (!test_bit(i, adev->gfx.mec_bitmap[0].queue_bitmap)) continue; /* This situation may be hit in the future if a new HW diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h index c742b4a36979..830323310694 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h @@ -76,7 +76,9 @@ struct amdgpu_mec { u32 num_pipe_per_mec; u32 num_queue_per_pipe; void *mqd_backup[AMDGPU_MAX_COMPUTE_RINGS + 1]; +}; +struct amdgpu_mec_bitmap { /* These are the resources for which amdgpu takes ownership */ DECLARE_BITMAP(queue_bitmap, AMDGPU_MAX_COMPUTE_QUEUES); }; @@ -296,6 +298,7 @@ struct amdgpu_gfx { struct amdgpu_ce ce; struct amdgpu_me me; struct amdgpu_mec mec; + struct amdgpu_mec_bitmap mec_bitmap[AMDGPU_MAX_GC_INSTANCES]; struct amdgpu_kiq kiq[AMDGPU_MAX_GC_INSTANCES]; struct amdgpu_imu imu; bool rs64_enable; /* firmware format */ @@ -425,8 +428,8 @@ int amdgpu_gfx_mec_queue_to_bit(struct amdgpu_device *adev, int mec, int pipe, int queue); void amdgpu_queue_mask_bit_to_mec_queue(struct amdgpu_device *adev, int bit, int *mec, int *pipe, int *queue); -bool amdgpu_gfx_is_mec_queue_enabled(struct amdgpu_device *adev, int mec, - int pipe, int queue); +bool amdgpu_gfx_is_mec_queue_enabled(struct amdgpu_device *adev, int inst, + int mec, int pipe, int queue); bool amdgpu_gfx_is_high_priority_compute_queue(struct amdgpu_device *adev, struct amdgpu_ring *ring); bool 
amdgpu_gfx_is_high_priority_graphics_queue(struct amdgpu_device *adev, diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c index d4e7de8fd9da..88f8424ea1e4 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c @@ -4219,7 +4219,7 @@ static int gfx_v10_0_mec_init(struct amdgpu_device *adev) const struct gfx_firmware_header_v1_0 *mec_hdr = NULL; - bitmap_zero(adev->gfx.mec.queue_bitmap, AMDGPU_MAX_COMPUTE_QUEUES); + bitmap_zero(adev->gfx.mec_bitmap[0].queue_bitmap, AMDGPU_MAX_COMPUTE_QUEUES); /* take ownership of the relevant compute queues */ amdgpu_gfx_compute_queue_acquire(adev); @@ -4614,8 +4614,8 @@ static int gfx_v10_0_sw_init(void *handle) for (i = 0; i < adev->gfx.mec.num_mec; ++i) { for (j = 0; j < adev->gfx.mec.num_queue_per_pipe; j++) { for (k = 0; k < adev->gfx.mec.num_pipe_per_mec; k++) { - if (!amdgpu_gfx_is_mec_queue_enabled(adev, i, k, - j)) + if (!amdgpu_gfx_is_mec_queue_enabled(adev, 0, i, + k, j)) continue; r = gfx_v10_0_compute_ring_init(adev, ring_id, diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c index 6a5435255e6d..3e42a44f10a7 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c @@ -699,7 +699,7 @@ static int gfx_v11_0_mec_init(struct amdgpu_device *adev) u32 *hpd; size_t mec_hpd_size; - bitmap_zero(adev->gfx.mec.queue_bitmap, AMDGPU_MAX_COMPUTE_QUEUES); + bitmap_zero(adev->gfx.mec_bitmap[0].queue_bitmap, AMDGPU_MAX_COMPUTE_QUEUES); /* take ownership of the relevant compute queues */ amdgpu_gfx_compute_queue_acquire(adev); @@ -1374,8 +1374,8 @@ static int gfx_v11_0_sw_init(void *handle) for (i = 0; i < adev->gfx.mec.num_mec; ++i) { for (j = 0; j < adev->gfx.mec.num_queue_per_pipe; j++) { for (k = 0; k < adev->gfx.mec.num_pipe_per_mec; k++) { - if (!amdgpu_gfx_is_mec_queue_enabled(adev, i, k, - j)) + if (!amdgpu_gfx_is_mec_queue_enabled(adev, 0, i, + k, j)) continue; r = gfx_v11_0_compute_ring_init(adev, ring_id, diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c index 9d5c1e29b4a3..46740ad9a80f 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c @@ -2728,7 +2728,7 @@ static int gfx_v7_0_mec_init(struct amdgpu_device *adev) u32 *hpd; size_t mec_hpd_size; - bitmap_zero(adev->gfx.mec.queue_bitmap, AMDGPU_MAX_COMPUTE_QUEUES); + bitmap_zero(adev->gfx.mec_bitmap[0].queue_bitmap, AMDGPU_MAX_COMPUTE_QUEUES); /* take ownership of the relevant compute queues */ amdgpu_gfx_compute_queue_acquire(adev); @@ -4456,7 +4456,8 @@ static int gfx_v7_0_sw_init(void *handle) for (i = 0; i < adev->gfx.mec.num_mec; ++i) { for (j = 0; j < adev->gfx.mec.num_queue_per_pipe; j++) { for (k = 0; k < adev->gfx.mec.num_pipe_per_mec; k++) { - if (!amdgpu_gfx_is_mec_queue_enabled(adev, i, k, j)) + if (!amdgpu_gfx_is_mec_queue_enabled(adev, 0, i, + k, j)) continue; r = gfx_v7_0_compute_ring_init(adev, diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c index ed04bad8543d..18722450e265 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c @@ -1304,7 +1304,7 @@ static int gfx_v8_0_mec_init(struct amdgpu_device *adev) u32 *hpd; size_t mec_hpd_size; - bitmap_zero(adev->gfx.mec.queue_bitmap, AMDGPU_MAX_COMPUTE_QUEUES); + bitmap_zero(adev->gfx.mec_bitmap[0].queue_bitmap, AMDGPU_MAX_COMPUTE_QUEUES); /* take ownership of the relevant compute queues */ 
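/* Illustration (editor's sketch rather than patch content): in the
 * multipipe policy above, index i acquires bit pipe * num_queue_per_pipe +
 * queue, where pipe = i % num_pipe_per_mec and queue =
 * (i / num_pipe_per_mec) % num_queue_per_pipe. With the usual 4 pipes per
 * MEC and 8 queues per pipe, i = 0..3 claims queue 0 of pipes 0..3 before
 * any pipe gets a second queue. */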
amdgpu_gfx_compute_queue_acquire(adev); @@ -2001,7 +2001,8 @@ static int gfx_v8_0_sw_init(void *handle) for (i = 0; i < adev->gfx.mec.num_mec; ++i) { for (j = 0; j < adev->gfx.mec.num_queue_per_pipe; j++) { for (k = 0; k < adev->gfx.mec.num_pipe_per_mec; k++) { - if (!amdgpu_gfx_is_mec_queue_enabled(adev, i, k, j)) + if (!amdgpu_gfx_is_mec_queue_enabled(adev, 0, i, + k, j)) continue; r = gfx_v8_0_compute_ring_init(adev, @@ -4319,7 +4320,7 @@ static int gfx_v8_0_kiq_kcq_enable(struct amdgpu_device *adev) int r, i; for (i = 0; i < AMDGPU_MAX_COMPUTE_QUEUES; ++i) { - if (!test_bit(i, adev->gfx.mec.queue_bitmap)) + if (!test_bit(i, adev->gfx.mec_bitmap[0].queue_bitmap)) continue; /* This situation may be hit in the future if a new HW diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c index adf86bc7ed36..49adc36dcc6f 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c @@ -1713,7 +1713,7 @@ static int gfx_v9_0_mec_init(struct amdgpu_device *adev) const struct gfx_firmware_header_v1_0 *mec_hdr; - bitmap_zero(adev->gfx.mec.queue_bitmap, AMDGPU_MAX_COMPUTE_QUEUES); + bitmap_zero(adev->gfx.mec_bitmap[0].queue_bitmap, AMDGPU_MAX_COMPUTE_QUEUES); /* take ownership of the relevant compute queues */ amdgpu_gfx_compute_queue_acquire(adev); @@ -2154,7 +2154,8 @@ static int gfx_v9_0_sw_init(void *handle) for (i = 0; i < adev->gfx.mec.num_mec; ++i) { for (j = 0; j < adev->gfx.mec.num_queue_per_pipe; j++) { for (k = 0; k < adev->gfx.mec.num_pipe_per_mec; k++) { - if (!amdgpu_gfx_is_mec_queue_enabled(adev, i, k, j)) + if (!amdgpu_gfx_is_mec_queue_enabled(adev, 0, i, + k, j)) continue; r = gfx_v9_0_compute_ring_init(adev, -- cgit v1.2.3 From c38be07035bcb31274ce5f85e3b249f691c5b8db Mon Sep 17 00:00:00 2001 From: Le Ma Date: Tue, 16 Nov 2021 21:56:34 +0800 Subject: drm/amdgpu: separate the mqd_backup for kiq from kcq This will benefit the mqd indexing for kiq/kcq in the multi-XCD case. Signed-off-by: Le Ma Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd/amdgpu') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h index 830323310694..d811cb038e94 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h @@ -75,7 +75,7 @@ struct amdgpu_mec { u32 num_mec; u32 num_pipe_per_mec; u32 num_queue_per_pipe; - void *mqd_backup[AMDGPU_MAX_COMPUTE_RINGS + 1]; + void *mqd_backup[AMDGPU_MAX_COMPUTE_RINGS * AMDGPU_MAX_GC_INSTANCES]; }; struct amdgpu_mec_bitmap { /* These are the resources for which amdgpu takes ownership */ DECLARE_BITMAP(queue_bitmap, AMDGPU_MAX_COMPUTE_QUEUES); }; @@ -122,6 +122,7 @@ struct amdgpu_kiq { struct amdgpu_ring ring; struct amdgpu_irq_src irq; const struct kiq_pm4_funcs *pmf; + void *mqd_backup; }; /* -- cgit v1.2.3 From def799c6596d078112095c24c25e162cb5102d90 Mon Sep 17 00:00:00 2001 From: Le Ma Date: Tue, 24 May 2022 12:23:03 +0800 Subject: drm/amdgpu: add multi-xcc support to amdgpu_gfx interfaces (v4) v1: Modify kiq_init/fini, mqd_sw_init/fini and enable/disable_kcq to adapt to multi-die case.
Pass 0 as default to all asics with single xcc (Le) v2: squash commits to avoid breaking the build (Le) v3: unify naming style (Le) v4: apply the changes to gc v11_0 (Hawking) Signed-off-by: Le Ma Reviewed-by: Hawking Zhang Signed-off-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c | 75 ++++++++++++++++++--------------- drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h | 16 +++---- drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c | 23 +++++----- drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c | 23 +++++----- drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | 19 ++++----- drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 25 ++++++----- 6 files changed, 93 insertions(+), 88 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c index b300b1784210..7f5c60381103 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c @@ -267,7 +267,7 @@ void amdgpu_gfx_graphics_queue_acquire(struct amdgpu_device *adev) } static int amdgpu_gfx_kiq_acquire(struct amdgpu_device *adev, - struct amdgpu_ring *ring) + struct amdgpu_ring *ring, int xcc_id) { int q
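Taken together, the series converts every adev->gfx.kiq access into an indexed adev->gfx.kiq[xcc_id] access, with 0 passed on single-XCC parts. A minimal sketch of the resulting pattern, using field names from the diffs above (editor's illustration, not code from the series):

	struct amdgpu_kiq *kiq = &adev->gfx.kiq[xcc_id];
	struct amdgpu_ring *kiq_ring = &kiq->ring;

	spin_lock(&kiq->ring_lock);
	/* reserve ring space and emit KIQ packets on kiq_ring here */
	spin_unlock(&kiq->ring_lock);

Per-instance state such as mec_bitmap[] and the per-KIQ mqd_backup indexes the same way, which is what amdgpu_gfx_kiq_acquire() begins to thread through via its new xcc_id argument.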