Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c')
-rw-r--r--   drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c   439
1 file changed, 298 insertions(+), 141 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index 676832da75eb..e80685d1e6c6 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -47,6 +47,7 @@
 #include "amdgpu_ras.h"
 
+#include "amdgpu_ring_mux.h"
 #include "gfx_v9_4.h"
 #include "gfx_v9_0.h"
 #include "gfx_v9_4_2.h"
@@ -56,6 +57,7 @@
 #include "asic_reg/gc/gc_9_0_default.h"
 
 #define GFX9_NUM_GFX_RINGS     1
+#define GFX9_NUM_SW_GFX_RINGS  2
 #define GFX9_MEC_HPD_SIZE 4096
 #define RLCG_UCODE_LOADING_START_ADDRESS       0x00002000L
 #define RLC_SAVE_RESTORE_ADDR_STARTING_OFFSET  0x00000000L
@@ -753,7 +755,7 @@ static void gfx_v9_0_set_rlc_funcs(struct amdgpu_device *adev);
 static int gfx_v9_0_get_cu_info(struct amdgpu_device *adev,
				struct amdgpu_cu_info *cu_info);
 static uint64_t gfx_v9_0_get_gpu_clock_counter(struct amdgpu_device *adev);
-static void gfx_v9_0_ring_emit_de_meta(struct amdgpu_ring *ring);
+static void gfx_v9_0_ring_emit_de_meta(struct amdgpu_ring *ring, bool resume);
 static u64 gfx_v9_0_ring_get_rptr_compute(struct amdgpu_ring *ring);
 static void gfx_v9_0_query_ras_error_count(struct amdgpu_device *adev,
					   void *ras_error_status);
@@ -826,9 +828,10 @@ static void gfx_v9_0_kiq_unmap_queues(struct amdgpu_ring *kiq_ring,
			PACKET3_UNMAP_QUEUES_DOORBELL_OFFSET0(ring->doorbell_index));

	if (action == PREEMPT_QUEUES_NO_UNMAP) {
-		amdgpu_ring_write(kiq_ring, lower_32_bits(gpu_addr));
-		amdgpu_ring_write(kiq_ring, upper_32_bits(gpu_addr));
-		amdgpu_ring_write(kiq_ring, seq);
+		amdgpu_ring_write(kiq_ring, lower_32_bits(ring->wptr & ring->buf_mask));
+		amdgpu_ring_write(kiq_ring, 0);
+		amdgpu_ring_write(kiq_ring, 0);
+
	} else {
		amdgpu_ring_write(kiq_ring, 0);
		amdgpu_ring_write(kiq_ring, 0);
@@ -1075,18 +1078,12 @@ err1:

 static void gfx_v9_0_free_microcode(struct amdgpu_device *adev)
 {
-	release_firmware(adev->gfx.pfp_fw);
-	adev->gfx.pfp_fw = NULL;
-	release_firmware(adev->gfx.me_fw);
-	adev->gfx.me_fw = NULL;
-	release_firmware(adev->gfx.ce_fw);
-	adev->gfx.ce_fw = NULL;
-	release_firmware(adev->gfx.rlc_fw);
-	adev->gfx.rlc_fw = NULL;
-	release_firmware(adev->gfx.mec_fw);
-	adev->gfx.mec_fw = NULL;
-	release_firmware(adev->gfx.mec2_fw);
-	adev->gfx.mec2_fw = NULL;
+	amdgpu_ucode_release(&adev->gfx.pfp_fw);
+	amdgpu_ucode_release(&adev->gfx.me_fw);
+	amdgpu_ucode_release(&adev->gfx.ce_fw);
+	amdgpu_ucode_release(&adev->gfx.rlc_fw);
+	amdgpu_ucode_release(&adev->gfx.mec_fw);
+	amdgpu_ucode_release(&adev->gfx.mec2_fw);

	kfree(adev->gfx.rlc.register_list_format);
 }
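The microcode hunks above and below collapse each request_firmware()/amdgpu_ucode_validate()/release_firmware() triple into the paired helpers amdgpu_ucode_request() and amdgpu_ucode_release(). A minimal sketch of the resulting idiom, assuming the driver's existing headers; example_load_fw() is a hypothetical illustration, not code from this file:

static int example_load_fw(struct amdgpu_device *adev,
			   const struct firmware **fw, const char *fw_name)
{
	int err;

	/* amdgpu_ucode_request() fuses the request and header-validation
	 * steps that were previously open-coded as two calls
	 */
	err = amdgpu_ucode_request(adev, fw, fw_name);
	if (err)
		/* releases the blob and NULLs *fw, which is why the
		 * explicit "adev->gfx.*_fw = NULL" assignments removed
		 * above are no longer needed
		 */
		amdgpu_ucode_release(fw);
	return err;
}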
@@ -1248,55 +1245,40 @@ static void gfx_v9_0_check_if_need_gfxoff(struct amdgpu_device *adev)
 }

 static int gfx_v9_0_init_cp_gfx_microcode(struct amdgpu_device *adev,
-					  const char *chip_name)
+					  char *chip_name)
 {
	char fw_name[30];
	int err;

	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_pfp.bin", chip_name);
-	err = request_firmware(&adev->gfx.pfp_fw, fw_name, adev->dev);
-	if (err)
-		goto out;
-	err = amdgpu_ucode_validate(adev->gfx.pfp_fw);
+	err = amdgpu_ucode_request(adev, &adev->gfx.pfp_fw, fw_name);
	if (err)
		goto out;
	amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_PFP);

	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_me.bin", chip_name);
-	err = request_firmware(&adev->gfx.me_fw, fw_name, adev->dev);
-	if (err)
-		goto out;
-	err = amdgpu_ucode_validate(adev->gfx.me_fw);
+	err = amdgpu_ucode_request(adev, &adev->gfx.me_fw, fw_name);
	if (err)
		goto out;
	amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_ME);

	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ce.bin", chip_name);
-	err = request_firmware(&adev->gfx.ce_fw, fw_name, adev->dev);
-	if (err)
-		goto out;
-	err = amdgpu_ucode_validate(adev->gfx.ce_fw);
+	err = amdgpu_ucode_request(adev, &adev->gfx.ce_fw, fw_name);
	if (err)
		goto out;
	amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_CE);

 out:
	if (err) {
-		dev_err(adev->dev,
-			"gfx9: Failed to init firmware \"%s\"\n",
-			fw_name);
-		release_firmware(adev->gfx.pfp_fw);
-		adev->gfx.pfp_fw = NULL;
-		release_firmware(adev->gfx.me_fw);
-		adev->gfx.me_fw = NULL;
-		release_firmware(adev->gfx.ce_fw);
-		adev->gfx.ce_fw = NULL;
+		amdgpu_ucode_release(&adev->gfx.pfp_fw);
+		amdgpu_ucode_release(&adev->gfx.me_fw);
+		amdgpu_ucode_release(&adev->gfx.ce_fw);
	}
	return err;
 }

 static int gfx_v9_0_init_rlc_microcode(struct amdgpu_device *adev,
-				       const char *chip_name)
+				       char *chip_name)
 {
	char fw_name[30];
	int err;
@@ -1325,10 +1307,7 @@ static int gfx_v9_0_init_rlc_microcode(struct amdgpu_device *adev,
		snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_kicker_rlc.bin", chip_name);
	else
		snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_rlc.bin", chip_name);
-	err = request_firmware(&adev->gfx.rlc_fw, fw_name, adev->dev);
-	if (err)
-		goto out;
-	err = amdgpu_ucode_validate(adev->gfx.rlc_fw);
+	err = amdgpu_ucode_request(adev, &adev->gfx.rlc_fw, fw_name);
	if (err)
		goto out;
	rlc_hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;
@@ -1337,13 +1316,9 @@ static int gfx_v9_0_init_rlc_microcode(struct amdgpu_device *adev,
	version_minor = le16_to_cpu(rlc_hdr->header.header_version_minor);
	err = amdgpu_gfx_rlc_init_microcode(adev, version_major, version_minor);
 out:
-	if (err) {
-		dev_err(adev->dev,
-			"gfx9: Failed to init firmware \"%s\"\n",
-			fw_name);
-		release_firmware(adev->gfx.rlc_fw);
-		adev->gfx.rlc_fw = NULL;
-	}
+	if (err)
+		amdgpu_ucode_release(&adev->gfx.rlc_fw);
+
	return err;
 }
@@ -1358,7 +1333,7 @@ static bool gfx_v9_0_load_mec2_fw_bin_support(struct amdgpu_device *adev)
 }

 static int gfx_v9_0_init_cp_compute_microcode(struct amdgpu_device *adev,
-					      const char *chip_name)
+					      char *chip_name)
 {
	char fw_name[30];
	int err;
@@ -1368,12 +1343,9 @@ static int gfx_v9_0_init_cp_compute_microcode(struct amdgpu_device *adev,
	else
		snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec.bin", chip_name);

-	err = request_firmware(&adev->gfx.mec_fw, fw_name, adev->dev);
-	if (err)
-		goto out;
-	err = amdgpu_ucode_validate(adev->gfx.mec_fw);
+	err = amdgpu_ucode_request(adev, &adev->gfx.mec_fw, fw_name);
	if (err)
-		goto out;
+		return err;
	amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_MEC1);
	amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_MEC1_JT);
@@ -1383,11 +1355,8 @@ static int gfx_v9_0_init_cp_compute_microcode(struct amdgpu_device *adev,
		else
			snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec2.bin", chip_name);

-		err = request_firmware(&adev->gfx.mec2_fw, fw_name, adev->dev);
+		err = amdgpu_ucode_request(adev, &adev->gfx.mec2_fw, fw_name);
		if (!err) {
-			err = amdgpu_ucode_validate(adev->gfx.mec2_fw);
-			if (err)
-				goto out;
			amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_MEC2);
			amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_MEC2_JT);
		} else {
@@ -1399,75 +1368,35 @@ static int gfx_v9_0_init_cp_compute_microcode(struct amdgpu_device *adev,
		adev->gfx.mec2_feature_version = adev->gfx.mec_feature_version;
	}

-out:
	gfx_v9_0_check_if_need_gfxoff(adev);
	gfx_v9_0_check_fw_write_wait(adev);
	if (err) {
-		dev_err(adev->dev,
-			"gfx9: Failed to init firmware \"%s\"\n",
-			fw_name);
-		release_firmware(adev->gfx.mec_fw);
-		adev->gfx.mec_fw = NULL;
-		release_firmware(adev->gfx.mec2_fw);
-		adev->gfx.mec2_fw = NULL;
+		amdgpu_ucode_release(&adev->gfx.mec_fw);
+		amdgpu_ucode_release(&adev->gfx.mec2_fw);
	}
	return err;
 }

 static int gfx_v9_0_init_microcode(struct amdgpu_device *adev)
 {
-	const char *chip_name;
+	char ucode_prefix[30];
	int r;

	DRM_DEBUG("\n");
-
-	switch (adev->ip_versions[GC_HWIP][0]) {
-	case IP_VERSION(9, 0, 1):
-		chip_name = "vega10";
-		break;
-	case IP_VERSION(9, 2, 1):
-		chip_name = "vega12";
-		break;
-	case IP_VERSION(9, 4, 0):
-		chip_name = "vega20";
-		break;
-	case IP_VERSION(9, 2, 2):
-	case IP_VERSION(9, 1, 0):
-		if (adev->apu_flags & AMD_APU_IS_RAVEN2)
-			chip_name = "raven2";
-		else if (adev->apu_flags & AMD_APU_IS_PICASSO)
-			chip_name = "picasso";
-		else
-			chip_name = "raven";
-		break;
-	case IP_VERSION(9, 4, 1):
-		chip_name = "arcturus";
-		break;
-	case IP_VERSION(9, 3, 0):
-		if (adev->apu_flags & AMD_APU_IS_RENOIR)
-			chip_name = "renoir";
-		else
-			chip_name = "green_sardine";
-		break;
-	case IP_VERSION(9, 4, 2):
-		chip_name = "aldebaran";
-		break;
-	default:
-		BUG();
-	}
+	amdgpu_ucode_ip_version_decode(adev, GC_HWIP, ucode_prefix, sizeof(ucode_prefix));

	/* No CPG in Arcturus */
	if (adev->gfx.num_gfx_rings) {
-		r = gfx_v9_0_init_cp_gfx_microcode(adev, chip_name);
+		r = gfx_v9_0_init_cp_gfx_microcode(adev, ucode_prefix);
		if (r)
			return r;
	}

-	r = gfx_v9_0_init_rlc_microcode(adev, chip_name);
+	r = gfx_v9_0_init_rlc_microcode(adev, ucode_prefix);
	if (r)
		return r;

-	r = gfx_v9_0_init_cp_compute_microcode(adev, chip_name);
+	r = gfx_v9_0_init_cp_compute_microcode(adev, ucode_prefix);
	if (r)
		return r;
@@ -1780,7 +1709,8 @@ static int gfx_v9_0_mec_init(struct amdgpu_device *adev)
	mec_hpd_size = adev->gfx.num_compute_rings * GFX9_MEC_HPD_SIZE;
	if (mec_hpd_size) {
		r = amdgpu_bo_create_reserved(adev, mec_hpd_size, PAGE_SIZE,
-					      AMDGPU_GEM_DOMAIN_VRAM,
+					      AMDGPU_GEM_DOMAIN_VRAM |
+					      AMDGPU_GEM_DOMAIN_GTT,
					      &adev->gfx.mec.hpd_eop_obj,
					      &adev->gfx.mec.hpd_eop_gpu_addr,
					      (void **)&hpd);
@@ -2103,6 +2033,7 @@ static int gfx_v9_0_sw_init(void *handle)
	struct amdgpu_ring *ring;
	struct amdgpu_kiq *kiq;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+	unsigned int hw_prio;

	switch (adev->ip_versions[GC_HWIP][0]) {
	case IP_VERSION(9, 0, 1):
@@ -2154,12 +2085,6 @@ static int gfx_v9_0_sw_init(void *handle)

	adev->gfx.gfx_current_status = AMDGPU_GFX_NORMAL_MODE;

-	r = gfx_v9_0_init_microcode(adev);
-	if (r) {
-		DRM_ERROR("Failed to load gfx firmware!\n");
-		return r;
-	}
-
	if (adev->gfx.rlc.funcs) {
		if (adev->gfx.rlc.funcs->init) {
			r = adev->gfx.rlc.funcs->init(adev);
@@ -2186,6 +2111,9 @@ static int gfx_v9_0_sw_init(void *handle)
		sprintf(ring->name, "gfx_%d", i);
		ring->use_doorbell = true;
		ring->doorbell_index = adev->doorbell_index.gfx_ring0 << 1;
+
+		/* disable scheduler on the real ring */
+		ring->no_scheduler = true;
		r = amdgpu_ring_init(adev, ring, 1024, &adev->gfx.eop_irq,
				     AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP,
				     AMDGPU_RING_PRIO_DEFAULT, NULL);
@@ -2193,6 +2121,41 @@ static int gfx_v9_0_sw_init(void *handle)
			return r;
	}

+	/* set up the software rings */
+	if (adev->gfx.num_gfx_rings) {
+		for (i = 0; i < GFX9_NUM_SW_GFX_RINGS; i++) {
+			ring = &adev->gfx.sw_gfx_ring[i];
+			ring->ring_obj = NULL;
+			sprintf(ring->name, amdgpu_sw_ring_name(i));
+			ring->use_doorbell = true;
+			ring->doorbell_index = adev->doorbell_index.gfx_ring0 << 1;
+			ring->is_sw_ring = true;
+			hw_prio = amdgpu_sw_ring_priority(i);
+			r = amdgpu_ring_init(adev, ring, 1024, &adev->gfx.eop_irq,
+					     AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP, hw_prio,
+					     NULL);
+			if (r)
+				return r;
+			ring->wptr = 0;
+		}
+
+		/* init the muxer and add software rings */
+		r = amdgpu_ring_mux_init(&adev->gfx.muxer, &adev->gfx.gfx_ring[0],
+					 GFX9_NUM_SW_GFX_RINGS);
+		if (r) {
+			DRM_ERROR("amdgpu_ring_mux_init failed(%d)\n", r);
+			return r;
+		}
+		for (i = 0; i < GFX9_NUM_SW_GFX_RINGS; i++) {
+			r = amdgpu_ring_mux_add_sw_ring(&adev->gfx.muxer,
+							&adev->gfx.sw_gfx_ring[i]);
+			if (r) {
+				DRM_ERROR("amdgpu_ring_mux_add_sw_ring failed(%d)\n", r);
+				return r;
+			}
+		}
+	}
+
	/* set up the compute queues - allocate horizontally across pipes */
	ring_id = 0;
	for (i = 0; i < adev->gfx.mec.num_mec; ++i) {
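For orientation, the sw_init hunk above follows a strict ordering: the real ring is created with its scheduler disabled, the software rings are created against the same doorbell, and only then is the muxer bound to the real ring and each software ring registered with it. A condensed sketch of that ordering (example_bind_sw_rings() is a hypothetical helper, not driver code):

static int example_bind_sw_rings(struct amdgpu_device *adev)
{
	int i, r;

	/* the drm scheduler runs on the software rings only */
	adev->gfx.gfx_ring[0].no_scheduler = true;

	/* the muxer must be bound to the real ring first ... */
	r = amdgpu_ring_mux_init(&adev->gfx.muxer, &adev->gfx.gfx_ring[0],
				 GFX9_NUM_SW_GFX_RINGS);
	if (r)
		return r;

	/* ... before any software ring is registered with it */
	for (i = 0; i < GFX9_NUM_SW_GFX_RINGS; i++) {
		r = amdgpu_ring_mux_add_sw_ring(&adev->gfx.muxer,
						&adev->gfx.sw_gfx_ring[i]);
		if (r)
			return r;
	}
	return 0;
}

gfx_v9_0_sw_fini() below reverses this: the software rings are torn down before the muxer itself.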
@@ -2243,6 +2206,12 @@ static int gfx_v9_0_sw_fini(void *handle)
	int i;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

+	if (adev->gfx.num_gfx_rings) {
+		for (i = 0; i < GFX9_NUM_SW_GFX_RINGS; i++)
+			amdgpu_ring_fini(&adev->gfx.sw_gfx_ring[i]);
+		amdgpu_ring_mux_fini(&adev->gfx.muxer);
+	}
+
	for (i = 0; i < adev->gfx.num_gfx_rings; i++)
		amdgpu_ring_fini(&adev->gfx.gfx_ring[i]);
	for (i = 0; i < adev->gfx.num_compute_rings; i++)
@@ -4557,7 +4526,7 @@ static int gfx_v9_0_early_init(void *handle)
	/* init rlcg reg access ctrl */
	gfx_v9_0_init_rlcg_reg_access_ctrl(adev);

-	return 0;
+	return gfx_v9_0_init_microcode(adev);
 }

 static int gfx_v9_0_ecc_late_init(void *handle)
@@ -5157,11 +5126,17 @@ static void gfx_v9_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,

	control |= ib->length_dw | (vmid << 24);

-	if (amdgpu_sriov_vf(ring->adev) && (ib->flags & AMDGPU_IB_FLAG_PREEMPT)) {
+	if (ib->flags & AMDGPU_IB_FLAG_PREEMPT) {
		control |= INDIRECT_BUFFER_PRE_ENB(1);

+		if (flags & AMDGPU_IB_PREEMPTED)
+			control |= INDIRECT_BUFFER_PRE_RESUME(1);
+
		if (!(ib->flags & AMDGPU_IB_FLAG_CE) && vmid)
-			gfx_v9_0_ring_emit_de_meta(ring);
+			gfx_v9_0_ring_emit_de_meta(ring,
+						   (!amdgpu_sriov_vf(ring->adev) &&
+						   flags & AMDGPU_IB_PREEMPTED) ?
+						   true : false);
	}

	amdgpu_ring_write(ring, header);
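The emit_ib hunk above derives two preemption bits from the IB and submission flags: PRE_ENB marks an IB as preemptible, and PRE_RESUME marks the resubmission of a previously preempted IB, in which case the DE metadata is replayed from the saved CSA copy instead of a zeroed payload. A small sketch of that derivation (example_ib_control() is hypothetical; the flag and macro names are the ones used in the hunk):

static u32 example_ib_control(u32 ib_flags, u32 submit_flags)
{
	u32 control = 0;

	if (ib_flags & AMDGPU_IB_FLAG_PREEMPT) {
		/* this IB may be preempted mid-execution */
		control |= INDIRECT_BUFFER_PRE_ENB(1);
		if (submit_flags & AMDGPU_IB_PREEMPTED)
			/* this IB is being resubmitted after a preemption */
			control |= INDIRECT_BUFFER_PRE_RESUME(1);
	}
	return control;
}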
@@ -5216,17 +5191,24 @@ static void gfx_v9_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr,
	bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT;
	bool int_sel = flags & AMDGPU_FENCE_FLAG_INT;
	bool writeback = flags & AMDGPU_FENCE_FLAG_TC_WB_ONLY;
+	bool exec = flags & AMDGPU_FENCE_FLAG_EXEC;
+	uint32_t dw2 = 0;

	/* RELEASE_MEM - flush caches, send int */
	amdgpu_ring_write(ring, PACKET3(PACKET3_RELEASE_MEM, 6));
-	amdgpu_ring_write(ring, ((writeback ? (EOP_TC_WB_ACTION_EN |
-					       EOP_TC_NC_ACTION_EN) :
-					      (EOP_TCL1_ACTION_EN |
-					       EOP_TC_ACTION_EN |
-					       EOP_TC_WB_ACTION_EN |
-					       EOP_TC_MD_ACTION_EN)) |
-				 EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) |
-				 EVENT_INDEX(5)));
+
+	if (writeback) {
+		dw2 = EOP_TC_NC_ACTION_EN;
+	} else {
+		dw2 = EOP_TCL1_ACTION_EN | EOP_TC_ACTION_EN |
+				EOP_TC_MD_ACTION_EN;
+	}
+	dw2 |= EOP_TC_WB_ACTION_EN | EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) |
+				EVENT_INDEX(5);
+	if (exec)
+		dw2 |= EOP_EXEC;
+
+	amdgpu_ring_write(ring, dw2);
	amdgpu_ring_write(ring, DATA_SEL(write64bit ? 2 : 1) | INT_SEL(int_sel ? 2 : 0));

	/*
@@ -5331,33 +5313,135 @@ static void gfx_v9_ring_emit_sb(struct amdgpu_ring *ring)
	amdgpu_ring_write(ring, 0);
 }

-static void gfx_v9_0_ring_emit_ce_meta(struct amdgpu_ring *ring)
+static void gfx_v9_0_ring_emit_ce_meta(struct amdgpu_ring *ring, bool resume)
 {
+	struct amdgpu_device *adev = ring->adev;
	struct v9_ce_ib_state ce_payload = {0};
-	uint64_t csa_addr;
+	uint64_t offset, ce_payload_gpu_addr;
+	void *ce_payload_cpu_addr;
	int cnt;

	cnt = (sizeof(ce_payload) >> 2) + 4 - 2;
-	csa_addr = amdgpu_csa_vaddr(ring->adev);
+
+	if (ring->is_mes_queue) {
+		offset = offsetof(struct amdgpu_mes_ctx_meta_data,
+				  gfx[0].gfx_meta_data) +
+			offsetof(struct v9_gfx_meta_data, ce_payload);
+		ce_payload_gpu_addr =
+			amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset);
+		ce_payload_cpu_addr =
+			amdgpu_mes_ctx_get_offs_cpu_addr(ring, offset);
+	} else {
+		offset = offsetof(struct v9_gfx_meta_data, ce_payload);
+		ce_payload_gpu_addr = amdgpu_csa_vaddr(ring->adev) + offset;
+		ce_payload_cpu_addr = adev->virt.csa_cpu_addr + offset;
+	}

	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, cnt));
	amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(2) |
				 WRITE_DATA_DST_SEL(8) |
				 WR_CONFIRM) |
				 WRITE_DATA_CACHE_POLICY(0));
-	amdgpu_ring_write(ring, lower_32_bits(csa_addr + offsetof(struct v9_gfx_meta_data, ce_payload)));
-	amdgpu_ring_write(ring, upper_32_bits(csa_addr + offsetof(struct v9_gfx_meta_data, ce_payload)));
-	amdgpu_ring_write_multiple(ring, (void *)&ce_payload, sizeof(ce_payload) >> 2);
+	amdgpu_ring_write(ring, lower_32_bits(ce_payload_gpu_addr));
+	amdgpu_ring_write(ring, upper_32_bits(ce_payload_gpu_addr));
+
+	if (resume)
+		amdgpu_ring_write_multiple(ring, ce_payload_cpu_addr,
+					   sizeof(ce_payload) >> 2);
+	else
+		amdgpu_ring_write_multiple(ring, (void *)&ce_payload,
+					   sizeof(ce_payload) >> 2);
+}
+
+static int gfx_v9_0_ring_preempt_ib(struct amdgpu_ring *ring)
+{
+	int i, r = 0;
+	struct amdgpu_device *adev = ring->adev;
+	struct amdgpu_kiq *kiq = &adev->gfx.kiq;
+	struct amdgpu_ring *kiq_ring = &kiq->ring;
+	unsigned long flags;
+
+	if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues)
+		return -EINVAL;
+
+	spin_lock_irqsave(&kiq->ring_lock, flags);
+
+	if (amdgpu_ring_alloc(kiq_ring, kiq->pmf->unmap_queues_size)) {
+		spin_unlock_irqrestore(&kiq->ring_lock, flags);
+		return -ENOMEM;
+	}
+
+	/* assert preemption condition */
+	amdgpu_ring_set_preempt_cond_exec(ring, false);
+
+	ring->trail_seq += 1;
+	amdgpu_ring_alloc(ring, 13);
+	gfx_v9_0_ring_emit_fence(ring, ring->trail_fence_gpu_addr,
+				 ring->trail_seq, AMDGPU_FENCE_FLAG_EXEC | AMDGPU_FENCE_FLAG_INT);
+	/*reset the CP_VMID_PREEMPT after trailing fence*/
+	amdgpu_ring_emit_wreg(ring,
+			      SOC15_REG_OFFSET(GC, 0, mmCP_VMID_PREEMPT),
+			      0x0);
+
+	/* assert IB preemption, emit the trailing fence */
+	kiq->pmf->kiq_unmap_queues(kiq_ring, ring, PREEMPT_QUEUES_NO_UNMAP,
+				   ring->trail_fence_gpu_addr,
+				   ring->trail_seq);
+
+	amdgpu_ring_commit(kiq_ring);
+	spin_unlock_irqrestore(&kiq->ring_lock, flags);
+
+	/* poll the trailing fence */
+	for (i = 0; i < adev->usec_timeout; i++) {
+		if (ring->trail_seq ==
+		    le32_to_cpu(*ring->trail_fence_cpu_addr))
+			break;
+		udelay(1);
+	}
+
+	if (i >= adev->usec_timeout) {
+		r = -EINVAL;
+		DRM_WARN("ring %d timeout to preempt ib\n", ring->idx);
+	}
+
+	amdgpu_ring_commit(ring);
+
+	/* deassert preemption condition */
+	amdgpu_ring_set_preempt_cond_exec(ring, true);
+	return r;
 }

-static void gfx_v9_0_ring_emit_de_meta(struct amdgpu_ring *ring)
+static void gfx_v9_0_ring_emit_de_meta(struct amdgpu_ring *ring, bool resume)
 {
+	struct amdgpu_device *adev = ring->adev;
	struct v9_de_ib_state de_payload = {0};
-	uint64_t csa_addr, gds_addr;
+	uint64_t offset, gds_addr, de_payload_gpu_addr;
+	void *de_payload_cpu_addr;
	int cnt;

-	csa_addr = amdgpu_csa_vaddr(ring->adev);
-	gds_addr = csa_addr + 4096;
+	if (ring->is_mes_queue) {
+		offset = offsetof(struct amdgpu_mes_ctx_meta_data,
+				  gfx[0].gfx_meta_data) +
+			offsetof(struct v9_gfx_meta_data, de_payload);
+		de_payload_gpu_addr =
+			amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset);
+		de_payload_cpu_addr =
+			amdgpu_mes_ctx_get_offs_cpu_addr(ring, offset);
+
+		offset = offsetof(struct amdgpu_mes_ctx_meta_data,
+				  gfx[0].gds_backup) +
+			offsetof(struct v9_gfx_meta_data, de_payload);
+		gds_addr = amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset);
+	} else {
+		offset = offsetof(struct v9_gfx_meta_data, de_payload);
+		de_payload_gpu_addr = amdgpu_csa_vaddr(ring->adev) + offset;
+		de_payload_cpu_addr = adev->virt.csa_cpu_addr + offset;
+
+		gds_addr = ALIGN(amdgpu_csa_vaddr(ring->adev) +
+				 AMDGPU_CSA_SIZE - adev->gds.gds_size,
+				 PAGE_SIZE);
+	}
+
	de_payload.gds_backup_addrlo = lower_32_bits(gds_addr);
	de_payload.gds_backup_addrhi = upper_32_bits(gds_addr);
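gfx_v9_0_ring_preempt_ib() above is a request/acknowledge handshake: the KIQ is asked to preempt the queue without unmapping it, and completion is detected by busy-polling the trailing fence that the CP writes back. A reduced sketch of the wait step (example_wait_trailing_fence() is hypothetical and returns -ETIMEDOUT, where the function above reports -EINVAL):

static int example_wait_trailing_fence(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	int i;

	/* the CP writes ring->trail_seq to the trailing fence address
	 * once the preempt request submitted through KIQ takes effect
	 */
	for (i = 0; i < adev->usec_timeout; i++) {
		if (ring->trail_seq ==
		    le32_to_cpu(*ring->trail_fence_cpu_addr))
			return 0;
		udelay(1);
	}
	return -ETIMEDOUT;
}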
@@ -5367,9 +5451,15 @@ static void gfx_v9_0_ring_emit_de_meta(struct amdgpu_ring *ring, bool resume)
				 WRITE_DATA_DST_SEL(8) |
				 WR_CONFIRM) |
				 WRITE_DATA_CACHE_POLICY(0));
-	amdgpu_ring_write(ring, lower_32_bits(csa_addr + offsetof(struct v9_gfx_meta_data, de_payload)));
-	amdgpu_ring_write(ring, upper_32_bits(csa_addr + offsetof(struct v9_gfx_meta_data, de_payload)));
-	amdgpu_ring_write_multiple(ring, (void *)&de_payload, sizeof(de_payload) >> 2);
+	amdgpu_ring_write(ring, lower_32_bits(de_payload_gpu_addr));
+	amdgpu_ring_write(ring, upper_32_bits(de_payload_gpu_addr));
+
+	if (resume)
+		amdgpu_ring_write_multiple(ring, de_payload_cpu_addr,
+					   sizeof(de_payload) >> 2);
+	else
+		amdgpu_ring_write_multiple(ring, (void *)&de_payload,
+					   sizeof(de_payload) >> 2);
 }

 static void gfx_v9_0_ring_emit_frame_cntl(struct amdgpu_ring *ring, bool start,
@@ -5385,8 +5475,9 @@ static void gfx_v9_ring_emit_cntxcntl(struct amdgpu_ring *ring, uint32_t flags)
 {
	uint32_t dw2 = 0;

-	if (amdgpu_sriov_vf(ring->adev))
-		gfx_v9_0_ring_emit_ce_meta(ring);
+	gfx_v9_0_ring_emit_ce_meta(ring,
+				   (!amdgpu_sriov_vf(ring->adev) &&
+				   flags & AMDGPU_IB_PREEMPTED) ? true : false);

	dw2 |= 0x80000000; /* set load_enable otherwise this package is just NOPs */
	if (flags & AMDGPU_HAVE_CTX_SWITCH) {
@@ -5712,7 +5803,12 @@ static int gfx_v9_0_eop_irq(struct amdgpu_device *adev,

	switch (me_id) {
	case 0:
-		amdgpu_fence_process(&adev->gfx.gfx_ring[0]);
+		if (adev->gfx.num_gfx_rings &&
+		    !amdgpu_mcbp_handle_trailing_fence_irq(&adev->gfx.muxer)) {
+			/* Fence signals are handled on the software rings*/
+			for (i = 0; i < GFX9_NUM_SW_GFX_RINGS; i++)
+				amdgpu_fence_process(&adev->gfx.sw_gfx_ring[i]);
+		}
		break;
	case 1:
	case 2:
@@ -6709,6 +6805,62 @@ static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_gfx = {
	.emit_cntxcntl = gfx_v9_ring_emit_cntxcntl,
	.init_cond_exec = gfx_v9_0_ring_emit_init_cond_exec,
	.patch_cond_exec = gfx_v9_0_ring_emit_patch_cond_exec,
+	.preempt_ib = gfx_v9_0_ring_preempt_ib,
+	.emit_frame_cntl = gfx_v9_0_ring_emit_frame_cntl,
+	.emit_wreg = gfx_v9_0_ring_emit_wreg,
+	.emit_reg_wait = gfx_v9_0_ring_emit_reg_wait,
+	.emit_reg_write_reg_wait = gfx_v9_0_ring_emit_reg_write_reg_wait,
+	.soft_recovery = gfx_v9_0_ring_soft_recovery,
+	.emit_mem_sync = gfx_v9_0_emit_mem_sync,
+};
+
+static const struct amdgpu_ring_funcs gfx_v9_0_sw_ring_funcs_gfx = {
+	.type = AMDGPU_RING_TYPE_GFX,
+	.align_mask = 0xff,
+	.nop = PACKET3(PACKET3_NOP, 0x3FFF),
+	.support_64bit_ptrs = true,
+	.secure_submission_supported = true,
+	.vmhub = AMDGPU_GFXHUB_0,
+	.get_rptr = amdgpu_sw_ring_get_rptr_gfx,
+	.get_wptr = amdgpu_sw_ring_get_wptr_gfx,
+	.set_wptr = amdgpu_sw_ring_set_wptr_gfx,
+	.emit_frame_size = /* totally 242 maximum if 16 IBs */
+		5 +  /* COND_EXEC */
+		7 +  /* PIPELINE_SYNC */
+		SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 +
+		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
+		2 +  /* VM_FLUSH */
+		8 +  /* FENCE for VM_FLUSH */
+		20 + /* GDS switch */
+		4 +  /* double SWITCH_BUFFER,
+		      * the first COND_EXEC jump to the place just
+		      * prior to this double SWITCH_BUFFER
+		      */
+		5 +  /* COND_EXEC */
+		7 +  /* HDP_flush */
+		4 +  /* VGT_flush */
+		14 + /* CE_META */
+		31 + /* DE_META */
+		3 +  /* CNTX_CTRL */
+		5 +  /* HDP_INVL */
+		8 + 8 + /* FENCE x2 */
+		2 +  /* SWITCH_BUFFER */
+		7,   /* gfx_v9_0_emit_mem_sync */
+	.emit_ib_size = 4, /* gfx_v9_0_ring_emit_ib_gfx */
+	.emit_ib = gfx_v9_0_ring_emit_ib_gfx,
+	.emit_fence = gfx_v9_0_ring_emit_fence,
+	.emit_pipeline_sync = gfx_v9_0_ring_emit_pipeline_sync,
+	.emit_vm_flush = gfx_v9_0_ring_emit_vm_flush,
+	.emit_gds_switch = gfx_v9_0_ring_emit_gds_switch,
+	.emit_hdp_flush = gfx_v9_0_ring_emit_hdp_flush,
+	.test_ring = gfx_v9_0_ring_test_ring,
+	.test_ib = gfx_v9_0_ring_test_ib,
+	.insert_nop = amdgpu_sw_ring_insert_nop,
+	.pad_ib = amdgpu_ring_generic_pad_ib,
+	.emit_switch_buffer = gfx_v9_ring_emit_sb,
+	.emit_cntxcntl = gfx_v9_ring_emit_cntxcntl,
+	.init_cond_exec = gfx_v9_0_ring_emit_init_cond_exec,
+	.patch_cond_exec = gfx_v9_0_ring_emit_patch_cond_exec,
	.emit_frame_cntl = gfx_v9_0_ring_emit_frame_cntl,
	.emit_wreg = gfx_v9_0_ring_emit_wreg,
	.emit_reg_wait = gfx_v9_0_ring_emit_reg_wait,
@@ -6794,6 +6946,11 @@ static void gfx_v9_0_set_ring_funcs(struct amdgpu_device *adev)
	for (i = 0; i < adev->gfx.num_gfx_rings; i++)
		adev->gfx.gfx_ring[i].funcs = &gfx_v9_0_ring_funcs_gfx;

+	if (adev->gfx.num_gfx_rings) {
+		for (i = 0; i < GFX9_NUM_SW_GFX_RINGS; i++)
+			adev->gfx.sw_gfx_ring[i].funcs = &gfx_v9_0_sw_ring_funcs_gfx;
+	}
+
	for (i = 0; i < adev->gfx.num_compute_rings; i++)
		adev->gfx.compute_ring[i].funcs = &gfx_v9_0_ring_funcs_compute;
 }
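With the muxer in place, fences are no longer signalled on the hardware gfx ring itself: the gfx_v9_0_eop_irq() hunk above fans EOP interrupts out to the software rings, unless the interrupt was the trailing fence of an in-flight preemption, which the muxer consumes. A minimal sketch of that dispatch, mirroring the hunk (example_process_gfx_eop() is a hypothetical helper):

static void example_process_gfx_eop(struct amdgpu_device *adev)
{
	int i;

	if (!adev->gfx.num_gfx_rings)
		return;

	/* the trailing fence of a preemption is consumed by the muxer */
	if (amdgpu_mcbp_handle_trailing_fence_irq(&adev->gfx.muxer))
		return;

	/* everything else signals the per-priority software rings */
	for (i = 0; i < GFX9_NUM_SW_GFX_RINGS; i++)
		amdgpu_fence_process(&adev->gfx.sw_gfx_ring[i]);
}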
