| author | Dave Airlie <airlied@redhat.com> | 2022-09-28 11:35:25 +1000 |
|---|---|---|
| committer | Dave Airlie <airlied@redhat.com> | 2022-09-28 11:35:25 +1000 |
| commit | 95d8c67187bcfaa519bafcdef9091cd906505454 (patch) | |
| tree | 965d6836c4a2737c91affe96728647289b71bb17 /drivers/gpu/drm/msm | |
| parent | d601cc93036aff7c603dda6d22cf6a8211dfed16 (diff) | |
| parent | e8b595f7b058c7909e410f3e0736d95e8f909d01 (diff) | |
Merge tag 'drm-msm-next-2022-09-22' of https://gitlab.freedesktop.org/drm/msm into drm-next
msm-next for v6.1
DPU:
- simplified VBIF configuration
- cleaned up CTL interfaces to accept indices rather than flush masks
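As a reading aid for the CTL change, here is a minimal caller-side sketch (the op names match the dpu_hw_ctl ops visible in the dpu_crtc.c/dpu_encoder.c hunks further down; the two wrapper functions are hypothetical, added only for illustration):

```c
/* Before: the caller computed a raw flush-register mask and pushed it. */
static void flush_mixer_old(struct dpu_hw_ctl *ctl, struct dpu_crtc_mixer *mixer)
{
	u32 flush_mask;

	flush_mask = ctl->ops.get_bitmask_mixer(ctl, mixer->hw_lm->idx);
	ctl->ops.update_pending_flush(ctl, flush_mask);
}

/* After: the caller passes the block index; the CTL code owns the bit layout. */
static void flush_mixer_new(struct dpu_hw_ctl *ctl, struct dpu_crtc_mixer *mixer)
{
	ctl->ops.update_pending_flush_mixer(ctl, mixer->hw_lm->idx);
}
```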
DSI:
- removed unused msm_display_dsc_config struct
- switch regulator calls to new bulk API
- switched to use PANEL_BRIDGE for directly attached panels
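The regulator conversion follows the standard consumer bulk API; a rough sketch, assuming two DSI supplies named "vdda" and "vdd" (the supply names and helper functions are illustrative, not the actual msm DSI code):

```c
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/regulator/consumer.h>

static struct regulator_bulk_data dsi_supplies[] = {
	{ .supply = "vdda" },	/* assumed supply names */
	{ .supply = "vdd" },
};

static int dsi_regulators_init(struct device *dev)
{
	/* one call fetches every supply instead of a hand-rolled loop */
	return devm_regulator_bulk_get(dev, ARRAY_SIZE(dsi_supplies),
				       dsi_supplies);
}

static int dsi_regulators_enable(void)
{
	/* enables all supplies in order, rolling back on failure */
	return regulator_bulk_enable(ARRAY_SIZE(dsi_supplies), dsi_supplies);
}
```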
DSI PHY:
- converted drivers to use parent_hws instead of parent_names
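The parent_hws conversion is a common-clock-framework idiom; a minimal sketch of the difference (generic <linux/clk-provider.h> usage; the clock names and the pll_hw variable are placeholders, not the real PHY driver symbols):

```c
#include <linux/clk-provider.h>

static struct clk_hw pll_hw;	/* parent clock registered by the same driver */

/* Before: the parent was looked up globally by string name. */
static const struct clk_init_data init_by_name = {
	.name = "dsi_byte_clk",
	.parent_names = (const char *[]){ "dsi_pll" },
	.num_parents = 1,
	.ops = &clk_fixed_factor_ops,
};

/* After: the parent is referenced directly through its clk_hw pointer. */
static const struct clk_init_data init_by_hw = {
	.name = "dsi_byte_clk",
	.parent_hws = (const struct clk_hw *[]){ &pll_hw },
	.num_parents = 1,
	.ops = &clk_fixed_factor_ops,
};
```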
DP:
- cleaned up pixel_rate handling
HDMI PHY:
- turned hdmi-phy-8996 into OF clk provider
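"OF clk provider" here means the PHY registers its PLL with the clock framework and exposes it to other device-tree nodes; a sketch using the generic helpers (the hdmi_phy struct and the registration function are invented for illustration):

```c
#include <linux/clk-provider.h>
#include <linux/platform_device.h>

struct hdmi_phy {
	struct clk_hw pll_hw;	/* assumed: the PLL implemented by the PHY */
};

static int hdmi_phy_register_clk(struct platform_device *pdev,
				 struct hdmi_phy *phy)
{
	int ret;

	ret = devm_clk_hw_register(&pdev->dev, &phy->pll_hw);
	if (ret)
		return ret;

	/* lets consumers reference the PLL via a phandle in their "clocks" property */
	return devm_of_clk_add_hw_provider(&pdev->dev, of_clk_hw_simple_get,
					   &phy->pll_hw);
}
```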
core:
- misc dt-bindings fixes
- choose eDP as primary display if it's available
- support getting interconnects from either the mdss or the mdp5/dpu
device nodes
gpu+gem:
- Shrinker + LRU re-work:
- adds a shared GEM LRU+shrinker helper and moves msm over to that
- reduces lock contention between retire and submit by avoiding the
  need to acquire the obj lock in the retire path (instead using the resv
  to check the obj's busyness in the shrinker)
- fix reclaim vs submit issues
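The "using resv" point boils down to testing the object's fences instead of taking its lock; a minimal sketch of that idea (generic dma_resv API; the helper name and the exact usage argument are assumptions, not the literal msm shrinker code):

```c
#include <drm/drm_gem.h>
#include <linux/dma-resv.h>

static bool gem_is_idle(struct drm_gem_object *obj)
{
	/*
	 * If every fence in the reservation object has signalled, nothing
	 * is still using the buffer, so the shrinker can consider evicting
	 * it without the retire path having to take the object lock.
	 */
	return dma_resv_test_signaled(obj->resv, dma_resv_usage_rw(true));
}
```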
- GEM fault injection for triggering userspace error paths
- Map/unmap optimization
- Improved robustness for a6xx GPU recovery
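For the recovery hardening, the a6xx_recover() hunk in the diff below is easier to follow as an ordered outline; this condensed sketch only restates those steps (it reuses the fields and calls shown in that hunk, e.g. gpu->cx_collapse, and is not a drop-in function):

```c
static void a6xx_recover_outline(struct msm_gpu *gpu, int active_submits)
{
	/* stop the SQE so the hung workload cannot keep touching memory */
	gpu_write(gpu, REG_A6XX_CP_SQE_CNTL, 3);

	/* drop every runtime-PM reference so the GPU can genuinely power off */
	if (active_submits)
		pm_runtime_put(&gpu->pdev->dev);
	pm_runtime_put_sync(&gpu->pdev->dev);

	/* ask the gpucc driver to wait for the cx gdsc to really collapse */
	reset_control_reset(gpu->cx_collapse);

	/* power back up and reprogram the hardware from scratch */
	pm_runtime_get_sync(&gpu->pdev->dev);
	msm_gpu_hw_init(gpu);
}
```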
Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Rob Clark <robdclark@gmail.com>
Link: https://patchwork.freedesktop.org/patch/msgid/CAF6AEGsrfrr9v1oR9S4oYfOs9jm=jbKQiwPBTrCRHrjYerJJFA@mail.gmail.com
Diffstat (limited to 'drivers/gpu/drm/msm')
59 files changed, 1405 insertions(+), 2040 deletions(-)
diff --git a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
index 0ab0e1dd8bbb..2c8b9899625b 100644
--- a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
@@ -68,7 +68,7 @@ static void a3xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
 	/* BIT(31) of CACHE_FLUSH_TS triggers CACHE_FLUSH_TS IRQ from GPU */
 	OUT_PKT3(ring, CP_EVENT_WRITE, 3);
-	OUT_RING(ring, CACHE_FLUSH_TS | BIT(31));
+	OUT_RING(ring, CACHE_FLUSH_TS | CP_EVENT_WRITE_0_IRQ);
 	OUT_RING(ring, rbmemptr(ring, fence));
 	OUT_RING(ring, submit->seqno);
diff --git a/drivers/gpu/drm/msm/adreno/a4xx_gpu.c b/drivers/gpu/drm/msm/adreno/a4xx_gpu.c
index 0c6b2a6d0b4c..7cb8d9849c07 100644
--- a/drivers/gpu/drm/msm/adreno/a4xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a4xx_gpu.c
@@ -62,7 +62,7 @@ static void a4xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
 	/* BIT(31) of CACHE_FLUSH_TS triggers CACHE_FLUSH_TS IRQ from GPU */
 	OUT_PKT3(ring, CP_EVENT_WRITE, 3);
-	OUT_RING(ring, CACHE_FLUSH_TS | BIT(31));
+	OUT_RING(ring, CACHE_FLUSH_TS | CP_EVENT_WRITE_0_IRQ);
 	OUT_RING(ring, rbmemptr(ring, fence));
 	OUT_RING(ring, submit->seqno);
diff --git a/drivers/gpu/drm/msm/adreno/a6xx.xml.h b/drivers/gpu/drm/msm/adreno/a6xx.xml.h
index b03e2c413ab1..beea4a7fc1df 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx.xml.h
+++ b/drivers/gpu/drm/msm/adreno/a6xx.xml.h
@@ -1413,6 +1413,10 @@ static inline uint32_t REG_A6XX_RBBM_PERFCTR_RBBM_SEL(uint32_t i0) { return 0x00
 #define REG_A6XX_RBBM_GBIF_CLIENT_QOS_CNTL		0x00000011
 
+#define REG_A6XX_RBBM_GBIF_HALT				0x00000016
+
+#define REG_A6XX_RBBM_GBIF_HALT_ACK			0x00000017
+
 #define REG_A6XX_RBBM_WAIT_FOR_GPU_IDLE_CMD		0x0000001c
 #define A6XX_RBBM_WAIT_FOR_GPU_IDLE_CMD_WAIT_GPU_IDLE	0x00000001
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
index 310a317885a1..e033d6a67a20 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
@@ -873,9 +873,47 @@ static void a6xx_gmu_rpmh_off(struct a6xx_gmu *gmu)
 		(val & 1), 100, 1000);
 }
 
+#define GBIF_CLIENT_HALT_MASK	BIT(0)
+#define GBIF_ARB_HALT_MASK	BIT(1)
+
+static void a6xx_bus_clear_pending_transactions(struct adreno_gpu *adreno_gpu)
+{
+	struct msm_gpu *gpu = &adreno_gpu->base;
+
+	if (!a6xx_has_gbif(adreno_gpu)) {
+		gpu_write(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL0, 0xf);
+		spin_until((gpu_read(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL1) &
+			0xf) == 0xf);
+		gpu_write(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL0, 0);
+
+		return;
+	}
+
+	/* Halt the gx side of GBIF */
+	gpu_write(gpu, REG_A6XX_RBBM_GBIF_HALT, 1);
+	spin_until(gpu_read(gpu, REG_A6XX_RBBM_GBIF_HALT_ACK) & 1);
+
+	/* Halt new client requests on GBIF */
+	gpu_write(gpu, REG_A6XX_GBIF_HALT, GBIF_CLIENT_HALT_MASK);
+	spin_until((gpu_read(gpu, REG_A6XX_GBIF_HALT_ACK) &
+			(GBIF_CLIENT_HALT_MASK)) == GBIF_CLIENT_HALT_MASK);
+
+	/* Halt all AXI requests on GBIF */
+	gpu_write(gpu, REG_A6XX_GBIF_HALT, GBIF_ARB_HALT_MASK);
+	spin_until((gpu_read(gpu, REG_A6XX_GBIF_HALT_ACK) &
+			(GBIF_ARB_HALT_MASK)) == GBIF_ARB_HALT_MASK);
+
+	/* The GBIF halt needs to be explicitly cleared */
+	gpu_write(gpu, REG_A6XX_GBIF_HALT, 0x0);
+}
+
 /* Force the GMU off in case it isn't responsive */
 static void a6xx_gmu_force_off(struct a6xx_gmu *gmu)
 {
+	struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
+	struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
+	struct msm_gpu *gpu = &adreno_gpu->base;
+
 	/* Flush all the queues */
 	a6xx_hfi_stop(gmu);
@@ -887,6 +925,15 @@ static void a6xx_gmu_force_off(struct a6xx_gmu *gmu)
 	/* Make sure there are no outstanding RPMh votes */
 	a6xx_gmu_rpmh_off(gmu);
+
+	/* Halt the gmu cm3 core */
+	gmu_write(gmu, REG_A6XX_GMU_CM3_SYSRESET, 1);
+
+	a6xx_bus_clear_pending_transactions(adreno_gpu);
+
+	/* Reset GPU core blocks */
+	gpu_write(gpu, REG_A6XX_RBBM_SW_RESET_CMD, 1);
+	udelay(100);
 }
 
 static void a6xx_gmu_set_initial_freq(struct msm_gpu *gpu, struct a6xx_gmu *gmu)
@@ -1014,36 +1061,6 @@ bool a6xx_gmu_isidle(struct a6xx_gmu *gmu)
 	return true;
 }
 
-#define GBIF_CLIENT_HALT_MASK	BIT(0)
-#define GBIF_ARB_HALT_MASK	BIT(1)
-
-static void a6xx_bus_clear_pending_transactions(struct adreno_gpu *adreno_gpu)
-{
-	struct msm_gpu *gpu = &adreno_gpu->base;
-
-	if (!a6xx_has_gbif(adreno_gpu)) {
-		gpu_write(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL0, 0xf);
-		spin_until((gpu_read(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL1) &
-			0xf) == 0xf);
-		gpu_write(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL0, 0);
-
-		return;
-	}
-
-	/* Halt new client requests on GBIF */
-	gpu_write(gpu, REG_A6XX_GBIF_HALT, GBIF_CLIENT_HALT_MASK);
-	spin_until((gpu_read(gpu, REG_A6XX_GBIF_HALT_ACK) &
-			(GBIF_CLIENT_HALT_MASK)) == GBIF_CLIENT_HALT_MASK);
-
-	/* Halt all AXI requests on GBIF */
-	gpu_write(gpu, REG_A6XX_GBIF_HALT, GBIF_ARB_HALT_MASK);
-	spin_until((gpu_read(gpu, REG_A6XX_GBIF_HALT_ACK) &
-			(GBIF_ARB_HALT_MASK)) == GBIF_ARB_HALT_MASK);
-
-	/* The GBIF halt needs to be explicitly cleared */
-	gpu_write(gpu, REG_A6XX_GBIF_HALT, 0x0);
-}
-
 /* Gracefully try to shut down the GMU and by extension the GPU */
 static void a6xx_gmu_shutdown(struct a6xx_gmu *gmu)
 {
@@ -1069,7 +1086,11 @@ static void a6xx_gmu_shutdown(struct a6xx_gmu *gmu)
 		a6xx_bus_clear_pending_transactions(adreno_gpu);
 
 		/* tell the GMU we want to slumber */
-		a6xx_gmu_notify_slumber(gmu);
+		ret = a6xx_gmu_notify_slumber(gmu);
+		if (ret) {
+			a6xx_gmu_force_off(gmu);
+			return;
+		}
 
 		ret = gmu_poll_timeout(gmu, REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS, val,
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
index 4d501100b9e4..fdc578016e0b 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
@@ -10,6 +10,7 @@
 #include <linux/bitfield.h>
 #include <linux/devfreq.h>
+#include <linux/reset.h>
 #include <linux/soc/qcom/llcc-qcom.h>
 
 #define GPU_PAS_ID 13
@@ -146,7 +147,7 @@ static void a6xx_set_pagetable(struct a6xx_gpu *a6xx_gpu,
 	 */
 	OUT_PKT7(ring, CP_EVENT_WRITE, 1);
-	OUT_RING(ring, 0x31);
+	OUT_RING(ring, CACHE_INVALIDATE);
 
 	if (!sysprof) {
 		/*
@@ -987,6 +988,10 @@ static int hw_init(struct msm_gpu *gpu)
 	/* Make sure the GMU keeps the GPU on while we set it up */
 	a6xx_gmu_set_oob(&a6xx_gpu->gmu, GMU_OOB_GPU_SET);
 
+	/* Clear GBIF halt in case GX domain was not collapsed */
+	if (a6xx_has_gbif(adreno_gpu))
+		gpu_write(gpu, REG_A6XX_RBBM_GBIF_HALT, 0);
+
 	gpu_write(gpu, REG_A6XX_RBBM_SECVID_TSB_CNTL, 0);
 
 	/*
@@ -1261,7 +1266,7 @@ static void a6xx_recover(struct msm_gpu *gpu)
 {
 	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
 	struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
-	int i;
+	int i, active_submits;
 
 	adreno_dump_info(gpu);
@@ -1272,14 +1277,46 @@ static void a6xx_recover(struct msm_gpu *gpu)
 	if (hang_debug)
 		a6xx_dump(gpu);
 
+	/* Halt SQE first */
+	gpu_write(gpu, REG_A6XX_CP_SQE_CNTL, 3);
+
 	/*
 	 * Turn off keep alive that might have been enabled by the hang
 	 * interrupt
 	 */
 	gmu_write(&a6xx_gpu->gmu, REG_A6XX_GMU_GMU_PWR_COL_KEEPALIVE, 0);
 
-	gpu->funcs->pm_suspend(gpu);
-	gpu->funcs->pm_resume(gpu);
+	pm_runtime_dont_use_autosuspend(&gpu->pdev->dev);
+
+	/* active_submit won't change until we make a submission */
+	mutex_lock(&gpu->active_lock);
+	active_submits = gpu->active_submits;
+
+	/*
+	 * Temporarily clear active_submits count to silence a WARN() in the
+	 * runtime suspend cb
+	 */
+	gpu->active_submits = 0;
+
+	/* Drop the rpm refcount from active submits */
+	if (active_submits)
+		pm_runtime_put(&gpu->pdev->dev);
+
+	/* And the final one from recover worker */
+	pm_runtime_put_sync(&gpu->pdev->dev);
+
+	/* Call into gpucc driver to poll for cx gdsc collapse */
+	reset_control_reset(gpu->cx_collapse);
+
+	pm_runtime_use_autosuspend(&gpu->pdev->dev);
+
+	if (active_submits)
+		pm_runtime_get(&gpu->pdev->dev);
+
+	pm_runtime_get_sync(&gpu->pdev->dev);
+
+	gpu->active_submits = active_submits;
+	mutex_unlock(&gpu->active_lock);
 
 	msm_gpu_hw_init(gpu);
 }
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
index 781dcd3fb283..13ce321283ff 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
@@ -412,7 +412,6 @@ static void _dpu_crtc_blend_setup_mixer(struct drm_crtc *crtc,
 	struct dpu_format *format;
 	struct dpu_hw_ctl *ctl = mixer->lm_ctl;
-	u32 flush_mask;
 	uint32_t stage_idx, lm_idx;
 	int zpos_cnt[DPU_STAGE_MAX + 1] = { 0 };
 	bool bg_alpha_enable = false;
@@ -420,6 +419,8 @@ static void _dpu_crtc_blend_setup_mixer(struct drm_crtc *crtc,
 	memset(fetch_active, 0, sizeof(fetch_active));
 	drm_atomic_crtc_for_each_plane(plane, crtc) {
+		enum dpu_sspp sspp_idx;
+
 		state = plane->state;
 		if (!state)
 			continue;
@@ -430,14 +431,14 @@ static void _dpu_crtc_blend_setup_mixer(struct drm_crtc *crtc,
 		pstate = to_dpu_plane_state(state);
 		fb = state->fb;
 
-		dpu_plane_get_ctl_flush(plane, ctl, &flush_mask);
-		set_bit(dpu_plane_pipe(plane), fetch_active);
+		sspp_idx = dpu_plane_pipe(plane);
+		set_bit(sspp_idx, fetch_active);
 
 		DRM_DEBUG_ATOMIC("crtc %d stage:%d - plane %d sspp %d fb %d\n",
 				crtc->base.id,
 				pstate->stage,
 				plane->base.id,
-				dpu_plane_pipe(plane) - SSPP_VIG0,
+				sspp_idx - SSPP_VIG0,
 				state->fb ? state->fb->base.id : -1);
 
 		format = to_dpu_format(msm_framebuffer_format(pstate->base.fb));
@@ -447,13 +448,13 @@ static void _dpu_crtc_blend_setup_mixer(struct drm_crtc *crtc,
 		stage_idx = zpos_cnt[pstate->stage]++;
 		stage_cfg->stage[pstate->stage][stage_idx] =
-					dpu_plane_pipe(plane);
+					sspp_idx;
 
 		stage_cfg->multirect_index[pstate->stage][stage_idx] =
 					pstate->multirect_index;
 
 		trace_dpu_crtc_setup_mixer(DRMID(crtc), DRMID(plane),
 					   state, pstate, stage_idx,
-					   dpu_plane_pipe(plane) - SSPP_VIG0,
+					   sspp_idx - SSPP_VIG0,
 					   format->base.pixel_format,
 					   fb ? fb->modifier : 0);
@@ -462,7 +463,8 @@ static void _dpu_crtc_blend_setup_mixer(struct drm_crtc *crtc,
 			_dpu_crtc_setup_blend_cfg(mixer + lm_idx,
 						pstate, format);
 
-			mixer[lm_idx].flush_mask |= flush_mask;
+			mixer[lm_idx].lm_ctl->ops.update_pending_flush_sspp(mixer[lm_idx].lm_ctl,
+									    sspp_idx);
 
 			if (bg_alpha_enable && !format->alpha_enable)
 				mixer[lm_idx].mixer_op_mode = 0;
@@ -496,7 +498,6 @@ static void _dpu_crtc_blend_setup(struct drm_crtc *crtc)
 	for (i = 0; i < cstate->num_mixers; i++) {
 		mixer[i].mixer_op_mode = 0;
-		mixer[i].flush_mask = 0;
 		if (mixer[i].lm_ctl->ops.clear_all_blendstages)
 			mixer[i].lm_ctl->ops.clear_all_blendstages(
 					mixer[i].lm_ctl);
@@ -513,17 +514,14 @@ static void _dpu_crtc_blend_setup(struct drm_crtc *crtc)
 
 		lm->ops.setup_alpha_out(lm, mixer[i].mixer_op_mode);
 
-		mixer[i].flush_mask |= ctl->ops.get_bitmask_mixer(ctl,
-			mixer[i].hw_lm->idx);
-
 		/* stage config flush mask */
-		ctl->ops.update_pending_flush(ctl, mixer[i].flush_mask);
+		ctl->ops.update_pending_flush_mixer(ctl,
+			mixer[i].hw_lm->idx);
 
-		DRM_DEBUG_ATOMIC("lm %d, op_mode 0x%X, ctl %d, flush mask 0x%x\n",
+		DRM_DEBUG_ATOMIC("lm %d, op_mode 0x%X, ctl %d\n",
 			mixer[i].hw_lm->idx - LM_0,
 			mixer[i].mixer_op_mode,
-			ctl->idx - CTL_0,
-			mixer[i].flush_mask);
+			ctl->idx - CTL_0);
 
 		ctl->ops.setup_blendstage(ctl, mixer[i].hw_lm->idx,
 			&stage_cfg);
@@ -767,16 +765,9 @@ static void _dpu_crtc_setup_cp_blocks(struct drm_crtc *crtc)
 			dspp->ops.setup_pcc(dspp, &cfg);
 		}
 
-		mixer[i].flush_mask |= ctl->ops.get_bitmask_dspp(ctl,
-			mixer[i].hw_dspp->idx);
-
 		/* stage config flush mask */
-		ctl->ops.update_pending_flush(ctl, mixer[i].flush_mask);
-
-		DRM_DEBUG_ATOMIC("lm %d, ctl %d, flush mask 0x%x\n",
-			mixer[i].hw_lm->idx - DSPP_0,
-			ctl->idx - CTL_0,
-			mixer[i].flush_mask);
+		ctl->ops.update_pending_flush_dspp(ctl,
+			mixer[i].hw_dspp->idx);
 	}
 }
@@ -1235,17 +1226,8 @@ static int dpu_crtc_atomic_check(struct drm_crtc *crtc,
 	}
 
 	for (i = 1; i < SSPP_MAX; i++) {
-		if (pipe_staged[i]) {
+		if (pipe_staged[i])
 			dpu_plane_clear_multirect(pipe_staged[i]);
-
-			if (is_dpu_plane_virtual(pipe_staged[i]->plane)) {
-				DPU_ERROR(
-					"r1 only virt plane:%d not supported\n",
-					pipe_staged[i]->plane->base.id);
-				rc = -EINVAL;
-				goto end;
-			}
-		}
 	}
 
 	z_pos = -1;
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h
index 9b67645c2574..539b68b1626a 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h
@@ -97,7 +97,6 @@ struct dpu_crtc_mixer {
 	struct dpu_hw_ctl *lm_ctl;
 	struct dpu_hw_dspp *hw_dspp;
 	u32 mixer_op_mode;
-	u32 flush_mask;
 };
 
 /**
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
index c682d4e02d1b..9c6817b5a194 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
@@ -162,7 +162,7 @@ enum dpu_enc_rc_states {
  * @vsync_event_work:		worker to handle vsync event for autorefresh
  * @topology:			topology of the display
  * @idle_timeout:		idle timeout duration in milliseconds
- * @dsc:			msm_display_dsc_config pointer, for DSC-enabled encoders
+ * @dsc:			drm_dsc_config pointer, for DSC-enabled encoders
  */
 struct dpu_encoder_virt {
 	struct drm_encoder base;
@@ -208,7 +208,7 @@ struct dpu_encoder_virt {
 	bool wide_bus_en;
 
 	/* DSC configuration */
-	struct msm_display_dsc_config *dsc;
+	struct drm_dsc_config *dsc;
 };
 
 #define to_dpu_encoder_virt(x) container_of(x, struct dpu_encoder_virt, base)
@@ -1791,12 +1791,12 @@ static void dpu_encoder_vsync_event_work_handler(struct kthread_work *work)
 }
 
 static u32
-dpu_encoder_dsc_initial_line_calc(struct msm_display_dsc_config *dsc,
+dpu_encoder_dsc_initial_line_calc(struct drm_dsc_config *dsc,
 				  u32 enc_ip_width)
 {
 	int ssm_delay, total_pixels, soft_slice_per_enc;
 
-	soft_slice_per_enc = enc_ip_width / dsc->drm->slice_width;
+	soft_slice_per_enc = enc_ip_width / dsc->slice_width;
 
 	/*
 	 * minimum number of initial line pixels is a sum of:
@@ -1808,16 +1808,16 @@ dpu_encoder_dsc_initial_line_calc(struct msm_display_dsc_config *dsc,
 	 * 5. 6 additional pixels as the output of the rate buffer is
 	 *    48 bits wide
 	 */
-	ssm_delay = ((dsc->drm->bits_per_component < 10) ? 84 : 92);
-	total_pixels = ssm_delay * 3 + dsc->drm->initial_xmit_delay + 47;
+	ssm_delay = ((dsc->bits_per_component < 10) ? 84 : 92);
+	total_pixels = ssm_delay * 3 + dsc->initial_xmit_delay + 47;
 	if (soft_slice_per_enc > 1)
 		total_pixels += (ssm_delay * 3);
-	return DIV_ROUND_UP(total_pixels, dsc->drm->slice_width);
+	return DIV_ROUND_UP(total_pixels, dsc->slice_width);
 }
 
 static void dpu_encoder_dsc_pipe_cfg(struct dpu_hw_dsc *hw_dsc,
 				     struct dpu_hw_pingpong *hw_pp,
-				     struct msm_display_dsc_config *dsc,
+				     struct drm_dsc_config *dsc,
 				     u32 common_mode,
 				     u32 initial_lines)
 {
@@ -1835,7 +1835,7 @@ static void dpu_encoder_dsc_pipe_cfg(struct dpu_hw_dsc *hw_dsc,
 }
 
 static void dpu_encoder_prep_dsc(struct dpu_encoder_virt *dpu_enc,
-				 struct msm_display_dsc_config *dsc)
+				 struct drm_dsc_config *dsc)
 {
 	/* coding only for 2LM, 2enc, 1 dsc config */
 	struct dpu_encoder_phys *enc_master = dpu_enc->cur_master;
@@ -1858,14 +1858,15 @@ static void dpu_encoder_prep_dsc(struct dpu_encoder_virt *dpu_enc,
 		}
 	}
 
-	pic_width = dsc->drm->pic_width;
+	dsc_common_mode = 0;
+	pic_width = dsc->pic_width;
 
 	dsc_common_mode = DSC_MODE_MULTIPLEX | DSC_MODE_SPLIT_PANEL;
 	if (enc_master->intf_mode == INTF_MODE_VIDEO)
 		dsc_common_mode |= DSC_MODE_VIDEO;
 
-	this_frame_slices = pic_width / dsc->drm->slice_width;
-	intf_ip_w = this_frame_slices * dsc->drm->slice_width;
+	this_frame_slices = pic_width / dsc->slice_width;
+	intf_ip_w = this_frame_slices * dsc->slice_width;
 
 	/*
 	 * dsc merge case: when using 2 encoders for the same stream,
@@ -1980,7 +1981,6 @@ static void dpu_encoder_helper_reset_mixers(struct dpu_encoder_phys *phys_enc)
 {
 	struct dpu_hw_mixer_cfg mixer;
 	int i, num_lm;
-	u32 flush_mask = 0;
 	struct dpu_global_state *global_state;
 	struct dpu_hw_blk *hw_lm[2];
 	struct dpu_hw_mixer *hw_mixer[2];
@@ -1999,9 +1999,8 @@ static void dpu_encoder_helper_reset_mixers(struct dpu_encoder_phys *phys_enc)
 	for (i = 0; i < num_lm; i++) {
 		hw_mixer[i] = to_dpu_hw_mixer(hw_lm[i]);
-		flush_mask = phys_enc->hw_ctl->ops.get_bitmask_mixer(ctl, hw_mixer[i]->idx);
-		if (phys_enc->hw_ctl->ops.update_pending_flush)
-			phys_enc->hw_ctl->ops.update_pending_flush(ctl, flush_mask);
+		if (phys_enc->hw_ctl->ops.update_pending_flush_mixer)
+			phys_enc->hw_ctl->ops.update_pending_flush_mixer(ctl, hw_mixer[i]->idx);
 
 		/* clear all blendstages */
 		if (phys_enc->hw_ctl->ops.setup_blendstage)
@@ -2061,6 +2060,12 @@ void dpu_encoder_helper_phys_cleanup(struct dpu_encoder_phys *phys_enc)
 
 	intf_cfg.stream_sel = 0; /* Don't care value for vide
