Diffstat (limited to 'drivers/gpu/drm/i915/gt')
33 files changed, 1009 insertions, 271 deletions
diff --git a/drivers/gpu/drm/i915/gt/gen6_ppgtt.c b/drivers/gpu/drm/i915/gt/gen6_ppgtt.c
index c30adc05fa98..680bd9442eb0 100644
--- a/drivers/gpu/drm/i915/gt/gen6_ppgtt.c
+++ b/drivers/gpu/drm/i915/gt/gen6_ppgtt.c
@@ -131,17 +131,17 @@ static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
 	vaddr = kmap_atomic_px(i915_pt_entry(pd, act_pt));
 	do {
-		GEM_BUG_ON(iter.sg->length < I915_GTT_PAGE_SIZE);
+		GEM_BUG_ON(sg_dma_len(iter.sg) < I915_GTT_PAGE_SIZE);
 		vaddr[act_pte] = pte_encode | GEN6_PTE_ADDR_ENCODE(iter.dma);
 
 		iter.dma += I915_GTT_PAGE_SIZE;
 		if (iter.dma == iter.max) {
 			iter.sg = __sg_next(iter.sg);
-			if (!iter.sg)
+			if (!iter.sg || sg_dma_len(iter.sg) == 0)
 				break;
 
 			iter.dma = sg_dma_address(iter.sg);
-			iter.max = iter.dma + iter.sg->length;
+			iter.max = iter.dma + sg_dma_len(iter.sg);
 		}
 
 		if (++act_pte == GEN6_PTES) {
diff --git a/drivers/gpu/drm/i915/gt/gen8_ppgtt.c b/drivers/gpu/drm/i915/gt/gen8_ppgtt.c
index 38c7069b7749..a37c968ef8f7 100644
--- a/drivers/gpu/drm/i915/gt/gen8_ppgtt.c
+++ b/drivers/gpu/drm/i915/gt/gen8_ppgtt.c
@@ -372,19 +372,19 @@ gen8_ppgtt_insert_pte(struct i915_ppgtt *ppgtt,
 		pd = i915_pd_entry(pdp, gen8_pd_index(idx, 2));
 	vaddr = kmap_atomic_px(i915_pt_entry(pd, gen8_pd_index(idx, 1)));
 	do {
-		GEM_BUG_ON(iter->sg->length < I915_GTT_PAGE_SIZE);
+		GEM_BUG_ON(sg_dma_len(iter->sg) < I915_GTT_PAGE_SIZE);
 		vaddr[gen8_pd_index(idx, 0)] = pte_encode | iter->dma;
 
 		iter->dma += I915_GTT_PAGE_SIZE;
 		if (iter->dma >= iter->max) {
 			iter->sg = __sg_next(iter->sg);
-			if (!iter->sg) {
+			if (!iter->sg || sg_dma_len(iter->sg) == 0) {
 				idx = 0;
 				break;
 			}
 
 			iter->dma = sg_dma_address(iter->sg);
-			iter->max = iter->dma + iter->sg->length;
+			iter->max = iter->dma + sg_dma_len(iter->sg);
 		}
 
 		if (gen8_pd_index(++idx, 0) == 0) {
@@ -413,8 +413,8 @@ static void gen8_ppgtt_insert_huge(struct i915_vma *vma,
 				   u32 flags)
 {
 	const gen8_pte_t pte_encode = gen8_pte_encode(0, cache_level, flags);
+	unsigned int rem = sg_dma_len(iter->sg);
 	u64 start = vma->node.start;
-	dma_addr_t rem = iter->sg->length;
 
 	GEM_BUG_ON(!i915_vm_is_4lvl(vma->vm));
 
@@ -456,7 +456,7 @@ static void gen8_ppgtt_insert_huge(struct i915_vma *vma,
 		}
 
 		do {
-			GEM_BUG_ON(iter->sg->length < page_size);
+			GEM_BUG_ON(sg_dma_len(iter->sg) < page_size);
 			vaddr[index++] = encode | iter->dma;
 
 			start += page_size;
@@ -467,7 +467,10 @@ static void gen8_ppgtt_insert_huge(struct i915_vma *vma,
 				if (!iter->sg)
 					break;
 
-				rem = iter->sg->length;
+				rem = sg_dma_len(iter->sg);
+				if (!rem)
+					break;
+
 				iter->dma = sg_dma_address(iter->sg);
 				iter->max = iter->dma + rem;
 
@@ -525,7 +528,7 @@ static void gen8_ppgtt_insert_huge(struct i915_vma *vma,
 		}
 
 		vma->page_sizes.gtt |= page_size;
-	} while (iter->sg);
+	} while (iter->sg && sg_dma_len(iter->sg));
 }
 
 static void gen8_ppgtt_insert(struct i915_address_space *vm,
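The gen6/gen8 hunks above consistently replace sg->length (the CPU-side segment length) with sg_dma_len(), because after dma_map_sg() the DMA segment layout can differ from the CPU layout and the mapped list may end in entries whose DMA length is zero; hence the extra termination checks. A minimal sketch of the corrected walk (walk_dma_spans() is illustrative, not a driver function):

#include <linux/scatterlist.h>

/*
 * Sketch only: walk the DMA side of a mapped scatterlist the way the
 * fixed inserters do, terminating on a zero sg_dma_len() entry.
 */
static void walk_dma_spans(struct scatterlist *sgl)
{
	struct scatterlist *sg;

	for (sg = sgl; sg; sg = sg_next(sg)) {
		dma_addr_t dma = sg_dma_address(sg);
		unsigned int len = sg_dma_len(sg);	/* not sg->length */

		if (!len)	/* unused tail entry after dma_map_sg() */
			break;

		/* ... write PTEs covering [dma, dma + len) ... */
	}
}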
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_cs.c b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
index efdeb7b7b2a0..0b31670343f5 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
@@ -305,8 +305,9 @@ static int intel_engine_setup(struct intel_gt *gt, enum intel_engine_id id)
 	engine->i915 = i915;
 	engine->gt = gt;
 	engine->uncore = gt->uncore;
-	engine->hw_id = engine->guc_id = info->hw_id;
 	engine->mmio_base = __engine_mmio_base(i915, info->mmio_bases);
+	engine->hw_id = info->hw_id;
+	engine->guc_id = MAKE_GUC_ID(info->class, info->instance);
 	engine->class = info->class;
 	engine->instance = info->instance;
 
@@ -1600,6 +1601,41 @@ static unsigned long list_count(struct list_head *list)
 	return count;
 }
 
+static unsigned long read_ul(void *p, size_t x)
+{
+	return *(unsigned long *)(p + x);
+}
+
+static void print_properties(struct intel_engine_cs *engine,
+			     struct drm_printer *m)
+{
+	static const struct pmap {
+		size_t offset;
+		const char *name;
+	} props[] = {
+#define P(x) { \
+	.offset = offsetof(typeof(engine->props), x), \
+	.name = #x \
+}
+		P(heartbeat_interval_ms),
+		P(max_busywait_duration_ns),
+		P(preempt_timeout_ms),
+		P(stop_timeout_ms),
+		P(timeslice_duration_ms),
+
+		{},
+#undef P
+	};
+	const struct pmap *p;
+
+	drm_printf(m, "\tProperties:\n");
+	for (p = props; p->name; p++)
+		drm_printf(m, "\t\t%s: %lu [default %lu]\n",
+			   p->name,
+			   read_ul(&engine->props, p->offset),
+			   read_ul(&engine->defaults, p->offset));
+}
+
 void intel_engine_dump(struct intel_engine_cs *engine,
 		       struct drm_printer *m,
 		       const char *header, ...)
@@ -1642,6 +1678,7 @@ void intel_engine_dump(struct intel_engine_cs *engine,
 	drm_printf(m, "\tReset count: %d (global %d)\n",
 		   i915_reset_engine_count(error, engine),
 		   i915_reset_count(error));
+	print_properties(engine, m);
 
 	drm_printf(m, "\tRequests:\n");
 
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c b/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c
index 5067d0524d4b..9060385cd69e 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c
@@ -41,6 +41,8 @@ static void idle_pulse(struct intel_engine_cs *engine, struct i915_request *rq)
 {
 	engine->wakeref_serial = READ_ONCE(engine->serial) + 1;
 	i915_request_add_active_barriers(rq);
+	if (!engine->heartbeat.systole && intel_engine_has_heartbeat(engine))
+		engine->heartbeat.systole = i915_request_get(rq);
 }
 
 static void show_heartbeat(const struct i915_request *rq,
@@ -144,8 +146,6 @@ static void heartbeat(struct work_struct *wrk)
 		goto unlock;
 
 	idle_pulse(engine, rq);
-	if (engine->i915->params.enable_hangcheck)
-		engine->heartbeat.systole = i915_request_get(rq);
 
 	__i915_request_commit(rq);
 	__i915_request_queue(rq, &attr);
@@ -153,7 +153,7 @@ static void heartbeat(struct work_struct *wrk)
 unlock:
 	mutex_unlock(&ce->timeline->mutex);
 out:
-	if (!next_heartbeat(engine))
+	if (!engine->i915->params.enable_hangcheck || !next_heartbeat(engine))
 		i915_request_put(fetch_and_zero(&engine->heartbeat.systole));
 	intel_engine_pm_put(engine);
 }
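print_properties() above uses a table-of-offsets idiom: offsetof() plus a stringised field name allow one loop to print each member of engine->props next to its engine->defaults counterpart. The same idiom in miniature, as a self-contained user-space sketch (struct opts and its fields are invented for illustration):

#include <stddef.h>
#include <stdio.h>

struct opts { unsigned long timeout_ms, retries; };

static const struct { size_t offset; const char *name; } props[] = {
#define P(x) { offsetof(struct opts, x), #x }
	P(timeout_ms),
	P(retries),
#undef P
	{ 0, NULL },
};

int main(void)
{
	struct opts cur = { 640, 3 }, def = { 500, 2 };

	/* one loop prints every field: current value vs default */
	for (int i = 0; props[i].name; i++)
		printf("%s: %lu [default %lu]\n", props[i].name,
		       *(unsigned long *)((char *)&cur + props[i].offset),
		       *(unsigned long *)((char *)&def + props[i].offset));
	return 0;
}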
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_pm.c b/drivers/gpu/drm/i915/gt/intel_engine_pm.c
index f7b2e07e2229..499b09cb4acf 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_pm.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_pm.c
@@ -17,6 +17,25 @@
 #include "intel_ring.h"
 #include "shmem_utils.h"
 
+static void dbg_poison_ce(struct intel_context *ce)
+{
+	if (!IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
+		return;
+
+	if (ce->state) {
+		struct drm_i915_gem_object *obj = ce->state->obj;
+		int type = i915_coherent_map_type(ce->engine->i915);
+		void *map;
+
+		map = i915_gem_object_pin_map(obj, type);
+		if (!IS_ERR(map)) {
+			memset(map, CONTEXT_REDZONE, obj->base.size);
+			i915_gem_object_flush_map(obj);
+			i915_gem_object_unpin_map(obj);
+		}
+	}
+}
+
 static int __engine_unpark(struct intel_wakeref *wf)
 {
 	struct intel_engine_cs *engine =
@@ -32,20 +51,14 @@ static int __engine_unpark(struct intel_wakeref *wf)
 	if (ce) {
 		GEM_BUG_ON(test_bit(CONTEXT_VALID_BIT, &ce->flags));
 
+		/* Flush all pending HW writes before we touch the context */
+		while (unlikely(intel_context_inflight(ce)))
+			intel_engine_flush_submission(engine);
+
 		/* First poison the image to verify we never fully trust it */
-		if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM) && ce->state) {
-			struct drm_i915_gem_object *obj = ce->state->obj;
-			int type = i915_coherent_map_type(engine->i915);
-			void *map;
-
-			map = i915_gem_object_pin_map(obj, type);
-			if (!IS_ERR(map)) {
-				memset(map, CONTEXT_REDZONE, obj->base.size);
-				i915_gem_object_flush_map(obj);
-				i915_gem_object_unpin_map(obj);
-			}
-		}
+		dbg_poison_ce(ce);
 
+		/* Scrub the context image after our loss of control */
 		ce->ops->reset(ce);
 	}
diff --git a/drivers/gpu/drm/i915/gt/intel_ggtt.c b/drivers/gpu/drm/i915/gt/intel_ggtt.c
index 81c05f551b9c..cf94525be2c1 100644
--- a/drivers/gpu/drm/i915/gt/intel_ggtt.c
+++ b/drivers/gpu/drm/i915/gt/intel_ggtt.c
@@ -835,7 +835,7 @@ static int gen8_gmch_probe(struct i915_ggtt *ggtt)
 	u16 snb_gmch_ctl;
 
 	/* TODO: We're not aware of mappable constraints on gen8 yet */
-	if (!IS_DGFX(i915)) {
+	if (!HAS_LMEM(i915)) {
 		ggtt->gmadr = pci_resource(pdev, 2);
 		ggtt->mappable_end = resource_size(&ggtt->gmadr);
 	}
@@ -1383,7 +1383,7 @@ intel_partial_pages(const struct i915_ggtt_view *view,
 	if (ret)
 		goto err_sg_alloc;
 
-	iter = i915_gem_object_get_sg(obj, view->partial.offset, &offset);
+	iter = i915_gem_object_get_sg_dma(obj, view->partial.offset, &offset);
 	GEM_BUG_ON(!iter);
 
 	sg = st->sgl;
@@ -1391,7 +1391,7 @@ intel_partial_pages(const struct i915_ggtt_view *view,
 	do {
 		unsigned int len;
 
-		len = min(iter->length - (offset << PAGE_SHIFT),
+		len = min(sg_dma_len(iter) - (offset << PAGE_SHIFT),
 			  count << PAGE_SHIFT);
 		sg_set_page(sg, NULL, len, 0);
 		sg_dma_address(sg) =
diff --git a/drivers/gpu/drm/i915/gt/intel_gt.c b/drivers/gpu/drm/i915/gt/intel_gt.c
index 39b428c5049c..44f1d51e5ae5 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt.c
@@ -614,6 +614,8 @@ void intel_gt_driver_remove(struct intel_gt *gt)
 
 void intel_gt_driver_unregister(struct intel_gt *gt)
 {
+	intel_wakeref_t wakeref;
+
 	intel_rps_driver_unregister(&gt->rps);
 
 	/*
@@ -622,16 +624,15 @@ void intel_gt_driver_unregister(struct intel_gt *gt)
 	 * resources.
 	 */
 	intel_gt_set_wedged(gt);
+
+	/* Scrub all HW state upon release */
+	with_intel_runtime_pm(gt->uncore->rpm, wakeref)
+		__intel_gt_reset(gt, ALL_ENGINES);
 }
 
 void intel_gt_driver_release(struct intel_gt *gt)
 {
 	struct i915_address_space *vm;
-	intel_wakeref_t wakeref;
-
-	/* Scrub all HW state upon release */
-	with_intel_runtime_pm(gt->uncore->rpm, wakeref)
-		__intel_gt_reset(gt, ALL_ENGINES);
 
 	vm = fetch_and_zero(&gt->vm);
 	if (vm) /* FIXME being called twice on error paths :( */
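dbg_poison_ce() factors out a debug pattern this series leans on: before reusing state the hardware may have scribbled over, overwrite it with a known redzone byte so that any path which consumes the image without scrubbing it first becomes visible. Reduced to its essentials (the names below are illustrative, not the driver's):

#include <string.h>

#define REDZONE 0x5a	/* any improbable byte pattern */

/* Poison a buffer we no longer trust; a later consumer must rewrite it. */
static void dbg_poison(void *buf, size_t len)
{
	memset(buf, REDZONE, len);
}

/* Cheap check that somebody actually re-initialised the buffer. */
static int looks_poisoned(const unsigned char *buf, size_t len)
{
	for (size_t i = 0; i < len; i++)
		if (buf[i] != REDZONE)
			return 0;
	return 1;
}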
diff --git a/drivers/gpu/drm/i915/gt/intel_gtt.c b/drivers/gpu/drm/i915/gt/intel_gtt.c
index 3f1114b58b01..7bfe9072be9a 100644
--- a/drivers/gpu/drm/i915/gt/intel_gtt.c
+++ b/drivers/gpu/drm/i915/gt/intel_gtt.c
@@ -324,7 +324,7 @@ static void cnl_setup_private_ppat(struct intel_uncore *uncore)
 			   GEN8_PPAT_WC | GEN8_PPAT_LLCELLC);
 	intel_uncore_write(uncore,
 			   GEN10_PAT_INDEX(2),
-			   GEN8_PPAT_WT | GEN8_PPAT_LLCELLC);
+			   GEN8_PPAT_WB | GEN8_PPAT_ELLC_OVERRIDE);
 	intel_uncore_write(uncore,
 			   GEN10_PAT_INDEX(3),
 			   GEN8_PPAT_UC);
@@ -349,17 +349,23 @@ static void cnl_setup_private_ppat(struct intel_uncore *uncore)
  */
 static void bdw_setup_private_ppat(struct intel_uncore *uncore)
 {
+	struct drm_i915_private *i915 = uncore->i915;
 	u64 pat;
 
 	pat = GEN8_PPAT(0, GEN8_PPAT_WB | GEN8_PPAT_LLC) |	/* for normal objects, no eLLC */
 	      GEN8_PPAT(1, GEN8_PPAT_WC | GEN8_PPAT_LLCELLC) |	/* for something pointing to ptes? */
-	      GEN8_PPAT(2, GEN8_PPAT_WT | GEN8_PPAT_LLCELLC) |	/* for scanout with eLLC */
 	      GEN8_PPAT(3, GEN8_PPAT_UC) |			/* Uncached objects, mostly for scanout */
 	      GEN8_PPAT(4, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(0)) |
 	      GEN8_PPAT(5, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(1)) |
 	      GEN8_PPAT(6, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(2)) |
 	      GEN8_PPAT(7, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3));
 
+	/* for scanout with eLLC */
+	if (INTEL_GEN(i915) >= 9)
+		pat |= GEN8_PPAT(2, GEN8_PPAT_WB | GEN8_PPAT_ELLC_OVERRIDE);
+	else
+		pat |= GEN8_PPAT(2, GEN8_PPAT_WT | GEN8_PPAT_LLCELLC);
+
 	intel_uncore_write(uncore, GEN8_PRIVATE_PAT_LO, lower_32_bits(pat));
 	intel_uncore_write(uncore, GEN8_PRIVATE_PAT_HI, upper_32_bits(pat));
 }
diff --git a/drivers/gpu/drm/i915/gt/intel_gtt.h b/drivers/gpu/drm/i915/gt/intel_gtt.h
index c13c650ced22..8a33940a71f3 100644
--- a/drivers/gpu/drm/i915/gt/intel_gtt.h
+++ b/drivers/gpu/drm/i915/gt/intel_gtt.h
@@ -580,7 +580,7 @@ static inline struct sgt_dma {
 	struct scatterlist *sg = vma->pages->sgl;
 	dma_addr_t addr = sg_dma_address(sg);
 
-	return (struct sgt_dma){ sg, addr, addr + sg->length };
+	return (struct sgt_dma){ sg, addr, addr + sg_dma_len(sg) };
 }
 
 #endif
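bdw_setup_private_ppat() composes the whole private PAT as a single u64, one attribute byte per PAT index, then splits it across the LO/HI registers; the patch only moves index 2 into an if/else so gen9+ can use the eLLC override. A sketch of that packing, assuming the usual GEN8_PPAT(i, x) layout of (u64)(x) << (i * 8):

#include <stdint.h>

/* Assumed layout: one attribute byte per PAT index, eight indices. */
#define PPAT(i, x)	((uint64_t)(x) << ((i) * 8))

static void write_ppat(uint64_t pat, uint32_t *lo_reg, uint32_t *hi_reg)
{
	*lo_reg = (uint32_t)pat;		/* indices 0-3 */
	*hi_reg = (uint32_t)(pat >> 32);	/* indices 4-7 */
}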
diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c
index 724b2cb897d3..7614a3d24fca 100644
--- a/drivers/gpu/drm/i915/gt/intel_lrc.c
+++ b/drivers/gpu/drm/i915/gt/intel_lrc.c
@@ -1216,7 +1216,8 @@ static void intel_engine_context_out(struct intel_engine_cs *engine)
 
 static void
 execlists_check_context(const struct intel_context *ce,
-			const struct intel_engine_cs *engine)
+			const struct intel_engine_cs *engine,
+			const char *when)
 {
 	const struct intel_ring *ring = ce->ring;
 	u32 *regs = ce->lrc_reg_state;
@@ -1251,7 +1252,7 @@ execlists_check_context(const struct intel_context *ce,
 		valid = false;
 	}
 
-	WARN_ONCE(!valid, "Invalid lrc state found before submission\n");
+	WARN_ONCE(!valid, "Invalid lrc state found %s submission\n", when);
 }
 
 static void restore_default_state(struct intel_context *ce,
@@ -1347,7 +1348,7 @@ __execlists_schedule_in(struct i915_request *rq)
 		reset_active(rq, engine);
 
 	if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
-		execlists_check_context(ce, engine);
+		execlists_check_context(ce, engine, "before");
 
 	if (ce->tag) {
 		/* Use a fixed tag for OA and friends */
@@ -1418,6 +1419,9 @@ __execlists_schedule_out(struct i915_request *rq,
 	 * refrain from doing non-trivial work here.
 	 */
 
+	if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
+		execlists_check_context(ce, engine, "after");
+
 	/*
 	 * If we have just completed this context, the engine may now be
 	 * idle and we want to re-enter powersaving.
@@ -2496,25 +2500,11 @@ invalidate_csb_entries(const u64 *first, const u64 *last)
 * bits 47-57: sw context id of the lrc the GT switched away from
 * bits 58-63: sw counter of the lrc the GT switched away from
 */
-static inline bool gen12_csb_parse(const u64 *csb)
+static inline bool gen12_csb_parse(const u64 csb)
 {
-	bool ctx_away_valid;
-	bool new_queue;
-	u64 entry;
-
-	/* HSD#22011248461 */
-	entry = READ_ONCE(*csb);
-	if (unlikely(entry == -1)) {
-		preempt_disable();
-		if (wait_for_atomic_us((entry = READ_ONCE(*csb)) != -1, 50))
-			GEM_WARN_ON("50us CSB timeout");
-		preempt_enable();
-	}
-	WRITE_ONCE(*(u64 *)csb, -1);
-
-	ctx_away_valid = GEN12_CSB_CTX_VALID(upper_32_bits(entry));
-	new_queue =
-		lower_32_bits(entry) & GEN12_CTX_STATUS_SWITCHED_TO_NEW_QUEUE;
+	bool ctx_away_valid = GEN12_CSB_CTX_VALID(upper_32_bits(csb));
+	bool new_queue =
+		lower_32_bits(csb) & GEN12_CTX_STATUS_SWITCHED_TO_NEW_QUEUE;
 
 	/*
 	 * The context switch detail is not guaranteed to be 5 when a preemption
@@ -2524,7 +2514,7 @@ static inline bool gen12_csb_parse(const u64 *csb)
 	 * would require some extra handling, but we don't support that.
 	 */
 	if (!ctx_away_valid || new_queue) {
-		GEM_BUG_ON(!GEN12_CSB_CTX_VALID(lower_32_bits(entry)));
+		GEM_BUG_ON(!GEN12_CSB_CTX_VALID(lower_32_bits(csb)));
 		return true;
 	}
 
@@ -2533,19 +2523,79 @@ static inline bool gen12_csb_parse(const u64 *csb)
 	 * context switch on an unsuccessful wait instruction since we always
 	 * use polling mode.
 	 */
-	GEM_BUG_ON(GEN12_CTX_SWITCH_DETAIL(upper_32_bits(entry)));
+	GEM_BUG_ON(GEN12_CTX_SWITCH_DETAIL(upper_32_bits(csb)));
 	return false;
 }
 
-static inline bool gen8_csb_parse(const u64 *csb)
+static inline bool gen8_csb_parse(const u64 csb)
+{
+	return csb & (GEN8_CTX_STATUS_IDLE_ACTIVE | GEN8_CTX_STATUS_PREEMPTED);
+}
+
+static noinline u64
+wa_csb_read(const struct intel_engine_cs *engine, u64 * const csb)
 {
-	return *csb & (GEN8_CTX_STATUS_IDLE_ACTIVE | GEN8_CTX_STATUS_PREEMPTED);
+	u64 entry;
+
+	/*
+	 * Reading from the HWSP has one particular advantage: we can detect
+	 * a stale entry. Since the write into HWSP is broken, we have no reason
+	 * to trust the HW at all, the mmio entry may equally be unordered, so
+	 * we prefer the path that is self-checking and as a last resort,
+	 * return the mmio value.
+	 *
+	 * tgl,dg1:HSDES#22011327657
+	 */
+	preempt_disable();
+	if (wait_for_atomic_us((entry = READ_ONCE(*csb)) != -1, 10)) {
+		int idx = csb - engine->execlists.csb_status;
+		int status;
+
+		status = GEN8_EXECLISTS_STATUS_BUF;
+		if (idx >= 6) {
+			status = GEN11_EXECLISTS_STATUS_BUF2;
+			idx -= 6;
+		}
+		status += sizeof(u64) * idx;
+
+		entry = intel_uncore_read64(engine->uncore,
+					    _MMIO(engine->mmio_base + status));
+	}
+	preempt_enable();
+
+	return entry;
+}
+
+static inline u64
+csb_read(const struct intel_engine_cs *engine, u64 * const csb)
+{
+	u64 entry = READ_ONCE(*csb);
+
+	/*
+	 * Unfortunately, the GPU does not always serialise its write
+	 * of the CSB entries before its write of the CSB pointer, at least
+	 * from the perspective of the CPU, using what is known as a Global
+	 * Observation Point. We may read a new CSB tail pointer, but then
+	 * read the stale CSB entries, causing us to misinterpret the
+	 * context-switch events, and eventually declare the GPU hung.
+	 *
+	 * icl:HSDES#1806554093
+	 * tgl:HSDES#22011248461
+	 */
+	if (unlikely(entry == -1))
+		entry = wa_csb_read(engine, csb);
+
+	/* Consume this entry so that we can spot its future reuse. */
+	WRITE_ONCE(*csb, -1);
+
+	/* ELSP is an implicit wmb() before the GPU wraps and overwrites csb */
+	return entry;
+}
 
 static void process_csb(struct intel_engine_cs *engine)
 {
 	struct intel_engine_execlists * const execlists = &engine->execlists;
-	const u64 * const buf = execlists->csb_status;
+	u64 * const buf = execlists->csb_status;
 	const u8 num_entries = execlists->csb_size;
 	u8 head, tail;
@@ -2603,6 +2653,7 @@ static void process_csb(struct intel_engine_cs *engine)
 	rmb();
 	do {
 		bool promote;
+		u64 csb;
 
 		if (++head == num_entries)
 			head = 0;
@@ -2625,15 +2676,14 @@ static void process_csb(struct intel_engine_cs *engine)
 		 * status notifier.
 		 */
 
+		csb = csb_read(engine, buf + head);
 		ENGINE_TRACE(engine, "csb[%d]: status=0x%08x:0x%08x\n",
-			     head,
-			     upper_32_bits(buf[head]),
-			     lower_32_bits(buf[head]));
+			     head, upper_32_bits(csb), lower_32_bits(csb));
 
 		if (INTEL_GEN(engine->i915) >= 12)
-			promote = gen12_csb_parse(buf + head);
+			promote = gen12_csb_parse(csb);
 		else
-			promote = gen8_csb_parse(buf + head);
+			promote = gen8_csb_parse(csb);
 		if (promote) {
 			struct i915_request * const *old = execlists->active;
 
@@ -2991,6 +3041,8 @@ static struct execlists_capture *capture_regs(struct intel_engine_cs *engine)
 	if (!cap->error->gt->engine)
 		goto err_gt;
 
+	cap->error->gt->engine->hung = true;
+
 	return cap;
 
 err_gt:
@@ -4053,6 +4105,8 @@ static void reset_csb_pointers(struct intel_engine_cs *engine)
 
 static void execlists_sanitize(struct intel_engine_cs *engine)
 {
+	GEM_BUG_ON(execlists_active(&engine->execlists));
+
 	/*
 	 * Poison residual state on resume, in case the suspend didn't!
 	 *
@@ -4382,6 +4436,7 @@ static void execlists_reset_cancel(struct intel_engine_cs *engine)
 	/* Mark all executing requests as skipped. */
 	list_for_each_entry(rq, &engine->active.requests, sched.link)
 		mark_eio(rq);
+	intel_engine_signal_breadcrumbs(engine);
 
 	/* Flush the queued requests to the timeline list (for retiring). */
 	while ((rb = rb_first_cached(&execlists->queue))) {
@@ -5974,18 +6029,6 @@ int intel_virtual_engine_attach_bond(struct intel_engine_cs *engine,
 	return 0;
 }
 
-struct intel_engine_cs *
-intel_virtual_engine_get_sibling(struct intel_engine_cs *engine,
-				 unsigned int sibling)
-{
-	struct virtual_engine *ve = to_virtual_engine(engine);
-
-	if (sibling >= ve->num_siblings)
-		return NULL;
-
-	return ve->siblings[sibling];
-}
-
 void intel_execlists_show_requests(struct intel_engine_cs *engine,
 				   struct drm_printer *m,
 				   void (*show_request)(struct drm_printer *m,
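The csb_read()/wa_csb_read() pair added above works around unordered CSB writes by treating ~0 as a sentinel: each consumed entry is overwritten with -1, so reading -1 back means the GPU's write has not landed yet, and the reader briefly spins before falling back to the mmio copy of the status buffer. The pattern reduced to a simplified, non-atomic sketch (busy_wait_for_valid() is a stand-in for the wait_for_atomic_us() spin plus mmio fallback):

#include <stdint.h>

#define STALE	((uint64_t)-1)

/* Hypothetical fallback: re-read until valid, else consult a second copy. */
extern uint64_t busy_wait_for_valid(volatile uint64_t *slot);

static uint64_t consume_entry(volatile uint64_t *slot)
{
	uint64_t entry = *slot;

	if (entry == STALE)		/* writer hasn't caught up yet */
		entry = busy_wait_for_valid(slot);

	*slot = STALE;			/* mark consumed to spot reuse */
	return entry;
}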
diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.h b/drivers/gpu/drm/i915/gt/intel_lrc.h
index 91fd8e452d9b..c2d287f25497 100644
--- a/drivers/gpu/drm/i915/gt/intel_lrc.h
+++ b/drivers/gpu/drm/i915/gt/intel_lrc.h
@@ -121,10 +121,6 @@ int intel_virtual_engine_attach_bond(struct intel_engine_cs *engine,
 				     const struct intel_engine_cs *master,
 				     const struct intel_engine_cs *sibling);
 
-struct intel_engine_cs *
-intel_virtual_engine_get_sibling(struct intel_engine_cs *engine,
-				 unsigned int sibling);
-
 bool
 intel_engine_in_execlists_submission_mode(const struct intel_engine_cs *engine);
 
diff --git a/drivers/gpu/drm/i915/gt/intel_lrc_reg.h b/drivers/gpu/drm/i915/gt/intel_lrc_reg.h
index 93cb6c460508..1b51f7b9a5c3 100644
--- a/drivers/gpu/drm/i915/gt/intel_lrc_reg.h
+++ b/drivers/gpu/drm/i915/gt/intel_lrc_reg.h
@@ -49,4 +49,7 @@
 #define GEN11_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT	0x1A
 #define GEN12_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT	0xD
 
+#define GEN8_EXECLISTS_STATUS_BUF 0x370
+#define GEN11_EXECLISTS_STATUS_BUF2 0x3c0
+
 #endif /* _INTEL_LRC_REG_H_ */
diff --git a/drivers/gpu/drm/i915/gt/intel_mocs.c b/drivers/gpu/drm/i915/gt/intel_mocs.c
index 413dadfac2d1..ab6870242e18 100644
--- a/drivers/gpu/drm/i915/gt/intel_mocs.c
+++ b/drivers/gpu/drm/i915/gt/intel_mocs.c
@@ -108,7 +108,7 @@ struct drm_i915_mocs_table {
 * they will be initialized to PTE. Gen >= 12 onwards don't have a setting for
 * PTE and will be initialized to an invalid value.
 *
- * The last two entries are reserved by the hardware. For ICL+ they
+ * The last few entries are reserved by the hardware. For ICL+ they
 * should be initialized according to bspec and never used, for older
 * platforms they should never be written to.
 *
@@ -123,7 +123,7 @@ struct drm_i915_mocs_table {
 		   LE_1_UC | LE_TC_2_LLC_ELLC, \
 		   L3_1_UC), \
 	MOCS_ENTRY(I915_MOCS_PTE, \
-		   LE_0_PAGETABLE | LE_TC_2_LLC_ELLC | LE_LRUM(3), \
+		   LE_0_PAGETABLE | LE_TC_0_PAGETABLE | LE_LRUM(3), \
 		   L3_3_WB)
 
 static const struct drm_i915_mocs_entry skl_mocs_table[] = {
@@ -292,12 +292,45 @@ static const struct drm_i915_mocs_entry icl_mocs_table[] = {
 		   L3_1_UC),
 	/* Base - L3 + LeCC:PAT (Deprecated) */
 	MOCS_ENTRY(I915_MOCS_PTE,
-		   LE_0_PAGETABLE | LE_TC_1_LLC,
+		   LE_0_PAGETABLE | LE_TC_0_PAGETABLE,
 		   L3_3_WB),
 
 	GEN11_MOCS_ENTRIES
 };
 
+static const struct drm_i915_mocs_entry dg1_mocs_table[] = {
+	/* Error */
+	MOCS_ENTRY(0, 0, L3_0_DIRECT),
+
+	/* UC */
+	MOCS_ENTRY(1, 0, L3_1_UC),
+
+	/* Reserved */
+	MOCS_ENTRY(2, 0, L3_0_DIRECT),
+	MOCS_ENTRY(3, 0, L3_0_DIRECT),
+	MOCS_ENTRY(4, 0, L3_0_DIRECT),
+
+	/* WB - L3 */
+	MOCS_ENTRY(5, 0, L3_3_WB),
+	/* WB - L3 50% */
+	MOCS_ENTRY(6, 0, L3_ESC(1) | L3_SCC(1) | L3_3_WB),
+	/* WB - L3 25% */
+	MOCS_ENTRY(7, 0, L3_ESC(1) | L3_SCC(3) | L3_3_WB),
+	/* WB - L3 12.5% */
+	MOCS_ENTRY(8, 0, L3_ESC(1) | L3_SCC(7) | L3_3_WB),
+
+	/* HDC:L1 + L3 */
+	MOCS_ENTRY(48, 0, L3_3_WB),
+	/* HDC:L1 */
+	MOCS_ENTRY(49, 0, L3_1_UC),
+
+	/* HW Reserved */
+	MOCS_ENTRY(60, 0, L3_1_UC),
+	MOCS_ENTRY(61, 0, L3_1_UC),
+	MOCS_ENTRY(62, 0, L3_1_UC),
+	MOCS_ENTRY(63, 0, L3_1_UC),
+};
+
 enum {
 	HAS_GLOBAL_MOCS = BIT(0),
 	HAS_ENGINE_MOCS = BIT(1),
@@ -324,7 +357,11 @@ static unsigned int get_mocs_settings(const struct drm_i915_private *i915,
 {
 	unsigned int flags;
 
-	if (INTEL_GEN(i915) >= 12) {
+	if (IS_DG1(i915)) {
+		table->size = ARRAY_SIZE(dg1_mocs_table);
+		table->table = dg1_mocs_table;
+		table->n_entries = GEN9_NUM_MOCS_ENTRIES;
+	} else if (INTEL_GEN(i915) >= 12) {
 		table->size = ARRAY_SIZE(tgl_mocs_table);
 		table->table = tgl_mocs_table;
 		table->n_entries = GEN9_NUM_MOCS_ENTRIES;
diff --git a/drivers/gpu/drm/i915/gt/intel_reset.c b/drivers/gpu/drm/i915/gt/intel_reset.c
index ac36b67fb46b..3654c955e6be 100644
--- a/drivers/gpu/drm/i915/gt/intel_reset.c
+++ b/drivers/gpu/drm/i915/gt/intel_reset.c
@@ -19,6 +19,7 @@
 #include "intel_engine_pm.h"
 #include "intel_gt.h"
 #include "intel_gt_pm.h"
+#include "intel_gt_requests.h"
 #include "intel_reset.h"
 
 #include "uc/intel_guc.h"
@@ -1190,14 +1191,14 @@ static void intel_gt_reset_global(struct intel_gt *gt,
 
 	/* Use a watchdog to ensure that our reset completes */
 	intel_wedge_on_timeout(&w, gt, 5 * HZ) {
-		intel_prepare_reset(gt->i915);
+		intel_display_prepare_reset(gt->i915);
 
 		/* Flush everyone using a resource about to be clobbered */
 		synchronize_srcu_expedited(&gt->reset.backoff_srcu);
 
 		intel_gt_reset(gt, engine_mask, reason);
 
-		intel_finish_reset(gt->i915);
+		intel_display_finish_reset(gt->i915);
 	}
 
 	if (!test_bit(I915_WEDGED, &gt->reset.flags))
@@ -1250,7 +1251,7 @@ void intel_gt_handle_error(struct intel_gt *gt,
 	engine_mask &= gt->info.engine_mask;
 
 	if (flags & I915_ERROR_CAPTURE) {
-		i915_capture_error_state(gt->i915);
+		i915_capture_error_state(gt, engine_mask);
 		intel_gt_clear_error_registers(gt, engine_mask);
 	}
 
@@ -1370,6 +1371,7 @@ void intel_gt_set_wedged_on_fini(struct intel_gt *gt)
 {
 	intel_gt_set_wedged(gt);
 	set_bit(I915_WEDGED_ON_FINI, &gt->reset.flags);
+	intel_gt_retire_requests(gt); /* cleanup any wedged requests */
 }
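get_mocs_settings() now tests IS_DG1() before the INTEL_GEN() >= 12 branch; the ordering matters, since DG1 is itself a gen12 part and would otherwise inherit the TGL table. A toy sketch of that selection order (pick_table(), is_dg1 and gen are stand-ins for the driver's helpers):

/*
 * Sketch of the table-selection order: exact platform matches must be
 * tested before the broader generation fallbacks.
 */
struct mocs_choice { const char *name; };

static const struct mocs_choice dg1 = { "dg1_mocs_table" };
static const struct mocs_choice tgl = { "tgl_mocs_table" };
static const struct mocs_choice icl = { "icl_mocs_table" };

static const struct mocs_choice *pick_table(int gen, int is_dg1)
{
	if (is_dg1)		/* platform check first */
		return &dg1;
	if (gen >= 12)		/* then generation fallback */
		return &tgl;
	return &icl;
}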
