Diffstat (limited to 'drivers/gpu/drm/i915/i915_gem.c')
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c | 127
1 file changed, 66 insertions(+), 61 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 1632f18e6a64..43afa1c1b14f 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -240,8 +240,8 @@ err_phys:
static void __start_cpu_write(struct drm_i915_gem_object *obj)
{
- obj->base.read_domains = I915_GEM_DOMAIN_CPU;
- obj->base.write_domain = I915_GEM_DOMAIN_CPU;
+ obj->read_domains = I915_GEM_DOMAIN_CPU;
+ obj->write_domain = I915_GEM_DOMAIN_CPU;
if (cpu_write_needs_clflush(obj))
obj->cache_dirty = true;
}
@@ -257,7 +257,7 @@ __i915_gem_object_release_shmem(struct drm_i915_gem_object *obj,
obj->mm.dirty = false;
if (needs_clflush &&
- (obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0 &&
+ (obj->read_domains & I915_GEM_DOMAIN_CPU) == 0 &&
!(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ))
drm_clflush_sg(pages);
@@ -703,10 +703,10 @@ flush_write_domain(struct drm_i915_gem_object *obj, unsigned int flush_domains)
struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
struct i915_vma *vma;
- if (!(obj->base.write_domain & flush_domains))
+ if (!(obj->write_domain & flush_domains))
return;
- switch (obj->base.write_domain) {
+ switch (obj->write_domain) {
case I915_GEM_DOMAIN_GTT:
i915_gem_flush_ggtt_writes(dev_priv);
@@ -731,7 +731,7 @@ flush_write_domain(struct drm_i915_gem_object *obj, unsigned int flush_domains)
break;
}
- obj->base.write_domain = 0;
+ obj->write_domain = 0;
}
static inline int
@@ -831,7 +831,7 @@ int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj,
* anyway again before the next pread happens.
*/
if (!obj->cache_dirty &&
- !(obj->base.read_domains & I915_GEM_DOMAIN_CPU))
+ !(obj->read_domains & I915_GEM_DOMAIN_CPU))
*needs_clflush = CLFLUSH_BEFORE;
out:
@@ -890,7 +890,7 @@ int i915_gem_obj_prepare_shmem_write(struct drm_i915_gem_object *obj,
* Same trick applies to invalidate partially written
* cachelines read before writing.
*/
- if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU))
+ if (!(obj->read_domains & I915_GEM_DOMAIN_CPU))
*needs_clflush |= CLFLUSH_BEFORE;
}
@@ -2391,8 +2391,8 @@ static int i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
* wasn't in the GTT, there shouldn't be any way it could have been in
* a GPU cache
*/
- GEM_BUG_ON(obj->base.read_domains & I915_GEM_GPU_DOMAINS);
- GEM_BUG_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS);
+ GEM_BUG_ON(obj->read_domains & I915_GEM_GPU_DOMAINS);
+ GEM_BUG_ON(obj->write_domain & I915_GEM_GPU_DOMAINS);
st = kmalloc(sizeof(*st), GFP_KERNEL);
if (st == NULL)
@@ -3205,6 +3205,9 @@ void i915_gem_set_wedged(struct drm_i915_private *i915)
intel_engine_dump(engine, &p, "%s\n", engine->name);
}
+ set_bit(I915_WEDGED, &i915->gpu_error.flags);
+ smp_mb__after_atomic();
+
/*
* First, stop submission to hw, but do not yet complete requests by
* rolling the global seqno forward (since this would complete requests
@@ -3229,8 +3232,11 @@ void i915_gem_set_wedged(struct drm_i915_private *i915)
* start to complete all requests.
*/
engine->submit_request = nop_complete_submit_request;
+ engine->schedule = NULL;
}
+ i915->caps.scheduler = 0;
+
/*
* Make sure no request can slip through without getting completed by
* either this call here to intel_engine_init_global_seqno, or the one
@@ -3241,7 +3247,8 @@ void i915_gem_set_wedged(struct drm_i915_private *i915)
for_each_engine(engine, i915, id) {
unsigned long flags;
- /* Mark all pending requests as complete so that any concurrent
+ /*
+ * Mark all pending requests as complete so that any concurrent
* (lockless) lookup doesn't try and wait upon the request as we
* reset it.
*/
@@ -3251,7 +3258,6 @@ void i915_gem_set_wedged(struct drm_i915_private *i915)
spin_unlock_irqrestore(&engine->timeline->lock, flags);
}
- set_bit(I915_WEDGED, &i915->gpu_error.flags);
wake_up_all(&i915->gpu_error.reset_queue);
}
@@ -3697,7 +3703,7 @@ static void __i915_gem_object_flush_for_display(struct drm_i915_gem_object *obj)
flush_write_domain(obj, ~I915_GEM_DOMAIN_CPU);
if (obj->cache_dirty)
i915_gem_clflush_object(obj, I915_CLFLUSH_FORCE);
- obj->base.write_domain = 0;
+ obj->write_domain = 0;
}
void i915_gem_object_flush_if_display(struct drm_i915_gem_object *obj)
@@ -3734,7 +3740,7 @@ i915_gem_object_set_to_wc_domain(struct drm_i915_gem_object *obj, bool write)
if (ret)
return ret;
- if (obj->base.write_domain == I915_GEM_DOMAIN_WC)
+ if (obj->write_domain == I915_GEM_DOMAIN_WC)
return 0;
/* Flush and acquire obj->pages so that we are coherent through
@@ -3755,17 +3761,17 @@ i915_gem_object_set_to_wc_domain(struct drm_i915_gem_object *obj, bool write)
* coherent writes from the GPU, by effectively invalidating the
* WC domain upon first access.
*/
- if ((obj->base.read_domains & I915_GEM_DOMAIN_WC) == 0)
+ if ((obj->read_domains & I915_GEM_DOMAIN_WC) == 0)
mb();
/* It should now be out of any other write domains, and we can update
* the domain values for our changes.
*/
- GEM_BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_WC) != 0);
- obj->base.read_domains |= I915_GEM_DOMAIN_WC;
+ GEM_BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_WC) != 0);
+ obj->read_domains |= I915_GEM_DOMAIN_WC;
if (write) {
- obj->base.read_domains = I915_GEM_DOMAIN_WC;
- obj->base.write_domain = I915_GEM_DOMAIN_WC;
+ obj->read_domains = I915_GEM_DOMAIN_WC;
+ obj->write_domain = I915_GEM_DOMAIN_WC;
obj->mm.dirty = true;
}
@@ -3797,7 +3803,7 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
if (ret)
return ret;
- if (obj->base.write_domain == I915_GEM_DOMAIN_GTT)
+ if (obj->write_domain == I915_GEM_DOMAIN_GTT)
return 0;
/* Flush and acquire obj->pages so that we are coherent through
@@ -3818,17 +3824,17 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
* coherent writes from the GPU, by effectively invalidating the
* GTT domain upon first access.
*/
- if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0)
+ if ((obj->read_domains & I915_GEM_DOMAIN_GTT) == 0)
mb();
/* It should now be out of any other write domains, and we can update
* the domain values for our changes.
*/
- GEM_BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
- obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
+ GEM_BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
+ obj->read_domains |= I915_GEM_DOMAIN_GTT;
if (write) {
- obj->base.read_domains = I915_GEM_DOMAIN_GTT;
- obj->base.write_domain = I915_GEM_DOMAIN_GTT;
+ obj->read_domains = I915_GEM_DOMAIN_GTT;
+ obj->write_domain = I915_GEM_DOMAIN_GTT;
obj->mm.dirty = true;
}
@@ -4072,7 +4078,8 @@ out:
struct i915_vma *
i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
u32 alignment,
- const struct i915_ggtt_view *view)
+ const struct i915_ggtt_view *view,
+ unsigned int flags)
{
struct i915_vma *vma;
int ret;
@@ -4109,25 +4116,14 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
* try to preserve the existing ABI).
*/
vma = ERR_PTR(-ENOSPC);
- if (!view || view->type == I915_GGTT_VIEW_NORMAL)
+ if ((flags & PIN_MAPPABLE) == 0 &&
+ (!view || view->type == I915_GGTT_VIEW_NORMAL))
vma = i915_gem_object_ggtt_pin(obj, view, 0, alignment,
- PIN_MAPPABLE | PIN_NONBLOCK);
- if (IS_ERR(vma)) {
- struct drm_i915_private *i915 = to_i915(obj->base.dev);
- unsigned int flags;
-
- /* Valleyview is definitely limited to scanning out the first
- * 512MiB. Lets presume this behaviour was inherited from the
- * g4x display engine and that all earlier gen are similarly
- * limited. Testing suggests that it is a little more
- * complicated than this. For example, Cherryview appears quite
- * happy to scanout from anywhere within its global aperture.
- */
- flags = 0;
- if (HAS_GMCH_DISPLAY(i915))
- flags = PIN_MAPPABLE;
+ flags |
+ PIN_MAPPABLE |
+ PIN_NONBLOCK);
+ if (IS_ERR(vma))
vma = i915_gem_object_ggtt_pin(obj, view, 0, alignment, flags);
- }
if (IS_ERR(vma))
goto err_unpin_global;
@@ -4140,7 +4136,7 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
/* It should now be out of any other write domains, and we can update
* the domain values for our changes.
*/
- obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
+ obj->read_domains |= I915_GEM_DOMAIN_GTT;
return vma;
@@ -4193,15 +4189,15 @@ i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
flush_write_domain(obj, ~I915_GEM_DOMAIN_CPU);
/* Flush the CPU cache if it's still invalid. */
- if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0) {
+ if ((obj->read_domains & I915_GEM_DOMAIN_CPU) == 0) {
i915_gem_clflush_object(obj, I915_CLFLUSH_SYNC);
- obj->base.read_domains |= I915_GEM_DOMAIN_CPU;
+ obj->read_domains |= I915_GEM_DOMAIN_CPU;
}
/* It should now be out of any other write domains, and we can update
* the domain values for our changes.
*/
- GEM_BUG_ON(obj->base.write_domain & ~I915_GEM_DOMAIN_CPU);
+ GEM_BUG_ON(obj->write_domain & ~I915_GEM_DOMAIN_CPU);
/* If we're writing through the CPU, then the GPU read domains will
* need to be invalidated at next use.
@@ -4276,7 +4272,8 @@ i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
lockdep_assert_held(&obj->base.dev->struct_mutex);
- if (!view && flags & PIN_MAPPABLE) {
+ if (flags & PIN_MAPPABLE &&
+ (!view || view->type == I915_GGTT_VIEW_NORMAL)) {
/* If the required space is larger than the available
* aperture, we will not able to find a slot for the
* object and unbinding the object now will be in
@@ -4637,8 +4634,8 @@ i915_gem_object_create(struct drm_i915_private *dev_priv, u64 size)
i915_gem_object_init(obj, &i915_gem_object_ops);
- obj->base.write_domain = I915_GEM_DOMAIN_CPU;
- obj->base.read_domains = I915_GEM_DOMAIN_CPU;
+ obj->write_domain = I915_GEM_DOMAIN_CPU;
+ obj->read_domains = I915_GEM_DOMAIN_CPU;
if (HAS_LLC(dev_priv))
/* On some devices, we can have the GPU use the LLC (the CPU
@@ -4752,6 +4749,9 @@ static void __i915_gem_free_objects(struct drm_i915_private *i915,
kfree(obj->bit_17);
i915_gem_object_free(obj);
+ GEM_BUG_ON(!atomic_read(&i915->mm.free_count));
+ atomic_dec(&i915->mm.free_count);
+
if (on)
cond_resched();
}
@@ -4840,6 +4840,7 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)
* i915_gem_busy_ioctl(). For the corresponding synchronized
* lookup see i915_gem_object_lookup_rcu().
*/
+ atomic_inc(&to_i915(obj->base.dev)->mm.free_count);
call_rcu(&obj->rcu, __i915_gem_free_object_rcu);
}
@@ -4882,10 +4883,8 @@ void i915_gem_sanitize(struct drm_i915_private *i915)
* it may impact the display and we are uncertain about the stability
* of the reset, so this could be applied to even earlier gen.
*/
- if (INTEL_GEN(i915) >= 5) {
- int reset = intel_gpu_reset(i915, ALL_ENGINES);
- WARN_ON(reset && reset != -ENODEV);
- }
+ if (INTEL_GEN(i915) >= 5 && intel_has_gpu_reset(i915))
+ WARN_ON(intel_gpu_reset(i915, ALL_ENGINES));
}
int i915_gem_suspend(struct drm_i915_private *dev_priv)
@@ -5065,8 +5064,11 @@ static int __i915_gem_restart_engines(void *data)
for_each_engine(engine, i915, id) {
err = engine->init_hw(engine);
- if (err)
+ if (err) {
+ DRM_ERROR("Failed to restart %s (%d)\n",
+ engine->name, err);
return err;
+ }
}
return 0;
@@ -5118,14 +5120,16 @@ int i915_gem_init_hw(struct drm_i915_private *dev_priv)
ret = i915_ppgtt_init_hw(dev_priv);
if (ret) {
- DRM_ERROR("PPGTT enable HW failed %d\n", ret);
+ DRM_ERROR("Enabling PPGTT failed (%d)\n", ret);
goto out;
}
/* We can't enable contexts until all firmware is loaded */
ret = intel_uc_init_hw(dev_priv);
- if (ret)
+ if (ret) {
+ DRM_ERROR("Enabling uc failed (%d)\n", ret);
goto out;
+ }
intel_mocs_init_l3cc_table(dev_priv);
@@ -5415,10 +5419,10 @@ i915_gem_load_init_fences(struct drm_i915_private *dev_priv)
{
int i;
- if (INTEL_INFO(dev_priv)->gen >= 7 && !IS_VALLEYVIEW(dev_priv) &&
+ if (INTEL_GEN(dev_priv) >= 7 && !IS_VALLEYVIEW(dev_priv) &&
!IS_CHERRYVIEW(dev_priv))
dev_priv->num_fence_regs = 32;
- else if (INTEL_INFO(dev_priv)->gen >= 4 ||
+ else if (INTEL_GEN(dev_priv) >= 4 ||
IS_I945G(dev_priv) || IS_I945GM(dev_priv) ||
IS_G33(dev_priv) || IS_PINEVIEW(dev_priv))
dev_priv->num_fence_regs = 16;
@@ -5537,7 +5541,8 @@ err_out:
void i915_gem_load_cleanup(struct drm_i915_private *dev_priv)
{
i915_gem_drain_freed_objects(dev_priv);
- WARN_ON(!llist_empty(&dev_priv->mm.free_list));
+ GEM_BUG_ON(!llist_empty(&dev_priv->mm.free_list));
+ GEM_BUG_ON(atomic_read(&dev_priv->mm.free_count));
WARN_ON(dev_priv->mm.object_count);
mutex_lock(&dev_priv->drm.struct_mutex);
@@ -5693,7 +5698,7 @@ i915_gem_object_create_from_data(struct drm_i915_private *dev_priv,
if (IS_ERR(obj))
return obj;
- GEM_BUG_ON(obj->base.write_domain != I915_GEM_DOMAIN_CPU);
+ GEM_BUG_ON(obj->write_domain != I915_GEM_DOMAIN_CPU);
file = obj->base.filp;
offset = 0;
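
Most hunks above are mechanical: accesses to obj->base.read_domains and obj->base.write_domain become obj->read_domains and obj->write_domain, moving domain tracking from the embedded DRM core object into the i915 object itself. Below is a minimal standalone sketch of that shape change, not part of the patch: the struct layouts are heavily simplified stand-ins (the real definitions carry many more members), the field type is an assumption, and start_cpu_write_sketch() is a hypothetical stand-in for the __start_cpu_write() hunk above.

typedef unsigned int u32;	/* kernel-style alias, for the sketch only */

#define I915_GEM_DOMAIN_CPU 0x00000001	/* value as in the i915_drm.h uapi */

/* Before the patch: the domains lived in the DRM core object. */
struct drm_gem_object {
	/* ... other core members elided ... */
	u32 read_domains;	/* bitmask of I915_GEM_DOMAIN_* */
	u32 write_domain;
};

/* After the patch: the i915 object tracks its own domains directly. */
struct drm_i915_gem_object {
	struct drm_gem_object base;	/* embedded core object */
	u32 read_domains;		/* was reached via obj->base */
	u32 write_domain;
	/* ... */
};

/* Call sites drop the ->base hop, as in __start_cpu_write() above: */
static void start_cpu_write_sketch(struct drm_i915_gem_object *obj)
{
	obj->read_domains = I915_GEM_DOMAIN_CPU;	/* was obj->base.read_domains */
	obj->write_domain = I915_GEM_DOMAIN_CPU;	/* was obj->base.write_domain */
}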