Diffstat (limited to 'drivers/gpu')
42 files changed, 930 insertions, 610 deletions
diff --git a/drivers/gpu/drm/i915/Kconfig b/drivers/gpu/drm/i915/Kconfig
index 4c59793c4ccb..20a5d0455e19 100644
--- a/drivers/gpu/drm/i915/Kconfig
+++ b/drivers/gpu/drm/i915/Kconfig
@@ -45,3 +45,14 @@ config DRM_I915_PRELIMINARY_HW_SUPPORT
	  option changes the default for that module option.
 
	  If in doubt, say "N".
+
+config DRM_I915_USERPTR
+	bool "Always enable userptr support"
+	depends on DRM_I915
+	select MMU_NOTIFIER
+	default y
+	help
+	  This option selects CONFIG_MMU_NOTIFIER if it isn't already
+	  selected to enable full userptr support.
+
+	  If in doubt, say "Y".
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index ec0c2a05eed6..a0f1bd711b53 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -117,9 +117,8 @@ static u64 i915_gem_obj_total_ggtt_size(struct drm_i915_gem_object *obj)
	u64 size = 0;
	struct i915_vma *vma;
 
-	list_for_each_entry(vma, &obj->vma_list, vma_link) {
-		if (i915_is_ggtt(vma->vm) &&
-		    drm_mm_node_allocated(&vma->node))
+	list_for_each_entry(vma, &obj->vma_list, obj_link) {
+		if (vma->is_ggtt && drm_mm_node_allocated(&vma->node))
			size += vma->node.size;
	}
 
@@ -155,7 +154,7 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
		   obj->madv == I915_MADV_DONTNEED ? " purgeable" : "");
	if (obj->base.name)
		seq_printf(m, " (name: %d)", obj->base.name);
-	list_for_each_entry(vma, &obj->vma_list, vma_link) {
+	list_for_each_entry(vma, &obj->vma_list, obj_link) {
		if (vma->pin_count > 0)
			pin_count++;
	}
@@ -164,14 +163,13 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
		seq_printf(m, " (display)");
	if (obj->fence_reg != I915_FENCE_REG_NONE)
		seq_printf(m, " (fence: %d)", obj->fence_reg);
-	list_for_each_entry(vma, &obj->vma_list, vma_link) {
+	list_for_each_entry(vma, &obj->vma_list, obj_link) {
		seq_printf(m, " (%sgtt offset: %08llx, size: %08llx",
-			   i915_is_ggtt(vma->vm) ? "g" : "pp",
+			   vma->is_ggtt ? "g" : "pp",
			   vma->node.start, vma->node.size);
-		if (i915_is_ggtt(vma->vm))
-			seq_printf(m, ", type: %u)", vma->ggtt_view.type);
-		else
-			seq_puts(m, ")");
+		if (vma->is_ggtt)
+			seq_printf(m, ", type: %u", vma->ggtt_view.type);
+		seq_puts(m, ")");
	}
	if (obj->stolen)
		seq_printf(m, " (stolen: %08llx)", obj->stolen->start);
@@ -230,7 +228,7 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data)
	}
 
	total_obj_size = total_gtt_size = count = 0;
-	list_for_each_entry(vma, head, mm_list) {
+	list_for_each_entry(vma, head, vm_link) {
		seq_printf(m, "   ");
		describe_obj(m, vma->obj);
		seq_printf(m, "\n");
@@ -342,13 +340,13 @@ static int per_file_stats(int id, void *ptr, void *data)
			stats->shared += obj->base.size;
 
	if (USES_FULL_PPGTT(obj->base.dev)) {
-		list_for_each_entry(vma, &obj->vma_list, vma_link) {
+		list_for_each_entry(vma, &obj->vma_list, obj_link) {
			struct i915_hw_ppgtt *ppgtt;
 
			if (!drm_mm_node_allocated(&vma->node))
				continue;
 
-			if (i915_is_ggtt(vma->vm)) {
+			if (vma->is_ggtt) {
				stats->global += obj->base.size;
				continue;
			}
@@ -454,12 +452,12 @@ static int i915_gem_object_info(struct seq_file *m, void* data)
		   count, mappable_count, size, mappable_size);
 
	size = count = mappable_size = mappable_count = 0;
-	count_vmas(&vm->active_list, mm_list);
+	count_vmas(&vm->active_list, vm_link);
	seq_printf(m, "  %u [%u] active objects, %llu [%llu] bytes\n",
		   count, mappable_count, size, mappable_size);
 
	size = count = mappable_size = mappable_count = 0;
-	count_vmas(&vm->inactive_list, mm_list);
+	count_vmas(&vm->inactive_list, vm_link);
	seq_printf(m, "  %u [%u] inactive objects, %llu [%llu] bytes\n",
		   count, mappable_count, size, mappable_size);
 
@@ -825,8 +823,11 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
		}
 
		for_each_pipe(dev_priv, pipe) {
-			if (!intel_display_power_is_enabled(dev_priv,
-						POWER_DOMAIN_PIPE(pipe))) {
+			enum intel_display_power_domain power_domain;
+
+			power_domain = POWER_DOMAIN_PIPE(pipe);
+			if (!intel_display_power_get_if_enabled(dev_priv,
+								power_domain)) {
				seq_printf(m, "Pipe %c power disabled\n",
					   pipe_name(pipe));
				continue;
@@ -840,6 +841,8 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
			seq_printf(m, "Pipe %c IER:\t%08x\n",
				   pipe_name(pipe),
				   I915_READ(GEN8_DE_PIPE_IER(pipe)));
+
+			intel_display_power_put(dev_priv, power_domain);
		}
 
		seq_printf(m, "Display Engine port interrupt mask:\t%08x\n",
@@ -4004,6 +4007,7 @@ static int pipe_crc_set_source(struct drm_device *dev, enum pipe pipe,
	struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
	struct intel_crtc *crtc =
			to_intel_crtc(intel_get_crtc_for_pipe(dev, pipe));
+	enum intel_display_power_domain power_domain;
	u32 val = 0; /* shut up gcc */
	int ret;
 
@@ -4014,7 +4018,8 @@ static int pipe_crc_set_source(struct drm_device *dev, enum pipe pipe,
	if (pipe_crc->source && source)
		return -EINVAL;
 
-	if (!intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_PIPE(pipe))) {
+	power_domain = POWER_DOMAIN_PIPE(pipe);
+	if (!intel_display_power_get_if_enabled(dev_priv, power_domain)) {
		DRM_DEBUG_KMS("Trying to capture CRC while pipe is off\n");
		return -EIO;
	}
@@ -4031,7 +4036,7 @@ static int pipe_crc_set_source(struct drm_device *dev, enum pipe pipe,
		ret = ivb_pipe_crc_ctl_reg(dev, pipe, &source, &val);
 
	if (ret != 0)
-		return ret;
+		goto out;
 
	/* none -> real source transition */
	if (source) {
@@ -4043,8 +4048,10 @@ static int pipe_crc_set_source(struct drm_device *dev, enum pipe pipe,
		entries = kcalloc(INTEL_PIPE_CRC_ENTRIES_NR,
				  sizeof(pipe_crc->entries[0]),
				  GFP_KERNEL);
-		if (!entries)
-			return -ENOMEM;
+		if (!entries) {
+			ret = -ENOMEM;
+			goto out;
+		}
 
		/*
		 * When IPS gets enabled, the pipe CRC changes. Since IPS gets
@@ -4100,7 +4107,12 @@ static int pipe_crc_set_source(struct drm_device *dev, enum pipe pipe,
			hsw_enable_ips(crtc);
	}
 
-	return 0;
+	ret = 0;
+
+out:
+	intel_display_power_put(dev_priv, power_domain);
+
+	return ret;
 }
 
 /*
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 2df2fac04708..1c6d227aae7c 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -444,8 +444,8 @@ static int i915_load_modeset_init(struct drm_device *dev)
 
 cleanup_gem:
	mutex_lock(&dev->struct_mutex);
+	i915_gem_cleanup_ringbuffer(dev);
	i915_gem_context_fini(dev);
-	i915_gem_cleanup_engines(dev);
	mutex_unlock(&dev->struct_mutex);
 cleanup_irq:
	intel_guc_ucode_fini(dev);
@@ -1256,8 +1256,8 @@ int i915_driver_unload(struct drm_device *dev)
	intel_guc_ucode_fini(dev);
 
	mutex_lock(&dev->struct_mutex);
+	i915_gem_cleanup_ringbuffer(dev);
	i915_gem_context_fini(dev);
-	i915_gem_cleanup_engines(dev);
	mutex_unlock(&dev->struct_mutex);
	intel_fbc_cleanup_cfb(dev_priv);
 
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 44912ecebc1a..20e82008b8b6 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -603,13 +603,7 @@ static int i915_drm_suspend(struct drm_device *dev)
 
	intel_suspend_gt_powersave(dev);
 
-	/*
-	 * Disable CRTCs directly since we want to preserve sw state
-	 * for _thaw. Also, power gate the CRTC power wells.
-	 */
-	drm_modeset_lock_all(dev);
	intel_display_suspend(dev);
-	drm_modeset_unlock_all(dev);
 
	intel_dp_mst_suspend(dev);
 
@@ -764,9 +758,7 @@ static int i915_drm_resume(struct drm_device *dev)
		dev_priv->display.hpd_irq_setup(dev);
	spin_unlock_irq(&dev_priv->irq_lock);
 
-	drm_modeset_lock_all(dev);
	intel_display_resume(dev);
-	drm_modeset_unlock_all(dev);
 
	intel_dp_mst_resume(dev);
 
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 64cfd446453c..10480939159c 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -59,7 +59,7 @@
 
 #define DRIVER_NAME		"i915"
 #define DRIVER_DESC		"Intel Graphics"
-#define DRIVER_DATE		"20160214"
+#define DRIVER_DATE		"20160229"
 
 #undef WARN_ON
 /* Many gcc seem to no see through this and fall over :( */
@@ -261,6 +261,9 @@ struct i915_hotplug {
 
 #define for_each_pipe(__dev_priv, __p) \
	for ((__p) = 0; (__p) < INTEL_INFO(__dev_priv)->num_pipes; (__p)++)
+#define for_each_pipe_masked(__dev_priv, __p, __mask) \
+	for ((__p) = 0; (__p) < INTEL_INFO(__dev_priv)->num_pipes; (__p)++) \
+		for_each_if ((__mask) & (1 << (__p)))
 #define for_each_plane(__dev_priv, __pipe, __p) \
	for ((__p) = 0; \
	     (__p) < INTEL_INFO(__dev_priv)->num_sprites[(__pipe)] + 1; \
@@ -746,6 +749,7 @@ struct intel_csr {
	uint32_t mmio_count;
	i915_reg_t mmioaddr[8];
	uint32_t mmiodata[8];
+	uint32_t dc_state;
 };
 
 #define DEV_INFO_FOR_EACH_FLAG(func, sep) \
@@ -1848,6 +1852,7 @@ struct drm_i915_private {
 
	enum modeset_restore modeset_restore;
	struct mutex modeset_restore_lock;
+	struct drm_atomic_state *modeset_restore_state;
 
	struct list_head vm_list; /* Global list of all address spaces */
	struct i915_gtt gtt; /* VM representing the global address space */
@@ -3058,7 +3063,7 @@ int i915_gem_init_rings(struct drm_device *dev);
 int __must_check i915_gem_init_hw(struct drm_device *dev);
 int i915_gem_l3_remap(struct drm_i915_gem_request *req, int slice);
 void i915_gem_init_swizzling(struct drm_device *dev);
-void i915_gem_cleanup_engines(struct drm_device *dev);
+void i915_gem_cleanup_ringbuffer(struct drm_device *dev);
 int __must_check i915_gpu_idle(struct drm_device *dev);
 int __must_check i915_gem_suspend(struct drm_device *dev);
 void __i915_add_request(struct drm_i915_gem_request *req,
@@ -3151,18 +3156,11 @@ bool i915_gem_obj_is_pinned(struct drm_i915_gem_object *obj);
 /* Some GGTT VM helpers */
 #define i915_obj_to_ggtt(obj) \
	(&((struct drm_i915_private *)(obj)->base.dev->dev_private)->gtt.base)
-static inline bool i915_is_ggtt(struct i915_address_space *vm)
-{
-	struct i915_address_space *ggtt =
-		&((struct drm_i915_private *)(vm)->dev->dev_private)->gtt.base;
-	return vm == ggtt;
-}
 
 static inline struct i915_hw_ppgtt *
 i915_vm_to_ppgtt(struct i915_address_space *vm)
 {
	WARN_ON(i915_is_ggtt(vm));
-
	return container_of(vm, struct i915_hw_ppgtt, base);
 }
 
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index de57e7f0be0f..3d31d3ac589e 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -138,10 +138,10 @@ i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
 
	pinned = 0;
	mutex_lock(&dev->struct_mutex);
-	list_for_each_entry(vma, &ggtt->base.active_list, mm_list)
+	list_for_each_entry(vma, &ggtt->base.active_list, vm_link)
		if (vma->pin_count)
			pinned += vma->node.size;
-	list_for_each_entry(vma, &ggtt->base.inactive_list, mm_list)
+	list_for_each_entry(vma, &ggtt->base.inactive_list, vm_link)
		if (vma->pin_count)
			pinned += vma->node.size;
	mutex_unlock(&dev->struct_mutex);
@@ -272,7 +272,7 @@ drop_pages(struct drm_i915_gem_object *obj)
	int ret;
 
	drm_gem_object_reference(&obj->base);
-	list_for_each_entry_safe(vma, next, &obj->vma_list, vma_link)
+	list_for_each_entry_safe(vma, next, &obj->vma_list, obj_link)
		if (i915_vma_unbind(vma))
			break;
 
@@ -489,7 +489,7 @@ int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj,
 
	*needs_clflush = 0;
 
-	if (!obj->base.filp)
+	if (WARN_ON((obj->ops->flags & I915_GEM_OBJECT_HAS_STRUCT_PAGE) == 0))
		return -EINVAL;
 
	if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU)) {
@@ -2416,7 +2416,7 @@ void i915_vma_move_to_active(struct i915_vma *vma,
	list_move_tail(&obj->ring_list[ring->id], &ring->active_list);
	i915_gem_request_assign(&obj->last_read_req[ring->id], req);
 
-	list_move_tail(&vma->mm_list, &vma->vm->active_list);
+	list_move_tail(&vma->vm_link, &vma->vm->active_list);
 }
 
 static void
@@ -2454,9 +2454,9 @@ i915_gem_object_retire__read(struct drm_i915_gem_object *obj, int ring)
	list_move_tail(&obj->global_list,
		       &to_i915(obj->base.dev)->mm.bound_list);
 
-	list_for_each_entry(vma, &obj->vma_list, vma_link) {
-		if (!list_empty(&vma->mm_list))
-			list_move_tail(&vma->mm_list, &vma->vm->inactive_list);
+	list_for_each_entry(vma, &obj->vma_list, obj_link) {
+		if (!list_empty(&vma->vm_link))
+			list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
	}
 
	i915_gem_request_assign(&obj->last_fenced_req, NULL);
@@ -2970,11 +2970,9 @@ i915_gem_retire_requests(struct drm_device *dev)
		i915_gem_retire_requests_ring(ring);
		idle &= list_empty(&ring->request_list);
		if (i915.enable_execlists) {
-			unsigned long flags;
-
-			spin_lock_irqsave(&ring->execlist_lock, flags);
+			spin_lock_irq(&ring->execlist_lock);
			idle &= list_empty(&ring->execlist_queue);
-			spin_unlock_irqrestore(&ring->execlist_lock, flags);
+			spin_unlock_irq(&ring->execlist_lock);
 
			intel_execlists_retire_requests(ring);
		}
@@ -3319,7 +3317,7 @@ static int __i915_vma_unbind(struct i915_vma *vma, bool wait)
	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
	int ret;
 
-	if (list_empty(&vma->vma_link))
+	if (list_empty(&vma->obj_link))
		return 0;
 
	if (!drm_mm_node_allocated(&vma->node)) {
@@ -3338,8 +3336,7 @@ static int __i915_vma_unbind(struct i915_vma *vma, bool wait)
			return ret;
	}
 
-	if (i915_is_ggtt(vma->vm) &&
-	    vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL) {
+	if (vma->is_ggtt && vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL) {
		i915_gem_object_finish_gtt(obj);
 
		/* release the fence reg _after_ flushing */
@@ -3353,8 +3350,8 @@ static int __i915_vma_unbind(struct i915_vma *vma, bool wait)
	vma->vm->unbind_vma(vma);
	vma->bound = 0;
 
-	list_del_init(&vma->mm_list);
-	if (i915_is_ggtt(vma->vm)) {
+	list_del_init(&vma->vm_link);
+	if (vma->is_ggtt) {
		if (vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL) {
			obj->map_and_fenceable = false;
		} else if (vma->ggtt_view.pages) {
@@ -3611,7 +3608,7 @@ search_free:
		goto err_remove_node;
 
	list_move_tail(&obj->global_list, &dev_priv->mm.bound_list);
-	list_add_tail(&vma->mm_list, &vm->inactive_list);
+	list_add_tail(&vma->vm_link, &vm->inactive_list);
 
	return vma;
 
@@ -3776,7 +3773,7 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
	/* And bump the LRU for this access */
	vma = i915_gem_obj_to_ggtt(obj);
	if (vma && drm_mm_node_allocated(&vma->node) && !obj->active)
-		list_move_tail(&vma->mm_list,
+		list_move_tail(&vma->vm_link,
			       &to_i915(obj->base.dev)->gtt.base.inactive_list);
 
	return 0;
@@ -3811,7 +3808,7 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
	 * catch the issue of the CS prefetch crossing page boundaries and
	 * reading an invalid PTE on older architectures.
	 */
-	list_for_each_entry_safe(vma, next, &obj->vma_list, vma_link) {
+	list_for_each_entry_safe(vma, next, &obj->vma_list, obj_link) {
		if (!drm_mm_node_allocated(&vma->node))
			continue;
 
@@ -3874,7 +3871,7 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
		 */
	}
 
-	list_for_each_entry(vma, &obj->vma_list, vma_link) {
+	list_for_each_entry(vma, &obj->vma_list, obj_link) {
		if (!drm_mm_node_allocated(&vma->node))
			continue;
 
@@ -3884,7 +3881,7 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
		}
	}
 
-	list_for_each_entry(vma, &obj->vma_list, vma_link)
+	list_for_each_entry(vma, &obj->vma_list, obj_link)
		vma->node.color = cache_level;
	obj->cache_level = cache_level;
 
@@ -4558,7 +4555,7 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)
 
	trace_i915_gem_object_destroy(obj);
 
-	list_for_each_entry_safe(vma, next, &obj->vma_list, vma_link) {
+	list_for_each_entry_safe(vma, next, &obj->vma_list, obj_link) {
		int ret;
 
		vma->pin_count = 0;
@@ -4615,7 +4612,7 @@ struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
				     struct i915_address_space *vm)
 {
	struct i915_vma *vma;
-	list_for_each_entry(vma, &obj->vma_list, vma_link) {
+	list_for_each_entry(vma, &obj->vma_list, obj_link) {
		if (vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL &&
		    vma->vm == vm)
			return vma;
@@ -4632,7 +4629,7 @@ struct i915_vma *i915_gem_obj_to_ggtt_view(struct drm_i915_gem_object *obj,
	if (WARN_ONCE(!view, "no view specified"))
		return ERR_PTR(-EINVAL);
 
-	list_for_each_entry(vma, &obj->vma_list, vma_link)
+	list_for_each_entry(vma, &obj->vma_list, obj_link)
		if (vma->vm == ggtt &&
		    i915_ggtt_view_equal(&vma->ggtt_view, view))
			return vma;
@@ -4641,19 +4638,16 @@ struct i915_vma *i915_gem_obj_to_ggtt_view(struct drm_i915_gem_object *obj,
 
 void i915_gem_vma_destroy(struct i915_vma *vma)
 {
-	struct i915_address_space *vm = NULL;
	WARN_ON(vma->node.allocated);
 
	/* Keep the vma as a placeholder in the execbuffer reservation lists */
	if (!list_empty(&vma->exec_list))
		return;
 
-	vm = vma->vm;
+	if (!vma->is_ggtt)
+		i915_ppgtt_put(i915_vm_to_ppgtt(vma->vm));
 
-	if (!i915_is_ggtt(vm))
-		i915_ppgtt_put(i915_vm_to_ppgtt(vm));
-
-	list_del(&vma->vma_link);
+	list_del(&vma->obj_link);
 
	kmem_cache_free(to_i915(vma->obj->base.dev)->vmas, vma);
 }
@@ -4913,7 +4907,7 @@ i915_gem_init_hw(struct drm_device *dev)
		req = i915_gem_request_alloc(ring, NULL);
		if (IS_ERR(req)) {
			ret = PTR_ERR(req);
-			i915_gem_cleanup_engines(dev);
+			i915_gem_cleanup_ringbuffer(dev);
			goto out;
		}
 
@@ -4926,7 +4920,7 @@ i915_gem_init_hw(struct drm_device *dev)
		if (ret && ret != -EIO) {
			DRM_ERROR("PPGTT enable ring #%d failed %d\n", i, ret);
			i915_gem_request_cancel(req);
-			i915_gem_cleanup_engines(dev);
+			i915_gem_cleanup_ringbuffer(dev);
			goto out;
		}
 
@@ -4934,7 +4928,7 @@ i915_gem_init_hw(struct drm_device *dev)
		if (ret && ret != -EIO) {
			DRM_ERROR("Context enable ring #%d failed %d\n", i, ret);
			i915_gem_request_cancel(req);
-			i915_gem_cleanup_engines(dev);
+			i915_gem_cleanup_ringbuffer(dev);
			goto out;
		}
 
@@ -5009,7 +5003,7 @@ out_unlock:
 }
 
 void
-i915_gem_cleanup_engines(struct drm_device *dev)
+i915_gem_cleanup_ringbuffer(struct drm_device *dev)
 {
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *ring;
@@ -5018,14 +5012,13 @@ i915_gem_cleanup_ringbuffer(struct drm_device *dev)
	for_each_ring(ring, dev_priv, i)
		dev_priv->gt.cleanup_ring(ring);
 
-	if (i915.enable_execlists) {
-		/*
-		 * Neither the BIOS, ourselves or any other kernel
-		 * expects the system to be in execlists mode on startup,
-		 * so we need to reset the GPU back to legacy mode.
-		 */
-		intel_gpu_reset(dev);
-	}
+	if (i915.enable_execlists)
+		/*
+		 * Neither the BIOS, ourselves or any other kernel
+		 * expects the system to be in execlists mode on startup,
+		 * so we need to reset the GPU back to legacy mode.
+		 */
+		intel_gpu_reset(dev);
 }
 
 static void
@@ -5204,8 +5197,8 @@ u64 i915_gem_obj_offset(struct drm_i915_gem_object *o,
 
	WARN_ON(vm == &dev_priv->mm.aliasing_ppgtt->base);
 
-	list_for_each_entry(vma, &o->vma_list, vma_link) {
-		if (i915_is_ggtt(vma->vm) &&
+	list_for_each_entry(vma, &o->vma_list, obj_link) {
+		if (vma->is_ggtt &&
		    vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL)
			continue;
		if (vma->vm == vm)
@@ -5223,7 +5216,7 @@ u64 i915_gem_obj_ggtt_offset_view(struct drm_i915_gem_object *o,
	struct i915_address_space *ggtt = i915_obj_to_ggtt(o);
	struct i915_vma *vma;
 
-	list_for_each_entry(vma, &o->vma_list, vma_link)
+	list_for_each_entry(vma, &o->vma_list, obj_link)
		if (vma->vm == ggtt &&
		    i915_ggtt_view_equal(&vma->ggtt_view, view))
			return vma->node.start;
@@ -5237,8 +5230,8 @@ bool i915_gem_obj_bound(struct drm_i915_gem_object *o,
 {
	struct i915_vma *vma;
 
-	list_for_each_entry(vma, &o->vma_list, vma_link) {
-		if (i915_is_ggtt(vma->vm) &&
+	list_for_each_entry(vma, &o->vma_list, obj_link) {
+		if (vma->is_ggtt &&
		    vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL)
			continue;
		if (vma->vm == vm && drm_mm_node_allocated(&vma->node))
@@ -5254,7 +5247,7 @@ bool i915_gem_obj_ggtt_bound_view(struct drm_i915_gem_object *o,
	struct i915_address_space *ggtt = i915_obj_to_ggtt(o);
	struct i915_vma *vma;
 
-	list_for_each_entry(vma, &o->vma_list, vma_link)
+	list_for_each_entry(vma, &o->vma_list, obj_link)
		if (vma->vm == ggtt &&
		    i915_ggtt_view_equal(&vma->ggtt_view, view) &&
		    drm_mm_node_allocated(&vma->node))
@@ -5267,7 +5260,7 @@ bool i915_gem_obj_bound_any(struct drm_i915_gem_object *o)
 {
	struct i915_vma *vma;
 
-	list_for_each_entry(vma, &o->vma_list, vma_link)
+	list_for_each_entry(vma, &o->vma_list, obj_link)
		if (drm_mm_node_allocated(&vma->node))
			return true;
 
@@ -5284,8 +5277,8 @@ unsigned long i915_gem_obj_size(struct drm_i915_gem_object *o,
 
	BUG_ON(list_empty(&o->vma_list));
 
-	list_for_each_entry(vma, &o->vma_list, vma_link) {
-		if (i915_is_ggtt(vma->vm) &&
+	list_for_each_entry(vma, &o->vma_list, obj_link) {
+		if (vma->is_ggtt &&
		    vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL)
			continue;
		if (vma->vm == vm)
@@ -5297,7 +5290,7 @@ unsigned long i915_gem_obj_size(struct drm_i915_gem_object *o,
 bool i915_gem_obj_is_pinned(struct drm_i915_gem_object *obj)
 {
	struct i915_vma *vma;
-	list_for_each_entry(vma, &obj->vma_list, vma_link)
+	list_for_each_entry(vma, &obj->vma_list, obj_link)
		if (vma->pin_count > 0)
			return true;
 
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index 83a097c94911..5dd84e148bba 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -142,7 +142,7 @@ static void i915_gem_context_clean(struct intel_context *ctx)
		return;
 
	list_for_each_entry_safe(vma, next, &ppgtt->base.inactive_list,
-				 mm_list) {
+				 vm_link) {
		if (WARN_ON(__i915_vma_unbind_no_wait(vma)))
			break;
	}
@@ -855,6 +855,9 @@ int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
	if (!contexts_enabled(dev))
		return -ENODEV;
 
+	if (args->pad != 0)
+		return -EINVAL;
+
	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;
@@ -878,6 +881,9 @@ int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
	struct intel_context *ctx;
	int ret;
 
+	if (args->pad != 0)
+		return -EINVAL;
+
	if (args->ctx_id == DEFAULT_CONTEXT_HANDLE)
		return -ENOENT;
 
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index 07c6e4d320c9..ea1f8d1bd228 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -116,7 +116,7 @@ i915_gem_evict_something(struct drm_device *dev, struct i915_address_space *vm,
 
 search_again:
	/* First see if there is a large enough contiguous idle region... */
-	list_for_each_entry(vma, &vm->inactive_list, mm_list) {
+	list_for_each_entry(vma, &vm->inactive_list, vm_link) {
		if (mark_free(vma, &unwind_list))
			goto found;
	}
@@ -125,7 +125,7 @@ search_again:
		goto none;
 
	/* Now merge in the soon-to-be-expired objects... */
-	list_for_each_entry(vma, &vm->active_list, mm_list) {
+	list_for_each_entry(vma, &vm->active_list, vm_link) {
		if (mark_free(vma, &unwind_list))
			goto found;
	}
@@ -270,7 +270,7 @@ int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle)
		WARN_ON(!list_empty(&vm->active_list));
	}
 
-	list_for_each_entry_safe(vma, next, &vm->inactive_list, mm_list)
+	list_for_each_entry_safe(vma, next, &vm->inactive_list, vm_link)
		if (vma->pin_count == 0)
			WARN_ON(i915_vma_unbind(vma));
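
For context, a minimal sketch of the display power-domain pattern the debugfs hunks above convert to: intel_display_power_get_if_enabled() takes a power reference only if the domain is already up, and every exit path must then drop it with intel_display_power_put(), which is why pipe_crc_set_source() gains the out: label. The caller below, example_read_pipe_regs(), is hypothetical and not part of this changeset; only the two power-domain calls come from the series itself.

/* Hypothetical caller illustrating the get-if-enabled/put pairing. */
static int example_read_pipe_regs(struct drm_i915_private *dev_priv,
				  enum pipe pipe)
{
	enum intel_display_power_domain power_domain;
	int ret = 0;

	power_domain = POWER_DOMAIN_PIPE(pipe);
	/* Take a wakeref only if the pipe's power well is already on. */
	if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
		return -EIO;

	/* ... touch pipe registers safely while the reference is held ... */

	/* Drop the reference on every exit path. */
	intel_display_power_put(dev_priv, power_domain);
	return ret;
}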
