| author | Dave Airlie <airlied@redhat.com> | 2014-11-04 07:36:06 +1000 |
|---|---|---|
| committer | Dave Airlie <airlied@redhat.com> | 2014-11-04 07:36:06 +1000 |
| commit | 041df3573d0ce74b7f2f505c4224c8ee9be14a7c (patch) | |
| tree | bd70ff8a5613cd1f4bd0e316874aa90d00a0c5ff | |
| parent | bbf0ef0334f2267687a92ec6d8114fd67b8157a3 (diff) | |
| parent | 3eebaec630c2413a5e67bb7f49f0c6a53069a399 (diff) | |
Merge tag 'drm-intel-next-2014-10-24' of git://anongit.freedesktop.org/drm-intel into drm-next
- suspend/resume/freeze/thaw unification from Imre
- wa list improvements from Mika&Arun
- display pll precomputation from Ander Conselvan; this removed the last
->mode_set callbacks, a big step towards implementing atomic modesets
- more kerneldoc for the interrupt code
- 180 degree rotation for cursors (Ville & Sonika)
- ULT/ULX feature check macros cleaned up thanks to Damien
- piles and piles of fixes all over, bug team seems to work!
* tag 'drm-intel-next-2014-10-24' of git://anongit.freedesktop.org/drm-intel: (61 commits)
drm/i915: Update DRIVER_DATE to 20141024
drm/i915: add comments on what stage a given PM handler is called
drm/i915: unify switcheroo and legacy suspend/resume handlers
drm/i915: add poweroff_late handler
drm/i915: sanitize suspend/resume helper function names
drm/i915: unify S3 and S4 suspend/resume handlers
drm/i915: disable/re-enable PCI device around S4 freeze/thaw
drm/i915: enable output polling during S4 thaw
drm/i915: check for GT faults in all resume handlers and driver load time
drm/i915: remove unused restore_gtt_mappings optimization during suspend
drm/i915: fix S4 suspend while switcheroo state is off
drm/i915: vlv: fix switcheroo/legacy suspend/resume
drm/i915: propagate error from legacy resume handler
drm/i915: unify legacy S3 suspend and S4 freeze handlers
drm/i915: factor out i915_drm_suspend_late
drm/i915: Emit even number of dwords when emitting LRIs
drm/i915: Add rotation support for cursor plane (v5)
drm/i915: Correctly reject invalid flags for wait_ioctl
drm/i915: use macros to assign mmio access functions
drm/i915: only run hsw_power_well_post_enable when really needed
...
24 files changed, 1145 insertions, 936 deletions
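The headline change in this pull is the suspend/resume unification visible in the i915_pm_ops hunk of the diff below: the S4 (hibernation) freeze/thaw/poweroff/restore slots are simply pointed at the same handlers that already serve the S3 suspend/resume path. A minimal standalone sketch of that wiring pattern, with hypothetical my_drv_* handlers standing in for the driver's real i915_pm_* functions:

```c
/*
 * Illustrative sketch only (not code from this merge): one pair of
 * suspend/resume handler chains serves both the S3 (suspend-to-RAM)
 * and S4 (hibernation) legs of dev_pm_ops. The my_drv_* names are
 * hypothetical placeholders.
 */
#include <linux/device.h>
#include <linux/pm.h>

static int my_drv_suspend(struct device *dev)      { return 0; } /* quiesce hw, save state */
static int my_drv_suspend_late(struct device *dev) { return 0; } /* power down the PCI device */
static int my_drv_resume_early(struct device *dev) { return 0; } /* re-enable PCI, sanitize hw */
static int my_drv_resume(struct device *dev)       { return 0; } /* restore state, restart hw */

static const struct dev_pm_ops my_drv_pm_ops = {
	/* S3: suspend-to-RAM */
	.suspend	= my_drv_suspend,
	.suspend_late	= my_drv_suspend_late,
	.resume_early	= my_drv_resume_early,
	.resume		= my_drv_resume,

	/* S4: hibernation reuses the same handlers */
	.freeze		= my_drv_suspend,
	.freeze_late	= my_drv_suspend_late,
	.thaw_early	= my_drv_resume_early,
	.thaw		= my_drv_resume,
	.poweroff	= my_drv_suspend,
	.poweroff_late	= my_drv_suspend_late,
	.restore_early	= my_drv_resume_early,
	.restore	= my_drv_resume,
};
```

The benefit is that only one suspend path and one resume path have to be kept correct; which hibernation phase they run in is decided entirely by the dev_pm_ops slot they are plugged into.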
diff --git a/Documentation/DocBook/drm.tmpl b/Documentation/DocBook/drm.tmpl
index d7cfc98be159..f6a9d7b21380 100644
--- a/Documentation/DocBook/drm.tmpl
+++ b/Documentation/DocBook/drm.tmpl
@@ -3831,6 +3831,11 @@ int num_ioctls;</synopsis>
 !Fdrivers/gpu/drm/i915/i915_gem.c i915_gem_track_fb
       </sect2>
       <sect2>
+        <title>Display FIFO Underrun Reporting</title>
+!Pdrivers/gpu/drm/i915/intel_fifo_underrun.c fifo underrun handling
+!Idrivers/gpu/drm/i915/intel_fifo_underrun.c
+      </sect2>
+      <sect2>
         <title>Plane Configuration</title>
         <para>
           This section covers plane configuration and composition with the
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index 3a6bce047f6f..75fd7de9bf4b 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -45,6 +45,7 @@ i915-y += intel_renderstate_gen6.o \
 # modesetting core code
 i915-y += intel_bios.o \
 	  intel_display.o \
+	  intel_fifo_underrun.o \
 	  intel_frontbuffer.o \
 	  intel_modes.o \
 	  intel_overlay.o \
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index da4036d0bab9..e60d5c2f4a35 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -1848,6 +1848,8 @@ static int i915_execlists(struct seq_file *m, void *data)
 	if (ret)
 		return ret;
 
+	intel_runtime_pm_get(dev_priv);
+
 	for_each_ring(ring, dev_priv, ring_id) {
 		struct intel_ctx_submit_request *head_req = NULL;
 		int count = 0;
@@ -1899,6 +1901,7 @@ static int i915_execlists(struct seq_file *m, void *data)
 		seq_putc(m, '\n');
 	}
 
+	intel_runtime_pm_put(dev_priv);
 	mutex_unlock(&dev->struct_mutex);
 
 	return 0;
@@ -2655,18 +2658,18 @@ static int i915_wa_registers(struct seq_file *m, void *unused)
 
 	intel_runtime_pm_get(dev_priv);
 
-	seq_printf(m, "Workarounds applied: %d\n", dev_priv->num_wa_regs);
-	for (i = 0; i < dev_priv->num_wa_regs; ++i) {
-		u32 addr, mask;
-
-		addr = dev_priv->intel_wa_regs[i].addr;
-		mask = dev_priv->intel_wa_regs[i].mask;
-		dev_priv->intel_wa_regs[i].value = I915_READ(addr) | mask;
-		if (dev_priv->intel_wa_regs[i].addr)
-			seq_printf(m, "0x%X: 0x%08X, mask: 0x%08X\n",
-				   dev_priv->intel_wa_regs[i].addr,
-				   dev_priv->intel_wa_regs[i].value,
-				   dev_priv->intel_wa_regs[i].mask);
+	seq_printf(m, "Workarounds applied: %d\n", dev_priv->workarounds.count);
+	for (i = 0; i < dev_priv->workarounds.count; ++i) {
+		u32 addr, mask, value, read;
+		bool ok;
+
+		addr = dev_priv->workarounds.reg[i].addr;
+		mask = dev_priv->workarounds.reg[i].mask;
+		value = dev_priv->workarounds.reg[i].value;
+		read = I915_READ(addr);
+		ok = (value & mask) == (read & mask);
+		seq_printf(m, "0x%X: 0x%08X, mask: 0x%08X, read: 0x%08x, status: %s\n",
+			   addr, value, mask, read, ok ? "OK" : "FAIL");
 	}
 
 	intel_runtime_pm_put(dev_priv);
@@ -3255,6 +3258,8 @@ static int pipe_crc_set_source(struct drm_device *dev, enum pipe pipe,
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
+	struct intel_crtc *crtc = to_intel_crtc(intel_get_crtc_for_pipe(dev,
+									pipe));
 	u32 val = 0; /* shut up gcc */
 	int ret;
 
@@ -3290,6 +3295,14 @@ static int pipe_crc_set_source(struct drm_device *dev, enum pipe pipe,
 		if (!pipe_crc->entries)
 			return -ENOMEM;
 
+		/*
+		 * When IPS gets enabled, the pipe CRC changes. Since IPS gets
+		 * enabled and disabled dynamically based on package C states,
+		 * user space can't make reliable use of the CRCs, so let's just
+		 * completely disable it.
+		 */
+		hsw_disable_ips(crtc);
+
 		spin_lock_irq(&pipe_crc->lock);
 		pipe_crc->head = 0;
 		pipe_crc->tail = 0;
@@ -3328,6 +3341,8 @@ static int pipe_crc_set_source(struct drm_device *dev, enum pipe pipe,
 			vlv_undo_pipe_scramble_reset(dev, pipe);
 		else if (IS_HASWELL(dev) && pipe == PIPE_A)
 			hsw_undo_trans_edp_pipe_A_crc_wa(dev);
+
+		hsw_enable_ips(crtc);
 	}
 
 	return 0;
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 85d14e169409..9a7353302b3f 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -1275,12 +1275,12 @@ static void i915_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_
 		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
 		/* i915 resume handler doesn't set to D0 */
 		pci_set_power_state(dev->pdev, PCI_D0);
-		i915_resume(dev);
+		i915_resume_legacy(dev);
 		dev->switch_power_state = DRM_SWITCH_POWER_ON;
 	} else {
 		pr_err("switched off\n");
 		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
-		i915_suspend(dev, pmm);
+		i915_suspend_legacy(dev, pmm);
 		dev->switch_power_state = DRM_SWITCH_POWER_OFF;
 	}
 }
@@ -1853,8 +1853,12 @@ int i915_driver_unload(struct drm_device *dev)
 
 	acpi_video_unregister();
 
-	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+	if (drm_core_check_feature(dev, DRIVER_MODESET))
 		intel_fbdev_fini(dev);
+
+	drm_vblank_cleanup(dev);
+
+	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
 		intel_modeset_cleanup(dev);
 
 		/*
@@ -1895,8 +1899,6 @@ int i915_driver_unload(struct drm_device *dev)
 		i915_free_hws(dev);
 	}
 
-	drm_vblank_cleanup(dev);
-
 	intel_teardown_gmbus(dev);
 	intel_teardown_mchbar(dev);
 
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index bd7978cb094f..035ec94ca3c7 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -463,7 +463,7 @@ void intel_detect_pch(struct drm_device *dev)
 				dev_priv->pch_type = PCH_LPT;
 				DRM_DEBUG_KMS("Found LynxPoint PCH\n");
 				WARN_ON(!IS_HASWELL(dev));
-				WARN_ON(IS_ULT(dev));
+				WARN_ON(IS_HSW_ULT(dev));
 			} else if (IS_BROADWELL(dev)) {
 				dev_priv->pch_type = PCH_LPT;
 				dev_priv->pch_id =
@@ -474,17 +474,15 @@ void intel_detect_pch(struct drm_device *dev)
 				dev_priv->pch_type = PCH_LPT;
 				DRM_DEBUG_KMS("Found LynxPoint LP PCH\n");
 				WARN_ON(!IS_HASWELL(dev));
-				WARN_ON(!IS_ULT(dev));
+				WARN_ON(!IS_HSW_ULT(dev));
 			} else if (id == INTEL_PCH_SPT_DEVICE_ID_TYPE) {
 				dev_priv->pch_type = PCH_SPT;
 				DRM_DEBUG_KMS("Found SunrisePoint PCH\n");
 				WARN_ON(!IS_SKYLAKE(dev));
-				WARN_ON(IS_ULT(dev));
 			} else if (id == INTEL_PCH_SPT_LP_DEVICE_ID_TYPE) {
 				dev_priv->pch_type = PCH_SPT;
 				DRM_DEBUG_KMS("Found SunrisePoint LP PCH\n");
 				WARN_ON(!IS_SKYLAKE(dev));
-				WARN_ON(!IS_ULT(dev));
 			} else
 				continue;
 
@@ -556,7 +554,7 @@ static int intel_suspend_complete(struct drm_i915_private *dev_priv);
 static int intel_resume_prepare(struct drm_i915_private *dev_priv,
 				bool rpm_resume);
 
-static int i915_drm_freeze(struct drm_device *dev)
+static int i915_drm_suspend(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_crtc *crtc;
@@ -632,7 +630,26 @@ static int i915_drm_freeze(struct drm_device *dev)
 	return 0;
 }
 
-int i915_suspend(struct drm_device *dev, pm_message_t state)
+static int i915_drm_suspend_late(struct drm_device *drm_dev)
+{
+	struct drm_i915_private *dev_priv = drm_dev->dev_private;
+	int ret;
+
+	ret = intel_suspend_complete(dev_priv);
+
+	if (ret) {
+		DRM_ERROR("Suspend complete failed: %d\n", ret);
+
+		return ret;
+	}
+
+	pci_disable_device(drm_dev->pdev);
+	pci_set_power_state(drm_dev->pdev, PCI_D3hot);
+
+	return 0;
+}
+
+int i915_suspend_legacy(struct drm_device *dev, pm_message_t state)
 {
 	int error;
 
@@ -642,48 +659,25 @@ int i915_suspend(struct drm_device *dev, pm_message_t state)
 		return -ENODEV;
 	}
 
-	if (state.event == PM_EVENT_PRETHAW)
-		return 0;
-
+	if (WARN_ON_ONCE(state.event != PM_EVENT_SUSPEND &&
+			 state.event != PM_EVENT_FREEZE))
+		return -EINVAL;
 
 	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
 		return 0;
 
-	error = i915_drm_freeze(dev);
+	error = i915_drm_suspend(dev);
 	if (error)
 		return error;
 
-	if (state.event == PM_EVENT_SUSPEND) {
-		/* Shut down the device */
-		pci_disable_device(dev->pdev);
-		pci_set_power_state(dev->pdev, PCI_D3hot);
-	}
-
-	return 0;
+	return i915_drm_suspend_late(dev);
 }
 
-static int i915_drm_thaw_early(struct drm_device *dev)
+static int i915_drm_resume(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	int ret;
-
-	ret = intel_resume_prepare(dev_priv, false);
-	if (ret)
-		DRM_ERROR("Resume prepare failed: %d,Continuing resume\n", ret);
-
-	intel_uncore_early_sanitize(dev, true);
-	intel_uncore_sanitize(dev);
-	intel_power_domains_init_hw(dev_priv);
-	return ret;
-}
-
-static int __i915_drm_thaw(struct drm_device *dev, bool restore_gtt_mappings)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-
-	if (drm_core_check_feature(dev, DRIVER_MODESET) &&
-	    restore_gtt_mappings) {
+	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
 		mutex_lock(&dev->struct_mutex);
 		i915_gem_restore_gtt_mappings(dev);
 		mutex_unlock(&dev->struct_mutex);
@@ -742,21 +736,15 @@ static int __i915_drm_thaw(struct drm_device *dev, bool restore_gtt_mappings)
 
 	intel_opregion_notify_adapter(dev, PCI_D0);
 
-	return 0;
-}
-
-static int i915_drm_thaw(struct drm_device *dev)
-{
-	if (drm_core_check_feature(dev, DRIVER_MODESET))
-		i915_check_and_clear_faults(dev);
+	drm_kms_helper_poll_enable(dev);
 
-	return __i915_drm_thaw(dev, true);
+	return 0;
 }
 
-static int i915_resume_early(struct drm_device *dev)
+static int i915_drm_resume_early(struct drm_device *dev)
 {
-	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
-		return 0;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int ret;
 
 	/*
 	 * We have a resume ordering issue with the snd-hda driver also
@@ -772,33 +760,29 @@ static int i915_resume_early(struct drm_device *dev)
 
 	pci_set_master(dev->pdev);
 
-	return i915_drm_thaw_early(dev);
+	ret = intel_resume_prepare(dev_priv, false);
+	if (ret)
+		DRM_ERROR("Resume prepare failed: %d,Continuing resume\n", ret);
+
+	intel_uncore_early_sanitize(dev, true);
+	intel_uncore_sanitize(dev);
+	intel_power_domains_init_hw(dev_priv);
+
+	return ret;
 }
 
-int i915_resume(struct drm_device *dev)
+int i915_resume_legacy(struct drm_device *dev)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
 	int ret;
 
-	/*
-	 * Platforms with opregion should have sane BIOS, older ones (gen3 and
-	 * earlier) need to restore the GTT mappings since the BIOS might clear
-	 * all our scratch PTEs.
-	 */
-	ret = __i915_drm_thaw(dev, !dev_priv->opregion.header);
+	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
+		return 0;
+
+	ret = i915_drm_resume_early(dev);
 	if (ret)
 		return ret;
 
-	drm_kms_helper_poll_enable(dev);
-	return 0;
-}
-
-static int i915_resume_legacy(struct drm_device *dev)
-{
-	i915_resume_early(dev);
-	i915_resume(dev);
-
-	return 0;
+	return i915_drm_resume(dev);
 }
 
 /**
@@ -950,15 +934,13 @@ static int i915_pm_suspend(struct device *dev)
 	if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
 		return 0;
 
-	return i915_drm_freeze(drm_dev);
+	return i915_drm_suspend(drm_dev);
 }
 
 static int i915_pm_suspend_late(struct device *dev)
 {
 	struct pci_dev *pdev = to_pci_dev(dev);
 	struct drm_device *drm_dev = pci_get_drvdata(pdev);
-	struct drm_i915_private *dev_priv = drm_dev->dev_private;
-	int ret;
 
 	/*
 	 * We have a suspedn ordering issue with the snd-hda driver also
@@ -972,16 +954,7 @@ static int i915_pm_suspend_late(struct device *dev)
 	if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
 		return 0;
 
-	ret = intel_suspend_complete(dev_priv);
-
-	if (ret)
-		DRM_ERROR("Suspend complete failed: %d\n", ret);
-	else {
-		pci_disable_device(pdev);
-		pci_set_power_state(pdev, PCI_D3hot);
-	}
-
-	return ret;
+	return i915_drm_suspend_late(drm_dev);
 }
 
 static int i915_pm_resume_early(struct device *dev)
@@ -989,52 +962,21 @@ static int i915_pm_resume_early(struct device *dev)
 	struct pci_dev *pdev = to_pci_dev(dev);
 	struct drm_device *drm_dev = pci_get_drvdata(pdev);
 
-	return i915_resume_early(drm_dev);
-}
-
-static int i915_pm_resume(struct device *dev)
-{
-	struct pci_dev *pdev = to_pci_dev(dev);
-	struct drm_device *drm_dev = pci_get_drvdata(pdev);
-
-	return i915_resume(drm_dev);
-}
-
-static int i915_pm_freeze(struct device *dev)
-{
-	struct pci_dev *pdev = to_pci_dev(dev);
-	struct drm_device *drm_dev = pci_get_drvdata(pdev);
-
-	if (!drm_dev || !drm_dev->dev_private) {
-		dev_err(dev, "DRM not initialized, aborting suspend.\n");
-		return -ENODEV;
-	}
-
-	return i915_drm_freeze(drm_dev);
-}
-
-static int i915_pm_thaw_early(struct device *dev)
-{
-	struct pci_dev *pdev = to_pci_dev(dev);
-	struct drm_device *drm_dev = pci_get_drvdata(pdev);
+	if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
+		return 0;
 
-	return i915_drm_thaw_early(drm_dev);
+	return i915_drm_resume_early(drm_dev);
 }
 
-static int i915_pm_thaw(struct device *dev)
+static int i915_pm_resume(struct device *dev)
 {
 	struct pci_dev *pdev = to_pci_dev(dev);
 	struct drm_device *drm_dev = pci_get_drvdata(pdev);
 
-	return i915_drm_thaw(drm_dev);
-}
-
-static int i915_pm_poweroff(struct device *dev)
-{
-	struct pci_dev *pdev = to_pci_dev(dev);
-	struct drm_device *drm_dev = pci_get_drvdata(pdev);
+	if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
+		return 0;
 
-	return i915_drm_freeze(drm_dev);
+	return i915_drm_resume(drm_dev);
 }
 
 static int hsw_suspend_complete(struct drm_i915_private *dev_priv)
@@ -1592,16 +1534,40 @@ static int intel_resume_prepare(struct drm_i915_private *dev_priv,
 }
 
 static const struct dev_pm_ops i915_pm_ops = {
+	/*
+	 * S0ix (via system suspend) and S3 event handlers [PMSG_SUSPEND,
+	 * PMSG_RESUME]
+	 */
 	.suspend = i915_pm_suspend,
 	.suspend_late = i915_pm_suspend_late,
 	.resume_early = i915_pm_resume_early,
 	.resume = i915_pm_resume,
-	.freeze = i915_pm_freeze,
-	.thaw_early = i915_pm_thaw_early,
-	.thaw = i915_pm_thaw,
-	.poweroff = i915_pm_poweroff,
+
+	/*
+	 * S4 event handlers
+	 * @freeze, @freeze_late    : called (1) before creating the
+	 *                            hibernation image [PMSG_FREEZE] and
+	 *                            (2) after rebooting, before restoring
+	 *                            the image [PMSG_QUIESCE]
+	 * @thaw, @thaw_early       : called (1) after creating the hibernation
+	 *                            image, before writing it [PMSG_THAW]
+	 *                            and (2) after failing to create or
+	 *                            restore the image [PMSG_RECOVER]
+	 * @poweroff, @poweroff_late: called after writing the hibernation
+	 *                            image, before rebooting [PMSG_HIBERNATE]
+	 * @restore, @restore_early : called after rebooting and restoring the
+	 *                            hibernation image [PMSG_RESTORE]
+	 */
+	.freeze = i915_pm_suspend,
+	.freeze_late = i915_pm_suspend_late,
+	.thaw_early = i915_pm_resume_early,
+	.thaw = i915_pm_resume,
+	.poweroff = i915_pm_suspend,
+	.poweroff_late = i915_pm_suspend_late,
 	.restore_early = i915_pm_resume_early,
 	.restore = i915_pm_resume,
+
+	/* S0ix (via runtime suspend) event handlers */
 	.runtime_suspend = intel_runtime_suspend,
 	.runtime_resume = intel_runtime_resume,
 };
@@ -1643,7 +1609,7 @@ static struct drm_driver driver = {
 	.set_busid = drm_pci_set_busid,
 
 	/* Used in place of i915_pm_ops for non-DRIVER_MODESET */
-	.suspend = i915_suspend,
+	.suspend = i915_suspend_legacy,
 	.resume = i915_resume_legacy,
 
 	.device_is_agp = i915_driver_device_is_agp,
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 9962da202456..583c97debeb7 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -55,7 +55,7 @@
 
 #define DRIVER_NAME		"i915"
 #define DRIVER_DESC		"Intel Graphics"
-#define DRIVER_DATE		"20141003"
+#define DRIVER_DATE		"20141024"
 
 enum pipe {
 	INVALID_PIPE = -1,
@@ -460,7 +460,7 @@ struct drm_i915_display_funcs {
 	 * Returns true on success, false on failure.
 	 */
 	bool (*find_dpll)(const struct intel_limit *limit,
-			  struct drm_crtc *crtc,
+			  struct intel_crtc *crtc,
 			  int target, int refclk,
 			  struct dpll *match_clock,
 			  struct dpll *best_clock);
@@ -476,7 +476,7 @@ struct drm_i915_display_funcs {
 			  struct intel_crtc_config *);
 	void (*get_plane_config)(struct intel_crtc *,
 				 struct intel_plane_config *);
-	int (*crtc_mode_set)(struct drm_crtc *crtc,
+	int (*crtc_mode_set)(struct intel_crtc *crtc,
 			     int x, int y,
 			     struct drm_framebuffer *old_fb);
 	void (*crtc_enable)(struct drm_crtc *crtc);
@@ -1448,6 +1448,20 @@ struct i915_frontbuffer_tracking {
 	unsigned flip_bits;
 };
 
+struct i915_wa_reg {
+	u32 addr;
+	u32 value;
+	/* bitmask representing WA bits */
+	u32 mask;
+};
+
+#define I915_MAX_WA_REGS 16
+
+struct i915_workarounds {
+	struct i915_wa_reg reg[I915_MAX_WA_REGS];
+	u32 count;
+};
+
 struct drm_i915_private {
 	struct drm_device *dev;
 	struct kmem_cache *slab;
@@ -1527,6 +1541,8 @@ struct drm_i915_private {
 	struct intel_opregion opregion;
 	struct intel_vbt_data vbt;
 
+	bool preserve_bios_swizzle;
+
 	/* overlay */
 	struct intel_overlay *overlay;
 
@@ -1590,19 +1606,7 @@ struct drm_i915_private {
 	struct intel_shared_dpll shared_dplls[I915_NUM_PLLS];
 	int dpio_phy_iosf_port[I915_NUM_PHYS_VLV];
 
-	/*
-	 * workarounds are currently applied at different places and
-	 * changes are being done to consolidate them so exact count is
-	 * not clear at this point, use a max value for now.
-	 */
-#define I915_MAX_WA_REGS 16
-	struct {
-		u32 addr;
-		u32 value;
-		/* bitmask representing WA bits */
-		u32 mask;
-	} intel_wa_regs[I915_MAX_WA_REGS];
-	u32 num_wa_regs;
+	struct i915_workarounds workarounds;
 
 	/* Reclocking support */
 	bool render_reclock_avail;
@@ -2107,7 +2111,6 @@ struct drm_i915_cmd_table {
 				 (INTEL_DEVID(dev) & 0x00F0) == 0x0020)
 #define IS_HSW_ULT(dev)		(IS_HASWELL(dev) && \
 				 (INTEL_DEVID(dev) & 0xFF00) == 0x0A00)
-#define IS_ULT(dev)		(IS_HSW_ULT(dev) || IS_BDW_ULT(dev))
 #define IS_HSW_GT3(dev)		(IS_HASWELL(dev) && \
 				 (INTEL_DEVID(dev) & 0x00F0) == 0x0020)
 /* ULX machines are also considered ULT. */
@@ -2141,7 +2144,7 @@ struct drm_i915_cmd_table {
 #define HAS_VEBOX(dev)		(INTEL_INFO(dev)->ring_mask & VEBOX_RING)
 #define HAS_LLC(dev)		(INTEL_INFO(dev)->has_llc)
 #define HAS_WT(dev)		((IS_HASWELL(dev) || IS_BROADWELL(dev)) && \
-				 to_i915(dev)->ellc_size)
+				 __I915__(dev)->ellc_size)
 #define I915_NEED_GFX_HWS(dev)	(INTEL_INFO(dev)->need_gfx_hws)
 
 #define HAS_HW_CONTEXTS(dev)	(INTEL_INFO(dev)->gen >= 6)
@@ -2178,13 +2181,15 @@ struct drm_i915_cmd_table {
 #define HAS_PIPE_CXSR(dev)	(INTEL_INFO(dev)->has_pipe_cxsr)
 #define HAS_FBC(dev)		(INTEL_INFO(dev)->has_fbc)
 
-#define HAS_IPS(dev)		(IS_ULT(dev) || IS_BROADWELL(dev))
+#define HAS_IPS(dev)		(IS_HSW_ULT(dev) || IS_BROADWELL(dev))
 
 #define HAS_DDI(dev)		(INTEL_INFO(dev)->has_ddi)
 #define HAS_FPGA_DBG_UNCLAIMED(dev)	(INTEL_INFO(dev)->has_fpga_dbg)
 #define HAS_PSR(dev)		(IS_HASWELL(dev) || IS_BROADWELL(dev))
 #define HAS_RUNTIME_PM(dev)	(IS_GEN6(dev) || IS_HASWELL(dev) || \
 				 IS_BROADWELL(dev) || IS_VALLEYVIEW(dev))
+#define HAS_RC6(dev)		(INTEL_INFO(dev)->gen >= 6)
+#define HAS_RC6p(dev)		(INTEL_INFO(dev)->gen == 6 || IS_IVYBRIDGE(dev))
 
 #define INTEL_PCH_DEVICE_ID_MASK		0xff00
 #define INTEL_PCH_IBX_DEVICE_ID_TYPE		0x3b00
@@ -2195,7 +2200,7 @@ struct drm_i915_cmd_table {
 #define INTEL_PCH_SPT_DEVICE_ID_TYPE		0xA100
 #define INTEL_PCH_SPT_LP_DEVICE_ID_TYPE		0x9D00
 
-#define INTEL_PCH_TYPE(dev) (to_i915(dev)->pch_type)
+#define INTEL_PCH_TYPE(dev) (__I915__(dev)->pch_type)
 #define HAS_PCH_SPT(dev) (INTEL_PCH_TYPE(dev) == PCH_SPT)
 #define HAS_PCH_LPT(dev) (INTEL_PCH_TYPE(dev) == PCH_LPT)
 #define HAS_PCH_CPT(dev) (INTEL_PCH_TYPE(dev) == PCH_CPT)
@@ -2216,8 +2221,8 @@ struct drm_i915_cmd_table {
 extern const struct drm_ioctl_desc i915_ioctls[];
 extern int i915_max_ioctl;
 
-extern int i915_suspend(struct drm_device *dev, pm_message_t state);
-extern int i915_resume(struct drm_device *dev);
+extern int i915_suspend_legacy(struct drm_device *dev, pm_message_t state);
+extern int i915_resume_legacy(struct drm_device *dev);
 extern int i915_master_create(struct drm_device *dev, struct drm_master *master);
 extern void i915_master_destroy(struct drm_device *dev, struct drm_master *master);
 
@@ -2312,6 +2317,17 @@ i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
 void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv);
 void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv);
 
+void
+ironlake_enable_display_irq(struct drm_i915_private *dev_priv, u32 mask);
+void
+ironlake_disable_display_irq(struct drm_i915_private *dev_priv, u32 mask);
+void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
+				  uint32_t interrupt_mask,
+				  uint32_t enabled_irq_mask);
+#define ibx_enable_display_interrupt(dev_priv, bits) \
+	ibx_display_interrupt_update((dev_priv), (bits), (bits))
+#define ibx_disable_display_interrupt(dev_priv, bits) \
+	ibx_display_interrupt_update((dev_priv), (bits), 0)
 
 /* i915_gem.c */
 int i915_gem_init_ioctl(struct drm_device *dev, void *data,
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 2719c25588cb..827edb589883 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1466,6 +1466,16 @@ unlock:
  *
  * While the mapping holds a reference on the contents of the object, it doesn't
  * imply a ref on the object itself.
+ *
+ * IMPORTANT:
+ *
+ * DRM driver writers who look a this function as an example for how to do GEM
+ * mmap support, please don't implement mmap support like here. The modern way
+ * to implement DRM mmap support is with an mmap offset ioctl (like
+ * i915_gem_mmap_gtt) and then using the mmap syscall on the DRM fd directly.
+ * That way debug tooling like valgrind will understand what's going on, hiding
+ * the mmap call in a driver private ioctl will break that. The i915 driver only
+ * does cpu mmaps this way because we didn't know better.
  */
 int
 i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
@@ -2800,6 +2810,9 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
 	u32 seqno = 0;
 	int ret = 0;
 
+	if (args->flags != 0)
+		return -EINVAL;
+
 	ret = i915_mutex_lock_interruptible(dev);
 	if (ret)
 		return ret;
@@ -5259,7 +5272,7 @@ i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr)
 	struct drm_device *dev = dev_priv->dev;
 	struct drm_i915_gem_object *obj;
 	unsigned long timeout = msecs_to_jiffies(5000) + 1;
-	unsigned long pinned, bound, unbound, freed;
+	unsigned long pinned, bound, unbound, freed_pages;
 	bool was_interruptible;
 	bool unlock;
 
@@ -5276,7 +5289,7 @@ i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr)
 	was_interruptible = dev_priv->mm.interruptible;
 	dev_priv->mm.interruptible = false;
 
-	freed = i915_gem_shrink_all(dev_priv);
+	freed_pages = i915_gem_shrink_all(dev_priv);
 
 	dev_priv->mm.interruptible = was_interruptible;
 
@@ -5307,14 +5320,15 @@ i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr)
 	if (unlock)
 		mutex_unlock(&dev->struct_mutex);
 
-	pr_info("Purging GPU memory, %lu bytes freed, %lu bytes still pinned.\n",
-		freed, pinned);
+	if (freed_pages || unbound || bound)
+		pr_info("Purging GPU memory, %lu bytes freed, %lu bytes still pinned.\n",
+			freed_pages << PAGE_SHIFT, pinned);
 	if (unbound || bound)
 		pr_err("%lu and %lu bytes still available in the "
 		       "bound and unbound GPU page lists.\n",
 		       bound, unbound);
 
-	*(unsigned long *)ptr += freed;
+	*(unsigned long *)ptr += freed_pages;
 	return NOTIFY_DONE;
 }
 
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index 2cefb597df6d..d1e7a3e088aa 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -102,22 +102,33 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
 		swizzle_x = I915_BIT_6_SWIZZLE_NONE;
 		swizzle_y = I915_BIT_6_SWIZZLE_NONE;
 	} else if (INTEL_INFO(dev)->gen >= 6) {
-		uint32_t dimm_c0, dimm_c1;
-		dimm_c0 = I915_READ(MAD_DIMM_C0);
-		dimm_c1 = I915_READ(MAD_DIMM_C1);
-		dimm_c0 &= MAD_DIMM_A_SIZE_MASK | MAD_DIMM_B_SIZE_MASK;
-		dimm_c1 &= MAD_DIMM_A_SIZE_MASK | MAD_DIMM_B_SIZE_MASK;
-		/* Enable swizzling when the channels are populated with
-		 * identically sized dimms. We don't need to check the 3rd
-		 * channel because no cpu with gpu attached ships in that
-		 * configuration. Also, swizzling only makes sense for 2
-		 * channels anyway. */
-		if (dimm_c0 == dimm_c1) {
-			swizzle_x = I915_BIT_6_SWIZZLE_9_10;
-			swizzle_y = I915_BIT_6_SWIZZLE_9;
+		if (dev_priv->preserve_bios_swizzle) {
+			if (I915_READ(DISP_ARB_CTL) &
+			    DISP_TILE_SURFACE_SWIZZLING) {
+				swizzle_x = I915_BIT_6_SWIZZLE_9_10;
+				swizzle_y = I915_BIT_6_SWIZZLE_9;
+			} else {
+				swizzle_x = I915_BIT_6_SWIZZLE_NONE;
+				swizzle_y = I915_BIT_6_SWIZZLE_NONE;
+			}
 		} else {
-			swizzle_x = I915_BIT_6_SWIZZLE_NONE;
-			swizzle_y = I915_BIT_6_SWIZZLE_NONE;
+			uint32_t dimm_c0, dimm_c1;
+			dimm_c0 = I915_READ(MAD_DIMM_C0);
+			dimm_c1 = I915_READ(MAD_DIMM_C1);
+			dimm_c0 &= MAD_DIMM_A_SIZE_MASK | MAD_DIMM_B_SIZE_MASK;
+			dimm_c1 &= MAD_DIMM_A_SIZE_MASK | MAD_DIMM_B_SIZE_MASK;
+			/* Enable swizzling when the channels are populated
+			 * with identically sized dimms. We don't need to check
+			 * the 3rd channel because no cpu with gpu attached
+			 * ships in that configuration. Also, swizzling only
+			 * makes sense for 2 channels anyway. */
+			if (dimm_c0 == dimm_c1) {
+				swizzle_x = I915_BIT_6_SWIZZLE_9_10;
+				swizzle_y = I915_BIT_6_SWIZZLE_9;
+			} else {
+				swizzle_x = I915_BIT_6_SWIZZLE_NONE;
+				swizzle_y = I915_BIT_6_SWIZZLE_NONE;
+			}
 		}
 	} else if (IS_GEN5(dev)) {
 		/* On Ironlake whatever DRAM config, GPU always do
diff --git a/drivers/gpu/drm/i915/i915_ioc32.c b/drivers/gpu/drm/i915/i915_ioc32.c
index 2e0613e26251..176de6322e4d 100644
--- a/drivers/gpu/drm/i915/i915_ioc32.c
+++ b/drivers/gpu/drm/i915/i915_ioc32.c
@@ -189,7 +189,6 @@ static drm_ioctl_compat_t *i915_compat_ioctls[] = {
 	[DRM_I915_ALLOC] = compat_i915_alloc
 };
 
-#ifdef CONFIG_COMPAT
 /**
  * Called whenever a 32-bit process running under a 64-bit kernel
  * performs an ioctl on /dev/dri/card<n>.
@@ -218,4 +217,3 @@ long i915_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 
 	return ret;
 }
-#endif
diff --git a/dr
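A second detail worth pulling out of the diff above: workaround registers now live in a dedicated i915_workarounds table (i915_drv.h hunk), and the debugfs dump in i915_wa_registers() verifies each entry by comparing the programmed value against a fresh readback under the entry's mask. A standalone sketch of that masked check, using a hypothetical read_reg() in place of the driver's I915_READ():

```c
/* Standalone sketch of the masked workaround-register check; the struct
 * mirrors i915_wa_reg from the diff, and read_reg() is a hypothetical
 * stand-in for the driver's MMIO read. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct wa_reg {
	uint32_t addr;  /* register offset */
	uint32_t value; /* value the workaround programmed */
	uint32_t mask;  /* bits the workaround actually owns */
};

static uint32_t read_reg(uint32_t addr) { (void)addr; return 0; } /* placeholder MMIO read */

static void dump_workarounds(const struct wa_reg *regs, unsigned int count)
{
	for (unsigned int i = 0; i < count; i++) {
		uint32_t read = read_reg(regs[i].addr);
		/* Compare only the bits covered by the mask: other bits in the
		 * register may legitimately change at runtime. */
		bool ok = (regs[i].value & regs[i].mask) == (read & regs[i].mask);
		printf("0x%X: 0x%08X, mask: 0x%08X, read: 0x%08X, status: %s\n",
		       (unsigned int)regs[i].addr, (unsigned int)regs[i].value,
		       (unsigned int)regs[i].mask, (unsigned int)read,
		       ok ? "OK" : "FAIL");
	}
}

int main(void)
{
	const struct wa_reg regs[] = {
		{ 0x7004, 0x00000800, 0x00000800 }, /* made-up example entry */
	};
	dump_workarounds(regs, 1);
	return 0;
}
```

Because only the masked bits are compared, unrelated bits that the hardware or firmware flips in the same register cannot produce spurious FAIL reports.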
