author		Jani Nikula <jani.nikula@intel.com>	2020-02-11 18:14:51 +0200
committer	Jani Nikula <jani.nikula@intel.com>	2020-02-14 13:26:51 +0200
commit		926b005cd8c4e325ab918edea0fbdd1d25d1ba28 (patch)
tree		339c16f9fd3a091099955db2e5d040e8c7f42577
parent		06d3ff6e745114852204ff6667df9a0420ab9931 (diff)
drm/i915: split out display debugfs to a separate file
The i915_debugfs.c has grown more than a little unwieldy. Split out the
display related debugfs code to a file of its own under display/,
initialized with a separate call.

No functional changes.

v2:
- Also moved i915_frontbuffer_tracking, i915_gem_framebuffer,
  i915_power_domain_info, i915_dmc_info, i915_ipc_status (Ville)

Cc: Ville Syrjälä <ville.syrjala@linux.intel.com>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Jani Nikula <jani.nikula@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20200211161451.6867-2-jani.nikula@intel.com
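For orientation: the new file is wired up through its own registration entry point rather than from i915_debugfs.c; the header and the call site are part of this patch (see intel_display_debugfs.h and i915_drv.c in the diffstat) but fall outside the excerpt below. A minimal sketch of what such an entry point typically looks like, assuming the intel_display_debugfs_register() naming implied by the new file names; the exact prototype and the CONFIG_DEBUG_FS stub are illustrative, not quoted from the patch:

/* Sketch of display/intel_display_debugfs.h -- illustrative, not the literal header from this patch */
#ifndef __INTEL_DISPLAY_DEBUGFS_H__
#define __INTEL_DISPLAY_DEBUGFS_H__

struct drm_i915_private;

#ifdef CONFIG_DEBUG_FS
/* Register the display-only debugfs files once at driver registration time. */
void intel_display_debugfs_register(struct drm_i915_private *i915);
#else
static inline void intel_display_debugfs_register(struct drm_i915_private *i915) {}
#endif

#endif /* __INTEL_DISPLAY_DEBUGFS_H__ */

The two-line change to i915_drv.c in the diffstat would then presumably be the include plus this single call, made next to the existing debugfs registration.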
-rw-r--r--	drivers/gpu/drm/i915/Makefile	1
-rw-r--r--	drivers/gpu/drm/i915/display/intel_display_debugfs.c	2120
-rw-r--r--	drivers/gpu/drm/i915/display/intel_display_debugfs.h	20
-rw-r--r--	drivers/gpu/drm/i915/display/intel_dp.c	3
-rw-r--r--	drivers/gpu/drm/i915/display/intel_hdmi.c	3
-rw-r--r--	drivers/gpu/drm/i915/i915_debugfs.c	2086
-rw-r--r--	drivers/gpu/drm/i915/i915_debugfs.h	8
-rw-r--r--	drivers/gpu/drm/i915/i915_drv.c	2
8 files changed, 2156 insertions(+), 2087 deletions(-)
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index a2fab3c43563..eb13294cb439 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -68,6 +68,7 @@ i915-$(CONFIG_COMPAT) += i915_ioc32.o
i915-$(CONFIG_DEBUG_FS) += \
i915_debugfs.o \
i915_debugfs_params.o \
+ display/intel_display_debugfs.o \
display/intel_pipe_crc.o
i915-$(CONFIG_PERF_EVENTS) += i915_pmu.o
diff --git a/drivers/gpu/drm/i915/display/intel_display_debugfs.c b/drivers/gpu/drm/i915/display/intel_display_debugfs.c
new file mode 100644
index 000000000000..50baac726e70
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_display_debugfs.c
@@ -0,0 +1,2120 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#include <drm/drm_debugfs.h>
+#include <drm/drm_fourcc.h>
+
+#include "i915_debugfs.h"
+#include "intel_csr.h"
+#include "intel_display_debugfs.h"
+#include "intel_display_types.h"
+#include "intel_dp.h"
+#include "intel_fbc.h"
+#include "intel_hdcp.h"
+#include "intel_hdmi.h"
+#include "intel_pm.h"
+#include "intel_psr.h"
+#include "intel_sideband.h"
+
+static inline struct drm_i915_private *node_to_i915(struct drm_info_node *node)
+{
+ return to_i915(node->minor->dev);
+}
+
+static int i915_frontbuffer_tracking(struct seq_file *m, void *unused)
+{
+ struct drm_i915_private *dev_priv = node_to_i915(m->private);
+
+ seq_printf(m, "FB tracking busy bits: 0x%08x\n",
+ dev_priv->fb_tracking.busy_bits);
+
+ seq_printf(m, "FB tracking flip bits: 0x%08x\n",
+ dev_priv->fb_tracking.flip_bits);
+
+ return 0;
+}
+
+static int i915_fbc_status(struct seq_file *m, void *unused)
+{
+ struct drm_i915_private *dev_priv = node_to_i915(m->private);
+ struct intel_fbc *fbc = &dev_priv->fbc;
+ intel_wakeref_t wakeref;
+
+ if (!HAS_FBC(dev_priv))
+ return -ENODEV;
+
+ wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
+ mutex_lock(&fbc->lock);
+
+ if (intel_fbc_is_active(dev_priv))
+ seq_puts(m, "FBC enabled\n");
+ else
+ seq_printf(m, "FBC disabled: %s\n", fbc->no_fbc_reason);
+
+ if (intel_fbc_is_active(dev_priv)) {
+ u32 mask;
+
+ if (INTEL_GEN(dev_priv) >= 8)
+ mask = I915_READ(IVB_FBC_STATUS2) & BDW_FBC_COMP_SEG_MASK;
+ else if (INTEL_GEN(dev_priv) >= 7)
+ mask = I915_READ(IVB_FBC_STATUS2) & IVB_FBC_COMP_SEG_MASK;
+ else if (INTEL_GEN(dev_priv) >= 5)
+ mask = I915_READ(ILK_DPFC_STATUS) & ILK_DPFC_COMP_SEG_MASK;
+ else if (IS_G4X(dev_priv))
+ mask = I915_READ(DPFC_STATUS) & DPFC_COMP_SEG_MASK;
+ else
+ mask = I915_READ(FBC_STATUS) & (FBC_STAT_COMPRESSING |
+ FBC_STAT_COMPRESSED);
+
+ seq_printf(m, "Compressing: %s\n", yesno(mask));
+ }
+
+ mutex_unlock(&fbc->lock);
+ intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
+
+ return 0;
+}
+
+static int i915_fbc_false_color_get(void *data, u64 *val)
+{
+ struct drm_i915_private *dev_priv = data;
+
+ if (INTEL_GEN(dev_priv) < 7 || !HAS_FBC(dev_priv))
+ return -ENODEV;
+
+ *val = dev_priv->fbc.false_color;
+
+ return 0;
+}
+
+static int i915_fbc_false_color_set(void *data, u64 val)
+{
+ struct drm_i915_private *dev_priv = data;
+ u32 reg;
+
+ if (INTEL_GEN(dev_priv) < 7 || !HAS_FBC(dev_priv))
+ return -ENODEV;
+
+ mutex_lock(&dev_priv->fbc.lock);
+
+ reg = I915_READ(ILK_DPFC_CONTROL);
+ dev_priv->fbc.false_color = val;
+
+ I915_WRITE(ILK_DPFC_CONTROL, val ?
+ (reg | FBC_CTL_FALSE_COLOR) :
+ (reg & ~FBC_CTL_FALSE_COLOR));
+
+ mutex_unlock(&dev_priv->fbc.lock);
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(i915_fbc_false_color_fops,
+ i915_fbc_false_color_get, i915_fbc_false_color_set,
+ "%llu\n");
+
+static int i915_ips_status(struct seq_file *m, void *unused)
+{
+ struct drm_i915_private *dev_priv = node_to_i915(m->private);
+ intel_wakeref_t wakeref;
+
+ if (!HAS_IPS(dev_priv))
+ return -ENODEV;
+
+ wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
+
+ seq_printf(m, "Enabled by kernel parameter: %s\n",
+ yesno(i915_modparams.enable_ips));
+
+ if (INTEL_GEN(dev_priv) >= 8) {
+ seq_puts(m, "Currently: unknown\n");
+ } else {
+ if (I915_READ(IPS_CTL) & IPS_ENABLE)
+ seq_puts(m, "Currently: enabled\n");
+ else
+ seq_puts(m, "Currently: disabled\n");
+ }
+
+ intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
+
+ return 0;
+}
+
+static int i915_sr_status(struct seq_file *m, void *unused)
+{
+ struct drm_i915_private *dev_priv = node_to_i915(m->private);
+ intel_wakeref_t wakeref;
+ bool sr_enabled = false;
+
+ wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
+
+ if (INTEL_GEN(dev_priv) >= 9)
+ /* no global SR status; inspect per-plane WM */;
+ else if (HAS_PCH_SPLIT(dev_priv))
+ sr_enabled = I915_READ(WM1_LP_ILK) & WM1_LP_SR_EN;
+ else if (IS_I965GM(dev_priv) || IS_G4X(dev_priv) ||
+ IS_I945G(dev_priv) || IS_I945GM(dev_priv))
+ sr_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN;
+ else if (IS_I915GM(dev_priv))
+ sr_enabled = I915_READ(INSTPM) & INSTPM_SELF_EN;
+ else if (IS_PINEVIEW(dev_priv))
+ sr_enabled = I915_READ(DSPFW3) & PINEVIEW_SELF_REFRESH_EN;
+ else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+ sr_enabled = I915_READ(FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;
+
+ intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, wakeref);
+
+ seq_printf(m, "self-refresh: %s\n", enableddisabled(sr_enabled));
+
+ return 0;
+}
+
+static int i915_opregion(struct seq_file *m, void *unused)
+{
+ struct intel_opregion *opregion = &node_to_i915(m->private)->opregion;
+
+ if (opregion->header)
+ seq_write(m, opregion->header, OPREGION_SIZE);
+
+ return 0;
+}
+
+static int i915_vbt(struct seq_file *m, void *unused)
+{
+ struct intel_opregion *opregion = &node_to_i915(m->private)->opregion;
+
+ if (opregion->vbt)
+ seq_write(m, opregion->vbt, opregion->vbt_size);
+
+ return 0;
+}
+
+static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
+{
+ struct drm_i915_private *dev_priv = node_to_i915(m->private);
+ struct drm_device *dev = &dev_priv->drm;
+ struct intel_framebuffer *fbdev_fb = NULL;
+ struct drm_framebuffer *drm_fb;
+
+#ifdef CONFIG_DRM_FBDEV_EMULATION
+ if (dev_priv->fbdev && dev_priv->fbdev->helper.fb) {
+ fbdev_fb = to_intel_framebuffer(dev_priv->fbdev->helper.fb);
+
+ seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
+ fbdev_fb->base.width,
+ fbdev_fb->base.height,
+ fbdev_fb->base.format->depth,
+ fbdev_fb->base.format->cpp[0] * 8,
+ fbdev_fb->base.modifier,
+ drm_framebuffer_read_refcount(&fbdev_fb->base));
+ i915_debugfs_describe_obj(m, intel_fb_obj(&fbdev_fb->base));
+ seq_putc(m, '\n');
+ }
+#endif
+
+ mutex_lock(&dev->mode_config.fb_lock);
+ drm_for_each_fb(drm_fb, dev) {
+ struct intel_framebuffer *fb = to_intel_framebuffer(drm_fb);
+ if (fb == fbdev_fb)
+ continue;
+
+ seq_printf(m, "user size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
+ fb->base.width,
+ fb->base.height,
+ fb->base.format->depth,
+ fb->base.format->cpp[0] * 8,
+ fb->base.modifier,
+ drm_framebuffer_read_refcount(&fb->base));
+ i915_debugfs_describe_obj(m, intel_fb_obj(&fb->base));
+ seq_putc(m, '\n');
+ }
+ mutex_unlock(&dev->mode_config.fb_lock);
+
+ return 0;
+}
+
+static int i915_psr_sink_status_show(struct seq_file *m, void *data)
+{
+ u8 val;
+ static const char * const sink_status[] = {
+ "inactive",
+ "transition to active, capture and display",
+ "active, display from RFB",
+ "active, capture and display on sink device timings",
+ "transition to inactive, capture and display, timing re-sync",
+ "reserved",
+ "reserved",
+ "sink internal error",
+ };
+ struct drm_connector *connector = m->private;
+ struct drm_i915_private *dev_priv = to_i915(connector->dev);
+ struct intel_dp *intel_dp =
+ intel_attached_dp(to_intel_connector(connector));
+ int ret;
+
+ if (!CAN_PSR(dev_priv)) {
+ seq_puts(m, "PSR Unsupported\n");
+ return -ENODEV;
+ }
+
+ if (connector->status != connector_status_connected)
+ return -ENODEV;
+
+ ret = drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_STATUS, &val);
+
+ if (ret == 1) {
+ const char *str = "unknown";
+
+ val &= DP_PSR_SINK_STATE_MASK;
+ if (val < ARRAY_SIZE(sink_status))
+ str = sink_status[val];
+ seq_printf(m, "Sink PSR status: 0x%x [%s]\n", val, str);
+ } else {
+ return ret;
+ }
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(i915_psr_sink_status);
+
+static void
+psr_source_status(struct drm_i915_private *dev_priv, struct seq_file *m)
+{
+ u32 val, status_val;
+ const char *status = "unknown";
+
+ if (dev_priv->psr.psr2_enabled) {
+ static const char * const live_status[] = {
+ "IDLE",
+ "CAPTURE",
+ "CAPTURE_FS",
+ "SLEEP",
+ "BUFON_FW",
+ "ML_UP",
+ "SU_STANDBY",
+ "FAST_SLEEP",
+ "DEEP_SLEEP",
+ "BUF_ON",
+ "TG_ON"
+ };
+ val = I915_READ(EDP_PSR2_STATUS(dev_priv->psr.transcoder));
+ status_val = (val & EDP_PSR2_STATUS_STATE_MASK) >>
+ EDP_PSR2_STATUS_STATE_SHIFT;
+ if (status_val < ARRAY_SIZE(live_status))
+ status = live_status[status_val];
+ } else {
+ static const char * const live_status[] = {
+ "IDLE",
+ "SRDONACK",
+ "SRDENT",
+ "BUFOFF",
+ "BUFON",
+ "AUXACK",
+ "SRDOFFACK",
+ "SRDENT_ON",
+ };
+ val = I915_READ(EDP_PSR_STATUS(dev_priv->psr.transcoder));
+ status_val = (val & EDP_PSR_STATUS_STATE_MASK) >>
+ EDP_PSR_STATUS_STATE_SHIFT;
+ if (status_val < ARRAY_SIZE(live_status))
+ status = live_status[status_val];
+ }
+
+ seq_printf(m, "Source PSR status: %s [0x%08x]\n", status, val);
+}
+
+static int i915_edp_psr_status(struct seq_file *m, void *data)
+{
+ struct drm_i915_private *dev_priv = node_to_i915(m->private);
+ struct i915_psr *psr = &dev_priv->psr;
+ intel_wakeref_t wakeref;
+ const char *status;
+ bool enabled;
+ u32 val;
+
+ if (!HAS_PSR(dev_priv))
+ return -ENODEV;
+
+ seq_printf(m, "Sink support: %s", yesno(psr->sink_support));
+ if (psr->dp)
+ seq_printf(m, " [0x%02x]", psr->dp->psr_dpcd[0]);
+ seq_puts(m, "\n");
+
+ if (!psr->sink_support)
+ return 0;
+
+ wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
+ mutex_lock(&psr->lock);
+
+ if (psr->enabled)
+ status = psr->psr2_enabled ? "PSR2 enabled" : "PSR1 enabled";
+ else
+ status = "disabled";
+ seq_printf(m, "PSR mode: %s\n", status);
+
+ if (!psr->enabled) {
+ seq_printf(m, "PSR sink not reliable: %s\n",
+ yesno(psr->sink_not_reliable));
+
+ goto unlock;
+ }
+
+ if (psr->psr2_enabled) {
+ val = I915_READ(EDP_PSR2_CTL(dev_priv->psr.transcoder));
+ enabled = val & EDP_PSR2_ENABLE;
+ } else {
+ val = I915_READ(EDP_PSR_CTL(dev_priv->psr.transcoder));
+ enabled = val & EDP_PSR_ENABLE;
+ }
+ seq_printf(m, "Source PSR ctl: %s [0x%08x]\n",
+ enableddisabled(enabled), val);
+ psr_source_status(dev_priv, m);
+ seq_printf(m, "Busy frontbuffer bits: 0x%08x\n",
+ psr->busy_frontbuffer_bits);
+
+ /*
+ * SKL+ Perf counter is reset to 0 every time DC state is entered
+ */
+ if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
+ val = I915_READ(EDP_PSR_PERF_CNT(dev_priv->psr.transcoder));
+ val &= EDP_PSR_PERF_CNT_MASK;
+ seq_printf(m, "Performance counter: %u\n", val);
+ }
+
+ if (psr->debug & I915_PSR_DEBUG_IRQ) {
+ seq_printf(m, "Last attempted entry at: %lld\n",
+ psr->last_entry_attempt);
+ seq_printf(m, "Last exit at: %lld\n", psr->last_exit);
+ }
+
+ if (psr->psr2_enabled) {
+ u32 su_frames_val[3];
+ int frame;
+
+ /*
+ * Reading all 3 registers beforehand to minimize crossing a
+ * frame boundary between register reads
+ */
+ for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame += 3) {
+ val = I915_READ(PSR2_SU_STATUS(dev_priv->psr.transcoder,
+ frame));
+ su_frames_val[frame / 3] = val;
+ }
+
+ seq_puts(m, "Frame:\tPSR2 SU blocks:\n");
+
+ for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame++) {
+ u32 su_blocks;
+
+ su_blocks = su_frames_val[frame / 3] &
+ PSR2_SU_STATUS_MASK(frame);
+ su_blocks = su_blocks >> PSR2_SU_STATUS_SHIFT(frame);
+ seq_printf(m, "%d\t%d\n", frame, su_blocks);
+ }
+ }
+
+unlock:
+ mutex_unlock(&psr->lock);
+ intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
+
+ return 0;
+}
+
+static int
+i915_edp_psr_debug_set(void *data, u64 val)
+{
+ struct drm_i915_private *dev_priv = data;
+ intel_wakeref_t wakeref;
+ int ret;
+
+ if (!CAN_PSR(dev_priv))
+ return -ENODEV;
+
+ drm_dbg_kms(&dev_priv->drm, "Setting PSR debug to %llx\n", val);
+
+ wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
+
+ ret = intel_psr_debug_set(dev_priv, val);
+
+ intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
+
+ return ret;
+}
+
+static int
+i915_edp_psr_debug_get(void *data, u64 *val)
+{
+ struct drm_i915_private *dev_priv = data;
+
+ if (!CAN_PSR(dev_priv))
+ return -ENODEV;
+
+ *val = READ_ONCE(dev_priv->psr.debug);
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(i915_edp_psr_debug_fops,
+ i915_edp_psr_debug_get, i915_edp_psr_debug_set,
+ "%llu\n");
+
+static int i915_power_domain_info(struct seq_file *m, void *unused)
+{
+ struct drm_i915_private *dev_priv = node_to_i915(m->private);
+ struct i915_power_domains *power_domains = &dev_priv->power_domains;
+ int i;
+
+ mutex_lock(&power_domains->lock);
+
+ seq_printf(m, "%-25s %s\n", "Power well/domain", "Use count");
+ for (i = 0; i < power_domains->power_well_count; i++) {
+ struct i915_power_well *power_well;
+ enum intel_display_power_domain power_domain;
+
+ power_well = &power_domains->power_wells[i];
+ seq_printf(m, "%-25s %d\n", power_well->desc->name,
+ power_well->count);
+
+ for_each_power_domain(power_domain, power_well->desc->domains)
+ seq_printf(m, " %-23s %d\n",
+ intel_display_power_domain_str(power_domain),
+ power_domains->domain_use_count[power_domain]);
+ }
+
+ mutex_unlock(&power_domains->lock);
+
+ return 0;
+}
+
+static int i915_dmc_info(struct seq_file *m, void *unused)
+{
+ struct drm_i915_private *dev_priv = node_to_i915(m->private);
+ intel_wakeref_t wakeref;
+ struct intel_csr *csr;
+ i915_reg_t dc5_reg, dc6_reg = {};
+
+ if (!HAS_CSR(dev_priv))
+ return -ENODEV;
+
+ csr = &dev_priv->csr;
+
+ wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
+
+ seq_printf(m, "fw loaded: %s\n", yesno(csr->dmc_payload != NULL));
+ seq_printf(m, "path: %s\n", csr->fw_path);
+
+ if (!csr->dmc_payload)
+ goto out;
+
+ seq_printf(m, "version: %d.%d\n", CSR_VERSION_MAJOR(csr->version),
+ CSR_VERSION_MINOR(csr->version));
+
+ if (INTEL_GEN(dev_priv) >= 12) {
+ dc5_reg = TGL_DMC_DEBUG_DC5_COUNT;
+ dc6_reg = TGL_DMC_DEBUG_DC6_COUNT;
+ /*
+ * NOTE: DMC_DEBUG3 is a general purpose reg.
+ * According to B.Specs:49196 DMC f/w reuses DC5/6 counter
+ * reg for DC3CO debugging and validation,
+ * but TGL DMC f/w is using DMC_DEBUG3 reg for DC3CO counter.
+ */
+ seq_printf(m, "DC3CO count: %d\n", I915_READ(DMC_DEBUG3));
+ } else {
+ dc5_reg = IS_BROXTON(dev_priv) ? BXT_CSR_DC3_DC5_COUNT :
+ SKL_CSR_DC3_DC5_COUNT;
+ if (!IS_GEN9_LP(dev_priv))
+ dc6_reg = SKL_CSR_DC5_DC6_COUNT;
+ }
+
+ seq_printf(m, "DC3 -> DC5 count: %d\n", I915_READ(dc5_reg));
+ if (dc6_reg.reg)
+ seq_printf(m, "DC5 -> DC6 count: %d\n", I915_READ(dc6_reg));
+
+out:
+ seq_printf(m, "program base: 0x%08x\n", I915_READ(CSR_PROGRAM(0)));
+ seq_printf(m, "ssp base: 0x%08x\n", I915_READ(CSR_SSP_BASE));
+ seq_printf(m, "htp: 0x%08x\n", I915_READ(CSR_HTP_SKL));
+
+ intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
+
+ return 0;
+}
+
+static void intel_seq_print_mode(struct seq_file *m, int tabs,
+ const struct drm_display_mode *mode)
+{
+ int i;
+
+ for (i = 0; i < tabs; i++)
+ seq_putc(m, '\t');
+
+ seq_printf(m, DRM_MODE_FMT "\n", DRM_MODE_ARG(mode));
+}
+
+static void intel_encoder_info(struct seq_file *m,
+ struct intel_crtc *crtc,
+ struct intel_encoder *encoder)
+{
+ struct drm_i915_private *dev_priv = node_to_i915(m->private);
+ struct drm_connector_list_iter conn_iter;
+ struct drm_connector *connector;
+
+ seq_printf(m, "\t[ENCODER:%d:%s]: connectors:\n",
+ encoder->base.base.id, encoder->base.name);
+
+ drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter);
+ drm_for_each_connector_iter(connector, &conn_iter) {
+ const struct drm_connector_state *conn_state =
+ connector->state;
+
+ if (conn_state->best_encoder != &encoder->base)
+ continue;
+
+ seq_printf(m, "\t\t[CONNECTOR:%d:%s]\n",
+ connector->base.id, connector->name);
+ }
+ drm_connector_list_iter_end(&conn_iter);
+}
+
+static void intel_panel_info(struct seq_file *m, struct intel_panel *panel)
+{
+ const struct drm_display_mode *mode = panel->fixed_mode;
+
+ seq_printf(m, "\tfixed mode: " DRM_MODE_FMT "\n", DRM_MODE_ARG(mode));
+}
+
+static void intel_hdcp_info(struct seq_file *m,
+ struct intel_connector *intel_connector)
+{
+ bool hdcp_cap, hdcp2_cap;
+
+ hdcp_cap = intel_hdcp_capable(intel_connector);
+ hdcp2_cap = intel_hdcp2_capable(intel_connector);
+
+ if (hdcp_cap)
+ seq_puts(m, "HDCP1.4 ");
+ if (hdcp2_cap)
+ seq_puts(m, "HDCP2.2 ");
+
+ if (!hdcp_cap && !hdcp2_cap)
+ seq_puts(m, "None");
+
+ seq_puts(m, "\n");
+}
+
+static void intel_dp_info(struct seq_file *m,
+ struct intel_connector *intel_connector)
+{
+ struct intel_encoder *intel_encoder = intel_attached_encoder(intel_connector);
+ struct intel_dp *intel_dp = enc_to_intel_dp(intel_encoder);
+
+ seq_printf(m, "\tDPCD rev: %x\n", intel_dp->dpcd[DP_DPCD_REV]);
+ seq_printf(m, "\taudio support: %s\n", yesno(intel_dp->has_audio));
+ if (intel_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP)
+ intel_panel_info(m, &intel_connector->panel);
+
+ drm_dp_downstream_debug(m, intel_dp->dpcd, intel_dp->downstream_ports,
+ &intel_dp->aux);
+ if (intel_connector->hdcp.shim) {
+ seq_puts(m, "\tHDCP version: ");
+ intel_hdcp_info(m, intel_connector);
+ }
+}
+
+static void intel_dp_mst_info(struct seq_file *m,
+ struct intel_connector *intel_connector)
+{
+ struct intel_encoder *intel_encoder = intel_attached_encoder(intel_connector);
+ struct intel_dp_mst_encoder *intel_mst =
+ enc_to_mst(intel_encoder);
+ struct intel_digital_port *intel_dig_port = intel_mst->primary;
+ struct intel_dp *intel_dp = &intel_dig_port->dp;
+ bool has_audio = drm_dp_mst_port_has_audio(&intel_dp->mst_mgr,
+ intel_connector->port);
+
+ seq_printf(m, "\taudio support: %s\n", yesno(has_audio));
+}
+
+static void intel_hdmi_info(struct seq_file *m,
+ struct intel_connector *intel_connector)
+{
+ struct intel_encoder *intel_encoder = intel_attached_encoder(intel_connector);
+ struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(intel_encoder);
+
+ seq_printf(m, "\taudio support: %s\n", yesno(intel_hdmi->has_audio));
+ if (intel_connector->hdcp.shim) {
+ seq_puts(m, "\tHDCP version: ");
+ intel_hdcp_info(m, intel_connector);
+ }
+}
+
+static void intel_lvds_info(struct seq_file *m,
+ struct intel_connector *intel_connector)
+{
+ intel_panel_info(m, &intel_connector->panel);
+}
+
+static void intel_connector_info(struct seq_file *m,
+ struct drm_connector *connector)
+{
+ struct intel_connector *intel_connector = to_intel_connector(connector);
+ const struct drm_connector_state *conn_state = connector->state;
+ struct intel_encoder *encoder =
+ to_intel_encoder(conn_state->best_encoder);
+ const struct drm_display_mode *mode;
+
+ seq_printf(m, "[CONNECTOR:%d:%s]: status: %s\n",
+ connector->base.id, connector->name,
+ drm_get_connector_status_name(connector->status));
+
+ if (connector->status == connector_status_disconnected)
+ return;
+
+ seq_printf(m, "\tphysical dimensions: %dx%dmm\n",
+ connector->display_info.width_mm,
+ connector->display_info.height_mm);
+ seq_printf(m, "\tsubpixel order: %s\n",
+ drm_get_subpixel_order_name(connector->display_info.subpixel_order));
+ seq_printf(m, "\tCEA rev: %d\n", connector->display_info.cea_rev);
+
+ if (!encoder)
+ return;
+
+ switch (connector->connector_type) {
+ case DRM_MODE_CONNECTOR_DisplayPort:
+ case DRM_MODE_CONNECTOR_eDP:
+ if (encoder->type == INTEL_OUTPUT_DP_MST)
+ intel_dp_mst_info(m, intel_connector);
+ else
+ intel_dp_info(m, intel_connector);
+ break;
+ case DRM_MODE_CONNECTOR_LVDS:
+ if (encoder->type == INTEL_OUTPUT_LVDS)
+ intel_lvds_info(m, intel_connector);
+ break;
+ case DRM_MODE_CONNECTOR_HDMIA:
+ if (encoder->type == INTEL_OUTPUT_HDMI ||
+ encoder->type == INTEL_OUTPUT_DDI)
+ intel_hdmi_info(m, intel_connector);
+ break;
+ default:
+ break;
+ }
+
+ seq_printf(m, "\tmodes:\n");
+ list_for_each_entry(mode, &connector->modes, head)
+ intel_seq_print_mode(m, 2, mode);
+}
+
+static const char *plane_type(enum drm_plane_type type)
+{
+ switch (type) {
+ case DRM_PLANE_TYPE_OVERLAY:
+ return "OVL";
+ case DRM_PLANE_TYPE_PRIMARY:
+ return "PRI";
+ case DRM_PLANE_TYPE_CURSOR:
+ return "CUR";
+ /*
+ * Deliberately omitting default: to generate compiler warnings
+ * when a new drm_plane_type gets added.
+ */
+ }
+
+ return "unknown";
+}
+
+static void plane_rotation(char *buf, size_t bufsize, unsigned int rotation)
+{
+ /*
+ * According to doc only one DRM_MODE_ROTATE_ is allowed but this
+ * will print them all to visualize if the values are misused
+ */
+ snprintf(buf, bufsize,
+ "%s%s%s%s%s%s(0x%08x)",
+ (rotation & DRM_MODE_ROTATE_0) ? "0 " : "",
+ (rotation & DRM_MODE_ROTATE_90) ? "90 " : "",
+ (rotation & DRM_MODE_ROTATE_180) ? "180 " : "",
+ (rotation & DRM_MODE_ROTATE_270) ? "270 " : "",
+ (rotation & DRM_MODE_REFLECT_X) ? "FLIPX " : "",
+ (rotation & DRM_MODE_REFLECT_Y) ? "FLIPY " : "",
+ rotation);
+}
+
+static void intel_plane_uapi_info(struct seq_file *m, struct intel_plane *plane)
+{
+ const struct intel_plane_state *plane_state =
+ to_intel_plane_state(plane->base.state);
+ const struct drm_framebuffer *fb = plane_state->uapi.fb;
+ struct drm_format_name_buf format_name;
+ struct drm_rect src, dst;
+ char rot_str[48];
+
+ src = drm_plane_state_src(&plane_state->uapi);
+ dst = drm_plane_state_dest(&plane_state->uapi);
+
+ if (fb)
+ drm_get_format_name(fb->format->format, &format_name);
+
+ plane_rotation(rot_str, sizeof(rot_str),
+ plane_state->uapi.rotation);
+
+ seq_printf(m, "\t\tuapi: fb=%d,%s,%dx%d, src=" DRM_RECT_FP_FMT ", dst=" DRM_RECT_FMT ", rotation=%s\n",
+ fb ? fb->base.id : 0, fb ? format_name.str : "n/a",
+ fb ? fb->width : 0, fb ? fb->height : 0,
+ DRM_RECT_FP_ARG(&src),
+ DRM_RECT_ARG(&dst),
+ rot_str);
+}
+
+static void intel_plane_hw_info(struct seq_file *m, struct intel_plane *plane)
+{
+ const struct intel_plane_state *plane_state =
+ to_intel_plane_state(plane->base.state);
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
+ struct drm_format_name_buf format_name;
+ char rot_str[48];
+
+ if (!fb)
+ return;
+
+ drm_get_format_name(fb->format->format, &format_name);
+
+ plane_rotation(rot_str, sizeof(rot_str),
+ plane_state->hw.rotation);
+
+ seq_printf(m, "\t\thw: fb=%d,%s,%dx%d, visible=%s, src=" DRM_RECT_FP_FMT ", dst=" DRM_RECT_FMT ", rotation=%s\n",
+ fb->base.id, format_name.str,
+ fb->width, fb->height,
+ yesno(plane_state->uapi.visible),
+ DRM_RECT_FP_ARG(&plane_state->uapi.src),
+ DRM_RECT_ARG(&plane_state->uapi.dst),
+ rot_str);
+}
+
+static void intel_plane_info(struct seq_file *m, struct intel_crtc *crtc)
+{
+ struct drm_i915_private *dev_priv = node_to_i915(m->private);
+ struct intel_plane *plane;
+
+ for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
+ seq_printf(m, "\t[PLANE:%d:%s]: type=%s\n",
+ plane->base.base.id, plane->base.name,
+ plane_type(plane->base.type));
+ intel_plane_uapi_info(m, plane);
+ intel_plane_hw_info(m, plane);
+ }
+}
+
+static void intel_scaler_info(struct seq_file *m, struct intel_crtc *crtc)
+{
+ const struct intel_crtc_state *crtc_state =
+ to_intel_crtc_state(crtc->base.state);
+ int num_scalers = crtc->num_scalers;
+ int i;
+
+ /* Not all platforms have a scaler */
+ if (num_scalers) {
+ seq_printf(m, "\tnum_scalers=%d, scaler_users=%x scaler_id=%d",
+ num_scalers,
+ crtc_state->scaler_state.scaler_users,
+ crtc_state->scaler_state.scaler_id);
+
+ for (i = 0; i < num_scalers; i++) {
+ const struct intel_scaler *sc =
+ &crtc_state->scaler_state.scalers[i];
+
+ seq_printf(m, ", scalers[%d]: use=%s, mode=%x",
+ i, yesno(sc->in_use), sc->mode);
+ }
+ seq_puts(m, "\n");
+ } else {
+ seq_puts(m, "\tNo scalers available on this platform\n");
+ }
+}
+
+static void intel_crtc_info(struct seq_file *m, struct intel_crtc *crtc)
+{
+ struct drm_i915_private *dev_priv = node_to_i915(m->private);
+ const struct intel_crtc_state *crtc_state =
+ to_intel_crtc_state(crtc->base.state);
+ struct intel_encoder *encoder;
+
+ seq_printf(m, "[CRTC:%d:%s]:\n",
+ crtc->base.base.id, crtc->base.name);
+
+ seq_printf(m, "\tuapi: enable=%s, active=%s, mode=" DRM_MODE_FMT "\n",
+ yesno(crtc_state->uapi.enable),
+ yesno(crtc_state->uapi.active),
+ DRM_MODE_ARG(&crtc_state->uapi.mode));
+
+ if (crtc_state->hw.enable) {
+ seq_printf(m, "\thw: active=%s, adjusted_mode=" DRM_MODE_FMT "\n",
+ yesno(crtc_state->hw.active),
+ DRM_MODE_ARG(&crtc_state->hw.adjusted_mode));
+
+ seq_printf(m, "\tpipe src size=%dx%d, dither=%s, bpp=%d\n",
+ crtc_state->pipe_src_w, crtc_state->pipe_src_h,
+ yesno(crtc_state->dither), crtc_state->pipe_bpp);
+
+ intel_scaler_info(m, crtc);
+ }
+
+ for_each_intel_encoder_mask(&dev_priv->drm, encoder,
+ crtc_state->uapi.encoder_mask)
+ intel_encoder_info(m, crtc, encoder);
+
+ intel_plane_info(m, crtc);
+
+ seq_printf(m, "\tunderrun reporting: cpu=%s pch=%s\n",
+ yesno(!crtc->cpu_fifo_underrun_disabled),
+ yesno(!crtc->pch_fifo_underrun_disabled));
+}
+
+static int i915_display_info(struct seq_file *m, void *unused)
+{
+ struct drm_i915_private *dev_priv = node_to_i915(m->private);
+ struct drm_device *dev = &dev_priv->drm;
+ struct intel_crtc *crtc;
+ struct drm_connector *connector;
+ struct drm_connector_list_iter conn_iter;
+ intel_wakeref_t wakeref;
+
+ wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
+
+ drm_modeset_lock_all(dev);
+
+ seq_printf(m, "CRTC info\n");
+ seq_printf(m, "---------\n");
+ for_each_intel_crtc(dev, crtc)
+ intel_crtc_info(m, crtc);
+
+ seq_printf(m, "\n");
+ seq_printf(m, "Connector info\n");
+ seq_printf(m, "--------------\n");
+ drm_connector_list_iter_begin(dev, &conn_iter);
+ drm_for_each_connector_iter(connector, &conn_iter)
+ intel_connector_info(m, connector);
+ drm_connector_list_iter_end(&conn_iter);
+
+ drm_modeset_unlock_all(dev);
+
+ intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
+
+ return 0;
+}
+
+static int i915_shared_dplls_info(struct seq_file *m, void *unused)
+{
+ struct drm_i915_private *dev_priv = node_to_i915(m->private);
+ struct drm_device *dev = &dev_priv->drm;
+ int i;
+
+ drm_modeset_lock_all(dev);
+ for (i = 0; i < dev_priv->num_shared_dpll; i++) {
+ struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
+
+ seq_printf(m, "DPLL%i: %s, id: %i\n", i, pll->info->name,
+ pll->info->id);
+ seq_printf(m, " crtc_mask: 0x%08x, active: 0x%x, on: %s\n",
+ pll->state.crtc_mask, pll->active_mask, yesno(pll->on));
+ seq_printf(m, " tracked hardware state:\n");
+ seq_printf(m, " dpll: 0x%08x\n", pll->state.hw_state.dpll);
+ seq_printf(m, " dpll_md: 0x%08x\n",
+ pll->state.hw_state.dpll_md);
+ seq_printf(m, " fp0: 0x%08x\n", pll->state.hw_state.fp0);
+ seq_printf(m, " fp1: 0x%08x\n", pll->state.hw_state.fp1);
+ seq_printf(m, " wrpll: 0x%08x\n", pll->state.hw_state.wrpll);
+ seq_printf(m, " cfgcr0: 0x%08x\n", pll->state.hw_state.cfgcr0);
+ seq_printf(m, " cfgcr1: 0x%08x\n", pll->state.hw_state.cfgcr1);
+ seq_printf(m, " mg_refclkin_ctl: 0x%08x\n",
+ pll->state.hw_state.mg_refclkin_ctl);
+ seq_printf(m, " mg_clktop2_coreclkctl1: 0x%08x\n",
+ pll->state.hw_state.mg_clktop2_coreclkctl1);
+ seq_printf(m, " mg_clktop2_hsclkctl: 0x%08x\n",
+ pll->state.hw_state.mg_clktop2_hsclkctl);
+ seq_printf(m, " mg_pll_div0: 0x%08x\n",
+ pll->state.hw_state.mg_pll_div0);
+ seq_printf(m, " mg_pll_div1: 0x%08x\n",
+ pll->state.hw_state.mg_pll_div1);
+ seq_printf(m, " mg_pll_lf: 0x%08x\n",
+ pll->state.hw_state.mg_pll_lf);
+ seq_printf(m, " mg_pll_frac_lock: 0x%08x\n",
+ pll->state.hw_state.mg_pll_frac_lock);
+ seq_printf(m, " mg_pll_ssc: 0x%08x\n",
+ pll->state.hw_state.mg_pll_ssc);
+ seq_printf(m, " mg_pll_bias: 0x%08x\n",
+ pll->state.hw_state.mg_pll_bias);
+ seq_printf(m, " mg_pll_tdc_coldst_bias: 0x%08x\n",
+ pll->state.hw_state.mg_pll_tdc_coldst_bias);
+ }
+ drm_modeset_unlock_all(dev);
+
+ return 0;
+}
+
+static int i915_ipc_status_show(struct seq_file *m, void *data)
+{
+ struct drm_i915_private *dev_priv = m->private;
+
+ seq_printf(m, "Isochronous Priority Control: %s\n",
+ yesno(dev_priv->ipc_enabled));
+ return 0;
+}
+
+static int i915_ipc_status_open(struct inode *inode, struct file *file)
+{
+ struct drm_i915_private *dev_priv = inode->i_private;
+
+ if (!HAS_IPC(dev_priv))
+ return -ENODEV;
+
+ return single_open(file, i915_ipc_status_show, dev_priv);
+}
+
+static ssize_t i915_ipc_status_write(struct file *file, const char __user *ubuf,
+ size_t len, loff_t *offp)
+{
+ struct seq_file *m = file->private_data;
+ struct drm_i915_private *dev_priv = m->private;
+ intel_wakeref_t wakeref;
+ bool enable;
+ int ret;
+
+ ret = kstrtobool_from_user(ubuf, len, &enable);
+ if (ret < 0)
+ return ret;
+
+ with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) {
+ if (!dev_priv->ipc_enabled && enable)
+ drm_info(&dev_priv->drm,
+ "Enabling IPC: WM will be proper only after next commit\n");
+ dev_priv->wm.distrust_bios_wm = true;
+ dev_priv->ipc_enabled = enable;
+ intel_enable_ipc(dev_priv);
+ }
+
+ return len;
+}
+
+static const struct file_operations i915_ipc_status_fops = {
+ .owner = THIS_MODULE,
+ .open = i915_ipc_status_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .write = i915_ipc_status_write
+};
+
+static int i915_ddb_info(struct seq_file *m, void *unused)
+{
+ struct drm_i915_private *dev_priv = node_to_i915(m->private);
+ struct drm_device *dev = &dev_priv->drm;
+ struct skl_ddb_entry *entry;
+ struct intel_crtc *crtc;
+
+ if (INTEL_GEN(dev_priv) < 9)
+ return -ENODEV;
+