path: root/drivers/gpu/drm/display
Diffstat (limited to 'drivers/gpu/drm/display')
-rw-r--r--  drivers/gpu/drm/display/Kconfig                  21
-rw-r--r--  drivers/gpu/drm/display/Makefile                  2
-rw-r--r--  drivers/gpu/drm/display/drm_dp_aux_bus.c          2
-rw-r--r--  drivers/gpu/drm/display/drm_dp_helper.c         179
-rw-r--r--  drivers/gpu/drm/display/drm_dp_mst_topology.c    23
-rw-r--r--  drivers/gpu/drm/display/drm_dp_tunnel.c        1949
6 files changed, 2156 insertions(+), 20 deletions(-)
diff --git a/drivers/gpu/drm/display/Kconfig b/drivers/gpu/drm/display/Kconfig
index 09712b88a5b8..c0f56888c328 100644
--- a/drivers/gpu/drm/display/Kconfig
+++ b/drivers/gpu/drm/display/Kconfig
@@ -17,6 +17,27 @@ config DRM_DISPLAY_DP_HELPER
help
DRM display helpers for DisplayPort.
+config DRM_DISPLAY_DP_TUNNEL
+ bool
+ select DRM_DISPLAY_DP_HELPER
+ help
+ Enable support for DisplayPort tunnels. This allows drivers to use
+ DP tunnel features like the Bandwidth Allocation mode to maximize the
+ BW utilization for display streams on Thunderbolt links.
+
+config DRM_DISPLAY_DEBUG_DP_TUNNEL_STATE
+ bool "Enable debugging the DP tunnel state"
+ depends on REF_TRACKER
+ depends on DRM_DISPLAY_DP_TUNNEL
+ depends on DEBUG_KERNEL
+ depends on EXPERT
+ help
+ Enables debugging the DP tunnel manager's state, including the
+ consistency of all managed tunnels' reference counting and the state of
+ streams contained in tunnels.
+
+ If in doubt, say "N".
+
config DRM_DISPLAY_HDCP_HELPER
bool
depends on DRM_DISPLAY_HELPER
diff --git a/drivers/gpu/drm/display/Makefile b/drivers/gpu/drm/display/Makefile
index 17ac4a1006a8..7ca61333c669 100644
--- a/drivers/gpu/drm/display/Makefile
+++ b/drivers/gpu/drm/display/Makefile
@@ -8,6 +8,8 @@ drm_display_helper-$(CONFIG_DRM_DISPLAY_DP_HELPER) += \
drm_dp_helper.o \
drm_dp_mst_topology.o \
drm_dsc_helper.o
+drm_display_helper-$(CONFIG_DRM_DISPLAY_DP_TUNNEL) += \
+ drm_dp_tunnel.o
drm_display_helper-$(CONFIG_DRM_DISPLAY_HDCP_HELPER) += drm_hdcp_helper.o
drm_display_helper-$(CONFIG_DRM_DISPLAY_HDMI_HELPER) += \
drm_hdmi_helper.o \
diff --git a/drivers/gpu/drm/display/drm_dp_aux_bus.c b/drivers/gpu/drm/display/drm_dp_aux_bus.c
index 8a165be1a821..5afc26be9d2a 100644
--- a/drivers/gpu/drm/display/drm_dp_aux_bus.c
+++ b/drivers/gpu/drm/display/drm_dp_aux_bus.c
@@ -127,7 +127,7 @@ static void dp_aux_ep_shutdown(struct device *dev)
aux_ep_drv->shutdown(to_dp_aux_ep_dev(dev));
}
-static struct bus_type dp_aux_bus_type = {
+static const struct bus_type dp_aux_bus_type = {
.name = "dp-aux",
.match = dp_aux_ep_match,
.probe = dp_aux_ep_probe,
diff --git a/drivers/gpu/drm/display/drm_dp_helper.c b/drivers/gpu/drm/display/drm_dp_helper.c
index 26c188ce5f1c..f5d4be897866 100644
--- a/drivers/gpu/drm/display/drm_dp_helper.c
+++ b/drivers/gpu/drm/display/drm_dp_helper.c
@@ -533,6 +533,15 @@ static int drm_dp_dpcd_access(struct drm_dp_aux *aux, u8 request,
mutex_lock(&aux->hw_mutex);
/*
+ * If the device attached to the aux bus is powered down then there's
+ * no reason to attempt a transfer. Error out immediately.
+ */
+ if (aux->powered_down) {
+ ret = -EBUSY;
+ goto unlock;
+ }
+
+ /*
* The specification doesn't give any recommendation on how often to
* retry native transactions. We used to retry 7 times like for
* aux i2c transactions but real world devices this wasn't
@@ -600,6 +609,29 @@ int drm_dp_dpcd_probe(struct drm_dp_aux *aux, unsigned int offset)
EXPORT_SYMBOL(drm_dp_dpcd_probe);
/**
+ * drm_dp_dpcd_set_powered() - Set whether the DP device is powered
+ * @aux: DisplayPort AUX channel; for convenience it's OK to pass NULL here
+ * and the function will be a no-op.
+ * @powered: true if powered; false if not
+ *
+ * If the endpoint device on the DP AUX bus is known to be powered down
+ * then this function can be called to make future transfers fail immediately
+ * instead of needing to time out.
+ *
+ * If this function is never called then a device defaults to being powered.
+ */
+void drm_dp_dpcd_set_powered(struct drm_dp_aux *aux, bool powered)
+{
+ if (!aux)
+ return;
+
+ mutex_lock(&aux->hw_mutex);
+ aux->powered_down = !powered;
+ mutex_unlock(&aux->hw_mutex);
+}
+EXPORT_SYMBOL(drm_dp_dpcd_set_powered);
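As a usage illustration (not part of this patch): a minimal sketch of how an eDP panel driver might wrap its power sequencing around this helper, assuming a hypothetical struct my_panel that embeds a struct drm_dp_aux aux and holds a regulator supply:

static int panel_power_on(struct my_panel *panel)
{
	int ret;

	ret = regulator_enable(panel->supply);
	if (ret)
		return ret;

	/* DPCD/AUX transfers are expected to work again from this point. */
	drm_dp_dpcd_set_powered(&panel->aux, true);

	return 0;
}

static void panel_power_off(struct my_panel *panel)
{
	/* Make further DPCD transfers fail fast instead of timing out. */
	drm_dp_dpcd_set_powered(&panel->aux, false);
	regulator_disable(panel->supply);
}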
+
+/**
* drm_dp_dpcd_read() - read a series of bytes from the DPCD
* @aux: DisplayPort AUX channel (SST or MST)
* @offset: address of the (first) register to read
@@ -1858,6 +1890,9 @@ static int drm_dp_i2c_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs,
struct drm_dp_aux_msg msg;
int err = 0;
+ if (aux->powered_down)
+ return -EBUSY;
+
dp_aux_i2c_transfer_size = clamp(dp_aux_i2c_transfer_size, 1, DP_AUX_MAX_PAYLOAD_BYTES);
memset(&msg, 0, sizeof(msg));
@@ -2897,26 +2932,120 @@ static const char *dp_content_type_get_name(enum dp_content_type content_type)
}
}
-void drm_dp_vsc_sdp_log(const char *level, struct device *dev,
- const struct drm_dp_vsc_sdp *vsc)
+void drm_dp_vsc_sdp_log(struct drm_printer *p, const struct drm_dp_vsc_sdp *vsc)
{
-#define DP_SDP_LOG(fmt, ...) dev_printk(level, dev, fmt, ##__VA_ARGS__)
- DP_SDP_LOG("DP SDP: %s, revision %u, length %u\n", "VSC",
+ drm_printf(p, "DP SDP: VSC, revision %u, length %u\n",
vsc->revision, vsc->length);
- DP_SDP_LOG(" pixelformat: %s\n",
+ drm_printf(p, " pixelformat: %s\n",
dp_pixelformat_get_name(vsc->pixelformat));
- DP_SDP_LOG(" colorimetry: %s\n",
+ drm_printf(p, " colorimetry: %s\n",
dp_colorimetry_get_name(vsc->pixelformat, vsc->colorimetry));
- DP_SDP_LOG(" bpc: %u\n", vsc->bpc);
- DP_SDP_LOG(" dynamic range: %s\n",
+ drm_printf(p, " bpc: %u\n", vsc->bpc);
+ drm_printf(p, " dynamic range: %s\n",
dp_dynamic_range_get_name(vsc->dynamic_range));
- DP_SDP_LOG(" content type: %s\n",
+ drm_printf(p, " content type: %s\n",
dp_content_type_get_name(vsc->content_type));
-#undef DP_SDP_LOG
}
EXPORT_SYMBOL(drm_dp_vsc_sdp_log);
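With the drm_printer based signature above, callers choose the log sink themselves. A minimal sketch, assuming a driver with a struct drm_device *drm and an already populated struct drm_dp_vsc_sdp vsc (both illustrative):

	struct drm_printer p = drm_dbg_printer(drm, DRM_UT_KMS, "DP SDP");

	drm_dp_vsc_sdp_log(&p, &vsc);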
/**
+ * drm_dp_vsc_sdp_supported() - check if VSC SDP is supported
+ * @aux: DisplayPort AUX channel
+ * @dpcd: DisplayPort configuration data
+ *
+ * Returns true if the VSC SDP is supported, else returns false
+ */
+bool drm_dp_vsc_sdp_supported(struct drm_dp_aux *aux, const u8 dpcd[DP_RECEIVER_CAP_SIZE])
+{
+ u8 rx_feature;
+
+ if (dpcd[DP_DPCD_REV] < DP_DPCD_REV_13)
+ return false;
+
+ if (drm_dp_dpcd_readb(aux, DP_DPRX_FEATURE_ENUMERATION_LIST, &rx_feature) != 1) {
+ drm_dbg_dp(aux->drm_dev, "failed to read DP_DPRX_FEATURE_ENUMERATION_LIST\n");
+ return false;
+ }
+
+ return (rx_feature & DP_VSC_SDP_EXT_FOR_COLORIMETRY_SUPPORTED);
+}
+EXPORT_SYMBOL(drm_dp_vsc_sdp_supported);
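A hedged usage sketch: a driver could gate VSC SDP based colorimetry signalling on this helper, using the receiver capabilities it has already cached (the dpcd array and the flag name below are illustrative):

	bool vsc_sdp_colorimetry = drm_dp_vsc_sdp_supported(aux, dpcd);

	if (!vsc_sdp_colorimetry)
		drm_dbg_kms(aux->drm_dev, "Sink lacks VSC SDP colorimetry support\n");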
+
+/**
+ * drm_dp_vsc_sdp_pack() - pack a given VSC SDP into a generic dp_sdp
+ * @vsc: VSC SDP initialized according to its purpose, as defined in
+ * Tables 2-118 to 2-120 of the DP 1.4a specification
+ * @sdp: valid handle to the generic dp_sdp which will be packed
+ *
+ * Returns the length of the SDP on success or a negative error code on failure
+ */
+ssize_t drm_dp_vsc_sdp_pack(const struct drm_dp_vsc_sdp *vsc,
+ struct dp_sdp *sdp)
+{
+ size_t length = sizeof(struct dp_sdp);
+
+ memset(sdp, 0, sizeof(struct dp_sdp));
+
+ /*
+ * Prepare VSC Header for SU as per DP 1.4a spec, Table 2-119
+ * VSC SDP Header Bytes
+ */
+ sdp->sdp_header.HB0 = 0; /* Secondary-Data Packet ID = 0 */
+ sdp->sdp_header.HB1 = vsc->sdp_type; /* Secondary-data Packet Type */
+ sdp->sdp_header.HB2 = vsc->revision; /* Revision Number */
+ sdp->sdp_header.HB3 = vsc->length; /* Number of Valid Data Bytes */
+
+ if (vsc->revision == 0x6) {
+ sdp->db[0] = 1;
+ sdp->db[3] = 1;
+ }
+
+ /*
+ * Revisions 0x5 and 0x7 support the Pixel Encoding/Colorimetry
+ * Format as per the DP 1.4a and DP 2.0 specs respectively.
+ */
+ if (!(vsc->revision == 0x5 || vsc->revision == 0x7))
+ goto out;
+
+ /* VSC SDP Payload for DB16 through DB18 */
+ /* Pixel Encoding and Colorimetry Formats */
+ sdp->db[16] = (vsc->pixelformat & 0xf) << 4; /* DB16[7:4] */
+ sdp->db[16] |= vsc->colorimetry & 0xf; /* DB16[3:0] */
+
+ switch (vsc->bpc) {
+ case 6:
+ /* 6bpc: 0x0 */
+ break;
+ case 8:
+ sdp->db[17] = 0x1; /* DB17[3:0] */
+ break;
+ case 10:
+ sdp->db[17] = 0x2;
+ break;
+ case 12:
+ sdp->db[17] = 0x3;
+ break;
+ case 16:
+ sdp->db[17] = 0x4;
+ break;
+ default:
+ WARN(1, "Missing case %d\n", vsc->bpc);
+ return -EINVAL;
+ }
+
+ /* Dynamic Range and Component Bit Depth */
+ if (vsc->dynamic_range == DP_DYNAMIC_RANGE_CTA)
+ sdp->db[17] |= 0x80; /* DB17[7] */
+
+ /* Content Type */
+ sdp->db[18] = vsc->content_type & 0x7;
+
+out:
+ return length;
+}
+EXPORT_SYMBOL(drm_dp_vsc_sdp_pack);
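A minimal sketch of packing a revision 0x5 VSC SDP for an RGB 8 bpc stream; the field values are illustrative and writing the resulting struct dp_sdp out to hardware remains driver specific:

	struct drm_dp_vsc_sdp vsc = {
		.sdp_type = DP_SDP_VSC,
		.revision = 0x5,
		.length = 0x13,
		.pixelformat = DP_PIXELFORMAT_RGB,
		.colorimetry = DP_COLORIMETRY_DEFAULT,
		.bpc = 8,
		.dynamic_range = DP_DYNAMIC_RANGE_VESA,
		.content_type = DP_CONTENT_TYPE_NOT_DEFINED,
	};
	struct dp_sdp sdp;
	ssize_t len = drm_dp_vsc_sdp_pack(&vsc, &sdp);

	if (len < 0)
		return len;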
+
+/**
* drm_dp_get_pcon_max_frl_bw() - maximum frl supported by PCON
* @dpcd: DisplayPort configuration data
* @port_cap: port capabilities
@@ -4065,3 +4194,33 @@ int drm_dp_bw_channel_coding_efficiency(bool is_uhbr)
return 800000;
}
EXPORT_SYMBOL(drm_dp_bw_channel_coding_efficiency);
+
+/**
+ * drm_dp_max_dprx_data_rate - Get the max data bandwidth of a DPRX sink
+ * @max_link_rate: max DPRX link rate in 10kbps units
+ * @max_lanes: max DPRX lane count
+ *
+ * Given a link rate and lane count, get the data bandwidth.
+ *
+ * Data bandwidth is the actual payload rate, which depends on the data
+ * bandwidth efficiency and the link rate.
+ *
+ * Note that protocol layers above the DPRX link level considered here can
+ * further limit the maximum data rate. Such layers are the MST topology (with
+ * limits both on the link between the source and the first branch device and
+ * on the whole MST path up to the DPRX link) and (Thunderbolt) DP tunnels,
+ * which in turn can encapsulate an MST link with its own limit, with each
+ * SST or MST encapsulated tunnel sharing the BW of a tunnel group.
+ *
+ * Returns the maximum data rate in kBps units.
+ */
+int drm_dp_max_dprx_data_rate(int max_link_rate, int max_lanes)
+{
+ int ch_coding_efficiency =
+ drm_dp_bw_channel_coding_efficiency(drm_dp_is_uhbr_rate(max_link_rate));
+
+ return DIV_ROUND_DOWN_ULL(mul_u32_u32(max_link_rate * 10 * max_lanes,
+ ch_coding_efficiency),
+ 1000000 * 8);
+}
+EXPORT_SYMBOL(drm_dp_max_dprx_data_rate);
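As a worked example: for an HBR3 link (max_link_rate = 810000, i.e. 8.1 Gbps per lane) with 4 lanes, the non-UHBR 8b/10b coding efficiency of 800000 (80%) applies, so the result is 810000 * 10 * 4 * 800000 / (1000000 * 8) = 3240000 kBps, about 25.9 Gbps of payload bandwidth:

	int max_rate = drm_dp_max_dprx_data_rate(810000, 4); /* 3240000 kBps */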
diff --git a/drivers/gpu/drm/display/drm_dp_mst_topology.c b/drivers/gpu/drm/display/drm_dp_mst_topology.c
index f7c6b60629c2..03d528209426 100644
--- a/drivers/gpu/drm/display/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/display/drm_dp_mst_topology.c
@@ -1306,7 +1306,8 @@ static int drm_dp_mst_wait_tx_reply(struct drm_dp_mst_branch *mstb,
}
out:
if (unlikely(ret == -EIO) && drm_debug_enabled(DRM_UT_DP)) {
- struct drm_printer p = drm_debug_printer(DBG_PREFIX);
+ struct drm_printer p = drm_dbg_printer(mgr->dev, DRM_UT_DP,
+ DBG_PREFIX);
drm_dp_mst_dump_sideband_msg_tx(&p, txmsg);
}
@@ -1593,10 +1594,11 @@ topology_ref_type_to_str(enum drm_dp_mst_topology_ref_type type)
}
static void
-__dump_topology_ref_history(struct drm_dp_mst_topology_ref_history *history,
+__dump_topology_ref_history(struct drm_device *drm,
+ struct drm_dp_mst_topology_ref_history *history,
void *ptr, const char *type_str)
{
- struct drm_printer p = drm_debug_printer(DBG_PREFIX);
+ struct drm_printer p = drm_dbg_printer(drm, DRM_UT_DP, DBG_PREFIX);
char *buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
int i;
@@ -1638,15 +1640,15 @@ out:
static __always_inline void
drm_dp_mst_dump_mstb_topology_history(struct drm_dp_mst_branch *mstb)
{
- __dump_topology_ref_history(&mstb->topology_ref_history, mstb,
- "MSTB");
+ __dump_topology_ref_history(mstb->mgr->dev, &mstb->topology_ref_history,
+ mstb, "MSTB");
}
static __always_inline void
drm_dp_mst_dump_port_topology_history(struct drm_dp_mst_port *port)
{
- __dump_topology_ref_history(&port->topology_ref_history, port,
- "Port");
+ __dump_topology_ref_history(port->mgr->dev, &port->topology_ref_history,
+ port, "Port");
}
static __always_inline void
@@ -2824,7 +2826,9 @@ static int process_single_tx_qlock(struct drm_dp_mst_topology_mgr *mgr,
ret = drm_dp_send_sideband_msg(mgr, up, chunk, idx);
if (ret) {
if (drm_debug_enabled(DRM_UT_DP)) {
- struct drm_printer p = drm_debug_printer(DBG_PREFIX);
+ struct drm_printer p = drm_dbg_printer(mgr->dev,
+ DRM_UT_DP,
+ DBG_PREFIX);
drm_printf(&p, "sideband msg failed to send\n");
drm_dp_mst_dump_sideband_msg_tx(&p, txmsg);
@@ -2869,7 +2873,8 @@ static void drm_dp_queue_down_tx(struct drm_dp_mst_topology_mgr *mgr,
list_add_tail(&txmsg->next, &mgr->tx_msg_downq);
if (drm_debug_enabled(DRM_UT_DP)) {
- struct drm_printer p = drm_debug_printer(DBG_PREFIX);
+ struct drm_printer p = drm_dbg_printer(mgr->dev, DRM_UT_DP,
+ DBG_PREFIX);
drm_dp_mst_dump_sideband_msg_tx(&p, txmsg);
}
diff --git a/drivers/gpu/drm/display/drm_dp_tunnel.c b/drivers/gpu/drm/display/drm_dp_tunnel.c
new file mode 100644
index 000000000000..120e0de674c1
--- /dev/null
+++ b/drivers/gpu/drm/display/drm_dp_tunnel.c
@@ -0,0 +1,1949 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#include <linux/ref_tracker.h>
+#include <linux/types.h>
+
+#include <drm/drm_atomic_state_helper.h>
+
+#include <drm/drm_atomic.h>
+#include <drm/drm_print.h>
+#include <drm/display/drm_dp.h>
+#include <drm/display/drm_dp_helper.h>
+#include <drm/display/drm_dp_tunnel.h>
+
+#define to_group(__private_obj) \
+ container_of(__private_obj, struct drm_dp_tunnel_group, base)
+
+#define to_group_state(__private_state) \
+ container_of(__private_state, struct drm_dp_tunnel_group_state, base)
+
+#define is_dp_tunnel_private_obj(__obj) \
+ ((__obj)->funcs == &tunnel_group_funcs)
+
+#define for_each_new_group_in_state(__state, __new_group_state, __i) \
+ for ((__i) = 0; \
+ (__i) < (__state)->num_private_objs; \
+ (__i)++) \
+ for_each_if ((__state)->private_objs[__i].ptr && \
+ is_dp_tunnel_private_obj((__state)->private_objs[__i].ptr) && \
+ ((__new_group_state) = \
+ to_group_state((__state)->private_objs[__i].new_state), 1))
+
+#define for_each_old_group_in_state(__state, __old_group_state, __i) \
+ for ((__i) = 0; \
+ (__i) < (__state)->num_private_objs; \
+ (__i)++) \
+ for_each_if ((__state)->private_objs[__i].ptr && \
+ is_dp_tunnel_private_obj((__state)->private_objs[__i].ptr) && \
+ ((__old_group_state) = \
+ to_group_state((__state)->private_objs[__i].old_state), 1))
+
+#define for_each_tunnel_in_group(__group, __tunnel) \
+ list_for_each_entry(__tunnel, &(__group)->tunnels, node)
+
+#define for_each_tunnel_state(__group_state, __tunnel_state) \
+ list_for_each_entry(__tunnel_state, &(__group_state)->tunnel_states, node)
+
+#define for_each_tunnel_state_safe(__group_state, __tunnel_state, __tunnel_state_tmp) \
+ list_for_each_entry_safe(__tunnel_state, __tunnel_state_tmp, \
+ &(__group_state)->tunnel_states, node)
+
+#define kbytes_to_mbits(__kbytes) \
+ DIV_ROUND_UP((__kbytes) * 8, 1000)
+
+#define DPTUN_BW_ARG(__bw) ((__bw) < 0 ? (__bw) : kbytes_to_mbits(__bw))
+
+#define __tun_prn(__tunnel, __level, __type, __fmt, ...) \
+ drm_##__level##__type((__tunnel)->group->mgr->dev, \
+ "[DPTUN %s][%s] " __fmt, \
+ drm_dp_tunnel_name(__tunnel), \
+ (__tunnel)->aux->name, ## \
+ __VA_ARGS__)
+
+#define tun_dbg(__tunnel, __fmt, ...) \
+ __tun_prn(__tunnel, dbg, _kms, __fmt, ## __VA_ARGS__)
+
+#define tun_dbg_stat(__tunnel, __err, __fmt, ...) do { \
+ if (__err) \
+ __tun_prn(__tunnel, dbg, _kms, __fmt " (Failed, err: %pe)\n", \
+ ## __VA_ARGS__, ERR_PTR(__err)); \
+ else \
+ __tun_prn(__tunnel, dbg, _kms, __fmt " (Ok)\n", \
+ ## __VA_ARGS__); \
+} while (0)
+
+#define tun_dbg_atomic(__tunnel, __fmt, ...) \
+ __tun_prn(__tunnel, dbg, _atomic, __fmt, ## __VA_ARGS__)
+
+#define tun_grp_dbg(__group, __fmt, ...) \
+ drm_dbg_kms((__group)->mgr->dev, \
+ "[DPTUN %s] " __fmt, \
+ drm_dp_tunnel_group_name(__group), ## \
+ __VA_ARGS__)
+
+#define DP_TUNNELING_BASE DP_TUNNELING_OUI
+
+#define __DPTUN_REG_RANGE(__start, __size) \
+ GENMASK_ULL((__start) + (__size) - 1, (__start))
+
+#define DPTUN_REG_RANGE(__addr, __size) \
+ __DPTUN_REG_RANGE((__addr) - DP_TUNNELING_BASE, (__size))
+
+#define DPTUN_REG(__addr) DPTUN_REG_RANGE(__addr, 1)
+
+#define DPTUN_INFO_REG_MASK ( \
+ DPTUN_REG_RANGE(DP_TUNNELING_OUI, DP_TUNNELING_OUI_BYTES) | \
+ DPTUN_REG_RANGE(DP_TUNNELING_DEV_ID, DP_TUNNELING_DEV_ID_BYTES) | \
+ DPTUN_REG(DP_TUNNELING_HW_REV) | \
+ DPTUN_REG(DP_TUNNELING_SW_REV_MAJOR) | \
+ DPTUN_REG(DP_TUNNELING_SW_REV_MINOR) | \
+ DPTUN_REG(DP_TUNNELING_CAPABILITIES) | \
+ DPTUN_REG(DP_IN_ADAPTER_INFO) | \
+ DPTUN_REG(DP_USB4_DRIVER_ID) | \
+ DPTUN_REG(DP_USB4_DRIVER_BW_CAPABILITY) | \
+ DPTUN_REG(DP_IN_ADAPTER_TUNNEL_INFORMATION) | \
+ DPTUN_REG(DP_BW_GRANULARITY) | \
+ DPTUN_REG(DP_ESTIMATED_BW) | \
+ DPTUN_REG(DP_ALLOCATED_BW) | \
+ DPTUN_REG(DP_TUNNELING_MAX_LINK_RATE) | \
+ DPTUN_REG(DP_TUNNELING_MAX_LANE_COUNT) | \
+ DPTUN_REG(DP_DPTX_BW_ALLOCATION_MODE_CONTROL))
+
+static const DECLARE_BITMAP(dptun_info_regs, 64) = {
+ DPTUN_INFO_REG_MASK & -1UL,
+#if BITS_PER_LONG == 32
+ DPTUN_INFO_REG_MASK >> 32,
+#endif
+};
+
+struct drm_dp_tunnel_regs {
+ u8 buf[HWEIGHT64(DPTUN_INFO_REG_MASK)];
+};
+
+struct drm_dp_tunnel_group;
+
+struct drm_dp_tunnel {
+ struct drm_dp_tunnel_group *group;
+
+ struct list_head node;
+
+ struct kref kref;
+ struct ref_tracker *tracker;
+ struct drm_dp_aux *aux;
+ char name[8];
+
+ int bw_granularity;
+ int estimated_bw;
+ int allocated_bw;
+
+ int max_dprx_rate;
+ u8 max_dprx_lane_count;
+
+ u8 adapter_id;
+
+ bool bw_alloc_supported:1;
+ bool bw_alloc_enabled:1;
+ bool has_io_error:1;
+ bool destroyed:1;
+};
+
+struct drm_dp_tunnel_group_state;
+
+struct drm_dp_tunnel_state {
+ struct drm_dp_tunnel_group_state *group_state;
+
+ struct drm_dp_tunnel_ref tunnel_ref;
+
+ struct list_head node;
+
+ u32 stream_mask;
+ int *stream_bw;
+};
+
+struct drm_dp_tunnel_group_state {
+ struct drm_private_state base;
+
+ struct list_head tunnel_states;
+};
+
+struct drm_dp_tunnel_group {
+ struct drm_private_obj base;
+ struct drm_dp_tunnel_mgr *mgr;
+
+ struct list_head tunnels;
+
+ /* available BW including the allocated_bw of all tunnels in the group */
+ int available_bw;
+
+ u8 drv_group_id;
+ char name[8];
+
+ bool active:1;
+};
+
+struct drm_dp_tunnel_mgr {
+ struct drm_device *dev;
+
+ int group_count;
+ struct drm_dp_tunnel_group *groups;
+ wait_queue_head_t bw_req_queue;
+
+#ifdef CONFIG_DRM_DISPLAY_DEBUG_DP_TUNNEL_STATE
+ struct ref_tracker_dir ref_tracker;
+#endif
+};
+
+/*
+ * The following helpers provide a way to read out the tunneling DPCD
+ * registers with a minimal amount of AUX transfers (1 transfer per contiguous
+ * range, as permitted by the 16 byte per transfer AUX limit), not accessing
+ * other registers to avoid any read side-effects.
+ */
+static int next_reg_area(int *offset)
+{
+ *offset = find_next_bit(dptun_info_regs, 64, *offset);
+
+ return find_next_zero_bit(dptun_info_regs, 64, *offset + 1) - *offset;
+}
+
+#define tunnel_reg_ptr(__regs, __address) ({ \
+ WARN_ON(!test_bit((__address) - DP_TUNNELING_BASE, dptun_info_regs)); \
+ &(__regs)->buf[bitmap_weight(dptun_info_regs, (__address) - DP_TUNNELING_BASE)]; \
+})
+
+static int read_tunnel_regs(struct drm_dp_aux *aux, struct drm_dp_tunnel_regs *regs)
+{
+ int offset = 0;
+ int len;
+
+ while ((len = next_reg_area(&offset))) {
+ int address = DP_TUNNELING_BASE + offset;
+
+ if (drm_dp_dpcd_read(aux, address, tunnel_reg_ptr(regs, address), len) < 0)
+ return -EIO;
+
+ offset += len;
+ }
+
+ return 0;
+}
+
+static u8 tunnel_reg(const struct drm_dp_tunnel_regs *regs, int address)
+{
+ return *tunnel_reg_ptr(regs, address);
+}
+
+static u8 tunnel_reg_drv_group_id(const struct drm_dp_tunnel_regs *regs)
+{
+ u8 drv_id = tunnel_reg(regs, DP_USB4_DRIVER_ID) & DP_USB4_DRIVER_ID_MASK;
+ u8 group_id = tunnel_reg(regs, DP_IN_ADAPTER_TUNNEL_INFORMATION) & DP_GROUP_ID_MASK;
+
+ if (!group_id)
+ return 0;
+
+ return (drv_id << DP_GROUP_ID_BITS) | group_id;
+}
+
+/* Return granularity in kB/s units */
+static int tunnel_reg_bw_granularity(const struct drm_dp_tunnel_regs *regs)
+{
+ int gr = tunnel_reg(regs, DP_BW_GRANULARITY) & DP_BW_GRANULARITY_MASK;
+
+ if (gr > 2)
+ return -1;
+
+ return (250000 << gr) / 8;
+}
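As a worked example of the conversion above: a DP_BW_GRANULARITY value of 0 yields (250000 << 0) / 8 = 31250 kB/s (0.25 Gbps), a value of 1 yields 62500 kB/s (0.5 Gbps) and a value of 2 yields 125000 kB/s (1 Gbps); any larger value is rejected as invalid.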
+
+static int tunnel_reg_max_dprx_rate(const struct drm_dp_tunnel_regs *regs)
+{
+ u8 bw_code = tunnel_reg(regs, DP_TUNNELING_MAX_LINK_RATE);
+
+ return drm_dp_bw_code_to_link_rate(bw_code);
+}
+
+static int tunnel_reg_max_dprx_lane_count(const struct drm_dp_tunnel_regs *regs)
+{
+ return tunnel_reg(regs, DP_TUNNELING_MAX_LANE_COUNT) &
+ DP_TUNNELING_MAX_LANE_COUNT_MASK;
+}
+
+static bool tunnel_reg_bw_alloc_supported(const struct drm_dp_tunnel_regs *regs)
+{
+ u8 cap_mask = DP_TUNNELING_SUPPORT | DP_IN_BW_ALLOCATION_MODE_SUPPORT;
+
+ if ((tunnel_reg(regs, DP_TUNNELING_CAPABILITIES) & cap_mask) != cap_mask)
+ return false;
+
+ return tunnel_reg(regs, DP_USB4_DRIVER_BW_CAPABILITY) &
+ DP_USB4_DRIVER_BW_ALLOCATION_MODE_SUPPORT;
+}
+
+static bool tunnel_reg_bw_alloc_enabled(const struct drm_dp_tunnel_regs *regs)
+{
+ return tunnel_reg(regs, DP_DPTX_BW_ALLOCATION_MODE_CONTROL) &
+ DP_DISPLAY_DRIVER_BW_ALLOCATION_MODE_ENABLE;
+}
+
+static u8 tunnel_group_drv_id(u8 drv_group_id)
+{
+ return drv_group_id >> DP_GROUP_ID_BITS;
+}
+
+static u8 tunnel_group_id(u8 drv_group_id)
+{
+ return drv_group_id & DP_GROUP_ID_MASK;
+}
+
+const char *drm_dp_tunnel_name(const struct drm_dp_tunnel *tunnel)
+{
+ return tunnel->name;
+}
+EXPORT_SYMBOL(drm_dp_tunnel_name);
+
+static const char *drm_dp_tunnel_group_name(const struct drm_dp_tunnel_group *group)
+{
+ return group->name;
+}
+
+static struct drm_dp_tunnel_group *
+lookup_or_alloc_group(struct drm_dp_tunnel_mgr *mgr, u8 drv_group_id)
+{
+ struct drm_dp_tunnel_group *group = NULL;
+ int i;
+
+ for (i = 0; i < mgr->group_count; i++) {
+ /*
+ * A tunnel group with a 0 group ID shouldn't have more than one
+ * tunnel.
+ */
+ if (tunnel_group_id(drv_group_id) &&
+ mgr->groups[i].drv_group_id == drv_group_id)
+ return &mgr->groups[i];
+
+ if (!group && !mgr->groups[i].active)
+ group = &mgr->groups[i];
+ }
+
+ if (!group) {
+ drm_dbg_kms(mgr->dev,
+ "DPTUN: Can't allocate more tunnel groups\n");
+ return NULL;
+ }
+
+ group->drv_group_id = drv_group_id;
+ group->active = true;
+
+ /*
+ * The group name format here and elsewhere: Driver-ID:Group-ID:*
+ * (* standing for all DP-Adapters/tunnels in the group).
+ */
+ snprintf(group->name, sizeof(group->name), "%d:%d:*",
+ tunnel_group_drv_id(drv_group_id) & ((1 << DP_GROUP_ID_BITS) - 1),
+ tunnel_group_id(drv_group_id) & ((1 << DP_USB4_DRIVER_ID_BITS) - 1));
+
+ return group;
+}
+
+static void free_group(struct drm_dp_tunnel_group *group)
+{
+ struct drm_dp_tunnel_mgr *mgr = group->mgr;
+
+ if (drm_WARN_ON(mgr->dev, !list_empty(&group->tunnels)))
+ return;
+
+ group->drv_group_id = 0;
+ group->available_bw = -1;
+ group->active = false;
+}
+
+static struct drm_dp_tunnel *
+tunnel_get(struct drm_dp_tunnel *tunnel)
+{
+ kref_get(&tunnel->kref);
+
+ return tunnel;
+}
+
+static void free_tunnel(struct kref *kref)
+{
+ struct drm_dp_tunnel *tunnel = container_of(kref, typeof(*tunnel), kref);
+ struct drm_dp_tunnel_group *group = tunnel->group;
+
+ list_del(&tunnel->node);
+ if (list_empty(&group->tunnels))
+ free_group(group);
+
+ kfree(tunnel);
+}
+
+static void tunnel_put(struct drm_dp_tunnel *tunnel)
+{
+ kref_put(&tunnel->kref, free_tunnel);
+}
+
+#ifdef CONFIG_DRM_DISPLAY_DEBUG_DP_TUNNEL_STATE
+static void track_tunnel_ref(struct drm_dp_tunnel *tunnel,
+ struct ref_tracker **tracker)
+{
+ ref_tracker_alloc(&tunnel->group->mgr->ref_tracker,
+ tracker, GFP_KERNEL);
+}
+
+static void untrack_tunnel_ref(struct drm_dp_tunnel *tunnel,
+ struct ref_tracker **tracker)
+{
+ ref_tracker_free(&tunnel->group->mgr->ref_tracker,
+ tracker);
+}
+#else
+static void track_tunnel_ref(struct drm_dp_tunnel *tunnel,
+ struct ref_tracker **tracker)
+{
+}
+
+static void untrack_tunnel_ref(struct drm_dp_tunnel *tunnel,
+ struct ref_tracker **tracker)
+{
+}
+#endif
+
+/**
+ * drm_dp_tunnel_get - Get a reference for a DP tunnel
+ * @tunnel: Tunnel object
+ * @tracker: Debug tracker for the reference
+ *
+ * Get a reference for @tunnel, along with a debug tracker to help locate
+ * the source of issues such as a reference leak or a double reference put.
+ *
+ * The reference must be dropped after use by calling drm_dp_tunnel_put(),
+ * passing @tunnel and the *@tracker returned from here.
+ *
+ * Returns @tunnel - as a convenience - along with *@tracker.
+ */
+struct drm_dp_tunnel *
+drm_dp_tunnel_get(struct drm_dp_tunnel *tunnel,
+ struct ref_tracker **tracker)
+{
+ track_tunnel_ref(tunnel, tracker);
+
+ return tunnel_get(tunnel);
+}
+EXPORT_SYMBOL(drm_dp_tunnel_get);
+
+/**
+ * drm_dp_tunnel_put - Put a reference for a DP tunnel
+ * @tunnel: Tunnel object
+ * @tracker: Debug tracker for the reference
+ *
+ * Put a reference for @tunnel along with its debug *@tracker, which
+ * was obtained with drm_dp_tunnel_get().
+ */
+void drm_dp_tunnel_put(struct drm_dp_tunnel *tunnel,
+ struct ref_tracker **tracker)
+{
+ untrack_tunnel_ref(tunnel, tracker);
+
+ tunnel_put(tunnel);
+}
+EXPORT_SYMBOL(drm_dp_tunnel_put);
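A minimal sketch of the get/put pairing from a driver's point of view; the detected_tunnel pointer, the dev pointer and the logging are illustrative only:

	struct ref_tracker *tracker;
	struct drm_dp_tunnel *tunnel;

	tunnel = drm_dp_tunnel_get(detected_tunnel, &tracker);

	/* Use the tunnel, e.g. log its name. */
	drm_dbg_kms(dev, "Using DP tunnel %s\n", drm_dp_tunnel_name(tunnel));

	drm_dp_tunnel_put(tunnel, &tracker);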
+
+static bool add_tunnel_to_group(struct drm_dp_tunnel_mgr *mgr,
+ u8 drv_group_id,
+ struct drm_dp_tunnel *tunnel)
+{
+ struct drm_dp_tunnel_group *group;
+
+ group = lookup_or_alloc_group(mgr, drv_group_id);
+ if (!group)
+ return false;
+
+ tunnel->group = group;
+ list_add(&tunnel->node, &group->tunnels);
+
+ return true;
+}
+
+static struct drm_dp_tunnel *
+create_tunnel(struct drm_dp_tunnel_mgr *mgr,
+ struct drm_dp_aux *aux,
+ const struct drm_dp_tunnel_regs *regs)
+{
+ u8 drv_group_id = tunnel_reg_drv_group_id(regs);
+ struct drm_dp_tunnel *tunnel;
+
+ tunnel = kzalloc(sizeof(*tunnel), GFP_KERNEL);
+ if (!tunnel)
+ return NULL;
+
+ INIT_LIST_HEAD(&tunnel->node);
+
+ kref_init(&tunnel->kref);
+
+ tunnel->aux = aux;
+
+ tunnel->adapter_id = tunnel_reg(regs, DP_IN_ADAPTER_INFO) & DP_IN_ADAPTER_NUMBER_MASK;
+
+ snprintf(tunnel->name, sizeof(tunnel->name), "%d:%d:%d",
+ tunnel_group_drv_id(drv_group_id) & ((1 << DP_GROUP_ID_BITS) - 1),
+ tunnel_group_id(drv_group_id) & ((1 << DP_USB4_DRIVER_ID_BITS) - 1),
+ tunnel->adapter_id & ((1 << DP_IN_ADAPTER_NUMBER_BITS) - 1));
+
+ tunnel->bw_granularity = tunnel_reg_bw_granularity(regs);
+ tunnel->allocated_bw = tunnel_reg(regs, DP_ALLOCATED_BW) *
+ tunnel->bw_granularity;
+ /*
+ * An initial allocated BW of 0 indicates an undefined state: the
+ * actual allocation is determined by the TBT CM, usually following a
+ * legacy allocation policy (based on the max DPRX caps). From the
+ * driver's POV the state becomes defined only after the first
+ * allocation request.
+ */
+ if (!tunnel->allocated_bw)
+ tunnel->allocated_bw = -1;
+
+ tunnel->bw_alloc_supported = tunnel_reg_bw_alloc_supported(regs);
+ tunnel->bw_alloc_enabled = tunnel_reg_bw_alloc_enabled(regs);
+
+ if (!add_tunnel_to_group(mgr, drv_group_id, tunnel)) {
+ kfree(tunnel);
+
+ return NULL;
+ }
+
+ track_tunnel_ref(tunnel, &tunnel->tracker);
+
+ return tunnel;
+}
+
+static void destroy_tunnel(struct drm_dp_tunnel *tunnel)
+{
+ untrack_tunnel_ref(tunnel, &tunnel->tracker);
+ tunnel_put(tunnel);
+}
+
+/**
+ * drm_dp_tunnel_set_io_error - Set the IO error flag for a DP tunnel
+ * @tunnel: Tunnel object
+ *
+ * Set the IO error flag for @tunnel. Drivers can call this function upon
+ * detecting a failure that affects the tunnel functionality, for instance
+ * after a DP AUX transfer failure on the port @tunnel is connected to.
+ *
+ * This disables further management of @tunnel, including any related
+ * AUX accesses for tunneling DPCD registers, returning an error to the
+ * initiators of these accesses. The driver is expected to drop this tunnel
+ * and - optionally - recreate it.
+ */
+void drm_dp_tunnel_set_io_error(struct drm_dp_tunnel *tunnel)
+{
+ tunnel->has_io_error = true;
+}
+EXPORT_SYMBOL(drm_dp_tunnel_set_io_error);
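A hedged sketch of the intended call site: after an AUX failure on the tunnelled port the driver flags the tunnel and schedules its teardown and, optionally, recreation (the sink-count read and the work item below are illustrative):

	u8 sink_count;

	if (drm_dp_dpcd_readb(aux, DP_SINK_COUNT, &sink_count) < 0) {
		drm_dp_tunnel_set_io_error(tunnel);
		schedule_work(&reset_work);
	}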
+
+#define SKIP_DPRX_CAPS_CHECK BIT(0)
+#define ALLOW_ALLOCATED_BW_CHANGE BIT(1)
+static bool tunnel_regs_are_valid(struct drm_dp_tunnel_mgr *mgr,
+ const struct drm_dp_tunnel_regs *regs,
+ unsigned int flags)
+{
+ u8 drv_group_id = tunnel_reg_drv_group_id(regs);
+ bool check_dprx = !(flags & SKIP_DPRX_CAPS_CHECK);
+ bool ret = true;
+
+ if (!tunnel_reg_bw_alloc_supported(regs)) {
+ if (tunnel_group_id(drv_group_id)) {
+ drm_dbg_kms(mgr->dev,
+ "DPTUN: A non-zero group ID is only allowed with BWA support\n");
+ ret = false;
+ }
+
+ if (tunnel_reg(regs, DP_ALLOCATED_BW)) {
+ drm_dbg_kms(mgr->dev,
+ "DPTUN: BW is allocated without BWA support\n");
+ ret = false;
+ }
+
+ return ret;
+ }
+
+ if (!tunnel_group_id(drv_group_id)) {
+ drm_dbg_kms(mgr->dev,
+ "DPTUN: BWA support requires a non-zero group ID\n");
+ ret = false;
+ }
+
+ if (check_dprx && hweight8(tunnel_reg_max_dprx_lane_count(regs)) != 1) {
+ drm_dbg_kms(mgr->dev,
+ "DPTUN: Invalid DPRX lane count: %d\n",
+ tunnel_reg_max_dprx_lane_count(regs));
+
+ ret = false;
+ }
+
+ if (check_dprx && !tunnel_reg_max_dprx_rate(regs)) {
+ drm_dbg_kms(mgr->dev,
+ "DPTUN: DPRX rate is 0\n");
+
+ ret = false;
+ }
+
+ if (tunnel_reg_bw_granularity(regs) < 0) {
+ drm_dbg_kms(mgr->dev,
+ "DPTUN: Invalid BW granularity\n");
+
+ ret = false;
+ }
+
+ if (tunnel_reg(regs, DP_ALLOCATED_BW) > tunnel_reg(regs, DP_ESTIMATED_BW)) {
+ drm_dbg_kms(mgr->dev,
+ "DPTUN: Allocated BW %d > estimated BW %d Mb/s\n",
+ DPTUN_BW_ARG(tunnel_reg(regs, DP_ALLOCATED_BW) *
+ tunnel_reg_bw_granularity(regs)),
+ DPTUN_BW_ARG(tunnel_reg(regs, DP_ESTIMATED_BW) *
+ tunnel_reg_bw_granularity(regs)));
+
+ ret = false;
+ }
+
+ return ret;
+}
+
+static int tunnel_allocated_bw(const struct drm_dp_tunnel *tunnel)
+{
+ return max(tunnel->allocated_bw, 0);
+}
+
+static bool tunnel_info_changes_are_valid(struct drm_dp_tunnel *tunnel,
+ const struct drm_dp_tunnel_regs *regs,
+ unsigned int flags)
+{
+ u8 new_drv_group_id = tunnel_reg_drv_group_id(regs);
+ bool ret = true;
+
+ if (tunnel->bw_alloc_supported != tunnel_reg_bw_alloc_supported(regs)) {
+ tun_dbg(tunnel,
+ "BW alloc support has changed %s -> %s\n",
+ str_yes_no(tunnel->bw_alloc_supported),
+ str_yes_no(tunnel_reg_bw_alloc_supported(regs)));
+
+ ret = false;
+ }
+
+ if (tunnel->group->drv_group_id != new_drv_group_id) {
+ tun_dbg(tunnel,
+ "Driver/group ID has changed %d:%d:* -> %d:%d:*\n",
+ tunnel_group_drv_id(tunnel->group->drv_group_id),
+ tunnel_group_id(tunnel->group->drv_group_id),
+ tunnel_group_drv_id(new_drv_group_id),
+ tunnel_group_id(new_drv_group_id));
+
+ ret = false;
+ }
+
+ if (!tunnel->bw_alloc_supported)
+ return ret;
+
+ if (tunnel->bw_granularity != tunnel_reg_bw_granularity(regs)) {
+ tun_dbg(tunnel,
+ "BW granularity has changed: %d -> %d Mb/s\n",
+ DPTUN_BW_ARG(tunnel->bw_granularity),
+ DPTUN_BW_ARG(tunnel_reg_bw_granularity(regs)));
+
+ ret = false;
+ }
+
+ /*
+ * On some devices at least the BW alloc mode enabled status is always
+ * reported as 0, so skip checking that here.
+ */
+
+ if (!(flags & ALLOW_ALLOCATED_BW_CHANGE) &&
+ tunnel_allocated_bw(tunnel) !=
+ tunnel_reg(regs, DP_ALLOCATED_BW) * tunnel->bw_granularity) {
+ tun_dbg(tunnel,
+ "Allocated BW has changed: %d -> %d Mb/s\n",
+ DPTUN_BW_ARG(tunnel->allocated_bw),
+ DPTUN_BW_ARG(tunnel_reg(regs, DP_ALLOCATED_BW) * tunnel->bw_granularity));
+
+ ret = false;
+ }
+
+ return ret;
+}
+
+static int
+read_and_verify_tunnel_regs(struct drm_dp_tunnel *tunnel,
+ struct drm_dp_tunnel_regs *regs,
+ unsigned int flags)
+{
+ int err;
+
+ err = read_tunnel_regs(tunnel->aux, regs);
+ if (err < 0) {
+ drm_dp_tunnel_set_io_error(tunnel);
+
+ return err;
+ }
+
+ if (!tunnel_regs_are_valid(tunnel->group->mgr, regs, flags))
+ return -EINVAL;
+
+ if (!tunnel_info_changes_are_valid(tunnel, regs, flags))
+ return -EINVAL;
+
+ return 0;
+}
+