| author | Dave Airlie <airlied@redhat.com> | 2024-02-28 11:02:54 +1000 |
|---|---|---|
| committer | Dave Airlie <airlied@redhat.com> | 2024-02-28 11:02:55 +1000 |
| commit | ca7a1d0d18acbd2b49aeec5265083d05c49222df (patch) | |
| tree | d3683d5a15a155a895832f648a9177ec259b2ee5 | /drivers/gpu/drm/display |
| parent | 3fe262eca5bd97cbde65ec71b4491c6461ffc7a7 (diff) | |
| parent | e60cff453b82789a652239c6200bd90d5178d2a0 (diff) | |
Merge tag 'drm-intel-next-2024-02-27-1' of git://anongit.freedesktop.org/drm/drm-intel into drm-next
drm/i915 feature pull #2 for v6.9:
Features and functionality:
- DP tunneling and bandwidth allocation support (Imre)
- Add more ADL-N PCI IDs (Gustavo)
- Enable fastboot also on older platforms (Ville)
- Bigjoiner force enable debugfs option for testing (Stan)
Refactoring and cleanups:
- Remove unused structs and struct members (Jiri Slaby)
- Use per-device debug logging (Ville)
- State check improvements (Ville)
- Hardcoded cd2x divider cleanups (Ville)
- CDCLK documentation updates (Ville, Rodrigo)
Fixes:
- HDCP MST Type1 fixes (Suraj)
- Fix MTL C20 PHY PLL values (Ravi)
- More hardware access prevention during init (Imre)
- Always enable decompression with tile4 on Xe2 (Juha-Pekka)
- Improve LNL package C residency (Suraj)
drm core changes:
- DP tunneling and bandwidth allocation helpers (Imre)
Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Jani Nikula <jani.nikula@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/87sf1devbj.fsf@intel.com
Diffstat (limited to 'drivers/gpu/drm/display')
| -rw-r--r-- | drivers/gpu/drm/display/Kconfig | 21 |
| -rw-r--r-- | drivers/gpu/drm/display/Makefile | 2 |
| -rw-r--r-- | drivers/gpu/drm/display/drm_dp_helper.c | 30 |
| -rw-r--r-- | drivers/gpu/drm/display/drm_dp_tunnel.c | 1949 |
4 files changed, 2002 insertions, 0 deletions
```diff
diff --git a/drivers/gpu/drm/display/Kconfig b/drivers/gpu/drm/display/Kconfig
index 09712b88a5b8..c0f56888c328 100644
--- a/drivers/gpu/drm/display/Kconfig
+++ b/drivers/gpu/drm/display/Kconfig
@@ -17,6 +17,27 @@ config DRM_DISPLAY_DP_HELPER
 	help
 	  DRM display helpers for DisplayPort.
 
+config DRM_DISPLAY_DP_TUNNEL
+	bool
+	select DRM_DISPLAY_DP_HELPER
+	help
+	  Enable support for DisplayPort tunnels. This allows drivers to use
+	  DP tunnel features like the Bandwidth Allocation mode to maximize the
+	  BW utilization for display streams on Thunderbolt links.
+
+config DRM_DISPLAY_DEBUG_DP_TUNNEL_STATE
+	bool "Enable debugging the DP tunnel state"
+	depends on REF_TRACKER
+	depends on DRM_DISPLAY_DP_TUNNEL
+	depends on DEBUG_KERNEL
+	depends on EXPERT
+	help
+	  Enables debugging the DP tunnel manager's state, including the
+	  consistency of all managed tunnels' reference counting and the state of
+	  streams contained in tunnels.
+
+	  If in doubt, say "N".
+
 config DRM_DISPLAY_HDCP_HELPER
 	bool
 	depends on DRM_DISPLAY_HELPER
diff --git a/drivers/gpu/drm/display/Makefile b/drivers/gpu/drm/display/Makefile
index 17ac4a1006a8..7ca61333c669 100644
--- a/drivers/gpu/drm/display/Makefile
+++ b/drivers/gpu/drm/display/Makefile
@@ -8,6 +8,8 @@ drm_display_helper-$(CONFIG_DRM_DISPLAY_DP_HELPER) += \
 	drm_dp_helper.o \
 	drm_dp_mst_topology.o \
 	drm_dsc_helper.o
+drm_display_helper-$(CONFIG_DRM_DISPLAY_DP_TUNNEL) += \
+	drm_dp_tunnel.o
 drm_display_helper-$(CONFIG_DRM_DISPLAY_HDCP_HELPER) += drm_hdcp_helper.o
 drm_display_helper-$(CONFIG_DRM_DISPLAY_HDMI_HELPER) += \
 	drm_hdmi_helper.o \
diff --git a/drivers/gpu/drm/display/drm_dp_helper.c b/drivers/gpu/drm/display/drm_dp_helper.c
index 8d6ce46471ae..d046dfa79504 100644
--- a/drivers/gpu/drm/display/drm_dp_helper.c
+++ b/drivers/gpu/drm/display/drm_dp_helper.c
@@ -4055,3 +4055,33 @@ int drm_dp_bw_channel_coding_efficiency(bool is_uhbr)
 	return 800000;
 }
 EXPORT_SYMBOL(drm_dp_bw_channel_coding_efficiency);
+
+/**
+ * drm_dp_max_dprx_data_rate - Get the max data bandwidth of a DPRX sink
+ * @max_link_rate: max DPRX link rate in 10kbps units
+ * @max_lanes: max DPRX lane count
+ *
+ * Given a link rate and lanes, get the data bandwidth.
+ *
+ * Data bandwidth is the actual payload rate, which depends on the data
+ * bandwidth efficiency and the link rate.
+ *
+ * Note that protocol layers above the DPRX link level considered here can
+ * further limit the maximum data rate. Such layers are the MST topology (with
+ * limits on the link between the source and first branch device as well as on
+ * the whole MST path until the DPRX link) and (Thunderbolt) DP tunnels -
+ * which in turn can encapsulate an MST link with its own limit - with each
+ * SST or MST encapsulated tunnel sharing the BW of a tunnel group.
+ *
+ * Returns the maximum data rate in kBps units.
+ */
+int drm_dp_max_dprx_data_rate(int max_link_rate, int max_lanes)
+{
+	int ch_coding_efficiency =
+		drm_dp_bw_channel_coding_efficiency(drm_dp_is_uhbr_rate(max_link_rate));
+
+	return DIV_ROUND_DOWN_ULL(mul_u32_u32(max_link_rate * 10 * max_lanes,
+					      ch_coding_efficiency),
+				  1000000 * 8);
+}
+EXPORT_SYMBOL(drm_dp_max_dprx_data_rate);
```
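A quick sanity check of the arithmetic above, with illustrative numbers that are not part of the patch:

```c
/*
 * Worked example (illustrative values): an HBR3 x4 DPRX, i.e.
 * max_link_rate = 810000 (10 kbps units), max_lanes = 4, with the 8b/10b
 * channel coding efficiency of 800000 ppm (80%, see
 * drm_dp_bw_channel_coding_efficiency() above):
 *
 *   810000 * 10 * 4        = 32400000 kbps on the wire
 *   * 800000 / 1000000     = 25920000 kbps of payload
 *   / 8                    =  3240000 kBps
 *
 * so drm_dp_max_dprx_data_rate(810000, 4) == 3240000 kBps (~25.92 Gbps).
 */
```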
```diff
diff --git a/drivers/gpu/drm/display/drm_dp_tunnel.c b/drivers/gpu/drm/display/drm_dp_tunnel.c
new file mode 100644
index 000000000000..120e0de674c1
--- /dev/null
+++ b/drivers/gpu/drm/display/drm_dp_tunnel.c
@@ -0,0 +1,1949 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#include <linux/ref_tracker.h>
+#include <linux/types.h>
+
+#include <drm/drm_atomic_state_helper.h>
+
+#include <drm/drm_atomic.h>
+#include <drm/drm_print.h>
+#include <drm/display/drm_dp.h>
+#include <drm/display/drm_dp_helper.h>
+#include <drm/display/drm_dp_tunnel.h>
+
+#define to_group(__private_obj) \
+	container_of(__private_obj, struct drm_dp_tunnel_group, base)
+
+#define to_group_state(__private_state) \
+	container_of(__private_state, struct drm_dp_tunnel_group_state, base)
+
+#define is_dp_tunnel_private_obj(__obj) \
+	((__obj)->funcs == &tunnel_group_funcs)
+
+#define for_each_new_group_in_state(__state, __new_group_state, __i) \
+	for ((__i) = 0; \
+	     (__i) < (__state)->num_private_objs; \
+	     (__i)++) \
+		for_each_if ((__state)->private_objs[__i].ptr && \
+			     is_dp_tunnel_private_obj((__state)->private_objs[__i].ptr) && \
+			     ((__new_group_state) = \
+				to_group_state((__state)->private_objs[__i].new_state), 1))
+
+#define for_each_old_group_in_state(__state, __old_group_state, __i) \
+	for ((__i) = 0; \
+	     (__i) < (__state)->num_private_objs; \
+	     (__i)++) \
+		for_each_if ((__state)->private_objs[__i].ptr && \
+			     is_dp_tunnel_private_obj((__state)->private_objs[__i].ptr) && \
+			     ((__old_group_state) = \
+				to_group_state((__state)->private_objs[__i].old_state), 1))
+
+#define for_each_tunnel_in_group(__group, __tunnel) \
+	list_for_each_entry(__tunnel, &(__group)->tunnels, node)
+
+#define for_each_tunnel_state(__group_state, __tunnel_state) \
+	list_for_each_entry(__tunnel_state, &(__group_state)->tunnel_states, node)
+
+#define for_each_tunnel_state_safe(__group_state, __tunnel_state, __tunnel_state_tmp) \
+	list_for_each_entry_safe(__tunnel_state, __tunnel_state_tmp, \
+				 &(__group_state)->tunnel_states, node)
+
+#define kbytes_to_mbits(__kbytes) \
+	DIV_ROUND_UP((__kbytes) * 8, 1000)
+
+#define DPTUN_BW_ARG(__bw) ((__bw) < 0 ? (__bw) : kbytes_to_mbits(__bw))
+
+#define __tun_prn(__tunnel, __level, __type, __fmt, ...) \
+	drm_##__level##__type((__tunnel)->group->mgr->dev, \
+			      "[DPTUN %s][%s] " __fmt, \
+			      drm_dp_tunnel_name(__tunnel), \
+			      (__tunnel)->aux->name, ## \
+			      __VA_ARGS__)
+
+#define tun_dbg(__tunnel, __fmt, ...) \
+	__tun_prn(__tunnel, dbg, _kms, __fmt, ## __VA_ARGS__)
+
+#define tun_dbg_stat(__tunnel, __err, __fmt, ...) do { \
+	if (__err) \
+		__tun_prn(__tunnel, dbg, _kms, __fmt " (Failed, err: %pe)\n", \
+			  ## __VA_ARGS__, ERR_PTR(__err)); \
+	else \
+		__tun_prn(__tunnel, dbg, _kms, __fmt " (Ok)\n", \
+			  ## __VA_ARGS__); \
+} while (0)
+
+#define tun_dbg_atomic(__tunnel, __fmt, ...) \
+	__tun_prn(__tunnel, dbg, _atomic, __fmt, ## __VA_ARGS__)
+
+#define tun_grp_dbg(__group, __fmt, ...) \
+	drm_dbg_kms((__group)->mgr->dev, \
+		    "[DPTUN %s] " __fmt, \
+		    drm_dp_tunnel_group_name(__group), ## \
+		    __VA_ARGS__)
+
+#define DP_TUNNELING_BASE DP_TUNNELING_OUI
+
+#define __DPTUN_REG_RANGE(__start, __size) \
+	GENMASK_ULL((__start) + (__size) - 1, (__start))
+
+#define DPTUN_REG_RANGE(__addr, __size) \
+	__DPTUN_REG_RANGE((__addr) - DP_TUNNELING_BASE, (__size))
+
+#define DPTUN_REG(__addr) DPTUN_REG_RANGE(__addr, 1)
+
+#define DPTUN_INFO_REG_MASK ( \
+	DPTUN_REG_RANGE(DP_TUNNELING_OUI, DP_TUNNELING_OUI_BYTES) | \
+	DPTUN_REG_RANGE(DP_TUNNELING_DEV_ID, DP_TUNNELING_DEV_ID_BYTES) | \
+	DPTUN_REG(DP_TUNNELING_HW_REV) | \
+	DPTUN_REG(DP_TUNNELING_SW_REV_MAJOR) | \
+	DPTUN_REG(DP_TUNNELING_SW_REV_MINOR) | \
+	DPTUN_REG(DP_TUNNELING_CAPABILITIES) | \
+	DPTUN_REG(DP_IN_ADAPTER_INFO) | \
+	DPTUN_REG(DP_USB4_DRIVER_ID) | \
+	DPTUN_REG(DP_USB4_DRIVER_BW_CAPABILITY) | \
+	DPTUN_REG(DP_IN_ADAPTER_TUNNEL_INFORMATION) | \
+	DPTUN_REG(DP_BW_GRANULARITY) | \
+	DPTUN_REG(DP_ESTIMATED_BW) | \
+	DPTUN_REG(DP_ALLOCATED_BW) | \
+	DPTUN_REG(DP_TUNNELING_MAX_LINK_RATE) | \
+	DPTUN_REG(DP_TUNNELING_MAX_LANE_COUNT) | \
+	DPTUN_REG(DP_DPTX_BW_ALLOCATION_MODE_CONTROL))
+
+static const DECLARE_BITMAP(dptun_info_regs, 64) = {
+	DPTUN_INFO_REG_MASK & -1UL,
+#if BITS_PER_LONG == 32
+	DPTUN_INFO_REG_MASK >> 32,
+#endif
+};
+
+struct drm_dp_tunnel_regs {
+	u8 buf[HWEIGHT64(DPTUN_INFO_REG_MASK)];
+};
+
+struct drm_dp_tunnel_group;
+
+struct drm_dp_tunnel {
+	struct drm_dp_tunnel_group *group;
+
+	struct list_head node;
+
+	struct kref kref;
+	struct ref_tracker *tracker;
+	struct drm_dp_aux *aux;
+	char name[8];
+
+	int bw_granularity;
+	int estimated_bw;
+	int allocated_bw;
+
+	int max_dprx_rate;
+	u8 max_dprx_lane_count;
+
+	u8 adapter_id;
+
+	bool bw_alloc_supported:1;
+	bool bw_alloc_enabled:1;
+	bool has_io_error:1;
+	bool destroyed:1;
+};
+
+struct drm_dp_tunnel_group_state;
+
+struct drm_dp_tunnel_state {
+	struct drm_dp_tunnel_group_state *group_state;
+
+	struct drm_dp_tunnel_ref tunnel_ref;
+
+	struct list_head node;
+
+	u32 stream_mask;
+	int *stream_bw;
+};
+
+struct drm_dp_tunnel_group_state {
+	struct drm_private_state base;
+
+	struct list_head tunnel_states;
+};
+
+struct drm_dp_tunnel_group {
+	struct drm_private_obj base;
+	struct drm_dp_tunnel_mgr *mgr;
+
+	struct list_head tunnels;
+
+	/* available BW including the allocated_bw of all tunnels in the group */
+	int available_bw;
+
+	u8 drv_group_id;
+	char name[8];
+
+	bool active:1;
+};
+
+struct drm_dp_tunnel_mgr {
+	struct drm_device *dev;
+
+	int group_count;
+	struct drm_dp_tunnel_group *groups;
+	wait_queue_head_t bw_req_queue;
+
+#ifdef CONFIG_DRM_DISPLAY_DEBUG_DP_TUNNEL_STATE
+	struct ref_tracker_dir ref_tracker;
+#endif
+};
+
+/*
+ * The following helpers provide a way to read out the tunneling DPCD
+ * registers with a minimal amount of AUX transfers (1 transfer per contiguous
+ * range, as permitted by the 16 byte per transfer AUX limit), not accessing
+ * other registers to avoid any read side-effects.
+ */
+static int next_reg_area(int *offset)
+{
+	*offset = find_next_bit(dptun_info_regs, 64, *offset);
+
+	return find_next_zero_bit(dptun_info_regs, 64, *offset + 1) - *offset;
+}
+
+#define tunnel_reg_ptr(__regs, __address) ({ \
+	WARN_ON(!test_bit((__address) - DP_TUNNELING_BASE, dptun_info_regs)); \
+	&(__regs)->buf[bitmap_weight(dptun_info_regs, (__address) - DP_TUNNELING_BASE)]; \
+})
+
+static int read_tunnel_regs(struct drm_dp_aux *aux, struct drm_dp_tunnel_regs *regs)
+{
+	int offset = 0;
+	int len;
+
+	while ((len = next_reg_area(&offset))) {
+		int address = DP_TUNNELING_BASE + offset;
+
+		if (drm_dp_dpcd_read(aux, address, tunnel_reg_ptr(regs, address), len) < 0)
+			return -EIO;
+
+		offset += len;
+	}
+
+	return 0;
+}
```
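next_reg_area() walks the dptun_info_regs bitmap to collapse the scattered register mask into a handful of contiguous reads. A standalone model of the same walk, using a plain uint64_t instead of the kernel bitmap helpers (the mask in main() is made up for the demonstration):

```c
#include <stdint.h>
#include <stdio.h>

/* Toy model of next_reg_area()/read_tunnel_regs() above. */
static void dump_aux_reads(uint64_t mask)
{
	int bit = 0;

	while (bit < 64) {
		int start, len;

		/* find_next_bit(): skip to the next set bit */
		while (bit < 64 && !(mask & (1ULL << bit)))
			bit++;
		if (bit == 64)
			break;
		start = bit;

		/* find_next_zero_bit(): measure the contiguous run */
		while (bit < 64 && (mask & (1ULL << bit)))
			bit++;
		len = bit - start;

		/* each run becomes one drm_dp_dpcd_read() in the kernel code */
		printf("AUX read at DP_TUNNELING_BASE + %d, %d bytes\n", start, len);
	}
}

int main(void)
{
	/* three contiguous areas -> three AUX reads */
	dump_aux_reads(0x7 | (0x3fULL << 16) | (1ULL << 40));
	return 0;
}
```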
```diff
+
+static u8 tunnel_reg(const struct drm_dp_tunnel_regs *regs, int address)
+{
+	return *tunnel_reg_ptr(regs, address);
+}
+
+static u8 tunnel_reg_drv_group_id(const struct drm_dp_tunnel_regs *regs)
+{
+	u8 drv_id = tunnel_reg(regs, DP_USB4_DRIVER_ID) & DP_USB4_DRIVER_ID_MASK;
+	u8 group_id = tunnel_reg(regs, DP_IN_ADAPTER_TUNNEL_INFORMATION) & DP_GROUP_ID_MASK;
+
+	if (!group_id)
+		return 0;
+
+	return (drv_id << DP_GROUP_ID_BITS) | group_id;
+}
+
+/* Return granularity in kB/s units */
+static int tunnel_reg_bw_granularity(const struct drm_dp_tunnel_regs *regs)
+{
+	int gr = tunnel_reg(regs, DP_BW_GRANULARITY) & DP_BW_GRANULARITY_MASK;
+
+	if (gr > 2)
+		return -1;
+
+	return (250000 << gr) / 8;
+}
+
+static int tunnel_reg_max_dprx_rate(const struct drm_dp_tunnel_regs *regs)
+{
+	u8 bw_code = tunnel_reg(regs, DP_TUNNELING_MAX_LINK_RATE);
+
+	return drm_dp_bw_code_to_link_rate(bw_code);
+}
+
+static int tunnel_reg_max_dprx_lane_count(const struct drm_dp_tunnel_regs *regs)
+{
+	return tunnel_reg(regs, DP_TUNNELING_MAX_LANE_COUNT) &
+	       DP_TUNNELING_MAX_LANE_COUNT_MASK;
+}
+
+static bool tunnel_reg_bw_alloc_supported(const struct drm_dp_tunnel_regs *regs)
+{
+	u8 cap_mask = DP_TUNNELING_SUPPORT | DP_IN_BW_ALLOCATION_MODE_SUPPORT;
+
+	if ((tunnel_reg(regs, DP_TUNNELING_CAPABILITIES) & cap_mask) != cap_mask)
+		return false;
+
+	return tunnel_reg(regs, DP_USB4_DRIVER_BW_CAPABILITY) &
+	       DP_USB4_DRIVER_BW_ALLOCATION_MODE_SUPPORT;
+}
+
+static bool tunnel_reg_bw_alloc_enabled(const struct drm_dp_tunnel_regs *regs)
+{
+	return tunnel_reg(regs, DP_DPTX_BW_ALLOCATION_MODE_CONTROL) &
+	       DP_DISPLAY_DRIVER_BW_ALLOCATION_MODE_ENABLE;
+}
+
+static u8 tunnel_group_drv_id(u8 drv_group_id)
+{
+	return drv_group_id >> DP_GROUP_ID_BITS;
+}
+
+static u8 tunnel_group_id(u8 drv_group_id)
+{
+	return drv_group_id & DP_GROUP_ID_MASK;
+}
+
+const char *drm_dp_tunnel_name(const struct drm_dp_tunnel *tunnel)
+{
+	return tunnel->name;
+}
+EXPORT_SYMBOL(drm_dp_tunnel_name);
+
+static const char *drm_dp_tunnel_group_name(const struct drm_dp_tunnel_group *group)
+{
+	return group->name;
+}
+
+static struct drm_dp_tunnel_group *
+lookup_or_alloc_group(struct drm_dp_tunnel_mgr *mgr, u8 drv_group_id)
+{
+	struct drm_dp_tunnel_group *group = NULL;
+	int i;
+
+	for (i = 0; i < mgr->group_count; i++) {
+		/*
+		 * A tunnel group with 0 group ID shouldn't have more than one
+		 * tunnel.
+		 */
+		if (tunnel_group_id(drv_group_id) &&
+		    mgr->groups[i].drv_group_id == drv_group_id)
+			return &mgr->groups[i];
+
+		if (!group && !mgr->groups[i].active)
+			group = &mgr->groups[i];
+	}
+
+	if (!group) {
+		drm_dbg_kms(mgr->dev,
+			    "DPTUN: Can't allocate more tunnel groups\n");
+		return NULL;
+	}
+
+	group->drv_group_id = drv_group_id;
+	group->active = true;
+
+	/*
+	 * The group name format here and elsewhere: Driver-ID:Group-ID:*
+	 * (* standing for all DP-Adapters/tunnels in the group).
+	 */
+	snprintf(group->name, sizeof(group->name), "%d:%d:*",
+		 tunnel_group_drv_id(drv_group_id) & ((1 << DP_GROUP_ID_BITS) - 1),
+		 tunnel_group_id(drv_group_id) & ((1 << DP_USB4_DRIVER_ID_BITS) - 1));
+
+	return group;
+}
+
+static void free_group(struct drm_dp_tunnel_group *group)
+{
+	struct drm_dp_tunnel_mgr *mgr = group->mgr;
+
+	if (drm_WARN_ON(mgr->dev, !list_empty(&group->tunnels)))
+		return;
+
+	group->drv_group_id = 0;
+	group->available_bw = -1;
+	group->active = false;
+}
+
+static struct drm_dp_tunnel *
+tunnel_get(struct drm_dp_tunnel *tunnel)
+{
+	kref_get(&tunnel->kref);
+
+	return tunnel;
+}
+
+static void free_tunnel(struct kref *kref)
+{
+	struct drm_dp_tunnel *tunnel = container_of(kref, typeof(*tunnel), kref);
+	struct drm_dp_tunnel_group *group = tunnel->group;
+
+	list_del(&tunnel->node);
+	if (list_empty(&group->tunnels))
+		free_group(group);
+
+	kfree(tunnel);
+}
+
+static void tunnel_put(struct drm_dp_tunnel *tunnel)
+{
+	kref_put(&tunnel->kref, free_tunnel);
+}
+
+#ifdef CONFIG_DRM_DISPLAY_DEBUG_DP_TUNNEL_STATE
+static void track_tunnel_ref(struct drm_dp_tunnel *tunnel,
+			     struct ref_tracker **tracker)
+{
+	ref_tracker_alloc(&tunnel->group->mgr->ref_tracker,
+			  tracker, GFP_KERNEL);
+}
+
+static void untrack_tunnel_ref(struct drm_dp_tunnel *tunnel,
+			       struct ref_tracker **tracker)
+{
+	ref_tracker_free(&tunnel->group->mgr->ref_tracker,
+			 tracker);
+}
+#else
+static void track_tunnel_ref(struct drm_dp_tunnel *tunnel,
+			     struct ref_tracker **tracker)
+{
+}
+
+static void untrack_tunnel_ref(struct drm_dp_tunnel *tunnel,
+			       struct ref_tracker **tracker)
+{
+}
+#endif
+
+/**
+ * drm_dp_tunnel_get - Get a reference for a DP tunnel
+ * @tunnel: Tunnel object
+ * @tracker: Debug tracker for the reference
+ *
+ * Get a reference for @tunnel, along with a debug tracker to help locating
+ * the source of a reference leak/double reference put etc. issue.
+ *
+ * The reference must be dropped after use, calling drm_dp_tunnel_put()
+ * passing @tunnel and *@tracker returned from here.
+ *
+ * Returns @tunnel - as a convenience - along with *@tracker.
+ */
+struct drm_dp_tunnel *
+drm_dp_tunnel_get(struct drm_dp_tunnel *tunnel,
+		  struct ref_tracker **tracker)
+{
+	track_tunnel_ref(tunnel, tracker);
+
+	return tunnel_get(tunnel);
+}
+EXPORT_SYMBOL(drm_dp_tunnel_get);
+
+/**
+ * drm_dp_tunnel_put - Put a reference for a DP tunnel
+ * @tunnel: Tunnel object
+ * @tracker: Debug tracker for the reference
+ *
+ * Put a reference for @tunnel along with its debug *@tracker, which
+ * was obtained with drm_dp_tunnel_get().
+ */
+void drm_dp_tunnel_put(struct drm_dp_tunnel *tunnel,
+		       struct ref_tracker **tracker)
+{
+	untrack_tunnel_ref(tunnel, tracker);
+
+	tunnel_put(tunnel);
+}
+EXPORT_SYMBOL(drm_dp_tunnel_put);
```
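The get/put pair above is the reference API drivers use directly. A minimal, hypothetical driver-side sketch (use_tunnel() is made up; the two calls and their signatures come from the patch):

```c
static void use_tunnel(struct drm_dp_tunnel *tunnel)
{
	struct ref_tracker *tracker = NULL;

	/* take a reference, registering it with the manager's ref_tracker */
	drm_dp_tunnel_get(tunnel, &tracker);

	/* ... query/allocate BW, update atomic state, etc. ... */

	/* drop the reference together with its debug tracker */
	drm_dp_tunnel_put(tunnel, &tracker);
}
```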
```diff
+
+static bool add_tunnel_to_group(struct drm_dp_tunnel_mgr *mgr,
+				u8 drv_group_id,
+				struct drm_dp_tunnel *tunnel)
+{
+	struct drm_dp_tunnel_group *group;
+
+	group = lookup_or_alloc_group(mgr, drv_group_id);
+	if (!group)
+		return false;
+
+	tunnel->group = group;
+	list_add(&tunnel->node, &group->tunnels);
+
+	return true;
+}
+
+static struct drm_dp_tunnel *
+create_tunnel(struct drm_dp_tunnel_mgr *mgr,
+	      struct drm_dp_aux *aux,
+	      const struct drm_dp_tunnel_regs *regs)
+{
+	u8 drv_group_id = tunnel_reg_drv_group_id(regs);
+	struct drm_dp_tunnel *tunnel;
+
+	tunnel = kzalloc(sizeof(*tunnel), GFP_KERNEL);
+	if (!tunnel)
+		return NULL;
+
+	INIT_LIST_HEAD(&tunnel->node);
+
+	kref_init(&tunnel->kref);
+
+	tunnel->aux = aux;
+
+	tunnel->adapter_id = tunnel_reg(regs, DP_IN_ADAPTER_INFO) & DP_IN_ADAPTER_NUMBER_MASK;
+
+	snprintf(tunnel->name, sizeof(tunnel->name), "%d:%d:%d",
+		 tunnel_group_drv_id(drv_group_id) & ((1 << DP_GROUP_ID_BITS) - 1),
+		 tunnel_group_id(drv_group_id) & ((1 << DP_USB4_DRIVER_ID_BITS) - 1),
+		 tunnel->adapter_id & ((1 << DP_IN_ADAPTER_NUMBER_BITS) - 1));
+
+	tunnel->bw_granularity = tunnel_reg_bw_granularity(regs);
+	tunnel->allocated_bw = tunnel_reg(regs, DP_ALLOCATED_BW) *
+			       tunnel->bw_granularity;
+	/*
+	 * An initial allocated BW of 0 indicates an undefined state: the
+	 * actual allocation is determined by the TBT CM, usually following a
+	 * legacy allocation policy (based on the max DPRX caps). From the
+	 * driver's POV the state becomes defined only after the first
+	 * allocation request.
+	 */
+	if (!tunnel->allocated_bw)
+		tunnel->allocated_bw = -1;
+
+	tunnel->bw_alloc_supported = tunnel_reg_bw_alloc_supported(regs);
+	tunnel->bw_alloc_enabled = tunnel_reg_bw_alloc_enabled(regs);
+
+	if (!add_tunnel_to_group(mgr, drv_group_id, tunnel)) {
+		kfree(tunnel);
+
+		return NULL;
+	}
+
+	track_tunnel_ref(tunnel, &tunnel->tracker);
+
+	return tunnel;
+}
+
+static void destroy_tunnel(struct drm_dp_tunnel *tunnel)
+{
+	untrack_tunnel_ref(tunnel, &tunnel->tracker);
+	tunnel_put(tunnel);
+}
+
+/**
+ * drm_dp_tunnel_set_io_error - Set the IO error flag for a DP tunnel
+ * @tunnel: Tunnel object
+ *
+ * Set the IO error flag for @tunnel. Drivers can call this function upon
+ * detecting a failure that affects the tunnel functionality, for instance
+ * after a DP AUX transfer failure on the port @tunnel is connected to.
+ *
+ * This disables further management of @tunnel, including any related
+ * AUX accesses for tunneling DPCD registers, returning error to the
+ * initiators of these. The driver is supposed to drop this tunnel and -
+ * optionally - recreate it.
+ */
+void drm_dp_tunnel_set_io_error(struct drm_dp_tunnel *tunnel)
+{
+	tunnel->has_io_error = true;
+}
+EXPORT_SYMBOL(drm_dp_tunnel_set_io_error);
+
+#define SKIP_DPRX_CAPS_CHECK		BIT(0)
+#define ALLOW_ALLOCATED_BW_CHANGE	BIT(1)
+static bool tunnel_regs_are_valid(struct drm_dp_tunnel_mgr *mgr,
+				  const struct drm_dp_tunnel_regs *regs,
+				  unsigned int flags)
+{
+	u8 drv_group_id = tunnel_reg_drv_group_id(regs);
+	bool check_dprx = !(flags & SKIP_DPRX_CAPS_CHECK);
+	bool ret = true;
+
+	if (!tunnel_reg_bw_alloc_supported(regs)) {
+		if (tunnel_group_id(drv_group_id)) {
+			drm_dbg_kms(mgr->dev,
+				    "DPTUN: A non-zero group ID is only allowed with BWA support\n");
+			ret = false;
+		}
+
+		if (tunnel_reg(regs, DP_ALLOCATED_BW)) {
+			drm_dbg_kms(mgr->dev,
+				    "DPTUN: BW is allocated without BWA support\n");
+			ret = false;
+		}
+
+		return ret;
+	}
+
+	if (!tunnel_group_id(drv_group_id)) {
+		drm_dbg_kms(mgr->dev,
+			    "DPTUN: BWA support requires a non-zero group ID\n");
+		ret = false;
+	}
+
+	if (check_dprx && hweight8(tunnel_reg_max_dprx_lane_count(regs)) != 1) {
+		drm_dbg_kms(mgr->dev,
+			    "DPTUN: Invalid DPRX lane count: %d\n",
+			    tunnel_reg_max_dprx_lane_count(regs));
+
+		ret = false;
+	}
+
+	if (check_dprx && !tunnel_reg_max_dprx_rate(regs)) {
+		drm_dbg_kms(mgr->dev,
+			    "DPTUN: DPRX rate is 0\n");
+
+		ret = false;
+	}
+
+	if (tunnel_reg_bw_granularity(regs) < 0) {
+		drm_dbg_kms(mgr->dev,
+			    "DPTUN: Invalid BW granularity\n");
+
+		ret = false;
+	}
+
+	if (tunnel_reg(regs, DP_ALLOCATED_BW) > tunnel_reg(regs, DP_ESTIMATED_BW)) {
+		drm_dbg_kms(mgr->dev,
+			    "DPTUN: Allocated BW %d > estimated BW %d Mb/s\n",
+			    DPTUN_BW_ARG(tunnel_reg(regs, DP_ALLOCATED_BW) *
+					 tunnel_reg_bw_granularity(regs)),
+			    DPTUN_BW_ARG(tunnel_reg(regs, DP_ESTIMATED_BW) *
+					 tunnel_reg_bw_granularity(regs)));
+
+		ret = false;
+	}
+
+	return ret;
+}
+
+static int tunnel_allocated_bw(const struct drm_dp_tunnel *tunnel)
+{
+	return max(tunnel->allocated_bw, 0);
+}
+
+static bool tunnel_info_changes_are_valid(struct drm_dp_tunnel *tunnel,
+					  const struct drm_dp_tunnel_regs *regs,
+					  unsigned int flags)
+{
+	u8 new_drv_group_id = tunnel_reg_drv_group_id(regs);
+	bool ret = true;
+
+	if (tunnel->bw_alloc_supported != tunnel_reg_bw_alloc_supported(regs)) {
+		tun_dbg(tunnel,
+			"BW alloc support has changed %s -> %s\n",
+			str_yes_no(tunnel->bw_alloc_supported),
+			str_yes_no(tunnel_reg_bw_alloc_supported(regs)));
+
+		ret = false;
+	}
+
+	if (tunnel->group->drv_group_id != new_drv_group_id) {
+		tun_dbg(tunnel,
+			"Driver/group ID has changed %d:%d:* -> %d:%d:*\n",
+			tunnel_group_drv_id(tunnel->group->drv_group_id),
+			tunnel_group_id(tunnel->group->drv_group_id),
+			tunnel_group_drv_id(new_drv_group_id),
+			tunnel_group_id(new_drv_group_id));
+
+		ret = false;
+	}
+
+	if (!tunnel->bw_alloc_supported)
+		return ret;
+
+	if (tunnel->bw_granularity != tunnel_reg_bw_granularity(regs)) {
+		tun_dbg(tunnel,
+			"BW granularity has changed: %d -> %d Mb/s\n",
+			DPTUN_BW_ARG(tunnel->bw_granularity),
+			DPTUN_BW_ARG(tunnel_reg_bw_granularity(regs)));
+
+		ret = false;
+	}
+
+	/*
+	 * On some devices at least the BW alloc mode enabled status is always
+	 * reported as 0, so skip checking that here.
+	 */
+
+	if (!(flags & ALLOW_ALLOCATED_BW_CHANGE) &&
+	    tunnel_allocated_bw(tunnel) !=
+	    tunnel_reg(regs, DP_ALLOCATED_BW) * tunnel->bw_granularity) {
+		tun_dbg(tunnel,
+			"Allocated BW has changed: %d -> %d Mb/s\n",
+			DPTUN_BW_ARG(tunnel->allocated_bw),
+			DPTUN_BW_ARG(tunnel_reg(regs, DP_ALLOCATED_BW) * tunnel->bw_granularity));
+
+		ret = false;
+	}
+
+	return ret;
+}
+
+static int
+read_and_verify_tunnel_regs(struct drm_dp_tunnel *tunnel,
+			    struct drm_dp_tunnel_regs *regs,
+			    unsigned int flags)
+{
+	int err;
+
+	err = read_tunnel_regs(tunnel->aux, regs);
+	if (err < 0) {
+		drm_dp_tunnel_set_io_error(tunnel);
+
+		return err;
+	}
+
+	if (!tunnel_regs_are_valid(tunnel->group->mgr, regs, flags))
+		return -EINVAL;
+
+	if (!tunnel_info_changes_are_valid(tunnel, regs, flags))
+		return -EINVAL;
+
+	return 0;
+}
+
+static bool update_dprx_caps(struct drm_dp_tunnel *tunnel, const struct drm_dp_tunnel_regs *regs)
+{
+	bool changed = false;
+
+	if (tunnel_reg_max_dprx_rate(regs) != tunnel->max_dprx_rate) {
+		tunnel->max_dprx_rate = tunnel_reg_max_dprx_rate(regs);
+		changed = true;
+	}
+
+	if (tunnel_reg_max_dprx_lane_count(regs) != tunnel->max_dprx_lane_count) {
+		tunnel->max_dprx_lane_count = tunnel_reg_max_dprx_lane_count(regs);
+		changed = true;
+	}
+
+	return changed;
+}
+
+static int dev_id_len(const u8 *dev_id, int max_len)
+{
+	while (max_len && dev_id[max_len - 1] == '\0')
+		max_len--;
+
+	return max_len;
+}
+
+static int get_max_dprx_bw(const struct drm_dp_tunnel *tunnel)
+{
+	int max_dprx_bw = drm_dp_max_dprx_data_rate(tunnel->max_dprx_rate,
+						    tunnel->max_dprx_lane_count);
+
+	/*
+	 * A BW request of roundup(max_dprx_bw, tunnel->bw_granularity) results in
+	 * an allocation of max_dprx_bw. A BW request above this rounded-up
+	 * value will fail.
+	 */
+	return min(roundup(max_dprx_bw, tunnel->bw_granularity),
+		   MAX_DP_REQUEST_BW * tunnel->bw_granularity);
+}
+
+static int get_max_tunnel_bw(const struct drm_dp_tunnel *tunnel)
+{
+	return min(get_max_dprx_bw(tunnel), tunnel->group->available_bw);
+}
```
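The rounding in get_max_dprx_bw() is easiest to see with concrete numbers; the values below are illustrative, not from the patch:

```c
/*
 * Worked example: DP_BW_GRANULARITY code 0 means a granularity of
 * (250000 << 0) / 8 = 31250 kB/s (0.25 Gb/s) per tunnel_reg_bw_granularity().
 * For an HBR3 x4 DPRX, max_dprx_bw = 3240000 kB/s (see the
 * drm_dp_max_dprx_data_rate() example earlier), so:
 *
 *   roundup(3240000, 31250) = 104 * 31250 = 3250000 kB/s
 *
 * A request of 3250000 kB/s (104 granularity units) covers the full DPRX
 * rate; anything above the rounded-up value would fail.
 */
```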
```diff
+
+/**
+ * drm_dp_tunnel_detect - Detect DP tunnel on the link
+ * @mgr: Tunnel manager
+ * @aux: DP AUX on which the tunnel will be detected
+ *
+ * Detect if there is any DP tunnel on the link and add it to the tunnel
+ * group's tunnel list.
+ *
+ * Returns a pointer to a tunnel on success, or an ERR_PTR() error on
+ * failure.
+ */
+struct drm_dp_tunnel *
+drm_dp_tunnel_detect(struct drm_dp_tunnel_mgr *mgr,
+		     struct drm_dp_aux *aux)
+{
+	struct drm_dp_tunnel_regs regs;
+	struct drm_dp_tunnel *tunnel;
+	int err;
+
+	err = read_tunnel_regs(aux, &regs);
+	if (err)
+		return ERR_PTR(err);
+
+	if (!(tunnel_reg(&regs, DP_TUNNELING_CAPABILITIES) &
+	      DP_TUNNELING_SUPPORT))
+		return ERR_PTR(-ENODEV);
+
+	/* The DPRX caps are valid only after enabling BW alloc mode. */
+	if (!tunnel_regs_are_valid(mgr, &regs, SKIP_DPRX_CAPS_CHECK))
+		return ERR_PTR(-EINVAL);
+
+	tunnel = create_tunnel(mgr, aux, &regs);
+	if (!tunnel)
+		return ERR_PTR(-ENOMEM);
+
+	tun_dbg(tunnel,
+		"OUI:%*phD DevID:%*pE Rev-HW:%d.%d SW:%d.%d PR-Sup:%s BWA-Sup:%s BWA-En:%s\n",
+		DP_TUNNELING_OUI_BYTES,
+		tunnel_reg_ptr(&regs, DP_TUNNELING_OUI),
+		dev_id_len(tunnel_reg_ptr(&regs, DP_TUNNELING_DEV_ID), DP_TUNNELING_DEV_ID_BYTES),
+		tunnel_reg_ptr(&regs, DP_TUNNELING_DEV_ID),
+		(tunnel_reg(&regs, DP_TUNNELING_HW_REV) & DP_TUNNELING_HW_REV_MAJOR_MASK) >>
+			DP_TUNNELING_HW_REV_MAJOR_SHIFT,
+		(tunnel_reg(&regs, DP_TUNNELING_HW_REV) & DP_TUNNELING_HW_REV_MINOR_MASK) >>
+			DP_TUNNELING_HW_REV_MINOR_SHIFT,
+		tunnel_reg(&regs, DP_TUNNELING_SW_REV_MAJOR),
+		tunnel_reg(&regs, DP_TUNNELING_SW_REV_MINOR),
+		str_yes_no(tunnel_reg(&regs, DP_TUNNELING_CAPABILITIES) &
+			   DP_PANEL_REPLAY_OPTIMIZATION_SUPPORT),
+		str_yes_no(tunnel->bw_alloc_supported),
+		str_yes_no(tunnel->bw_alloc_enabled));
+
+	return tunnel;
+}
+EXPORT_SYMBOL(drm_dp_tunnel_detect);
+
+/**
+ * drm_dp_tunnel_destroy - Destroy tunnel object
+ * @tunnel: Tunnel object
+ *
+ * Remove the tunnel from the tunnel topology and destroy it.
+ *
+ * Returns 0 on success, -ENODEV if the tunnel has been destroyed already.
+ */
+int drm_dp_tunnel_destroy(struct drm_dp_tunnel *tunnel)
+{
+	if (!tunnel)
+		return 0;
+
+	if (drm_WARN_ON(tunnel->group->mgr->dev, tunnel->destroyed))
+		return -ENODEV;
+
+	tun_dbg(tunnel, "destroying\n");
+
+	tunnel->destroyed = true;
+	destroy_tunnel(tunnel);
+
+	return 0;
+}
+EXPORT_SYMBOL(drm_dp_tunnel_destroy);
+
+static int check_tunnel(const struct drm_dp_tunnel *tunnel)
+{
+	if (tunnel->destroyed)
+		return -ENODEV;
+
+	if (tunnel->has_io_error)
+		return -EIO;
+
+	return 0;
+}
+
+static int group_allocated_bw(struct drm_dp_tunnel_group *group)
+{
+	struct drm_dp_tunnel *tunnel;
+	int group_allocated_bw = 0;
+
+	for_each_tunnel_in_group(group, tunnel) {
+		if (check_tunnel(tunnel) == 0 &&
+		    tunnel->bw_alloc_enabled)
+			group_allocated_bw += tunnel_allocated_bw(tunnel);
+	}
+
+	return group_allocated_bw;
+}
+
+/*
+ * The estimated BW reported by the TBT Connection Manager for each tunnel in
+ * a group includes the BW already allocated for the given tunnel and the
+ * unallocated BW which is free to be used by any tunnel in the group.
+ */
+static int group_free_bw(const struct drm_dp_tunnel *tunnel)
+{
+	return tunnel->estimated_bw - tunnel_allocated_bw(tunnel);
+}
+
+static int calc_group_available_bw(const struct drm_dp_tunnel *tunnel)
+{
+	return group_allocated_bw(tunnel->group) +
+	       group_free_bw(tunnel);
+}
```
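The group-level accounting above is easiest to follow with made-up numbers:

```c
/*
 * Illustrative accounting (made-up values): a group with tunnel A
 * (allocated_bw = 2000000 kB/s) and tunnel B (allocated_bw = 1000000 kB/s),
 * where A reads back estimated_bw = 2500000 kB/s from the CM:
 *
 *   group_free_bw(A)           = 2500000 - 2000000           =  500000
 *   group_allocated_bw(group)  = 2000000 + 1000000           = 3000000
 *   calc_group_available_bw(A) = 3000000 +  500000           = 3500000 kB/s
 *
 * i.e. the BW usable by the whole group is everything already allocated
 * plus whatever the Connection Manager reports as still unallocated.
 */
```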
```diff
+
+static int update_group_available_bw(struct drm_dp_tunnel *tunnel,
+				     const struct drm_dp_tunnel_regs *regs)
+{
+	struct drm_dp_tunnel *tunnel_iter;
+	int group_available_bw;
+	bool changed;
+
+	tunnel->estimated_bw = tunnel_reg(regs, DP_ESTIMATED_BW) * tunnel->bw_granularity;
+
+	if (calc_group_available_bw(tunnel) == tunnel->group->available_bw)
+		return 0;
+
+	for_each_tunnel_in_group(tunnel->group, tunnel_iter) {
+		int err;
+
+		if (tunnel_iter == tunnel)
+			continue;
+
+		if (check_tunnel(tunnel_iter) != 0 ||
+		    !tunnel_iter->bw_alloc_enabled)
+			continue;
+
+		err = drm_dp_dpcd_probe(tunnel_iter->aux, DP_DPCD_REV);
+		if (err) {
+			tun_dbg(tunnel_iter,
+				"Probe failed, assume disconnected (err %pe)\n",
+				ERR_PTR(err));
+			drm_dp_tunnel_set_io_error(tunnel_iter);
+		}
+	}
+
+	group_available_bw = calc_group_available_bw(tunnel);
+
+	tun_dbg(tunnel, "Updated group available BW: %d->%d\n",
+		DPTUN_BW_ARG(tunnel->group->available_bw),
+		DPTUN_BW_ARG(group_available_bw));
+
+	changed = tunnel->group->available_bw != group_available_bw;
+
+	tunnel->group->available_bw = group_available_bw;
+
+	return changed ? 1 : 0;
+}
+
+static int set_bw_alloc_mode(struct drm_dp_tunnel *tunnel, bool enable)
+{
+	u8 mask = DP_DISPLAY_DRIVER_BW_ALLOCATION_MODE_ENABLE | DP_UNMASK_BW_ALLOCATION_IRQ;
+	u8 val;
+
+	if (drm_dp_dpcd_readb(tunnel->aux, DP_DPTX_BW_ALLOCATION_MODE_CONTROL, &val) < 0)
+		goto out_err;
+
+	if (enable)
+		val |= mask;
+	else
+		val &= ~mask;
+
+	if (drm_dp_dpcd_writeb(tunnel->aux, DP_DPTX_BW_ALLOCATION_MODE_CONTROL, val) < 0)
+		goto out_err;
+
+	tunnel->bw_alloc_enabled = enable;
+
+	return 0;
+
+out_err:
+	drm_dp_tunnel_set_io_error(tunnel);
+
+	return -EIO;
+}
+
+/**
+ * drm_dp_tunnel_enable_bw_alloc - Enable DP tunnel BW allocation mode
+ * @tunnel: Tunnel object
+ *
+ * Enable the DP tunnel BW allocation mode on @tunnel if it supports it.
+ *
+ * Returns 0 in case of success, negative error code otherwise.
+ */
+int drm_dp_tunnel_enable_bw_alloc(struct drm_dp_tunnel *tunnel)
+{
+	struct drm_dp_tunnel_regs regs;
+	int err;
+
+	err = check_tunnel(tunnel);
+	if (err)
+		return err;
+
+	if (!tunnel->bw_alloc_supported)
+		return -EOPNOTSUPP;
+
+	if (!tunnel_group_id(tunnel->group->drv_group_id))
+		return -EINVAL;
+
+	err = set_bw_alloc_mode(tunnel, true);
+	if (err)
+		goto out;
+
+	/*
+	 * After a BWA disable/re-enable sequence the allocated BW can either
+	 * stay at its last requested value or, for instance aft
```
[the captured diff is truncated here]
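The capture ends mid-comment in drm_dp_tunnel_enable_bw_alloc(). For orientation, a hedged driver-side sketch of how the entry points shown above fit together; probe_tunnel() and its error policy are illustrative, only the drm_dp_* calls and their return semantics come from the patch:

```c
/* Hypothetical driver-side probe flow; only the drm_dp_* calls are real. */
static struct drm_dp_tunnel *
probe_tunnel(struct drm_dp_tunnel_mgr *mgr, struct drm_dp_aux *aux)
{
	struct drm_dp_tunnel *tunnel;
	int err;

	tunnel = drm_dp_tunnel_detect(mgr, aux);
	if (IS_ERR(tunnel))
		return tunnel;	/* e.g. -ENODEV: no tunnel on this AUX */

	err = drm_dp_tunnel_enable_bw_alloc(tunnel);
	if (err == -EOPNOTSUPP)
		err = 0;	/* tunnel usable without BW allocation mode */

	if (err) {
		drm_dp_tunnel_destroy(tunnel);
		return ERR_PTR(err);
	}

	return tunnel;
}
```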
