// SPDX-License-Identifier: GPL-2.0
/*
* Thunderbolt Time Management Unit (TMU) support
*
* Copyright (C) 2019, Intel Corporation
* Authors: Mika Westerberg <mika.westerberg@linux.intel.com>
* Rajmohan Mani <rajmohan.mani@intel.com>
*/
#include <linux/delay.h>
#include "tb.h"
/*
 * TS packet interval per TMU mode, indexed by enum tb_switch_tmu_mode.
 * 0 disables time sync; presumably the unit is microseconds between
 * time sync packets (lower value = higher resolution) — TODO confirm
 * against the USB4 spec TMU chapter.
 */
static const unsigned int tmu_rates[] = {
	[TB_SWITCH_TMU_MODE_OFF] = 0,
	[TB_SWITCH_TMU_MODE_LOWRES] = 1000,
	[TB_SWITCH_TMU_MODE_HIFI_UNI] = 16,
	[TB_SWITCH_TMU_MODE_HIFI_BI] = 16,
	[TB_SWITCH_TMU_MODE_MEDRES_ENHANCED_UNI] = 16,
};
/*
 * Per-mode TMU configuration parameters, indexed by enum
 * tb_switch_tmu_mode. Only the fields written by
 * tb_switch_set_tmu_mode_params() below (freq_meas_window, avg_const,
 * delta_avg_const) are visibly consumed in this file; the remaining
 * fields are presumably programmed elsewhere — verify against callers.
 * Made static: this table is file-local and must not leak a generic
 * symbol name into the kernel's global namespace.
 */
static const struct {
	unsigned int freq_meas_window;	/* TMU_RTR_CS_0 frequency measurement window */
	unsigned int avg_const;		/* TMU_RTR_CS_15 averaging constants */
	unsigned int delta_avg_const;	/* TMU_RTR_CS_18, enhanced (USB4 v2) only */
	unsigned int repl_timeout;
	unsigned int repl_threshold;
	unsigned int repl_n;
	unsigned int dirswitch_n;
} tmu_params[] = {
	[TB_SWITCH_TMU_MODE_OFF] = { },
	[TB_SWITCH_TMU_MODE_LOWRES] = { 30, 4, },
	[TB_SWITCH_TMU_MODE_HIFI_UNI] = { 800, 8, },
	[TB_SWITCH_TMU_MODE_HIFI_BI] = { 800, 8, },
	[TB_SWITCH_TMU_MODE_MEDRES_ENHANCED_UNI] = {
		800, 4, 0, 3125, 25, 128, 255,
	},
};
static const char *tmu_mode_name(enum tb_switch_tmu_mode mode)
{
switch (mode) {
case TB_SWITCH_TMU_MODE_OFF:
return "off";
case TB_SWITCH_TMU_MODE_LOWRES:
return "uni-directional, LowRes";
case TB_SWITCH_TMU_MODE_HIFI_UNI:
return "uni-directional, HiFi";
case TB_SWITCH_TMU_MODE_HIFI_BI:
return "bi-directional, HiFi";
case TB_SWITCH_TMU_MODE_MEDRES_ENHANCED_UNI:
return "enhanced uni-directional, MedRes";
default:
return "unknown";
}
}
/*
 * Enhanced TMU modes are available starting from USB4 version 2
 * routers (per the version reported by usb4_switch_version()).
 */
static bool tb_switch_tmu_enhanced_is_supported(const struct tb_switch *sw)
{
	return usb4_switch_version(sw) >= 2;
}
/*
 * Program the per-mode TMU parameters from tmu_params[] into the
 * router's TMU config space: the frequency measurement window into
 * TMU_RTR_CS_0, the four averaging constants into TMU_RTR_CS_15, and —
 * on routers supporting enhanced TMU modes — the delta averaging
 * constant into TMU_RTR_CS_18.
 *
 * Returns %0 on success or a negative error code from the config
 * space read/write.
 */
static int tb_switch_set_tmu_mode_params(struct tb_switch *sw,
					 enum tb_switch_tmu_mode mode)
{
	u32 freq, avg, val;
	int ret;

	freq = tmu_params[mode].freq_meas_window;
	avg = tmu_params[mode].avg_const;

	/* Read-modify-write the frequency measurement window field */
	ret = tb_sw_read(sw, &val, TB_CFG_SWITCH,
			 sw->tmu.cap + TMU_RTR_CS_0, 1);
	if (ret)
		return ret;

	val &= ~TMU_RTR_CS_0_FREQ_WIND_MASK;
	val |= FIELD_PREP(TMU_RTR_CS_0_FREQ_WIND_MASK, freq);

	ret = tb_sw_write(sw, &val, TB_CFG_SWITCH,
			  sw->tmu.cap + TMU_RTR_CS_0, 1);
	if (ret)
		return ret;

	/* All four averaging constants share the same per-mode value */
	ret = tb_sw_read(sw, &val, TB_CFG_SWITCH,
			 sw->tmu.cap + TMU_RTR_CS_15, 1);
	if (ret)
		return ret;

	val &= ~TMU_RTR_CS_15_FREQ_AVG_MASK &
		~TMU_RTR_CS_15_DELAY_AVG_MASK &
		~TMU_RTR_CS_15_OFFSET_AVG_MASK &
		~TMU_RTR_CS_15_ERROR_AVG_MASK;
	val |= FIELD_PREP(TMU_RTR_CS_15_FREQ_AVG_MASK, avg) |
		FIELD_PREP(TMU_RTR_CS_15_DELAY_AVG_MASK, avg) |
		FIELD_PREP(TMU_RTR_CS_15_OFFSET_AVG_MASK, avg) |
		FIELD_PREP(TMU_RTR_CS_15_ERROR_AVG_MASK, avg);

	ret = tb_sw_write(sw, &val, TB_CFG_SWITCH,
			  sw->tmu.cap + TMU_RTR_CS_15, 1);
	if (ret)
		return ret;

	/* TMU_RTR_CS_18 only exists on routers with enhanced TMU support */
	if (tb_switch_tmu_enhanced_is_supported(sw)) {
		u32 delta_avg = tmu_params[mode].delta_avg_const;

		ret = tb_sw_read(sw, &val, TB_CFG_SWITCH,
				 sw->tmu.cap + TMU_RTR_CS_18, 1);
		if (ret)
			return ret;

		val &= ~TMU_RTR_CS_18_DELTA_AVG_CONST_MASK;
		val |= FIELD_PREP(TMU_RTR_CS_18_DELTA_AVG_CONST_MASK, delta_avg);

		ret = tb_sw_write(sw, &val, TB_CFG_SWITCH,
				  sw->tmu.cap + TMU_RTR_CS_18, 1);
	}

	return ret;
}
/*
 * Check the UCAP bit in TMU_RTR_CS_0 to see whether the router
 * supports uni-directional TMU. A failed config read is treated as
 * "not supported".
 */
static bool tb_switch_tmu_ucap_is_supported(struct tb_switch *sw)
{
	u32 val;

	if (tb_sw_read(sw, &val, TB_CFG_SWITCH,
		       sw->tmu.cap + TMU_RTR_CS_0, 1))
		return false;

	return val & TMU_RTR_CS_0_UCAP;
}
/*
 * Read the TS packet interval field from TMU_RTR_CS_3. Returns the
 * field value (non-negative) or a negative error code on read failure.
 */
static int tb_switch_tmu_rate_read(struct tb_switch *sw)
{
	u32 val;
	int ret;

	ret = tb_sw_read(sw, &val, TB_CFG_SWITCH,
			 sw->tmu.cap + TMU_RTR_CS_3, 1);
	if (ret)
		return ret;

	return val >> TMU_RTR_CS_3_TS_PACKET_INTERVAL_SHIFT;
}
/*
 * Update the TS packet interval field in TMU_RTR_CS_3 to @rate via
 * read-modify-write, preserving the other bits of the register.
 * Returns %0 on success or a negative error code.
 */
static int tb_switch_tmu_rate_write(struct tb_switch *sw, int rate)
{
	u32 data;
	int ret;

	ret = tb_sw_read(sw, &data, TB_CFG_SWITCH,
			 sw->tmu.cap + TMU_RTR_CS_3, 1);
	if (ret)
		return ret;

	data &= ~TMU_RTR_CS_3_TS_PACKET_INTERVAL_MASK;
	data |= rate << TMU_RTR_CS_3_TS_PACKET_INTERVAL_SHIFT;

	return tb_sw_write(sw, &data, TB_CFG_SWITCH,
			   sw->tmu.cap + TMU_RTR_CS_3, 1);
}
static int tb_port_tmu_write(struct tb_port *port, u8 offset,