Diffstat (limited to 'drivers/net/ethernet/airoha')
-rw-r--r--  drivers/net/ethernet/airoha/Kconfig              |   27
-rw-r--r--  drivers/net/ethernet/airoha/Makefile             |    9
-rw-r--r--  drivers/net/ethernet/airoha/airoha_eth.c         | 2683
-rw-r--r--  drivers/net/ethernet/airoha/airoha_eth.h         |  552
-rw-r--r--  drivers/net/ethernet/airoha/airoha_npu.c         |  520
-rw-r--r--  drivers/net/ethernet/airoha/airoha_npu.h         |   34
-rw-r--r--  drivers/net/ethernet/airoha/airoha_ppe.c         |  910
-rw-r--r--  drivers/net/ethernet/airoha/airoha_ppe_debugfs.c |  181
-rw-r--r--  drivers/net/ethernet/airoha/airoha_regs.h        |  803
9 files changed, 5719 insertions, 0 deletions
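
Most of the register programming in the diff below follows one masked read-modify-write idiom: airoha_rmw() preserves the bits outside a field mask and the FIELD_PREP()/FIELD_GET() macros shift values into and out of that field. The following is an illustrative userspace sketch of that idiom, not part of the patch; the field_prep()/field_get()/rmw() helpers, the RX_RING_THR_MASK layout, and the register value are hypothetical stand-ins.

	#include <stdint.h>
	#include <stdio.h>

	/* Stand-ins for the kernel's FIELD_PREP()/FIELD_GET(): shift a
	 * value into (or out of) the contiguous bitfield described by
	 * `mask`. __builtin_ctz() locates the field's lowest set bit.
	 */
	static uint32_t field_prep(uint32_t mask, uint32_t val)
	{
		return (val << __builtin_ctz(mask)) & mask;
	}

	static uint32_t field_get(uint32_t mask, uint32_t val)
	{
		return (val & mask) >> __builtin_ctz(mask);
	}

	/* Same shape as airoha_rmw(): clear the field, then OR in `val`,
	 * which callers have already confined to `mask` via field_prep().
	 * A plain uint32_t stands in for the MMIO register.
	 */
	static uint32_t rmw(uint32_t reg, uint32_t mask, uint32_t val)
	{
		return (reg & ~mask) | val;
	}

	#define RX_RING_THR_MASK 0x000000ff /* hypothetical field layout */

	int main(void)
	{
		uint32_t reg = 0xabcd1200; /* hypothetical register value */

		/* mirrors: airoha_qdma_rmw(..., RX_RING_THR_MASK,
		 *              FIELD_PREP(RX_RING_THR_MASK, thr)); */
		reg = rmw(reg, RX_RING_THR_MASK,
			  field_prep(RX_RING_THR_MASK, 32));
		printf("thr=%u reg=0x%08x\n",
		       field_get(RX_RING_THR_MASK, reg), reg);
		return 0;
	}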
diff --git a/drivers/net/ethernet/airoha/Kconfig b/drivers/net/ethernet/airoha/Kconfig
new file mode 100644
index 000000000000..1a4cf6a259f6
--- /dev/null
+++ b/drivers/net/ethernet/airoha/Kconfig
@@ -0,0 +1,27 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config NET_VENDOR_AIROHA
+	bool "Airoha devices"
+	depends on ARCH_AIROHA || COMPILE_TEST
+	help
+	  If you have a Airoha SoC with ethernet, say Y.
+
+if NET_VENDOR_AIROHA
+
+config NET_AIROHA_NPU
+	tristate "Airoha NPU support"
+	select WANT_DEV_COREDUMP
+	select REGMAP_MMIO
+	help
+	  This driver supports Airoha Network Processor (NPU) available
+	  on the Airoha Soc family.
+
+config NET_AIROHA
+	tristate "Airoha SoC Gigabit Ethernet support"
+	depends on NET_DSA || !NET_DSA
+	select NET_AIROHA_NPU
+	select PAGE_POOL
+	help
+	  This driver supports the gigabit ethernet MACs in the
+	  Airoha SoC family.
+
+endif #NET_VENDOR_AIROHA
diff --git a/drivers/net/ethernet/airoha/Makefile b/drivers/net/ethernet/airoha/Makefile
new file mode 100644
index 000000000000..94468053e34b
--- /dev/null
+++ b/drivers/net/ethernet/airoha/Makefile
@@ -0,0 +1,9 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Airoha for the Mediatek SoCs built-in ethernet macs
+#
+
+obj-$(CONFIG_NET_AIROHA) += airoha-eth.o
+airoha-eth-y := airoha_eth.o airoha_ppe.o
+airoha-eth-$(CONFIG_DEBUG_FS) += airoha_ppe_debugfs.o
+obj-$(CONFIG_NET_AIROHA_NPU) += airoha_npu.o
diff --git a/drivers/net/ethernet/airoha/airoha_eth.c b/drivers/net/ethernet/airoha/airoha_eth.c
new file mode 100644
index 000000000000..c0a642568ac1
--- /dev/null
+++ b/drivers/net/ethernet/airoha/airoha_eth.c
@@ -0,0 +1,2683 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2024 AIROHA Inc
+ * Author: Lorenzo Bianconi <lorenzo@kernel.org>
+ */
+#include <linux/of.h>
+#include <linux/of_net.h>
+#include <linux/platform_device.h>
+#include <linux/tcp.h>
+#include <linux/u64_stats_sync.h>
+#include <net/dst_metadata.h>
+#include <net/page_pool/helpers.h>
+#include <net/pkt_cls.h>
+#include <uapi/linux/ppp_defs.h>
+
+#include "airoha_regs.h"
+#include "airoha_eth.h"
+
+u32 airoha_rr(void __iomem *base, u32 offset)
+{
+	return readl(base + offset);
+}
+
+void airoha_wr(void __iomem *base, u32 offset, u32 val)
+{
+	writel(val, base + offset);
+}
+
+u32 airoha_rmw(void __iomem *base, u32 offset, u32 mask, u32 val)
+{
+	val |= (airoha_rr(base, offset) & ~mask);
+	airoha_wr(base, offset, val);
+
+	return val;
+}
+
+static void airoha_qdma_set_irqmask(struct airoha_qdma *qdma, int index,
+				    u32 clear, u32 set)
+{
+	unsigned long flags;
+
+	if (WARN_ON_ONCE(index >= ARRAY_SIZE(qdma->irqmask)))
+		return;
+
+	spin_lock_irqsave(&qdma->irq_lock, flags);
+
+	qdma->irqmask[index] &= ~clear;
+	qdma->irqmask[index] |= set;
+	airoha_qdma_wr(qdma, REG_INT_ENABLE(index), qdma->irqmask[index]);
+	/* Read irq_enable register in order to guarantee the update above
+	 * completes in the spinlock critical section.
+	 */
+	airoha_qdma_rr(qdma, REG_INT_ENABLE(index));
+
+	spin_unlock_irqrestore(&qdma->irq_lock, flags);
+}
+
+static void airoha_qdma_irq_enable(struct airoha_qdma *qdma, int index,
+				   u32 mask)
+{
+	airoha_qdma_set_irqmask(qdma, index, 0, mask);
+}
+
+static void airoha_qdma_irq_disable(struct airoha_qdma *qdma, int index,
+				    u32 mask)
+{
+	airoha_qdma_set_irqmask(qdma, index, mask, 0);
+}
+
+static bool airhoa_is_lan_gdm_port(struct airoha_gdm_port *port)
+{
+	/* GDM1 port on EN7581 SoC is connected to the lan dsa switch.
+	 * GDM{2,3,4} can be used as wan port connected to an external
+	 * phy module.
+	 */
+	return port->id == 1;
+}
+
+static void airoha_set_macaddr(struct airoha_gdm_port *port, const u8 *addr)
+{
+	struct airoha_eth *eth = port->qdma->eth;
+	u32 val, reg;
+
+	reg = airhoa_is_lan_gdm_port(port) ? REG_FE_LAN_MAC_H
+					   : REG_FE_WAN_MAC_H;
+	val = (addr[0] << 16) | (addr[1] << 8) | addr[2];
+	airoha_fe_wr(eth, reg, val);
+
+	val = (addr[3] << 16) | (addr[4] << 8) | addr[5];
+	airoha_fe_wr(eth, REG_FE_MAC_LMIN(reg), val);
+	airoha_fe_wr(eth, REG_FE_MAC_LMAX(reg), val);
+}
+
+static void airoha_set_gdm_port_fwd_cfg(struct airoha_eth *eth, u32 addr,
+					u32 val)
+{
+	airoha_fe_rmw(eth, addr, GDM_OCFQ_MASK,
+		      FIELD_PREP(GDM_OCFQ_MASK, val));
+	airoha_fe_rmw(eth, addr, GDM_MCFQ_MASK,
+		      FIELD_PREP(GDM_MCFQ_MASK, val));
+	airoha_fe_rmw(eth, addr, GDM_BCFQ_MASK,
+		      FIELD_PREP(GDM_BCFQ_MASK, val));
+	airoha_fe_rmw(eth, addr, GDM_UCFQ_MASK,
+		      FIELD_PREP(GDM_UCFQ_MASK, val));
+}
+
+static int airoha_set_vip_for_gdm_port(struct airoha_gdm_port *port,
+				       bool enable)
+{
+	struct airoha_eth *eth = port->qdma->eth;
+	u32 vip_port;
+
+	switch (port->id) {
+	case 3:
+		/* FIXME: handle XSI_PCIE1_PORT */
+		vip_port = XSI_PCIE0_VIP_PORT_MASK;
+		break;
+	case 4:
+		/* FIXME: handle XSI_USB_PORT */
+		vip_port = XSI_ETH_VIP_PORT_MASK;
+		break;
+	default:
+		return 0;
+	}
+
+	if (enable) {
+		airoha_fe_set(eth, REG_FE_VIP_PORT_EN, vip_port);
+		airoha_fe_set(eth, REG_FE_IFC_PORT_EN, vip_port);
+	} else {
+		airoha_fe_clear(eth, REG_FE_VIP_PORT_EN, vip_port);
+		airoha_fe_clear(eth, REG_FE_IFC_PORT_EN, vip_port);
+	}
+
+	return 0;
+}
+
+static void airoha_fe_maccr_init(struct airoha_eth *eth)
+{
+	int p;
+
+	for (p = 1; p <= ARRAY_SIZE(eth->ports); p++)
+		airoha_fe_set(eth, REG_GDM_FWD_CFG(p),
+			      GDM_TCP_CKSUM | GDM_UDP_CKSUM | GDM_IP4_CKSUM |
+			      GDM_DROP_CRC_ERR);
+
+	airoha_fe_rmw(eth, REG_CDM1_VLAN_CTRL, CDM1_VLAN_MASK,
+		      FIELD_PREP(CDM1_VLAN_MASK, 0x8100));
+
+	airoha_fe_set(eth, REG_FE_CPORT_CFG, FE_CPORT_PAD);
+}
+
+static void airoha_fe_vip_setup(struct airoha_eth *eth)
+{
+	airoha_fe_wr(eth, REG_FE_VIP_PATN(3), ETH_P_PPP_DISC);
+	airoha_fe_wr(eth, REG_FE_VIP_EN(3), PATN_FCPU_EN_MASK | PATN_EN_MASK);
+
+	airoha_fe_wr(eth, REG_FE_VIP_PATN(4), PPP_LCP);
+	airoha_fe_wr(eth, REG_FE_VIP_EN(4),
+		     PATN_FCPU_EN_MASK | FIELD_PREP(PATN_TYPE_MASK, 1) |
+		     PATN_EN_MASK);
+
+	airoha_fe_wr(eth, REG_FE_VIP_PATN(6), PPP_IPCP);
+	airoha_fe_wr(eth, REG_FE_VIP_EN(6),
+		     PATN_FCPU_EN_MASK | FIELD_PREP(PATN_TYPE_MASK, 1) |
+		     PATN_EN_MASK);
+
+	airoha_fe_wr(eth, REG_FE_VIP_PATN(7), PPP_CHAP);
+	airoha_fe_wr(eth, REG_FE_VIP_EN(7),
+		     PATN_FCPU_EN_MASK | FIELD_PREP(PATN_TYPE_MASK, 1) |
+		     PATN_EN_MASK);
+
+	/* BOOTP (0x43) */
+	airoha_fe_wr(eth, REG_FE_VIP_PATN(8), 0x43);
+	airoha_fe_wr(eth, REG_FE_VIP_EN(8),
+		     PATN_FCPU_EN_MASK | PATN_SP_EN_MASK |
+		     FIELD_PREP(PATN_TYPE_MASK, 4) | PATN_EN_MASK);
+
+	/* BOOTP (0x44) */
+	airoha_fe_wr(eth, REG_FE_VIP_PATN(9), 0x44);
+	airoha_fe_wr(eth, REG_FE_VIP_EN(9),
+		     PATN_FCPU_EN_MASK | PATN_SP_EN_MASK |
+		     FIELD_PREP(PATN_TYPE_MASK, 4) | PATN_EN_MASK);
+
+	/* ISAKMP */
+	airoha_fe_wr(eth, REG_FE_VIP_PATN(10), 0x1f401f4);
+	airoha_fe_wr(eth, REG_FE_VIP_EN(10),
+		     PATN_FCPU_EN_MASK | PATN_DP_EN_MASK | PATN_SP_EN_MASK |
+		     FIELD_PREP(PATN_TYPE_MASK, 4) | PATN_EN_MASK);
+
+	airoha_fe_wr(eth, REG_FE_VIP_PATN(11), PPP_IPV6CP);
+	airoha_fe_wr(eth, REG_FE_VIP_EN(11),
+		     PATN_FCPU_EN_MASK | FIELD_PREP(PATN_TYPE_MASK, 1) |
+		     PATN_EN_MASK);
+
+	/* DHCPv6 */
+	airoha_fe_wr(eth, REG_FE_VIP_PATN(12), 0x2220223);
+	airoha_fe_wr(eth, REG_FE_VIP_EN(12),
+		     PATN_FCPU_EN_MASK | PATN_DP_EN_MASK | PATN_SP_EN_MASK |
+		     FIELD_PREP(PATN_TYPE_MASK, 4) | PATN_EN_MASK);
+
+	airoha_fe_wr(eth, REG_FE_VIP_PATN(19), PPP_PAP);
+	airoha_fe_wr(eth, REG_FE_VIP_EN(19),
+		     PATN_FCPU_EN_MASK | FIELD_PREP(PATN_TYPE_MASK, 1) |
+		     PATN_EN_MASK);
+
+	/* ETH->ETH_P_1905 (0x893a) */
+	airoha_fe_wr(eth, REG_FE_VIP_PATN(20), 0x893a);
+	airoha_fe_wr(eth, REG_FE_VIP_EN(20),
+		     PATN_FCPU_EN_MASK | PATN_EN_MASK);
+
+	airoha_fe_wr(eth, REG_FE_VIP_PATN(21), ETH_P_LLDP);
+	airoha_fe_wr(eth, REG_FE_VIP_EN(21),
+		     PATN_FCPU_EN_MASK | PATN_EN_MASK);
+}
+
+static u32 airoha_fe_get_pse_queue_rsv_pages(struct airoha_eth *eth,
+					     u32 port, u32 queue)
+{
+	u32 val;
+
+	airoha_fe_rmw(eth, REG_FE_PSE_QUEUE_CFG_WR,
+		      PSE_CFG_PORT_ID_MASK | PSE_CFG_QUEUE_ID_MASK,
+		      FIELD_PREP(PSE_CFG_PORT_ID_MASK, port) |
+		      FIELD_PREP(PSE_CFG_QUEUE_ID_MASK, queue));
+	val = airoha_fe_rr(eth, REG_FE_PSE_QUEUE_CFG_VAL);
+
+	return FIELD_GET(PSE_CFG_OQ_RSV_MASK, val);
+}
+
+static void airoha_fe_set_pse_queue_rsv_pages(struct airoha_eth *eth,
+					      u32 port, u32 queue, u32 val)
+{
+	airoha_fe_rmw(eth, REG_FE_PSE_QUEUE_CFG_VAL, PSE_CFG_OQ_RSV_MASK,
+		      FIELD_PREP(PSE_CFG_OQ_RSV_MASK, val));
+	airoha_fe_rmw(eth, REG_FE_PSE_QUEUE_CFG_WR,
+		      PSE_CFG_PORT_ID_MASK | PSE_CFG_QUEUE_ID_MASK |
+		      PSE_CFG_WR_EN_MASK | PSE_CFG_OQRSV_SEL_MASK,
+		      FIELD_PREP(PSE_CFG_PORT_ID_MASK, port) |
+		      FIELD_PREP(PSE_CFG_QUEUE_ID_MASK, queue) |
+		      PSE_CFG_WR_EN_MASK | PSE_CFG_OQRSV_SEL_MASK);
+}
+
+static u32 airoha_fe_get_pse_all_rsv(struct airoha_eth *eth)
+{
+	u32 val = airoha_fe_rr(eth, REG_FE_PSE_BUF_SET);
+
+	return FIELD_GET(PSE_ALLRSV_MASK, val);
+}
+
+static int airoha_fe_set_pse_oq_rsv(struct airoha_eth *eth,
+				    u32 port, u32 queue, u32 val)
+{
+	u32 orig_val = airoha_fe_get_pse_queue_rsv_pages(eth, port, queue);
+	u32 tmp, all_rsv, fq_limit;
+
+	airoha_fe_set_pse_queue_rsv_pages(eth, port, queue, val);
+
+	/* modify all rsv */
+	all_rsv = airoha_fe_get_pse_all_rsv(eth);
+	all_rsv += (val - orig_val);
+	airoha_fe_rmw(eth, REG_FE_PSE_BUF_SET, PSE_ALLRSV_MASK,
+		      FIELD_PREP(PSE_ALLRSV_MASK, all_rsv));
+
+	/* modify hthd */
+	tmp = airoha_fe_rr(eth, PSE_FQ_CFG);
+	fq_limit = FIELD_GET(PSE_FQ_LIMIT_MASK, tmp);
+	tmp = fq_limit - all_rsv - 0x20;
+	airoha_fe_rmw(eth, REG_PSE_SHARE_USED_THD,
+		      PSE_SHARE_USED_HTHD_MASK,
+		      FIELD_PREP(PSE_SHARE_USED_HTHD_MASK, tmp));
+
+	tmp = fq_limit - all_rsv - 0x100;
+	airoha_fe_rmw(eth, REG_PSE_SHARE_USED_THD,
+		      PSE_SHARE_USED_MTHD_MASK,
+		      FIELD_PREP(PSE_SHARE_USED_MTHD_MASK, tmp));
+	tmp = (3 * tmp) >> 2;
+	airoha_fe_rmw(eth, REG_FE_PSE_BUF_SET,
+		      PSE_SHARE_USED_LTHD_MASK,
+		      FIELD_PREP(PSE_SHARE_USED_LTHD_MASK, tmp));
+
+	return 0;
+}
+
+static void airoha_fe_pse_ports_init(struct airoha_eth *eth)
+{
+	const u32 pse_port_num_queues[] = {
+		[FE_PSE_PORT_CDM1] = 6,
+		[FE_PSE_PORT_GDM1] = 6,
+		[FE_PSE_PORT_GDM2] = 32,
+		[FE_PSE_PORT_GDM3] = 6,
+		[FE_PSE_PORT_PPE1] = 4,
+		[FE_PSE_PORT_CDM2] = 6,
+		[FE_PSE_PORT_CDM3] = 8,
+		[FE_PSE_PORT_CDM4] = 10,
+		[FE_PSE_PORT_PPE2] = 4,
+		[FE_PSE_PORT_GDM4] = 2,
+		[FE_PSE_PORT_CDM5] = 2,
+	};
+	u32 all_rsv;
+	int q;
+
+	all_rsv = airoha_fe_get_pse_all_rsv(eth);
+	/* hw misses PPE2 oq rsv */
+	all_rsv += PSE_RSV_PAGES * pse_port_num_queues[FE_PSE_PORT_PPE2];
+	airoha_fe_set(eth, REG_FE_PSE_BUF_SET, all_rsv);
+
+	/* CMD1 */
+	for (q = 0; q < pse_port_num_queues[FE_PSE_PORT_CDM1]; q++)
+		airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_CDM1, q,
+					 PSE_QUEUE_RSV_PAGES);
+	/* GMD1 */
+	for (q = 0; q < pse_port_num_queues[FE_PSE_PORT_GDM1]; q++)
+		airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_GDM1, q,
+					 PSE_QUEUE_RSV_PAGES);
+	/* GMD2 */
+	for (q = 6; q < pse_port_num_queues[FE_PSE_PORT_GDM2]; q++)
+		airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_GDM2, q, 0);
+	/* GMD3 */
+	for (q = 0; q < pse_port_num_queues[FE_PSE_PORT_GDM3]; q++)
+		airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_GDM3, q,
+					 PSE_QUEUE_RSV_PAGES);
+	/* PPE1 */
+	for (q = 0; q < pse_port_num_queues[FE_PSE_PORT_PPE1]; q++) {
+		if (q < pse_port_num_queues[FE_PSE_PORT_PPE1])
+			airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_PPE1, q,
+						 PSE_QUEUE_RSV_PAGES);
+		else
+			airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_PPE1, q, 0);
+	}
+	/* CDM2 */
+	for (q = 0; q < pse_port_num_queues[FE_PSE_PORT_CDM2]; q++)
+		airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_CDM2, q,
+					 PSE_QUEUE_RSV_PAGES);
+	/* CDM3 */
+	for (q = 0; q < pse_port_num_queues[FE_PSE_PORT_CDM3] - 1; q++)
+		airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_CDM3, q, 0);
+	/* CDM4 */
+	for (q = 4; q < pse_port_num_queues[FE_PSE_PORT_CDM4]; q++)
+		airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_CDM4, q,
+					 PSE_QUEUE_RSV_PAGES);
+	/* PPE2 */
+	for (q = 0; q < pse_port_num_queues[FE_PSE_PORT_PPE2]; q++) {
+		if (q < pse_port_num_queues[FE_PSE_PORT_PPE2] / 2)
+			airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_PPE2, q,
+						 PSE_QUEUE_RSV_PAGES);
+		else
+			airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_PPE2, q, 0);
+	}
+	/* GMD4 */
+	for (q = 0; q < pse_port_num_queues[FE_PSE_PORT_GDM4]; q++)
+		airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_GDM4, q,
+					 PSE_QUEUE_RSV_PAGES);
+	/* CDM5 */
+	for (q = 0; q < pse_port_num_queues[FE_PSE_PORT_CDM5]; q++)
+		airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_CDM5, q,
+					 PSE_QUEUE_RSV_PAGES);
+}
+
+static int airoha_fe_mc_vlan_clear(struct airoha_eth *eth)
+{
+	int i;
+
+	for (i = 0; i < AIROHA_FE_MC_MAX_VLAN_TABLE; i++) {
+		int err, j;
+		u32 val;
+
+		airoha_fe_wr(eth, REG_MC_VLAN_DATA, 0x0);
+
+		val = FIELD_PREP(MC_VLAN_CFG_TABLE_ID_MASK, i) |
+		      MC_VLAN_CFG_TABLE_SEL_MASK | MC_VLAN_CFG_RW_MASK;
+		airoha_fe_wr(eth, REG_MC_VLAN_CFG, val);
+		err = read_poll_timeout(airoha_fe_rr, val,
+					val & MC_VLAN_CFG_CMD_DONE_MASK,
+					USEC_PER_MSEC, 5 * USEC_PER_MSEC,
+					false, eth, REG_MC_VLAN_CFG);
+		if (err)
+			return err;
+
+		for (j = 0; j < AIROHA_FE_MC_MAX_VLAN_PORT; j++) {
+			airoha_fe_wr(eth, REG_MC_VLAN_DATA, 0x0);
+
+			val = FIELD_PREP(MC_VLAN_CFG_TABLE_ID_MASK, i) |
+			      FIELD_PREP(MC_VLAN_CFG_PORT_ID_MASK, j) |
+			      MC_VLAN_CFG_RW_MASK;
+			airoha_fe_wr(eth, REG_MC_VLAN_CFG, val);
+			err = read_poll_timeout(airoha_fe_rr, val,
+						val & MC_VLAN_CFG_CMD_DONE_MASK,
+						USEC_PER_MSEC,
+						5 * USEC_PER_MSEC, false, eth,
+						REG_MC_VLAN_CFG);
+			if (err)
+				return err;
+		}
+	}
+
+	return 0;
+}
+
+static void airoha_fe_crsn_qsel_init(struct airoha_eth *eth)
+{
+	/* CDM1_CRSN_QSEL */
+	airoha_fe_rmw(eth, REG_CDM1_CRSN_QSEL(CRSN_22 >> 2),
+		      CDM1_CRSN_QSEL_REASON_MASK(CRSN_22),
+		      FIELD_PREP(CDM1_CRSN_QSEL_REASON_MASK(CRSN_22),
+				 CDM_CRSN_QSEL_Q1));
+	airoha_fe_rmw(eth, REG_CDM1_CRSN_QSEL(CRSN_08 >> 2),
+		      CDM1_CRSN_QSEL_REASON_MASK(CRSN_08),
+		      FIELD_PREP(CDM1_CRSN_QSEL_REASON_MASK(CRSN_08),
+				 CDM_CRSN_QSEL_Q1));
+	airoha_fe_rmw(eth, REG_CDM1_CRSN_QSEL(CRSN_21 >> 2),
+		      CDM1_CRSN_QSEL_REASON_MASK(CRSN_21),
+		      FIELD_PREP(CDM1_CRSN_QSEL_REASON_MASK(CRSN_21),
+				 CDM_CRSN_QSEL_Q1));
+	airoha_fe_rmw(eth, REG_CDM1_CRSN_QSEL(CRSN_24 >> 2),
+		      CDM1_CRSN_QSEL_REASON_MASK(CRSN_24),
+		      FIELD_PREP(CDM1_CRSN_QSEL_REASON_MASK(CRSN_24),
+				 CDM_CRSN_QSEL_Q6));
+	airoha_fe_rmw(eth, REG_CDM1_CRSN_QSEL(CRSN_25 >> 2),
+		      CDM1_CRSN_QSEL_REASON_MASK(CRSN_25),
+		      FIELD_PREP(CDM1_CRSN_QSEL_REASON_MASK(CRSN_25),
+				 CDM_CRSN_QSEL_Q1));
+	/* CDM2_CRSN_QSEL */
+	airoha_fe_rmw(eth, REG_CDM2_CRSN_QSEL(CRSN_08 >> 2),
+		      CDM2_CRSN_QSEL_REASON_MASK(CRSN_08),
+		      FIELD_PREP(CDM2_CRSN_QSEL_REASON_MASK(CRSN_08),
+				 CDM_CRSN_QSEL_Q1));
+	airoha_fe_rmw(eth, REG_CDM2_CRSN_QSEL(CRSN_21 >> 2),
+		      CDM2_CRSN_QSEL_REASON_MASK(CRSN_21),
+		      FIELD_PREP(CDM2_CRSN_QSEL_REASON_MASK(CRSN_21),
+				 CDM_CRSN_QSEL_Q1));
+	airoha_fe_rmw(eth, REG_CDM2_CRSN_QSEL(CRSN_22 >> 2),
+		      CDM2_CRSN_QSEL_REASON_MASK(CRSN_22),
+		      FIELD_PREP(CDM2_CRSN_QSEL_REASON_MASK(CRSN_22),
+				 CDM_CRSN_QSEL_Q1));
+	airoha_fe_rmw(eth, REG_CDM2_CRSN_QSEL(CRSN_24 >> 2),
+		      CDM2_CRSN_QSEL_REASON_MASK(CRSN_24),
+		      FIELD_PREP(CDM2_CRSN_QSEL_REASON_MASK(CRSN_24),
+				 CDM_CRSN_QSEL_Q6));
+	airoha_fe_rmw(eth, REG_CDM2_CRSN_QSEL(CRSN_25 >> 2),
+		      CDM2_CRSN_QSEL_REASON_MASK(CRSN_25),
+		      FIELD_PREP(CDM2_CRSN_QSEL_REASON_MASK(CRSN_25),
+				 CDM_CRSN_QSEL_Q1));
+}
+
+static int airoha_fe_init(struct airoha_eth *eth)
+{
+	airoha_fe_maccr_init(eth);
+
+	/* PSE IQ reserve */
+	airoha_fe_rmw(eth, REG_PSE_IQ_REV1, PSE_IQ_RES1_P2_MASK,
+		      FIELD_PREP(PSE_IQ_RES1_P2_MASK, 0x10));
+	airoha_fe_rmw(eth, REG_PSE_IQ_REV2,
+		      PSE_IQ_RES2_P5_MASK | PSE_IQ_RES2_P4_MASK,
+		      FIELD_PREP(PSE_IQ_RES2_P5_MASK, 0x40) |
+		      FIELD_PREP(PSE_IQ_RES2_P4_MASK, 0x34));
+
+	/* enable FE copy engine for MC/KA/DPI */
+	airoha_fe_wr(eth, REG_FE_PCE_CFG,
+		     PCE_DPI_EN_MASK | PCE_KA_EN_MASK | PCE_MC_EN_MASK);
+	/* set vip queue selection to ring 1 */
+	airoha_fe_rmw(eth, REG_CDM1_FWD_CFG, CDM1_VIP_QSEL_MASK,
+		      FIELD_PREP(CDM1_VIP_QSEL_MASK, 0x4));
+	airoha_fe_rmw(eth, REG_CDM2_FWD_CFG, CDM2_VIP_QSEL_MASK,
+		      FIELD_PREP(CDM2_VIP_QSEL_MASK, 0x4));
+	/* set GDM4 source interface offset to 8 */
+	airoha_fe_rmw(eth, REG_GDM4_SRC_PORT_SET,
+		      GDM4_SPORT_OFF2_MASK |
+		      GDM4_SPORT_OFF1_MASK |
+		      GDM4_SPORT_OFF0_MASK,
+		      FIELD_PREP(GDM4_SPORT_OFF2_MASK, 8) |
+		      FIELD_PREP(GDM4_SPORT_OFF1_MASK, 8) |
+		      FIELD_PREP(GDM4_SPORT_OFF0_MASK, 8));
+
+	/* set PSE Page as 128B */
+	airoha_fe_rmw(eth, REG_FE_DMA_GLO_CFG,
+		      FE_DMA_GLO_L2_SPACE_MASK | FE_DMA_GLO_PG_SZ_MASK,
+		      FIELD_PREP(FE_DMA_GLO_L2_SPACE_MASK, 2) |
+		      FE_DMA_GLO_PG_SZ_MASK);
+	airoha_fe_wr(eth, REG_FE_RST_GLO_CFG,
+		     FE_RST_CORE_MASK | FE_RST_GDM3_MBI_ARB_MASK |
+		     FE_RST_GDM4_MBI_ARB_MASK);
+	usleep_range(1000, 2000);
+
+	/* connect RxRing1 and RxRing15 to PSE Port0 OQ-1
+	 * connect other rings to PSE Port0 OQ-0
+	 */
+	airoha_fe_wr(eth, REG_FE_CDM1_OQ_MAP0, BIT(4));
+	airoha_fe_wr(eth, REG_FE_CDM1_OQ_MAP1, BIT(28));
+	airoha_fe_wr(eth, REG_FE_CDM1_OQ_MAP2, BIT(4));
+	airoha_fe_wr(eth, REG_FE_CDM1_OQ_MAP3, BIT(28));
+
+	airoha_fe_vip_setup(eth);
+	airoha_fe_pse_ports_init(eth);
+
+	airoha_fe_set(eth, REG_GDM_MISC_CFG,
+		      GDM2_RDM_ACK_WAIT_PREF_MASK |
+		      GDM2_CHN_VLD_MODE_MASK);
+	airoha_fe_rmw(eth, REG_CDM2_FWD_CFG, CDM2_OAM_QSEL_MASK,
+		      FIELD_PREP(CDM2_OAM_QSEL_MASK, 15));
+
+	/* init fragment and assemble Force Port */
+	/* NPU Core-3, NPU Bridge Channel-3 */
+	airoha_fe_rmw(eth, REG_IP_FRAG_FP,
+		      IP_FRAGMENT_PORT_MASK | IP_FRAGMENT_NBQ_MASK,
+		      FIELD_PREP(IP_FRAGMENT_PORT_MASK, 6) |
+		      FIELD_PREP(IP_FRAGMENT_NBQ_MASK, 3));
+	/* QDMA LAN, RX Ring-22 */
+	airoha_fe_rmw(eth, REG_IP_FRAG_FP,
+		      IP_ASSEMBLE_PORT_MASK | IP_ASSEMBLE_NBQ_MASK,
+		      FIELD_PREP(IP_ASSEMBLE_PORT_MASK, 0) |
+		      FIELD_PREP(IP_ASSEMBLE_NBQ_MASK, 22));
+
+	airoha_fe_set(eth, REG_GDM3_FWD_CFG, GDM3_PAD_EN_MASK);
+	airoha_fe_set(eth, REG_GDM4_FWD_CFG, GDM4_PAD_EN_MASK);
+
+	airoha_fe_crsn_qsel_init(eth);
+
+	airoha_fe_clear(eth, REG_FE_CPORT_CFG, FE_CPORT_QUEUE_XFC_MASK);
+	airoha_fe_set(eth, REG_FE_CPORT_CFG, FE_CPORT_PORT_XFC_MASK);
+
+	/* default aging mode for mbi unlock issue */
+	airoha_fe_rmw(eth, REG_GDM2_CHN_RLS,
+		      MBI_RX_AGE_SEL_MASK | MBI_TX_AGE_SEL_MASK,
+		      FIELD_PREP(MBI_RX_AGE_SEL_MASK, 3) |
+		      FIELD_PREP(MBI_TX_AGE_SEL_MASK, 3));
+
+	/* disable IFC by default */
+	airoha_fe_clear(eth, REG_FE_CSR_IFC_CFG, FE_IFC_EN_MASK);
+
+	/* enable 1:N vlan action, init vlan table */
+	airoha_fe_set(eth, REG_MC_VLAN_EN, MC_VLAN_EN_MASK);
+
+	return airoha_fe_mc_vlan_clear(eth);
+}
+
+static int airoha_qdma_fill_rx_queue(struct airoha_queue *q)
+{
+	enum dma_data_direction dir = page_pool_get_dma_dir(q->page_pool);
+	struct airoha_qdma *qdma = q->qdma;
+	struct airoha_eth *eth = qdma->eth;
+	int qid = q - &qdma->q_rx[0];
+	int nframes = 0;
+
+	while (q->queued < q->ndesc - 1) {
+		struct airoha_queue_entry *e = &q->entry[q->head];
+		struct airoha_qdma_desc *desc = &q->desc[q->head];
+		struct page *page;
+		int offset;
+		u32 val;
+
+		page = page_pool_dev_alloc_frag(q->page_pool, &offset,
+						q->buf_size);
+		if (!page)
+			break;
+
+		q->head = (q->head + 1) % q->ndesc;
+		q->queued++;
+		nframes++;
+
+		e->buf = page_address(page) + offset;
+		e->dma_addr = page_pool_get_dma_addr(page) + offset;
+		e->dma_len = SKB_WITH_OVERHEAD(q->buf_size);
+
+		dma_sync_single_for_device(eth->dev, e->dma_addr, e->dma_len,
+					   dir);
+
+		val = FIELD_PREP(QDMA_DESC_LEN_MASK, e->dma_len);
+		WRITE_ONCE(desc->ctrl, cpu_to_le32(val));
+		WRITE_ONCE(desc->addr, cpu_to_le32(e->dma_addr));
+		val = FIELD_PREP(QDMA_DESC_NEXT_ID_MASK, q->head);
+		WRITE_ONCE(desc->data, cpu_to_le32(val));
+		WRITE_ONCE(desc->msg0, 0);
+		WRITE_ONCE(desc->msg1, 0);
+		WRITE_ONCE(desc->msg2, 0);
+		WRITE_ONCE(desc->msg3, 0);
+
+		airoha_qdma_rmw(qdma, REG_RX_CPU_IDX(qid),
+				RX_RING_CPU_IDX_MASK,
+				FIELD_PREP(RX_RING_CPU_IDX_MASK, q->head));
+	}
+
+	return nframes;
+}
+
+static int airoha_qdma_get_gdm_port(struct airoha_eth *eth,
+				    struct airoha_qdma_desc *desc)
+{
+	u32 port, sport, msg1 = le32_to_cpu(desc->msg1);
+
+	sport = FIELD_GET(QDMA_ETH_RXMSG_SPORT_MASK, msg1);
+	switch (sport) {
+	case 0x10 ... 0x14:
+		port = 0;
+		break;
+	case 0x2 ... 0x4:
+		port = sport - 1;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return port >= ARRAY_SIZE(eth->ports) ? -EINVAL : port;
+}
+
+static int airoha_qdma_rx_process(struct airoha_queue *q, int budget)
+{
+	enum dma_data_direction dir = page_pool_get_dma_dir(q->page_pool);
+	struct airoha_qdma *qdma = q->qdma;
+	struct airoha_eth *eth = qdma->eth;
+	int qid = q - &qdma->q_rx[0];
+	int done = 0;
+
+	while (done < budget) {
+		struct airoha_queue_entry *e = &q->entry[q->tail];
+		struct airoha_qdma_desc *desc = &q->desc[q->tail];
+		u32 hash, reason, msg1 = le32_to_cpu(desc->msg1);
+		dma_addr_t dma_addr = le32_to_cpu(desc->addr);
+		struct page *page = virt_to_head_page(e->buf);
+		u32 desc_ctrl = le32_to_cpu(desc->ctrl);
+		struct airoha_gdm_port *port;
+		int data_len, len, p;
+
+		if (!(desc_ctrl & QDMA_DESC_DONE_MASK))
+			break;
+
+		if (!dma_addr)
+			break;
+
+		len = FIELD_GET(QDMA_DESC_LEN_MASK, desc_ctrl);
+		if (!len)
+			break;
+
+		q->tail = (q->tail + 1) % q->ndesc;
+		q->queued--;
+
+		dma_sync_single_for_cpu(eth->dev, dma_addr,
+					SKB_WITH_OVERHEAD(q->buf_size), dir);
+
+		data_len = q->skb ? q->buf_size
+				  : SKB_WITH_OVERHEAD(q->buf_size);
+		if (data_len < len)
+			goto free_frag;
+
+		p = airoha_qdma_get_gdm_port(eth, desc);
+		if (p < 0 || !eth->ports[p])
+			goto free_frag;
+
+		port = eth->ports[p];
+		if (!q->skb) { /* first buffer */
+			q->skb = napi_build_skb(e->buf, q->buf_size);
+			if (!q->skb)
+				goto free_frag;
+
+			__skb_put(q->skb, len);
+			skb_mark_for_recycle(q->skb);
+			q->skb->dev = port->dev;
+			q->skb->protocol = eth_type_trans(q->skb, port->dev);
+			q->skb->ip_summed = CHECKSUM_UNNECESSARY;
+			skb_record_rx_queue(q->skb, qid);
+		} else { /* scattered frame */
+			struct skb_shared_info *shinfo = skb_shinfo(q->skb);
+			int nr_frags = shinfo->nr_frags;
+
+			if (nr_frags >= ARRAY_SIZE(shinfo->frags))
+				goto free_frag;
+
+			skb_add_rx_frag(q->skb, nr_frags, page,
+					e->buf - page_address(page), len,
+					q->buf_size);
+		}
+
+		if (FIELD_GET(QDMA_DESC_MORE_MASK, desc_ctrl))
+			continue;
+
+		if (netdev_uses_dsa(port->dev)) {
+			/* PPE module requires untagged packets to work
+			 * properly and it provides DSA port index via the
+			 * DMA descriptor. Report DSA tag to the DSA stack
+			 * via skb dst info.
+			 */
+			u32 sptag = FIELD_GET(QDMA_ETH_RXMSG_SPTAG,
+					      le32_to_cpu(desc->msg0));
+
+			if (sptag < ARRAY_SIZE(port->dsa_meta) &&
+			    port->dsa_meta[sptag])
+				skb_dst_set_noref(q->skb,
+						  &port->dsa_meta[sptag]->dst);
+		}
+
+		hash = FIELD_GET(AIROHA_RXD4_FOE_ENTRY, msg1);
+		if (hash != AIROHA_RXD4_FOE_ENTRY)
+			skb_set_hash(q->skb, jhash_1word(hash, 0),
+				     PKT_HASH_TYPE_L4);
+
+		reason = FIELD_GET(AIROHA_RXD4_PPE_CPU_REASON, msg1);
+		if (reason == PPE_CPU_REASON_HIT_UNBIND_RATE_REACHED)
+			airoha_ppe_check_skb(eth->ppe, hash);
+
+		done++;
+		napi_gro_receive(&q->napi, q->skb);
+		q->skb = NULL;
+		continue;
+free_frag:
+		page_pool_put_full_page(q->page_pool, page, true);
+		dev_kfree_skb(q->skb);
+		q->skb = NULL;
+	}
+	airoha_qdma_fill_rx_queue(q);
+
+	return done;
+}
+
+static int airoha_qdma_rx_napi_poll(struct napi_struct *napi, int budget)
+{
+	struct airoha_queue *q = container_of(napi, struct airoha_queue, napi);
+	int cur, done = 0;
+
+	do {
+		cur = airoha_qdma_rx_process(q, budget - done);
+		done += cur;
+	} while (cur && done < budget);
+
+	if (done < budget && napi_complete(napi))
+		airoha_qdma_irq_enable(q->qdma, QDMA_INT_REG_IDX1,
+				       RX_DONE_INT_MASK);
+
+	return done;
+}
+
+static int airoha_qdma_init_rx_queue(struct airoha_queue *q,
+				     struct airoha_qdma *qdma, int ndesc)
+{
+	const struct page_pool_params pp_params = {
+		.order = 0,
+		.pool_size = 256,
+		.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
+		.dma_dir = DMA_FROM_DEVICE,
+		.max_len = PAGE_SIZE,
+		.nid = NUMA_NO_NODE,
+		.dev = qdma->eth->dev,
+		.napi = &q->napi,
+	};
+	struct airoha_eth *eth = qdma->eth;
+	int qid = q - &qdma->q_rx[0], thr;
+	dma_addr_t dma_addr;
+
+	q->buf_size = PAGE_SIZE / 2;
+	q->ndesc = ndesc;
+	q->qdma = qdma;
+
+	q->entry = devm_kzalloc(eth->dev, q->ndesc * sizeof(*q->entry),
+				GFP_KERNEL);
+	if (!q->entry)
+		return -ENOMEM;
+
+	q->page_pool = page_pool_create(&pp_params);
+	if (IS_ERR(q->page_pool)) {
+		int err = PTR_ERR(q->page_pool);
+
+		q->page_pool = NULL;
+		return err;
+	}
+
+	q->desc = dmam_alloc_coherent(eth->dev, q->ndesc * sizeof(*q->desc),
+				      &dma_addr, GFP_KERNEL);
+	if (!q->desc)
+		return -ENOMEM;
+
+	netif_napi_add(eth->napi_dev, &q->napi, airoha_qdma_rx_napi_poll);
+
+	airoha_qdma_wr(qdma, REG_RX_RING_BASE(qid), dma_addr);
+	airoha_qdma_rmw(qdma, REG_RX_RING_SIZE(qid),
+			RX_RING_SIZE_MASK,
+			FIELD_PREP(RX_RING_SIZE_MASK, ndesc));
+
+	thr = clamp(ndesc >> 3, 1, 32);
+	airoha_qdma_rmw(qdma, REG_RX_RING_SIZE(qid), RX_RING_THR_MASK,
+			FIELD_PREP(RX_RING_THR_MASK, thr));
+	airoha_qdma_rmw(qdma, REG_RX_DMA_IDX(qid), RX_RING_DMA_IDX_MASK,
+			FIELD_PREP(RX_RING_DMA_IDX_MASK, q->head));
+	airoha_qdma_set(qdma, REG_RX_SCATTER_CFG(qid), RX_RING_SG_EN_MASK);
+
+	airoha_qdma_fill_rx_queue(q);
+
+	return 0;
+}
+
+static void airoha_qdma_cleanup_rx_queue(struct airoha_queue *q)
+{
+	struct airoha_eth *eth = q->qdma->eth;
+
+	while (q->queued) {
+		struct airoha_queue_entry *e = &q->entry[q->tail];
+		struct page *page = virt_to_head_page(e->buf);
+
+		dma_sync_single_for_cpu(eth->dev, e->dma_addr, e->dma_len,
+					page_pool_get_dma_dir(q->page_pool));
+		page_pool_put_full_page(q->page_pool, page, false);
+		q->tail = (q->tail + 1) % q->ndesc;
+		q->queued--;
+	}
+}
+
+static int airoha_qdma_init_rx(struct airoha_qdma *qdma)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(qdma->q_rx); i++) {
+		int err;
+
+		if (!(RX_DONE_INT_MASK & BIT(i))) {
+			/* rx-queue not binded to irq */
+			continue;
+		}
+
+		err = airoha_qdma_init_rx_queue(&qdma->q_rx[i], qdma,
+						RX_DSCP_NUM(i));
+		if (err)
+			return err;
+	}
+
+	return 0;
+}
+
+static int airoha_qdma_tx_napi_poll(struct napi_struct *napi, int budget)
+{
+	struct airoha_tx_irq_queue *irq_q;
+	int id, done = 0, irq_queued;
+	struct airoha_qdma *qdma;
+	struct airoha_eth *eth;
+	u32 status, head;
+
+	irq_q = container_of(napi, struct airoha_tx_irq_queue, napi);
+	qdma = irq_q->qdma;
+	id = irq_q - &qdma->q_tx_irq[0];
+	eth = qdma->eth;
+
+	status = airoha_qdma_rr(qdma, REG_IRQ_STATUS(id));
+	head = FIELD_GET(IRQ_HEAD_IDX_MASK, status);
+	head = head % irq_q->size;
+	irq_queued = FIELD_GET(IRQ_ENTRY_LEN_MASK, status);
+
+	while (irq_queued > 0 && done < budget) {
+		u32 qid, val = irq_q->q[head];
+		struct airoha_qdma_desc *desc;
+		struct airoha_queue_entry *e;
+		struct airoha_queue *q;
+		u32 index, desc_ctrl;
+		struct sk_buff *skb;
+
+		if (val == 0xff)
+			break;
+
+		irq_q->q[head] = 0xff; /* mark as done */
+		head = (head + 1) % irq_q->size;
+		irq_queued--;
+		done++;
+
+		qid = FIELD_GET(IRQ_RING_IDX_MASK, val);
+		if (qid >= ARRAY_SIZE(qdma->q_tx))
+			continue;
+
+		q = &qdma->q_tx[qid];
+		if (!q->ndesc)
+			continue;
+
+		index = FIELD_GET(IRQ_DESC_IDX_MASK, val);
+		if (index >= q->ndesc)
+			continue;
+
+		spin_lock_bh(&q->lock);
+
+		if (!q->queued)
+			goto unlock;
+
+		desc = &q->desc[index];
+		desc_ctrl = le32_to_cpu(desc->ctrl);
+
+		if (!(desc_ctrl & QDMA_DESC_DONE_MASK) &&
+		    !(desc_ctrl & QDMA_DESC_DROP_MASK))
+			goto unlock;
+
+		e = &q->entry[index];
+		skb = e->skb;
+
+		dma_unmap_single(eth->dev, e->dma_addr, e->dma_len,
+				 DMA_TO_DEVICE);
+		memset(e, 0, sizeof(*e));
+		WRITE_ONCE(desc->msg0, 0);
+		WRITE_ONCE(desc->msg1, 0);
+		q->queued--;
+
+		/* completion ring can report out-of-order indexes if hw QoS
+		 * is enabled and packets with different priority are queued
+		 * to same DMA ring. Take into account possible out-of-order
+		 * reports incrementing DMA ring tail pointer
+		 */
+		while (q->tail != q->head && !q->entry[q->tail].dma_addr)
+			q->tail = (q->tail + 1) % q->ndesc;
+
+		if (skb) {
+			u16 queue = skb_get_queue_mapping(skb);
+			struct netdev_queue *txq;
+
+			txq = netdev_get_tx_queue(skb->dev, queue);
+			netdev_tx_completed_queue(txq, 1, skb->len);
+			if (netif_tx_queue_stopped(txq) &&
+			    q->ndesc - q->queued >= q->free_thr)
+				netif_tx_wake_queue(txq);
+
+			dev_kfree_skb_any(skb);
+		}
+unlock:
+		spin_unlock_bh(&q->lock);
+	}
+
+	if (done) {
+		int i, len = done >> 7;
+
+		for (i = 0; i < len; i++)
+			airoha_qdma_rmw(qdma, REG_IRQ_CLEAR_LEN(id),
+					IRQ_CLEAR_LEN_MASK, 0x80);
+		airoha_qdma_rmw(qdma, REG_IRQ_CLEAR_LEN(id),
+				IRQ_CLEAR_LEN_MASK, (done & 0x7f));
+	}
+
+	if (done < budget && napi_complete(napi))
+		airoha_qdma_irq_enable(qdma, QDMA_INT_REG_IDX0,
+				       TX_DONE_INT_MASK(id));
+
+	return done;
+}
+
+static int airoha_qdma_init_tx_queue(struct airoha_queue *q,
+				     struct airoha_qdma *qdma, int size)
+{
+	struct airoha_eth *eth = qdma->eth;
+	int i, qid = q - &qdma->q_tx[0];
+	dma_addr_t dma_addr;
+
+	spin_lock_init(&q->lock);
+	q->ndesc = size;
+	q->qdma = qdma;
+	q->free_thr = 1 + MAX_SKB_FRAGS;
+
+	q->entry = devm_kzalloc(eth->dev, q->ndesc * sizeof(*q->entry),
+				GFP_KERNEL);
+	if (!q->entry)
+		return -ENOMEM;
+
+	q->desc = dmam_alloc_coherent(eth->dev, q->ndesc * sizeof(*q->desc),
+				      &dma_addr, GFP_KERNEL);
+	if (!q->desc)
+		return -ENOMEM;
+
+	for (i = 0; i < q->ndesc; i++) {
+		u32 val;
+
+		val = FIELD_PREP(QDMA_DESC_DONE_MASK, 1);
+		WRITE_ONCE(q->desc[i].ctrl, cpu_to_le32(val));
+	}
+
+	/* xmit ring drop default setting */
+	airoha_qdma_set(qdma, REG_TX_RING_BLOCKING(qid),
+			TX_RING_IRQ_BLOCKING_TX_DROP_EN_MASK);
+
+	airoha_qdma_wr(qdma, REG_TX_RING_BASE(qid), dma_addr);
+	airoha_qdma_rmw(qdma, REG_TX_CPU_IDX(qid), TX_RING_CPU_IDX_MASK,
+			FIELD_PREP(TX_RING_CPU_IDX_MASK, q->head));
+	airoha_qdma_rmw(qdma, REG_TX_DMA_IDX(qid), TX_RING_DMA_IDX_MASK,
+			FIELD_PREP(TX_RING_DMA_IDX_MASK, q->head));
+
+	return 0;
+}
+
+static int airoha_qdma_tx_irq_init(struct airoha_tx_irq_queue *irq_q,
+				   struct airoha_qdma *qdma, int size)
+{
+	int id = irq_q - &qdma->q_tx_irq[0];
+	struct airoha_eth *eth = qdma->eth;
+	dma_addr_t dma_addr;
+
+	netif_napi_add_tx(eth->napi_dev, &irq_q->napi,
+			  airoha_qdma_tx_napi_poll);
+	irq_q->q = dmam_alloc_coherent(eth->dev, size * sizeof(u32),
+				       &dma_addr, GFP_KERNEL);
+	if (!irq_q->q)
+		return -ENOMEM;
+
+	memset(irq_q->q, 0xff, size * sizeof(u32));
+	irq_q->size = size;
+	irq_q->qdma = qdma;
+
+	airoha_qdma_wr(qdma, REG_TX_IRQ_BASE(id), dma_addr);
+	airoha_qdma_rmw(qdma, REG_TX_IRQ_CFG(id), TX_IRQ_DEPT |